--- a/src/HOL/AxClasses/Tutorial/Group.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/AxClasses/Tutorial/Group.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,127 +3,127 @@
Author: Markus Wenzel, TU Muenchen
*)
-theory Group = Main:;
+theory Group = Main:
-subsection {* Monoids and Groups *};
+subsection {* Monoids and Groups *}
consts
times :: "'a => 'a => 'a" (infixl "[*]" 70)
inverse :: "'a => 'a"
- one :: 'a;
+ one :: 'a
axclass
monoid < "term"
assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
left_unit: "one [*] x = x"
- right_unit: "x [*] one = x";
+ right_unit: "x [*] one = x"
axclass
semigroup < "term"
- assoc: "(x [*] y) [*] z = x [*] (y [*] z)";
+ assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
axclass
group < semigroup
left_unit: "one [*] x = x"
- left_inverse: "inverse x [*] x = one";
+ left_inverse: "inverse x [*] x = one"
axclass
agroup < group
- commute: "x [*] y = y [*] x";
+ commute: "x [*] y = y [*] x"
-subsection {* Abstract reasoning *};
+subsection {* Abstract reasoning *}
-theorem group_right_inverse: "x [*] inverse x = (one::'a::group)";
-proof -;
- have "x [*] inverse x = one [*] (x [*] inverse x)";
- by (simp only: group.left_unit);
- also; have "... = one [*] x [*] inverse x";
- by (simp only: semigroup.assoc);
- also; have "... = inverse (inverse x) [*] inverse x [*] x [*] inverse x";
- by (simp only: group.left_inverse);
- also; have "... = inverse (inverse x) [*] (inverse x [*] x) [*] inverse x";
- by (simp only: semigroup.assoc);
- also; have "... = inverse (inverse x) [*] one [*] inverse x";
- by (simp only: group.left_inverse);
- also; have "... = inverse (inverse x) [*] (one [*] inverse x)";
- by (simp only: semigroup.assoc);
- also; have "... = inverse (inverse x) [*] inverse x";
- by (simp only: group.left_unit);
- also; have "... = one";
- by (simp only: group.left_inverse);
- finally; show ?thesis; .;
-qed;
+theorem group_right_inverse: "x [*] inverse x = (one::'a::group)"
+proof -
+ have "x [*] inverse x = one [*] (x [*] inverse x)"
+ by (simp only: group.left_unit)
+ also have "... = one [*] x [*] inverse x"
+ by (simp only: semigroup.assoc)
+ also have "... = inverse (inverse x) [*] inverse x [*] x [*] inverse x"
+ by (simp only: group.left_inverse)
+ also have "... = inverse (inverse x) [*] (inverse x [*] x) [*] inverse x"
+ by (simp only: semigroup.assoc)
+ also have "... = inverse (inverse x) [*] one [*] inverse x"
+ by (simp only: group.left_inverse)
+ also have "... = inverse (inverse x) [*] (one [*] inverse x)"
+ by (simp only: semigroup.assoc)
+ also have "... = inverse (inverse x) [*] inverse x"
+ by (simp only: group.left_unit)
+ also have "... = one"
+ by (simp only: group.left_inverse)
+ finally show ?thesis .
+qed
-theorem group_right_unit: "x [*] one = (x::'a::group)";
-proof -;
- have "x [*] one = x [*] (inverse x [*] x)";
- by (simp only: group.left_inverse);
- also; have "... = x [*] inverse x [*] x";
- by (simp only: semigroup.assoc);
- also; have "... = one [*] x";
- by (simp only: group_right_inverse);
- also; have "... = x";
- by (simp only: group.left_unit);
- finally; show ?thesis; .;
-qed;
+theorem group_right_unit: "x [*] one = (x::'a::group)"
+proof -
+ have "x [*] one = x [*] (inverse x [*] x)"
+ by (simp only: group.left_inverse)
+ also have "... = x [*] inverse x [*] x"
+ by (simp only: semigroup.assoc)
+ also have "... = one [*] x"
+ by (simp only: group_right_inverse)
+ also have "... = x"
+ by (simp only: group.left_unit)
+ finally show ?thesis .
+qed
-subsection {* Abstract instantiation *};
+subsection {* Abstract instantiation *}
-instance monoid < semigroup;
-proof intro_classes;
- fix x y z :: "'a::monoid";
- show "x [*] y [*] z = x [*] (y [*] z)";
- by (rule monoid.assoc);
-qed;
+instance monoid < semigroup
+proof intro_classes
+ fix x y z :: "'a::monoid"
+ show "x [*] y [*] z = x [*] (y [*] z)"
+ by (rule monoid.assoc)
+qed
-instance group < monoid;
-proof intro_classes;
- fix x y z :: "'a::group";
- show "x [*] y [*] z = x [*] (y [*] z)";
- by (rule semigroup.assoc);
- show "one [*] x = x";
- by (rule group.left_unit);
- show "x [*] one = x";
- by (rule group_right_unit);
-qed;
+instance group < monoid
+proof intro_classes
+ fix x y z :: "'a::group"
+ show "x [*] y [*] z = x [*] (y [*] z)"
+ by (rule semigroup.assoc)
+ show "one [*] x = x"
+ by (rule group.left_unit)
+ show "x [*] one = x"
+ by (rule group_right_unit)
+qed
-subsection {* Concrete instantiation *};
+subsection {* Concrete instantiation *}
defs (overloaded)
times_bool_def: "x [*] y == x ~= (y::bool)"
inverse_bool_def: "inverse x == x::bool"
- unit_bool_def: "one == False";
+ unit_bool_def: "one == False"
-instance bool :: agroup;
+instance bool :: agroup
proof (intro_classes,
- unfold times_bool_def inverse_bool_def unit_bool_def);
- fix x y z;
- show "((x ~= y) ~= z) = (x ~= (y ~= z))"; by blast;
- show "(False ~= x) = x"; by blast;
- show "(x ~= x) = False"; by blast;
- show "(x ~= y) = (y ~= x)"; by blast;
-qed;
+ unfold times_bool_def inverse_bool_def unit_bool_def)
+ fix x y z
+ show "((x ~= y) ~= z) = (x ~= (y ~= z))" by blast
+ show "(False ~= x) = x" by blast
+ show "(x ~= x) = False" by blast
+ show "(x ~= y) = (y ~= x)" by blast
+qed
-subsection {* Lifting and Functors *};
+subsection {* Lifting and Functors *}
defs (overloaded)
- times_prod_def: "p [*] q == (fst p [*] fst q, snd p [*] snd q)";
+ times_prod_def: "p [*] q == (fst p [*] fst q, snd p [*] snd q)"
-instance * :: (semigroup, semigroup) semigroup;
-proof (intro_classes, unfold times_prod_def);
- fix p q r :: "'a::semigroup * 'b::semigroup";
+instance * :: (semigroup, semigroup) semigroup
+proof (intro_classes, unfold times_prod_def)
+ fix p q r :: "'a::semigroup * 'b::semigroup"
show
"(fst (fst p [*] fst q, snd p [*] snd q) [*] fst r,
snd (fst p [*] fst q, snd p [*] snd q) [*] snd r) =
(fst p [*] fst (fst q [*] fst r, snd q [*] snd r),
- snd p [*] snd (fst q [*] fst r, snd q [*] snd r))";
- by (simp add: semigroup.assoc);
-qed;
+ snd p [*] snd (fst q [*] fst r, snd q [*] snd r))"
+ by (simp add: semigroup.assoc)
+qed
-end;
+end
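The change throughout this patch is purely syntactic: the terminal ";" of the old Isar outer syntax is dropped, while the statements and proofs themselves stay untouched. A minimal side-by-side sketch (the lemma name is invented for illustration only):
(* old outer syntax, with terminal semicolons *)
lemma unit_self: "one [*] one = (one::'a::monoid)";
  by (simp only: monoid.left_unit);
(* new outer syntax, semicolon-free *)
lemma unit_self: "one [*] one = (one::'a::monoid)"
  by (simp only: monoid.left_unit)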
--- a/src/HOL/AxClasses/Tutorial/Product.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/AxClasses/Tutorial/Product.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,16 +3,16 @@
Author: Markus Wenzel, TU Muenchen
*)
-theory Product = Main:;
+theory Product = Main:
axclass
- product < "term";
+ product < "term"
consts
- product :: "'a::product => 'a => 'a" (infixl "[*]" 70);
+ product :: "'a::product => 'a => 'a" (infixl "[*]" 70)
-instance bool :: product;
- by intro_classes;
+instance bool :: product
+ by intro_classes
defs (overloaded)
- product_bool_def: "x [*] y == x & y";
+ product_bool_def: "x [*] y == x & y"
-end;
+end
--- a/src/HOL/AxClasses/Tutorial/Semigroups.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/AxClasses/Tutorial/Semigroups.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,18 +3,18 @@
Author: Markus Wenzel, TU Muenchen
*)
-theory Semigroups = Main:;
+theory Semigroups = Main:
consts
- times :: "'a => 'a => 'a" (infixl "[*]" 70);
+ times :: "'a => 'a => 'a" (infixl "[*]" 70)
axclass
semigroup < "term"
- assoc: "(x [*] y) [*] z = x [*] (y [*] z)";
+ assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
consts
- plus :: "'a => 'a => 'a" (infixl "[+]" 70);
+ plus :: "'a => 'a => 'a" (infixl "[+]" 70)
axclass
plus_semigroup < "term"
- assoc: "(x [+] y) [+] z = x [+] (y [+] z)";
+ assoc: "(x [+] y) [+] z = x [+] (y [+] z)"
-end;
+end
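Semigroups.thy only declares the two classes and leaves them uninstantiated. In the spirit of the bool instantiation of agroup in Group.thy above, a plus_semigroup instance could be sketched as follows (a hypothetical sketch, taking disjunction as the associative operation on bool):
defs (overloaded)
  plus_bool_def: "x [+] y == x | (y::bool)"
instance bool :: plus_semigroup
proof (intro_classes, unfold plus_bool_def)
  fix x y z
  show "((x | y) | z) = (x | (y | z))" by blast
qed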
--- a/src/HOL/Isar_examples/BasicLogic.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/BasicLogic.thy Sun Sep 17 22:19:02 2000 +0200
@@ -5,52 +5,52 @@
Basic propositional and quantifier reasoning.
*)
-header {* Basic logical reasoning *};
+header {* Basic logical reasoning *}
-theory BasicLogic = Main:;
+theory BasicLogic = Main:
-subsection {* Pure backward reasoning *};
+subsection {* Pure backward reasoning *}
text {*
In order to get a first idea of what Isabelle/Isar proof documents may
look like, we consider the propositions $I$, $K$, and $S$. The
following (rather explicit) proofs should require little extra
explanation.
-*};
+*}
-lemma I: "A --> A";
-proof;
- assume A;
- show A; by assumption;
-qed;
+lemma I: "A --> A"
+proof
+ assume A
+ show A by assumption
+qed
-lemma K: "A --> B --> A";
-proof;
- assume A;
- show "B --> A";
- proof;
- show A; by assumption;
- qed;
-qed;
+lemma K: "A --> B --> A"
+proof
+ assume A
+ show "B --> A"
+ proof
+ show A by assumption
+ qed
+qed
-lemma S: "(A --> B --> C) --> (A --> B) --> A --> C";
-proof;
- assume "A --> B --> C";
- show "(A --> B) --> A --> C";
- proof;
- assume "A --> B";
- show "A --> C";
- proof;
- assume A;
- show C;
- proof (rule mp);
- show "B --> C"; by (rule mp);
- show B; by (rule mp);
- qed;
- qed;
- qed;
-qed;
+lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
+proof
+ assume "A --> B --> C"
+ show "(A --> B) --> A --> C"
+ proof
+ assume "A --> B"
+ show "A --> C"
+ proof
+ assume A
+ show C
+ proof (rule mp)
+ show "B --> C" by (rule mp)
+ show B by (rule mp)
+ qed
+ qed
+ qed
+qed
text {*
Isar provides several ways to fine-tune the reasoning, avoiding
@@ -59,13 +59,13 @@
way, even without referring to any automated proof tools yet.
First of all, proof by assumption may be abbreviated as a single dot.
-*};
+*}
-lemma "A --> A";
-proof;
- assume A;
- show A; .;
-qed;
+lemma "A --> A"
+proof
+ assume A
+ show A .
+qed
text {*
In fact, concluding any (sub-)proof already involves solving any
@@ -73,11 +73,11 @@
trivial operation, as proof by assumption may involve full
higher-order unification.}. Thus we may skip the rather vacuous body
of the above proof as well.
-*};
+*}
-lemma "A --> A";
-proof;
-qed;
+lemma "A --> A"
+proof
+qed
text {*
Note that the \isacommand{proof} command refers to the $\idt{rule}$
@@ -85,22 +85,22 @@
single rule, as determined from the syntactic form of the statements
involved. The \isacommand{by} command abbreviates any proof with
empty body, so the proof may be further pruned.
-*};
+*}
-lemma "A --> A";
- by rule;
+lemma "A --> A"
+ by rule
text {*
Proof by a single rule may be abbreviated as double-dot.
-*};
+*}
-lemma "A --> A"; ..;
+lemma "A --> A" ..
text {*
Thus we have arrived at an adequate representation of the proof of a
tautology that holds by a single standard rule.\footnote{Apparently,
the rule here is implication introduction.}
-*};
+*}
text {*
Let us also reconsider $K$. Its statement is composed of iterated
@@ -110,20 +110,20 @@
The $\idt{intro}$ proof method repeatedly decomposes a goal's
conclusion.\footnote{The dual method is $\idt{elim}$, acting on a
goal's premises.}
-*};
+*}
-lemma "A --> B --> A";
-proof intro;
- assume A;
- show A; .;
-qed;
+lemma "A --> B --> A"
+proof intro
+ assume A
+ show A .
+qed
text {*
Again, the body may be collapsed.
-*};
+*}
-lemma "A --> B --> A";
- by intro;
+lemma "A --> B --> A"
+ by intro
text {*
Just like $\idt{rule}$, the $\idt{intro}$ and $\idt{elim}$ proof
@@ -140,10 +140,10 @@
terminal steps that solve a goal completely are usually performed by
actual automated proof methods (such as
\isacommand{by}~$\idt{blast}$).
-*};
+*}
-subsection {* Variations of backward vs.\ forward reasoning *};
+subsection {* Variations of backward vs.\ forward reasoning *}
text {*
Certainly, any proof may be performed in backward-style only. On the
@@ -154,17 +154,17 @@
A$.
The first version is purely backward.
-*};
+*}
-lemma "A & B --> B & A";
-proof;
- assume "A & B";
- show "B & A";
- proof;
- show B; by (rule conjunct2);
- show A; by (rule conjunct1);
- qed;
-qed;
+lemma "A & B --> B & A"
+proof
+ assume "A & B"
+ show "B & A"
+ proof
+ show B by (rule conjunct2)
+ show A by (rule conjunct1)
+ qed
+qed
text {*
Above, the $\idt{conjunct}_{1/2}$ projection rules had to be named
@@ -174,17 +174,17 @@
current facts, enabling the use of double-dot proofs. Note that
\isacommand{from} already does forward-chaining, involving the
\name{conjE} rule here.
-*};
+*}
-lemma "A & B --> B & A";
-proof;
- assume "A & B";
- show "B & A";
- proof;
- from prems; show B; ..;
- from prems; show A; ..;
- qed;
-qed;
+lemma "A & B --> B & A"
+proof
+ assume "A & B"
+ show "B & A"
+ proof
+ from prems show B ..
+ from prems show A ..
+ qed
+qed
text {*
In the next version, we move the forward step one level upwards.
@@ -194,48 +194,48 @@
introduction. The resulting proof structure directly corresponds to
that of the $\name{conjE}$ rule, including the repeated goal
proposition that is abbreviated as $\var{thesis}$ below.
-*};
+*}
-lemma "A & B --> B & A";
-proof;
- assume "A & B";
- then; show "B & A";
- proof -- {* rule \name{conjE} of $A \conj B$ *};
- assume A B;
- show ?thesis; .. -- {* rule \name{conjI} of $B \conj A$ *};
- qed;
-qed;
+lemma "A & B --> B & A"
+proof
+ assume "A & B"
+ then show "B & A"
+ proof -- {* rule \name{conjE} of $A \conj B$ *}
+ assume A B
+ show ?thesis .. -- {* rule \name{conjI} of $B \conj A$ *}
+ qed
+qed
text {*
In the subsequent version we flatten the structure of the main body
by doing forward reasoning all the time. Only the outermost
decomposition step is left as backward.
-*};
+*}
-lemma "A & B --> B & A";
-proof;
- assume ab: "A & B";
- from ab; have a: A; ..;
- from ab; have b: B; ..;
- from b a; show "B & A"; ..;
-qed;
+lemma "A & B --> B & A"
+proof
+ assume ab: "A & B"
+ from ab have a: A ..
+ from ab have b: B ..
+ from b a show "B & A" ..
+qed
text {*
We can still push forward-reasoning a bit further, even at the risk
of getting ridiculous. Note that we force the initial proof step to
do nothing here, by referring to the ``-'' proof method.
-*};
+*}
-lemma "A & B --> B & A";
-proof -;
- {;
- assume ab: "A & B";
- from ab; have a: A; ..;
- from ab; have b: B; ..;
- from b a; have "B & A"; ..;
- };
- thus ?thesis; .. -- {* rule \name{impI} *};
-qed;
+lemma "A & B --> B & A"
+proof -
+ {
+ assume ab: "A & B"
+ from ab have a: A ..
+ from ab have b: B ..
+ from b a have "B & A" ..
+ }
+ thus ?thesis .. -- {* rule \name{impI} *}
+qed
text {*
\medskip With these examples we have shifted through a whole range
@@ -252,7 +252,7 @@
etc., rules (and methods) on the one hand vs.\ facts on the other
hand have to be emphasized in an appropriate way. This requires the
proof writer to develop good taste, and some practice, of course.
-*};
+*}
text {*
For our example the most appropriate way of reasoning is probably the
@@ -261,47 +261,47 @@
abbreviates \isacommand{then}~\isacommand{show}.\footnote{In the same
vein, \isacommand{hence} abbreviates
\isacommand{then}~\isacommand{have}.}
-*};
+*}
-lemma "A & B --> B & A";
-proof;
- assume "A & B";
- thus "B & A";
- proof;
- assume A B;
- show ?thesis; ..;
- qed;
-qed;
+lemma "A & B --> B & A"
+proof
+ assume "A & B"
+ thus "B & A"
+ proof
+ assume A B
+ show ?thesis ..
+ qed
+qed
-subsection {* A few examples from ``Introduction to Isabelle'' *};
+subsection {* A few examples from ``Introduction to Isabelle'' *}
text {*
We rephrase some of the basic reasoning examples of
\cite{isabelle-intro}, using HOL rather than FOL.
-*};
+*}
-subsubsection {* A propositional proof *};
+subsubsection {* A propositional proof *}
text {*
We consider the proposition $P \disj P \impl P$. The proof below
involves forward-chaining from $P \disj P$, followed by an explicit
case-analysis on the two \emph{identical} cases.
-*};
+*}
-lemma "P | P --> P";
-proof;
- assume "P | P";
- thus P;
+lemma "P | P --> P"
+proof
+ assume "P | P"
+ thus P
proof -- {*
rule \name{disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
- *};
- assume P; show P; .;
- next;
- assume P; show P; .;
- qed;
-qed;
+ *}
+ assume P show P .
+ next
+ assume P show P .
+ qed
+qed
text {*
Case splits are \emph{not} hardwired into the Isar language as a
@@ -325,34 +325,34 @@
\medskip In our example the situation is even simpler, since the two
cases actually coincide. Consequently the proof may be rephrased as
follows.
-*};
+*}
-lemma "P | P --> P";
-proof;
- assume "P | P";
- thus P;
- proof;
- assume P;
- show P; .;
- show P; .;
- qed;
-qed;
+lemma "P | P --> P"
+proof
+ assume "P | P"
+ thus P
+ proof
+ assume P
+ show P .
+ show P .
+ qed
+qed
text {*
Again, the rather vacuous body of the proof may be collapsed. Thus
the case analysis degenerates into two assumption steps, which are
implicitly performed when concluding the single rule step of the
double-dot proof as follows.
-*};
+*}
-lemma "P | P --> P";
-proof;
- assume "P | P";
- thus P; ..;
-qed;
+lemma "P | P --> P"
+proof
+ assume "P | P"
+ thus P ..
+qed
-subsubsection {* A quantifier proof *};
+subsubsection {* A quantifier proof *}
text {*
To illustrate quantifier reasoning, let us prove $(\ex x P \ap (f \ap
@@ -366,20 +366,20 @@
the \isacommand{fix} command augments the context by some new
``arbitrary, but fixed'' element; the \isacommand{is} annotation
binds term abbreviations by higher-order pattern matching.
-*};
+*}
-lemma "(EX x. P (f x)) --> (EX x. P x)";
-proof;
- assume "EX x. P (f x)";
- thus "EX x. P x";
+lemma "(EX x. P (f x)) --> (EX x. P x)"
+proof
+ assume "EX x. P (f x)"
+ thus "EX x. P x"
proof (rule exE) -- {*
rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
- *};
- fix a;
- assume "P (f a)" (is "P ?witness");
- show ?thesis; by (rule exI [of P ?witness]);
- qed;
-qed;
+ *}
+ fix a
+ assume "P (f a)" (is "P ?witness")
+ show ?thesis by (rule exI [of P ?witness])
+ qed
+qed
text {*
While explicit rule instantiation may occasionally improve
@@ -388,32 +388,32 @@
structural clues for the system to infer both the rules and their
instances (by higher-order unification). Thus we may as well prune
the text as follows.
-*};
+*}
-lemma "(EX x. P (f x)) --> (EX x. P x)";
-proof;
- assume "EX x. P (f x)";
- thus "EX x. P x";
- proof;
- fix a;
- assume "P (f a)";
- show ?thesis; ..;
- qed;
-qed;
+lemma "(EX x. P (f x)) --> (EX x. P x)"
+proof
+ assume "EX x. P (f x)"
+ thus "EX x. P x"
+ proof
+ fix a
+ assume "P (f a)"
+ show ?thesis ..
+ qed
+qed
text {*
Explicit $\exists$-elimination as seen above can become quite
cumbersome in practice. The derived Isar language element
``\isakeyword{obtain}'' provides a more convenient way to do
generalized existence reasoning.
-*};
+*}
-lemma "(EX x. P (f x)) --> (EX x. P x)";
-proof;
- assume "EX x. P (f x)";
- then; obtain a where "P (f a)"; by blast;
- thus "EX x. P x"; ..;
-qed;
+lemma "(EX x. P (f x)) --> (EX x. P x)"
+proof
+ assume "EX x. P (f x)"
+ then obtain a where "P (f a)" by blast
+ thus "EX x. P x" ..
+qed
text {*
Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
@@ -423,29 +423,29 @@
reasoning involved here, any result exported from the context of an
\isakeyword{obtain} statement may \emph{not} refer to the parameters
introduced there.
-*};
+*}
-subsubsection {* Deriving rules in Isabelle *};
+subsubsection {* Deriving rules in Isabelle *}
text {*
We derive the conjunction elimination rule from the corresponding
projections. The proof is quite straightforward, since
Isabelle/Isar supports non-atomic goals and assumptions fully
transparently.
-*};
+*}
-theorem conjE: "A & B ==> (A ==> B ==> C) ==> C";
-proof -;
- assume "A & B";
- assume r: "A ==> B ==> C";
- show C;
- proof (rule r);
- show A; by (rule conjunct1);
- show B; by (rule conjunct2);
- qed;
-qed;
+theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
+proof -
+ assume "A & B"
+ assume r: "A ==> B ==> C"
+ show C
+ proof (rule r)
+ show A by (rule conjunct1)
+ show B by (rule conjunct2)
+ qed
+qed
text {*
Note that classic Isabelle handles higher rules in a slightly
@@ -454,6 +454,6 @@
\texttt{goal} command to decompose the rule into premises and
conclusion. The actual result would then emerge by discharging of
the context at \texttt{qed} time.
-*};
+*}
-end;
+end
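The footnote above introduces hence and thus as abbreviations of then have and then show, but BasicLogic.thy itself only ever uses thus. A hedged variant of the flattened conjunction proof, replacing the first forward step by hence (purely illustrative):
lemma "A & B --> B & A"
proof
  assume ab: "A & B"
  hence b: B ..
  from ab have a: A ..
  from b a show "B & A" ..
qed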
--- a/src/HOL/Isar_examples/Cantor.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/Cantor.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,14 +3,14 @@
Author: Markus Wenzel, TU Muenchen
*)
-header {* Cantor's Theorem *};
+header {* Cantor's Theorem *}
-theory Cantor = Main:;
+theory Cantor = Main:
text_raw {*
\footnote{This is an Isar version of the final example of the
Isabelle/HOL manual \cite{isabelle-HOL}.}
-*};
+*}
text {*
Cantor's Theorem states that every set has more subsets than it has
@@ -28,33 +28,33 @@
\bigskip We first consider a slightly awkward version of the proof,
with the innermost reasoning expressed quite naively.
-*};
+*}
-theorem "EX S. S ~: range (f :: 'a => 'a set)";
-proof;
- let ?S = "{x. x ~: f x}";
- show "?S ~: range f";
- proof;
- assume "?S : range f";
- thus False;
- proof;
- fix y;
- assume "?S = f y";
- thus ?thesis;
- proof (rule equalityCE);
- assume in_S: "y : ?S";
- assume in_fy: "y : f y";
- from in_S; have notin_fy: "y ~: f y"; ..;
- from notin_fy in_fy; show ?thesis; by contradiction;
- next;
- assume notin_S: "y ~: ?S";
- assume notin_fy: "y ~: f y";
- from notin_S; have in_fy: "y : f y"; ..;
- from notin_fy in_fy; show ?thesis; by contradiction;
- qed;
- qed;
- qed;
-qed;
+theorem "EX S. S ~: range (f :: 'a => 'a set)"
+proof
+ let ?S = "{x. x ~: f x}"
+ show "?S ~: range f"
+ proof
+ assume "?S : range f"
+ thus False
+ proof
+ fix y
+ assume "?S = f y"
+ thus ?thesis
+ proof (rule equalityCE)
+ assume in_S: "y : ?S"
+ assume in_fy: "y : f y"
+ from in_S have notin_fy: "y ~: f y" ..
+ from notin_fy in_fy show ?thesis by contradiction
+ next
+ assume notin_S: "y ~: ?S"
+ assume notin_fy: "y ~: f y"
+ from notin_S have in_fy: "y : f y" ..
+ from notin_fy in_fy show ?thesis by contradiction
+ qed
+ qed
+ qed
+qed
text {*
The following version of the proof essentially does the same
@@ -67,31 +67,31 @@
basic logical structure has to be left intact, though. In
particular, assumptions ``belonging'' to some goal have to be
introduced \emph{before} its corresponding \isacommand{show}.}
-*};
+*}
-theorem "EX S. S ~: range (f :: 'a => 'a set)";
-proof;
- let ?S = "{x. x ~: f x}";
- show "?S ~: range f";
- proof;
- assume "?S : range f";
- thus False;
- proof;
- fix y;
- assume "?S = f y";
- thus ?thesis;
- proof (rule equalityCE);
- assume "y : f y";
- assume "y : ?S"; hence "y ~: f y"; ..;
- thus ?thesis; by contradiction;
- next;
- assume "y ~: f y";
- assume "y ~: ?S"; hence "y : f y"; ..;
- thus ?thesis; by contradiction;
- qed;
- qed;
- qed;
-qed;
+theorem "EX S. S ~: range (f :: 'a => 'a set)"
+proof
+ let ?S = "{x. x ~: f x}"
+ show "?S ~: range f"
+ proof
+ assume "?S : range f"
+ thus False
+ proof
+ fix y
+ assume "?S = f y"
+ thus ?thesis
+ proof (rule equalityCE)
+ assume "y : f y"
+ assume "y : ?S" hence "y ~: f y" ..
+ thus ?thesis by contradiction
+ next
+ assume "y ~: f y"
+ assume "y ~: ?S" hence "y : f y" ..
+ thus ?thesis by contradiction
+ qed
+ qed
+ qed
+qed
text {*
How much creativity is required? As it happens, Isabelle can prove
@@ -100,10 +100,10 @@
through the large search space. The context of Isabelle's classical
prover contains rules for the relevant constructs of HOL's set
theory.
-*};
+*}
-theorem "EX S. S ~: range (f :: 'a => 'a set)";
- by best;
+theorem "EX S. S ~: range (f :: 'a => 'a set)"
+ by best
text {*
While this establishes the same theorem internally, we do not get any
@@ -111,6 +111,6 @@
transform internal system-level representations of Isabelle proofs
back into Isar text. Writing intelligible proof documents
really is a creative process, after all.
-*};
+*}
-end;
+end
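Both elaborations above hinge on the impossible equivalence between y : f y and y ~: f y that arises from the assumption ?S = f y. Its propositional core can be isolated in a one-line sketch (the lemma name is hypothetical):
lemma diag_contra: "~ (P = (~ P))"
  by blast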
--- a/src/HOL/Isar_examples/ExprCompiler.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/ExprCompiler.thy Sun Sep 17 22:19:02 2000 +0200
@@ -5,75 +5,75 @@
Correctness of a simple expression/stack-machine compiler.
*)
-header {* Correctness of a simple expression compiler *};
+header {* Correctness of a simple expression compiler *}
-theory ExprCompiler = Main:;
+theory ExprCompiler = Main:
text {*
This is a (rather trivial) example of program verification. We model
a compiler for translating expressions to stack machine instructions,
and prove its correctness wrt.\ some evaluation semantics.
-*};
+*}
-subsection {* Binary operations *};
+subsection {* Binary operations *}
text {*
Binary operations are just functions over some type of values. This
is both for abstract syntax and semantics, i.e.\ we use a ``shallow
embedding'' here.
-*};
+*}
types
- 'val binop = "'val => 'val => 'val";
+ 'val binop = "'val => 'val => 'val"
-subsection {* Expressions *};
+subsection {* Expressions *}
text {*
The language of expressions is defined as an inductive type,
consisting of variables, constants, and binary operations on
expressions.
-*};
+*}
datatype ('adr, 'val) expr =
Variable 'adr |
Constant 'val |
- Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr";
+ Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
text {*
Evaluation (wrt.\ some environment of variable assignments) is
defined by primitive recursion over the structure of expressions.
-*};
+*}
consts
- eval :: "('adr, 'val) expr => ('adr => 'val) => 'val";
+ eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
primrec
"eval (Variable x) env = env x"
"eval (Constant c) env = c"
- "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)";
+ "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
-subsection {* Machine *};
+subsection {* Machine *}
text {*
Next we model a simple stack machine, with three instructions.
-*};
+*}
datatype ('adr, 'val) instr =
Const 'val |
Load 'adr |
- Apply "'val binop";
+ Apply "'val binop"
text {*
Execution of a list of stack machine instructions is easily defined
as follows.
-*};
+*}
consts
exec :: "(('adr, 'val) instr) list
- => 'val list => ('adr => 'val) => 'val list";
+ => 'val list => ('adr => 'val) => 'val list"
primrec
"exec [] stack env = stack"
@@ -82,62 +82,62 @@
Const c => exec instrs (c # stack) env
| Load x => exec instrs (env x # stack) env
| Apply f => exec instrs (f (hd stack) (hd (tl stack))
- # (tl (tl stack))) env)";
+ # (tl (tl stack))) env)"
constdefs
execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
- "execute instrs env == hd (exec instrs [] env)";
+ "execute instrs env == hd (exec instrs [] env)"
-subsection {* Compiler *};
+subsection {* Compiler *}
text {*
We are ready to define the compilation function of expressions to
lists of stack machine instructions.
-*};
+*}
consts
- compile :: "('adr, 'val) expr => (('adr, 'val) instr) list";
+ compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
primrec
"compile (Variable x) = [Load x]"
"compile (Constant c) = [Const c]"
- "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]";
+ "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
text {*
The main result of this development is the correctness theorem for
$\idt{compile}$. We first establish a lemma about $\idt{exec}$ and
list append.
-*};
+*}
lemma exec_append:
"ALL stack. exec (xs @ ys) stack env =
- exec ys (exec xs stack env) env" (is "?P xs");
-proof (induct ?P xs type: list);
- show "?P []"; by simp;
+ exec ys (exec xs stack env) env" (is "?P xs")
+proof (induct ?P xs type: list)
+ show "?P []" by simp
- fix x xs; assume "?P xs";
- show "?P (x # xs)" (is "?Q x");
- proof (induct ?Q x type: instr);
- show "!!val. ?Q (Const val)"; by (simp!);
- show "!!adr. ?Q (Load adr)"; by (simp!);
- show "!!fun. ?Q (Apply fun)"; by (simp!);
- qed;
-qed;
+ fix x xs assume "?P xs"
+ show "?P (x # xs)" (is "?Q x")
+ proof (induct ?Q x type: instr)
+ show "!!val. ?Q (Const val)" by (simp!)
+ show "!!adr. ?Q (Load adr)" by (simp!)
+ show "!!fun. ?Q (Apply fun)" by (simp!)
+ qed
+qed
-theorem correctness: "execute (compile e) env = eval e env";
-proof -;
+theorem correctness: "execute (compile e) env = eval e env"
+proof -
have "ALL stack. exec (compile e) stack env =
- eval e env # stack" (is "?P e");
- proof (induct ?P e type: expr);
- show "!!adr. ?P (Variable adr)"; by simp;
- show "!!val. ?P (Constant val)"; by simp;
- show "!!fun e1 e2. (?P e1 ==> ?P e2 ==> ?P (Binop fun e1 e2))";
- by (simp add: exec_append);
- qed;
- thus ?thesis; by (simp add: execute_def);
-qed;
+ eval e env # stack" (is "?P e")
+ proof (induct ?P e type: expr)
+ show "!!adr. ?P (Variable adr)" by simp
+ show "!!val. ?P (Constant val)" by simp
+ show "!!fun e1 e2. (?P e1 ==> ?P e2 ==> ?P (Binop fun e1 e2))"
+ by (simp add: exec_append)
+ qed
+ thus ?thesis by (simp add: execute_def)
+qed
text {*
@@ -146,103 +146,103 @@
Subsequently, the same reasoning is elaborated in detail --- at most
one recursive function definition is used at a time. Thus we get a
better idea of what is actually going on.
-*};
+*}
lemma exec_append:
"ALL stack. exec (xs @ ys) stack env
- = exec ys (exec xs stack env) env" (is "?P xs");
-proof (induct ?P xs);
- show "?P []" (is "ALL s. ?Q s");
- proof;
- fix s; have "exec ([] @ ys) s env = exec ys s env"; by simp;
- also; have "... = exec ys (exec [] s env) env"; by simp;
- finally; show "?Q s"; .;
- qed;
- fix x xs; assume hyp: "?P xs";
- show "?P (x # xs)";
- proof (induct x);
- fix val;
- show "?P (Const val # xs)" (is "ALL s. ?Q s");
- proof;
- fix s;
+ = exec ys (exec xs stack env) env" (is "?P xs")
+proof (induct ?P xs)
+ show "?P []" (is "ALL s. ?Q s")
+ proof
+ fix s have "exec ([] @ ys) s env = exec ys s env" by simp
+ also have "... = exec ys (exec [] s env) env" by simp
+ finally show "?Q s" .
+ qed
+ fix x xs assume hyp: "?P xs"
+ show "?P (x # xs)"
+ proof (induct x)
+ fix val
+ show "?P (Const val # xs)" (is "ALL s. ?Q s")
+ proof
+ fix s
have "exec ((Const val # xs) @ ys) s env =
- exec (Const val # xs @ ys) s env";
- by simp;
- also; have "... = exec (xs @ ys) (val # s) env"; by simp;
- also; from hyp;
- have "... = exec ys (exec xs (val # s) env) env"; ..;
- also; have "... = exec ys (exec (Const val # xs) s env) env";
- by simp;
- finally; show "?Q s"; .;
- qed;
- next;
- fix adr; from hyp; show "?P (Load adr # xs)"; by simp -- {* same as above *};
- next;
- fix fun;
- show "?P (Apply fun # xs)" (is "ALL s. ?Q s");
- proof;
- fix s;
+ exec (Const val # xs @ ys) s env"
+ by simp
+ also have "... = exec (xs @ ys) (val # s) env" by simp
+ also from hyp
+ have "... = exec ys (exec xs (val # s) env) env" ..
+ also have "... = exec ys (exec (Const val # xs) s env) env"
+ by simp
+ finally show "?Q s" .
+ qed
+ next
+ fix adr from hyp show "?P (Load adr # xs)" by simp -- {* same as above *}
+ next
+ fix fun
+ show "?P (Apply fun # xs)" (is "ALL s. ?Q s")
+ proof
+ fix s
have "exec ((Apply fun # xs) @ ys) s env =
- exec (Apply fun # xs @ ys) s env";
- by simp;
- also; have "... =
- exec (xs @ ys) (fun (hd s) (hd (tl s)) # (tl (tl s))) env";
- by simp;
- also; from hyp; have "... =
- exec ys (exec xs (fun (hd s) (hd (tl s)) # tl (tl s)) env) env";
- ..;
- also; have "... = exec ys (exec (Apply fun # xs) s env) env"; by simp;
- finally; show "?Q s"; .;
- qed;
- qed;
-qed;
+ exec (Apply fun # xs @ ys) s env"
+ by simp
+ also have "... =
+ exec (xs @ ys) (fun (hd s) (hd (tl s)) # (tl (tl s))) env"
+ by simp
+ also from hyp have "... =
+ exec ys (exec xs (fun (hd s) (hd (tl s)) # tl (tl s)) env) env"
+ ..
+ also have "... = exec ys (exec (Apply fun # xs) s env) env" by simp
+ finally show "?Q s" .
+ qed
+ qed
+qed
-theorem correctness: "execute (compile e) env = eval e env";
-proof -;
+theorem correctness: "execute (compile e) env = eval e env"
+proof -
have exec_compile:
"ALL stack. exec (compile e) stack env = eval e env # stack"
- (is "?P e");
- proof (induct e);
- fix adr; show "?P (Variable adr)" (is "ALL s. ?Q s");
- proof;
- fix s;
- have "exec (compile (Variable adr)) s env = exec [Load adr] s env";
- by simp;
- also; have "... = env adr # s"; by simp;
- also; have "env adr = eval (Variable adr) env"; by simp;
- finally; show "?Q s"; .;
- qed;
- next;
- fix val; show "?P (Constant val)"; by simp -- {* same as above *};
- next;
- fix fun e1 e2; assume hyp1: "?P e1" and hyp2: "?P e2";
- show "?P (Binop fun e1 e2)" (is "ALL s. ?Q s");
- proof;
- fix s; have "exec (compile (Binop fun e1 e2)) s env
- = exec (compile e2 @ compile e1 @ [Apply fun]) s env"; by simp;
- also; have "... = exec [Apply fun]
- (exec (compile e1) (exec (compile e2) s env) env) env";
- by (simp only: exec_append);
- also; from hyp2;
- have "exec (compile e2) s env = eval e2 env # s"; ..;
- also; from hyp1;
- have "exec (compile e1) ... env = eval e1 env # ..."; ..;
- also; have "exec [Apply fun] ... env =
- fun (hd ...) (hd (tl ...)) # (tl (tl ...))"; by simp;
- also; have "... = fun (eval e1 env) (eval e2 env) # s"; by simp;
- also; have "fun (eval e1 env) (eval e2 env) =
- eval (Binop fun e1 e2) env";
- by simp;
- finally; show "?Q s"; .;
- qed;
- qed;
+ (is "?P e")
+ proof (induct e)
+ fix adr show "?P (Variable adr)" (is "ALL s. ?Q s")
+ proof
+ fix s
+ have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
+ by simp
+ also have "... = env adr # s" by simp
+ also have "env adr = eval (Variable adr) env" by simp
+ finally show "?Q s" .
+ qed
+ next
+ fix val show "?P (Constant val)" by simp -- {* same as above *}
+ next
+ fix fun e1 e2 assume hyp1: "?P e1" and hyp2: "?P e2"
+ show "?P (Binop fun e1 e2)" (is "ALL s. ?Q s")
+ proof
+ fix s have "exec (compile (Binop fun e1 e2)) s env
+ = exec (compile e2 @ compile e1 @ [Apply fun]) s env" by simp
+ also have "... = exec [Apply fun]
+ (exec (compile e1) (exec (compile e2) s env) env) env"
+ by (simp only: exec_append)
+ also from hyp2
+ have "exec (compile e2) s env = eval e2 env # s" ..
+ also from hyp1
+ have "exec (compile e1) ... env = eval e1 env # ..." ..
+ also have "exec [Apply fun] ... env =
+ fun (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
+ also have "... = fun (eval e1 env) (eval e2 env) # s" by simp
+ also have "fun (eval e1 env) (eval e2 env) =
+ eval (Binop fun e1 e2) env"
+ by simp
+ finally show "?Q s" .
+ qed
+ qed
- have "execute (compile e) env = hd (exec (compile e) [] env)";
- by (simp add: execute_def);
- also; from exec_compile;
- have "exec (compile e) [] env = [eval e env]"; ..;
- also; have "hd ... = eval e env"; by simp;
- finally; show ?thesis; .;
-qed;
+ have "execute (compile e) env = hd (exec (compile e) [] env)"
+ by (simp add: execute_def)
+ also from exec_compile
+ have "exec (compile e) [] env = [eval e env]" ..
+ also have "hd ... = eval e env" by simp
+ finally show ?thesis .
+qed
-end;
+end
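A small concrete run may help to see what the correctness statement amounts to: compiling Binop f (Variable x) (Constant c) yields [Const c, Load x, Apply f]; executed on the empty stack, this pushes c, then env x, and finally replaces both by f (env x) c, which is exactly eval (Binop f (Variable x) (Constant c)) env. As a hedged sketch (the lemma name is invented; it should follow by plain simplification):
lemma execute_example:
  "execute (compile (Binop f (Variable x) (Constant c))) env = f (env x) c"
  by (simp add: execute_def)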
--- a/src/HOL/Isar_examples/Fibonacci.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/Fibonacci.thy Sun Sep 17 22:19:02 2000 +0200
@@ -13,153 +13,153 @@
(Addison-Wesley, 1989)
*)
-header {* Fib and Gcd commute *};
+header {* Fib and Gcd commute *}
-theory Fibonacci = Primes:;
+theory Fibonacci = Primes:
text_raw {*
\footnote{Isar version by Gertrud Bauer. Original tactic script by
Larry Paulson. A few proofs of laws taken from
\cite{Concrete-Math}.}
-*};
+*}
-subsection {* Fibonacci numbers *};
+subsection {* Fibonacci numbers *}
-consts fib :: "nat => nat";
+consts fib :: "nat => nat"
recdef fib less_than
"fib 0 = 0"
"fib 1 = 1"
- "fib (Suc (Suc x)) = fib x + fib (Suc x)";
+ "fib (Suc (Suc x)) = fib x + fib (Suc x)"
-lemma [simp]: "0 < fib (Suc n)";
- by (induct n rule: fib.induct) (simp+);
+lemma [simp]: "0 < fib (Suc n)"
+ by (induct n rule: fib.induct) (simp+)
-text {* Alternative induction rule. *};
+text {* Alternative induction rule. *}
theorem fib_induct:
- "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P n";
- by (induct rule: fib.induct, simp+);
+ "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P n"
+ by (induct rule: fib.induct, simp+)
-subsection {* Fib and gcd commute *};
+subsection {* Fib and gcd commute *}
-text {* A few laws taken from \cite{Concrete-Math}. *};
+text {* A few laws taken from \cite{Concrete-Math}. *}
lemma fib_add:
"fib (n + k + 1) = fib (k + 1) * fib (n + 1) + fib k * fib n"
(is "?P n")
- -- {* see \cite[page 280]{Concrete-Math} *};
-proof (induct ?P n rule: fib_induct);
- show "?P 0"; by simp;
- show "?P 1"; by simp;
- fix n;
+ -- {* see \cite[page 280]{Concrete-Math} *}
+proof (induct ?P n rule: fib_induct)
+ show "?P 0" by simp
+ show "?P 1" by simp
+ fix n
have "fib (n + 2 + k + 1)
- = fib (n + k + 1) + fib (n + 1 + k + 1)"; by simp;
- also; assume "fib (n + k + 1)
+ = fib (n + k + 1) + fib (n + 1 + k + 1)" by simp
+ also assume "fib (n + k + 1)
= fib (k + 1) * fib (n + 1) + fib k * fib n"
- (is " _ = ?R1");
- also; assume "fib (n + 1 + k + 1)
+ (is " _ = ?R1")
+ also assume "fib (n + 1 + k + 1)
= fib (k + 1) * fib (n + 1 + 1) + fib k * fib (n + 1)"
- (is " _ = ?R2");
- also; have "?R1 + ?R2
- = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)";
- by (simp add: add_mult_distrib2);
- finally; show "?P (n + 2)"; .;
-qed;
+ (is " _ = ?R2")
+ also have "?R1 + ?R2
+ = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)"
+ by (simp add: add_mult_distrib2)
+ finally show "?P (n + 2)" .
+qed
-lemma gcd_fib_Suc_eq_1: "gcd (fib n, fib (n + 1)) = 1" (is "?P n");
-proof (induct ?P n rule: fib_induct);
- show "?P 0"; by simp;
- show "?P 1"; by simp;
- fix n;
- have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)";
- by simp;
- also; have "gcd (fib (n + 2), ...) = gcd (fib (n + 2), fib (n + 1))";
- by (simp only: gcd_add2');
- also; have "... = gcd (fib (n + 1), fib (n + 1 + 1))";
- by (simp add: gcd_commute);
- also; assume "... = 1";
- finally; show "?P (n + 2)"; .;
-qed;
+lemma gcd_fib_Suc_eq_1: "gcd (fib n, fib (n + 1)) = 1" (is "?P n")
+proof (induct ?P n rule: fib_induct)
+ show "?P 0" by simp
+ show "?P 1" by simp
+ fix n
+ have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)"
+ by simp
+ also have "gcd (fib (n + 2), ...) = gcd (fib (n + 2), fib (n + 1))"
+ by (simp only: gcd_add2')
+ also have "... = gcd (fib (n + 1), fib (n + 1 + 1))"
+ by (simp add: gcd_commute)
+ also assume "... = 1"
+ finally show "?P (n + 2)" .
+qed
-lemma gcd_mult_add: "0 < n ==> gcd (n * k + m, n) = gcd (m, n)";
-proof -;
- assume "0 < n";
- hence "gcd (n * k + m, n) = gcd (n, m mod n)";
- by (simp add: gcd_non_0 add_commute);
- also; have "... = gcd (m, n)"; by (simp! add: gcd_non_0);
- finally; show ?thesis; .;
-qed;
+lemma gcd_mult_add: "0 < n ==> gcd (n * k + m, n) = gcd (m, n)"
+proof -
+ assume "0 < n"
+ hence "gcd (n * k + m, n) = gcd (n, m mod n)"
+ by (simp add: gcd_non_0 add_commute)
+ also have "... = gcd (m, n)" by (simp! add: gcd_non_0)
+ finally show ?thesis .
+qed
-lemma gcd_fib_add: "gcd (fib m, fib (n + m)) = gcd (fib m, fib n)";
-proof (cases m);
- assume "m = 0";
- thus ?thesis; by simp;
-next;
- fix k; assume "m = Suc k";
- hence "gcd (fib m, fib (n + m)) = gcd (fib (n + k + 1), fib (k + 1))";
- by (simp add: gcd_commute);
- also; have "fib (n + k + 1)
- = fib (k + 1) * fib (n + 1) + fib k * fib n";
- by (rule fib_add);
- also; have "gcd (..., fib (k + 1)) = gcd (fib k * fib n, fib (k + 1))";
- by (simp add: gcd_mult_add);
- also; have "... = gcd (fib n, fib (k + 1))";
- by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel);
- also; have "... = gcd (fib m, fib n)";
- by (simp! add: gcd_commute);
- finally; show ?thesis; .;
-qed;
+lemma gcd_fib_add: "gcd (fib m, fib (n + m)) = gcd (fib m, fib n)"
+proof (cases m)
+ assume "m = 0"
+ thus ?thesis by simp
+next
+ fix k assume "m = Suc k"
+ hence "gcd (fib m, fib (n + m)) = gcd (fib (n + k + 1), fib (k + 1))"
+ by (simp add: gcd_commute)
+ also have "fib (n + k + 1)
+ = fib (k + 1) * fib (n + 1) + fib k * fib n"
+ by (rule fib_add)
+ also have "gcd (..., fib (k + 1)) = gcd (fib k * fib n, fib (k + 1))"
+ by (simp add: gcd_mult_add)
+ also have "... = gcd (fib n, fib (k + 1))"
+ by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel)
+ also have "... = gcd (fib m, fib n)"
+ by (simp! add: gcd_commute)
+ finally show ?thesis .
+qed
lemma gcd_fib_diff:
- "m <= n ==> gcd (fib m, fib (n - m)) = gcd (fib m, fib n)";
-proof -;
- assume "m <= n";
- have "gcd (fib m, fib (n - m)) = gcd (fib m, fib (n - m + m))";
- by (simp add: gcd_fib_add);
- also; have "n - m + m = n"; by (simp!);
- finally; show ?thesis; .;
-qed;
+ "m <= n ==> gcd (fib m, fib (n - m)) = gcd (fib m, fib n)"
+proof -
+ assume "m <= n"
+ have "gcd (fib m, fib (n - m)) = gcd (fib m, fib (n - m + m))"
+ by (simp add: gcd_fib_add)
+ also have "n - m + m = n" by (simp!)
+ finally show ?thesis .
+qed
lemma gcd_fib_mod:
- "0 < m ==> gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)";
-proof (induct n rule: nat_less_induct);
- fix n;
+ "0 < m ==> gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)"
+proof (induct n rule: nat_less_induct)
+ fix n
assume m: "0 < m"
and hyp: "ALL ma. ma < n
- --> gcd (fib m, fib (ma mod m)) = gcd (fib m, fib ma)";
- have "n mod m = (if n < m then n else (n - m) mod m)";
- by (rule mod_if);
- also; have "gcd (fib m, fib ...) = gcd (fib m, fib n)";
- proof cases;
- assume "n < m"; thus ?thesis; by simp;
- next;
- assume not_lt: "~ n < m"; hence le: "m <= n"; by simp;
- have "n - m < n"; by (simp! add: diff_less);
- with hyp; have "gcd (fib m, fib ((n - m) mod m))
- = gcd (fib m, fib (n - m))"; by simp;
- also; from le; have "... = gcd (fib m, fib n)";
- by (rule gcd_fib_diff);
- finally; have "gcd (fib m, fib ((n - m) mod m)) =
- gcd (fib m, fib n)"; .;
- with not_lt; show ?thesis; by simp;
- qed;
- finally; show "gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)"; .;
-qed;
+ --> gcd (fib m, fib (ma mod m)) = gcd (fib m, fib ma)"
+ have "n mod m = (if n < m then n else (n - m) mod m)"
+ by (rule mod_if)
+ also have "gcd (fib m, fib ...) = gcd (fib m, fib n)"
+ proof cases
+ assume "n < m" thus ?thesis by simp
+ next
+ assume not_lt: "~ n < m" hence le: "m <= n" by simp
+ have "n - m < n" by (simp! add: diff_less)
+ with hyp have "gcd (fib m, fib ((n - m) mod m))
+ = gcd (fib m, fib (n - m))" by simp
+ also from le have "... = gcd (fib m, fib n)"
+ by (rule gcd_fib_diff)
+ finally have "gcd (fib m, fib ((n - m) mod m)) =
+ gcd (fib m, fib n)" .
+ with not_lt show ?thesis by simp
+ qed
+ finally show "gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)" .
+qed
-theorem fib_gcd: "fib (gcd (m, n)) = gcd (fib m, fib n)" (is "?P m n");
-proof (induct ?P m n rule: gcd_induct);
- fix m; show "fib (gcd (m, 0)) = gcd (fib m, fib 0)"; by simp;
- fix n :: nat; assume n: "0 < n";
- hence "gcd (m, n) = gcd (n, m mod n)"; by (rule gcd_non_0);
- also; assume hyp: "fib ... = gcd (fib n, fib (m mod n))";
- also; from n; have "... = gcd (fib n, fib m)"; by (rule gcd_fib_mod);
- also; have "... = gcd (fib m, fib n)"; by (rule gcd_commute);
- finally; show "fib (gcd (m, n)) = gcd (fib m, fib n)"; .;
-qed;
+theorem fib_gcd: "fib (gcd (m, n)) = gcd (fib m, fib n)" (is "?P m n")
+proof (induct ?P m n rule: gcd_induct)
+ fix m show "fib (gcd (m, 0)) = gcd (fib m, fib 0)" by simp
+ fix n :: nat assume n: "0 < n"
+ hence "gcd (m, n) = gcd (n, m mod n)" by (rule gcd_non_0)
+ also assume hyp: "fib ... = gcd (fib n, fib (m mod n))"
+ also from n have "... = gcd (fib n, fib m)" by (rule gcd_fib_mod)
+ also have "... = gcd (fib m, fib n)" by (rule gcd_commute)
+ finally show "fib (gcd (m, n)) = gcd (fib m, fib n)" .
+qed
-end;
+end
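A quick numeric check of fib_gcd, stated here only as an illustration: with fib 6 = 8, fib 9 = 34, and gcd (6, 9) = 3, we get
  gcd (fib 6, fib 9) = gcd (8, 34) = 2 = fib 3 = fib (gcd (6, 9))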
--- a/src/HOL/Isar_examples/Group.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/Group.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,73 +3,73 @@
Author: Markus Wenzel, TU Muenchen
*)
-header {* Basic group theory *};
+header {* Basic group theory *}
-theory Group = Main:;
+theory Group = Main:
-subsection {* Groups and calculational reasoning *};
+subsection {* Groups and calculational reasoning *}
text {*
Groups over signature $({\times} :: \alpha \To \alpha \To \alpha,
\idt{one} :: \alpha, \idt{inv} :: \alpha \To \alpha)$ are defined as
an axiomatic type class as follows. Note that the parent class
$\idt{times}$ is provided by the basic HOL theory.
-*};
+*}
consts
one :: "'a"
- inv :: "'a => 'a";
+ inv :: "'a => 'a"
axclass
group < times
group_assoc: "(x * y) * z = x * (y * z)"
group_left_unit: "one * x = x"
- group_left_inverse: "inv x * x = one";
+ group_left_inverse: "inv x * x = one"
text {*
The group axioms only state the properties of left unit and inverse;
the right versions may be derived as follows.
-*};
+*}
-theorem group_right_inverse: "x * inv x = (one::'a::group)";
-proof -;
- have "x * inv x = one * (x * inv x)";
- by (simp only: group_left_unit);
- also; have "... = one * x * inv x";
- by (simp only: group_assoc);
- also; have "... = inv (inv x) * inv x * x * inv x";
- by (simp only: group_left_inverse);
- also; have "... = inv (inv x) * (inv x * x) * inv x";
- by (simp only: group_assoc);
- also; have "... = inv (inv x) * one * inv x";
- by (simp only: group_left_inverse);
- also; have "... = inv (inv x) * (one * inv x)";
- by (simp only: group_assoc);
- also; have "... = inv (inv x) * inv x";
- by (simp only: group_left_unit);
- also; have "... = one";
- by (simp only: group_left_inverse);
- finally; show ?thesis; .;
-qed;
+theorem group_right_inverse: "x * inv x = (one::'a::group)"
+proof -
+ have "x * inv x = one * (x * inv x)"
+ by (simp only: group_left_unit)
+ also have "... = one * x * inv x"
+ by (simp only: group_assoc)
+ also have "... = inv (inv x) * inv x * x * inv x"
+ by (simp only: group_left_inverse)
+ also have "... = inv (inv x) * (inv x * x) * inv x"
+ by (simp only: group_assoc)
+ also have "... = inv (inv x) * one * inv x"
+ by (simp only: group_left_inverse)
+ also have "... = inv (inv x) * (one * inv x)"
+ by (simp only: group_assoc)
+ also have "... = inv (inv x) * inv x"
+ by (simp only: group_left_unit)
+ also have "... = one"
+ by (simp only: group_left_inverse)
+ finally show ?thesis .
+qed
text {*
With \name{group-right-inverse} already available,
\name{group-right-unit}\label{thm:group-right-unit} is now
established much easier.
-*};
+*}
-theorem group_right_unit: "x * one = (x::'a::group)";
-proof -;
- have "x * one = x * (inv x * x)";
- by (simp only: group_left_inverse);
- also; have "... = x * inv x * x";
- by (simp only: group_assoc);
- also; have "... = one * x";
- by (simp only: group_right_inverse);
- also; have "... = x";
- by (simp only: group_left_unit);
- finally; show ?thesis; .;
-qed;
+theorem group_right_unit: "x * one = (x::'a::group)"
+proof -
+ have "x * one = x * (inv x * x)"
+ by (simp only: group_left_inverse)
+ also have "... = x * inv x * x"
+ by (simp only: group_assoc)
+ also have "... = one * x"
+ by (simp only: group_right_inverse)
+ also have "... = x"
+ by (simp only: group_left_unit)
+ finally show ?thesis .
+qed
text {*
\medskip The calculational proof style above follows typical
@@ -93,38 +93,38 @@
Expanding the \isakeyword{also} and \isakeyword{finally} derived
language elements, calculations may be simulated by hand as
demonstrated below.
-*};
+*}
-theorem "x * one = (x::'a::group)";
-proof -;
- have "x * one = x * (inv x * x)";
- by (simp only: group_left_inverse);
+theorem "x * one = (x::'a::group)"
+proof -
+ have "x * one = x * (inv x * x)"
+ by (simp only: group_left_inverse)
note calculation = this
- -- {* first calculational step: init calculation register *};
+ -- {* first calculational step: init calculation register *}
- have "... = x * inv x * x";
- by (simp only: group_assoc);
+ have "... = x * inv x * x"
+ by (simp only: group_assoc)
note calculation = trans [OF calculation this]
- -- {* general calculational step: compose with transitivity rule *};
+ -- {* general calculational step: compose with transitivity rule *}
- have "... = one * x";
- by (simp only: group_right_inverse);
+ have "... = one * x"
+ by (simp only: group_right_inverse)
note calculation = trans [OF calculation this]
- -- {* general calculational step: compose with transitivity rule *};
+ -- {* general calculational step: compose with transitivity rule *}
- have "... = x";
- by (simp only: group_left_unit);
+ have "... = x"
+ by (simp only: group_left_unit)
note calculation = trans [OF calculation this]
- -- {* final calculational step: compose with transitivity rule ... *};
+ -- {* final calculational step: compose with transitivity rule ... *}
from calculation
- -- {* ... and pick up the final result *};
+ -- {* ... and pick up the final result *}
- show ?thesis; .;
-qed;
+ show ?thesis .
+qed
text {*
Note that this scheme of calculations is not restricted to plain
@@ -133,20 +133,20 @@
\isacommand{also} and \isacommand{finally}, Isabelle/Isar maintains
separate context information of ``transitivity'' rules. Rule
selection takes place automatically by higher-order unification.
-*};
+*}
-subsection {* Groups as monoids *};
+subsection {* Groups as monoids *}
text {*
Monoids over signature $({\times} :: \alpha \To \alpha \To \alpha,
\idt{one} :: \alpha)$ are defined like this.
-*};
+*}
axclass monoid < times
monoid_assoc: "(x * y) * z = x * (y * z)"
monoid_left_unit: "one * x = x"
- monoid_right_unit: "x * one = x";
+ monoid_right_unit: "x * one = x"
text {*
Groups are \emph{not} yet monoids directly from the definition. For
@@ -156,13 +156,13 @@
theorem of group theory (see page~\pageref{thm:group-right-unit}), we
may still instantiate $\idt{group} \subset \idt{monoid}$ properly as
follows.
-*};
+*}
-instance group < monoid;
+instance group < monoid
by (intro_classes,
rule group_assoc,
rule group_left_unit,
- rule group_right_unit);
+ rule group_right_unit)
text {*
The \isacommand{instance} command actually is a version of
@@ -171,6 +171,6 @@
language element may be involved in establishing this statement. When
concluding the proof, the result is transformed into the intended
type signature extension behind the scenes.
-*};
+*}
-end;
+end
--- a/src/HOL/Isar_examples/KnasterTarski.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/KnasterTarski.thy Sun Sep 17 22:19:02 2000 +0200
@@ -5,12 +5,12 @@
Typical textbook proof example.
*)
-header {* Textbook-style reasoning: the Knaster-Tarski Theorem *};
+header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
-theory KnasterTarski = Main:;
+theory KnasterTarski = Main:
-subsection {* Prose version *};
+subsection {* Prose version *}
text {*
According to the textbook \cite[pages 93--94]{davey-priestley}, the
@@ -28,10 +28,10 @@
complete the proof that $a$ is a fixpoint. Since $f$ is
order-preserving, $f(f(a)) \le f(a)$. This says $f(a) \in H$, so $a
\le f(a)$.
-*};
+*}
-subsection {* Formal versions *};
+subsection {* Formal versions *}
text {*
The Isar proof below closely follows the original presentation.
@@ -39,34 +39,34 @@
formal Isar language elements. As in many textbook-style proofs,
there is a strong bias towards forward proof, and several bends
in the course of reasoning.
-*};
+*}
-theorem KnasterTarski: "mono f ==> EX a::'a set. f a = a";
-proof;
- let ?H = "{u. f u <= u}";
- let ?a = "Inter ?H";
+theorem KnasterTarski: "mono f ==> EX a::'a set. f a = a"
+proof
+ let ?H = "{u. f u <= u}"
+ let ?a = "Inter ?H"
- assume mono: "mono f";
- show "f ?a = ?a";
- proof -;
- {;
- fix x;
- assume H: "x : ?H";
- hence "?a <= x"; by (rule Inter_lower);
- with mono; have "f ?a <= f x"; ..;
- also; from H; have "... <= x"; ..;
- finally; have "f ?a <= x"; .;
- };
- hence ge: "f ?a <= ?a"; by (rule Inter_greatest);
- {;
- also; presume "... <= f ?a";
- finally (order_antisym); show ?thesis; .;
- };
- from mono ge; have "f (f ?a) <= f ?a"; ..;
- hence "f ?a : ?H"; ..;
- thus "?a <= f ?a"; by (rule Inter_lower);
- qed;
-qed;
+ assume mono: "mono f"
+ show "f ?a = ?a"
+ proof -
+ {
+ fix x
+ assume H: "x : ?H"
+ hence "?a <= x" by (rule Inter_lower)
+ with mono have "f ?a <= f x" ..
+ also from H have "... <= x" ..
+ finally have "f ?a <= x" .
+ }
+ hence ge: "f ?a <= ?a" by (rule Inter_greatest)
+ {
+ also presume "... <= f ?a"
+ finally (order_antisym) show ?thesis .
+ }
+ from mono ge have "f (f ?a) <= f ?a" ..
+ hence "f ?a : ?H" ..
+ thus "?a <= f ?a" by (rule Inter_lower)
+ qed
+qed
text {*
Above we have used several advanced Isar language elements, such as
@@ -78,31 +78,31 @@
level, while only the inner steps of reasoning are done in a forward
manner. We are certainly more at ease here, requiring only the most
basic features of the Isar language.
-*};
+*}
-theorem KnasterTarski': "mono f ==> EX a::'a set. f a = a";
-proof;
- let ?H = "{u. f u <= u}";
- let ?a = "Inter ?H";
+theorem KnasterTarski': "mono f ==> EX a::'a set. f a = a"
+proof
+ let ?H = "{u. f u <= u}"
+ let ?a = "Inter ?H"
- assume mono: "mono f";
- show "f ?a = ?a";
- proof (rule order_antisym);
- show ge: "f ?a <= ?a";
- proof (rule Inter_greatest);
- fix x;
- assume H: "x : ?H";
- hence "?a <= x"; by (rule Inter_lower);
- with mono; have "f ?a <= f x"; ..;
- also; from H; have "... <= x"; ..;
- finally; show "f ?a <= x"; .;
- qed;
- show "?a <= f ?a";
- proof (rule Inter_lower);
- from mono ge; have "f (f ?a) <= f ?a"; ..;
- thus "f ?a : ?H"; ..;
- qed;
- qed;
-qed;
+ assume mono: "mono f"
+ show "f ?a = ?a"
+ proof (rule order_antisym)
+ show ge: "f ?a <= ?a"
+ proof (rule Inter_greatest)
+ fix x
+ assume H: "x : ?H"
+ hence "?a <= x" by (rule Inter_lower)
+ with mono have "f ?a <= f x" ..
+ also from H have "... <= x" ..
+ finally show "f ?a <= x" .
+ qed
+ show "?a <= f ?a"
+ proof (rule Inter_lower)
+ from mono ge have "f (f ?a) <= f ?a" ..
+ thus "f ?a : ?H" ..
+ qed
+ qed
+qed
-end;
+end
--- a/src/HOL/Isar_examples/MultisetOrder.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/MultisetOrder.thy Sun Sep 17 22:19:02 2000 +0200
@@ -5,146 +5,146 @@
Wellfoundedness proof for the multiset order.
*)
-header {* Wellfoundedness of multiset ordering *};
+header {* Wellfoundedness of multiset ordering *}
-theory MultisetOrder = Multiset:;
+theory MultisetOrder = Multiset:
text_raw {*
\footnote{Original tactic script by Tobias Nipkow (see
\url{http://isabelle.in.tum.de/library/HOL/Induct/Multiset.html}),
based on a pen-and-paper proof due to Wilfried Buchholz.}\isanewline
-*};
+*}
(* FIXME move? *)
-theorems [induct type: multiset] = multiset_induct;
-theorems [induct set: wf] = wf_induct;
-theorems [induct set: acc] = acc_induct;
+theorems [induct type: multiset] = multiset_induct
+theorems [induct set: wf] = wf_induct
+theorems [induct set: acc] = acc_induct
-subsection {* A technical lemma *};
+subsection {* A technical lemma *}
lemma less_add: "(N, M0 + {#a#}) : mult1 r ==>
(EX M. (M, M0) : mult1 r & N = M + {#a#}) |
(EX K. (ALL b. b :# K --> (b, a) : r) & N = M0 + K)"
- (concl is "?case1 (mult1 r) | ?case2");
-proof (unfold mult1_def);
- let ?r = "\<lambda>K a. ALL b. b :# K --> (b, a) : r";
- let ?R = "\<lambda>N M. EX a M0 K. M = M0 + {#a#} & N = M0 + K & ?r K a";
- let ?case1 = "?case1 {(N, M). ?R N M}";
+ (concl is "?case1 (mult1 r) | ?case2")
+proof (unfold mult1_def)
+ let ?r = "\<lambda>K a. ALL b. b :# K --> (b, a) : r"
+ let ?R = "\<lambda>N M. EX a M0 K. M = M0 + {#a#} & N = M0 + K & ?r K a"
+ let ?case1 = "?case1 {(N, M). ?R N M}"
- assume "(N, M0 + {#a#}) : {(N, M). ?R N M}";
+ assume "(N, M0 + {#a#}) : {(N, M). ?R N M}"
hence "EX a' M0' K.
- M0 + {#a#} = M0' + {#a'#} & N = M0' + K & ?r K a'"; by simp;
- thus "?case1 | ?case2";
- proof (elim exE conjE);
- fix a' M0' K;
- assume N: "N = M0' + K" and r: "?r K a'";
- assume "M0 + {#a#} = M0' + {#a'#}";
+ M0 + {#a#} = M0' + {#a'#} & N = M0' + K & ?r K a'" by simp
+ thus "?case1 | ?case2"
+ proof (elim exE conjE)
+ fix a' M0' K
+ assume N: "N = M0' + K" and r: "?r K a'"
+ assume "M0 + {#a#} = M0' + {#a'#}"
hence "M0 = M0' & a = a' |
- (EX K'. M0 = K' + {#a'#} & M0' = K' + {#a#})";
- by (simp only: add_eq_conv_ex);
- thus ?thesis;
- proof (elim disjE conjE exE);
- assume "M0 = M0'" "a = a'";
- with N r; have "?r K a & N = M0 + K"; by simp;
- hence ?case2; ..; thus ?thesis; ..;
- next;
- fix K';
- assume "M0' = K' + {#a#}";
- with N; have n: "N = K' + K + {#a#}"; by (simp add: union_ac);
+ (EX K'. M0 = K' + {#a'#} & M0' = K' + {#a#})"
+ by (simp only: add_eq_conv_ex)
+ thus ?thesis
+ proof (elim disjE conjE exE)
+ assume "M0 = M0'" "a = a'"
+ with N r have "?r K a & N = M0 + K" by simp
+ hence ?case2 .. thus ?thesis ..
+ next
+ fix K'
+ assume "M0' = K' + {#a#}"
+ with N have n: "N = K' + K + {#a#}" by (simp add: union_ac)
- assume "M0 = K' + {#a'#}";
- with r; have "?R (K' + K) M0"; by blast;
- with n; have ?case1; by simp; thus ?thesis; ..;
- qed;
- qed;
-qed;
+ assume "M0 = K' + {#a'#}"
+ with r have "?R (K' + K) M0" by blast
+ with n have ?case1 by simp thus ?thesis ..
+ qed
+ qed
+qed
-subsection {* The key property *};
+subsection {* The key property *}
-lemma all_accessible: "wf r ==> ALL M. M : acc (mult1 r)";
-proof;
- let ?R = "mult1 r";
- let ?W = "acc ?R";
- {;
- fix M M0 a;
+lemma all_accessible: "wf r ==> ALL M. M : acc (mult1 r)"
+proof
+ let ?R = "mult1 r"
+ let ?W = "acc ?R"
+ {
+ fix M M0 a
assume M0: "M0 : ?W"
and wf_hyp: "ALL b. (b, a) : r --> (ALL M:?W. M + {#b#} : ?W)"
- and acc_hyp: "ALL M. (M, M0) : ?R --> M + {#a#} : ?W";
- have "M0 + {#a#} : ?W";
- proof (rule accI [of "M0 + {#a#}"]);
- fix N;
- assume "(N, M0 + {#a#}) : ?R";
+ and acc_hyp: "ALL M. (M, M0) : ?R --> M + {#a#} : ?W"
+ have "M0 + {#a#} : ?W"
+ proof (rule accI [of "M0 + {#a#}"])
+ fix N
+ assume "(N, M0 + {#a#}) : ?R"
hence "((EX M. (M, M0) : ?R & N = M + {#a#}) |
- (EX K. (ALL b. b :# K --> (b, a) : r) & N = M0 + K))";
- by (rule less_add);
- thus "N : ?W";
- proof (elim exE disjE conjE);
- fix M; assume "(M, M0) : ?R" and N: "N = M + {#a#}";
- from acc_hyp; have "(M, M0) : ?R --> M + {#a#} : ?W"; ..;
- hence "M + {#a#} : ?W"; ..;
- thus "N : ?W"; by (simp only: N);
- next;
- fix K;
- assume N: "N = M0 + K";
- assume "ALL b. b :# K --> (b, a) : r";
- have "?this --> M0 + K : ?W" (is "?P K");
- proof (induct K);
- from M0; have "M0 + {#} : ?W"; by simp;
- thus "?P {#}"; ..;
+ (EX K. (ALL b. b :# K --> (b, a) : r) & N = M0 + K))"
+ by (rule less_add)
+ thus "N : ?W"
+ proof (elim exE disjE conjE)
+ fix M assume "(M, M0) : ?R" and N: "N = M + {#a#}"
+ from acc_hyp have "(M, M0) : ?R --> M + {#a#} : ?W" ..
+ hence "M + {#a#} : ?W" ..
+ thus "N : ?W" by (simp only: N)
+ next
+ fix K
+ assume N: "N = M0 + K"
+ assume "ALL b. b :# K --> (b, a) : r"
+ have "?this --> M0 + K : ?W" (is "?P K")
+ proof (induct K)
+ from M0 have "M0 + {#} : ?W" by simp
+ thus "?P {#}" ..
- fix K x; assume hyp: "?P K";
- show "?P (K + {#x#})";
- proof;
- assume a: "ALL b. b :# (K + {#x#}) --> (b, a) : r";
- hence "(x, a) : r"; by simp;
- with wf_hyp; have b: "ALL M:?W. M + {#x#} : ?W"; by blast;
+ fix K x assume hyp: "?P K"
+ show "?P (K + {#x#})"
+ proof
+ assume a: "ALL b. b :# (K + {#x#}) --> (b, a) : r"
+ hence "(x, a) : r" by simp
+ with wf_hyp have b: "ALL M:?W. M + {#x#} : ?W" by blast
- from a hyp; have "M0 + K : ?W"; by simp;
- with b; have "(M0 + K) + {#x#} : ?W"; ..;
- thus "M0 + (K + {#x#}) : ?W"; by (simp only: union_assoc);
- qed;
- qed;
- hence "M0 + K : ?W"; ..;
- thus "N : ?W"; by (simp only: N);
- qed;
- qed;
- }; note tedious_reasoning = this;
+ from a hyp have "M0 + K : ?W" by simp
+ with b have "(M0 + K) + {#x#} : ?W" ..
+ thus "M0 + (K + {#x#}) : ?W" by (simp only: union_assoc)
+ qed
+ qed
+ hence "M0 + K : ?W" ..
+ thus "N : ?W" by (simp only: N)
+ qed
+ qed
+ } note tedious_reasoning = this
- assume wf: "wf r";
- fix M;
- show "M : ?W";
- proof (induct M);
- show "{#} : ?W";
- proof (rule accI);
- fix b; assume "(b, {#}) : ?R";
- with not_less_empty; show "b : ?W"; by contradiction;
- qed;
+ assume wf: "wf r"
+ fix M
+ show "M : ?W"
+ proof (induct M)
+ show "{#} : ?W"
+ proof (rule accI)
+ fix b assume "(b, {#}) : ?R"
+ with not_less_empty show "b : ?W" by contradiction
+ qed
- fix M a; assume "M : ?W";
- from wf; have "ALL M:?W. M + {#a#} : ?W";
- proof induct;
- fix a;
- assume "ALL b. (b, a) : r --> (ALL M:?W. M + {#b#} : ?W)";
- show "ALL M:?W. M + {#a#} : ?W";
- proof;
- fix M; assume "M : ?W";
- thus "M + {#a#} : ?W";
- by (rule acc_induct) (rule tedious_reasoning);
- qed;
- qed;
- thus "M + {#a#} : ?W"; ..;
- qed;
-qed;
+ fix M a assume "M : ?W"
+ from wf have "ALL M:?W. M + {#a#} : ?W"
+ proof induct
+ fix a
+ assume "ALL b. (b, a) : r --> (ALL M:?W. M + {#b#} : ?W)"
+ show "ALL M:?W. M + {#a#} : ?W"
+ proof
+ fix M assume "M : ?W"
+ thus "M + {#a#} : ?W"
+ by (rule acc_induct) (rule tedious_reasoning)
+ qed
+ qed
+ thus "M + {#a#} : ?W" ..
+ qed
+qed
-subsection {* Main result *};
+subsection {* Main result *}
-theorem wf_mult1: "wf r ==> wf (mult1 r)";
- by (rule acc_wfI, rule all_accessible);
+theorem wf_mult1: "wf r ==> wf (mult1 r)"
+ by (rule acc_wfI, rule all_accessible)
-theorem wf_mult: "wf r ==> wf (mult r)";
- by (unfold mult_def, rule wf_trancl, rule wf_mult1);
+theorem wf_mult: "wf r ==> wf (mult r)"
+ by (unfold mult_def, rule wf_trancl, rule wf_mult1)
-end;
+end
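
For orientation when reading the Multiset proofs above: unfolding mult1_def exactly as the
let-bindings of less_add do, and writing $\{a\}$ for the singleton multiset {#a#} and
$b \in K$ for b :# K (a notational convention of this note only), the relation amounts to
\[
  \idt{mult1}\ r \;=\; \{(N, M).\;
    \exists a\ M_0\ K.\; M = M_0 + \{a\} \;\wedge\; N = M_0 + K \;\wedge\;
    (\forall b \in K.\; (b, a) \in r)\}
\]
that is, one step replaces a single element $a$ of $M$ by an arbitrary multiset $K$ of
elements that are all smaller than $a$ wrt.\ $r$; less_add merely distinguishes whether the
displayed copy of $a$ is the element that was replaced or not.
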
--- a/src/HOL/Isar_examples/MutilatedCheckerboard.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/MutilatedCheckerboard.thy Sun Sep 17 22:19:02 2000 +0200
@@ -4,296 +4,296 @@
Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
*)
-header {* The Mutilated Checker Board Problem *};
+header {* The Mutilated Checker Board Problem *}
-theory MutilatedCheckerboard = Main:;
+theory MutilatedCheckerboard = Main:
text {*
The Mutilated Checker Board Problem, formalized inductively. See
\cite{paulson-mutilated-board} and
\url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
original tactic script version.
-*};
+*}
-subsection {* Tilings *};
+subsection {* Tilings *}
consts
- tiling :: "'a set set => 'a set set";
+ tiling :: "'a set set => 'a set set"
inductive "tiling A"
intros
empty: "{} : tiling A"
Un: "a : A ==> t : tiling A ==> a <= - t
- ==> a Un t : tiling A";
+ ==> a Un t : tiling A"
-text "The union of two disjoint tilings is a tiling.";
+text "The union of two disjoint tilings is a tiling."
lemma tiling_Un:
"t : tiling A --> u : tiling A --> t Int u = {}
- --> t Un u : tiling A";
-proof;
- assume "t : tiling A" (is "_ : ?T");
- thus "u : ?T --> t Int u = {} --> t Un u : ?T" (is "?P t");
- proof (induct (stripped) t);
+ --> t Un u : tiling A"
+proof
+ assume "t : tiling A" (is "_ : ?T")
+ thus "u : ?T --> t Int u = {} --> t Un u : ?T" (is "?P t")
+ proof (induct (stripped) t)
assume "u : ?T" "{} Int u = {}"
- thus "{} Un u : ?T" by simp;
+ thus "{} Un u : ?T" by simp
next
- fix a t;
- assume "a : A" "t : ?T" "?P t" "a <= - t";
- assume "u : ?T" "(a Un t) Int u = {}";
- have hyp: "t Un u: ?T"; by (blast!);
- have "a <= - (t Un u)"; by (blast!);
- with _ hyp; have "a Un (t Un u) : ?T"; by (rule tiling.Un);
- also; have "a Un (t Un u) = (a Un t) Un u";
- by (simp only: Un_assoc);
- finally; show "... : ?T"; .;
- qed;
-qed;
+ fix a t
+ assume "a : A" "t : ?T" "?P t" "a <= - t"
+ assume "u : ?T" "(a Un t) Int u = {}"
+ have hyp: "t Un u: ?T" by (blast!)
+ have "a <= - (t Un u)" by (blast!)
+ with _ hyp have "a Un (t Un u) : ?T" by (rule tiling.Un)
+ also have "a Un (t Un u) = (a Un t) Un u"
+ by (simp only: Un_assoc)
+ finally show "... : ?T" .
+ qed
+qed
-subsection {* Basic properties of ``below'' *};
+subsection {* Basic properties of ``below'' *}
constdefs
below :: "nat => nat set"
- "below n == {i. i < n}";
+ "below n == {i. i < n}"
-lemma below_less_iff [iff]: "(i: below k) = (i < k)";
- by (simp add: below_def);
+lemma below_less_iff [iff]: "(i: below k) = (i < k)"
+ by (simp add: below_def)
-lemma below_0: "below 0 = {}";
- by (simp add: below_def);
+lemma below_0: "below 0 = {}"
+ by (simp add: below_def)
lemma Sigma_Suc1:
- "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)";
- by (simp add: below_def less_Suc_eq) blast;
+ "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
+ by (simp add: below_def less_Suc_eq) blast
lemma Sigma_Suc2:
"m = n + 2 ==> A <*> below m =
- (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)";
- by (auto simp add: below_def) arith;
+ (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
+ by (auto simp add: below_def) arith
-lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2;
+lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
-subsection {* Basic properties of ``evnodd'' *};
+subsection {* Basic properties of ``evnodd'' *}
constdefs
evnodd :: "(nat * nat) set => nat => (nat * nat) set"
- "evnodd A b == A Int {(i, j). (i + j) mod #2 = b}";
+ "evnodd A b == A Int {(i, j). (i + j) mod #2 = b}"
lemma evnodd_iff:
- "(i, j): evnodd A b = ((i, j): A & (i + j) mod #2 = b)";
- by (simp add: evnodd_def);
+ "(i, j): evnodd A b = ((i, j): A & (i + j) mod #2 = b)"
+ by (simp add: evnodd_def)
-lemma evnodd_subset: "evnodd A b <= A";
- by (unfold evnodd_def, rule Int_lower1);
+lemma evnodd_subset: "evnodd A b <= A"
+ by (unfold evnodd_def, rule Int_lower1)
-lemma evnoddD: "x : evnodd A b ==> x : A";
- by (rule subsetD, rule evnodd_subset);
+lemma evnoddD: "x : evnodd A b ==> x : A"
+ by (rule subsetD, rule evnodd_subset)
-lemma evnodd_finite: "finite A ==> finite (evnodd A b)";
- by (rule finite_subset, rule evnodd_subset);
+lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
+ by (rule finite_subset, rule evnodd_subset)
-lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b";
- by (unfold evnodd_def) blast;
+lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
+ by (unfold evnodd_def) blast
-lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b";
- by (unfold evnodd_def) blast;
+lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
+ by (unfold evnodd_def) blast
-lemma evnodd_empty: "evnodd {} b = {}";
- by (simp add: evnodd_def);
+lemma evnodd_empty: "evnodd {} b = {}"
+ by (simp add: evnodd_def)
lemma evnodd_insert: "evnodd (insert (i, j) C) b =
(if (i + j) mod #2 = b
- then insert (i, j) (evnodd C b) else evnodd C b)";
- by (simp add: evnodd_def) blast;
+ then insert (i, j) (evnodd C b) else evnodd C b)"
+ by (simp add: evnodd_def) blast
-subsection {* Dominoes *};
+subsection {* Dominoes *}
consts
- domino :: "(nat * nat) set set";
+ domino :: "(nat * nat) set set"
inductive domino
intros
horiz: "{(i, j), (i, j + 1)} : domino"
- vertl: "{(i, j), (i + 1, j)} : domino";
+ vertl: "{(i, j), (i + 1, j)} : domino"
lemma dominoes_tile_row:
"{i} <*> below (2 * n) : tiling domino"
- (is "?P n" is "?B n : ?T");
-proof (induct n);
- show "?P 0"; by (simp add: below_0 tiling.empty);
+ (is "?P n" is "?B n : ?T")
+proof (induct n)
+ show "?P 0" by (simp add: below_0 tiling.empty)
- fix n; assume hyp: "?P n";
- let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}";
+ fix n assume hyp: "?P n"
+ let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
- have "?B (Suc n) = ?a Un ?B n";
- by (auto simp add: Sigma_Suc Un_assoc);
- also; have "... : ?T";
- proof (rule tiling.Un);
- have "{(i, 2 * n), (i, 2 * n + 1)} : domino";
- by (rule domino.horiz);
- also; have "{(i, 2 * n), (i, 2 * n + 1)} = ?a"; by blast;
- finally; show "... : domino"; .;
- from hyp; show "?B n : ?T"; .;
- show "?a <= - ?B n"; by blast;
- qed;
- finally; show "?P (Suc n)"; .;
-qed;
+ have "?B (Suc n) = ?a Un ?B n"
+ by (auto simp add: Sigma_Suc Un_assoc)
+ also have "... : ?T"
+ proof (rule tiling.Un)
+ have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
+ by (rule domino.horiz)
+ also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
+ finally show "... : domino" .
+ from hyp show "?B n : ?T" .
+ show "?a <= - ?B n" by blast
+ qed
+ finally show "?P (Suc n)" .
+qed
lemma dominoes_tile_matrix:
"below m <*> below (2 * n) : tiling domino"
- (is "?P m" is "?B m : ?T");
-proof (induct m);
- show "?P 0"; by (simp add: below_0 tiling.empty);
+ (is "?P m" is "?B m : ?T")
+proof (induct m)
+ show "?P 0" by (simp add: below_0 tiling.empty)
- fix m; assume hyp: "?P m";
- let ?t = "{m} <*> below (2 * n)";
+ fix m assume hyp: "?P m"
+ let ?t = "{m} <*> below (2 * n)"
- have "?B (Suc m) = ?t Un ?B m"; by (simp add: Sigma_Suc);
- also; have "... : ?T";
- proof (rule tiling_Un [rule_format]);
- show "?t : ?T"; by (rule dominoes_tile_row);
- from hyp; show "?B m : ?T"; .;
- show "?t Int ?B m = {}"; by blast;
- qed;
- finally; show "?P (Suc m)"; .;
-qed;
+ have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
+ also have "... : ?T"
+ proof (rule tiling_Un [rule_format])
+ show "?t : ?T" by (rule dominoes_tile_row)
+ from hyp show "?B m : ?T" .
+ show "?t Int ?B m = {}" by blast
+ qed
+ finally show "?P (Suc m)" .
+qed
lemma domino_singleton:
- "d : domino ==> b < 2 ==> EX i j. evnodd d b = {(i, j)}";
-proof -;
- assume b: "b < 2";
- assume "d : domino";
- thus ?thesis (is "?P d");
- proof induct;
- from b; have b_cases: "b = 0 | b = 1"; by arith;
- fix i j;
- note [simp] = evnodd_empty evnodd_insert mod_Suc;
- from b_cases; show "?P {(i, j), (i, j + 1)}"; by rule auto;
- from b_cases; show "?P {(i, j), (i + 1, j)}"; by rule auto;
- qed;
-qed;
+ "d : domino ==> b < 2 ==> EX i j. evnodd d b = {(i, j)}"
+proof -
+ assume b: "b < 2"
+ assume "d : domino"
+ thus ?thesis (is "?P d")
+ proof induct
+ from b have b_cases: "b = 0 | b = 1" by arith
+ fix i j
+ note [simp] = evnodd_empty evnodd_insert mod_Suc
+ from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
+ from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
+ qed
+qed
-lemma domino_finite: "d: domino ==> finite d";
-proof (induct set: domino);
- fix i j :: nat;
- show "finite {(i, j), (i, j + 1)}"; by (intro Finites.intros);
- show "finite {(i, j), (i + 1, j)}"; by (intro Finites.intros);
-qed;
+lemma domino_finite: "d: domino ==> finite d"
+proof (induct set: domino)
+ fix i j :: nat
+ show "finite {(i, j), (i, j + 1)}" by (intro Finites.intros)
+ show "finite {(i, j), (i + 1, j)}" by (intro Finites.intros)
+qed
-subsection {* Tilings of dominoes *};
+subsection {* Tilings of dominoes *}
lemma tiling_domino_finite:
- "t : tiling domino ==> finite t" (is "t : ?T ==> ?F t");
-proof -;
- assume "t : ?T";
- thus "?F t";
- proof induct;
- show "?F {}"; by (rule Finites.emptyI);
- fix a t; assume "?F t";
- assume "a : domino"; hence "?F a"; by (rule domino_finite);
- thus "?F (a Un t)"; by (rule finite_UnI);
- qed;
-qed;
+ "t : tiling domino ==> finite t" (is "t : ?T ==> ?F t")
+proof -
+ assume "t : ?T"
+ thus "?F t"
+ proof induct
+ show "?F {}" by (rule Finites.emptyI)
+ fix a t assume "?F t"
+ assume "a : domino" hence "?F a" by (rule domino_finite)
+ thus "?F (a Un t)" by (rule finite_UnI)
+ qed
+qed
lemma tiling_domino_01:
"t : tiling domino ==> card (evnodd t 0) = card (evnodd t 1)"
- (is "t : ?T ==> ?P t");
-proof -;
- assume "t : ?T";
- thus "?P t";
- proof induct;
- show "?P {}"; by (simp add: evnodd_def);
+ (is "t : ?T ==> ?P t")
+proof -
+ assume "t : ?T"
+ thus "?P t"
+ proof induct
+ show "?P {}" by (simp add: evnodd_def)
- fix a t;
- let ?e = evnodd;
+ fix a t
+ let ?e = evnodd
assume "a : domino" "t : ?T"
and hyp: "card (?e t 0) = card (?e t 1)"
- and "a <= - t";
+ and "a <= - t"
have card_suc:
- "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))";
- proof -;
- fix b; assume "b < 2";
- have "?e (a Un t) b = ?e a b Un ?e t b"; by (rule evnodd_Un);
- also; obtain i j where "?e a b = {(i, j)}";
- proof -;
- have "EX i j. ?e a b = {(i, j)}"; by (rule domino_singleton);
- thus ?thesis; by blast;
- qed;
- also; have "... Un ?e t b = insert (i, j) (?e t b)"; by simp;
- also; have "card ... = Suc (card (?e t b))";
- proof (rule card_insert_disjoint);
- show "finite (?e t b)";
- by (rule evnodd_finite, rule tiling_domino_finite);
- have "(i, j) : ?e a b"; by (simp!);
- thus "(i, j) ~: ?e t b"; by (blast! dest: evnoddD);
- qed;
- finally; show "?thesis b"; .;
- qed;
- hence "card (?e (a Un t) 0) = Suc (card (?e t 0))"; by simp;
- also; from hyp; have "card (?e t 0) = card (?e t 1)"; .;
- also; from card_suc; have "Suc ... = card (?e (a Un t) 1)";
- by simp;
- finally; show "?P (a Un t)"; .;
- qed;
-qed;
+ "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
+ proof -
+ fix b assume "b < 2"
+ have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
+ also obtain i j where "?e a b = {(i, j)}"
+ proof -
+ have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
+ thus ?thesis by blast
+ qed
+ also have "... Un ?e t b = insert (i, j) (?e t b)" by simp
+ also have "card ... = Suc (card (?e t b))"
+ proof (rule card_insert_disjoint)
+ show "finite (?e t b)"
+ by (rule evnodd_finite, rule tiling_domino_finite)
+ have "(i, j) : ?e a b" by (simp!)
+ thus "(i, j) ~: ?e t b" by (blast! dest: evnoddD)
+ qed
+ finally show "?thesis b" .
+ qed
+ hence "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
+ also from hyp have "card (?e t 0) = card (?e t 1)" .
+ also from card_suc have "Suc ... = card (?e (a Un t) 1)"
+ by simp
+ finally show "?P (a Un t)" .
+ qed
+qed
-subsection {* Main theorem *};
+subsection {* Main theorem *}
constdefs
mutilated_board :: "nat => nat => (nat * nat) set"
"mutilated_board m n ==
below (2 * (m + 1)) <*> below (2 * (n + 1))
- - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}";
+ - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
-theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino";
-proof (unfold mutilated_board_def);
- let ?T = "tiling domino";
- let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))";
- let ?t' = "?t - {(0, 0)}";
- let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}";
+theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
+proof (unfold mutilated_board_def)
+ let ?T = "tiling domino"
+ let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
+ let ?t' = "?t - {(0, 0)}"
+ let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
- show "?t'' ~: ?T";
- proof;
- have t: "?t : ?T"; by (rule dominoes_tile_matrix);
- assume t'': "?t'' : ?T";
+ show "?t'' ~: ?T"
+ proof
+ have t: "?t : ?T" by (rule dominoes_tile_matrix)
+ assume t'': "?t'' : ?T"
- let ?e = evnodd;
- have fin: "finite (?e ?t 0)";
- by (rule evnodd_finite, rule tiling_domino_finite, rule t);
+ let ?e = evnodd
+ have fin: "finite (?e ?t 0)"
+ by (rule evnodd_finite, rule tiling_domino_finite, rule t)
- note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff;
- have "card (?e ?t'' 0) < card (?e ?t' 0)";
- proof -;
+ note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
+ have "card (?e ?t'' 0) < card (?e ?t' 0)"
+ proof -
have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
- < card (?e ?t' 0)";
- proof (rule card_Diff1_less);
- from _ fin; show "finite (?e ?t' 0)";
- by (rule finite_subset) auto;
- show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0"; by simp;
- qed;
- thus ?thesis; by simp;
- qed;
- also; have "... < card (?e ?t 0)";
- proof -;
- have "(0, 0) : ?e ?t 0"; by simp;
- with fin; have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)";
- by (rule card_Diff1_less);
- thus ?thesis; by simp;
- qed;
- also; from t; have "... = card (?e ?t 1)";
- by (rule tiling_domino_01);
- also; have "?e ?t 1 = ?e ?t'' 1"; by simp;
- also; from t''; have "card ... = card (?e ?t'' 0)";
- by (rule tiling_domino_01 [symmetric]);
- finally; have "... < ..."; .; thus False; ..;
- qed;
-qed;
+ < card (?e ?t' 0)"
+ proof (rule card_Diff1_less)
+ from _ fin show "finite (?e ?t' 0)"
+ by (rule finite_subset) auto
+ show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
+ qed
+ thus ?thesis by simp
+ qed
+ also have "... < card (?e ?t 0)"
+ proof -
+ have "(0, 0) : ?e ?t 0" by simp
+ with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
+ by (rule card_Diff1_less)
+ thus ?thesis by simp
+ qed
+ also from t have "... = card (?e ?t 1)"
+ by (rule tiling_domino_01)
+ also have "?e ?t 1 = ?e ?t'' 1" by simp
+ also from t'' have "card ... = card (?e ?t'' 0)"
+ by (rule tiling_domino_01 [symmetric])
+ finally have "... < ..." . thus False ..
+ qed
+qed
-end;
+end
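
The parity argument behind mutil_not_tiling, spelled out in ordinary arithmetic: both
deleted squares lie in the ``even'' part of the board, since
\[
  0 + 0 = 0, \qquad (2 m + 1) + (2 n + 1) = 2 (m + n + 1),
\]
so removing them shrinks only evnodd ?t 0.  Hence card (evnodd ?t'' 0) < card (evnodd ?t 0)
= card (evnodd ?t 1) = card (evnodd ?t'' 1), whereas tiling_domino_01 would force the 0- and
1-cardinalities of ?t'' to coincide if the mutilated board were a domino tiling.
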
--- a/src/HOL/Isar_examples/NestedDatatype.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/NestedDatatype.thy Sun Sep 17 22:19:02 2000 +0200
@@ -1,91 +1,91 @@
-header {* Nested datatypes *};
+header {* Nested datatypes *}
-theory NestedDatatype = Main:;
+theory NestedDatatype = Main:
-subsection {* Terms and substitution *};
+subsection {* Terms and substitution *}
datatype ('a, 'b) "term" =
Var 'a
- | App 'b "('a, 'b) term list";
+ | App 'b "('a, 'b) term list"
consts
subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
subst_term_list ::
- "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list";
+ "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
primrec (subst)
"subst_term f (Var a) = f a"
"subst_term f (App b ts) = App b (subst_term_list f ts)"
"subst_term_list f [] = []"
- "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts";
+ "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
text {*
\medskip A simple lemma about composition of substitutions.
-*};
+*}
lemma
"subst_term (subst_term f1 o f2) t =
subst_term f1 (subst_term f2 t) &
subst_term_list (subst_term f1 o f2) ts =
- subst_term_list f1 (subst_term_list f2 ts)";
- by (induct t and ts rule: term.induct) simp_all;
+ subst_term_list f1 (subst_term_list f2 ts)"
+ by (induct t and ts rule: term.induct) simp_all
lemma "subst_term (subst_term f1 o f2) t =
- subst_term f1 (subst_term f2 t)";
-proof -;
- let "?P t" = ?thesis;
- let ?Q = "\\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
- subst_term_list f1 (subst_term_list f2 ts)";
- show ?thesis;
- proof (induct t);
- fix a; show "?P (Var a)"; by simp;
- next;
- fix b ts; assume "?Q ts";
- thus "?P (App b ts)"; by (simp add: o_def);
- next;
- show "?Q []"; by simp;
- next;
- fix t ts;
- assume "?P t" "?Q ts"; thus "?Q (t # ts)"; by simp;
- qed;
-qed;
+ subst_term f1 (subst_term f2 t)"
+proof -
+ let "?P t" = ?thesis
+ let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
+ subst_term_list f1 (subst_term_list f2 ts)"
+ show ?thesis
+ proof (induct t)
+ fix a show "?P (Var a)" by simp
+ next
+ fix b ts assume "?Q ts"
+ thus "?P (App b ts)" by (simp add: o_def)
+ next
+ show "?Q []" by simp
+ next
+ fix t ts
+ assume "?P t" "?Q ts" thus "?Q (t # ts)" by simp
+ qed
+qed
-subsection {* Alternative induction *};
+subsection {* Alternative induction *}
theorem term_induct' [case_names Var App]:
"(!!a. P (Var a)) ==>
- (!!b ts. list_all P ts ==> P (App b ts)) ==> P t";
-proof -;
- assume var: "!!a. P (Var a)";
- assume app: "!!b ts. list_all P ts ==> P (App b ts)";
- show ?thesis;
- proof (induct P t);
- fix a; show "P (Var a)"; by (rule var);
- next;
- fix b t ts; assume "list_all P ts";
- thus "P (App b ts)"; by (rule app);
- next;
- show "list_all P []"; by simp;
- next;
- fix t ts; assume "P t" "list_all P ts";
- thus "list_all P (t # ts)"; by simp;
- qed;
-qed;
+ (!!b ts. list_all P ts ==> P (App b ts)) ==> P t"
+proof -
+ assume var: "!!a. P (Var a)"
+ assume app: "!!b ts. list_all P ts ==> P (App b ts)"
+ show ?thesis
+ proof (induct P t)
+ fix a show "P (Var a)" by (rule var)
+ next
+ fix b t ts assume "list_all P ts"
+ thus "P (App b ts)" by (rule app)
+ next
+ show "list_all P []" by simp
+ next
+ fix t ts assume "P t" "list_all P ts"
+ thus "list_all P (t # ts)" by simp
+ qed
+qed
lemma
"subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
- (is "?P t");
-proof (induct (open) ?P t rule: term_induct');
- case Var;
- show "?P (Var a)"; by (simp add: o_def);
-next;
- case App;
- have "?this --> ?P (App b ts)";
- by (induct ts) simp_all;
- thus "..."; ..;
-qed;
+ (is "?P t")
+proof (induct (open) ?P t rule: term_induct')
+ case Var
+ show "?P (Var a)" by (simp add: o_def)
+next
+ case App
+ have "?this --> ?P (App b ts)"
+ by (induct ts) simp_all
+ thus "..." ..
+qed
-end;
+end
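
To see the mutually recursive primrec equations above in action, here is one unfolding done
by hand (abbreviating subst_term as $\idt{subst}$ and subst_term_list as $\idt{substs}$,
purely for layout):
\begin{matharray}{lcl}
  \idt{subst}\ f\ (\idt{App}\ b\ [\idt{Var}\ x,\ \idt{Var}\ y])
    & = & \idt{App}\ b\ (\idt{substs}\ f\ [\idt{Var}\ x,\ \idt{Var}\ y]) \\
    & = & \idt{App}\ b\ (f\ x \mathbin{\#} \idt{substs}\ f\ [\idt{Var}\ y]) \\
    & = & \idt{App}\ b\ [f\ x,\ f\ y]
\end{matharray}
The first step is the App equation; the remaining ones combine the list equations with the
Var equation.
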
--- a/src/HOL/Isar_examples/Peirce.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/Peirce.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,9 +3,9 @@
Author: Markus Wenzel, TU Muenchen
*)
-header {* Peirce's Law *};
+header {* Peirce's Law *}
-theory Peirce = Main:;
+theory Peirce = Main:
text {*
We consider Peirce's Law: $((A \impl B) \impl A) \impl A$. This is
@@ -17,22 +17,22 @@
the negated goal may be introduced as additional assumption. This
eventually leads to a contradiction.\footnote{The rule involved there
is negation elimination; it holds in intuitionistic logic as well.}
-*};
+*}
-theorem "((A --> B) --> A) --> A";
-proof;
- assume aba: "(A --> B) --> A";
- show A;
- proof (rule classical);
- assume "~ A";
- have "A --> B";
- proof;
- assume A;
- thus B; by contradiction;
- qed;
- with aba; show A; ..;
- qed;
-qed;
+theorem "((A --> B) --> A) --> A"
+proof
+ assume aba: "(A --> B) --> A"
+ show A
+ proof (rule classical)
+ assume "~ A"
+ have "A --> B"
+ proof
+ assume A
+ thus B by contradiction
+ qed
+ with aba show A ..
+ qed
+qed
text {*
In the subsequent version the reasoning is rearranged by means of
@@ -48,24 +48,24 @@
give rise to new subgoals, which may be established separately. In
contrast, strong assumptions (as introduced by \isacommand{assume})
are solved immediately.
-*};
+*}
-theorem "((A --> B) --> A) --> A";
-proof;
- assume aba: "(A --> B) --> A";
- show A;
- proof (rule classical);
- presume "A --> B";
- with aba; show A; ..;
- next;
- assume "~ A";
- show "A --> B";
- proof;
- assume A;
- thus B; by contradiction;
- qed;
- qed;
-qed;
+theorem "((A --> B) --> A) --> A"
+proof
+ assume aba: "(A --> B) --> A"
+ show A
+ proof (rule classical)
+ presume "A --> B"
+ with aba show A ..
+ next
+ assume "~ A"
+ show "A --> B"
+ proof
+ assume A
+ thus B by contradiction
+ qed
+ qed
+qed
text {*
Note that the goals stemming from weak assumptions may be even left
@@ -84,6 +84,6 @@
assumptions these may be still solved in any order in a predictable
way, while weak ones would quickly lead to great confusion,
eventually demanding even some backtracking.
-*};
+*}
-end;
+end
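
A side remark on how both proofs relate to the rule they appeal to: instantiating $B$ with
$\idt{False}$, Peirce's Law reads
\[
  ((A \impl \idt{False}) \impl A) \impl A,
\]
i.e.\ $(\neg A \impl A) \impl A$, which is exactly the shape of the classical rule applied
above; the detour through $A \impl B$ merely packages the assumption $\neg A$ in
implicational form.
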
--- a/src/HOL/Isar_examples/Puzzle.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/Puzzle.thy Sun Sep 17 22:19:02 2000 +0200
@@ -1,128 +1,128 @@
-header {* An old chestnut *};
+header {* An old chestnut *}
-theory Puzzle = Main:;
+theory Puzzle = Main:
text_raw {*
\footnote{A question from ``Bundeswettbewerb Mathematik''.
Original pen-and-paper proof due to Herbert Ehler; Isabelle tactic
script by Tobias Nipkow.}
-*};
+*}
-subsection {* Generalized mathematical induction *};
+subsection {* Generalized mathematical induction *}
text {*
The following derived rule admits induction over some expression
$f(x)$ wrt.\ the ${<}$ relation on natural numbers.
-*};
+*}
lemma gen_less_induct:
"(!!x. ALL y. f y < f x --> P y (f y) ==> P x (f x))
==> P x (f x :: nat)"
- (is "(!!x. ?H x ==> ?C x) ==> _");
-proof -;
- assume asm: "!!x. ?H x ==> ?C x";
- {;
- fix k;
- have "ALL x. k = f x --> ?C x" (is "?Q k");
- proof (rule nat_less_induct);
- fix k; assume hyp: "ALL m<k. ?Q m";
- show "?Q k";
- proof;
- fix x; show "k = f x --> ?C x";
- proof;
- assume "k = f x";
- with hyp; have "?H x"; by blast;
- thus "?C x"; by (rule asm);
- qed;
- qed;
- qed;
- };
- thus "?C x"; by simp;
-qed;
+ (is "(!!x. ?H x ==> ?C x) ==> _")
+proof -
+ assume asm: "!!x. ?H x ==> ?C x"
+ {
+ fix k
+ have "ALL x. k = f x --> ?C x" (is "?Q k")
+ proof (rule nat_less_induct)
+ fix k assume hyp: "ALL m<k. ?Q m"
+ show "?Q k"
+ proof
+ fix x show "k = f x --> ?C x"
+ proof
+ assume "k = f x"
+ with hyp have "?H x" by blast
+ thus "?C x" by (rule asm)
+ qed
+ qed
+ qed
+ }
+ thus "?C x" by simp
+qed
-subsection {* The problem *};
+subsection {* The problem *}
text {*
Given some function $f\colon \Nat \to \Nat$ such that $f \ap (f \ap
n) < f \ap (\idt{Suc} \ap n)$ for all $n$. Demonstrate that $f$ is
the identity.
-*};
+*}
-consts f :: "nat => nat";
-axioms f_ax: "f (f n) < f (Suc n)";
+consts f :: "nat => nat"
+axioms f_ax: "f (f n) < f (Suc n)"
-theorem "f n = n";
-proof (rule order_antisym);
+theorem "f n = n"
+proof (rule order_antisym)
txt {*
Note that the generalized form of $n \le f \ap n$ is required
later for monotonicity as well.
- *};
- show ge: "!!n. n <= f n";
- proof -;
- fix n;
- show "?thesis n" (is "?P n (f n)");
- proof (rule gen_less_induct [of f ?P]);
- fix n; assume hyp: "ALL m. f m < f n --> ?P m (f m)";
- show "?P n (f n)";
- proof (rule nat.exhaust);
- assume "n = 0"; thus ?thesis; by simp;
- next;
- fix m; assume n_Suc: "n = Suc m";
- from f_ax; have "f (f m) < f (Suc m)"; .;
- with hyp n_Suc; have "f m <= f (f m)"; by blast;
- also; from f_ax; have "... < f (Suc m)"; .;
- finally; have lt: "f m < f (Suc m)"; .;
- with hyp n_Suc; have "m <= f m"; by blast;
- also; note lt;
- finally; have "m < f (Suc m)"; .;
- thus "n <= f n"; by (simp only: n_Suc);
- qed;
- qed;
- qed;
+ *}
+ show ge: "!!n. n <= f n"
+ proof -
+ fix n
+ show "?thesis n" (is "?P n (f n)")
+ proof (rule gen_less_induct [of f ?P])
+ fix n assume hyp: "ALL m. f m < f n --> ?P m (f m)"
+ show "?P n (f n)"
+ proof (rule nat.exhaust)
+ assume "n = 0" thus ?thesis by simp
+ next
+ fix m assume n_Suc: "n = Suc m"
+ from f_ax have "f (f m) < f (Suc m)" .
+ with hyp n_Suc have "f m <= f (f m)" by blast
+ also from f_ax have "... < f (Suc m)" .
+ finally have lt: "f m < f (Suc m)" .
+ with hyp n_Suc have "m <= f m" by blast
+ also note lt
+ finally have "m < f (Suc m)" .
+ thus "n <= f n" by (simp only: n_Suc)
+ qed
+ qed
+ qed
txt {*
In order to show the other direction, we first establish
monotonicity of $f$.
- *};
- have mono: "!!m n. m <= n --> f m <= f n";
- proof -;
- fix m n;
- show "?thesis m n" (is "?P n");
- proof (induct n);
- show "?P 0"; by simp;
- fix n; assume hyp: "?P n";
- show "?P (Suc n)";
- proof;
- assume "m <= Suc n";
- thus "f m <= f (Suc n)";
- proof (rule le_SucE);
- assume "m <= n";
- with hyp; have "f m <= f n"; ..;
- also; from ge f_ax; have "... < f (Suc n)";
- by (rule le_less_trans);
- finally; show ?thesis; by simp;
- next;
- assume "m = Suc n";
- thus ?thesis; by simp;
- qed;
- qed;
- qed;
- qed;
+ *}
+ have mono: "!!m n. m <= n --> f m <= f n"
+ proof -
+ fix m n
+ show "?thesis m n" (is "?P n")
+ proof (induct n)
+ show "?P 0" by simp
+ fix n assume hyp: "?P n"
+ show "?P (Suc n)"
+ proof
+ assume "m <= Suc n"
+ thus "f m <= f (Suc n)"
+ proof (rule le_SucE)
+ assume "m <= n"
+ with hyp have "f m <= f n" ..
+ also from ge f_ax have "... < f (Suc n)"
+ by (rule le_less_trans)
+ finally show ?thesis by simp
+ next
+ assume "m = Suc n"
+ thus ?thesis by simp
+ qed
+ qed
+ qed
+ qed
- show "f n <= n";
- proof (rule leI);
- show "~ n < f n";
- proof;
- assume "n < f n";
- hence "Suc n <= f n"; by (rule Suc_leI);
- hence "f (Suc n) <= f (f n)"; by (rule mono [rule_format]);
- also; have "... < f (Suc n)"; by (rule f_ax);
- finally; have "... < ..."; .; thus False; ..;
- qed;
- qed;
-qed;
+ show "f n <= n"
+ proof (rule leI)
+ show "~ n < f n"
+ proof
+ assume "n < f n"
+ hence "Suc n <= f n" by (rule Suc_leI)
+ hence "f (Suc n) <= f (f n)" by (rule mono [rule_format])
+ also have "... < f (Suc n)" by (rule f_ax)
+ finally have "... < ..." . thus False ..
+ qed
+ qed
+qed
-end;
+end
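
In summary, the two halves of the proof above fit together as follows.  For $n \le f\ap n$
in the step case $n = \idt{Suc}\ap m$, the induction hypothesis and the axiom combine to
\[
  m \le f\ap m \le f\ap (f\ap m) < f\ap (\idt{Suc}\ap m),
\]
hence $\idt{Suc}\ap m \le f\ap (\idt{Suc}\ap m)$.  Conversely, $n < f\ap n$ would give
$\idt{Suc}\ap n \le f\ap n$, so monotonicity yields
$f\ap (\idt{Suc}\ap n) \le f\ap (f\ap n) < f\ap (\idt{Suc}\ap n)$, a contradiction.
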
--- a/src/HOL/Isar_examples/Summation.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/Summation.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,26 +3,26 @@
Author: Markus Wenzel
*)
-header {* Summing natural numbers *};
+header {* Summing natural numbers *}
-theory Summation = Main:;
+theory Summation = Main:
text_raw {*
\footnote{This example is somewhat reminiscent of the
\url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, which is
discussed in \cite{isabelle-ref} in the context of permutative
rewrite rules and ordered rewriting.}
-*};
+*}
text {*
Subsequently, we prove some summation laws of natural numbers
(including odds, squares, and cubes). These examples demonstrate how
plain natural deduction (including induction) may be combined with
calculational proof.
-*};
+*}
-subsection {* A summation operator *};
+subsection {* A summation operator *}
text {*
The binder operator $\idt{sum} :: (\idt{nat} \To \idt{nat}) \To
@@ -31,42 +31,42 @@
\[
\sum\limits_{i < k} f(i) = \idt{sum} \ap (\lam i f \ap i) \ap k
\]
-*};
+*}
consts
- sum :: "(nat => nat) => nat => nat";
+ sum :: "(nat => nat) => nat => nat"
primrec
"sum f 0 = 0"
- "sum f (Suc n) = f n + sum f n";
+ "sum f (Suc n) = f n + sum f n"
syntax
"_SUM" :: "idt => nat => nat => nat"
- ("SUM _ < _. _" [0, 0, 10] 10);
+ ("SUM _ < _. _" [0, 0, 10] 10)
translations
- "SUM i < k. b" == "sum (\\<lambda>i. b) k";
+ "SUM i < k. b" == "sum (\<lambda>i. b) k"
-subsection {* Summation laws *};
+subsection {* Summation laws *}
text {*
The sum of natural numbers $0 + \cdots + n$ equals $n \times (n +
1)/2$. Avoiding formal reasoning about division we prove this
equation multiplied by $2$.
-*};
+*}
theorem sum_of_naturals:
"2 * (SUM i < n + 1. i) = n * (n + 1)"
- (is "?P n" is "?S n = _");
-proof (induct n);
- show "?P 0"; by simp;
+ (is "?P n" is "?S n = _")
+proof (induct n)
+ show "?P 0" by simp
- fix n;
- have "?S (n + 1) = ?S n + 2 * (n + 1)"; by simp;
- also; assume "?S n = n * (n + 1)";
- also; have "... + 2 * (n + 1) = (n + 1) * (n + 2)"; by simp;
- finally; show "?P (Suc n)"; by simp;
-qed;
+ fix n
+ have "?S (n + 1) = ?S n + 2 * (n + 1)" by simp
+ also assume "?S n = n * (n + 1)"
+ also have "... + 2 * (n + 1) = (n + 1) * (n + 2)" by simp
+ finally show "?P (Suc n)" by simp
+qed
text {*
The above proof is a typical instance of mathematical induction. The
@@ -103,63 +103,63 @@
context elements of the form $x:A$ instead.
\end{enumerate}
-*};
+*}
text {*
\medskip We derive further summation laws for odds, squares, and
cubes as follows. The basic technique of induction plus calculation
is the same as before.
-*};
+*}
theorem sum_of_odds:
"(SUM i < n. 2 * i + 1) = n^2"
- (is "?P n" is "?S n = _");
-proof (induct n);
- show "?P 0"; by simp;
+ (is "?P n" is "?S n = _")
+proof (induct n)
+ show "?P 0" by simp
- fix n;
- have "?S (n + 1) = ?S n + 2 * n + 1"; by simp;
- also; assume "?S n = n^2";
- also; have "... + 2 * n + 1 = (n + 1)^2"; by simp;
- finally; show "?P (Suc n)"; by simp;
-qed;
+ fix n
+ have "?S (n + 1) = ?S n + 2 * n + 1" by simp
+ also assume "?S n = n^2"
+ also have "... + 2 * n + 1 = (n + 1)^2" by simp
+ finally show "?P (Suc n)" by simp
+qed
text {*
Subsequently we require some additional tweaking of Isabelle built-in
arithmetic simplifications, such as bringing in distributivity by
hand.
-*};
+*}
-lemmas distrib = add_mult_distrib add_mult_distrib2;
+lemmas distrib = add_mult_distrib add_mult_distrib2
theorem sum_of_squares:
"#6 * (SUM i < n + 1. i^2) = n * (n + 1) * (2 * n + 1)"
- (is "?P n" is "?S n = _");
-proof (induct n);
- show "?P 0"; by simp;
+ (is "?P n" is "?S n = _")
+proof (induct n)
+ show "?P 0" by simp
- fix n;
- have "?S (n + 1) = ?S n + #6 * (n + 1)^2"; by (simp add: distrib);
- also; assume "?S n = n * (n + 1) * (2 * n + 1)";
- also; have "... + #6 * (n + 1)^2 =
- (n + 1) * (n + 2) * (2 * (n + 1) + 1)"; by (simp add: distrib);
- finally; show "?P (Suc n)"; by simp;
-qed;
+ fix n
+ have "?S (n + 1) = ?S n + #6 * (n + 1)^2" by (simp add: distrib)
+ also assume "?S n = n * (n + 1) * (2 * n + 1)"
+ also have "... + #6 * (n + 1)^2 =
+ (n + 1) * (n + 2) * (2 * (n + 1) + 1)" by (simp add: distrib)
+ finally show "?P (Suc n)" by simp
+qed
theorem sum_of_cubes:
"#4 * (SUM i < n + 1. i^#3) = (n * (n + 1))^2"
- (is "?P n" is "?S n = _");
-proof (induct n);
- show "?P 0"; by (simp add: power_eq_if);
+ (is "?P n" is "?S n = _")
+proof (induct n)
+ show "?P 0" by (simp add: power_eq_if)
- fix n;
- have "?S (n + 1) = ?S n + #4 * (n + 1)^#3";
- by (simp add: power_eq_if distrib);
- also; assume "?S n = (n * (n + 1))^2";
- also; have "... + #4 * (n + 1)^#3 = ((n + 1) * ((n + 1) + 1))^2";
- by (simp add: power_eq_if distrib);
- finally; show "?P (Suc n)"; by simp;
-qed;
+ fix n
+ have "?S (n + 1) = ?S n + #4 * (n + 1)^#3"
+ by (simp add: power_eq_if distrib)
+ also assume "?S n = (n * (n + 1))^2"
+ also have "... + #4 * (n + 1)^#3 = ((n + 1) * ((n + 1) + 1))^2"
+ by (simp add: power_eq_if distrib)
+ finally show "?P (Suc n)" by simp
+qed
text {*
Comparing these examples with the tactic script version
@@ -180,6 +180,6 @@
such as $\idt{simp}$ or $\idt{auto}$ should normally be never used as
initial proof methods, but only as terminal ones, solving certain
goals completely.
-*};
+*}
-end;
+end
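
For the record, the arithmetic behind the induction step of the first summation law above
is simply
\[
  2 \sum\limits_{i < n + 2} i
    \;=\; 2 \sum\limits_{i < n + 1} i + 2 (n + 1)
    \;=\; n (n + 1) + 2 (n + 1)
    \;=\; (n + 1) (n + 2),
\]
the middle equality being the induction hypothesis; the laws for odds, squares and cubes
follow the same scheme, only with heavier use of distributivity.
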
--- a/src/HOL/Isar_examples/W_correct.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Isar_examples/W_correct.thy Sun Sep 17 22:19:02 2000 +0200
@@ -5,76 +5,76 @@
Correctness of Milner's type inference algorithm W (let-free version).
*)
-header {* Milner's type inference algorithm~W (let-free version) *};
+header {* Milner's type inference algorithm~W (let-free version) *}
-theory W_correct = Main + Type:;
+theory W_correct = Main + Type:
text_raw {*
\footnote{Based upon \url{http://isabelle.in.tum.de/library/HOL/W0}
by Dieter Nazareth and Tobias Nipkow.}
-*};
+*}
-subsection "Mini ML with type inference rules";
+subsection "Mini ML with type inference rules"
datatype
- expr = Var nat | Abs expr | App expr expr;
+ expr = Var nat | Abs expr | App expr expr
-text {* Type inference rules. *};
+text {* Type inference rules. *}
consts
- has_type :: "(typ list * expr * typ) set";
+ has_type :: "(typ list * expr * typ) set"
syntax
"_has_type" :: "[typ list, expr, typ] => bool"
- ("((_) |-/ (_) :: (_))" [60, 0, 60] 60);
+ ("((_) |-/ (_) :: (_))" [60, 0, 60] 60)
translations
- "a |- e :: t" == "(a,e,t) : has_type";
+ "a |- e :: t" == "(a,e,t) : has_type"
inductive has_type
intros [simp]
Var: "n < length a ==> a |- Var n :: a ! n"
Abs: "t1#a |- e :: t2 ==> a |- Abs e :: t1 -> t2"
App: "a |- e1 :: t2 -> t1 ==> a |- e2 :: t2
- ==> a |- App e1 e2 :: t1";
+ ==> a |- App e1 e2 :: t1"
-text {* Type assigment is closed wrt.\ substitution. *};
+text {* Type assignment is closed wrt.\ substitution. *}
-lemma has_type_subst_closed: "a |- e :: t ==> $s a |- e :: $s t";
-proof -;
- assume "a |- e :: t";
- thus ?thesis (is "?P a e t");
- proof (induct (open) ?P a e t);
- case Var;
- hence "n < length (map ($ s) a)"; by simp;
- hence "map ($ s) a |- Var n :: map ($ s) a ! n";
- by (rule has_type.Var);
- also; have "map ($ s) a ! n = $ s (a ! n)";
- by (rule nth_map);
- also; have "map ($ s) a = $ s a";
- by (simp only: app_subst_list);
- finally; show "?P a (Var n) (a ! n)"; .;
- next;
- case Abs;
- hence "$ s t1 # map ($ s) a |- e :: $ s t2";
- by (simp add: app_subst_list);
- hence "map ($ s) a |- Abs e :: $ s t1 -> $ s t2";
- by (rule has_type.Abs);
- thus "?P a (Abs e) (t1 -> t2)";
- by (simp add: app_subst_list);
- next;
- case App;
- thus "?P a (App e1 e2) t1"; by simp;
- qed;
-qed;
+lemma has_type_subst_closed: "a |- e :: t ==> $s a |- e :: $s t"
+proof -
+ assume "a |- e :: t"
+ thus ?thesis (is "?P a e t")
+ proof (induct (open) ?P a e t)
+ case Var
+ hence "n < length (map ($ s) a)" by simp
+ hence "map ($ s) a |- Var n :: map ($ s) a ! n"
+ by (rule has_type.Var)
+ also have "map ($ s) a ! n = $ s (a ! n)"
+ by (rule nth_map)
+ also have "map ($ s) a = $ s a"
+ by (simp only: app_subst_list)
+ finally show "?P a (Var n) (a ! n)" .
+ next
+ case Abs
+ hence "$ s t1 # map ($ s) a |- e :: $ s t2"
+ by (simp add: app_subst_list)
+ hence "map ($ s) a |- Abs e :: $ s t1 -> $ s t2"
+ by (rule has_type.Abs)
+ thus "?P a (Abs e) (t1 -> t2)"
+ by (simp add: app_subst_list)
+ next
+ case App
+ thus "?P a (App e1 e2) t1" by simp
+ qed
+qed
-subsection {* Type inference algorithm W *};
+subsection {* Type inference algorithm W *}
consts
- W :: "[expr, typ list, nat] => (subst * typ * nat) maybe";
+ W :: "[expr, typ list, nat] => (subst * typ * nat) maybe"
primrec
"W (Var i) a n =
@@ -86,64 +86,64 @@
((s1, t1, m1) := W e1 a n;
(s2, t2, m2) := W e2 ($s1 a) m1;
u := mgu ($ s2 t1) (t2 -> TVar m2);
- Ok ($u o $s2 o s1, $u (TVar m2), Suc m2))";
+ Ok ($u o $s2 o s1, $u (TVar m2), Suc m2))"
-subsection {* Correctness theorem *};
+subsection {* Correctness theorem *}
-theorem W_correct: "W e a n = Ok (s, t, m) ==> $ s a |- e :: t";
-proof -;
- assume W_ok: "W e a n = Ok (s, t, m)";
+theorem W_correct: "W e a n = Ok (s, t, m) ==> $ s a |- e :: t"
+proof -
+ assume W_ok: "W e a n = Ok (s, t, m)"
have "ALL a s t m n. Ok (s, t, m) = W e a n --> $ s a |- e :: t"
- (is "?P e");
- proof (induct (stripped) e);
- fix a s t m n;
+ (is "?P e")
+ proof (induct (stripped) e)
+ fix a s t m n
{
- fix i;
- assume "Ok (s, t, m) = W (Var i) a n";
- thus "$ s a |- Var i :: t"; by (simp split: if_splits);
- next;
- fix e; assume hyp: "?P e";
- assume "Ok (s, t, m) = W (Abs e) a n";
- then; obtain t' where "t = s n -> t'"
- and "Ok (s, t', m) = W e (TVar n # a) (Suc n)";
- by (auto split: bind_splits);
- with hyp; show "$ s a |- Abs e :: t";
- by (force intro: has_type.Abs);
- next;
- fix e1 e2; assume hyp1: "?P e1" and hyp2: "?P e2";
- assume "Ok (s, t, m) = W (App e1 e2) a n";
- then; obtain s1 t1 n1 s2 t2 n2 u where
+ fix i
+ assume "Ok (s, t, m) = W (Var i) a n"
+ thus "$ s a |- Var i :: t" by (simp split: if_splits)
+ next
+ fix e assume hyp: "?P e"
+ assume "Ok (s, t, m) = W (Abs e) a n"
+ then obtain t' where "t = s n -> t'"
+ and "Ok (s, t', m) = W e (TVar n # a) (Suc n)"
+ by (auto split: bind_splits)
+ with hyp show "$ s a |- Abs e :: t"
+ by (force intro: has_type.Abs)
+ next
+ fix e1 e2 assume hyp1: "?P e1" and hyp2: "?P e2"
+ assume "Ok (s, t, m) = W (App e1 e2) a n"
+ then obtain s1 t1 n1 s2 t2 n2 u where
s: "s = $ u o $ s2 o s1"
and t: "t = u n2"
and mgu_ok: "mgu ($ s2 t1) (t2 -> TVar n2) = Ok u"
and W1_ok: "W e1 a n = Ok (s1, t1, n1)"
- and W2_ok: "W e2 ($ s1 a) n1 = Ok (s2, t2, n2)";
- by (auto split: bind_splits);
- show "$ s a |- App e1 e2 :: t";
- proof (rule has_type.App);
- from s; have s': "$ u ($ s2 ($ s1 a)) = $s a";
- by (simp add: subst_comp_tel o_def);
- show "$s a |- e1 :: $ u t2 -> t";
- proof -;
- from hyp1 W1_ok [symmetric]; have "$ s1 a |- e1 :: t1";
- by blast;
- hence "$ u ($ s2 ($ s1 a)) |- e1 :: $ u ($ s2 t1)";
- by (intro has_type_subst_closed);
- with s' t mgu_ok; show ?thesis; by simp;
- qed;
- show "$ s a |- e2 :: $ u t2";
- proof -;
- from hyp2 W2_ok [symmetric];
- have "$ s2 ($ s1 a) |- e2 :: t2"; by blast;
- hence "$ u ($ s2 ($ s1 a)) |- e2 :: $ u t2";
- by (rule has_type_subst_closed);
- with s'; show ?thesis; by simp;
- qed;
- qed;
- };
- qed;
- with W_ok [symmetric]; show ?thesis; by blast;
-qed;
+ and W2_ok: "W e2 ($ s1 a) n1 = Ok (s2, t2, n2)"
+ by (auto split: bind_splits)
+ show "$ s a |- App e1 e2 :: t"
+ proof (rule has_type.App)
+ from s have s': "$ u ($ s2 ($ s1 a)) = $s a"
+ by (simp add: subst_comp_tel o_def)
+ show "$s a |- e1 :: $ u t2 -> t"
+ proof -
+ from hyp1 W1_ok [symmetric] have "$ s1 a |- e1 :: t1"
+ by blast
+ hence "$ u ($ s2 ($ s1 a)) |- e1 :: $ u ($ s2 t1)"
+ by (intro has_type_subst_closed)
+ with s' t mgu_ok show ?thesis by simp
+ qed
+ show "$ s a |- e2 :: $ u t2"
+ proof -
+ from hyp2 W2_ok [symmetric]
+ have "$ s2 ($ s1 a) |- e2 :: t2" by blast
+ hence "$ u ($ s2 ($ s1 a)) |- e2 :: $ u t2"
+ by (rule has_type_subst_closed)
+ with s' show ?thesis by simp
+ qed
+ qed
+ }
+ qed
+ with W_ok [symmetric] show ?thesis by blast
+qed
-end;
+end
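
A detail that is easy to miss in the App case above: the substitutions delivered by the two
recursive calls and by mgu are composed as $s = \$ u \circ \$ s_2 \circ s_1$ (writing
$s_1, s_2$ for s1, s2, and $\vdash$ for the theory's |- judgement), so that
\[
  \$ s\ a \;=\; \$ u\ (\$ s_2\ (\$ s_1\ a)),
\]
which is what lets the typings $\$ s_1\ a \vdash e_1 :: t_1$ and
$\$ s_2\ (\$ s_1\ a) \vdash e_2 :: t_2$ obtained from the induction hypotheses be
transported into the final context via has_type_subst_closed.
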
--- a/src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy Sun Sep 17 22:15:08 2000 +0200
+++ b/src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy Sun Sep 17 22:19:02 2000 +0200
@@ -3,9 +3,9 @@
Author: Gertrud Bauer, TU Munich
*)
-header {* Extending non-maximal functions *};
+header {* Extending non-maximal functions *}
-theory HahnBanachExtLemmas = FunctionNorm:;
+theory HahnBanachExtLemmas = FunctionNorm:
text{* In this section the following context is presumed.
Let $E$ be a real vector space with a
@@ -19,7 +19,7 @@
$h'\ap x = h\ap y + a \cdot \xi$ for a certain $\xi$.
Subsequently we show some properties of this extension $h'$ of $h$.
-*};
+*}
text {* This lemma will be used to show the existence of a linear
@@ -32,211 +32,211 @@
it suffices to show that
\begin{matharray}{l} \All
{u\in F}{\All {v\in F}{a\ap u \leq b \ap v}}
-\end{matharray} *};
+\end{matharray} *}
lemma ex_xi:
"[| is_vectorspace F; !! u v. [| u \<in> F; v \<in> F |] ==> a u <= b v |]
- ==> \<exists>xi::real. \<forall>y \<in> F. a y <= xi \<and> xi <= b y";
-proof -;
- assume vs: "is_vectorspace F";
- assume r: "(!! u v. [| u \<in> F; v \<in> F |] ==> a u <= (b v::real))";
+ ==> \<exists>xi::real. \<forall>y \<in> F. a y <= xi \<and> xi <= b y"
+proof -
+ assume vs: "is_vectorspace F"
+ assume r: "(!! u v. [| u \<in> F; v \<in> F |] ==> a u <= (b v::real))"
txt {* From the completeness of the reals follows:
The set $S = \{a\: u\dt\: u\in F\}$ has a supremum, if
- it is non-empty and has an upper bound. *};
+ it is non-empty and has an upper bound. *}
- let ?S = "{a u :: real | u. u \<in> F}";
+ let ?S = "{a u :: real | u. u \<in> F}"
- have "\<exists>xi. isLub UNIV ?S xi";
- proof (rule reals_complete);
+ have "\<exists>xi. isLub UNIV ?S xi"
+ proof (rule reals_complete)
- txt {* The set $S$ is non-empty, since $a\ap\zero \in S$: *};
+ txt {* The set $S$ is non-empty, since $a\ap\zero \in S$: *}
- from vs; have "a 0 \<in> ?S"; by force;
- thus "\<exists>X. X \<in> ?S"; ..;
+ from vs have "a 0 \<in> ?S" by force
+ thus "\<exists>X. X \<in> ?S" ..
- txt {* $b\ap \zero$ is an upper bound of $S$: *};
+ txt {* $b\ap \zero$ is an upper bound of $S$: *}
- show "\<exists>Y. isUb UNIV ?S Y";
- proof;
- show "isUb UNIV ?S (b 0)";
- proof (intro isUbI setleI ballI);
- show "b 0 \<in> UNIV"; ..;
- next;
+ show "\<exists>Y. isUb UNIV ?S Y"
+ proof
+ show "isUb UNIV ?S (b 0)"
+ proof (intro isUbI setleI ballI)
+ show "b 0 \<in> UNIV" ..
+ next
- txt {* Every element $y\in S$ is less than $b\ap \zero$: *};
+ txt {* Every element $y\in S$ is less than $b\ap \zero$: *}
- fix y; assume y: "y \<in> ?S";
- from y; have "\<exists>u \<in> F. y = a u"; by fast;
- thus "y <= b 0";
- proof;
- fix u; assume "u \<in> F";
- assume "y = a u";
- also; have "a u <= b 0"; by (rule r) (simp!)+;
- finally; show ?thesis; .;
- qed;
- qed;
- qed;
- qed;
+ fix y assume y: "y \<in> ?S"
+ from y have "\<exists>u \<in> F. y = a u" by fast
+ thus "y <= b 0"
+ proof
+ fix u assume "u \<in> F"
+ assume "y = a u"
+ also have "a u <= b 0" by (rule r) (simp!)+
+ finally show ?thesis .
+ qed
+ qed
+ qed
+ qed
- thus "\<exists>xi. \<forall>y \<in> F. a y <= xi \<and> xi <= b y";
- proof (elim exE);
- fix xi; assume "isLub UNIV ?S xi";
- show ?thesis;
- proof (intro exI conjI ballI);
+ thus "\<exists>xi. \<forall>y \<in> F. a y <= xi \<and> xi <= b y"
+ proof (elim exE)
+ fix xi assume "isLub UNIV ?S xi"
+ show ?thesis
+ proof (intro exI conjI ballI)
- txt {* For all $y\in F$ holds $a\ap y \leq \xi$: *};
+ txt {* For all $y\in F$ holds $a\ap y \leq \xi$: *}
- fix y; assume y: "y \<in> F";
- show "a y <= xi";
- proof (rule isUbD);
- show "isUb UNIV ?S xi"; ..;
- qed (force!);
- next;
+ fix y assume y: "y \<in> F"
+ show "a y <= xi"
+ proof (rule isUbD)
+ show "isUb UNIV ?S xi" ..
+ qed (force!)
+ next
- txt {* For all $y\in F$ holds $\xi\leq b\ap y$: *};
+ txt {* For all $y\in F$ holds $\xi\leq b\ap y$: *}
- fix y; assume "y \<in> F";
- show "xi <= b y";
- proof (intro isLub_le_isUb isUbI setleI);
- show "b y \<in> UNIV"; ..;
- show "\<forall>ya \<in> ?S. ya <= b y";
- proof;
- fix au; assume au: "au \<in> ?S ";
- hence "\<exists>u \<in> F. au = a u"; by fast;
- thus "au <= b y";
- proof;
- fix u; assume "u \<in> F"; assume "au = a u";
- also; have "... <= b y"; by (rule r);
- finally; show ?thesis; .;
- qed;
- qed;
- qed;
- qed;
- qed;
-qed;
+ fix y assume "y \<in> F"
+ show "xi <= b y"
+ proof (intro isLub_le_isUb isUbI setleI)
+ show "b y \<in> UNIV" ..
+ show "\<forall>ya \<in> ?S. ya <= b y"
+ proof
+ fix au assume au: "au \<in> ?S "
+ hence "\<exists>u \<in> F. au = a u" by fast
+ thus "au <= b y"
+ proof
+ fix u assume "u \<in> F" assume "au = a u"
+ also have "... <= b y" by (rule r)
+ finally show ?thesis .
+ qed
+ qed
+ qed
+ qed
+ qed
+qed
text{* \medskip The function $h'$ is defined as a
$h'\ap x = h\ap y + a\cdot \xi$ where $x = y + a\mult \xi$
-is a linear extension of $h$ to $H'$. *};
+is a linear extension of $h$ to $H'$. *}
lemma h'_lf:
"[| h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H
in h y + a * xi);
H' == H + lin x0; is_subspace H E; is_linearform H h; x0 \<notin> H;
x0 \<in> E; x0 \<noteq> 0; is_vectorspace E |]
- ==> is_linearform H' h'";
-proof -;
+ ==> is_linearform H' h'"
+proof -
assume h'_def:
"h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H
in h y + a * xi)"
and H'_def: "H' == H + lin x0"
and vs: "is_subspace H E" "is_linearform H h" "x0 \<notin> H"
- "x0 \<noteq> 0" "x0 \<in> E" "is_vectorspace E";
+ "x0 \<noteq> 0" "x0 \<in> E" "is_vectorspace E"
- have h': "is_vectorspace H'";
- proof (unfold H'_def, rule vs_sum_vs);
- show "is_subspace (lin x0) E"; ..;
- qed;
+ have h': "is_vectorspace H'"
+ proof (unfold H'_def, rule vs_sum_vs)
+ show "is_subspace (lin x0) E" ..
+ qed
- show ?thesis;
- proof;
- fix x1 x2; assume x1: "x1 \<in> H'" and x2: "x2 \<in> H'";
+ show ?thesis
+ proof
+ fix x1 x2 assume x1: "x1 \<in> H'" and x2: "x2 \<in> H'"
txt{* We now have to show that $h'$ is additive, i.~e.\
$h' \ap (x_1\plus x_2) = h'\ap x_1 + h'\ap x_2$
- for $x_1, x_2\in H$. *};
+ for $x_1, x_2\in H$. *}
- have x1x2: "x1 + x2 \<in> H'";
- by (rule vs_add_closed, rule h');
- from x1;
- have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0 \<and> y1 \<in> H";
- by (unfold H'_def vs_sum_def lin_def) fast;
- from x2;
- have ex_x2: "\<exists>y2 a2. x2 = y2 + a2 \<cdot> x0 \<and> y2 \<in> H";
- by (unfold H'_def vs_sum_def lin_def) fast;
- from x1x2;
- have ex_x1x2: "\<exists>y a. x1 + x2 = y + a \<cdot> x0 \<and> y \<in> H";
- by (unfold H'_def vs_sum_def lin_def) fast;
+ have x1x2: "x1 + x2 \<in> H'"
+ by (rule vs_add_closed, rule h')
+ from x1
+ have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0 \<and> y1 \<in> H"
+ by (unfold H'_def vs_sum_def lin_def) fast
+ from x2
+ have ex_x2: "\<exists>y2 a2. x2 = y2 + a2 \<cdot> x0 \<and> y2 \<in> H"
+ by (unfold H'_def vs_sum_def lin_def) fast
+ from x1x2
+ have ex_x1x2: "\<exists>y a. x1 + x2 = y + a \<cdot> x0 \<and> y \<in> H"
+ by (unfold H'_def vs_sum_def lin_def) fast
- from ex_x1 ex_x2 ex_x1x2;
- show "h' (x1 + x2) = h' x1 + h' x2";
- proof (elim exE conjE);
- fix y1 y2 y a1 a2 a;
+ from ex_x1 ex_x2 ex_x1x2
+ show "h' (x1 + x2) = h' x1 + h' x2"
+ proof (elim exE conjE)
+ fix y1 y2 y a1 a2 a
assume y1: "x1 = y1 + a1 \<cdot> x0" and y1': "y1 \<in> H"
and y2: "x2 = y2 + a2 \<cdot> x0" and y2': "y2 \<in> H"
- and y: "x1 + x2 = y + a \<cdot> x0" and y': "y \<in> H";
+ and y: "x1 + x2 = y + a \<cdot> x0" and y': "y \<in> H"
txt {* \label{decomp-H-use}*}
- have ya: "y1 + y2 = y \<and> a1 + a2 = a";
+ have ya: "y1 + y2 = y \<and> a1 + a2 = a"
proof (rule decomp_H')
- show "y1 + y2 + (a1 + a2) \<cdot> x0 = y + a \<cdot> x0";
- by (simp! add: vs_add_mult_distrib2 [of E]);
- show "y1 + y2 \<in> H"; ..;
- qed;
+ show "y1 + y2 + (a1 + a2) \<cdot> x0 = y + a \<cdot> x0"
+ by (simp! add: vs_add_mult_distrib2 [of E])
+ show "y1 + y2 \<in> H" ..
+ qed
- have "h' (x1 + x2) = h y + a * xi";
- by (rule h'_definite);
- also; have "... = h (y1 + y2) + (a1 + a2) * xi";
- by (simp add: ya);
- also; from vs y1' y2';
- have "... = h y1 + h y2 + a1 * xi + a2 * xi";
+ have "h' (x1 + x2) = h y + a * xi"
+ by (rule h'_definite)
+ also have "... = h (y1 + y2) + (a1 + a2) * xi"
+ by (simp add: ya)
+ also from vs y1' y2'
+ have "... = h y1 + h y2 + a1 * xi + a2 * xi"
by (simp add: linearform_add [of H]
- real_add_mult_distrib);
- also; have "... = (h y1 + a1 * xi) + (h y2 + a2 * xi)";
- by simp;
- also; have "h y1 + a1 * xi = h' x1";
- by (rule h'_definite [symmetric]);
- also; have "h y2 + a2 * xi = h' x2";
- by (rule h'_definite [symmetric]);
- finally; show ?thesis; .;
- qed;
+ real_add_mult_distrib)
+ also have "... = (h y1 + a1 * xi) + (h y2 + a2 * xi)"
+ by simp
+ also have "h y1 + a1 * xi = h' x1"
+ by (rule h'_definite [symmetric])
+ also have "h y2 + a2 * xi = h' x2"
+ by (rule h'_definite [symmetric])
+ finally show ?thesis .
+ qed
txt{* We further have to show that $h'$ is multiplicative,
i.~e.\ $h'\ap (c \mult x_1) = c \cdot h'\ap x_1$
for $x\in H$ and $c\in \bbbR$.
- *};
+ *}
- next;
- fix c x1; assume x1: "x1 \<in> H'";
- have ax1: "c \<cdot> x1 \<in> H'";
- by (rule vs_mult_closed, rule h');
- from x1;
- have ex_x: "!! x. x\<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H";
- by (unfold H'_def vs_sum_def lin_def) fast;
+ next
+ fix c x1 assume x1: "x1 \<in> H'"
+ have ax1: "c \<cdot> x1 \<in> H'"
+ by (rule vs_mult_closed, rule h')
+ from x1
+ have ex_x: "!! x. x\<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H"
+ by (unfold H'_def vs_sum_def lin_def) fast
- from x1; have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0 \<and> y1 \<in> H";
- by (unfold H'_def vs_sum_def lin_def) fast;
- with ex_x [of "c \<cdot> x1", OF ax1];
- show "h' (c \<cdot> x1) = c * (h' x1)";
- proof (elim exE conjE);
- fix y1 y a1 a;
+ from x1 have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0 \<and> y1 \<in> H"
+ by (unfold H'_def vs_sum_def lin_def) fast
+ with ex_x [of "c \<cdot> x1", OF ax1]
+ show "h' (c \<cdot> x1) = c * (h' x1)"
+ proof (elim exE conjE)
+ fix y1 y a1 a
assume y1: "x1 = y1 + a1 \<cdot> x0" and y1': "y1 \<in> H"
- and y: "c \<cdot> x1 = y + a \<cdot> x0" and y': "y \<in> H";
+ and y: "c \<cdot> x1 = y + a \<cdot> x0" and y': "y \<in> H"
- have ya: "c \<cdot> y1 = y \<and> c * a1 = a";
- proof (rule decomp_H');
- show "c \<cdot> y1 + (c * a1) \<cdot> x0 = y + a \<cdot> x0";
- by (simp! add: vs_add_mult_distrib1);
- show "c \<cdot> y1 \<in> H"; ..;
- qed;
+ have ya: "c \<cdot> y1 = y \<and> c * a1 = a"
+ proof (rule decomp_H')
+ show "c \<cdot> y1 + (c * a1) \<cdot> x0 = y + a \<cdot> x0"
+ by (simp! add: vs_add_mult_distrib1)
+ show "c \<cdot> y1 \<in> H" ..
+ qed
- have "h' (c \<cdot> x1) = h y + a * xi";
- by (rule h'_definite);
- also; have "... = h (c \<cdot> y1) + (c * a1) * xi";
- by (simp add: ya);
- also; from vs y1'; have "... = c * h y1 + c * a1 * xi";
- by (simp add: linearform_mult [of H]);
- also; from vs y1'; have "... = c * (h y1 + a1 * xi)";
- by (simp add: real_add_mult_distrib2 real_mult_assoc);
- also; have "h y1 + a1 * xi = h' x1";
- by (rule h'_definite [symmetric]);
- finally; show ?thesis; .;
- qed;
- qed;
-qed;
+ have "h' (c \<cdot> x1) = h y + a * xi"
+ by (rule h'_definite)
+ also have "... = h (c \<cdot> y1) + (c * a1) * xi"
+ by (simp add: ya)
+ also from vs y1' have "... = c * h y1 + c * a1 * xi"
+ by (simp add: linearform_mult [of H])
+ also from vs y1' have "... = c * (h y1 + a1 * xi)"
+ by (simp add: real_add_mult_distrib2 real_mult_assoc)
+ also have "h y1 + a1 * xi = h' x1"
+ by (rule h'_definite [symmetric])
+ finally show ?thesis .
+ qed
+ qed
+qed
text{* \medskip The linear extension $h'$ of $h$
-is bounded by the seminorm $p$. *};
+is bounded by the seminorm $p$. *}
lemma h'_norm_pres:
"[| h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H
@@ -245,100 +245,100 @@
is_subspace H E; is_seminorm E p; is_linearform H h;
\<forall>y \<in> H. h y <= p y;
\<forall>y \<in> H. - p (y + x0) - h y <= xi \<and> xi <= p (y + x0) - h y |]
- ==> \<forall>x \<in> H'. h' x <= p x";
-proof;
+ ==> \<forall>x \<in> H'. h' x <= p x"
+proof
assume h'_def:
"h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H
in (h y) + a * xi)"
and H'_def: "H' == H + lin x0"
and vs: "x0 \<notin> H" "x0 \<in> E" "x0 \<noteq> 0" "is_vectorspace E"
"is_subspace H E" "is_seminorm E p" "is_linearform H h"
- and a: "\<forall>y \<in> H. h y <= p y";
- presume a1: "\<forall>ya \<in> H. - p (ya + x0) - h ya <= xi";
- presume a2: "\<forall>ya \<in> H. xi <= p (ya + x0) - h ya";
- fix x; assume "x \<in> H'";
+ and a: "\<forall>y \<in> H. h y <= p y"
+ presume a1: "\<forall>ya \<in> H. - p (ya + x0) - h ya <= xi"
+ presume a2: "\<forall>ya \<in> H. xi <= p (ya + x0) - h ya"
+ fix x assume "x \<in> H'"
have ex_x:
- "!! x. x \<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H";
- by (unfold H'_def vs_sum_def lin_def) fast;
- have "\<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H";
- by (rule ex_x);
- thus "h' x <= p x";
- proof (elim exE conjE);
- fix y a; assume x: "x = y + a \<cdot> x0" and y: "y \<in> H";
- have "h' x = h y + a * xi";
- by (rule h'_definite);
+ "!! x. x \<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H"
+ by (unfold H'_def vs_sum_def lin_def) fast
+ have "\<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H"
+ by (rule ex_x)
+ thus "h' x <= p x"
+ proof (elim exE conjE)
+ fix y a assume x: "x = y + a \<cdot> x0" and y: "y \<in> H"
+ have "h' x = h y + a * xi"
+ by (rule h'_definite)
txt{* Now we show
$h\ap y + a \cdot \xi\leq p\ap (y\plus a \mult x_0)$
- by case analysis on $a$. *};
+ by case analysis on $a$. *}
- also; have "... <= p (y + a \<cdot> x0)";
- proof (rule linorder_cases);
+ also have "... <= p (y + a \<cdot> x0)"
+ proof (rule linorder_cases)
- assume z: "a = #0";
- with vs y a; show ?thesis; by simp;
+ assume z: "a = #0"
+ with vs y a show ?thesis by simp
txt {* In the case $a < 0$, we use $a_1$ with $\idt{ya}$
- taken as $y/a$: *};
+ taken as $y/a$: *}
- next;
- assume lz: "a < #0"; hence nz: "a \<noteq> #0"; by simp;
- from a1;
- have "- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y) <= xi";
- by (rule bspec) (simp!);
+ next
+ assume lz: "a < #0" hence nz: "a \<noteq> #0" by simp
+ from a1
+ have "- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y) <= xi"
+ by (rule bspec) (simp!)
txt {* The thesis for this case now follows by a short
- calculation. *};
- hence "a * xi <= a * (- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))";
- by (rule real_mult_less_le_anti [OF lz]);
- also;
- have "... = - a * (p (rinv a \<cdot> y + x0)) - a * (h (rinv a \<cdot> y))";
- by (rule real_mult_diff_distrib);
- also; from lz vs y;
- have "- a * (p (rinv a \<cdot> y + x0)) = p (a \<cdot> (rinv a \<cdot> y + x0))";
- by (simp add: seminorm_abs_homogenous abs_minus_eqI2);
- also; from nz vs y; have "... = p (y + a \<cdot> x0)";
- by (simp add: vs_add_mult_distrib1);
- also; from nz vs y; have "a * (h (rinv a \<cdot> y)) = h y";
- by (simp add: linearform_mult [symmetric]);
- finally; have "a * xi <= p (y + a \<cdot> x0) - h y"; .;
+ calculation. *}
+ hence "a * xi <= a * (- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))"
+ by (rule real_mult_less_le_anti [OF lz])
+ also
+ have "... = - a * (p (rinv a \<cdot> y + x0)) - a * (h (rinv a \<cdot> y))"
+ by (rule real_mult_diff_distrib)
+ also from lz vs y
+ have "- a * (p (rinv a \<cdot> y + x0)) = p (a \<cdot> (rinv a \<cdot> y + x0))"
+ by (simp add: seminorm_abs_homogenous abs_minus_eqI2)
+ also from nz vs y have "... = p (y + a \<cdot> x0)"
+ by (simp add: vs_add_mult_distrib1)
+ also from nz vs y have "a * (h (rinv a \<cdot> y)) = h y"
+ by (simp add: linearform_mult [symmetric])
+ finally have "a * xi <= p (y + a \<cdot> x0) - h y" .
- hence "h y + a * xi <= h y + p (y + a \<cdot> x0) - h y";
- by (simp add: real_add_left_cancel_le);
- thus ?thesis; by simp;
+ hence "h y + a * xi <= h y + p (y + a \<cdot> x0) - h y"
+ by (simp add: real_add_left_cancel_le)
+ thus ?thesis by simp
txt {* In the case $a > 0$, we use $a_2$ with $\idt{ya}$
- taken as $y/a$: *};
+ taken as $y/a$: *}
- next;
- assume gz: "#0 < a"; hence nz: "a \<noteq> #0"; by simp;
- from a2; have "xi <= p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y)";
- by (rule bspec) (simp!);
+ next
+ assume gz: "#0 < a" hence nz: "a \<noteq> #0" by simp
+ from a2 have "xi <= p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y)"
+ by (rule bspec) (simp!)
txt {* The thesis for this case follows by a short
- calculation: *};
+ calculation: *}
- with gz;
- have "a * xi <= a * (p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))";
- by (rule real_mult_less_le_mono);
- also; have "... = a * p (rinv a \<cdot> y + x0) - a * h (rinv a \<cdot> y)";
- by (rule real_mult_diff_distrib2);
- also; from gz vs y;
- have "a * p (rinv a \<cdot> y + x0) = p (a \<cdot> (rinv a \<cdot> y + x0))";
- by (simp add: seminorm_abs_homogenous abs_eqI2);
- also; from nz vs y; have "... = p (y + a \<cdot> x0)";
- by (simp add: vs_add_mult_distrib1);
- also; from nz vs y; have "a * h (rinv a \<cdot> y) = h y";
- by (simp add: linearform_mult [symmetric]);
- finally; have "a * xi <= p (y + a \<cdot> x0) - h y"; .;
+ with gz
+ have "a * xi <= a * (p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))"
+ by (rule real_mult_less_le_mono)
+ also have "... = a * p (rinv a \<cdot> y + x0) - a * h (rinv a \<cdot> y)"
+ by (rule real_mult_diff_distrib2)
+ also from gz vs y
+ have "a * p (rinv a \<cdot> y + x0) = p (a \<cdot> (rinv a \<cdot> y + x0))"
+ by (simp add: seminorm_abs_homogenous abs_eqI2)
+ also from nz vs y have "... = p (y + a \<cdot> x0)"
+ by (simp add: vs_add_mult_distrib1)
+ also from nz vs y have "a * h (rinv a \<cdot> y) = h y"
+ by (simp add: linearform_mult [symmetric])
+ finally have "a * xi <= p (y + a \<cdot> x0) - h y" .
- hence "h y + a * xi <= h y + (p (y + a \<cdot> x0) - h y)";
- by (simp add: real_add_left_cancel_le);
- thus ?thesis; by simp;
- qed;
- also; from x; have "... = p x"; by simp;
- finally; show ?thesis; .;
- qed;
-qed blast+;
+ hence "h y + a * xi <= h y + (p (y + a \<cdot> x0) - h y)"
+ by (simp add: real_add_left_cancel_le)
+ thus ?thesis by simp
+ qed
+ also from x have "... = p x" by simp
+ finally show ?thesis .
+ qed
+qed blast+
-end;
+end
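
Summing up the construction in this theory: for $x = y + a \mult x_0$ with $y \in H$, the
extension is
\[
  h'\ap x \;=\; h\ap y + a \cdot \xi,
\]
and h'_norm_pres establishes $h'\ap x \le p\ap x$ by a case analysis on $a$, using the
hypothesis that for all $y \in H$
\[
  - p\ap (y + x_0) - h\ap y \;\le\; \xi \;\le\; p\ap (y + x_0) - h\ap y,
\]
the cases $a < 0$ and $a > 0$ instantiating this bound at $y / a$ (this is the kind of
$\xi$ whose existence ex_xi provides).
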