--- a/doc-src/TutorialI/CTL/PDL.thy Fri Jan 04 19:23:28 2002 +0100
+++ b/doc-src/TutorialI/CTL/PDL.thy Fri Jan 04 19:24:43 2002 +0100
@@ -38,7 +38,7 @@
"s \<Turnstile> Neg f = (\<not>(s \<Turnstile> f))"
"s \<Turnstile> And f g = (s \<Turnstile> f \<and> s \<Turnstile> g)"
"s \<Turnstile> AX f = (\<forall>t. (s,t) \<in> M \<longrightarrow> t \<Turnstile> f)"
-"s \<Turnstile> EF f = (\<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<Turnstile> f)";
+"s \<Turnstile> EF f = (\<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<Turnstile> f)"
text{*\noindent
The first three equations should be self-explanatory. The temporal formula
@@ -51,7 +51,7 @@
states where the formula is true. It too is defined by recursion over the syntax:
*}
-consts mc :: "formula \<Rightarrow> state set";
+consts mc :: "formula \<Rightarrow> state set"
primrec
"mc(Atom a) = {s. a \<in> L s}"
"mc(Neg f) = -mc f"
@@ -92,8 +92,8 @@
includes the other; the inclusion is shown pointwise:
*}
-apply(rule equalityI);
- apply(rule subsetI);
+apply(rule equalityI)
+ apply(rule subsetI)
apply(simp)(*<*)apply(rename_tac s)(*>*)
txt{*\noindent
@@ -118,7 +118,7 @@
\isa{M\isactrlsup {\isacharasterisk}}.
*}
- apply(blast intro: rtrancl_trans);
+ apply(blast intro: rtrancl_trans)
txt{*
We now return to the second set inclusion subgoal, which is again proved
@@ -172,10 +172,10 @@
@{text auto} augmented with the lemma as a simplification rule.
*}
-theorem "mc f = {s. s \<Turnstile> f}";
-apply(induct_tac f);
-apply(auto simp add:EF_lemma);
-done;
+theorem "mc f = {s. s \<Turnstile> f}"
+apply(induct_tac f)
+apply(auto simp add: EF_lemma)
+done
text{*
\begin{exercise}
@@ -193,19 +193,19 @@
\index{PDL|)}
*}
(*<*)
-theorem main: "mc f = {s. s \<Turnstile> f}";
-apply(induct_tac f);
-apply(auto simp add:EF_lemma);
-done;
+theorem main: "mc f = {s. s \<Turnstile> f}"
+apply(induct_tac f)
+apply(auto simp add: EF_lemma)
+done
-lemma aux: "s \<Turnstile> f = (s : mc f)";
-apply(simp add:main);
-done;
+lemma aux: "s \<Turnstile> f = (s : mc f)"
+apply(simp add: main)
+done
-lemma "(s \<Turnstile> EF f) = (s \<Turnstile> f | s \<Turnstile> Neg(AX(Neg(EF f))))";
-apply(simp only:aux);
-apply(simp);
-apply(subst lfp_unfold[OF mono_ef], fast);
+lemma "(s \<Turnstile> EF f) = (s \<Turnstile> f | s \<Turnstile> Neg(AX(Neg(EF f))))"
+apply(simp only: aux)
+apply(simp)
+apply(subst lfp_unfold[OF mono_ef], fast)
done
end
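
The mc equations and the EF_lemma hunks above treat EF as a reachability computation: a state satisfies EF f iff some M*-successor satisfies f, and mc obtains that set as a least fixed point. A minimal Haskell sketch of the same model checker, assuming an explicitly given finite model (the Model record, the list-based set representation and the iteration strategy are illustrative choices, not part of the theory), makes the construction concrete:

    import Data.List (nub, sort)

    -- PDL formulas, mirroring the constructors used in the mc equations above.
    data Formula a = Atom a
                   | Neg (Formula a)
                   | And (Formula a) (Formula a)
                   | AX (Formula a)
                   | EF (Formula a)

    -- A finite model: all states, the transition relation M, and the labelling L.
    data Model s a = Model { states :: [s], trans :: [(s, s)], label :: s -> [a] }

    mc :: (Ord s, Eq a) => Model s a -> Formula a -> [s]
    mc m (Atom a)  = [ s | s <- states m, a `elem` label m s ]
    mc m (Neg f)   = [ s | s <- states m, s `notElem` mc m f ]
    mc m (And f g) = [ s | s <- mc m f, s `elem` mc m g ]
    mc m (AX f)    = [ s | s <- states m
                         , all (`elem` mc m f) [ t | (s', t) <- trans m, s' == s ] ]
    mc m (EF f)    = lfp (mc m f)
      where
        -- pre ts: states with at least one successor inside ts
        pre ts   = [ s | (s, t) <- trans m, t `elem` ts ]
        norm     = sort . nub
        -- least fixed point of  T |-> base ∪ pre T,  reached by iteration;
        -- it terminates because the state space is finite and the iterates grow.
        lfp base = go (norm base)
          where go ts = let ts' = norm (base ++ pre ts)
                        in if ts' == ts then ts else go ts'

On a small concrete model, mc m (EF (Atom p)) returns exactly the states from which a p-labelled state is reachable along trans, matching the reading "there is a t with (s,t) in M* and t |= f" from the first hunk.
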
--- a/doc-src/TutorialI/Datatype/Fundata.thy Fri Jan 04 19:23:28 2002 +0100
+++ b/doc-src/TutorialI/Datatype/Fundata.thy Fri Jan 04 19:24:43 2002 +0100
@@ -11,7 +11,7 @@
@{typ"nat"}, we have an infinitely branching tree because each node
has as many subtrees as there are natural numbers. How can we possibly
write down such a tree? Using functional notation! For example, the term
-@{term[display]"Br 0 (%i. Br i (%n. Tip))"}
+@{term[display]"Br (0::nat) (\<lambda>i. Br i (\<lambda>n. Tip))"}
of type @{typ"(nat,nat)bigtree"} is the tree whose
root is labeled with 0 and whose $i$th subtree is labeled with $i$ and
has merely @{term"Tip"}s as further subtrees.
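
For readers less used to function-valued constructor arguments, a small Haskell analogue (purely illustrative, not part of the tutorial sources; the path-lookup function labelAt is made up for this example) shows the same idea: a Br node stores a function from indices to subtrees, so an index type with infinitely many values gives a node infinitely many children.

    -- Analogue of bigtree: a label first, then the subtree-selecting function.
    data BigTree a i = Tip | Br a (i -> BigTree a i)

    -- The tree  Br 0 (\i. Br i (\n. Tip))  from the text.
    example :: BigTree Integer Integer
    example = Br 0 (\i -> Br i (\_ -> Tip))

    -- Follow a path of child indices and return the label reached, if any.
    labelAt :: BigTree a i -> [i] -> Maybe a
    labelAt Tip      _      = Nothing
    labelAt (Br a _) []     = Just a
    labelAt (Br _ f) (i:is) = labelAt (f i) is

    -- labelAt example [] == Just 0,  labelAt example [7] == Just 7:
    -- the root is labelled 0 and the i-th subtree is labelled i.
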
--- a/doc-src/TutorialI/Misc/simp.thy Fri Jan 04 19:23:28 2002 +0100
+++ b/doc-src/TutorialI/Misc/simp.thy Fri Jan 04 19:24:43 2002 +0100
@@ -97,8 +97,8 @@
as simplification rules and are simplified themselves. For example:
*}
-lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs";
-apply simp;
+lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs"
+apply simp
done
text{*\noindent
@@ -109,7 +109,7 @@
In some cases, using the assumptions can lead to nontermination:
*}
-lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []";
+lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
txt{*\noindent
An unmodified application of @{text"simp"} loops. The culprit is the
@@ -119,7 +119,7 @@
telling the simplifier to ignore the assumptions:
*}
-apply(simp (no_asm));
+apply(simp (no_asm))
done
text{*\noindent
@@ -165,26 +165,26 @@
For example, given *}
constdefs xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"
- "xor A B \<equiv> (A \<and> \<not>B) \<or> (\<not>A \<and> B)";
+ "xor A B \<equiv> (A \<and> \<not>B) \<or> (\<not>A \<and> B)"
text{*\noindent
we may want to prove
*}
-lemma "xor A (\<not>A)";
+lemma "xor A (\<not>A)"
txt{*\noindent
Typically, we begin by unfolding some definitions:
\indexbold{definitions!unfolding}
*}
-apply(simp only:xor_def);
+apply(simp only: xor_def)
txt{*\noindent
In this particular case, the resulting goal
@{subgoals[display,indent=0]}
can be proved by simplification. Thus we could have proved the lemma outright by
-*}(*<*)oops;lemma "xor A (\<not>A)";(*>*)
+*}(*<*)oops lemma "xor A (\<not>A)"(*>*)
apply(simp add: xor_def)
(*<*)done(*>*)
text{*\noindent
@@ -213,8 +213,8 @@
the predefined constant @{term"Let"}, expanding @{text"let"}-constructs
means rewriting with \tdx{Let_def}: *}
-lemma "(let xs = [] in xs@ys@xs) = ys";
-apply(simp add: Let_def);
+lemma "(let xs = [] in xs@ys@xs) = ys"
+apply(simp add: Let_def)
done
text{*
@@ -232,8 +232,8 @@
accepts \emph{conditional} equations, for example
*}
-lemma hd_Cons_tl[simp]: "xs \<noteq> [] \<Longrightarrow> hd xs # tl xs = xs";
-apply(case_tac xs, simp, simp);
+lemma hd_Cons_tl[simp]: "xs \<noteq> [] \<Longrightarrow> hd xs # tl xs = xs"
+apply(case_tac xs, simp, simp)
done
text{*\noindent
@@ -244,9 +244,9 @@
the lemma below is proved by plain simplification:
*}
-lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) # tl(rev xs) = rev xs";
+lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) # tl(rev xs) = rev xs"
(*<*)
-by(simp);
+by(simp)
(*>*)
text{*\noindent
The conditional equation @{thm[source]hd_Cons_tl} above
@@ -265,7 +265,7 @@
distinction on the boolean condition. Here is an example:
*}
-lemma "\<forall>xs. if xs = [] then rev xs = [] else rev xs \<noteq> []";
+lemma "\<forall>xs. if xs = [] then rev xs = [] else rev xs \<noteq> []"
txt{*\noindent
The goal can be split by a special method, \methdx{split}:
@@ -284,8 +284,8 @@
This splitting idea generalizes from @{text"if"} to \sdx{case}.
Let us simplify a case analysis over lists:\index{*list.split (theorem)}
*}(*<*)by simp(*>*)
-lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs";
-apply(split list.split);
+lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
+apply(split list.split)
txt{*
@{subgoals[display,indent=0]}
@@ -297,10 +297,10 @@
for adding splitting rules explicitly. The
lemma above can be proved in one step by
*}
-(*<*)oops;
-lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs";
+(*<*)oops
+lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
(*>*)
-apply(simp split: list.split);
+apply(simp split: list.split)
(*<*)done(*>*)
text{*\noindent
whereas \isacommand{apply}@{text"(simp)"} alone will not succeed.
@@ -317,11 +317,11 @@
either locally
*}
(*<*)
-lemma "dummy=dummy";
+lemma "dummy=dummy"
(*>*)
-apply(simp split del: split_if);
+apply(simp split del: split_if)
(*<*)
-oops;
+oops
(*>*)
text{*\noindent
or globally:
@@ -373,9 +373,9 @@
on:
*}
-ML "set trace_simp";
-lemma "rev [a] = []";
-apply(simp);
+ML "set trace_simp"
+lemma "rev [a] = []"
+apply(simp)
(*<*)oops(*>*)
text{*\noindent
@@ -411,7 +411,7 @@
rules. Thus it is advisable to reset it:
*}
-ML "reset trace_simp";
+ML "reset trace_simp"
(*<*)
end
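
The looping behaviour behind the (no_asm) hunk above is easy to reproduce outside the simplifier. A hypothetical Haskell miniature (not from the tutorial; Term, step and size are made-up names) applies the assumption f x = g (f (g x)) once, left to right, to every f-occurrence in a term: each pass yields a strictly larger term that still contains an f-redex, so using the assumption as a rewrite rule can never terminate.

    -- Terms over the two function symbols of the looping assumption.
    data Term = V | F Term | G Term
      deriving (Eq, Show)

    -- One parallel pass of the rewrite rule  f x --> g (f (g x)).
    step :: Term -> Term
    step V     = V
    step (F t) = G (F (G (step t)))
    step (G t) = G (step t)

    size :: Term -> Int
    size V     = 1
    size (F t) = 1 + size t
    size (G t) = 1 + size t

    -- take 4 (map size (iterate step (F V)))  ==  [2,4,6,8]:
    -- every pass keeps exactly one F and adds two nodes, so rewriting never stops.
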
--- a/doc-src/TutorialI/Overview/FP1.thy Fri Jan 04 19:23:28 2002 +0100
+++ b/doc-src/TutorialI/Overview/FP1.thy Fri Jan 04 19:24:43 2002 +0100
@@ -23,15 +23,15 @@
primrec "sum 0 = 0"
"sum (Suc n) = Suc n + sum n"
-lemma "sum n + sum n = n*(Suc n)";
-apply(induct_tac n);
-apply(auto);
+lemma "sum n + sum n = n*(Suc n)"
+apply(induct_tac n)
+apply(auto)
done
lemma "\<lbrakk> \<not> m < n; m < n+1 \<rbrakk> \<Longrightarrow> m = n"
by(auto)
-lemma "min i (max j k) = max (min k i) (min i (j::nat))";
+lemma "min i (max j k) = max (min k i) (min i (j::nat))"
by(arith)
lemma "n*n = n \<Longrightarrow> n=0 \<or> n=1"
@@ -91,19 +91,19 @@
subsubsection{*Assumptions*}
-lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs";
-apply simp;
+lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs"
+apply simp
done
-lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []";
-apply(simp (no_asm));
+lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
+apply(simp (no_asm))
done
subsubsection{*Rewriting with Definitions*}
-lemma "xor A (\<not>A)";
-apply(simp only:xor_def);
+lemma "xor A (\<not>A)"
+apply(simp only: xor_def)
by simp
@@ -119,11 +119,11 @@
subsubsection{*Automatic Case Splits*}
-lemma "\<forall>xs. if xs = [] then A else B";
+lemma "\<forall>xs. if xs = [] then A else B"
apply simp
oops
-lemma "case xs @ [] of [] \<Rightarrow> P | y#ys \<Rightarrow> Q ys y";
+lemma "case xs @ [] of [] \<Rightarrow> P | y#ys \<Rightarrow> Q ys y"
apply simp
apply(simp split: list.split)
oops
@@ -134,7 +134,7 @@
lemma "\<lbrakk> \<not> m < n; m < n+1 \<rbrakk> \<Longrightarrow> m = n"
by simp
-lemma "\<not> m < n \<and> m < n+1 \<Longrightarrow> m = n";
+lemma "\<not> m < n \<and> m < n+1 \<Longrightarrow> m = n"
apply simp
by(arith)
@@ -152,43 +152,43 @@
subsubsection{*Expressions*}
-types 'v binop = "'v \<Rightarrow> 'v \<Rightarrow> 'v";
+types 'v binop = "'v \<Rightarrow> 'v \<Rightarrow> 'v"
datatype ('a,'v)expr = Cex 'v
| Vex 'a
- | Bex "'v binop" "('a,'v)expr" "('a,'v)expr";
+ | Bex "'v binop" "('a,'v)expr" "('a,'v)expr"
-consts value :: "('a,'v)expr \<Rightarrow> ('a \<Rightarrow> 'v) \<Rightarrow> 'v";
+consts value :: "('a,'v)expr \<Rightarrow> ('a \<Rightarrow> 'v) \<Rightarrow> 'v"
primrec
"value (Cex v) env = v"
"value (Vex a) env = env a"
-"value (Bex f e1 e2) env = f (value e1 env) (value e2 env)";
+"value (Bex f e1 e2) env = f (value e1 env) (value e2 env)"
subsubsection{*The Stack Machine*}
datatype ('a,'v) instr = Const 'v
| Load 'a
- | Apply "'v binop";
+ | Apply "'v binop"
-consts exec :: "('a,'v)instr list \<Rightarrow> ('a\<Rightarrow>'v) \<Rightarrow> 'v list \<Rightarrow> 'v list";
+consts exec :: "('a,'v)instr list \<Rightarrow> ('a\<Rightarrow>'v) \<Rightarrow> 'v list \<Rightarrow> 'v list"
primrec
"exec [] s vs = vs"
"exec (i#is) s vs = (case i of
Const v \<Rightarrow> exec is s (v#vs)
| Load a \<Rightarrow> exec is s ((s a)#vs)
- | Apply f \<Rightarrow> exec is s ((f (hd vs) (hd(tl vs)))#(tl(tl vs))))";
+ | Apply f \<Rightarrow> exec is s ((f (hd vs) (hd(tl vs)))#(tl(tl vs))))"
subsubsection{*The Compiler*}
-consts comp :: "('a,'v)expr \<Rightarrow> ('a,'v)instr list";
+consts comp :: "('a,'v)expr \<Rightarrow> ('a,'v)instr list"
primrec
"comp (Cex v) = [Const v]"
"comp (Vex a) = [Load a]"
-"comp (Bex f e1 e2) = (comp e2) @ (comp e1) @ [Apply f]";
+"comp (Bex f e1 e2) = (comp e2) @ (comp e1) @ [Apply f]"
-theorem "exec (comp e) s [] = [value e s]";
+theorem "exec (comp e) s [] = [value e s]"
oops
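
The expression language, stack machine and compiler in this hunk translate almost verbatim into Haskell. The sketch below is only an illustration (lists model both the instruction sequence and the value stack, and head/tail play the role of hd/tl); it makes the stated correctness property exec (comp e) s [] = [value e s] testable on concrete expressions before any Isabelle proof is attempted.

    type Binop v = v -> v -> v

    data Expr a v = Cex v | Vex a | Bex (Binop v) (Expr a v) (Expr a v)

    value :: Expr a v -> (a -> v) -> v
    value (Cex v)       _   = v
    value (Vex a)       env = env a
    value (Bex f e1 e2) env = f (value e1 env) (value e2 env)

    data Instr a v = Const v | Load a | Apply (Binop v)

    exec :: [Instr a v] -> (a -> v) -> [v] -> [v]
    exec []     _ vs = vs
    exec (i:is) s vs = case i of
      Const v -> exec is s (v : vs)
      Load a  -> exec is s (s a : vs)
      Apply f -> exec is s (f (head vs) (head (tail vs)) : tail (tail vs))

    comp :: Expr a v -> [Instr a v]
    comp (Cex v)       = [Const v]
    comp (Vex a)       = [Load a]
    comp (Bex f e1 e2) = comp e2 ++ comp e1 ++ [Apply f]

    -- exec (comp (Bex (+) (Vex "x") (Cex 1))) (\_ -> 2) []  ==  [3]
    -- value (Bex (+) (Vex "x") (Cex 1)) (\_ -> 2)           ==   3

The theorem as stated needs to be generalized (over the initial stack) before induction on e goes through, which is presumably why the proof is left open with oops at this point.
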
@@ -204,11 +204,11 @@
| Num nat
and 'a bexp = Less "'a aexp" "'a aexp"
| And "'a bexp" "'a bexp"
- | Neg "'a bexp";
+ | Neg "'a bexp"
consts evala :: "'a aexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> nat"
- evalb :: "'a bexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> bool";
+ evalb :: "'a bexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> bool"
primrec
"evala (IF b a1 a2) env =
@@ -237,8 +237,8 @@
lemma substitution_lemma:
"evala (substa s a) env = evala a (\<lambda>x. evala (s x) env) \<and>
- evalb (substb s b) env = evalb b (\<lambda>x. evala (s x) env)";
-apply(induct_tac a and b);
+ evalb (substb s b) env = evalb b (\<lambda>x. evala (s x) env)"
+apply(induct_tac a and b)
by simp_all
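
The "induct_tac a and b" step above performs mutual induction over the two datatypes at once, which is why the lemma states both the aexp and the bexp halves simultaneously. A trimmed-down Haskell miniature (illustrative only: the constructor set IF/Var/Num and Less/And/Neg is an assumption here, and the tutorial's aexp has further arithmetic constructors) shows the mutually recursive evaluators and substitutions that the lemma relates:

    -- Mutually recursive arithmetic and boolean expressions.
    data AExp a = IF (BExp a) (AExp a) (AExp a) | Var a | Num Int
    data BExp a = Less (AExp a) (AExp a) | And (BExp a) (BExp a) | Neg (BExp a)

    evala :: AExp a -> (a -> Int) -> Int
    evala (IF b a1 a2) env = if evalb b env then evala a1 env else evala a2 env
    evala (Var x)      env = env x
    evala (Num n)      _   = n

    evalb :: BExp a -> (a -> Int) -> Bool
    evalb (Less a1 a2) env = evala a1 env < evala a2 env
    evalb (And b1 b2)  env = evalb b1 env && evalb b2 env
    evalb (Neg b)      env = not (evalb b env)

    -- Substitute arithmetic expressions for variables, in both syntactic classes.
    substa :: (a -> AExp b) -> AExp a -> AExp b
    substa s (IF b a1 a2) = IF (substb s b) (substa s a1) (substa s a2)
    substa s (Var x)      = s x
    substa _ (Num n)      = Num n

    substb :: (a -> AExp b) -> BExp a -> BExp b
    substb s (Less a1 a2) = Less (substa s a1) (substa s a2)
    substb s (And b1 b2)  = And (substb s b1) (substb s b2)
    substb s (Neg b)      = Neg (substb s b)

    -- Substitution lemma in this setting:
    --   evala (substa s a) env == evala a (\x -> evala (s x) env)
    --   evalb (substb s b) env == evalb b (\x -> evala (s x) env)
    -- Each half needs the other in the IF and Less cases, hence the simultaneous
    -- induction "a and b".
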
@@ -251,7 +251,7 @@
consts
mirror :: "tree \<Rightarrow> tree"
-mirrors:: "tree list \<Rightarrow> tree list";
+mirrors:: "tree list \<Rightarrow> tree list"
primrec
"mirror(C ts) = C(mirrors ts)"
--- a/doc-src/TutorialI/ToyList/ToyList.thy Fri Jan 04 19:23:28 2002 +0100
+++ b/doc-src/TutorialI/ToyList/ToyList.thy Fri Jan 04 19:24:43 2002 +0100
@@ -97,7 +97,7 @@
syntax with keywords like \isacommand{datatype} and \isacommand{end}.
% (see Fig.~\ref{fig:keywords} in Appendix~\ref{sec:Appendix} for a full list).
Embedded in this syntax are the types and formulae of HOL, whose syntax is
-extensible (see \S\ref{sec:syntax-anno}), e.g.\ by new user-defined infix operators.
+extensible (see \S\ref{sec:concrete-syntax}), e.g.\ by new user-defined infix operators.
To distinguish the two levels, everything
HOL-specific (terms and types) should be enclosed in
\texttt{"}\dots\texttt{"}.
--- a/doc-src/TutorialI/Types/Pairs.thy Fri Jan 04 19:23:28 2002 +0100
+++ b/doc-src/TutorialI/Types/Pairs.thy Fri Jan 04 19:24:43 2002 +0100
@@ -161,7 +161,7 @@
(*<*)by simp(*>*)
lemma "\<And>p q. swap(swap p) = q \<longrightarrow> p = q"
-apply(simp only:split_paired_all)
+apply(simp only: split_paired_all)
txt{*\noindent
@{subgoals[display,indent=0]}