--- a/Admin/isatest/settings/at-mac-poly-5.1-para Mon Sep 06 15:01:37 2010 +0200
+++ b/Admin/isatest/settings/at-mac-poly-5.1-para Mon Sep 06 19:23:54 2010 +0200
@@ -1,7 +1,7 @@
# -*- shell-script -*- :mode=shellscript:
- POLYML_HOME="/home/polyml/polyml-svn"
- ML_SYSTEM="polyml-5.4.0"
+ POLYML_HOME="/home/polyml/polyml-5.3.0"
+ ML_SYSTEM="polyml-5.3.0"
ML_PLATFORM="x86-darwin"
ML_HOME="$POLYML_HOME/$ML_PLATFORM"
ML_OPTIONS="--mutable 800 --immutable 2000"
--- a/src/CCL/CCL.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CCL/CCL.thy Mon Sep 06 19:23:54 2010 +0200
@@ -238,9 +238,9 @@
fun mk_thm_str thy a b = "~ " ^ (saturate thy a "a") ^ " = " ^ (saturate thy b "b")
- val lemma = thm "lem";
- val eq_lemma = thm "eq_lemma";
- val distinctness = thm "distinctness";
+ val lemma = @{thm lem};
+ val eq_lemma = @{thm eq_lemma};
+ val distinctness = @{thm distinctness};
fun mk_lemma (ra,rb) = [lemma] RL [ra RS (rb RS eq_lemma)] RL
[distinctness RS notE, @{thm sym} RS (distinctness RS notE)]
in
--- a/src/CCL/Type.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CCL/Type.thy Mon Sep 06 19:23:54 2010 +0200
@@ -99,7 +99,7 @@
unfolding simp_type_defs by blast+
ML {*
-bind_thms ("case_rls", XH_to_Es (thms "XHs"));
+bind_thms ("case_rls", XH_to_Es @{thms XHs});
*}
@@ -260,7 +260,7 @@
lemmas iXHs = NatXH ListXH
-ML {* bind_thms ("icase_rls", XH_to_Es (thms "iXHs")) *}
+ML {* bind_thms ("icase_rls", XH_to_Es @{thms iXHs}) *}
subsection {* Type Rules *}
--- a/src/CCL/ex/Stream.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CCL/ex/Stream.thy Mon Sep 06 19:23:54 2010 +0200
@@ -88,7 +88,7 @@
apply (tactic "EQgen_tac @{context} [] 1")
prefer 2
apply blast
- apply (tactic {* DEPTH_SOLVE (etac (XH_to_E (thm "ListsXH")) 1
+ apply (tactic {* DEPTH_SOLVE (etac (XH_to_E @{thm ListsXH}) 1
THEN EQgen_tac @{context} [] 1) *})
done
@@ -135,7 +135,7 @@
apply (tactic {* eq_coinduct3_tac @{context}
"{p. EX x y. p=<x,y> & (EX n:Nat. x=iter1 (f,f^n`a) & y=map (f) ^n`iter2 (f,a))}" 1*})
apply (fast intro!: napplyBzero [symmetric] napplyBzero [symmetric, THEN arg_cong])
- apply (tactic {* EQgen_tac @{context} [thm "iter1B", thm "iter2Blemma"] 1 *})
+ apply (tactic {* EQgen_tac @{context} [@{thm iter1B}, @{thm iter2Blemma}] 1 *})
apply (subst napply_f, assumption)
apply (rule_tac f1 = f in napplyBsucc [THEN subst])
apply blast
--- a/src/CTT/Arith.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CTT/Arith.thy Mon Sep 06 19:23:54 2010 +0200
@@ -82,12 +82,12 @@
lemma mult_typing: "[| a:N; b:N |] ==> a #* b : N"
apply (unfold arith_defs)
-apply (tactic {* typechk_tac [thm "add_typing"] *})
+apply (tactic {* typechk_tac [@{thm add_typing}] *})
done
lemma mult_typingL: "[| a=c:N; b=d:N |] ==> a #* b = c #* d : N"
apply (unfold arith_defs)
-apply (tactic {* equal_tac [thm "add_typingL"] *})
+apply (tactic {* equal_tac [@{thm add_typingL}] *})
done
(*computation for mult: 0 and successor cases*)
@@ -159,19 +159,19 @@
structure Arith_simp_data: TSIMP_DATA =
struct
- val refl = thm "refl_elem"
- val sym = thm "sym_elem"
- val trans = thm "trans_elem"
- val refl_red = thm "refl_red"
- val trans_red = thm "trans_red"
- val red_if_equal = thm "red_if_equal"
- val default_rls = thms "arithC_rls" @ thms "comp_rls"
- val routine_tac = routine_tac (thms "arith_typing_rls" @ thms "routine_rls")
+ val refl = @{thm refl_elem}
+ val sym = @{thm sym_elem}
+ val trans = @{thm trans_elem}
+ val refl_red = @{thm refl_red}
+ val trans_red = @{thm trans_red}
+ val red_if_equal = @{thm red_if_equal}
+ val default_rls = @{thms arithC_rls} @ @{thms comp_rls}
+ val routine_tac = routine_tac (@{thms arith_typing_rls} @ @{thms routine_rls})
end
structure Arith_simp = TSimpFun (Arith_simp_data)
-local val congr_rls = thms "congr_rls" in
+local val congr_rls = @{thms congr_rls} in
fun arith_rew_tac prems = make_rew_tac
(Arith_simp.norm_tac(congr_rls, prems))
@@ -271,7 +271,7 @@
(*Solves first 0 goal, simplifies others. Two subgoals remain.
Both follow by rewriting, (2) using quantified induction hyp*)
apply (tactic "intr_tac []") (*strips remaining PRODs*)
-apply (tactic {* hyp_arith_rew_tac [thm "add_0_right"] *})
+apply (tactic {* hyp_arith_rew_tac [@{thm add_0_right}] *})
apply assumption
done
@@ -303,7 +303,7 @@
lemma absdiff_self_eq_0: "a:N ==> a |-| a = 0 : N"
apply (unfold absdiff_def)
-apply (tactic {* arith_rew_tac [thm "diff_self_eq_0"] *})
+apply (tactic {* arith_rew_tac [@{thm diff_self_eq_0}] *})
done
lemma absdiffC0: "a:N ==> 0 |-| a = a : N"
@@ -321,7 +321,7 @@
lemma absdiff_commute: "[| a:N; b:N |] ==> a |-| b = b |-| a : N"
apply (unfold absdiff_def)
apply (rule add_commute)
-apply (tactic {* typechk_tac [thm "diff_typing"] *})
+apply (tactic {* typechk_tac [@{thm diff_typing}] *})
done
(*If a+b=0 then a=0. Surprisingly tedious*)
@@ -332,7 +332,7 @@
apply (tactic "intr_tac []") (*strips remaining PRODs*)
apply (rule_tac [2] zero_ne_succ [THEN FE])
apply (erule_tac [3] EqE [THEN sym_elem])
-apply (tactic {* typechk_tac [thm "add_typing"] *})
+apply (tactic {* typechk_tac [@{thm add_typing}] *})
done
(*Version of above with the premise a+b=0.
@@ -354,7 +354,7 @@
apply (rule_tac [2] add_eq0)
apply (rule add_eq0)
apply (rule_tac [6] add_commute [THEN trans_elem])
-apply (tactic {* typechk_tac [thm "diff_typing"] *})
+apply (tactic {* typechk_tac [@{thm diff_typing}] *})
done
(*if a |-| b = 0 then a = b
@@ -366,7 +366,7 @@
apply (tactic eqintr_tac)
apply (rule add_diff_inverse [THEN sym_elem, THEN trans_elem])
apply (rule_tac [3] EqE, tactic "assume_tac 3")
-apply (tactic {* hyp_arith_rew_tac [thm "add_0_right"] *})
+apply (tactic {* hyp_arith_rew_tac [@{thm add_0_right}] *})
done
@@ -376,12 +376,12 @@
lemma mod_typing: "[| a:N; b:N |] ==> a mod b : N"
apply (unfold mod_def)
-apply (tactic {* typechk_tac [thm "absdiff_typing"] *})
+apply (tactic {* typechk_tac [@{thm absdiff_typing}] *})
done
lemma mod_typingL: "[| a=c:N; b=d:N |] ==> a mod b = c mod d : N"
apply (unfold mod_def)
-apply (tactic {* equal_tac [thm "absdiff_typingL"] *})
+apply (tactic {* equal_tac [@{thm absdiff_typingL}] *})
done
@@ -389,13 +389,13 @@
lemma modC0: "b:N ==> 0 mod b = 0 : N"
apply (unfold mod_def)
-apply (tactic {* rew_tac [thm "absdiff_typing"] *})
+apply (tactic {* rew_tac [@{thm absdiff_typing}] *})
done
lemma modC_succ:
"[| a:N; b:N |] ==> succ(a) mod b = rec(succ(a mod b) |-| b, 0, %x y. succ(a mod b)) : N"
apply (unfold mod_def)
-apply (tactic {* rew_tac [thm "absdiff_typing"] *})
+apply (tactic {* rew_tac [@{thm absdiff_typing}] *})
done
@@ -403,12 +403,12 @@
lemma div_typing: "[| a:N; b:N |] ==> a div b : N"
apply (unfold div_def)
-apply (tactic {* typechk_tac [thm "absdiff_typing", thm "mod_typing"] *})
+apply (tactic {* typechk_tac [@{thm absdiff_typing}, @{thm mod_typing}] *})
done
lemma div_typingL: "[| a=c:N; b=d:N |] ==> a div b = c div d : N"
apply (unfold div_def)
-apply (tactic {* equal_tac [thm "absdiff_typingL", thm "mod_typingL"] *})
+apply (tactic {* equal_tac [@{thm absdiff_typingL}, @{thm mod_typingL}] *})
done
lemmas div_typing_rls = mod_typing div_typing absdiff_typing
@@ -418,14 +418,14 @@
lemma divC0: "b:N ==> 0 div b = 0 : N"
apply (unfold div_def)
-apply (tactic {* rew_tac [thm "mod_typing", thm "absdiff_typing"] *})
+apply (tactic {* rew_tac [@{thm mod_typing}, @{thm absdiff_typing}] *})
done
lemma divC_succ:
"[| a:N; b:N |] ==> succ(a) div b =
rec(succ(a) mod b, succ(a div b), %x y. a div b) : N"
apply (unfold div_def)
-apply (tactic {* rew_tac [thm "mod_typing"] *})
+apply (tactic {* rew_tac [@{thm mod_typing}] *})
done
@@ -433,9 +433,9 @@
lemma divC_succ2: "[| a:N; b:N |] ==>
succ(a) div b =rec(succ(a mod b) |-| b, succ(a div b), %x y. a div b) : N"
apply (rule divC_succ [THEN trans_elem])
-apply (tactic {* rew_tac (thms "div_typing_rls" @ [thm "modC_succ"]) *})
+apply (tactic {* rew_tac (@{thms div_typing_rls} @ [@{thm modC_succ}]) *})
apply (tactic {* NE_tac @{context} "succ (a mod b) |-|b" 1 *})
-apply (tactic {* rew_tac [thm "mod_typing", thm "div_typing", thm "absdiff_typing"] *})
+apply (tactic {* rew_tac [@{thm mod_typing}, @{thm div_typing}, @{thm absdiff_typing}] *})
done
(*for case analysis on whether a number is 0 or a successor*)
@@ -451,19 +451,19 @@
(*Main Result. Holds when b is 0 since a mod 0 = a and a div 0 = 0 *)
lemma mod_div_equality: "[| a:N; b:N |] ==> a mod b #+ (a div b) #* b = a : N"
apply (tactic {* NE_tac @{context} "a" 1 *})
-apply (tactic {* arith_rew_tac (thms "div_typing_rls" @
- [thm "modC0", thm "modC_succ", thm "divC0", thm "divC_succ2"]) *})
+apply (tactic {* arith_rew_tac (@{thms div_typing_rls} @
+ [@{thm modC0}, @{thm modC_succ}, @{thm divC0}, @{thm divC_succ2}]) *})
apply (rule EqE)
(*case analysis on succ(u mod b)|-|b *)
apply (rule_tac a1 = "succ (u mod b) |-| b" in iszero_decidable [THEN PlusE])
apply (erule_tac [3] SumE)
-apply (tactic {* hyp_arith_rew_tac (thms "div_typing_rls" @
- [thm "modC0", thm "modC_succ", thm "divC0", thm "divC_succ2"]) *})
+apply (tactic {* hyp_arith_rew_tac (@{thms div_typing_rls} @
+ [@{thm modC0}, @{thm modC_succ}, @{thm divC0}, @{thm divC_succ2}]) *})
(*Replace one occurrence of b by succ(u mod b). Clumsy!*)
apply (rule add_typingL [THEN trans_elem])
apply (erule EqE [THEN absdiff_eq0, THEN sym_elem])
apply (rule_tac [3] refl_elem)
-apply (tactic {* hyp_arith_rew_tac (thms "div_typing_rls") *})
+apply (tactic {* hyp_arith_rew_tac @{thms div_typing_rls} *})
done
end
--- a/src/CTT/ex/Elimination.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CTT/ex/Elimination.thy Mon Sep 06 19:23:54 2010 +0200
@@ -53,7 +53,7 @@
and "!!x. x:A ==> B(x) type"
and "!!x. x:A ==> C(x) type"
shows "?a : (SUM x:A. B(x) + C(x)) --> (SUM x:A. B(x)) + (SUM x:A. C(x))"
-apply (tactic {* pc_tac (thms "prems") 1 *})
+apply (tactic {* pc_tac @{thms assms} 1 *})
done
text "Construction of the currying functional"
@@ -68,7 +68,7 @@
and "!!z. z: (SUM x:A. B(x)) ==> C(z) type"
shows "?a : PROD f: (PROD z : (SUM x:A . B(x)) . C(z)).
(PROD x:A . PROD y:B(x) . C(<x,y>))"
-apply (tactic {* pc_tac (thms "prems") 1 *})
+apply (tactic {* pc_tac @{thms assms} 1 *})
done
text "Martin-Lof (1984), page 48: axiom of sum-elimination (uncurry)"
@@ -83,7 +83,7 @@
and "!!z. z: (SUM x:A . B(x)) ==> C(z) type"
shows "?a : (PROD x:A . PROD y:B(x) . C(<x,y>))
--> (PROD z : (SUM x:A . B(x)) . C(z))"
-apply (tactic {* pc_tac (thms "prems") 1 *})
+apply (tactic {* pc_tac @{thms assms} 1 *})
done
text "Function application"
@@ -99,7 +99,7 @@
shows
"?a : (SUM y:B . PROD x:A . C(x,y))
--> (PROD x:A . SUM y:B . C(x,y))"
-apply (tactic {* pc_tac (thms "prems") 1 *})
+apply (tactic {* pc_tac @{thms assms} 1 *})
done
text "Martin-Lof (1984) pages 36-7: the combinator S"
@@ -109,7 +109,7 @@
and "!!x y.[| x:A; y:B(x) |] ==> C(x,y) type"
shows "?a : (PROD x:A. PROD y:B(x). C(x,y))
--> (PROD f: (PROD x:A. B(x)). PROD x:A. C(x, f`x))"
-apply (tactic {* pc_tac (thms "prems") 1 *})
+apply (tactic {* pc_tac @{thms assms} 1 *})
done
text "Martin-Lof (1984) page 58: the axiom of disjunction elimination"
@@ -119,7 +119,7 @@
and "!!z. z: A+B ==> C(z) type"
shows "?a : (PROD x:A. C(inl(x))) --> (PROD y:B. C(inr(y)))
--> (PROD z: A+B. C(z))"
-apply (tactic {* pc_tac (thms "prems") 1 *})
+apply (tactic {* pc_tac @{thms assms} 1 *})
done
(*towards AXIOM OF CHOICE*)
@@ -137,7 +137,7 @@
and "!!x y.[| x:A; y:B(x) |] ==> C(x,y) type"
shows "?a : PROD h: (PROD x:A. SUM y:B(x). C(x,y)).
(SUM f: (PROD x:A. B(x)). PROD x:A. C(x, f`x))"
-apply (tactic {* intr_tac (thms "prems") *})
+apply (tactic {* intr_tac @{thms assms} *})
apply (tactic "add_mp_tac 2")
apply (tactic "add_mp_tac 1")
apply (erule SumE_fst)
@@ -145,7 +145,7 @@
apply (rule subst_eqtyparg)
apply (rule comp_rls)
apply (rule_tac [4] SumE_snd)
-apply (tactic {* typechk_tac (thm "SumE_fst" :: thms "prems") *})
+apply (tactic {* typechk_tac (@{thm SumE_fst} :: @{thms assms}) *})
done
text "Axiom of choice. Proof without fst, snd. Harder still!"
@@ -155,7 +155,7 @@
and "!!x y.[| x:A; y:B(x) |] ==> C(x,y) type"
shows "?a : PROD h: (PROD x:A. SUM y:B(x). C(x,y)).
(SUM f: (PROD x:A. B(x)). PROD x:A. C(x, f`x))"
-apply (tactic {* intr_tac (thms "prems") *})
+apply (tactic {* intr_tac @{thms assms} *})
(*Must not use add_mp_tac as subst_prodE hides the construction.*)
apply (rule ProdE [THEN SumE], assumption)
apply (tactic "TRYALL assume_tac")
@@ -163,11 +163,11 @@
apply (rule subst_eqtyparg)
apply (rule comp_rls)
apply (erule_tac [4] ProdE [THEN SumE])
-apply (tactic {* typechk_tac (thms "prems") *})
+apply (tactic {* typechk_tac @{thms assms} *})
apply (rule replace_type)
apply (rule subst_eqtyparg)
apply (rule comp_rls)
-apply (tactic {* typechk_tac (thms "prems") *})
+apply (tactic {* typechk_tac @{thms assms} *})
apply assumption
done
@@ -183,11 +183,11 @@
apply (tactic {* biresolve_tac safe_brls 2 *})
(*Now must convert assumption C(z) into antecedent C(<kd,ke>) *)
apply (rule_tac [2] a = "y" in ProdE)
-apply (tactic {* typechk_tac (thms "prems") *})
+apply (tactic {* typechk_tac @{thms assms} *})
apply (rule SumE, assumption)
apply (tactic "intr_tac []")
apply (tactic "TRYALL assume_tac")
-apply (tactic {* typechk_tac (thms "prems") *})
+apply (tactic {* typechk_tac @{thms assms} *})
done
end
--- a/src/CTT/ex/Typechecking.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CTT/ex/Typechecking.thy Mon Sep 06 19:23:54 2010 +0200
@@ -66,7 +66,7 @@
(*Proofs involving arbitrary types.
For concreteness, every type variable left over is forced to be N*)
-ML {* val N_tac = TRYALL (rtac (thm "NF")) *}
+ML {* val N_tac = TRYALL (rtac @{thm NF}) *}
schematic_lemma "lam w. <w,w> : ?A"
apply (tactic "typechk_tac []")
--- a/src/CTT/rew.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/CTT/rew.ML Mon Sep 06 19:23:54 2010 +0200
@@ -8,7 +8,7 @@
(*Make list of ProdE RS ProdE ... RS ProdE RS EqE
for using assumptions as rewrite rules*)
fun peEs 0 = []
- | peEs n = thm "EqE" :: map (curry (op RS) (thm "ProdE")) (peEs (n-1));
+ | peEs n = @{thm EqE} :: map (curry (op RS) @{thm ProdE}) (peEs (n-1));
(*Tactic used for proving conditions for the cond_rls*)
val prove_cond_tac = eresolve_tac (peEs 5);
@@ -16,19 +16,19 @@
structure TSimp_data: TSIMP_DATA =
struct
- val refl = thm "refl_elem"
- val sym = thm "sym_elem"
- val trans = thm "trans_elem"
- val refl_red = thm "refl_red"
- val trans_red = thm "trans_red"
- val red_if_equal = thm "red_if_equal"
- val default_rls = thms "comp_rls"
- val routine_tac = routine_tac (thms "routine_rls")
+ val refl = @{thm refl_elem}
+ val sym = @{thm sym_elem}
+ val trans = @{thm trans_elem}
+ val refl_red = @{thm refl_red}
+ val trans_red = @{thm trans_red}
+ val red_if_equal = @{thm red_if_equal}
+ val default_rls = @{thms comp_rls}
+ val routine_tac = routine_tac (@{thms routine_rls})
end;
structure TSimp = TSimpFun (TSimp_data);
-val standard_congr_rls = thms "intrL2_rls" @ thms "elimL_rls";
+val standard_congr_rls = @{thms intrL2_rls} @ @{thms elimL_rls};
(*Make a rewriting tactic from a normalization tactic*)
fun make_rew_tac ntac =
--- a/src/FOL/IFOL.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/FOL/IFOL.thy Mon Sep 06 19:23:54 2010 +0200
@@ -340,7 +340,7 @@
shows "(P&Q) <-> (P'&Q')"
apply (insert assms)
apply (assumption | rule iffI conjI | erule iffE conjE mp |
- tactic {* iff_tac (thms "assms") 1 *})+
+ tactic {* iff_tac @{thms assms} 1 *})+
done
(*Reversed congruence rule! Used in ZF/Order*)
@@ -350,7 +350,7 @@
shows "(Q&P) <-> (Q'&P')"
apply (insert assms)
apply (assumption | rule iffI conjI | erule iffE conjE mp |
- tactic {* iff_tac (thms "assms") 1 *})+
+ tactic {* iff_tac @{thms assms} 1 *})+
done
lemma disj_cong:
@@ -366,7 +366,7 @@
shows "(P-->Q) <-> (P'-->Q')"
apply (insert assms)
apply (assumption | rule iffI impI | erule iffE | erule (1) notE impE |
- tactic {* iff_tac (thms "assms") 1 *})+
+ tactic {* iff_tac @{thms assms} 1 *})+
done
lemma iff_cong: "[| P <-> P'; Q <-> Q' |] ==> (P<->Q) <-> (P'<->Q')"
@@ -381,21 +381,21 @@
assumes "!!x. P(x) <-> Q(x)"
shows "(ALL x. P(x)) <-> (ALL x. Q(x))"
apply (assumption | rule iffI allI | erule (1) notE impE | erule allE |
- tactic {* iff_tac (thms "assms") 1 *})+
+ tactic {* iff_tac @{thms assms} 1 *})+
done
lemma ex_cong:
assumes "!!x. P(x) <-> Q(x)"
shows "(EX x. P(x)) <-> (EX x. Q(x))"
apply (erule exE | assumption | rule iffI exI | erule (1) notE impE |
- tactic {* iff_tac (thms "assms") 1 *})+
+ tactic {* iff_tac @{thms assms} 1 *})+
done
lemma ex1_cong:
assumes "!!x. P(x) <-> Q(x)"
shows "(EX! x. P(x)) <-> (EX! x. Q(x))"
apply (erule ex1E spec [THEN mp] | assumption | rule iffI ex1I | erule (1) notE impE |
- tactic {* iff_tac (thms "assms") 1 *})+
+ tactic {* iff_tac @{thms assms} 1 *})+
done
(*** Equality rules ***)
--- a/src/FOL/hypsubstdata.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/FOL/hypsubstdata.ML Mon Sep 06 19:23:54 2010 +0200
@@ -6,13 +6,13 @@
val dest_eq = FOLogic.dest_eq
val dest_Trueprop = FOLogic.dest_Trueprop
val dest_imp = FOLogic.dest_imp
- val eq_reflection = thm "eq_reflection"
- val rev_eq_reflection = thm "meta_eq_to_obj_eq"
- val imp_intr = thm "impI"
- val rev_mp = thm "rev_mp"
- val subst = thm "subst"
- val sym = thm "sym"
- val thin_refl = thm "thin_refl"
+ val eq_reflection = @{thm eq_reflection}
+ val rev_eq_reflection = @{thm meta_eq_to_obj_eq}
+ val imp_intr = @{thm impI}
+ val rev_mp = @{thm rev_mp}
+ val subst = @{thm subst}
+ val sym = @{thm sym}
+ val thin_refl = @{thm thin_refl}
val prop_subst = @{lemma "PROP P(t) ==> PROP prop (x = t ==> PROP P(x))"
by (unfold prop_def) (drule eq_reflection, unfold)}
end;
--- a/src/HOL/Algebra/abstract/Ring2.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Algebra/abstract/Ring2.thy Mon Sep 06 19:23:54 2010 +0200
@@ -211,11 +211,11 @@
fun termless_ring (a, b) = (Term_Ord.term_lpo ring_ord (a, b) = LESS);
val ring_ss = HOL_basic_ss settermless termless_ring addsimps
- [thm "a_assoc", thm "l_zero", thm "l_neg", thm "a_comm", thm "m_assoc",
- thm "l_one", thm "l_distr", thm "m_comm", thm "minus_def",
- thm "r_zero", thm "r_neg", thm "r_neg2", thm "r_neg1", thm "minus_add",
- thm "minus_minus", thm "minus0", thm "a_lcomm", thm "m_lcomm", (*thm "r_one",*)
- thm "r_distr", thm "l_null", thm "r_null", thm "l_minus", thm "r_minus"];
+ [@{thm a_assoc}, @{thm l_zero}, @{thm l_neg}, @{thm a_comm}, @{thm m_assoc},
+ @{thm l_one}, @{thm l_distr}, @{thm m_comm}, @{thm minus_def},
+ @{thm r_zero}, @{thm r_neg}, @{thm r_neg2}, @{thm r_neg1}, @{thm minus_add},
+ @{thm minus_minus}, @{thm minus0}, @{thm a_lcomm}, @{thm m_lcomm}, (*@{thm r_one},*)
+ @{thm r_distr}, @{thm l_null}, @{thm r_null}, @{thm l_minus}, @{thm r_minus}];
*} (* Note: r_one is not necessary in ring_ss *)
method_setup ring =
--- a/src/HOL/Algebra/poly/LongDiv.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Algebra/poly/LongDiv.thy Mon Sep 06 19:23:54 2010 +0200
@@ -133,8 +133,8 @@
apply (tactic {* simp_tac (@{simpset} addsimps [@{thm l_distr}, @{thm a_assoc}]
delsimprocs [ring_simproc]) 1 *})
apply (tactic {* asm_simp_tac (@{simpset} delsimprocs [ring_simproc]) 1 *})
- apply (tactic {* simp_tac (@{simpset} addsimps [thm "minus_def", thm "smult_r_distr",
- thm "smult_r_minus", thm "monom_mult_smult", thm "smult_assoc2"]
+ apply (tactic {* simp_tac (@{simpset} addsimps [@{thm minus_def}, @{thm smult_r_distr},
+ @{thm smult_r_minus}, @{thm monom_mult_smult}, @{thm smult_assoc2}]
delsimprocs [ring_simproc]) 1 *})
apply (simp add: smult_assoc1 [symmetric])
done
@@ -169,7 +169,7 @@
apply (rule conjI)
apply (drule sym)
apply (tactic {* asm_simp_tac
- (@{simpset} addsimps [thm "smult_r_distr" RS sym, thm "smult_assoc2"]
+ (@{simpset} addsimps [@{thm smult_r_distr} RS sym, @{thm smult_assoc2}]
delsimprocs [ring_simproc]) 1 *})
apply (simp (no_asm_simp) add: l_inverse_ring unit_power smult_assoc1 [symmetric])
(* degree property *)
@@ -216,21 +216,21 @@
apply (erule disjE)
(* r2 = 0 *)
apply (tactic {* asm_full_simp_tac (@{simpset}
- addsimps [thm "integral_iff", thm "minus_def", thm "l_zero", thm "uminus_zero"]
+ addsimps [@{thm integral_iff}, @{thm minus_def}, @{thm l_zero}, @{thm uminus_zero}]
delsimprocs [ring_simproc]) 1 *})
(* r2 ~= 0 *)
apply (drule_tac f = "deg" and y = "r2 - r1" in arg_cong)
apply (tactic {* asm_full_simp_tac (@{simpset} addsimps
- [thm "minus_def", thm "l_zero", thm "uminus_zero"] delsimprocs [ring_simproc]) 1 *})
+ [@{thm minus_def}, @{thm l_zero}, @{thm uminus_zero}] delsimprocs [ring_simproc]) 1 *})
(* r1 ~=0 *)
apply (erule disjE)
(* r2 = 0 *)
apply (drule_tac f = "deg" and y = "r2 - r1" in arg_cong)
apply (tactic {* asm_full_simp_tac (@{simpset} addsimps
- [thm "minus_def", thm "l_zero", thm "uminus_zero"] delsimprocs [ring_simproc]) 1 *})
+ [@{thm minus_def}, @{thm l_zero}, @{thm uminus_zero}] delsimprocs [ring_simproc]) 1 *})
(* r2 ~= 0 *)
apply (drule_tac f = "deg" and y = "r2 - r1" in arg_cong)
- apply (tactic {* asm_full_simp_tac (@{simpset} addsimps [thm "minus_def"]
+ apply (tactic {* asm_full_simp_tac (@{simpset} addsimps [@{thm minus_def}]
delsimprocs [ring_simproc]) 1 *})
apply (drule order_eq_refl [THEN add_leD2])
apply (drule leD)
--- a/src/HOL/Bali/AxCompl.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Bali/AxCompl.thy Mon Sep 06 19:23:54 2010 +0200
@@ -1392,7 +1392,7 @@
apply -
apply (induct_tac "n")
apply (tactic "ALLGOALS (clarsimp_tac @{clasimpset})")
-apply (tactic {* dtac (Thm.permute_prems 0 1 (thm "card_seteq")) 1 *})
+apply (tactic {* dtac (Thm.permute_prems 0 1 @{thm card_seteq}) 1 *})
apply simp
apply (erule finite_imageI)
apply (simp add: MGF_asm ax_derivs_asm)
--- a/src/HOL/Bali/AxSem.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Bali/AxSem.thy Mon Sep 06 19:23:54 2010 +0200
@@ -671,7 +671,7 @@
(* 37 subgoals *)
prefer 18 (* Methd *)
apply (rule ax_derivs.Methd, drule spec, erule mp, fast)
-apply (tactic {* TRYALL (resolve_tac ((funpow 5 tl) (thms "ax_derivs.intros"))) *})
+apply (tactic {* TRYALL (resolve_tac ((funpow 5 tl) @{thms ax_derivs.intros})) *})
apply auto
done
@@ -691,8 +691,8 @@
apply (erule ax_derivs.induct)
(*42 subgoals*)
apply (tactic "ALLGOALS strip_tac")
-apply (tactic {* ALLGOALS(REPEAT o (EVERY'[dtac (thm "subset_singletonD"),
- etac disjE, fast_tac (@{claset} addSIs [thm "ax_derivs.empty"])]))*})
+apply (tactic {* ALLGOALS(REPEAT o (EVERY'[dtac @{thm subset_singletonD},
+ etac disjE, fast_tac (@{claset} addSIs @{thms ax_derivs.empty})]))*})
apply (tactic "TRYALL hyp_subst_tac")
apply (simp, rule ax_derivs.empty)
apply (drule subset_insertD)
@@ -702,7 +702,7 @@
apply (fast intro: ax_derivs.weaken)
apply (rule ax_derivs.conseq, clarify, tactic "smp_tac 3 1", blast(* unused *))
(*37 subgoals*)
-apply (tactic {* TRYALL (resolve_tac ((funpow 5 tl) (thms "ax_derivs.intros"))
+apply (tactic {* TRYALL (resolve_tac ((funpow 5 tl) @{thms ax_derivs.intros})
THEN_ALL_NEW fast_tac @{claset}) *})
(*1 subgoal*)
apply (clarsimp simp add: subset_mtriples_iff)
--- a/src/HOL/Bali/Eval.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Bali/Eval.thy Mon Sep 06 19:23:54 2010 +0200
@@ -1168,7 +1168,7 @@
"G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (w, s') \<Longrightarrow> (\<forall>w' s''. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (w', s'') \<longrightarrow> w' = w \<and> s'' = s')"
apply (erule eval_induct)
apply (tactic {* ALLGOALS (EVERY'
- [strip_tac, rotate_tac ~1, eresolve_tac (thms "eval_elim_cases")]) *})
+ [strip_tac, rotate_tac ~1, eresolve_tac @{thms eval_elim_cases}]) *})
(* 31 subgoals *)
prefer 28 (* Try *)
apply (simp (no_asm_use) only: split add: split_if_asm)
--- a/src/HOL/Bali/Evaln.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Bali/Evaln.thy Mon Sep 06 19:23:54 2010 +0200
@@ -449,9 +449,9 @@
lemma evaln_nonstrict [rule_format (no_asm), elim]:
"G\<turnstile>s \<midarrow>t\<succ>\<midarrow>n\<rightarrow> (w, s') \<Longrightarrow> \<forall>m. n\<le>m \<longrightarrow> G\<turnstile>s \<midarrow>t\<succ>\<midarrow>m\<rightarrow> (w, s')"
apply (erule evaln.induct)
-apply (tactic {* ALLGOALS (EVERY'[strip_tac, TRY o etac (thm "Suc_le_D_lemma"),
+apply (tactic {* ALLGOALS (EVERY'[strip_tac, TRY o etac @{thm Suc_le_D_lemma},
REPEAT o smp_tac 1,
- resolve_tac (thms "evaln.intros") THEN_ALL_NEW TRY o atac]) *})
+ resolve_tac @{thms evaln.intros} THEN_ALL_NEW TRY o atac]) *})
(* 3 subgoals *)
apply (auto split del: split_if)
done
--- a/src/HOL/Decision_Procs/mir_tac.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Decision_Procs/mir_tac.ML Mon Sep 06 19:23:54 2010 +0200
@@ -16,18 +16,18 @@
fun trace_msg s = if !trace then tracing s else ();
val mir_ss =
-let val ths = map thm ["real_of_int_inject", "real_of_int_less_iff", "real_of_int_le_iff"]
+let val ths = [@{thm "real_of_int_inject"}, @{thm "real_of_int_less_iff"}, @{thm "real_of_int_le_iff"}]
in @{simpset} delsimps ths addsimps (map (fn th => th RS sym) ths)
end;
val nT = HOLogic.natT;
- val nat_arith = map thm ["add_nat_number_of", "diff_nat_number_of",
- "mult_nat_number_of", "eq_nat_number_of", "less_nat_number_of"];
+ val nat_arith = [@{thm "add_nat_number_of"}, @{thm "diff_nat_number_of"},
+ @{thm "mult_nat_number_of"}, @{thm "eq_nat_number_of"}, @{thm "less_nat_number_of"}];
- val comp_arith = (map thm ["Let_def", "if_False", "if_True", "add_0",
- "add_Suc", "add_number_of_left", "mult_number_of_left",
- "Suc_eq_plus1"])@
- (map (fn s => thm s RS sym) ["numeral_1_eq_1", "numeral_0_eq_0"])
+ val comp_arith = [@{thm "Let_def"}, @{thm "if_False"}, @{thm "if_True"}, @{thm "add_0"},
+ @{thm "add_Suc"}, @{thm "add_number_of_left"}, @{thm "mult_number_of_left"},
+ @{thm "Suc_eq_plus1"}] @
+ (map (fn th => th RS sym) [@{thm "numeral_1_eq_1"}, @{thm "numeral_0_eq_0"}])
@ @{thms arith_simps} @ nat_arith @ @{thms rel_simps}
val ths = [@{thm "mult_numeral_1"}, @{thm "mult_numeral_1_right"},
@{thm "real_of_nat_number_of"},
--- a/src/HOL/Extraction/Euclid.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,279 +0,0 @@
-(* Title: HOL/Extraction/Euclid.thy
- Author: Markus Wenzel, TU Muenchen
- Author: Freek Wiedijk, Radboud University Nijmegen
- Author: Stefan Berghofer, TU Muenchen
-*)
-
-header {* Euclid's theorem *}
-
-theory Euclid
-imports "~~/src/HOL/Number_Theory/UniqueFactorization" Util Efficient_Nat
-begin
-
-text {*
-A constructive version of the proof of Euclid's theorem by
-Markus Wenzel and Freek Wiedijk \cite{Wenzel-Wiedijk-JAR2002}.
-*}
-
-lemma factor_greater_one1: "n = m * k \<Longrightarrow> m < n \<Longrightarrow> k < n \<Longrightarrow> Suc 0 < m"
- by (induct m) auto
-
-lemma factor_greater_one2: "n = m * k \<Longrightarrow> m < n \<Longrightarrow> k < n \<Longrightarrow> Suc 0 < k"
- by (induct k) auto
-
-lemma prod_mn_less_k:
- "(0::nat) < n ==> 0 < k ==> Suc 0 < m ==> m * n = k ==> n < k"
- by (induct m) auto
-
-lemma prime_eq: "prime (p::nat) = (1 < p \<and> (\<forall>m. m dvd p \<longrightarrow> 1 < m \<longrightarrow> m = p))"
- apply (simp add: prime_nat_def)
- apply (rule iffI)
- apply blast
- apply (erule conjE)
- apply (rule conjI)
- apply assumption
- apply (rule allI impI)+
- apply (erule allE)
- apply (erule impE)
- apply assumption
- apply (case_tac "m=0")
- apply simp
- apply (case_tac "m=Suc 0")
- apply simp
- apply simp
- done
-
-lemma prime_eq': "prime (p::nat) = (1 < p \<and> (\<forall>m k. p = m * k \<longrightarrow> 1 < m \<longrightarrow> m = p))"
- by (simp add: prime_eq dvd_def HOL.all_simps [symmetric] del: HOL.all_simps)
-
-lemma not_prime_ex_mk:
- assumes n: "Suc 0 < n"
- shows "(\<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k) \<or> prime n"
-proof -
- {
- fix k
- from nat_eq_dec
- have "(\<exists>m<n. n = m * k) \<or> \<not> (\<exists>m<n. n = m * k)"
- by (rule search)
- }
- hence "(\<exists>k<n. \<exists>m<n. n = m * k) \<or> \<not> (\<exists>k<n. \<exists>m<n. n = m * k)"
- by (rule search)
- thus ?thesis
- proof
- assume "\<exists>k<n. \<exists>m<n. n = m * k"
- then obtain k m where k: "k<n" and m: "m<n" and nmk: "n = m * k"
- by iprover
- from nmk m k have "Suc 0 < m" by (rule factor_greater_one1)
- moreover from nmk m k have "Suc 0 < k" by (rule factor_greater_one2)
- ultimately show ?thesis using k m nmk by iprover
- next
- assume "\<not> (\<exists>k<n. \<exists>m<n. n = m * k)"
- hence A: "\<forall>k<n. \<forall>m<n. n \<noteq> m * k" by iprover
- have "\<forall>m k. n = m * k \<longrightarrow> Suc 0 < m \<longrightarrow> m = n"
- proof (intro allI impI)
- fix m k
- assume nmk: "n = m * k"
- assume m: "Suc 0 < m"
- from n m nmk have k: "0 < k"
- by (cases k) auto
- moreover from n have n: "0 < n" by simp
- moreover note m
- moreover from nmk have "m * k = n" by simp
- ultimately have kn: "k < n" by (rule prod_mn_less_k)
- show "m = n"
- proof (cases "k = Suc 0")
- case True
- with nmk show ?thesis by (simp only: mult_Suc_right)
- next
- case False
- from m have "0 < m" by simp
- moreover note n
- moreover from False n nmk k have "Suc 0 < k" by auto
- moreover from nmk have "k * m = n" by (simp only: mult_ac)
- ultimately have mn: "m < n" by (rule prod_mn_less_k)
- with kn A nmk show ?thesis by iprover
- qed
- qed
- with n have "prime n"
- by (simp only: prime_eq' One_nat_def simp_thms)
- thus ?thesis ..
- qed
-qed
-
-lemma dvd_factorial: "0 < m \<Longrightarrow> m \<le> n \<Longrightarrow> m dvd fact (n::nat)"
-proof (induct n rule: nat_induct)
- case 0
- then show ?case by simp
-next
- case (Suc n)
- from `m \<le> Suc n` show ?case
- proof (rule le_SucE)
- assume "m \<le> n"
- with `0 < m` have "m dvd fact n" by (rule Suc)
- then have "m dvd (fact n * Suc n)" by (rule dvd_mult2)
- then show ?thesis by (simp add: mult_commute)
- next
- assume "m = Suc n"
- then have "m dvd (fact n * Suc n)"
- by (auto intro: dvdI simp: mult_ac)
- then show ?thesis by (simp add: mult_commute)
- qed
-qed
-
-lemma dvd_prod [iff]: "n dvd (PROD m\<Colon>nat:#multiset_of (n # ns). m)"
- by (simp add: msetprod_Un msetprod_singleton)
-
-definition all_prime :: "nat list \<Rightarrow> bool" where
- "all_prime ps \<longleftrightarrow> (\<forall>p\<in>set ps. prime p)"
-
-lemma all_prime_simps:
- "all_prime []"
- "all_prime (p # ps) \<longleftrightarrow> prime p \<and> all_prime ps"
- by (simp_all add: all_prime_def)
-
-lemma all_prime_append:
- "all_prime (ps @ qs) \<longleftrightarrow> all_prime ps \<and> all_prime qs"
- by (simp add: all_prime_def ball_Un)
-
-lemma split_all_prime:
- assumes "all_prime ms" and "all_prime ns"
- shows "\<exists>qs. all_prime qs \<and> (PROD m\<Colon>nat:#multiset_of qs. m) =
- (PROD m\<Colon>nat:#multiset_of ms. m) * (PROD m\<Colon>nat:#multiset_of ns. m)" (is "\<exists>qs. ?P qs \<and> ?Q qs")
-proof -
- from assms have "all_prime (ms @ ns)"
- by (simp add: all_prime_append)
- moreover from assms have "(PROD m\<Colon>nat:#multiset_of (ms @ ns). m) =
- (PROD m\<Colon>nat:#multiset_of ms. m) * (PROD m\<Colon>nat:#multiset_of ns. m)"
- by (simp add: msetprod_Un)
- ultimately have "?P (ms @ ns) \<and> ?Q (ms @ ns)" ..
- then show ?thesis ..
-qed
-
-lemma all_prime_nempty_g_one:
- assumes "all_prime ps" and "ps \<noteq> []"
- shows "Suc 0 < (PROD m\<Colon>nat:#multiset_of ps. m)"
- using `ps \<noteq> []` `all_prime ps` unfolding One_nat_def [symmetric] by (induct ps rule: list_nonempty_induct)
- (simp_all add: all_prime_simps msetprod_singleton msetprod_Un prime_gt_1_nat less_1_mult del: One_nat_def)
-
-lemma factor_exists: "Suc 0 < n \<Longrightarrow> (\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) = n)"
-proof (induct n rule: nat_wf_ind)
- case (1 n)
- from `Suc 0 < n`
- have "(\<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k) \<or> prime n"
- by (rule not_prime_ex_mk)
- then show ?case
- proof
- assume "\<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k"
- then obtain m k where m: "Suc 0 < m" and k: "Suc 0 < k" and mn: "m < n"
- and kn: "k < n" and nmk: "n = m * k" by iprover
- from mn and m have "\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) = m" by (rule 1)
- then obtain ps1 where "all_prime ps1" and prod_ps1_m: "(PROD m\<Colon>nat:#multiset_of ps1. m) = m"
- by iprover
- from kn and k have "\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) = k" by (rule 1)
- then obtain ps2 where "all_prime ps2" and prod_ps2_k: "(PROD m\<Colon>nat:#multiset_of ps2. m) = k"
- by iprover
- from `all_prime ps1` `all_prime ps2`
- have "\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) =
- (PROD m\<Colon>nat:#multiset_of ps1. m) * (PROD m\<Colon>nat:#multiset_of ps2. m)"
- by (rule split_all_prime)
- with prod_ps1_m prod_ps2_k nmk show ?thesis by simp
- next
- assume "prime n" then have "all_prime [n]" by (simp add: all_prime_simps)
- moreover have "(PROD m\<Colon>nat:#multiset_of [n]. m) = n" by (simp add: msetprod_singleton)
- ultimately have "all_prime [n] \<and> (PROD m\<Colon>nat:#multiset_of [n]. m) = n" ..
- then show ?thesis ..
- qed
-qed
-
-lemma prime_factor_exists:
- assumes N: "(1::nat) < n"
- shows "\<exists>p. prime p \<and> p dvd n"
-proof -
- from N obtain ps where "all_prime ps"
- and prod_ps: "n = (PROD m\<Colon>nat:#multiset_of ps. m)" using factor_exists
- by simp iprover
- with N have "ps \<noteq> []"
- by (auto simp add: all_prime_nempty_g_one msetprod_empty)
- then obtain p qs where ps: "ps = p # qs" by (cases ps) simp
- with `all_prime ps` have "prime p" by (simp add: all_prime_simps)
- moreover from `all_prime ps` ps prod_ps
- have "p dvd n" by (simp only: dvd_prod)
- ultimately show ?thesis by iprover
-qed
-
-text {*
-Euclid's theorem: there are infinitely many primes.
-*}
-
-lemma Euclid: "\<exists>p::nat. prime p \<and> n < p"
-proof -
- let ?k = "fact n + 1"
- have "1 < fact n + 1" by simp
- then obtain p where prime: "prime p" and dvd: "p dvd ?k" using prime_factor_exists by iprover
- have "n < p"
- proof -
- have "\<not> p \<le> n"
- proof
- assume pn: "p \<le> n"
- from `prime p` have "0 < p" by (rule prime_gt_0_nat)
- then have "p dvd fact n" using pn by (rule dvd_factorial)
- with dvd have "p dvd ?k - fact n" by (rule dvd_diff_nat)
- then have "p dvd 1" by simp
- with prime show False by auto
- qed
- then show ?thesis by simp
- qed
- with prime show ?thesis by iprover
-qed
-
-extract Euclid
-
-text {*
-The program extracted from the proof of Euclid's theorem looks as follows.
-@{thm [display] Euclid_def}
-The program corresponding to the proof of the factorization theorem is
-@{thm [display] factor_exists_def}
-*}
-
-instantiation nat :: default
-begin
-
-definition "default = (0::nat)"
-
-instance ..
-
-end
-
-instantiation list :: (type) default
-begin
-
-definition "default = []"
-
-instance ..
-
-end
-
-primrec iterate :: "nat \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a list" where
- "iterate 0 f x = []"
- | "iterate (Suc n) f x = (let y = f x in y # iterate n f y)"
-
-lemma "factor_exists 1007 = [53, 19]" by eval
-lemma "factor_exists 567 = [7, 3, 3, 3, 3]" by eval
-lemma "factor_exists 345 = [23, 5, 3]" by eval
-lemma "factor_exists 999 = [37, 3, 3, 3]" by eval
-lemma "factor_exists 876 = [73, 3, 2, 2]" by eval
-
-lemma "iterate 4 Euclid 0 = [2, 3, 7, 71]" by eval
-
-consts_code
- default ("(error \"default\")")
-
-lemma "factor_exists 1007 = [53, 19]" by evaluation
-lemma "factor_exists 567 = [7, 3, 3, 3, 3]" by evaluation
-lemma "factor_exists 345 = [23, 5, 3]" by evaluation
-lemma "factor_exists 999 = [37, 3, 3, 3]" by evaluation
-lemma "factor_exists 876 = [73, 3, 2, 2]" by evaluation
-
-lemma "iterate 4 Euclid 0 = [2, 3, 7, 71]" by evaluation
-
-end
--- a/src/HOL/Extraction/Greatest_Common_Divisor.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-(* Title: HOL/Extraction/Greatest_Common_Divisor.thy
- Author: Stefan Berghofer, TU Muenchen
- Helmut Schwichtenberg, LMU Muenchen
-*)
-
-header {* Greatest common divisor *}
-
-theory Greatest_Common_Divisor
-imports QuotRem
-begin
-
-theorem greatest_common_divisor:
- "\<And>n::nat. Suc m < n \<Longrightarrow> \<exists>k n1 m1. k * n1 = n \<and> k * m1 = Suc m \<and>
- (\<forall>l l1 l2. l * l1 = n \<longrightarrow> l * l2 = Suc m \<longrightarrow> l \<le> k)"
-proof (induct m rule: nat_wf_ind)
- case (1 m n)
- from division obtain r q where h1: "n = Suc m * q + r" and h2: "r \<le> m"
- by iprover
- show ?case
- proof (cases r)
- case 0
- with h1 have "Suc m * q = n" by simp
- moreover have "Suc m * 1 = Suc m" by simp
- moreover {
- fix l2 have "\<And>l l1. l * l1 = n \<Longrightarrow> l * l2 = Suc m \<Longrightarrow> l \<le> Suc m"
- by (cases l2) simp_all }
- ultimately show ?thesis by iprover
- next
- case (Suc nat)
- with h2 have h: "nat < m" by simp
- moreover from h have "Suc nat < Suc m" by simp
- ultimately have "\<exists>k m1 r1. k * m1 = Suc m \<and> k * r1 = Suc nat \<and>
- (\<forall>l l1 l2. l * l1 = Suc m \<longrightarrow> l * l2 = Suc nat \<longrightarrow> l \<le> k)"
- by (rule 1)
- then obtain k m1 r1 where
- h1': "k * m1 = Suc m"
- and h2': "k * r1 = Suc nat"
- and h3': "\<And>l l1 l2. l * l1 = Suc m \<Longrightarrow> l * l2 = Suc nat \<Longrightarrow> l \<le> k"
- by iprover
- have mn: "Suc m < n" by (rule 1)
- from h1 h1' h2' Suc have "k * (m1 * q + r1) = n"
- by (simp add: add_mult_distrib2 nat_mult_assoc [symmetric])
- moreover have "\<And>l l1 l2. l * l1 = n \<Longrightarrow> l * l2 = Suc m \<Longrightarrow> l \<le> k"
- proof -
- fix l l1 l2
- assume ll1n: "l * l1 = n"
- assume ll2m: "l * l2 = Suc m"
- moreover have "l * (l1 - l2 * q) = Suc nat"
- by (simp add: diff_mult_distrib2 h1 Suc [symmetric] mn ll1n ll2m [symmetric])
- ultimately show "l \<le> k" by (rule h3')
- qed
- ultimately show ?thesis using h1' by iprover
- qed
-qed
-
-extract greatest_common_divisor
-
-text {*
-The extracted program for computing the greatest common divisor is
-@{thm [display] greatest_common_divisor_def}
-*}
-
-instantiation nat :: default
-begin
-
-definition "default = (0::nat)"
-
-instance ..
-
-end
-
-instantiation prod :: (default, default) default
-begin
-
-definition "default = (default, default)"
-
-instance ..
-
-end
-
-instantiation "fun" :: (type, default) default
-begin
-
-definition "default = (\<lambda>x. default)"
-
-instance ..
-
-end
-
-consts_code
- default ("(error \"default\")")
-
-lemma "greatest_common_divisor 7 12 = (4, 3, 2)" by evaluation
-lemma "greatest_common_divisor 7 12 = (4, 3, 2)" by eval
-
-end
--- a/src/HOL/Extraction/Higman.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,462 +0,0 @@
-(* Title: HOL/Extraction/Higman.thy
- Author: Stefan Berghofer, TU Muenchen
- Author: Monika Seisenberger, LMU Muenchen
-*)
-
-header {* Higman's lemma *}
-
-theory Higman
-imports Main State_Monad Random
-begin
-
-text {*
- Formalization by Stefan Berghofer and Monika Seisenberger,
- based on Coquand and Fridlender \cite{Coquand93}.
-*}
-
-datatype letter = A | B
-
-inductive emb :: "letter list \<Rightarrow> letter list \<Rightarrow> bool"
-where
- emb0 [Pure.intro]: "emb [] bs"
- | emb1 [Pure.intro]: "emb as bs \<Longrightarrow> emb as (b # bs)"
- | emb2 [Pure.intro]: "emb as bs \<Longrightarrow> emb (a # as) (a # bs)"
-
-inductive L :: "letter list \<Rightarrow> letter list list \<Rightarrow> bool"
- for v :: "letter list"
-where
- L0 [Pure.intro]: "emb w v \<Longrightarrow> L v (w # ws)"
- | L1 [Pure.intro]: "L v ws \<Longrightarrow> L v (w # ws)"
-
-inductive good :: "letter list list \<Rightarrow> bool"
-where
- good0 [Pure.intro]: "L w ws \<Longrightarrow> good (w # ws)"
- | good1 [Pure.intro]: "good ws \<Longrightarrow> good (w # ws)"
-
-inductive R :: "letter \<Rightarrow> letter list list \<Rightarrow> letter list list \<Rightarrow> bool"
- for a :: letter
-where
- R0 [Pure.intro]: "R a [] []"
- | R1 [Pure.intro]: "R a vs ws \<Longrightarrow> R a (w # vs) ((a # w) # ws)"
-
-inductive T :: "letter \<Rightarrow> letter list list \<Rightarrow> letter list list \<Rightarrow> bool"
- for a :: letter
-where
- T0 [Pure.intro]: "a \<noteq> b \<Longrightarrow> R b ws zs \<Longrightarrow> T a (w # zs) ((a # w) # zs)"
- | T1 [Pure.intro]: "T a ws zs \<Longrightarrow> T a (w # ws) ((a # w) # zs)"
- | T2 [Pure.intro]: "a \<noteq> b \<Longrightarrow> T a ws zs \<Longrightarrow> T a ws ((b # w) # zs)"
-
-inductive bar :: "letter list list \<Rightarrow> bool"
-where
- bar1 [Pure.intro]: "good ws \<Longrightarrow> bar ws"
- | bar2 [Pure.intro]: "(\<And>w. bar (w # ws)) \<Longrightarrow> bar ws"
-
-theorem prop1: "bar ([] # ws)" by iprover
-
-theorem lemma1: "L as ws \<Longrightarrow> L (a # as) ws"
- by (erule L.induct, iprover+)
-
-lemma lemma2': "R a vs ws \<Longrightarrow> L as vs \<Longrightarrow> L (a # as) ws"
- apply (induct set: R)
- apply (erule L.cases)
- apply simp+
- apply (erule L.cases)
- apply simp_all
- apply (rule L0)
- apply (erule emb2)
- apply (erule L1)
- done
-
-lemma lemma2: "R a vs ws \<Longrightarrow> good vs \<Longrightarrow> good ws"
- apply (induct set: R)
- apply iprover
- apply (erule good.cases)
- apply simp_all
- apply (rule good0)
- apply (erule lemma2')
- apply assumption
- apply (erule good1)
- done
-
-lemma lemma3': "T a vs ws \<Longrightarrow> L as vs \<Longrightarrow> L (a # as) ws"
- apply (induct set: T)
- apply (erule L.cases)
- apply simp_all
- apply (rule L0)
- apply (erule emb2)
- apply (rule L1)
- apply (erule lemma1)
- apply (erule L.cases)
- apply simp_all
- apply iprover+
- done
-
-lemma lemma3: "T a ws zs \<Longrightarrow> good ws \<Longrightarrow> good zs"
- apply (induct set: T)
- apply (erule good.cases)
- apply simp_all
- apply (rule good0)
- apply (erule lemma1)
- apply (erule good1)
- apply (erule good.cases)
- apply simp_all
- apply (rule good0)
- apply (erule lemma3')
- apply iprover+
- done
-
-lemma lemma4: "R a ws zs \<Longrightarrow> ws \<noteq> [] \<Longrightarrow> T a ws zs"
- apply (induct set: R)
- apply iprover
- apply (case_tac vs)
- apply (erule R.cases)
- apply simp
- apply (case_tac a)
- apply (rule_tac b=B in T0)
- apply simp
- apply (rule R0)
- apply (rule_tac b=A in T0)
- apply simp
- apply (rule R0)
- apply simp
- apply (rule T1)
- apply simp
- done
-
-lemma letter_neq: "(a::letter) \<noteq> b \<Longrightarrow> c \<noteq> a \<Longrightarrow> c = b"
- apply (case_tac a)
- apply (case_tac b)
- apply (case_tac c, simp, simp)
- apply (case_tac c, simp, simp)
- apply (case_tac b)
- apply (case_tac c, simp, simp)
- apply (case_tac c, simp, simp)
- done
-
-lemma letter_eq_dec: "(a::letter) = b \<or> a \<noteq> b"
- apply (case_tac a)
- apply (case_tac b)
- apply simp
- apply simp
- apply (case_tac b)
- apply simp
- apply simp
- done
-
-theorem prop2:
- assumes ab: "a \<noteq> b" and bar: "bar xs"
- shows "\<And>ys zs. bar ys \<Longrightarrow> T a xs zs \<Longrightarrow> T b ys zs \<Longrightarrow> bar zs" using bar
-proof induct
- fix xs zs assume "T a xs zs" and "good xs"
- hence "good zs" by (rule lemma3)
- then show "bar zs" by (rule bar1)
-next
- fix xs ys
- assume I: "\<And>w ys zs. bar ys \<Longrightarrow> T a (w # xs) zs \<Longrightarrow> T b ys zs \<Longrightarrow> bar zs"
- assume "bar ys"
- thus "\<And>zs. T a xs zs \<Longrightarrow> T b ys zs \<Longrightarrow> bar zs"
- proof induct
- fix ys zs assume "T b ys zs" and "good ys"
- then have "good zs" by (rule lemma3)
- then show "bar zs" by (rule bar1)
- next
- fix ys zs assume I': "\<And>w zs. T a xs zs \<Longrightarrow> T b (w # ys) zs \<Longrightarrow> bar zs"
- and ys: "\<And>w. bar (w # ys)" and Ta: "T a xs zs" and Tb: "T b ys zs"
- show "bar zs"
- proof (rule bar2)
- fix w
- show "bar (w # zs)"
- proof (cases w)
- case Nil
- thus ?thesis by simp (rule prop1)
- next
- case (Cons c cs)
- from letter_eq_dec show ?thesis
- proof
- assume ca: "c = a"
- from ab have "bar ((a # cs) # zs)" by (iprover intro: I ys Ta Tb)
- thus ?thesis by (simp add: Cons ca)
- next
- assume "c \<noteq> a"
- with ab have cb: "c = b" by (rule letter_neq)
- from ab have "bar ((b # cs) # zs)" by (iprover intro: I' Ta Tb)
- thus ?thesis by (simp add: Cons cb)
- qed
- qed
- qed
- qed
-qed
-
-theorem prop3:
- assumes bar: "bar xs"
- shows "\<And>zs. xs \<noteq> [] \<Longrightarrow> R a xs zs \<Longrightarrow> bar zs" using bar
-proof induct
- fix xs zs
- assume "R a xs zs" and "good xs"
- then have "good zs" by (rule lemma2)
- then show "bar zs" by (rule bar1)
-next
- fix xs zs
- assume I: "\<And>w zs. w # xs \<noteq> [] \<Longrightarrow> R a (w # xs) zs \<Longrightarrow> bar zs"
- and xsb: "\<And>w. bar (w # xs)" and xsn: "xs \<noteq> []" and R: "R a xs zs"
- show "bar zs"
- proof (rule bar2)
- fix w
- show "bar (w # zs)"
- proof (induct w)
- case Nil
- show ?case by (rule prop1)
- next
- case (Cons c cs)
- from letter_eq_dec show ?case
- proof
- assume "c = a"
- thus ?thesis by (iprover intro: I [simplified] R)
- next
- from R xsn have T: "T a xs zs" by (rule lemma4)
- assume "c \<noteq> a"
- thus ?thesis by (iprover intro: prop2 Cons xsb xsn R T)
- qed
- qed
- qed
-qed
-
-theorem higman: "bar []"
-proof (rule bar2)
- fix w
- show "bar [w]"
- proof (induct w)
- show "bar [[]]" by (rule prop1)
- next
- fix c cs assume "bar [cs]"
- thus "bar [c # cs]" by (rule prop3) (simp, iprover)
- qed
-qed
-
-primrec
- is_prefix :: "'a list \<Rightarrow> (nat \<Rightarrow> 'a) \<Rightarrow> bool"
-where
- "is_prefix [] f = True"
- | "is_prefix (x # xs) f = (x = f (length xs) \<and> is_prefix xs f)"
-
-theorem L_idx:
- assumes L: "L w ws"
- shows "is_prefix ws f \<Longrightarrow> \<exists>i. emb (f i) w \<and> i < length ws" using L
-proof induct
- case (L0 v ws)
- hence "emb (f (length ws)) w" by simp
- moreover have "length ws < length (v # ws)" by simp
- ultimately show ?case by iprover
-next
- case (L1 ws v)
- then obtain i where emb: "emb (f i) w" and "i < length ws"
- by simp iprover
- hence "i < length (v # ws)" by simp
- with emb show ?case by iprover
-qed
-
-theorem good_idx:
- assumes good: "good ws"
- shows "is_prefix ws f \<Longrightarrow> \<exists>i j. emb (f i) (f j) \<and> i < j" using good
-proof induct
- case (good0 w ws)
- hence "w = f (length ws)" and "is_prefix ws f" by simp_all
- with good0 show ?case by (iprover dest: L_idx)
-next
- case (good1 ws w)
- thus ?case by simp
-qed
-
-theorem bar_idx:
- assumes bar: "bar ws"
- shows "is_prefix ws f \<Longrightarrow> \<exists>i j. emb (f i) (f j) \<and> i < j" using bar
-proof induct
- case (bar1 ws)
- thus ?case by (rule good_idx)
-next
- case (bar2 ws)
- hence "is_prefix (f (length ws) # ws) f" by simp
- thus ?case by (rule bar2)
-qed
-
-text {*
-Strong version: yields indices of words that can be embedded into each other.
-*}
-
-theorem higman_idx: "\<exists>(i::nat) j. emb (f i) (f j) \<and> i < j"
-proof (rule bar_idx)
- show "bar []" by (rule higman)
- show "is_prefix [] f" by simp
-qed
-
-text {*
-Weak version: only yield sequence containing words
-that can be embedded into each other.
-*}
-
-theorem good_prefix_lemma:
- assumes bar: "bar ws"
- shows "is_prefix ws f \<Longrightarrow> \<exists>vs. is_prefix vs f \<and> good vs" using bar
-proof induct
- case bar1
- thus ?case by iprover
-next
- case (bar2 ws)
- from bar2.prems have "is_prefix (f (length ws) # ws) f" by simp
- thus ?case by (iprover intro: bar2)
-qed
-
-theorem good_prefix: "\<exists>vs. is_prefix vs f \<and> good vs"
- using higman
- by (rule good_prefix_lemma) simp+
-
-subsection {* Extracting the program *}
-
-declare R.induct [ind_realizer]
-declare T.induct [ind_realizer]
-declare L.induct [ind_realizer]
-declare good.induct [ind_realizer]
-declare bar.induct [ind_realizer]
-
-extract higman_idx
-
-text {*
- Program extracted from the proof of @{text higman_idx}:
- @{thm [display] higman_idx_def [no_vars]}
- Corresponding correctness theorem:
- @{thm [display] higman_idx_correctness [no_vars]}
- Program extracted from the proof of @{text higman}:
- @{thm [display] higman_def [no_vars]}
- Program extracted from the proof of @{text prop1}:
- @{thm [display] prop1_def [no_vars]}
- Program extracted from the proof of @{text prop2}:
- @{thm [display] prop2_def [no_vars]}
- Program extracted from the proof of @{text prop3}:
- @{thm [display] prop3_def [no_vars]}
-*}
-
-
-subsection {* Some examples *}
-
-instantiation LT and TT :: default
-begin
-
-definition "default = L0 [] []"
-
-definition "default = T0 A [] [] [] R0"
-
-instance ..
-
-end
-
-function mk_word_aux :: "nat \<Rightarrow> Random.seed \<Rightarrow> letter list \<times> Random.seed" where
- "mk_word_aux k = exec {
- i \<leftarrow> Random.range 10;
- (if i > 7 \<and> k > 2 \<or> k > 1000 then return []
- else exec {
- let l = (if i mod 2 = 0 then A else B);
- ls \<leftarrow> mk_word_aux (Suc k);
- return (l # ls)
- })}"
-by pat_completeness auto
-termination by (relation "measure ((op -) 1001)") auto
-
-definition mk_word :: "Random.seed \<Rightarrow> letter list \<times> Random.seed" where
- "mk_word = mk_word_aux 0"
-
-primrec mk_word_s :: "nat \<Rightarrow> Random.seed \<Rightarrow> letter list \<times> Random.seed" where
- "mk_word_s 0 = mk_word"
- | "mk_word_s (Suc n) = exec {
- _ \<leftarrow> mk_word;
- mk_word_s n
- }"
-
-definition g1 :: "nat \<Rightarrow> letter list" where
- "g1 s = fst (mk_word_s s (20000, 1))"
-
-definition g2 :: "nat \<Rightarrow> letter list" where
- "g2 s = fst (mk_word_s s (50000, 1))"
-
-fun f1 :: "nat \<Rightarrow> letter list" where
- "f1 0 = [A, A]"
- | "f1 (Suc 0) = [B]"
- | "f1 (Suc (Suc 0)) = [A, B]"
- | "f1 _ = []"
-
-fun f2 :: "nat \<Rightarrow> letter list" where
- "f2 0 = [A, A]"
- | "f2 (Suc 0) = [B]"
- | "f2 (Suc (Suc 0)) = [B, A]"
- | "f2 _ = []"
-
-ML {*
-local
- val higman_idx = @{code higman_idx};
- val g1 = @{code g1};
- val g2 = @{code g2};
- val f1 = @{code f1};
- val f2 = @{code f2};
-in
- val (i1, j1) = higman_idx g1;
- val (v1, w1) = (g1 i1, g1 j1);
- val (i2, j2) = higman_idx g2;
- val (v2, w2) = (g2 i2, g2 j2);
- val (i3, j3) = higman_idx f1;
- val (v3, w3) = (f1 i3, f1 j3);
- val (i4, j4) = higman_idx f2;
- val (v4, w4) = (f2 i4, f2 j4);
-end;
-*}
-
-code_module Higman
-contains
- higman = higman_idx
-
-ML {*
-local open Higman in
-
-val a = 16807.0;
-val m = 2147483647.0;
-
-fun nextRand seed =
- let val t = a*seed
- in t - m * real (Real.floor(t/m)) end;
-
-fun mk_word seed l =
- let
- val r = nextRand seed;
- val i = Real.round (r / m * 10.0);
- in if i > 7 andalso l > 2 then (r, []) else
- apsnd (cons (if i mod 2 = 0 then A else B)) (mk_word r (l+1))
- end;
-
-fun f s zero = mk_word s 0
- | f s (Suc n) = f (fst (mk_word s 0)) n;
-
-val g1 = snd o (f 20000.0);
-
-val g2 = snd o (f 50000.0);
-
-fun f1 zero = [A,A]
- | f1 (Suc zero) = [B]
- | f1 (Suc (Suc zero)) = [A,B]
- | f1 _ = [];
-
-fun f2 zero = [A,A]
- | f2 (Suc zero) = [B]
- | f2 (Suc (Suc zero)) = [B,A]
- | f2 _ = [];
-
-val (i1, j1) = higman g1;
-val (v1, w1) = (g1 i1, g1 j1);
-val (i2, j2) = higman g2;
-val (v2, w2) = (g2 i2, g2 j2);
-val (i3, j3) = higman f1;
-val (v3, w3) = (f1 i3, f1 j3);
-val (i4, j4) = higman f2;
-val (v4, w4) = (f2 i4, f2 j4);
-
-end;
-*}
-
-end
--- a/src/HOL/Extraction/Pigeonhole.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,275 +0,0 @@
-(* Title: HOL/Extraction/Pigeonhole.thy
- Author: Stefan Berghofer, TU Muenchen
-*)
-
-header {* The pigeonhole principle *}
-
-theory Pigeonhole
-imports Util Efficient_Nat
-begin
-
-text {*
-We formalize two proofs of the pigeonhole principle, which lead
-to extracted programs of quite different complexity. The original
-formalization of these proofs in {\sc Nuprl} is due to
-Aleksey Nogin \cite{Nogin-ENTCS-2000}.
-
-This proof yields a polynomial program.
-*}
-
-theorem pigeonhole:
- "\<And>f. (\<And>i. i \<le> Suc n \<Longrightarrow> f i \<le> n) \<Longrightarrow> \<exists>i j. i \<le> Suc n \<and> j < i \<and> f i = f j"
-proof (induct n)
- case 0
- hence "Suc 0 \<le> Suc 0 \<and> 0 < Suc 0 \<and> f (Suc 0) = f 0" by simp
- thus ?case by iprover
-next
- case (Suc n)
- {
- fix k
- have
- "k \<le> Suc (Suc n) \<Longrightarrow>
- (\<And>i j. Suc k \<le> i \<Longrightarrow> i \<le> Suc (Suc n) \<Longrightarrow> j < i \<Longrightarrow> f i \<noteq> f j) \<Longrightarrow>
- (\<exists>i j. i \<le> k \<and> j < i \<and> f i = f j)"
- proof (induct k)
- case 0
- let ?f = "\<lambda>i. if f i = Suc n then f (Suc (Suc n)) else f i"
- have "\<not> (\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j)"
- proof
- assume "\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j"
- then obtain i j where i: "i \<le> Suc n" and j: "j < i"
- and f: "?f i = ?f j" by iprover
- from j have i_nz: "Suc 0 \<le> i" by simp
- from i have iSSn: "i \<le> Suc (Suc n)" by simp
- have S0SSn: "Suc 0 \<le> Suc (Suc n)" by simp
- show False
- proof cases
- assume fi: "f i = Suc n"
- show False
- proof cases
- assume fj: "f j = Suc n"
- from i_nz and iSSn and j have "f i \<noteq> f j" by (rule 0)
- moreover from fi have "f i = f j"
- by (simp add: fj [symmetric])
- ultimately show ?thesis ..
- next
- from i and j have "j < Suc (Suc n)" by simp
- with S0SSn and le_refl have "f (Suc (Suc n)) \<noteq> f j"
- by (rule 0)
- moreover assume "f j \<noteq> Suc n"
- with fi and f have "f (Suc (Suc n)) = f j" by simp
- ultimately show False ..
- qed
- next
- assume fi: "f i \<noteq> Suc n"
- show False
- proof cases
- from i have "i < Suc (Suc n)" by simp
- with S0SSn and le_refl have "f (Suc (Suc n)) \<noteq> f i"
- by (rule 0)
- moreover assume "f j = Suc n"
- with fi and f have "f (Suc (Suc n)) = f i" by simp
- ultimately show False ..
- next
- from i_nz and iSSn and j
- have "f i \<noteq> f j" by (rule 0)
- moreover assume "f j \<noteq> Suc n"
- with fi and f have "f i = f j" by simp
- ultimately show False ..
- qed
- qed
- qed
- moreover have "\<And>i. i \<le> Suc n \<Longrightarrow> ?f i \<le> n"
- proof -
- fix i assume "i \<le> Suc n"
- hence i: "i < Suc (Suc n)" by simp
- have "f (Suc (Suc n)) \<noteq> f i"
- by (rule 0) (simp_all add: i)
- moreover have "f (Suc (Suc n)) \<le> Suc n"
- by (rule Suc) simp
- moreover from i have "i \<le> Suc (Suc n)" by simp
- hence "f i \<le> Suc n" by (rule Suc)
- ultimately show "?thesis i"
- by simp
- qed
- hence "\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j"
- by (rule Suc)
- ultimately show ?case ..
- next
- case (Suc k)
- from search [OF nat_eq_dec] show ?case
- proof
- assume "\<exists>j<Suc k. f (Suc k) = f j"
- thus ?case by (iprover intro: le_refl)
- next
- assume nex: "\<not> (\<exists>j<Suc k. f (Suc k) = f j)"
- have "\<exists>i j. i \<le> k \<and> j < i \<and> f i = f j"
- proof (rule Suc)
- from Suc show "k \<le> Suc (Suc n)" by simp
- fix i j assume k: "Suc k \<le> i" and i: "i \<le> Suc (Suc n)"
- and j: "j < i"
- show "f i \<noteq> f j"
- proof cases
- assume eq: "i = Suc k"
- show ?thesis
- proof
- assume "f i = f j"
- hence "f (Suc k) = f j" by (simp add: eq)
- with nex and j and eq show False by iprover
- qed
- next
- assume "i \<noteq> Suc k"
- with k have "Suc (Suc k) \<le> i" by simp
- thus ?thesis using i and j by (rule Suc)
- qed
- qed
- thus ?thesis by (iprover intro: le_SucI)
- qed
- qed
- }
- note r = this
- show ?case by (rule r) simp_all
-qed
-
-text {*
-The following proof, although quite elegant from a mathematical point of view,
-leads to an exponential program:
-*}
-
-theorem pigeonhole_slow:
- "\<And>f. (\<And>i. i \<le> Suc n \<Longrightarrow> f i \<le> n) \<Longrightarrow> \<exists>i j. i \<le> Suc n \<and> j < i \<and> f i = f j"
-proof (induct n)
- case 0
- have "Suc 0 \<le> Suc 0" ..
- moreover have "0 < Suc 0" ..
- moreover from 0 have "f (Suc 0) = f 0" by simp
- ultimately show ?case by iprover
-next
- case (Suc n)
- from search [OF nat_eq_dec] show ?case
- proof
- assume "\<exists>j < Suc (Suc n). f (Suc (Suc n)) = f j"
- thus ?case by (iprover intro: le_refl)
- next
- assume "\<not> (\<exists>j < Suc (Suc n). f (Suc (Suc n)) = f j)"
- hence nex: "\<forall>j < Suc (Suc n). f (Suc (Suc n)) \<noteq> f j" by iprover
- let ?f = "\<lambda>i. if f i = Suc n then f (Suc (Suc n)) else f i"
- have "\<And>i. i \<le> Suc n \<Longrightarrow> ?f i \<le> n"
- proof -
- fix i assume i: "i \<le> Suc n"
- show "?thesis i"
- proof (cases "f i = Suc n")
- case True
- from i and nex have "f (Suc (Suc n)) \<noteq> f i" by simp
- with True have "f (Suc (Suc n)) \<noteq> Suc n" by simp
- moreover from Suc have "f (Suc (Suc n)) \<le> Suc n" by simp
- ultimately have "f (Suc (Suc n)) \<le> n" by simp
- with True show ?thesis by simp
- next
- case False
- from Suc and i have "f i \<le> Suc n" by simp
- with False show ?thesis by simp
- qed
- qed
- hence "\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j" by (rule Suc)
- then obtain i j where i: "i \<le> Suc n" and ji: "j < i" and f: "?f i = ?f j"
- by iprover
- have "f i = f j"
- proof (cases "f i = Suc n")
- case True
- show ?thesis
- proof (cases "f j = Suc n")
- assume "f j = Suc n"
- with True show ?thesis by simp
- next
- assume "f j \<noteq> Suc n"
- moreover from i ji nex have "f (Suc (Suc n)) \<noteq> f j" by simp
- ultimately show ?thesis using True f by simp
- qed
- next
- case False
- show ?thesis
- proof (cases "f j = Suc n")
- assume "f j = Suc n"
- moreover from i nex have "f (Suc (Suc n)) \<noteq> f i" by simp
- ultimately show ?thesis using False f by simp
- next
- assume "f j \<noteq> Suc n"
- with False f show ?thesis by simp
- qed
- qed
- moreover from i have "i \<le> Suc (Suc n)" by simp
- ultimately show ?thesis using ji by iprover
- qed
-qed
-
-extract pigeonhole pigeonhole_slow
-
-text {*
-The programs extracted from the above proofs look as follows:
-@{thm [display] pigeonhole_def}
-@{thm [display] pigeonhole_slow_def}
-The program for searching for an element in an array is
-@{thm [display,eta_contract=false] search_def}
-The correctness statement for @{term "pigeonhole"} is
-@{thm [display] pigeonhole_correctness [no_vars]}
-
-In order to analyze the speed of the above programs,
-we generate ML code from them.
-*}
-
-instantiation nat :: default
-begin
-
-definition "default = (0::nat)"
-
-instance ..
-
-end
-
-instantiation prod :: (default, default) default
-begin
-
-definition "default = (default, default)"
-
-instance ..
-
-end
-
-definition
- "test n u = pigeonhole n (\<lambda>m. m - 1)"
-definition
- "test' n u = pigeonhole_slow n (\<lambda>m. m - 1)"
-definition
- "test'' u = pigeonhole 8 (op ! [0, 1, 2, 3, 4, 5, 6, 3, 7, 8])"
-
-ML "timeit (@{code test} 10)"
-ML "timeit (@{code test'} 10)"
-ML "timeit (@{code test} 20)"
-ML "timeit (@{code test'} 20)"
-ML "timeit (@{code test} 25)"
-ML "timeit (@{code test'} 25)"
-ML "timeit (@{code test} 500)"
-ML "timeit @{code test''}"
-
-consts_code
- "default :: nat" ("{* 0::nat *}")
- "default :: nat \<times> nat" ("{* (0::nat, 0::nat) *}")
-
-code_module PH
-contains
- test = test
- test' = test'
- test'' = test''
-
-ML "timeit (PH.test 10)"
-ML "timeit (PH.test' 10)"
-ML "timeit (PH.test 20)"
-ML "timeit (PH.test' 20)"
-ML "timeit (PH.test 25)"
-ML "timeit (PH.test' 25)"
-ML "timeit (PH.test 500)"
-ML "timeit PH.test''"
-
-end
-
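The ML "timeit ..." invocations above benchmark the two extracted pigeonhole programs. As an illustration only (not part of this changeset), a rough stand-in for such a timing harness in plain Standard ML, built on the SML Basis Library, could look as follows; the pigeonhole function mentioned in the final comment is the hypothetical extracted program, not code defined here.

(* Sketch of a timeit-style harness: run a suspended computation and
   report the elapsed wall-clock time. *)
fun time_it (f : unit -> 'a) : 'a =
  let
    val timer = Timer.startRealTimer ()
    val result = f ()
  in
    print ("elapsed: " ^ Time.toString (Timer.checkRealTimer timer) ^ " s\n");
    result
  end;

(* Hypothetical usage, by analogy with the test definitions above:
   time_it (fn () => pigeonhole n (fn m => m - 1)) *)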
--- a/src/HOL/Extraction/QuotRem.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-(* Title: HOL/Extraction/QuotRem.thy
- Author: Stefan Berghofer, TU Muenchen
-*)
-
-header {* Quotient and remainder *}
-
-theory QuotRem
-imports Util
-begin
-
-text {* Derivation of quotient and remainder using program extraction. *}
-
-theorem division: "\<exists>r q. a = Suc b * q + r \<and> r \<le> b"
-proof (induct a)
- case 0
- have "0 = Suc b * 0 + 0 \<and> 0 \<le> b" by simp
- thus ?case by iprover
-next
- case (Suc a)
- then obtain r q where I: "a = Suc b * q + r" and "r \<le> b" by iprover
- from nat_eq_dec show ?case
- proof
- assume "r = b"
- with I have "Suc a = Suc b * (Suc q) + 0 \<and> 0 \<le> b" by simp
- thus ?case by iprover
- next
- assume "r \<noteq> b"
- with `r \<le> b` have "r < b" by (simp add: order_less_le)
- with I have "Suc a = Suc b * q + (Suc r) \<and> (Suc r) \<le> b" by simp
- thus ?case by iprover
- qed
-qed
-
-extract division
-
-text {*
- The program extracted from the above proof looks as follows
- @{thm [display] division_def [no_vars]}
- The corresponding correctness theorem is
- @{thm [display] division_correctness [no_vars]}
-*}
-
-lemma "division 9 2 = (0, 3)" by evaluation
-lemma "division 9 2 = (0, 3)" by eval
-
-end
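The division theorem just deleted is proved by induction on a, and its computational content is a simple structural recursion: each successor step increments the remainder until it reaches b, then resets it and increments the quotient. A hand-written SML rendering of that recursion, offered as a sketch only (the term Isabelle actually extracts may differ in detail):

(* division a b computes (r, q) with a = Suc b * q + r and r <= b. *)
fun division 0 b = (0, 0)
  | division a b =
      let val (r, q) = division (a - 1) b
      in if r = b then (0, q + 1) else (r + 1, q) end;

(* For example, division 9 2 evaluates to (0, 3), matching the two lemmas above. *)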
--- a/src/HOL/Extraction/ROOT.ML Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-(* Examples for program extraction in Higher-Order Logic *)
-
-no_document use_thys ["Efficient_Nat", "Monad_Syntax", "~~/src/HOL/Number_Theory/Primes"];
-share_common_data ();
-no_document use_thys ["~~/src/HOL/Number_Theory/UniqueFactorization"];
-use_thys ["Greatest_Common_Divisor", "Warshall", "Higman", "Pigeonhole", "Euclid"];
--- a/src/HOL/Extraction/Util.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-(* Title: HOL/Extraction/Util.thy
- Author: Stefan Berghofer, TU Muenchen
-*)
-
-header {* Auxiliary lemmas used in program extraction examples *}
-
-theory Util
-imports Main
-begin
-
-text {*
-Decidability of equality on natural numbers.
-*}
-
-lemma nat_eq_dec: "\<And>n::nat. m = n \<or> m \<noteq> n"
- apply (induct m)
- apply (case_tac n)
- apply (case_tac [3] n)
- apply (simp only: nat.simps, iprover?)+
- done
-
-text {*
-Well-founded induction on natural numbers, derived using the standard
-structural induction rule.
-*}
-
-lemma nat_wf_ind:
- assumes R: "\<And>x::nat. (\<And>y. y < x \<Longrightarrow> P y) \<Longrightarrow> P x"
- shows "P z"
-proof (rule R)
- show "\<And>y. y < z \<Longrightarrow> P y"
- proof (induct z)
- case 0
- thus ?case by simp
- next
- case (Suc n y)
- from nat_eq_dec show ?case
- proof
- assume ny: "n = y"
- have "P n"
- by (rule R) (rule Suc)
- with ny show ?case by simp
- next
- assume "n \<noteq> y"
- with Suc have "y < n" by simp
- thus ?case by (rule Suc)
- qed
- qed
-qed
-
-text {*
-Bounded search for a natural number satisfying a decidable predicate.
-*}
-
-lemma search:
- assumes dec: "\<And>x::nat. P x \<or> \<not> P x"
- shows "(\<exists>x<y. P x) \<or> \<not> (\<exists>x<y. P x)"
-proof (induct y)
- case 0 show ?case by simp
-next
- case (Suc z)
- thus ?case
- proof
- assume "\<exists>x<z. P x"
- then obtain x where le: "x < z" and P: "P x" by iprover
- from le have "x < Suc z" by simp
- with P show ?case by iprover
- next
- assume nex: "\<not> (\<exists>x<z. P x)"
- from dec show ?case
- proof
- assume P: "P z"
- have "z < Suc z" by simp
- with P show ?thesis by iprover
- next
- assume nP: "\<not> P z"
- have "\<not> (\<exists>x<Suc z. P x)"
- proof
- assume "\<exists>x<Suc z. P x"
- then obtain x where le: "x < Suc z" and P: "P x" by iprover
- have "x < z"
- proof (cases "x = z")
- case True
- with nP and P show ?thesis by simp
- next
- case False
- with le show ?thesis by simp
- qed
- with P have "\<exists>x<z. P x" by iprover
- with nex show False ..
- qed
- thus ?case by iprover
- qed
- qed
-qed
-
-end
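The search lemma in the theory deleted above carries the program content of a bounded linear search: either it produces some x < y satisfying the decidable predicate, or it certifies that no such x exists. A direct SML sketch of that search (illustrative; not the literal extracted term):

(* Bounded search below y for a witness of a decidable predicate dec,
   mirroring the case analysis in the Suc step of the "search" lemma. *)
fun search dec 0 = NONE
  | search dec y =
      (case search dec (y - 1) of
         SOME x => SOME x
       | NONE => if dec (y - 1) then SOME (y - 1) else NONE);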
--- a/src/HOL/Extraction/Warshall.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,261 +0,0 @@
-(* Title: HOL/Extraction/Warshall.thy
- Author: Stefan Berghofer, TU Muenchen
-*)
-
-header {* Warshall's algorithm *}
-
-theory Warshall
-imports Main
-begin
-
-text {*
- Derivation of Warshall's algorithm using program extraction,
- based on Berger, Schwichtenberg and Seisenberger \cite{Berger-JAR-2001}.
-*}
-
-datatype b = T | F
-
-primrec
- is_path' :: "('a \<Rightarrow> 'a \<Rightarrow> b) \<Rightarrow> 'a \<Rightarrow> 'a list \<Rightarrow> 'a \<Rightarrow> bool"
-where
- "is_path' r x [] z = (r x z = T)"
- | "is_path' r x (y # ys) z = (r x y = T \<and> is_path' r y ys z)"
-
-definition
- is_path :: "(nat \<Rightarrow> nat \<Rightarrow> b) \<Rightarrow> (nat * nat list * nat) \<Rightarrow>
- nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
-where
- "is_path r p i j k \<longleftrightarrow> fst p = j \<and> snd (snd p) = k \<and>
- list_all (\<lambda>x. x < i) (fst (snd p)) \<and>
- is_path' r (fst p) (fst (snd p)) (snd (snd p))"
-
-definition
- conc :: "('a * 'a list * 'a) \<Rightarrow> ('a * 'a list * 'a) \<Rightarrow> ('a * 'a list * 'a)"
-where
- "conc p q = (fst p, fst (snd p) @ fst q # fst (snd q), snd (snd q))"
-
-theorem is_path'_snoc [simp]:
- "\<And>x. is_path' r x (ys @ [y]) z = (is_path' r x ys y \<and> r y z = T)"
- by (induct ys) simp+
-
-theorem list_all_scoc [simp]: "list_all P (xs @ [x]) \<longleftrightarrow> P x \<and> list_all P xs"
- by (induct xs, simp+, iprover)
-
-theorem list_all_lemma:
- "list_all P xs \<Longrightarrow> (\<And>x. P x \<Longrightarrow> Q x) \<Longrightarrow> list_all Q xs"
-proof -
- assume PQ: "\<And>x. P x \<Longrightarrow> Q x"
- show "list_all P xs \<Longrightarrow> list_all Q xs"
- proof (induct xs)
- case Nil
- show ?case by simp
- next
- case (Cons y ys)
- hence Py: "P y" by simp
- from Cons have Pys: "list_all P ys" by simp
- show ?case
- by simp (rule conjI PQ Py Cons Pys)+
- qed
-qed
-
-theorem lemma1: "\<And>p. is_path r p i j k \<Longrightarrow> is_path r p (Suc i) j k"
- apply (unfold is_path_def)
- apply (simp cong add: conj_cong add: split_paired_all)
- apply (erule conjE)+
- apply (erule list_all_lemma)
- apply simp
- done
-
-theorem lemma2: "\<And>p. is_path r p 0 j k \<Longrightarrow> r j k = T"
- apply (unfold is_path_def)
- apply (simp cong add: conj_cong add: split_paired_all)
- apply (case_tac "aa")
- apply simp+
- done
-
-theorem is_path'_conc: "is_path' r j xs i \<Longrightarrow> is_path' r i ys k \<Longrightarrow>
- is_path' r j (xs @ i # ys) k"
-proof -
- assume pys: "is_path' r i ys k"
- show "\<And>j. is_path' r j xs i \<Longrightarrow> is_path' r j (xs @ i # ys) k"
- proof (induct xs)
- case (Nil j)
- hence "r j i = T" by simp
- with pys show ?case by simp
- next
- case (Cons z zs j)
- hence jzr: "r j z = T" by simp
- from Cons have pzs: "is_path' r z zs i" by simp
- show ?case
- by simp (rule conjI jzr Cons pzs)+
- qed
-qed
-
-theorem lemma3:
- "\<And>p q. is_path r p i j i \<Longrightarrow> is_path r q i i k \<Longrightarrow>
- is_path r (conc p q) (Suc i) j k"
- apply (unfold is_path_def conc_def)
- apply (simp cong add: conj_cong add: split_paired_all)
- apply (erule conjE)+
- apply (rule conjI)
- apply (erule list_all_lemma)
- apply simp
- apply (rule conjI)
- apply (erule list_all_lemma)
- apply simp
- apply (rule is_path'_conc)
- apply assumption+
- done
-
-theorem lemma5:
- "\<And>p. is_path r p (Suc i) j k \<Longrightarrow> ~ is_path r p i j k \<Longrightarrow>
- (\<exists>q. is_path r q i j i) \<and> (\<exists>q'. is_path r q' i i k)"
-proof (simp cong add: conj_cong add: split_paired_all is_path_def, (erule conjE)+)
- fix xs
- assume asms:
- "list_all (\<lambda>x. x < Suc i) xs"
- "is_path' r j xs k"
- "\<not> list_all (\<lambda>x. x < i) xs"
- show "(\<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r j ys i) \<and>
- (\<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r i ys k)"
- proof
- show "\<And>j. list_all (\<lambda>x. x < Suc i) xs \<Longrightarrow> is_path' r j xs k \<Longrightarrow>
- \<not> list_all (\<lambda>x. x < i) xs \<Longrightarrow>
- \<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r j ys i" (is "PROP ?ih xs")
- proof (induct xs)
- case Nil
- thus ?case by simp
- next
- case (Cons a as j)
- show ?case
- proof (cases "a=i")
- case True
- show ?thesis
- proof
- from True and Cons have "r j i = T" by simp
- thus "list_all (\<lambda>x. x < i) [] \<and> is_path' r j [] i" by simp
- qed
- next
- case False
- have "PROP ?ih as" by (rule Cons)
- then obtain ys where ys: "list_all (\<lambda>x. x < i) ys \<and> is_path' r a ys i"
- proof
- from Cons show "list_all (\<lambda>x. x < Suc i) as" by simp
- from Cons show "is_path' r a as k" by simp
- from Cons and False show "\<not> list_all (\<lambda>x. x < i) as" by (simp)
- qed
- show ?thesis
- proof
- from Cons False ys
- show "list_all (\<lambda>x. x<i) (a#ys) \<and> is_path' r j (a#ys) i" by simp
- qed
- qed
- qed
- show "\<And>k. list_all (\<lambda>x. x < Suc i) xs \<Longrightarrow> is_path' r j xs k \<Longrightarrow>
- \<not> list_all (\<lambda>x. x < i) xs \<Longrightarrow>
- \<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r i ys k" (is "PROP ?ih xs")
- proof (induct xs rule: rev_induct)
- case Nil
- thus ?case by simp
- next
- case (snoc a as k)
- show ?case
- proof (cases "a=i")
- case True
- show ?thesis
- proof
- from True and snoc have "r i k = T" by simp
- thus "list_all (\<lambda>x. x < i) [] \<and> is_path' r i [] k" by simp
- qed
- next
- case False
- have "PROP ?ih as" by (rule snoc)
- then obtain ys where ys: "list_all (\<lambda>x. x < i) ys \<and> is_path' r i ys a"
- proof
- from snoc show "list_all (\<lambda>x. x < Suc i) as" by simp
- from snoc show "is_path' r j as a" by simp
- from snoc and False show "\<not> list_all (\<lambda>x. x < i) as" by simp
- qed
- show ?thesis
- proof
- from snoc False ys
- show "list_all (\<lambda>x. x < i) (ys @ [a]) \<and> is_path' r i (ys @ [a]) k"
- by simp
- qed
- qed
- qed
- qed (rule asms)+
-qed
-
-theorem lemma5':
- "\<And>p. is_path r p (Suc i) j k \<Longrightarrow> \<not> is_path r p i j k \<Longrightarrow>
- \<not> (\<forall>q. \<not> is_path r q i j i) \<and> \<not> (\<forall>q'. \<not> is_path r q' i i k)"
- by (iprover dest: lemma5)
-
-theorem warshall:
- "\<And>j k. \<not> (\<exists>p. is_path r p i j k) \<or> (\<exists>p. is_path r p i j k)"
-proof (induct i)
- case (0 j k)
- show ?case
- proof (cases "r j k")
- assume "r j k = T"
- hence "is_path r (j, [], k) 0 j k"
- by (simp add: is_path_def)
- hence "\<exists>p. is_path r p 0 j k" ..
- thus ?thesis ..
- next
- assume "r j k = F"
- hence "r j k ~= T" by simp
- hence "\<not> (\<exists>p. is_path r p 0 j k)"
- by (iprover dest: lemma2)
- thus ?thesis ..
- qed
-next
- case (Suc i j k)
- thus ?case
- proof
- assume h1: "\<not> (\<exists>p. is_path r p i j k)"
- from Suc show ?case
- proof
- assume "\<not> (\<exists>p. is_path r p i j i)"
- with h1 have "\<not> (\<exists>p. is_path r p (Suc i) j k)"
- by (iprover dest: lemma5')
- thus ?case ..
- next
- assume "\<exists>p. is_path r p i j i"
- then obtain p where h2: "is_path r p i j i" ..
- from Suc show ?case
- proof
- assume "\<not> (\<exists>p. is_path r p i i k)"
- with h1 have "\<not> (\<exists>p. is_path r p (Suc i) j k)"
- by (iprover dest: lemma5')
- thus ?case ..
- next
- assume "\<exists>q. is_path r q i i k"
- then obtain q where "is_path r q i i k" ..
- with h2 have "is_path r (conc p q) (Suc i) j k"
- by (rule lemma3)
- hence "\<exists>pq. is_path r pq (Suc i) j k" ..
- thus ?case ..
- qed
- qed
- next
- assume "\<exists>p. is_path r p i j k"
- hence "\<exists>p. is_path r p (Suc i) j k"
- by (iprover intro: lemma1)
- thus ?case ..
- qed
-qed
-
-extract warshall
-
-text {*
- The program extracted from the above proof looks as follows
- @{thm [display, eta_contract=false] warshall_def [no_vars]}
- The corresponding correctness theorem is
- @{thm [display] warshall_correctness [no_vars]}
-*}
-
-ML "@{code warshall}"
-
-end
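Stripped of the witness information, the induction in the warshall theorem above is the familiar recursion of Warshall's algorithm: a path from j to k whose intermediate nodes are below Suc i exists iff such a path exists with nodes below i, or there are paths from j to i and from i to k with nodes below i. A boolean SML sketch of that recursion (the extracted program additionally returns the witnessing path, which this sketch drops):

(* warshall r i j k decides whether some path from j to k uses only
   intermediate nodes < i, where r j k is true iff there is an edge j -> k. *)
fun warshall r 0 j k = r j k
  | warshall r i j k =
      warshall r (i - 1) j k
      orelse (warshall r (i - 1) j (i - 1) andalso warshall r (i - 1) (i - 1) k);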
--- a/src/HOL/Extraction/document/root.bib Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-@Article{Berger-JAR-2001,
- author = {Ulrich Berger and Helmut Schwichtenberg and Monika Seisenberger},
- title = {The {Warshall} Algorithm and {Dickson's} Lemma: Two
- Examples of Realistic Program Extraction},
- journal = {Journal of Automated Reasoning},
- publisher = {Kluwer Academic Publishers},
- year = 2001,
- volume = 26,
- pages = {205--221}
-}
-
-@TechReport{Coquand93,
- author = {Thierry Coquand and Daniel Fridlender},
- title = {A proof of {Higman's} lemma by structural induction},
- institution = {Chalmers University},
- year = 1993,
- month = {November}
-}
-
-@InProceedings{Nogin-ENTCS-2000,
- author = {Aleksey Nogin},
- title = {Writing constructive proofs yielding efficient extracted programs},
- booktitle = {Proceedings of the Workshop on Type-Theoretic Languages: Proof Search and Semantics},
- year = 2000,
- editor = {Didier Galmiche},
- volume = 37,
- series = {Electronic Notes in Theoretical Computer Science},
- publisher = {Elsevier Science Publishers}
-}
-
-@Article{Wenzel-Wiedijk-JAR2002,
- author = {Markus Wenzel and Freek Wiedijk},
- title = {A comparison of the mathematical proof languages {M}izar and {I}sar},
- journal = {Journal of Automated Reasoning},
- year = 2002,
- volume = 29,
- number = {3-4},
- pages = {389--411}

-}
--- a/src/HOL/Extraction/document/root.tex Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-\documentclass[11pt,a4paper]{article}
-\usepackage{isabelle,isabellesym}
-\usepackage{pdfsetup}
-
-\urlstyle{rm}
-\isabellestyle{it}
-
-\begin{document}
-
-\title{Examples for program extraction in Higher-Order Logic}
-\author{Stefan Berghofer}
-\maketitle
-
-\nocite{Berger-JAR-2001,Coquand93}
-
-\tableofcontents
-\newcommand{\isasymbind}{\isamath{\mathbin{>\!\!\!>\mkern-6.7mu=}}}
-\newcommand{\isasymthen}{\isamath{\mathbin{>\!\!\!>}}}
-
-\parindent 0pt\parskip 0.5ex
-
-\input{session}
-
-\bibliographystyle{abbrv}
-\bibliography{root}
-
-\end{document}
--- a/src/HOL/HOL.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/HOL.thy Mon Sep 06 19:23:54 2010 +0200
@@ -2089,47 +2089,47 @@
fun smp_tac j = EVERY'[dresolve_tac (smp j), atac];
end;
-val all_conj_distrib = thm "all_conj_distrib";
-val all_simps = thms "all_simps";
-val atomize_not = thm "atomize_not";
-val case_split = thm "case_split";
-val cases_simp = thm "cases_simp";
-val choice_eq = thm "choice_eq"
-val cong = thm "cong"
-val conj_comms = thms "conj_comms";
-val conj_cong = thm "conj_cong";
-val de_Morgan_conj = thm "de_Morgan_conj";
-val de_Morgan_disj = thm "de_Morgan_disj";
-val disj_assoc = thm "disj_assoc";
-val disj_comms = thms "disj_comms";
-val disj_cong = thm "disj_cong";
-val eq_ac = thms "eq_ac";
-val eq_cong2 = thm "eq_cong2"
-val Eq_FalseI = thm "Eq_FalseI";
-val Eq_TrueI = thm "Eq_TrueI";
-val Ex1_def = thm "Ex1_def"
-val ex_disj_distrib = thm "ex_disj_distrib";
-val ex_simps = thms "ex_simps";
-val if_cancel = thm "if_cancel";
-val if_eq_cancel = thm "if_eq_cancel";
-val if_False = thm "if_False";
-val iff_conv_conj_imp = thm "iff_conv_conj_imp";
-val iff = thm "iff"
-val if_splits = thms "if_splits";
-val if_True = thm "if_True";
-val if_weak_cong = thm "if_weak_cong"
-val imp_all = thm "imp_all";
-val imp_cong = thm "imp_cong";
-val imp_conjL = thm "imp_conjL";
-val imp_conjR = thm "imp_conjR";
-val imp_conv_disj = thm "imp_conv_disj";
-val simp_implies_def = thm "simp_implies_def";
-val simp_thms = thms "simp_thms";
-val split_if = thm "split_if";
-val the1_equality = thm "the1_equality"
-val theI = thm "theI"
-val theI' = thm "theI'"
-val True_implies_equals = thm "True_implies_equals";
+val all_conj_distrib = @{thm all_conj_distrib};
+val all_simps = @{thms all_simps};
+val atomize_not = @{thm atomize_not};
+val case_split = @{thm case_split};
+val cases_simp = @{thm cases_simp};
+val choice_eq = @{thm choice_eq};
+val cong = @{thm cong};
+val conj_comms = @{thms conj_comms};
+val conj_cong = @{thm conj_cong};
+val de_Morgan_conj = @{thm de_Morgan_conj};
+val de_Morgan_disj = @{thm de_Morgan_disj};
+val disj_assoc = @{thm disj_assoc};
+val disj_comms = @{thms disj_comms};
+val disj_cong = @{thm disj_cong};
+val eq_ac = @{thms eq_ac};
+val eq_cong2 = @{thm eq_cong2}
+val Eq_FalseI = @{thm Eq_FalseI};
+val Eq_TrueI = @{thm Eq_TrueI};
+val Ex1_def = @{thm Ex1_def};
+val ex_disj_distrib = @{thm ex_disj_distrib};
+val ex_simps = @{thms ex_simps};
+val if_cancel = @{thm if_cancel};
+val if_eq_cancel = @{thm if_eq_cancel};
+val if_False = @{thm if_False};
+val iff_conv_conj_imp = @{thm iff_conv_conj_imp};
+val iff = @{thm iff};
+val if_splits = @{thms if_splits};
+val if_True = @{thm if_True};
+val if_weak_cong = @{thm if_weak_cong};
+val imp_all = @{thm imp_all};
+val imp_cong = @{thm imp_cong};
+val imp_conjL = @{thm imp_conjL};
+val imp_conjR = @{thm imp_conjR};
+val imp_conv_disj = @{thm imp_conv_disj};
+val simp_implies_def = @{thm simp_implies_def};
+val simp_thms = @{thms simp_thms};
+val split_if = @{thm split_if};
+val the1_equality = @{thm the1_equality};
+val theI = @{thm theI};
+val theI' = @{thm theI'};
+val True_implies_equals = @{thm True_implies_equals};
val nnf_conv = Simplifier.rewrite (HOL_basic_ss addsimps simp_thms @ @{thms "nnf_simps"})
*}
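The pattern applied throughout this hunk replaces dynamic lookup of theorems by name, thm "foo", which is resolved against the ML context only when the code runs, with the antiquotation @{thm foo}, which is resolved when the enclosing ML block is compiled, so a misspelled or renamed theorem is reported immediately. Schematically:

(* old style: string lookup at run time *)
val conj_cong = thm "conj_cong";
(* new style: checked statically when the ML block is compiled *)
val conj_cong = @{thm conj_cong};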
--- a/src/HOL/Hoare_Parallel/Mul_Gar_Coll.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Hoare_Parallel/Mul_Gar_Coll.thy Mon Sep 06 19:23:54 2010 +0200
@@ -1199,15 +1199,15 @@
apply(case_tac [!] "M x!(T (Muts x ! j))=Black")
apply(simp_all add:Graph10)
--{* 47 subgoals left *}
-apply(tactic {* TRYALL(EVERY'[REPEAT o (rtac disjI2),etac (thm "subset_psubset_trans"),etac (thm "Graph11"),force_tac @{clasimpset}]) *})
+apply(tactic {* TRYALL(EVERY'[REPEAT o (rtac disjI2),etac @{thm subset_psubset_trans}, etac @{thm Graph11},force_tac @{clasimpset}]) *})
--{* 41 subgoals left *}
apply(tactic {* TRYALL(EVERY'[rtac disjI2, rtac disjI1, etac @{thm le_trans}, force_tac (@{claset},@{simpset} addsimps [@{thm Queue_def}, @{thm less_Suc_eq_le}, @{thm le_length_filter_update}])]) *})
--{* 35 subgoals left *}
-apply(tactic {* TRYALL(EVERY'[rtac disjI2,rtac disjI1,etac (thm "psubset_subset_trans"),rtac (thm "Graph9"),force_tac @{clasimpset}]) *})
+apply(tactic {* TRYALL(EVERY'[rtac disjI2,rtac disjI1,etac @{thm psubset_subset_trans},rtac @{thm Graph9},force_tac @{clasimpset}]) *})
--{* 31 subgoals left *}
-apply(tactic {* TRYALL(EVERY'[rtac disjI2,rtac disjI1,etac (thm "subset_psubset_trans"),etac (thm "Graph11"),force_tac @{clasimpset}]) *})
+apply(tactic {* TRYALL(EVERY'[rtac disjI2,rtac disjI1,etac @{thm subset_psubset_trans},etac @{thm Graph11},force_tac @{clasimpset}]) *})
--{* 29 subgoals left *}
-apply(tactic {* TRYALL(EVERY'[REPEAT o (rtac disjI2),etac (thm "subset_psubset_trans"),etac (thm "subset_psubset_trans"),etac (thm "Graph11"),force_tac @{clasimpset}]) *})
+apply(tactic {* TRYALL(EVERY'[REPEAT o (rtac disjI2),etac @{thm subset_psubset_trans},etac @{thm subset_psubset_trans},etac @{thm Graph11},force_tac @{clasimpset}]) *})
--{* 25 subgoals left *}
apply(tactic {* TRYALL(EVERY'[rtac disjI2, rtac disjI2, rtac disjI1, etac @{thm le_trans}, force_tac (@{claset},@{simpset} addsimps [@{thm Queue_def}, @{thm less_Suc_eq_le}, @{thm le_length_filter_update}])]) *})
--{* 10 subgoals left *}
--- a/src/HOL/Hoare_Parallel/OG_Tactics.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Hoare_Parallel/OG_Tactics.thy Mon Sep 06 19:23:54 2010 +0200
@@ -273,11 +273,11 @@
lemmas ParallelConseq_list = INTER_def Collect_conj_eq length_map length_upt length_append list_length
ML {*
-val before_interfree_simp_tac = (simp_tac (HOL_basic_ss addsimps [thm "com.simps", thm "post.simps"]))
+val before_interfree_simp_tac = simp_tac (HOL_basic_ss addsimps [@{thm com.simps}, @{thm post.simps}])
-val interfree_simp_tac = (asm_simp_tac (HOL_ss addsimps [@{thm split}, thm "ball_Un", thm "ball_empty"]@(thms "my_simp_list")))
+val interfree_simp_tac = asm_simp_tac (HOL_ss addsimps [@{thm split}, @{thm ball_Un}, @{thm ball_empty}] @ @{thms my_simp_list})
-val ParallelConseq = (simp_tac (HOL_basic_ss addsimps (thms "ParallelConseq_list")@(thms "my_simp_list")))
+val ParallelConseq = simp_tac (HOL_basic_ss addsimps @{thms ParallelConseq_list} @ @{thms my_simp_list})
*}
text {* The following tactic applies @{text tac} to each conjunct in a
--- a/src/HOL/IMPP/Hoare.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/IMPP/Hoare.thy Mon Sep 06 19:23:54 2010 +0200
@@ -218,7 +218,7 @@
apply (rule hoare_derivs.conseq, intro strip, tactic "smp_tac 2 1", clarify, tactic "smp_tac 1 1",rule exI, rule exI, erule (1) conjI)
prefer 7
apply (rule_tac hoare_derivs.Body, drule_tac spec, erule_tac mp, fast)
-apply (tactic {* ALLGOALS (resolve_tac ((funpow 5 tl) (thms "hoare_derivs.intros")) THEN_ALL_NEW (fast_tac @{claset})) *})
+apply (tactic {* ALLGOALS (resolve_tac ((funpow 5 tl) @{thms hoare_derivs.intros}) THEN_ALL_NEW (fast_tac @{claset})) *})
done
lemma weak_Body: "G|-{P}. the (body pn) .{Q} ==> G|-{P}. BODY pn .{Q}"
@@ -278,7 +278,7 @@
lemma hoare_sound: "G||-ts ==> G||=ts"
apply (erule hoare_derivs.induct)
-apply (tactic {* TRYALL (eresolve_tac [thm "Loop_sound_lemma", thm "Body_sound_lemma"] THEN_ALL_NEW atac) *})
+apply (tactic {* TRYALL (eresolve_tac [@{thm Loop_sound_lemma}, @{thm Body_sound_lemma}] THEN_ALL_NEW atac) *})
apply (unfold hoare_valids_def)
apply blast
apply blast
@@ -349,7 +349,7 @@
rule_tac [3] P1 = "%Z' s. s= (Z[Loc loc::=fun Z])" in hoare_derivs.Local [THEN conseq1],
erule_tac [3] conseq12)
apply (erule_tac [5] hoare_derivs.Comp, erule_tac [5] conseq12)
-apply (tactic {* (rtac (thm "hoare_derivs.If") THEN_ALL_NEW etac (thm "conseq12")) 6 *})
+apply (tactic {* (rtac @{thm hoare_derivs.If} THEN_ALL_NEW etac @{thm conseq12}) 6 *})
apply (rule_tac [8] hoare_derivs.Loop [THEN conseq2], erule_tac [8] conseq12)
apply auto
done
--- a/src/HOL/IMPP/Natural.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/IMPP/Natural.thy Mon Sep 06 19:23:54 2010 +0200
@@ -114,7 +114,7 @@
lemma evaln_evalc: "<c,s> -n-> t ==> <c,s> -c-> t"
apply (erule evaln.induct)
-apply (tactic {* ALLGOALS (resolve_tac (thms "evalc.intros") THEN_ALL_NEW atac) *})
+apply (tactic {* ALLGOALS (resolve_tac @{thms evalc.intros} THEN_ALL_NEW atac) *})
done
lemma Suc_le_D_lemma: "[| Suc n <= m'; (!!m. n <= m ==> P (Suc m)) |] ==> P m'"
@@ -124,8 +124,8 @@
lemma evaln_nonstrict [rule_format]: "<c,s> -n-> t ==> !m. n<=m --> <c,s> -m-> t"
apply (erule evaln.induct)
-apply (tactic {* ALLGOALS (EVERY'[strip_tac,TRY o etac (thm "Suc_le_D_lemma"), REPEAT o smp_tac 1]) *})
-apply (tactic {* ALLGOALS (resolve_tac (thms "evaln.intros") THEN_ALL_NEW atac) *})
+apply (tactic {* ALLGOALS (EVERY'[strip_tac,TRY o etac @{thm Suc_le_D_lemma}, REPEAT o smp_tac 1]) *})
+apply (tactic {* ALLGOALS (resolve_tac @{thms evaln.intros} THEN_ALL_NEW atac) *})
done
lemma evaln_Suc: "<c,s> -n-> s' ==> <c,s> -Suc n-> s'"
@@ -142,8 +142,8 @@
lemma evalc_evaln: "<c,s> -c-> t ==> ? n. <c,s> -n-> t"
apply (erule evalc.induct)
apply (tactic {* ALLGOALS (REPEAT o etac exE) *})
-apply (tactic {* TRYALL (EVERY'[datac (thm "evaln_max2") 1, REPEAT o eresolve_tac [exE, conjE]]) *})
-apply (tactic {* ALLGOALS (rtac exI THEN' resolve_tac (thms "evaln.intros") THEN_ALL_NEW atac) *})
+apply (tactic {* TRYALL (EVERY' [datac @{thm evaln_max2} 1, REPEAT o eresolve_tac [exE, conjE]]) *})
+apply (tactic {* ALLGOALS (rtac exI THEN' resolve_tac @{thms evaln.intros} THEN_ALL_NEW atac) *})
done
lemma eval_eq: "<c,s> -c-> t = (? n. <c,s> -n-> t)"
--- a/src/HOL/IOA/Solve.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/IOA/Solve.thy Mon Sep 06 19:23:54 2010 +0200
@@ -146,7 +146,7 @@
apply (simp (no_asm) add: conj_disj_distribR cong add: conj_cong split add: split_if)
apply (tactic {*
REPEAT((resolve_tac [conjI,impI] 1 ORELSE etac conjE 1) THEN
- asm_full_simp_tac(@{simpset} addsimps[thm "comp1_reachable", thm "comp2_reachable"]) 1) *})
+ asm_full_simp_tac(@{simpset} addsimps [@{thm comp1_reachable}, @{thm comp2_reachable}]) 1) *})
done
--- a/src/HOL/Import/HOL/ROOT.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Import/HOL/ROOT.ML Mon Sep 06 19:23:54 2010 +0200
@@ -1,4 +1,2 @@
-
use_thy "~~/src/HOL/Old_Number_Theory/Primes";
-setmp_noncritical quick_and_dirty true use_thy "HOL4Prob";
-setmp_noncritical quick_and_dirty true use_thy "HOL4";
+setmp_noncritical quick_and_dirty true use_thys ["HOL4Prob", "HOL4"];
--- a/src/HOL/Import/hol4rews.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Import/hol4rews.ML Mon Sep 06 19:23:54 2010 +0200
@@ -156,13 +156,10 @@
val hol4_debug = Unsynchronized.ref false
fun message s = if !hol4_debug then writeln s else ()
-local
- val eq_reflection = thm "eq_reflection"
-in
fun add_hol4_rewrite (Context.Theory thy, th) =
let
val _ = message "Adding HOL4 rewrite"
- val th1 = th RS eq_reflection
+ val th1 = th RS @{thm eq_reflection}
val current_rews = HOL4Rewrites.get thy
val new_rews = insert Thm.eq_thm th1 current_rews
val updated_thy = HOL4Rewrites.put new_rews thy
@@ -170,7 +167,6 @@
(Context.Theory updated_thy,th1)
end
| add_hol4_rewrite (context, th) = (context, th RS eq_reflection);
-end
fun ignore_hol4 bthy bthm thy =
let
--- a/src/HOL/Import/proof_kernel.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Import/proof_kernel.ML Mon Sep 06 19:23:54 2010 +0200
@@ -977,28 +977,28 @@
| NONE => raise ERR "uniq_compose" "No result"
end
-val reflexivity_thm = thm "refl"
-val substitution_thm = thm "subst"
-val mp_thm = thm "mp"
-val imp_antisym_thm = thm "light_imp_as"
-val disch_thm = thm "impI"
-val ccontr_thm = thm "ccontr"
+val reflexivity_thm = @{thm refl}
+val substitution_thm = @{thm subst}
+val mp_thm = @{thm mp}
+val imp_antisym_thm = @{thm light_imp_as}
+val disch_thm = @{thm impI}
+val ccontr_thm = @{thm ccontr}
-val meta_eq_to_obj_eq_thm = thm "meta_eq_to_obj_eq"
+val meta_eq_to_obj_eq_thm = @{thm meta_eq_to_obj_eq}
-val gen_thm = thm "HOLallI"
-val choose_thm = thm "exE"
-val exists_thm = thm "exI"
-val conj_thm = thm "conjI"
-val conjunct1_thm = thm "conjunct1"
-val conjunct2_thm = thm "conjunct2"
-val spec_thm = thm "spec"
-val disj_cases_thm = thm "disjE"
-val disj1_thm = thm "disjI1"
-val disj2_thm = thm "disjI2"
+val gen_thm = @{thm HOLallI}
+val choose_thm = @{thm exE}
+val exists_thm = @{thm exI}
+val conj_thm = @{thm conjI}
+val conjunct1_thm = @{thm conjunct1}
+val conjunct2_thm = @{thm conjunct2}
+val spec_thm = @{thm spec}
+val disj_cases_thm = @{thm disjE}
+val disj1_thm = @{thm disjI1}
+val disj2_thm = @{thm disjI2}
local
- val th = thm "not_def"
+ val th = @{thm not_def}
val thy = theory_of_thm th
val pp = Thm.reflexive (cterm_of thy (Const(@{const_name Trueprop},HOLogic.boolT-->propT)))
in
@@ -1006,13 +1006,13 @@
end
val not_intro_thm = Thm.symmetric not_elim_thm
-val abs_thm = thm "ext"
-val trans_thm = thm "trans"
-val symmetry_thm = thm "sym"
-val transitivity_thm = thm "trans"
-val eqmp_thm = thm "iffD1"
-val eqimp_thm = thm "HOL4Setup.eq_imp"
-val comb_thm = thm "cong"
+val abs_thm = @{thm ext}
+val trans_thm = @{thm trans}
+val symmetry_thm = @{thm sym}
+val transitivity_thm = @{thm trans}
+val eqmp_thm = @{thm iffD1}
+val eqimp_thm = @{thm HOL4Setup.eq_imp}
+val comb_thm = @{thm cong}
(* Beta-eta normalizes a theorem (only the conclusion, not the *
hypotheses!) *)
@@ -1971,9 +1971,6 @@
(thy22',hth)
end
-local
- val helper = thm "termspec_help"
-in
fun new_specification thyname thmname names hth thy =
case HOL4DefThy.get thy of
Replaying _ => (thy,hth)
@@ -2054,8 +2051,6 @@
intern_store_thm false thyname thmname hth thy''
end
-end
-
fun new_axiom name tm thy = raise ERR "new_axiom" ("Oh, no you don't! (" ^ name ^ ")")
fun to_isa_thm (hth as HOLThm(_,th)) =
@@ -2068,10 +2063,10 @@
fun to_isa_term tm = tm
local
- val light_nonempty = thm "light_ex_imp_nonempty"
- val ex_imp_nonempty = thm "ex_imp_nonempty"
- val typedef_hol2hol4 = thm "typedef_hol2hol4"
- val typedef_hol2hollight = thm "typedef_hol2hollight"
+ val light_nonempty = @{thm light_ex_imp_nonempty}
+ val ex_imp_nonempty = @{thm ex_imp_nonempty}
+ val typedef_hol2hol4 = @{thm typedef_hol2hol4}
+ val typedef_hol2hollight = @{thm typedef_hol2hollight}
in
fun new_type_definition thyname thmname tycname hth thy =
case HOL4DefThy.get thy of
--- a/src/HOL/Import/shuffler.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Import/shuffler.ML Mon Sep 06 19:23:54 2010 +0200
@@ -346,7 +346,7 @@
fun beta_fun thy assume t =
SOME (Thm.beta_conversion true (cterm_of thy t))
-val meta_sym_rew = thm "refl"
+val meta_sym_rew = @{thm refl}
fun equals_fun thy assume t =
case t of
--- a/src/HOL/IsaMakefile Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/IsaMakefile Mon Sep 06 19:23:54 2010 +0200
@@ -57,6 +57,7 @@
HOL-Quotient_Examples \
HOL-Predicate_Compile_Examples \
HOL-Prolog \
+ HOL-Proofs-ex \
HOL-Proofs-Extraction \
HOL-Proofs-Lambda \
HOL-SET_Protocol \
@@ -92,8 +93,6 @@
HOL-Main: Pure $(OUT)/HOL-Main
-HOL-Proofs: Pure $(OUT)/HOL-Proofs
-
Pure:
@cd $(SRC)/Pure; $(ISABELLE_TOOL) make Pure
@@ -145,7 +144,7 @@
$(OUT)/HOL-Base: base.ML $(BASE_DEPENDENCIES)
@$(ISABELLE_TOOL) usedir -b -f base.ML -d false -g false $(OUT)/Pure HOL-Base
-PLAIN_DEPENDENCIES = $(BASE_DEPENDENCIES)\
+PLAIN_DEPENDENCIES = $(BASE_DEPENDENCIES) \
Complete_Lattice.thy \
Datatype.thy \
Extraction.thy \
@@ -355,9 +354,6 @@
$(OUT)/HOL-Main: main.ML $(MAIN_DEPENDENCIES)
@$(ISABELLE_TOOL) usedir -b -f main.ML -g true $(OUT)/Pure HOL-Main
-$(OUT)/HOL-Proofs: main.ML $(MAIN_DEPENDENCIES)
- @$(ISABELLE_TOOL) usedir -b -f main.ML -g true -p 2 -q 0 $(OUT)/Pure HOL-Proofs
-
HOL_DEPENDENCIES = $(MAIN_DEPENDENCIES) \
Archimedean_Field.thy \
Complex.thy \
@@ -390,7 +386,6 @@
@$(ISABELLE_TOOL) usedir -b -g true $(OUT)/Pure HOL
-
## HOL-Library
HOL-Library: HOL $(OUT)/HOL-Library
@@ -785,7 +780,7 @@
$(LOG)/HOL-Unix.gz: $(OUT)/HOL Library/Nested_Environment.thy \
Library/List_Prefix.thy Unix/ROOT.ML Unix/Unix.thy \
Unix/document/root.bib Unix/document/root.tex
- @$(ISABELLE_TOOL) usedir $(OUT)/HOL Unix
+ @$(ISABELLE_TOOL) usedir -m no_brackets -m no_type_brackets $(OUT)/HOL Unix
## HOL-ZF
@@ -855,17 +850,52 @@
@$(ISABELLE_TOOL) usedir -D generated $(OUT)/HOL Docs
+## HOL-Proofs
+
+HOL-Proofs: Pure $(OUT)/HOL-Proofs
+
+$(OUT)/HOL-Proofs: main.ML $(MAIN_DEPENDENCIES)
+ @$(ISABELLE_TOOL) usedir -b -f main.ML -g true -p 2 -q 0 $(OUT)/Pure HOL-Proofs
+
+
+## HOL-Proofs-ex
+
+HOL-Proofs-ex: HOL-Proofs $(LOG)/HOL-Proofs-ex.gz
+
+$(LOG)/HOL-Proofs-ex.gz: $(OUT)/HOL-Proofs \
+ Proofs/ex/ROOT.ML Proofs/ex/Hilbert_Classical.thy
+ @cd Proofs; $(ISABELLE_TOOL) usedir -p 2 -q 0 $(OUT)/HOL-Proofs ex
+
+
+## HOL-Proofs-Extraction
+
+HOL-Proofs-Extraction: HOL-Proofs $(LOG)/HOL-Proofs-Extraction.gz
+
+$(LOG)/HOL-Proofs-Extraction.gz: $(OUT)/HOL-Proofs \
+ Library/Efficient_Nat.thy Proofs/Extraction/Euclid.thy \
+ Proofs/Extraction/Greatest_Common_Divisor.thy \
+ Proofs/Extraction/Higman.thy Proofs/Extraction/Pigeonhole.thy \
+ Proofs/Extraction/QuotRem.thy Proofs/Extraction/ROOT.ML \
+ Proofs/Extraction/Util.thy Proofs/Extraction/Warshall.thy \
+ Proofs/Extraction/document/root.tex \
+ Proofs/Extraction/document/root.bib
+ @cd Proofs; $(ISABELLE_TOOL) usedir -p 2 -q 0 $(OUT)/HOL-Proofs Extraction
+
+
## HOL-Proofs-Lambda
HOL-Proofs-Lambda: HOL-Proofs $(LOG)/HOL-Proofs-Lambda.gz
-$(LOG)/HOL-Proofs-Lambda.gz: $(OUT)/HOL-Proofs Lambda/Commutation.thy \
- Lambda/Eta.thy Lambda/InductTermi.thy Lambda/Lambda.thy \
- Lambda/ListApplication.thy Lambda/ListBeta.thy Lambda/ListOrder.thy \
- Lambda/NormalForm.thy Lambda/ParRed.thy Lambda/Standardization.thy \
- Lambda/StrongNorm.thy Lambda/Type.thy Lambda/WeakNorm.thy \
- Lambda/ROOT.ML Lambda/document/root.bib Lambda/document/root.tex
- @$(ISABELLE_TOOL) usedir -g true -m no_brackets -p 2 -q 0 $(OUT)/HOL-Proofs Lambda
+$(LOG)/HOL-Proofs-Lambda.gz: $(OUT)/HOL-Proofs \
+ Proofs/Lambda/Commutation.thy Proofs/Lambda/Eta.thy \
+ Proofs/Lambda/InductTermi.thy Proofs/Lambda/Lambda.thy \
+ Proofs/Lambda/ListApplication.thy Proofs/Lambda/ListBeta.thy \
+ Proofs/Lambda/ListOrder.thy Proofs/Lambda/NormalForm.thy \
+ Proofs/Lambda/ParRed.thy Proofs/Lambda/Standardization.thy \
+ Proofs/Lambda/StrongNorm.thy Proofs/Lambda/Type.thy \
+ Proofs/Lambda/WeakNorm.thy Proofs/Lambda/ROOT.ML \
+ Proofs/Lambda/document/root.bib Proofs/Lambda/document/root.tex
+ @cd Proofs; $(ISABELLE_TOOL) usedir -g true -m no_brackets -p 2 -q 0 $(OUT)/HOL-Proofs Lambda
## HOL-Prolog
@@ -940,19 +970,6 @@
@$(ISABELLE_TOOL) usedir -g true $(OUT)/HOL Bali
-## HOL-Proofs-Extraction
-
-HOL-Proofs-Extraction: HOL-Proofs $(LOG)/HOL-Proofs-Extraction.gz
-
-$(LOG)/HOL-Proofs-Extraction.gz: $(OUT)/HOL-Proofs \
- Library/Efficient_Nat.thy Extraction/Euclid.thy \
- Extraction/Greatest_Common_Divisor.thy Extraction/Higman.thy \
- Extraction/Pigeonhole.thy Extraction/QuotRem.thy Extraction/ROOT.ML \
- Extraction/Util.thy Extraction/Warshall.thy \
- Extraction/document/root.tex Extraction/document/root.bib
- @$(ISABELLE_TOOL) usedir -p 2 -q 0 $(OUT)/HOL-Proofs Extraction
-
-
## HOL-IOA
HOL-IOA: HOL $(LOG)/HOL-IOA.gz
@@ -977,29 +994,27 @@
HOL-ex: HOL $(LOG)/HOL-ex.gz
$(LOG)/HOL-ex.gz: $(OUT)/HOL Decision_Procs/Commutative_Ring.thy \
- Number_Theory/Primes.thy \
- ex/Abstract_NAT.thy ex/Antiquote.thy ex/Arith_Examples.thy \
- ex/Arithmetic_Series_Complex.thy ex/BT.thy ex/BinEx.thy \
- ex/Binary.thy ex/CTL.thy ex/Chinese.thy ex/Classical.thy \
- ex/CodegenSML_Test.thy ex/Coherent.thy ex/Dedekind_Real.thy \
- ex/Efficient_Nat_examples.thy ex/Eval_Examples.thy ex/Fundefs.thy \
- ex/Gauge_Integration.thy ex/Groebner_Examples.thy ex/Guess.thy \
- ex/HarmonicSeries.thy ex/Hebrew.thy ex/Hex_Bin_Examples.thy \
- ex/Higher_Order_Logic.thy ex/Hilbert_Classical.thy \
+ Number_Theory/Primes.thy ex/Abstract_NAT.thy ex/Antiquote.thy \
+ ex/Arith_Examples.thy ex/Arithmetic_Series_Complex.thy ex/BT.thy \
+ ex/BinEx.thy ex/Binary.thy ex/CTL.thy ex/Chinese.thy \
+ ex/Classical.thy ex/CodegenSML_Test.thy ex/Coherent.thy \
+ ex/Dedekind_Real.thy ex/Efficient_Nat_examples.thy \
+ ex/Eval_Examples.thy ex/Fundefs.thy ex/Gauge_Integration.thy \
+ ex/Groebner_Examples.thy ex/Guess.thy ex/HarmonicSeries.thy \
+ ex/Hebrew.thy ex/Hex_Bin_Examples.thy ex/Higher_Order_Logic.thy \
ex/Induction_Schema.thy ex/InductiveInvariant.thy \
ex/InductiveInvariant_examples.thy ex/Intuitionistic.thy \
- ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy \
- ex/MergeSort.thy ex/Meson_Test.thy ex/MonoidGroup.thy \
- ex/Multiquote.thy ex/NatSum.thy ex/Numeral.thy ex/PER.thy \
- ex/PresburgerEx.thy ex/Primrec.thy \
- ex/Quickcheck_Examples.thy ex/Quickcheck_Lattice_Examples.thy \
+ ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy ex/MergeSort.thy \
+ ex/Meson_Test.thy ex/MonoidGroup.thy ex/Multiquote.thy ex/NatSum.thy \
+ ex/Numeral.thy ex/PER.thy ex/PresburgerEx.thy ex/Primrec.thy \
+ ex/Quickcheck_Examples.thy ex/Quickcheck_Lattice_Examples.thy \
ex/ROOT.ML ex/Recdefs.thy ex/Records.thy ex/ReflectionEx.thy \
ex/Refute_Examples.thy ex/SAT_Examples.thy ex/SVC_Oracle.thy \
ex/Serbian.thy ex/Sqrt.thy ex/Sqrt_Script.thy ex/Sudoku.thy \
ex/Tarski.thy ex/Termination.thy ex/Transfer_Ex.thy ex/Tree23.thy \
- ex/Unification.thy ex/While_Combinator_Example.thy ex/document/root.bib \
- ex/document/root.tex \
- ex/set.thy ex/svc_funcs.ML ex/svc_test.thy
+ ex/Unification.thy ex/While_Combinator_Example.thy \
+ ex/document/root.bib ex/document/root.tex ex/set.thy ex/svc_funcs.ML \
+ ex/svc_test.thy
@$(ISABELLE_TOOL) usedir $(OUT)/HOL ex
@@ -1137,6 +1152,7 @@
Library/Nat_Bijection.thy Library/Countable.thy
@cd Probability; $(ISABELLE_TOOL) usedir -b -g true $(OUT)/HOL-Multivariate_Analysis HOL-Probability
+
## HOL-Nominal
HOL-Nominal: HOL $(OUT)/HOL-Nominal
@@ -1160,7 +1176,7 @@
HOL-Nominal-Examples: HOL-Nominal $(LOG)/HOL-Nominal-Examples.gz
-$(LOG)/HOL-Nominal-Examples.gz: $(OUT)/HOL-Nominal \
+$(LOG)/HOL-Nominal-Examples.gz: $(OUT)/HOL-Nominal \
Nominal/Examples/Nominal_Examples.thy \
Nominal/Examples/CK_Machine.thy \
Nominal/Examples/CR.thy \
@@ -1352,7 +1368,8 @@
$(LOG)/HOL-Old_Number_Theory.gz $(LOG)/HOL-Plain.gz \
$(LOG)/HOL-Predicate_Compile_Examples.gz \
$(LOG)/HOL-Probability.gz $(LOG)/HOL-Prolog.gz \
- $(LOG)/HOL-Proofs.gz $(LOG)/HOL-Proofs-Extraction.gz \
+ $(LOG)/HOL-Proofs.gz $(LOG)/HOL-Proofs-ex.gz \
+ $(LOG)/HOL-Proofs-Extraction.gz \
$(LOG)/HOL-Proofs-Lambda.gz $(LOG)/HOL-SET_Protocol.gz \
$(LOG)/HOL-Word-SMT_Examples.gz \
$(LOG)/HOL-Statespace.gz $(LOG)/HOL-Subst.gz \
--- a/src/HOL/Lambda/Commutation.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,268 +0,0 @@
-(* Title: HOL/Lambda/Commutation.thy
- Author: Tobias Nipkow
- Copyright 1995 TU Muenchen
-*)
-
-header {* Abstract commutation and confluence notions *}
-
-theory Commutation imports Main begin
-
-declare [[syntax_ambiguity_level = 100]]
-
-
-subsection {* Basic definitions *}
-
-definition
- square :: "['a => 'a => bool, 'a => 'a => bool, 'a => 'a => bool, 'a => 'a => bool] => bool" where
- "square R S T U =
- (\<forall>x y. R x y --> (\<forall>z. S x z --> (\<exists>u. T y u \<and> U z u)))"
-
-definition
- commute :: "['a => 'a => bool, 'a => 'a => bool] => bool" where
- "commute R S = square R S S R"
-
-definition
- diamond :: "('a => 'a => bool) => bool" where
- "diamond R = commute R R"
-
-definition
- Church_Rosser :: "('a => 'a => bool) => bool" where
- "Church_Rosser R =
- (\<forall>x y. (sup R (R^--1))^** x y --> (\<exists>z. R^** x z \<and> R^** y z))"
-
-abbreviation
- confluent :: "('a => 'a => bool) => bool" where
- "confluent R == diamond (R^**)"
-
-
-subsection {* Basic lemmas *}
-
-subsubsection {* @{text "square"} *}
-
-lemma square_sym: "square R S T U ==> square S R U T"
- apply (unfold square_def)
- apply blast
- done
-
-lemma square_subset:
- "[| square R S T U; T \<le> T' |] ==> square R S T' U"
- apply (unfold square_def)
- apply (blast dest: predicate2D)
- done
-
-lemma square_reflcl:
- "[| square R S T (R^==); S \<le> T |] ==> square (R^==) S T (R^==)"
- apply (unfold square_def)
- apply (blast dest: predicate2D)
- done
-
-lemma square_rtrancl:
- "square R S S T ==> square (R^**) S S (T^**)"
- apply (unfold square_def)
- apply (intro strip)
- apply (erule rtranclp_induct)
- apply blast
- apply (blast intro: rtranclp.rtrancl_into_rtrancl)
- done
-
-lemma square_rtrancl_reflcl_commute:
- "square R S (S^**) (R^==) ==> commute (R^**) (S^**)"
- apply (unfold commute_def)
- apply (fastsimp dest: square_reflcl square_sym [THEN square_rtrancl])
- done
-
-
-subsubsection {* @{text "commute"} *}
-
-lemma commute_sym: "commute R S ==> commute S R"
- apply (unfold commute_def)
- apply (blast intro: square_sym)
- done
-
-lemma commute_rtrancl: "commute R S ==> commute (R^**) (S^**)"
- apply (unfold commute_def)
- apply (blast intro: square_rtrancl square_sym)
- done
-
-lemma commute_Un:
- "[| commute R T; commute S T |] ==> commute (sup R S) T"
- apply (unfold commute_def square_def)
- apply blast
- done
-
-
-subsubsection {* @{text "diamond"}, @{text "confluence"}, and @{text "union"} *}
-
-lemma diamond_Un:
- "[| diamond R; diamond S; commute R S |] ==> diamond (sup R S)"
- apply (unfold diamond_def)
- apply (blast intro: commute_Un commute_sym)
- done
-
-lemma diamond_confluent: "diamond R ==> confluent R"
- apply (unfold diamond_def)
- apply (erule commute_rtrancl)
- done
-
-lemma square_reflcl_confluent:
- "square R R (R^==) (R^==) ==> confluent R"
- apply (unfold diamond_def)
- apply (fast intro: square_rtrancl_reflcl_commute elim: square_subset)
- done
-
-lemma confluent_Un:
- "[| confluent R; confluent S; commute (R^**) (S^**) |] ==> confluent (sup R S)"
- apply (rule rtranclp_sup_rtranclp [THEN subst])
- apply (blast dest: diamond_Un intro: diamond_confluent)
- done
-
-lemma diamond_to_confluence:
- "[| diamond R; T \<le> R; R \<le> T^** |] ==> confluent T"
- apply (force intro: diamond_confluent
- dest: rtranclp_subset [symmetric])
- done
-
-
-subsection {* Church-Rosser *}
-
-lemma Church_Rosser_confluent: "Church_Rosser R = confluent R"
- apply (unfold square_def commute_def diamond_def Church_Rosser_def)
- apply (tactic {* safe_tac HOL_cs *})
- apply (tactic {*
- blast_tac (HOL_cs addIs
- [thm "sup_ge2" RS thm "rtranclp_mono" RS thm "predicate2D" RS thm "rtranclp_trans",
- thm "rtranclp_converseI", thm "conversepI",
- thm "sup_ge1" RS thm "rtranclp_mono" RS thm "predicate2D"]) 1 *})
- apply (erule rtranclp_induct)
- apply blast
- apply (blast del: rtranclp.rtrancl_refl intro: rtranclp_trans)
- done
-
-
-subsection {* Newman's lemma *}
-
-text {* Proof by Stefan Berghofer *}
-
-theorem newman:
- assumes wf: "wfP (R\<inverse>\<inverse>)"
- and lc: "\<And>a b c. R a b \<Longrightarrow> R a c \<Longrightarrow>
- \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
- shows "\<And>b c. R\<^sup>*\<^sup>* a b \<Longrightarrow> R\<^sup>*\<^sup>* a c \<Longrightarrow>
- \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
- using wf
-proof induct
- case (less x b c)
- have xc: "R\<^sup>*\<^sup>* x c" by fact
- have xb: "R\<^sup>*\<^sup>* x b" by fact thus ?case
- proof (rule converse_rtranclpE)
- assume "x = b"
- with xc have "R\<^sup>*\<^sup>* b c" by simp
- thus ?thesis by iprover
- next
- fix y
- assume xy: "R x y"
- assume yb: "R\<^sup>*\<^sup>* y b"
- from xc show ?thesis
- proof (rule converse_rtranclpE)
- assume "x = c"
- with xb have "R\<^sup>*\<^sup>* c b" by simp
- thus ?thesis by iprover
- next
- fix y'
- assume y'c: "R\<^sup>*\<^sup>* y' c"
- assume xy': "R x y'"
- with xy have "\<exists>u. R\<^sup>*\<^sup>* y u \<and> R\<^sup>*\<^sup>* y' u" by (rule lc)
- then obtain u where yu: "R\<^sup>*\<^sup>* y u" and y'u: "R\<^sup>*\<^sup>* y' u" by iprover
- from xy have "R\<inverse>\<inverse> y x" ..
- from this and yb yu have "\<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* u d" by (rule less)
- then obtain v where bv: "R\<^sup>*\<^sup>* b v" and uv: "R\<^sup>*\<^sup>* u v" by iprover
- from xy' have "R\<inverse>\<inverse> y' x" ..
- moreover from y'u and uv have "R\<^sup>*\<^sup>* y' v" by (rule rtranclp_trans)
- moreover note y'c
- ultimately have "\<exists>d. R\<^sup>*\<^sup>* v d \<and> R\<^sup>*\<^sup>* c d" by (rule less)
- then obtain w where vw: "R\<^sup>*\<^sup>* v w" and cw: "R\<^sup>*\<^sup>* c w" by iprover
- from bv vw have "R\<^sup>*\<^sup>* b w" by (rule rtranclp_trans)
- with cw show ?thesis by iprover
- qed
- qed
-qed
-
-text {*
- Alternative version. Partly automated by Tobias
- Nipkow. Takes 2 minutes (2002).
-
- This is the maximal amount of automation possible using @{text blast}.
-*}
-
-theorem newman':
- assumes wf: "wfP (R\<inverse>\<inverse>)"
- and lc: "\<And>a b c. R a b \<Longrightarrow> R a c \<Longrightarrow>
- \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
- shows "\<And>b c. R\<^sup>*\<^sup>* a b \<Longrightarrow> R\<^sup>*\<^sup>* a c \<Longrightarrow>
- \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
- using wf
-proof induct
- case (less x b c)
- note IH = `\<And>y b c. \<lbrakk>R\<inverse>\<inverse> y x; R\<^sup>*\<^sup>* y b; R\<^sup>*\<^sup>* y c\<rbrakk>
- \<Longrightarrow> \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d`
- have xc: "R\<^sup>*\<^sup>* x c" by fact
- have xb: "R\<^sup>*\<^sup>* x b" by fact
- thus ?case
- proof (rule converse_rtranclpE)
- assume "x = b"
- with xc have "R\<^sup>*\<^sup>* b c" by simp
- thus ?thesis by iprover
- next
- fix y
- assume xy: "R x y"
- assume yb: "R\<^sup>*\<^sup>* y b"
- from xc show ?thesis
- proof (rule converse_rtranclpE)
- assume "x = c"
- with xb have "R\<^sup>*\<^sup>* c b" by simp
- thus ?thesis by iprover
- next
- fix y'
- assume y'c: "R\<^sup>*\<^sup>* y' c"
- assume xy': "R x y'"
- with xy obtain u where u: "R\<^sup>*\<^sup>* y u" "R\<^sup>*\<^sup>* y' u"
- by (blast dest: lc)
- from yb u y'c show ?thesis
- by (blast del: rtranclp.rtrancl_refl
- intro: rtranclp_trans
- dest: IH [OF conversepI, OF xy] IH [OF conversepI, OF xy'])
- qed
- qed
-qed
-
-text {*
- Using the coherent logic prover, the proof of the induction step
- is completely automatic.
-*}
-
-lemma eq_imp_rtranclp: "x = y \<Longrightarrow> r\<^sup>*\<^sup>* x y"
- by simp
-
-theorem newman'':
- assumes wf: "wfP (R\<inverse>\<inverse>)"
- and lc: "\<And>a b c. R a b \<Longrightarrow> R a c \<Longrightarrow>
- \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
- shows "\<And>b c. R\<^sup>*\<^sup>* a b \<Longrightarrow> R\<^sup>*\<^sup>* a c \<Longrightarrow>
- \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
- using wf
-proof induct
- case (less x b c)
- note IH = `\<And>y b c. \<lbrakk>R\<inverse>\<inverse> y x; R\<^sup>*\<^sup>* y b; R\<^sup>*\<^sup>* y c\<rbrakk>
- \<Longrightarrow> \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d`
- show ?case
- by (coherent
- `R\<^sup>*\<^sup>* x c` `R\<^sup>*\<^sup>* x b`
- refl [where 'a='a] sym
- eq_imp_rtranclp
- r_into_rtranclp [of R]
- rtranclp_trans
- lc IH [OF conversepI]
- converse_rtranclpE)
-qed
-
-end
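In conventional notation, the theorem newman deleted above (together with its two variants newman' and newman'') is Newman's lemma: a terminating, locally confluent relation is confluent. A LaTeX restatement of the Isabelle formulation, where wf(R^{-1}) expresses well-foundedness of the converse relation, i.e. termination of R:

\[
  \mathrm{wf}(R^{-1}) \;\wedge\;
  \bigl(\forall a\,b\,c.\; a \mathrel{R} b \wedge a \mathrel{R} c \longrightarrow
        \exists d.\; b \mathrel{R^{*}} d \wedge c \mathrel{R^{*}} d\bigr)
  \;\Longrightarrow\;
  \bigl(\forall a\,b\,c.\; a \mathrel{R^{*}} b \wedge a \mathrel{R^{*}} c \longrightarrow
        \exists d.\; b \mathrel{R^{*}} d \wedge c \mathrel{R^{*}} d\bigr)
\]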
--- a/src/HOL/Lambda/Eta.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,394 +0,0 @@
-(* Title: HOL/Lambda/Eta.thy
- Author: Tobias Nipkow and Stefan Berghofer
- Copyright 1995, 2005 TU Muenchen
-*)
-
-header {* Eta-reduction *}
-
-theory Eta imports ParRed begin
-
-
-subsection {* Definition of eta-reduction and relatives *}
-
-primrec
- free :: "dB => nat => bool"
-where
- "free (Var j) i = (j = i)"
- | "free (s \<degree> t) i = (free s i \<or> free t i)"
- | "free (Abs s) i = free s (i + 1)"
-
-inductive
- eta :: "[dB, dB] => bool" (infixl "\<rightarrow>\<^sub>\<eta>" 50)
-where
- eta [simp, intro]: "\<not> free s 0 ==> Abs (s \<degree> Var 0) \<rightarrow>\<^sub>\<eta> s[dummy/0]"
- | appL [simp, intro]: "s \<rightarrow>\<^sub>\<eta> t ==> s \<degree> u \<rightarrow>\<^sub>\<eta> t \<degree> u"
- | appR [simp, intro]: "s \<rightarrow>\<^sub>\<eta> t ==> u \<degree> s \<rightarrow>\<^sub>\<eta> u \<degree> t"
- | abs [simp, intro]: "s \<rightarrow>\<^sub>\<eta> t ==> Abs s \<rightarrow>\<^sub>\<eta> Abs t"
-
-abbreviation
- eta_reds :: "[dB, dB] => bool" (infixl "-e>>" 50) where
- "s -e>> t == eta^** s t"
-
-abbreviation
- eta_red0 :: "[dB, dB] => bool" (infixl "-e>=" 50) where
- "s -e>= t == eta^== s t"
-
-notation (xsymbols)
- eta_reds (infixl "\<rightarrow>\<^sub>\<eta>\<^sup>*" 50) and
- eta_red0 (infixl "\<rightarrow>\<^sub>\<eta>\<^sup>=" 50)
-
-inductive_cases eta_cases [elim!]:
- "Abs s \<rightarrow>\<^sub>\<eta> z"
- "s \<degree> t \<rightarrow>\<^sub>\<eta> u"
- "Var i \<rightarrow>\<^sub>\<eta> t"
-
-
-subsection {* Properties of @{text "eta"}, @{text "subst"} and @{text "free"} *}
-
-lemma subst_not_free [simp]: "\<not> free s i \<Longrightarrow> s[t/i] = s[u/i]"
- by (induct s arbitrary: i t u) (simp_all add: subst_Var)
-
-lemma free_lift [simp]:
- "free (lift t k) i = (i < k \<and> free t i \<or> k < i \<and> free t (i - 1))"
- apply (induct t arbitrary: i k)
- apply (auto cong: conj_cong)
- done
-
-lemma free_subst [simp]:
- "free (s[t/k]) i =
- (free s k \<and> free t i \<or> free s (if i < k then i else i + 1))"
- apply (induct s arbitrary: i k t)
- prefer 2
- apply simp
- apply blast
- prefer 2
- apply simp
- apply (simp add: diff_Suc subst_Var split: nat.split)
- done
-
-lemma free_eta: "s \<rightarrow>\<^sub>\<eta> t ==> free t i = free s i"
- by (induct arbitrary: i set: eta) (simp_all cong: conj_cong)
-
-lemma not_free_eta:
- "[| s \<rightarrow>\<^sub>\<eta> t; \<not> free s i |] ==> \<not> free t i"
- by (simp add: free_eta)
-
-lemma eta_subst [simp]:
- "s \<rightarrow>\<^sub>\<eta> t ==> s[u/i] \<rightarrow>\<^sub>\<eta> t[u/i]"
- by (induct arbitrary: u i set: eta) (simp_all add: subst_subst [symmetric])
-
-theorem lift_subst_dummy: "\<not> free s i \<Longrightarrow> lift (s[dummy/i]) i = s"
- by (induct s arbitrary: i dummy) simp_all
-
-
-subsection {* Confluence of @{text "eta"} *}
-
-lemma square_eta: "square eta eta (eta^==) (eta^==)"
- apply (unfold square_def id_def)
- apply (rule impI [THEN allI [THEN allI]])
- apply (erule eta.induct)
- apply (slowsimp intro: subst_not_free eta_subst free_eta [THEN iffD1])
- apply safe
- prefer 5
- apply (blast intro!: eta_subst intro: free_eta [THEN iffD1])
- apply blast+
- done
-
-theorem eta_confluent: "confluent eta"
- apply (rule square_eta [THEN square_reflcl_confluent])
- done
-
-
-subsection {* Congruence rules for @{text "eta\<^sup>*"} *}
-
-lemma rtrancl_eta_Abs: "s \<rightarrow>\<^sub>\<eta>\<^sup>* s' ==> Abs s \<rightarrow>\<^sub>\<eta>\<^sup>* Abs s'"
- by (induct set: rtranclp)
- (blast intro: rtranclp.rtrancl_into_rtrancl)+
-
-lemma rtrancl_eta_AppL: "s \<rightarrow>\<^sub>\<eta>\<^sup>* s' ==> s \<degree> t \<rightarrow>\<^sub>\<eta>\<^sup>* s' \<degree> t"
- by (induct set: rtranclp)
- (blast intro: rtranclp.rtrancl_into_rtrancl)+
-
-lemma rtrancl_eta_AppR: "t \<rightarrow>\<^sub>\<eta>\<^sup>* t' ==> s \<degree> t \<rightarrow>\<^sub>\<eta>\<^sup>* s \<degree> t'"
- by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
-
-lemma rtrancl_eta_App:
- "[| s \<rightarrow>\<^sub>\<eta>\<^sup>* s'; t \<rightarrow>\<^sub>\<eta>\<^sup>* t' |] ==> s \<degree> t \<rightarrow>\<^sub>\<eta>\<^sup>* s' \<degree> t'"
- by (blast intro!: rtrancl_eta_AppL rtrancl_eta_AppR intro: rtranclp_trans)
-
-
-subsection {* Commutation of @{text "beta"} and @{text "eta"} *}
-
-lemma free_beta:
- "s \<rightarrow>\<^sub>\<beta> t ==> free t i \<Longrightarrow> free s i"
- by (induct arbitrary: i set: beta) auto
-
-lemma beta_subst [intro]: "s \<rightarrow>\<^sub>\<beta> t ==> s[u/i] \<rightarrow>\<^sub>\<beta> t[u/i]"
- by (induct arbitrary: u i set: beta) (simp_all add: subst_subst [symmetric])
-
-lemma subst_Var_Suc [simp]: "t[Var i/i] = t[Var(i)/i + 1]"
- by (induct t arbitrary: i) (auto elim!: linorder_neqE simp: subst_Var)
-
-lemma eta_lift [simp]: "s \<rightarrow>\<^sub>\<eta> t ==> lift s i \<rightarrow>\<^sub>\<eta> lift t i"
- by (induct arbitrary: i set: eta) simp_all
-
-lemma rtrancl_eta_subst: "s \<rightarrow>\<^sub>\<eta> t \<Longrightarrow> u[s/i] \<rightarrow>\<^sub>\<eta>\<^sup>* u[t/i]"
- apply (induct u arbitrary: s t i)
- apply (simp_all add: subst_Var)
- apply blast
- apply (blast intro: rtrancl_eta_App)
- apply (blast intro!: rtrancl_eta_Abs eta_lift)
- done
-
-lemma rtrancl_eta_subst':
- fixes s t :: dB
- assumes eta: "s \<rightarrow>\<^sub>\<eta>\<^sup>* t"
- shows "s[u/i] \<rightarrow>\<^sub>\<eta>\<^sup>* t[u/i]" using eta
- by induct (iprover intro: eta_subst)+
-
-lemma rtrancl_eta_subst'':
- fixes s t :: dB
- assumes eta: "s \<rightarrow>\<^sub>\<eta>\<^sup>* t"
- shows "u[s/i] \<rightarrow>\<^sub>\<eta>\<^sup>* u[t/i]" using eta
- by induct (iprover intro: rtrancl_eta_subst rtranclp_trans)+
-
-lemma square_beta_eta: "square beta eta (eta^**) (beta^==)"
- apply (unfold square_def)
- apply (rule impI [THEN allI [THEN allI]])
- apply (erule beta.induct)
- apply (slowsimp intro: rtrancl_eta_subst eta_subst)
- apply (blast intro: rtrancl_eta_AppL)
- apply (blast intro: rtrancl_eta_AppR)
- apply simp;
- apply (slowsimp intro: rtrancl_eta_Abs free_beta
- iff del: dB.distinct simp: dB.distinct) (*23 seconds?*)
- done
-
-lemma confluent_beta_eta: "confluent (sup beta eta)"
- apply (assumption |
- rule square_rtrancl_reflcl_commute confluent_Un
- beta_confluent eta_confluent square_beta_eta)+
- done
-
-
-subsection {* Implicit definition of @{text "eta"} *}
-
-text {* @{term "Abs (lift s 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> s"} *}
-
-lemma not_free_iff_lifted:
- "(\<not> free s i) = (\<exists>t. s = lift t i)"
- apply (induct s arbitrary: i)
- apply simp
- apply (rule iffI)
- apply (erule linorder_neqE)
- apply (rule_tac x = "Var nat" in exI)
- apply simp
- apply (rule_tac x = "Var (nat - 1)" in exI)
- apply simp
- apply clarify
- apply (rule notE)
- prefer 2
- apply assumption
- apply (erule thin_rl)
- apply (case_tac t)
- apply simp
- apply simp
- apply simp
- apply simp
- apply (erule thin_rl)
- apply (erule thin_rl)
- apply (rule iffI)
- apply (elim conjE exE)
- apply (rename_tac u1 u2)
- apply (rule_tac x = "u1 \<degree> u2" in exI)
- apply simp
- apply (erule exE)
- apply (erule rev_mp)
- apply (case_tac t)
- apply simp
- apply simp
- apply blast
- apply simp
- apply simp
- apply (erule thin_rl)
- apply (rule iffI)
- apply (erule exE)
- apply (rule_tac x = "Abs t" in exI)
- apply simp
- apply (erule exE)
- apply (erule rev_mp)
- apply (case_tac t)
- apply simp
- apply simp
- apply simp
- apply blast
- done
-
-theorem explicit_is_implicit:
- "(\<forall>s u. (\<not> free s 0) --> R (Abs (s \<degree> Var 0)) (s[u/0])) =
- (\<forall>s. R (Abs (lift s 0 \<degree> Var 0)) s)"
- by (auto simp add: not_free_iff_lifted)
-
-
-subsection {* Eta-postponement theorem *}
-
-text {*
- Based on a paper proof due to Andreas Abel.
- Unlike the proof by Masako Takahashi \cite{Takahashi-IandC}, it does not
- use parallel eta reduction, which only seems to complicate matters unnecessarily.
-*}
-
-theorem eta_case:
- fixes s :: dB
- assumes free: "\<not> free s 0"
- and s: "s[dummy/0] => u"
- shows "\<exists>t'. Abs (s \<degree> Var 0) => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u"
-proof -
- from s have "lift (s[dummy/0]) 0 => lift u 0" by (simp del: lift_subst)
- with free have "s => lift u 0" by (simp add: lift_subst_dummy del: lift_subst)
- hence "Abs (s \<degree> Var 0) => Abs (lift u 0 \<degree> Var 0)" by simp
- moreover have "\<not> free (lift u 0) 0" by simp
- hence "Abs (lift u 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> lift u 0[dummy/0]"
- by (rule eta.eta)
- hence "Abs (lift u 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta>\<^sup>* u" by simp
- ultimately show ?thesis by iprover
-qed
-
-theorem eta_par_beta:
- assumes st: "s \<rightarrow>\<^sub>\<eta> t"
- and tu: "t => u"
- shows "\<exists>t'. s => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u" using tu st
-proof (induct arbitrary: s)
- case (var n)
- thus ?case by (iprover intro: par_beta_refl)
-next
- case (abs s' t)
- note abs' = this
- from `s \<rightarrow>\<^sub>\<eta> Abs s'` show ?case
- proof cases
- case (eta s'' dummy)
- from abs have "Abs s' => Abs t" by simp
- with eta have "s''[dummy/0] => Abs t" by simp
- with `\<not> free s'' 0` have "\<exists>t'. Abs (s'' \<degree> Var 0) => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* Abs t"
- by (rule eta_case)
- with eta show ?thesis by simp
- next
- case (abs r)
- from `r \<rightarrow>\<^sub>\<eta> s'`
- obtain t' where r: "r => t'" and t': "t' \<rightarrow>\<^sub>\<eta>\<^sup>* t" by (iprover dest: abs')
- from r have "Abs r => Abs t'" ..
- moreover from t' have "Abs t' \<rightarrow>\<^sub>\<eta>\<^sup>* Abs t" by (rule rtrancl_eta_Abs)
- ultimately show ?thesis using abs by simp iprover
- qed
-next
- case (app u u' t t')
- from `s \<rightarrow>\<^sub>\<eta> u \<degree> t` show ?case
- proof cases
- case (eta s' dummy)
- from app have "u \<degree> t => u' \<degree> t'" by simp
- with eta have "s'[dummy/0] => u' \<degree> t'" by simp
- with `\<not> free s' 0` have "\<exists>r. Abs (s' \<degree> Var 0) => r \<and> r \<rightarrow>\<^sub>\<eta>\<^sup>* u' \<degree> t'"
- by (rule eta_case)
- with eta show ?thesis by simp
- next
- case (appL s')
- from `s' \<rightarrow>\<^sub>\<eta> u`
- obtain r where s': "s' => r" and r: "r \<rightarrow>\<^sub>\<eta>\<^sup>* u'" by (iprover dest: app)
- from s' and app have "s' \<degree> t => r \<degree> t'" by simp
- moreover from r have "r \<degree> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u' \<degree> t'" by (simp add: rtrancl_eta_AppL)
- ultimately show ?thesis using appL by simp iprover
- next
- case (appR s')
- from `s' \<rightarrow>\<^sub>\<eta> t`
- obtain r where s': "s' => r" and r: "r \<rightarrow>\<^sub>\<eta>\<^sup>* t'" by (iprover dest: app)
- from s' and app have "u \<degree> s' => u' \<degree> r" by simp
- moreover from r have "u' \<degree> r \<rightarrow>\<^sub>\<eta>\<^sup>* u' \<degree> t'" by (simp add: rtrancl_eta_AppR)
- ultimately show ?thesis using appR by simp iprover
- qed
-next
- case (beta u u' t t')
- from `s \<rightarrow>\<^sub>\<eta> Abs u \<degree> t` show ?case
- proof cases
- case (eta s' dummy)
- from beta have "Abs u \<degree> t => u'[t'/0]" by simp
- with eta have "s'[dummy/0] => u'[t'/0]" by simp
- with `\<not> free s' 0` have "\<exists>r. Abs (s' \<degree> Var 0) => r \<and> r \<rightarrow>\<^sub>\<eta>\<^sup>* u'[t'/0]"
- by (rule eta_case)
- with eta show ?thesis by simp
- next
- case (appL s')
- from `s' \<rightarrow>\<^sub>\<eta> Abs u` show ?thesis
- proof cases
- case (eta s'' dummy)
- have "Abs (lift u 1) = lift (Abs u) 0" by simp
- also from eta have "\<dots> = s''" by (simp add: lift_subst_dummy del: lift_subst)
- finally have s: "s = Abs (Abs (lift u 1) \<degree> Var 0) \<degree> t" using appL and eta by simp
- from beta have "lift u 1 => lift u' 1" by simp
- hence "Abs (lift u 1) \<degree> Var 0 => lift u' 1[Var 0/0]"
- using par_beta.var ..
- hence "Abs (Abs (lift u 1) \<degree> Var 0) \<degree> t => lift u' 1[Var 0/0][t'/0]"
- using `t => t'` ..
- with s have "s => u'[t'/0]" by simp
- thus ?thesis by iprover
- next
- case (abs r)
- from `r \<rightarrow>\<^sub>\<eta> u`
- obtain r'' where r: "r => r''" and r'': "r'' \<rightarrow>\<^sub>\<eta>\<^sup>* u'" by (iprover dest: beta)
- from r and beta have "Abs r \<degree> t => r''[t'/0]" by simp
- moreover from r'' have "r''[t'/0] \<rightarrow>\<^sub>\<eta>\<^sup>* u'[t'/0]"
- by (rule rtrancl_eta_subst')
- ultimately show ?thesis using abs and appL by simp iprover
- qed
- next
- case (appR s')
- from `s' \<rightarrow>\<^sub>\<eta> t`
- obtain r where s': "s' => r" and r: "r \<rightarrow>\<^sub>\<eta>\<^sup>* t'" by (iprover dest: beta)
- from s' and beta have "Abs u \<degree> s' => u'[r/0]" by simp
- moreover from r have "u'[r/0] \<rightarrow>\<^sub>\<eta>\<^sup>* u'[t'/0]"
- by (rule rtrancl_eta_subst'')
- ultimately show ?thesis using appR by simp iprover
- qed
-qed
-
-theorem eta_postponement':
- assumes eta: "s \<rightarrow>\<^sub>\<eta>\<^sup>* t" and beta: "t => u"
- shows "\<exists>t'. s => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u" using eta beta
-proof (induct arbitrary: u)
- case base
- thus ?case by blast
-next
- case (step s' s'' s''')
- then obtain t' where s': "s' => t'" and t': "t' \<rightarrow>\<^sub>\<eta>\<^sup>* s'''"
- by (auto dest: eta_par_beta)
- from s' obtain t'' where s: "s => t''" and t'': "t'' \<rightarrow>\<^sub>\<eta>\<^sup>* t'" using step
- by blast
- from t'' and t' have "t'' \<rightarrow>\<^sub>\<eta>\<^sup>* s'''" by (rule rtranclp_trans)
- with s show ?case by iprover
-qed
-
-theorem eta_postponement:
- assumes "(sup beta eta)\<^sup>*\<^sup>* s t"
- shows "(beta\<^sup>*\<^sup>* OO eta\<^sup>*\<^sup>*) s t" using assms
-proof induct
- case base
- show ?case by blast
-next
- case (step s' s'')
- from step(3) obtain t' where s: "s \<rightarrow>\<^sub>\<beta>\<^sup>* t'" and t': "t' \<rightarrow>\<^sub>\<eta>\<^sup>* s'" by blast
- from step(2) show ?case
- proof
- assume "s' \<rightarrow>\<^sub>\<beta> s''"
- with beta_subset_par_beta have "s' => s''" ..
- with t' obtain t'' where st: "t' => t''" and tu: "t'' \<rightarrow>\<^sub>\<eta>\<^sup>* s''"
- by (auto dest: eta_postponement')
- from par_beta_subset_beta st have "t' \<rightarrow>\<^sub>\<beta>\<^sup>* t''" ..
- with s have "s \<rightarrow>\<^sub>\<beta>\<^sup>* t''" by (rule rtranclp_trans)
- thus ?thesis using tu ..
- next
- assume "s' \<rightarrow>\<^sub>\<eta> s''"
- with t' have "t' \<rightarrow>\<^sub>\<eta>\<^sup>* s''" ..
- with s show ?thesis ..
- qed
-qed
-
-end
--- a/src/HOL/Lambda/InductTermi.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,108 +0,0 @@
-(* Title: HOL/Lambda/InductTermi.thy
- Author: Tobias Nipkow
- Copyright 1998 TU Muenchen
-
-Inductive characterization of terminating lambda terms. Goes back to
-Raamsdonk & Severi. On normalization. CWI TR CS-R9545, 1995. Also
-rediscovered by Matthes and Joachimski.
-*)
-
-header {* Inductive characterization of terminating lambda terms *}
-
-theory InductTermi imports ListBeta begin
-
-subsection {* Terminating lambda terms *}
-
-inductive IT :: "dB => bool"
- where
- Var [intro]: "listsp IT rs ==> IT (Var n \<degree>\<degree> rs)"
- | Lambda [intro]: "IT r ==> IT (Abs r)"
- | Beta [intro]: "IT ((r[s/0]) \<degree>\<degree> ss) ==> IT s ==> IT ((Abs r \<degree> s) \<degree>\<degree> ss)"
-
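
Read operationally (a hedged Haskell sketch reusing the DB type and subst from the sketch near the start of this changeset; isIT and spine are hypothetical names), the three rules unfold head redexes and recurse into arguments. Such a checker terminates exactly on strongly normalizing terms, which is what the subsections below establish for IT.

-- Naive unfolding of the IT rules; diverges on terms that are not strongly normalizing.
isIT :: DB -> Bool
isIT t = case spine t of
    (Var _, rs)     -> all isIT rs                                  -- rule Var
    (Abs r, [])     -> isIT r                                       -- rule Lambda
    (Abs r, s : ss) -> isIT (foldl App (subst r s 0) ss) && isIT s  -- rule Beta
    _               -> False   -- unreachable: a spine head is always Var or Abs
  where
    spine (App s u) = let (h, us) = spine s in (h, us ++ [u])
    spine h         = (h, [])
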
-
-subsection {* Every term in @{text "IT"} terminates *}
-
-lemma double_induction_lemma [rule_format]:
- "termip beta s ==> \<forall>t. termip beta t -->
- (\<forall>r ss. t = r[s/0] \<degree>\<degree> ss --> termip beta (Abs r \<degree> s \<degree>\<degree> ss))"
- apply (erule accp_induct)
- apply (rule allI)
- apply (rule impI)
- apply (erule thin_rl)
- apply (erule accp_induct)
- apply clarify
- apply (rule accp.accI)
- apply (safe elim!: apps_betasE)
- apply (blast intro: subst_preserves_beta apps_preserves_beta)
- apply (blast intro: apps_preserves_beta2 subst_preserves_beta2 rtranclp_converseI
- dest: accp_downwards) (* FIXME: acc_downwards can be replaced by acc(R ^* ) = acc(r) *)
- apply (blast dest: apps_preserves_betas)
- done
-
-lemma IT_implies_termi: "IT t ==> termip beta t"
- apply (induct set: IT)
- apply (drule rev_predicate1D [OF _ listsp_mono [where B="termip beta"]])
- apply (fast intro!: predicate1I)
- apply (drule lists_accD)
- apply (erule accp_induct)
- apply (rule accp.accI)
- apply (blast dest: head_Var_reduction)
- apply (erule accp_induct)
- apply (rule accp.accI)
- apply blast
- apply (blast intro: double_induction_lemma)
- done
-
-
-subsection {* Every terminating term is in @{text "IT"} *}
-
-declare Var_apps_neq_Abs_apps [symmetric, simp]
-
-lemma [simp, THEN not_sym, simp]: "Var n \<degree>\<degree> ss \<noteq> Abs r \<degree> s \<degree>\<degree> ts"
- by (simp add: foldl_Cons [symmetric] del: foldl_Cons)
-
-lemma [simp]:
- "(Abs r \<degree> s \<degree>\<degree> ss = Abs r' \<degree> s' \<degree>\<degree> ss') = (r = r' \<and> s = s' \<and> ss = ss')"
- by (simp add: foldl_Cons [symmetric] del: foldl_Cons)
-
-inductive_cases [elim!]:
- "IT (Var n \<degree>\<degree> ss)"
- "IT (Abs t)"
- "IT (Abs r \<degree> s \<degree>\<degree> ts)"
-
-theorem termi_implies_IT: "termip beta r ==> IT r"
- apply (erule accp_induct)
- apply (rename_tac r)
- apply (erule thin_rl)
- apply (erule rev_mp)
- apply simp
- apply (rule_tac t = r in Apps_dB_induct)
- apply clarify
- apply (rule IT.intros)
- apply clarify
- apply (drule bspec, assumption)
- apply (erule mp)
- apply clarify
- apply (drule_tac r=beta in conversepI)
- apply (drule_tac r="beta^--1" in ex_step1I, assumption)
- apply clarify
- apply (rename_tac us)
- apply (erule_tac x = "Var n \<degree>\<degree> us" in allE)
- apply force
- apply (rename_tac u ts)
- apply (case_tac ts)
- apply simp
- apply blast
- apply (rename_tac s ss)
- apply simp
- apply clarify
- apply (rule IT.intros)
- apply (blast intro: apps_preserves_beta)
- apply (erule mp)
- apply clarify
- apply (rename_tac t)
- apply (erule_tac x = "Abs u \<degree> t \<degree>\<degree> ss" in allE)
- apply force
- done
-
-end
--- a/src/HOL/Lambda/Lambda.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,190 +0,0 @@
-(* Title: HOL/Lambda/Lambda.thy
- Author: Tobias Nipkow
- Copyright 1995 TU Muenchen
-*)
-
-header {* Basic definitions of Lambda-calculus *}
-
-theory Lambda imports Main begin
-
-declare [[syntax_ambiguity_level = 100]]
-
-
-subsection {* Lambda-terms in de Bruijn notation and substitution *}
-
-datatype dB =
- Var nat
- | App dB dB (infixl "\<degree>" 200)
- | Abs dB
-
-primrec
- lift :: "[dB, nat] => dB"
-where
- "lift (Var i) k = (if i < k then Var i else Var (i + 1))"
- | "lift (s \<degree> t) k = lift s k \<degree> lift t k"
- | "lift (Abs s) k = Abs (lift s (k + 1))"
-
-primrec
- subst :: "[dB, dB, nat] => dB" ("_[_'/_]" [300, 0, 0] 300)
-where (* FIXME base names *)
- subst_Var: "(Var i)[s/k] =
- (if k < i then Var (i - 1) else if i = k then s else Var i)"
- | subst_App: "(t \<degree> u)[s/k] = t[s/k] \<degree> u[s/k]"
- | subst_Abs: "(Abs t)[s/k] = Abs (t[lift s 0 / k+1])"
-
-declare subst_Var [simp del]
-
-text {* Optimized versions of @{term subst} and @{term lift}. *}
-
-primrec
- liftn :: "[nat, dB, nat] => dB"
-where
- "liftn n (Var i) k = (if i < k then Var i else Var (i + n))"
- | "liftn n (s \<degree> t) k = liftn n s k \<degree> liftn n t k"
- | "liftn n (Abs s) k = Abs (liftn n s (k + 1))"
-
-primrec
- substn :: "[dB, dB, nat] => dB"
-where
- "substn (Var i) s k =
- (if k < i then Var (i - 1) else if i = k then liftn k s 0 else Var i)"
- | "substn (t \<degree> u) s k = substn t s k \<degree> substn u s k"
- | "substn (Abs t) s k = Abs (substn t s (k + 1))"
-
-
-subsection {* Beta-reduction *}
-
-inductive beta :: "[dB, dB] => bool" (infixl "\<rightarrow>\<^sub>\<beta>" 50)
- where
- beta [simp, intro!]: "Abs s \<degree> t \<rightarrow>\<^sub>\<beta> s[t/0]"
- | appL [simp, intro!]: "s \<rightarrow>\<^sub>\<beta> t ==> s \<degree> u \<rightarrow>\<^sub>\<beta> t \<degree> u"
- | appR [simp, intro!]: "s \<rightarrow>\<^sub>\<beta> t ==> u \<degree> s \<rightarrow>\<^sub>\<beta> u \<degree> t"
- | abs [simp, intro!]: "s \<rightarrow>\<^sub>\<beta> t ==> Abs s \<rightarrow>\<^sub>\<beta> Abs t"
-
-abbreviation
- beta_reds :: "[dB, dB] => bool" (infixl "->>" 50) where
- "s ->> t == beta^** s t"
-
-notation (latex)
- beta_reds (infixl "\<rightarrow>\<^sub>\<beta>\<^sup>*" 50)
-
-inductive_cases beta_cases [elim!]:
- "Var i \<rightarrow>\<^sub>\<beta> t"
- "Abs r \<rightarrow>\<^sub>\<beta> s"
- "s \<degree> t \<rightarrow>\<^sub>\<beta> u"
-
-declare if_not_P [simp] not_less_eq [simp]
- -- {* don't add @{text "r_into_rtrancl[intro!]"} *}
-
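
One particular strategy that generates \<rightarrow>\<^sub>\<beta> steps, sketched in Haskell (reusing DB and subst from the first sketch in this changeset; step is a hypothetical name, not part of the removed sources): contract the leftmost-outermost redex.

-- One leftmost-outermost beta step, or Nothing if the term is already beta-normal.
step :: DB -> Maybe DB
step (App (Abs s) t) = Just (subst s t 0)              -- rule beta
step (App s t)       = case step s of                  -- rules appL / appR
                         Just s' -> Just (App s' t)
                         Nothing -> App s <$> step t
step (Abs s)         = Abs <$> step s                  -- rule abs
step (Var _)         = Nothing
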
-
-subsection {* Congruence rules *}
-
-lemma rtrancl_beta_Abs [intro!]:
- "s \<rightarrow>\<^sub>\<beta>\<^sup>* s' ==> Abs s \<rightarrow>\<^sub>\<beta>\<^sup>* Abs s'"
- by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
-
-lemma rtrancl_beta_AppL:
- "s \<rightarrow>\<^sub>\<beta>\<^sup>* s' ==> s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s' \<degree> t"
- by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
-
-lemma rtrancl_beta_AppR:
- "t \<rightarrow>\<^sub>\<beta>\<^sup>* t' ==> s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s \<degree> t'"
- by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
-
-lemma rtrancl_beta_App [intro]:
- "[| s \<rightarrow>\<^sub>\<beta>\<^sup>* s'; t \<rightarrow>\<^sub>\<beta>\<^sup>* t' |] ==> s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s' \<degree> t'"
- by (blast intro!: rtrancl_beta_AppL rtrancl_beta_AppR intro: rtranclp_trans)
-
-
-subsection {* Substitution-lemmas *}
-
-lemma subst_eq [simp]: "(Var k)[u/k] = u"
- by (simp add: subst_Var)
-
-lemma subst_gt [simp]: "i < j ==> (Var j)[u/i] = Var (j - 1)"
- by (simp add: subst_Var)
-
-lemma subst_lt [simp]: "j < i ==> (Var j)[u/i] = Var j"
- by (simp add: subst_Var)
-
-lemma lift_lift:
- "i < k + 1 \<Longrightarrow> lift (lift t i) (Suc k) = lift (lift t k) i"
- by (induct t arbitrary: i k) auto
-
-lemma lift_subst [simp]:
- "j < i + 1 \<Longrightarrow> lift (t[s/j]) i = (lift t (i + 1)) [lift s i / j]"
- by (induct t arbitrary: i j s)
- (simp_all add: diff_Suc subst_Var lift_lift split: nat.split)
-
-lemma lift_subst_lt:
- "i < j + 1 \<Longrightarrow> lift (t[s/j]) i = (lift t i) [lift s i / j + 1]"
- by (induct t arbitrary: i j s) (simp_all add: subst_Var lift_lift)
-
-lemma subst_lift [simp]:
- "(lift t k)[s/k] = t"
- by (induct t arbitrary: k s) simp_all
-
-lemma subst_subst:
- "i < j + 1 \<Longrightarrow> t[lift v i / Suc j][u[v/j]/i] = t[u/i][v/j]"
- by (induct t arbitrary: i j u v)
- (simp_all add: diff_Suc subst_Var lift_lift [symmetric] lift_subst_lt
- split: nat.split)
-
-
-subsection {* Equivalence proof for optimized substitution *}
-
-lemma liftn_0 [simp]: "liftn 0 t k = t"
- by (induct t arbitrary: k) (simp_all add: subst_Var)
-
-lemma liftn_lift [simp]: "liftn (Suc n) t k = lift (liftn n t k) k"
- by (induct t arbitrary: k) (simp_all add: subst_Var)
-
-lemma substn_subst_n [simp]: "substn t s n = t[liftn n s 0 / n]"
- by (induct t arbitrary: n) (simp_all add: subst_Var)
-
-theorem substn_subst_0: "substn t s 0 = t[s/0]"
- by simp
-
-
-subsection {* Preservation theorems *}
-
-text {* Not used in the Church-Rosser proof, but in Strong
- Normalization. \medskip *}
-
-theorem subst_preserves_beta [simp]:
- "r \<rightarrow>\<^sub>\<beta> s ==> r[t/i] \<rightarrow>\<^sub>\<beta> s[t/i]"
- by (induct arbitrary: t i set: beta) (simp_all add: subst_subst [symmetric])
-
-theorem subst_preserves_beta': "r \<rightarrow>\<^sub>\<beta>\<^sup>* s ==> r[t/i] \<rightarrow>\<^sub>\<beta>\<^sup>* s[t/i]"
- apply (induct set: rtranclp)
- apply (rule rtranclp.rtrancl_refl)
- apply (erule rtranclp.rtrancl_into_rtrancl)
- apply (erule subst_preserves_beta)
- done
-
-theorem lift_preserves_beta [simp]:
- "r \<rightarrow>\<^sub>\<beta> s ==> lift r i \<rightarrow>\<^sub>\<beta> lift s i"
- by (induct arbitrary: i set: beta) auto
-
-theorem lift_preserves_beta': "r \<rightarrow>\<^sub>\<beta>\<^sup>* s ==> lift r i \<rightarrow>\<^sub>\<beta>\<^sup>* lift s i"
- apply (induct set: rtranclp)
- apply (rule rtranclp.rtrancl_refl)
- apply (erule rtranclp.rtrancl_into_rtrancl)
- apply (erule lift_preserves_beta)
- done
-
-theorem subst_preserves_beta2 [simp]: "r \<rightarrow>\<^sub>\<beta> s ==> t[r/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t[s/i]"
- apply (induct t arbitrary: r s i)
- apply (simp add: subst_Var r_into_rtranclp)
- apply (simp add: rtrancl_beta_App)
- apply (simp add: rtrancl_beta_Abs)
- done
-
-theorem subst_preserves_beta2': "r \<rightarrow>\<^sub>\<beta>\<^sup>* s ==> t[r/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t[s/i]"
- apply (induct set: rtranclp)
- apply (rule rtranclp.rtrancl_refl)
- apply (erule rtranclp_trans)
- apply (erule subst_preserves_beta2)
- done
-
-end
--- a/src/HOL/Lambda/ListApplication.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,145 +0,0 @@
-(* Title: HOL/Lambda/ListApplication.thy
- Author: Tobias Nipkow
- Copyright 1998 TU Muenchen
-*)
-
-header {* Application of a term to a list of terms *}
-
-theory ListApplication imports Lambda begin
-
-abbreviation
- list_application :: "dB => dB list => dB" (infixl "\<degree>\<degree>" 150) where
- "t \<degree>\<degree> ts == foldl (op \<degree>) t ts"
-
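
Concretely, \<degree>\<degree> is a left fold, and every term decomposes into a Var or Abs head applied to a spine of arguments (lemma ex_head_tail below). A small Haskell illustration over the DB type from the first sketch in this changeset; apps and unapp are hypothetical names.

-- t \<degree>\<degree> ts as a left fold, together with the inverse "spine view".
apps :: DB -> [DB] -> DB
apps = foldl App

unapp :: DB -> (DB, [DB])
unapp (App s t) = let (h, ts) = unapp s in (h, ts ++ [t])
unapp t         = (t, [])

-- e.g.  apps (Var 0) [Var 1, Var 2]                ==  App (App (Var 0) (Var 1)) (Var 2)
--       unapp (App (App (Var 0) (Var 1)) (Var 2))  ==  (Var 0, [Var 1, Var 2])
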
-lemma apps_eq_tail_conv [iff]: "(r \<degree>\<degree> ts = s \<degree>\<degree> ts) = (r = s)"
- by (induct ts rule: rev_induct) auto
-
-lemma Var_eq_apps_conv [iff]: "(Var m = s \<degree>\<degree> ss) = (Var m = s \<and> ss = [])"
- by (induct ss arbitrary: s) auto
-
-lemma Var_apps_eq_Var_apps_conv [iff]:
- "(Var m \<degree>\<degree> rs = Var n \<degree>\<degree> ss) = (m = n \<and> rs = ss)"
- apply (induct rs arbitrary: ss rule: rev_induct)
- apply simp
- apply blast
- apply (induct_tac ss rule: rev_induct)
- apply auto
- done
-
-lemma App_eq_foldl_conv:
- "(r \<degree> s = t \<degree>\<degree> ts) =
- (if ts = [] then r \<degree> s = t
- else (\<exists>ss. ts = ss @ [s] \<and> r = t \<degree>\<degree> ss))"
- apply (rule_tac xs = ts in rev_exhaust)
- apply auto
- done
-
-lemma Abs_eq_apps_conv [iff]:
- "(Abs r = s \<degree>\<degree> ss) = (Abs r = s \<and> ss = [])"
- by (induct ss rule: rev_induct) auto
-
-lemma apps_eq_Abs_conv [iff]: "(s \<degree>\<degree> ss = Abs r) = (s = Abs r \<and> ss = [])"
- by (induct ss rule: rev_induct) auto
-
-lemma Abs_apps_eq_Abs_apps_conv [iff]:
- "(Abs r \<degree>\<degree> rs = Abs s \<degree>\<degree> ss) = (r = s \<and> rs = ss)"
- apply (induct rs arbitrary: ss rule: rev_induct)
- apply simp
- apply blast
- apply (induct_tac ss rule: rev_induct)
- apply auto
- done
-
-lemma Abs_App_neq_Var_apps [iff]:
- "Abs s \<degree> t \<noteq> Var n \<degree>\<degree> ss"
- by (induct ss arbitrary: s t rule: rev_induct) auto
-
-lemma Var_apps_neq_Abs_apps [iff]:
- "Var n \<degree>\<degree> ts \<noteq> Abs r \<degree>\<degree> ss"
- apply (induct ss arbitrary: ts rule: rev_induct)
- apply simp
- apply (induct_tac ts rule: rev_induct)
- apply auto
- done
-
-lemma ex_head_tail:
- "\<exists>ts h. t = h \<degree>\<degree> ts \<and> ((\<exists>n. h = Var n) \<or> (\<exists>u. h = Abs u))"
- apply (induct t)
- apply (rule_tac x = "[]" in exI)
- apply simp
- apply clarify
- apply (rename_tac ts1 ts2 h1 h2)
- apply (rule_tac x = "ts1 @ [h2 \<degree>\<degree> ts2]" in exI)
- apply simp
- apply simp
- done
-
-lemma size_apps [simp]:
- "size (r \<degree>\<degree> rs) = size r + foldl (op +) 0 (map size rs) + length rs"
- by (induct rs rule: rev_induct) auto
-
-lemma lem0: "[| (0::nat) < k; m <= n |] ==> m < n + k"
- by simp
-
-lemma lift_map [simp]:
- "lift (t \<degree>\<degree> ts) i = lift t i \<degree>\<degree> map (\<lambda>t. lift t i) ts"
- by (induct ts arbitrary: t) simp_all
-
-lemma subst_map [simp]:
- "subst (t \<degree>\<degree> ts) u i = subst t u i \<degree>\<degree> map (\<lambda>t. subst t u i) ts"
- by (induct ts arbitrary: t) simp_all
-
-lemma app_last: "(t \<degree>\<degree> ts) \<degree> u = t \<degree>\<degree> (ts @ [u])"
- by simp
-
-
-text {* \medskip A customized induction schema for @{text "\<degree>\<degree>"}. *}
-
-lemma lem:
- assumes "!!n ts. \<forall>t \<in> set ts. P t ==> P (Var n \<degree>\<degree> ts)"
- and "!!u ts. [| P u; \<forall>t \<in> set ts. P t |] ==> P (Abs u \<degree>\<degree> ts)"
- shows "size t = n \<Longrightarrow> P t"
- apply (induct n arbitrary: t rule: nat_less_induct)
- apply (cut_tac t = t in ex_head_tail)
- apply clarify
- apply (erule disjE)
- apply clarify
- apply (rule assms)
- apply clarify
- apply (erule allE, erule impE)
- prefer 2
- apply (erule allE, erule mp, rule refl)
- apply simp
- apply (rule lem0)
- apply force
- apply (rule elem_le_sum)
- apply force
- apply clarify
- apply (rule assms)
- apply (erule allE, erule impE)
- prefer 2
- apply (erule allE, erule mp, rule refl)
- apply simp
- apply clarify
- apply (erule allE, erule impE)
- prefer 2
- apply (erule allE, erule mp, rule refl)
- apply simp
- apply (rule le_imp_less_Suc)
- apply (rule trans_le_add1)
- apply (rule trans_le_add2)
- apply (rule elem_le_sum)
- apply force
- done
-
-theorem Apps_dB_induct:
- assumes "!!n ts. \<forall>t \<in> set ts. P t ==> P (Var n \<degree>\<degree> ts)"
- and "!!u ts. [| P u; \<forall>t \<in> set ts. P t |] ==> P (Abs u \<degree>\<degree> ts)"
- shows "P t"
- apply (rule_tac t = t in lem)
- prefer 3
- apply (rule refl)
- using assms apply iprover+
- done
-
-end
--- a/src/HOL/Lambda/ListBeta.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-(* Title: HOL/Lambda/ListBeta.thy
- Author: Tobias Nipkow
- Copyright 1998 TU Muenchen
-*)
-
-header {* Lifting beta-reduction to lists *}
-
-theory ListBeta imports ListApplication ListOrder begin
-
-text {*
- Lifting beta-reduction to lists of terms, reducing exactly one element.
-*}
-
-abbreviation
- list_beta :: "dB list => dB list => bool" (infixl "=>" 50) where
- "rs => ss == step1 beta rs ss"
-
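
One functional reading of this lifting (an illustrative sketch only; step1All is a hypothetical name): given a function enumerating the one-step reducts of a single element, enumerate the lists obtained by reducing exactly one position.

-- All lists obtained from xs by replacing exactly one element with one of its reducts.
step1All :: (a -> [a]) -> [a] -> [[a]]
step1All _ []       = []
step1All f (x : xs) = [ x' : xs | x' <- f x ] ++ [ x : xs' | xs' <- step1All f xs ]
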
-lemma head_Var_reduction:
- "Var n \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> v \<Longrightarrow> \<exists>ss. rs => ss \<and> v = Var n \<degree>\<degree> ss"
- apply (induct u == "Var n \<degree>\<degree> rs" v arbitrary: rs set: beta)
- apply simp
- apply (rule_tac xs = rs in rev_exhaust)
- apply simp
- apply (atomize, force intro: append_step1I)
- apply (rule_tac xs = rs in rev_exhaust)
- apply simp
- apply (auto 0 3 intro: disjI2 [THEN append_step1I])
- done
-
-lemma apps_betasE [elim!]:
- assumes major: "r \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> s"
- and cases: "!!r'. [| r \<rightarrow>\<^sub>\<beta> r'; s = r' \<degree>\<degree> rs |] ==> R"
- "!!rs'. [| rs => rs'; s = r \<degree>\<degree> rs' |] ==> R"
- "!!t u us. [| r = Abs t; rs = u # us; s = t[u/0] \<degree>\<degree> us |] ==> R"
- shows R
-proof -
- from major have
- "(\<exists>r'. r \<rightarrow>\<^sub>\<beta> r' \<and> s = r' \<degree>\<degree> rs) \<or>
- (\<exists>rs'. rs => rs' \<and> s = r \<degree>\<degree> rs') \<or>
- (\<exists>t u us. r = Abs t \<and> rs = u # us \<and> s = t[u/0] \<degree>\<degree> us)"
- apply (induct u == "r \<degree>\<degree> rs" s arbitrary: r rs set: beta)
- apply (case_tac r)
- apply simp
- apply (simp add: App_eq_foldl_conv)
- apply (split split_if_asm)
- apply simp
- apply blast
- apply simp
- apply (simp add: App_eq_foldl_conv)
- apply (split split_if_asm)
- apply simp
- apply simp
- apply (drule App_eq_foldl_conv [THEN iffD1])
- apply (split split_if_asm)
- apply simp
- apply blast
- apply (force intro!: disjI1 [THEN append_step1I])
- apply (drule App_eq_foldl_conv [THEN iffD1])
- apply (split split_if_asm)
- apply simp
- apply blast
- apply (clarify, auto 0 3 intro!: exI intro: append_step1I)
- done
- with cases show ?thesis by blast
-qed
-
-lemma apps_preserves_beta [simp]:
- "r \<rightarrow>\<^sub>\<beta> s ==> r \<degree>\<degree> ss \<rightarrow>\<^sub>\<beta> s \<degree>\<degree> ss"
- by (induct ss rule: rev_induct) auto
-
-lemma apps_preserves_beta2 [simp]:
- "r ->> s ==> r \<degree>\<degree> ss ->> s \<degree>\<degree> ss"
- apply (induct set: rtranclp)
- apply blast
- apply (blast intro: apps_preserves_beta rtranclp.rtrancl_into_rtrancl)
- done
-
-lemma apps_preserves_betas [simp]:
- "rs => ss \<Longrightarrow> r \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> r \<degree>\<degree> ss"
- apply (induct rs arbitrary: ss rule: rev_induct)
- apply simp
- apply simp
- apply (rule_tac xs = ss in rev_exhaust)
- apply simp
- apply simp
- apply (drule Snoc_step1_SnocD)
- apply blast
- done
-
-end
--- a/src/HOL/Lambda/ListOrder.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,124 +0,0 @@
-(* Title: HOL/Lambda/ListOrder.thy
- Author: Tobias Nipkow
- Copyright 1998 TU Muenchen
-*)
-
-header {* Lifting an order to lists of elements *}
-
-theory ListOrder imports Main begin
-
-declare [[syntax_ambiguity_level = 100]]
-
-
-text {*
- Lifting an order to lists of elements, relating exactly one
- element.
-*}
-
-definition
- step1 :: "('a => 'a => bool) => 'a list => 'a list => bool" where
- "step1 r =
- (\<lambda>ys xs. \<exists>us z z' vs. xs = us @ z # vs \<and> r z' z \<and> ys =
- us @ z' # vs)"
-
-
-lemma step1_converse [simp]: "step1 (r^--1) = (step1 r)^--1"
- apply (unfold step1_def)
- apply (blast intro!: order_antisym)
- done
-
-lemma in_step1_converse [iff]: "(step1 (r^--1) x y) = ((step1 r)^--1 x y)"
- apply auto
- done
-
-lemma not_Nil_step1 [iff]: "\<not> step1 r [] xs"
- apply (unfold step1_def)
- apply blast
- done
-
-lemma not_step1_Nil [iff]: "\<not> step1 r xs []"
- apply (unfold step1_def)
- apply blast
- done
-
-lemma Cons_step1_Cons [iff]:
- "(step1 r (y # ys) (x # xs)) =
- (r y x \<and> xs = ys \<or> x = y \<and> step1 r ys xs)"
- apply (unfold step1_def)
- apply (rule iffI)
- apply (erule exE)
- apply (rename_tac ts)
- apply (case_tac ts)
- apply fastsimp
- apply force
- apply (erule disjE)
- apply blast
- apply (blast intro: Cons_eq_appendI)
- done
-
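
The Nil and Cons equations just proved give step1 a direct recursive reading. A minimal Haskell sketch, assuming decidable equality on the element type (this standalone step1 is illustrative, not the Isabelle definition):

-- step1 r ys xs: ys equals xs except at one position, where xs holds z, ys holds z', and r z' z.
step1 :: Eq a => (a -> a -> Bool) -> [a] -> [a] -> Bool
step1 _ []       _        = False
step1 _ _        []       = False
step1 r (y : ys) (x : xs) = (r y x && ys == xs) || (y == x && step1 r ys xs)
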
-lemma append_step1I:
- "step1 r ys xs \<and> vs = us \<or> ys = xs \<and> step1 r vs us
- ==> step1 r (ys @ vs) (xs @ us)"
- apply (unfold step1_def)
- apply auto
- apply blast
- apply (blast intro: append_eq_appendI)
- done
-
-lemma Cons_step1E [elim!]:
- assumes "step1 r ys (x # xs)"
- and "!!y. ys = y # xs \<Longrightarrow> r y x \<Longrightarrow> R"
- and "!!zs. ys = x # zs \<Longrightarrow> step1 r zs xs \<Longrightarrow> R"
- shows R
- using assms
- apply (cases ys)
- apply (simp add: step1_def)
- apply blast
- done
-
-lemma Snoc_step1_SnocD:
- "step1 r (ys @ [y]) (xs @ [x])
- ==> (step1 r ys xs \<and> y = x \<or> ys = xs \<and> r y x)"
- apply (unfold step1_def)
- apply (clarify del: disjCI)
- apply (rename_tac vs)
- apply (rule_tac xs = vs in rev_exhaust)
- apply force
- apply simp
- apply blast
- done
-
-lemma Cons_acc_step1I [intro!]:
- "accp r x ==> accp (step1 r) xs \<Longrightarrow> accp (step1 r) (x # xs)"
- apply (induct arbitrary: xs set: accp)
- apply (erule thin_rl)
- apply (erule accp_induct)
- apply (rule accp.accI)
- apply blast
- done
-
-lemma lists_accD: "listsp (accp r) xs ==> accp (step1 r) xs"
- apply (induct set: listsp)
- apply (rule accp.accI)
- apply simp
- apply (rule accp.accI)
- apply (fast dest: accp_downward)
- done
-
-lemma ex_step1I:
- "[| x \<in> set xs; r y x |]
- ==> \<exists>ys. step1 r ys xs \<and> y \<in> set ys"
- apply (unfold step1_def)
- apply (drule in_set_conv_decomp [THEN iffD1])
- apply force
- done
-
-lemma lists_accI: "accp (step1 r) xs ==> listsp (accp r) xs"
- apply (induct set: accp)
- apply clarify
- apply (rule accp.accI)
- apply (drule_tac r=r in ex_step1I, assumption)
- apply blast
- done
-
-end
--- a/src/HOL/Lambda/NormalForm.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,247 +0,0 @@
-(* Title: HOL/Lambda/NormalForm.thy
- Author: Stefan Berghofer, TU Muenchen, 2003
-*)
-
-header {* Inductive characterization of lambda terms in normal form *}
-
-theory NormalForm
-imports ListBeta
-begin
-
-subsection {* Terms in normal form *}
-
-definition
- listall :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> bool" where
- "listall P xs \<equiv> (\<forall>i. i < length xs \<longrightarrow> P (xs ! i))"
-
-declare listall_def [extraction_expand_def]
-
-theorem listall_nil: "listall P []"
- by (simp add: listall_def)
-
-theorem listall_nil_eq [simp]: "listall P [] = True"
- by (iprover intro: listall_nil)
-
-theorem listall_cons: "P x \<Longrightarrow> listall P xs \<Longrightarrow> listall P (x # xs)"
- apply (simp add: listall_def)
- apply (rule allI impI)+
- apply (case_tac i)
- apply simp+
- done
-
-theorem listall_cons_eq [simp]: "listall P (x # xs) = (P x \<and> listall P xs)"
- apply (rule iffI)
- prefer 2
- apply (erule conjE)
- apply (erule listall_cons)
- apply assumption
- apply (unfold listall_def)
- apply (rule conjI)
- apply (erule_tac x=0 in allE)
- apply simp
- apply simp
- apply (rule allI)
- apply (erule_tac x="Suc i" in allE)
- apply simp
- done
-
-lemma listall_conj1: "listall (\<lambda>x. P x \<and> Q x) xs \<Longrightarrow> listall P xs"
- by (induct xs) simp_all
-
-lemma listall_conj2: "listall (\<lambda>x. P x \<and> Q x) xs \<Longrightarrow> listall Q xs"
- by (induct xs) simp_all
-
-lemma listall_app: "listall P (xs @ ys) = (listall P xs \<and> listall P ys)"
- apply (induct xs)
- apply (rule iffI, simp, simp)
- apply (rule iffI, simp, simp)
- done
-
-lemma listall_snoc [simp]: "listall P (xs @ [x]) = (listall P xs \<and> P x)"
- apply (rule iffI)
- apply (simp add: listall_app)+
- done
-
-lemma listall_cong [cong, extraction_expand]:
- "xs = ys \<Longrightarrow> listall P xs = listall P ys"
- -- {* Currently needed for strange technical reasons *}
- by (unfold listall_def) simp
-
-text {*
-@{term "listsp"} is equivalent to @{term "listall"}, but cannot be
-used for program extraction.
-*}
-
-lemma listall_listsp_eq: "listall P xs = listsp P xs"
- by (induct xs) (auto intro: listsp.intros)
-
-inductive NF :: "dB \<Rightarrow> bool"
-where
- App: "listall NF ts \<Longrightarrow> NF (Var x \<degree>\<degree> ts)"
-| Abs: "NF t \<Longrightarrow> NF (Abs t)"
-monos listall_def
-
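
Operationally, NF says that the spine head is a variable whose arguments are all normal, or that the term is an unapplied abstraction with a normal body; listall corresponds to Haskell's all. A sketch reusing DB and the unapp spine view from the ListApplication sketch above (isNF is a hypothetical name):

-- No beta-redex occurs anywhere in the term (cf. lemma NF_eq below).
isNF :: DB -> Bool
isNF t = case unapp t of
  (Var _, ts) -> all isNF ts    -- rule App
  (Abs s, []) -> isNF s         -- rule Abs
  _           -> False          -- an applied abstraction is a redex
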
-lemma nat_eq_dec: "\<And>n::nat. m = n \<or> m \<noteq> n"
- apply (induct m)
- apply (case_tac n)
- apply (case_tac [3] n)
- apply (simp only: nat.simps, iprover?)+
- done
-
-lemma nat_le_dec: "\<And>n::nat. m < n \<or> \<not> (m < n)"
- apply (induct m)
- apply (case_tac n)
- apply (case_tac [3] n)
- apply (simp del: simp_thms, iprover?)+
- done
-
-lemma App_NF_D: assumes NF: "NF (Var n \<degree>\<degree> ts)"
- shows "listall NF ts" using NF
- by cases simp_all
-
-
-subsection {* Properties of @{text NF} *}
-
-lemma Var_NF: "NF (Var n)"
- apply (subgoal_tac "NF (Var n \<degree>\<degree> [])")
- apply simp
- apply (rule NF.App)
- apply simp
- done
-
-lemma Abs_NF:
- assumes NF: "NF (Abs t \<degree>\<degree> ts)"
- shows "ts = []" using NF
-proof cases
- case (App us i)
- thus ?thesis by (simp add: Var_apps_neq_Abs_apps [THEN not_sym])
-next
- case (Abs u)
- thus ?thesis by simp
-qed
-
-lemma subst_terms_NF: "listall NF ts \<Longrightarrow>
- listall (\<lambda>t. \<forall>i j. NF (t[Var i/j])) ts \<Longrightarrow>
- listall NF (map (\<lambda>t. t[Var i/j]) ts)"
- by (induct ts) simp_all
-
-lemma subst_Var_NF: "NF t \<Longrightarrow> NF (t[Var i/j])"
- apply (induct arbitrary: i j set: NF)
- apply simp
- apply (frule listall_conj1)
- apply (drule listall_conj2)
- apply (drule_tac i=i and j=j in subst_terms_NF)
- apply assumption
- apply (rule_tac m=x and n=j in nat_eq_dec [THEN disjE, standard])
- apply simp
- apply (erule NF.App)
- apply (rule_tac m=j and n=x in nat_le_dec [THEN disjE, standard])
- apply simp
- apply (iprover intro: NF.App)
- apply simp
- apply (iprover intro: NF.App)
- apply simp
- apply (iprover intro: NF.Abs)
- done
-
-lemma app_Var_NF: "NF t \<Longrightarrow> \<exists>t'. t \<degree> Var i \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
- apply (induct set: NF)
- apply (simplesubst app_last) --{*Using @{text subst} makes extraction fail*}
- apply (rule exI)
- apply (rule conjI)
- apply (rule rtranclp.rtrancl_refl)
- apply (rule NF.App)
- apply (drule listall_conj1)
- apply (simp add: listall_app)
- apply (rule Var_NF)
- apply (rule exI)
- apply (rule conjI)
- apply (rule rtranclp.rtrancl_into_rtrancl)
- apply (rule rtranclp.rtrancl_refl)
- apply (rule beta)
- apply (erule subst_Var_NF)
- done
-
-lemma lift_terms_NF: "listall NF ts \<Longrightarrow>
- listall (\<lambda>t. \<forall>i. NF (lift t i)) ts \<Longrightarrow>
- listall NF (map (\<lambda>t. lift t i) ts)"
- by (induct ts) simp_all
-
-lemma lift_NF: "NF t \<Longrightarrow> NF (lift t i)"
- apply (induct arbitrary: i set: NF)
- apply (frule listall_conj1)
- apply (drule listall_conj2)
- apply (drule_tac i=i in lift_terms_NF)
- apply assumption
- apply (rule_tac m=x and n=i in nat_le_dec [THEN disjE, standard])
- apply simp
- apply (rule NF.App)
- apply assumption
- apply simp
- apply (rule NF.App)
- apply assumption
- apply simp
- apply (rule NF.Abs)
- apply simp
- done
-
-text {*
-@{term NF} characterizes exactly the terms that are in normal form.
-*}
-
-lemma NF_eq: "NF t = (\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t')"
-proof
- assume "NF t"
- then have "\<And>t'. \<not> t \<rightarrow>\<^sub>\<beta> t'"
- proof induct
- case (App ts t)
- show ?case
- proof
- assume "Var t \<degree>\<degree> ts \<rightarrow>\<^sub>\<beta> t'"
- then obtain rs where "ts => rs"
- by (iprover dest: head_Var_reduction)
- with App show False
- by (induct rs arbitrary: ts) auto
- qed
- next
- case (Abs t)
- show ?case
- proof
- assume "Abs t \<rightarrow>\<^sub>\<beta> t'"
- then show False using Abs by cases simp_all
- qed
- qed
- then show "\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t'" ..
-next
- assume H: "\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t'"
- then show "NF t"
- proof (induct t rule: Apps_dB_induct)
- case (1 n ts)
- then have "\<forall>ts'. \<not> ts => ts'"
- by (iprover intro: apps_preserves_betas)
- with 1(1) have "listall NF ts"
- by (induct ts) auto
- then show ?case by (rule NF.App)
- next
- case (2 u ts)
- show ?case
- proof (cases ts)
- case Nil
- from 2 have "\<forall>u'. \<not> u \<rightarrow>\<^sub>\<beta> u'"
- by (auto intro: apps_preserves_beta)
- then have "NF u" by (rule 2)
- then have "NF (Abs u)" by (rule NF.Abs)
- with Nil show ?thesis by simp
- next
- case (Cons r rs)
- have "Abs u \<degree> r \<rightarrow>\<^sub>\<beta> u[r/0]" ..
- then have "Abs u \<degree> r \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> u[r/0] \<degree>\<degree> rs"
- by (rule apps_preserves_beta)
- with Cons have "Abs u \<degree>\<degree> ts \<rightarrow>\<^sub>\<beta> u[r/0] \<degree>\<degree> rs"
- by simp
- with 2 show ?thesis by iprover
- qed
- qed
-qed
-
-end
--- a/src/HOL/Lambda/ParRed.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,115 +0,0 @@
-(* Title: HOL/Lambda/ParRed.thy
- Author: Tobias Nipkow
- Copyright 1995 TU Muenchen
-
-Properties of => and "cd", in particular the diamond property of => and
-confluence of beta.
-*)
-
-header {* Parallel reduction and complete developments *}
-
-theory ParRed imports Lambda Commutation begin
-
-
-subsection {* Parallel reduction *}
-
-inductive par_beta :: "[dB, dB] => bool" (infixl "=>" 50)
- where
- var [simp, intro!]: "Var n => Var n"
- | abs [simp, intro!]: "s => t ==> Abs s => Abs t"
- | app [simp, intro!]: "[| s => s'; t => t' |] ==> s \<degree> t => s' \<degree> t'"
- | beta [simp, intro!]: "[| s => s'; t => t' |] ==> (Abs s) \<degree> t => s'[t'/0]"
-
-inductive_cases par_beta_cases [elim!]:
- "Var n => t"
- "Abs s => Abs t"
- "(Abs s) \<degree> t => u"
- "s \<degree> t => u"
- "Abs s => t"
-
-
-subsection {* Inclusions *}
-
-text {* @{text "beta \<subseteq> par_beta \<subseteq> beta^*"} \medskip *}
-
-lemma par_beta_varL [simp]:
- "(Var n => t) = (t = Var n)"
- by blast
-
-lemma par_beta_refl [simp]: "t => t" (* par_beta_refl [intro!] causes search to blow up *)
- by (induct t) simp_all
-
-lemma beta_subset_par_beta: "beta <= par_beta"
- apply (rule predicate2I)
- apply (erule beta.induct)
- apply (blast intro!: par_beta_refl)+
- done
-
-lemma par_beta_subset_beta: "par_beta <= beta^**"
- apply (rule predicate2I)
- apply (erule par_beta.induct)
- apply blast
- apply (blast del: rtranclp.rtrancl_refl intro: rtranclp.rtrancl_into_rtrancl)+
- -- {* @{thm[source] rtrancl_refl} complicates the proof by increasing the branching factor *}
- done
-
-
-subsection {* Misc properties of @{text "par_beta"} *}
-
-lemma par_beta_lift [simp]:
- "t => t' \<Longrightarrow> lift t n => lift t' n"
- by (induct t arbitrary: t' n) fastsimp+
-
-lemma par_beta_subst:
- "s => s' \<Longrightarrow> t => t' \<Longrightarrow> t[s/n] => t'[s'/n]"
- apply (induct t arbitrary: s s' t' n)
- apply (simp add: subst_Var)
- apply (erule par_beta_cases)
- apply simp
- apply (simp add: subst_subst [symmetric])
- apply (fastsimp intro!: par_beta_lift)
- apply fastsimp
- done
-
-
-subsection {* Confluence (directly) *}
-
-lemma diamond_par_beta: "diamond par_beta"
- apply (unfold diamond_def commute_def square_def)
- apply (rule impI [THEN allI [THEN allI]])
- apply (erule par_beta.induct)
- apply (blast intro!: par_beta_subst)+
- done
-
-
-subsection {* Complete developments *}
-
-fun
- "cd" :: "dB => dB"
-where
- "cd (Var n) = Var n"
-| "cd (Var n \<degree> t) = Var n \<degree> cd t"
-| "cd ((s1 \<degree> s2) \<degree> t) = cd (s1 \<degree> s2) \<degree> cd t"
-| "cd (Abs u \<degree> t) = (cd u)[cd t/0]"
-| "cd (Abs s) = Abs (cd s)"
-
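
For intuition, cd contracts every redex visible in the term in a single pass; lemma par_beta_cd below then states that any parallel reduct of s can still be developed to cd s. A Haskell transcription of the equations above, assuming DB and subst from the first sketch in this changeset:

-- Complete development: contract all currently visible redexes at once.
cd :: DB -> DB
cd (Var n)         = Var n
cd (App (Abs u) t) = subst (cd u) (cd t) 0
cd (App s t)       = App (cd s) (cd t)
cd (Abs s)         = Abs (cd s)

-- e.g.  cd (App (Abs (Var 0)) (App (Abs (Var 0)) (Var 3)))  ==  Var 3
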
-lemma par_beta_cd: "s => t \<Longrightarrow> t => cd s"
- apply (induct s arbitrary: t rule: cd.induct)
- apply auto
- apply (fast intro!: par_beta_subst)
- done
-
-
-subsection {* Confluence (via complete developments) *}
-
-lemma diamond_par_beta2: "diamond par_beta"
- apply (unfold diamond_def commute_def square_def)
- apply (blast intro: par_beta_cd)
- done
-
-theorem beta_confluent: "confluent beta"
- apply (rule diamond_par_beta2 diamond_to_confluence
- par_beta_subset_beta beta_subset_par_beta)+
- done
-
-end
--- a/src/HOL/Lambda/README.html Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-
-<HTML>
-
-<HEAD>
- <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
- <TITLE>HOL/Lambda</TITLE>
-</HEAD>
-
-<BODY>
-
-<H1>Lambda Calculus in de Bruijn's Notation</H1>
-
-This theory defines lambda-calculus terms with de Bruijn indices and proves
-confluence of beta, eta and beta+eta.
-<P>
-
-
-The paper
-<A HREF="http://www.in.tum.de/~nipkow/pubs/jar2001.html">
-More Church-Rosser Proofs (in Isabelle/HOL)</A>
-describes the whole theory.
-
-<HR>
-
-<P>Last modified 20.5.2000
-
-</BODY>
-</HTML>
--- a/src/HOL/Lambda/ROOT.ML Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-no_document use_thys ["Code_Integer"];
-use_thys ["Eta", "StrongNorm", "Standardization", "WeakNorm"];
--- a/src/HOL/Lambda/Standardization.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,360 +0,0 @@
-(* Title: HOL/Lambda/Standardization.thy
- Author: Stefan Berghofer
- Copyright 2005 TU Muenchen
-*)
-
-header {* Standardization *}
-
-theory Standardization
-imports NormalForm
-begin
-
-text {*
-Based on lecture notes by Ralph Matthes \cite{Matthes-ESSLLI2000},
-original proof idea due to Ralph Loader \cite{Loader1998}.
-*}
-
-
-subsection {* Standard reduction relation *}
-
-declare listrel_mono [mono_set]
-
-inductive
- sred :: "dB \<Rightarrow> dB \<Rightarrow> bool" (infixl "\<rightarrow>\<^sub>s" 50)
- and sredlist :: "dB list \<Rightarrow> dB list \<Rightarrow> bool" (infixl "[\<rightarrow>\<^sub>s]" 50)
-where
- "s [\<rightarrow>\<^sub>s] t \<equiv> listrelp op \<rightarrow>\<^sub>s s t"
-| Var: "rs [\<rightarrow>\<^sub>s] rs' \<Longrightarrow> Var x \<degree>\<degree> rs \<rightarrow>\<^sub>s Var x \<degree>\<degree> rs'"
-| Abs: "r \<rightarrow>\<^sub>s r' \<Longrightarrow> ss [\<rightarrow>\<^sub>s] ss' \<Longrightarrow> Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> ss'"
-| Beta: "r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>s t \<Longrightarrow> Abs r \<degree> s \<degree>\<degree> ss \<rightarrow>\<^sub>s t"
-
-lemma refl_listrelp: "\<forall>x\<in>set xs. R x x \<Longrightarrow> listrelp R xs xs"
- by (induct xs) (auto intro: listrelp.intros)
-
-lemma refl_sred: "t \<rightarrow>\<^sub>s t"
- by (induct t rule: Apps_dB_induct) (auto intro: refl_listrelp sred.intros)
-
-lemma refl_sreds: "ts [\<rightarrow>\<^sub>s] ts"
- by (simp add: refl_sred refl_listrelp)
-
-lemma listrelp_conj1: "listrelp (\<lambda>x y. R x y \<and> S x y) x y \<Longrightarrow> listrelp R x y"
- by (erule listrelp.induct) (auto intro: listrelp.intros)
-
-lemma listrelp_conj2: "listrelp (\<lambda>x y. R x y \<and> S x y) x y \<Longrightarrow> listrelp S x y"
- by (erule listrelp.induct) (auto intro: listrelp.intros)
-
-lemma listrelp_app:
- assumes xsys: "listrelp R xs ys"
- shows "listrelp R xs' ys' \<Longrightarrow> listrelp R (xs @ xs') (ys @ ys')" using xsys
- by (induct arbitrary: xs' ys') (auto intro: listrelp.intros)
-
-lemma lemma1:
- assumes r: "r \<rightarrow>\<^sub>s r'" and s: "s \<rightarrow>\<^sub>s s'"
- shows "r \<degree> s \<rightarrow>\<^sub>s r' \<degree> s'" using r
-proof induct
- case (Var rs rs' x)
- then have "rs [\<rightarrow>\<^sub>s] rs'" by (rule listrelp_conj1)
- moreover have "[s] [\<rightarrow>\<^sub>s] [s']" by (iprover intro: s listrelp.intros)
- ultimately have "rs @ [s] [\<rightarrow>\<^sub>s] rs' @ [s']" by (rule listrelp_app)
- hence "Var x \<degree>\<degree> (rs @ [s]) \<rightarrow>\<^sub>s Var x \<degree>\<degree> (rs' @ [s'])" by (rule sred.Var)
- thus ?case by (simp only: app_last)
-next
- case (Abs r r' ss ss')
- from Abs(3) have "ss [\<rightarrow>\<^sub>s] ss'" by (rule listrelp_conj1)
- moreover have "[s] [\<rightarrow>\<^sub>s] [s']" by (iprover intro: s listrelp.intros)
- ultimately have "ss @ [s] [\<rightarrow>\<^sub>s] ss' @ [s']" by (rule listrelp_app)
- with `r \<rightarrow>\<^sub>s r'` have "Abs r \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> (ss' @ [s'])"
- by (rule sred.Abs)
- thus ?case by (simp only: app_last)
-next
- case (Beta r u ss t)
- hence "r[u/0] \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s t \<degree> s'" by (simp only: app_last)
- hence "Abs r \<degree> u \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s t \<degree> s'" by (rule sred.Beta)
- thus ?case by (simp only: app_last)
-qed
-
-lemma lemma1':
- assumes ts: "ts [\<rightarrow>\<^sub>s] ts'"
- shows "r \<rightarrow>\<^sub>s r' \<Longrightarrow> r \<degree>\<degree> ts \<rightarrow>\<^sub>s r' \<degree>\<degree> ts'" using ts
- by (induct arbitrary: r r') (auto intro: lemma1)
-
-lemma lemma2_1:
- assumes beta: "t \<rightarrow>\<^sub>\<beta> u"
- shows "t \<rightarrow>\<^sub>s u" using beta
-proof induct
- case (beta s t)
- have "Abs s \<degree> t \<degree>\<degree> [] \<rightarrow>\<^sub>s s[t/0] \<degree>\<degree> []" by (iprover intro: sred.Beta refl_sred)
- thus ?case by simp
-next
- case (appL s t u)
- thus ?case by (iprover intro: lemma1 refl_sred)
-next
- case (appR s t u)
- thus ?case by (iprover intro: lemma1 refl_sred)
-next
- case (abs s t)
- hence "Abs s \<degree>\<degree> [] \<rightarrow>\<^sub>s Abs t \<degree>\<degree> []" by (iprover intro: sred.Abs listrelp.Nil)
- thus ?case by simp
-qed
-
-lemma listrelp_betas:
- assumes ts: "listrelp op \<rightarrow>\<^sub>\<beta>\<^sup>* ts ts'"
- shows "\<And>t t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> t \<degree>\<degree> ts \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<degree>\<degree> ts'" using ts
- by induct auto
-
-lemma lemma2_2:
- assumes t: "t \<rightarrow>\<^sub>s u"
- shows "t \<rightarrow>\<^sub>\<beta>\<^sup>* u" using t
- by induct (auto dest: listrelp_conj2
- intro: listrelp_betas apps_preserves_beta converse_rtranclp_into_rtranclp)
-
-lemma sred_lift:
- assumes s: "s \<rightarrow>\<^sub>s t"
- shows "lift s i \<rightarrow>\<^sub>s lift t i" using s
-proof (induct arbitrary: i)
- case (Var rs rs' x)
- hence "map (\<lambda>t. lift t i) rs [\<rightarrow>\<^sub>s] map (\<lambda>t. lift t i) rs'"
- by induct (auto intro: listrelp.intros)
- thus ?case by (cases "x < i") (auto intro: sred.Var)
-next
- case (Abs r r' ss ss')
- from Abs(3) have "map (\<lambda>t. lift t i) ss [\<rightarrow>\<^sub>s] map (\<lambda>t. lift t i) ss'"
- by induct (auto intro: listrelp.intros)
- thus ?case by (auto intro: sred.Abs Abs)
-next
- case (Beta r s ss t)
- thus ?case by (auto intro: sred.Beta)
-qed
-
-lemma lemma3:
- assumes r: "r \<rightarrow>\<^sub>s r'"
- shows "s \<rightarrow>\<^sub>s s' \<Longrightarrow> r[s/x] \<rightarrow>\<^sub>s r'[s'/x]" using r
-proof (induct arbitrary: s s' x)
- case (Var rs rs' y)
- hence "map (\<lambda>t. t[s/x]) rs [\<rightarrow>\<^sub>s] map (\<lambda>t. t[s'/x]) rs'"
- by induct (auto intro: listrelp.intros Var)
- moreover have "Var y[s/x] \<rightarrow>\<^sub>s Var y[s'/x]"
- proof (cases "y < x")
- case True thus ?thesis by simp (rule refl_sred)
- next
- case False
- thus ?thesis
- by (cases "y = x") (auto simp add: Var intro: refl_sred)
- qed
- ultimately show ?case by simp (rule lemma1')
-next
- case (Abs r r' ss ss')
- from Abs(4) have "lift s 0 \<rightarrow>\<^sub>s lift s' 0" by (rule sred_lift)
- hence "r[lift s 0/Suc x] \<rightarrow>\<^sub>s r'[lift s' 0/Suc x]" by (fast intro: Abs.hyps)
- moreover from Abs(3) have "map (\<lambda>t. t[s/x]) ss [\<rightarrow>\<^sub>s] map (\<lambda>t. t[s'/x]) ss'"
- by induct (auto intro: listrelp.intros Abs)
- ultimately show ?case by simp (rule sred.Abs)
-next
- case (Beta r u ss t)
- thus ?case by (auto simp add: subst_subst intro: sred.Beta)
-qed
-
-lemma lemma4_aux:
- assumes rs: "listrelp (\<lambda>t u. t \<rightarrow>\<^sub>s u \<and> (\<forall>r. u \<rightarrow>\<^sub>\<beta> r \<longrightarrow> t \<rightarrow>\<^sub>s r)) rs rs'"
- shows "rs' => ss \<Longrightarrow> rs [\<rightarrow>\<^sub>s] ss" using rs
-proof (induct arbitrary: ss)
- case Nil
- thus ?case by cases (auto intro: listrelp.Nil)
-next
- case (Cons x y xs ys)
- note Cons' = Cons
- show ?case
- proof (cases ss)
- case Nil with Cons show ?thesis by simp
- next
- case (Cons y' ys')
- hence ss: "ss = y' # ys'" by simp
- from Cons Cons' have "y \<rightarrow>\<^sub>\<beta> y' \<and> ys' = ys \<or> y' = y \<and> ys => ys'" by simp
- hence "x # xs [\<rightarrow>\<^sub>s] y' # ys'"
- proof
- assume H: "y \<rightarrow>\<^sub>\<beta> y' \<and> ys' = ys"
- with Cons' have "x \<rightarrow>\<^sub>s y'" by blast
- moreover from Cons' have "xs [\<rightarrow>\<^sub>s] ys" by (iprover dest: listrelp_conj1)
- ultimately have "x # xs [\<rightarrow>\<^sub>s] y' # ys" by (rule listrelp.Cons)
- with H show ?thesis by simp
- next
- assume H: "y' = y \<and> ys => ys'"
- with Cons' have "x \<rightarrow>\<^sub>s y'" by blast
- moreover from H have "xs [\<rightarrow>\<^sub>s] ys'" by (blast intro: Cons')
- ultimately show ?thesis by (rule listrelp.Cons)
- qed
- with ss show ?thesis by simp
- qed
-qed
-
-lemma lemma4:
- assumes r: "r \<rightarrow>\<^sub>s r'"
- shows "r' \<rightarrow>\<^sub>\<beta> r'' \<Longrightarrow> r \<rightarrow>\<^sub>s r''" using r
-proof (induct arbitrary: r'')
- case (Var rs rs' x)
- then obtain ss where rs: "rs' => ss" and r'': "r'' = Var x \<degree>\<degree> ss"
- by (blast dest: head_Var_reduction)
- from Var(1) rs have "rs [\<rightarrow>\<^sub>s] ss" by (rule lemma4_aux)
- hence "Var x \<degree>\<degree> rs \<rightarrow>\<^sub>s Var x \<degree>\<degree> ss" by (rule sred.Var)
- with r'' show ?case by simp
-next
- case (Abs r r' ss ss')
- from `Abs r' \<degree>\<degree> ss' \<rightarrow>\<^sub>\<beta> r''` show ?case
- proof
- fix s
- assume r'': "r'' = s \<degree>\<degree> ss'"
- assume "Abs r' \<rightarrow>\<^sub>\<beta> s"
- then obtain r''' where s: "s = Abs r'''" and r''': "r' \<rightarrow>\<^sub>\<beta> r'''" by cases auto
- from r''' have "r \<rightarrow>\<^sub>s r'''" by (blast intro: Abs)
- moreover from Abs have "ss [\<rightarrow>\<^sub>s] ss'" by (iprover dest: listrelp_conj1)
- ultimately have "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s Abs r''' \<degree>\<degree> ss'" by (rule sred.Abs)
- with r'' s show "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by simp
- next
- fix rs'
- assume "ss' => rs'"
- with Abs(3) have "ss [\<rightarrow>\<^sub>s] rs'" by (rule lemma4_aux)
- with `r \<rightarrow>\<^sub>s r'` have "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> rs'" by (rule sred.Abs)
- moreover assume "r'' = Abs r' \<degree>\<degree> rs'"
- ultimately show "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by simp
- next
- fix t u' us'
- assume "ss' = u' # us'"
- with Abs(3) obtain u us where
- ss: "ss = u # us" and u: "u \<rightarrow>\<^sub>s u'" and us: "us [\<rightarrow>\<^sub>s] us'"
- by cases (auto dest!: listrelp_conj1)
- have "r[u/0] \<rightarrow>\<^sub>s r'[u'/0]" using Abs(1) and u by (rule lemma3)
- with us have "r[u/0] \<degree>\<degree> us \<rightarrow>\<^sub>s r'[u'/0] \<degree>\<degree> us'" by (rule lemma1')
- hence "Abs r \<degree> u \<degree>\<degree> us \<rightarrow>\<^sub>s r'[u'/0] \<degree>\<degree> us'" by (rule sred.Beta)
- moreover assume "Abs r' = Abs t" and "r'' = t[u'/0] \<degree>\<degree> us'"
- ultimately show "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" using ss by simp
- qed
-next
- case (Beta r s ss t)
- show ?case
- by (rule sred.Beta) (rule Beta)+
-qed
-
-lemma rtrancl_beta_sred:
- assumes r: "r \<rightarrow>\<^sub>\<beta>\<^sup>* r'"
- shows "r \<rightarrow>\<^sub>s r'" using r
- by induct (iprover intro: refl_sred lemma4)+
-
-
-subsection {* Leftmost reduction and weakly normalizing terms *}
-
-inductive
- lred :: "dB \<Rightarrow> dB \<Rightarrow> bool" (infixl "\<rightarrow>\<^sub>l" 50)
- and lredlist :: "dB list \<Rightarrow> dB list \<Rightarrow> bool" (infixl "[\<rightarrow>\<^sub>l]" 50)
-where
- "s [\<rightarrow>\<^sub>l] t \<equiv> listrelp op \<rightarrow>\<^sub>l s t"
-| Var: "rs [\<rightarrow>\<^sub>l] rs' \<Longrightarrow> Var x \<degree>\<degree> rs \<rightarrow>\<^sub>l Var x \<degree>\<degree> rs'"
-| Abs: "r \<rightarrow>\<^sub>l r' \<Longrightarrow> Abs r \<rightarrow>\<^sub>l Abs r'"
-| Beta: "r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>l t \<Longrightarrow> Abs r \<degree> s \<degree>\<degree> ss \<rightarrow>\<^sub>l t"
-
-lemma lred_imp_sred:
- assumes lred: "s \<rightarrow>\<^sub>l t"
- shows "s \<rightarrow>\<^sub>s t" using lred
-proof induct
- case (Var rs rs' x)
- then have "rs [\<rightarrow>\<^sub>s] rs'"
- by induct (iprover intro: listrelp.intros)+
- then show ?case by (rule sred.Var)
-next
- case (Abs r r')
- from `r \<rightarrow>\<^sub>s r'`
- have "Abs r \<degree>\<degree> [] \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> []" using listrelp.Nil
- by (rule sred.Abs)
- then show ?case by simp
-next
- case (Beta r s ss t)
- from `r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>s t`
- show ?case by (rule sred.Beta)
-qed
-
-inductive WN :: "dB => bool"
- where
- Var: "listsp WN rs \<Longrightarrow> WN (Var n \<degree>\<degree> rs)"
- | Lambda: "WN r \<Longrightarrow> WN (Abs r)"
- | Beta: "WN ((r[s/0]) \<degree>\<degree> ss) \<Longrightarrow> WN ((Abs r \<degree> s) \<degree>\<degree> ss)"
-
-lemma listrelp_imp_listsp1:
- assumes H: "listrelp (\<lambda>x y. P x) xs ys"
- shows "listsp P xs" using H
- by induct auto
-
-lemma listrelp_imp_listsp2:
- assumes H: "listrelp (\<lambda>x y. P y) xs ys"
- shows "listsp P ys" using H
- by induct auto
-
-lemma lemma5:
- assumes lred: "r \<rightarrow>\<^sub>l r'"
- shows "WN r" and "NF r'" using lred
- by induct
- (iprover dest: listrelp_conj1 listrelp_conj2
- listrelp_imp_listsp1 listrelp_imp_listsp2 intro: WN.intros
- NF.intros [simplified listall_listsp_eq])+
-
-lemma lemma6:
- assumes wn: "WN r"
- shows "\<exists>r'. r \<rightarrow>\<^sub>l r'" using wn
-proof induct
- case (Var rs n)
- then have "\<exists>rs'. rs [\<rightarrow>\<^sub>l] rs'"
- by induct (iprover intro: listrelp.intros)+
- then show ?case by (iprover intro: lred.Var)
-qed (iprover intro: lred.intros)+
-
-lemma lemma7:
- assumes r: "r \<rightarrow>\<^sub>s r'"
- shows "NF r' \<Longrightarrow> r \<rightarrow>\<^sub>l r'" using r
-proof induct
- case (Var rs rs' x)
- from `NF (Var x \<degree>\<degree> rs')` have "listall NF rs'"
- by cases simp_all
- with Var(1) have "rs [\<rightarrow>\<^sub>l] rs'"
- proof induct
- case Nil
- show ?case by (rule listrelp.Nil)
- next
- case (Cons x y xs ys)
- hence "x \<rightarrow>\<^sub>l y" and "xs [\<rightarrow>\<^sub>l] ys" by simp_all
- thus ?case by (rule listrelp.Cons)
- qed
- thus ?case by (rule lred.Var)
-next
- case (Abs r r' ss ss')
- from `NF (Abs r' \<degree>\<degree> ss')`
- have ss': "ss' = []" by (rule Abs_NF)
- from Abs(3) have ss: "ss = []" using ss'
- by cases simp_all
- from ss' Abs have "NF (Abs r')" by simp
- hence "NF r'" by cases simp_all
- with Abs have "r \<rightarrow>\<^sub>l r'" by simp
- hence "Abs r \<rightarrow>\<^sub>l Abs r'" by (rule lred.Abs)
- with ss ss' show ?case by simp
-next
- case (Beta r s ss t)
- hence "r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>l t" by simp
- thus ?case by (rule lred.Beta)
-qed
-
-lemma WN_eq: "WN t = (\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t')"
-proof
- assume "WN t"
- then have "\<exists>t'. t \<rightarrow>\<^sub>l t'" by (rule lemma6)
- then obtain t' where t': "t \<rightarrow>\<^sub>l t'" ..
- then have NF: "NF t'" by (rule lemma5)
- from t' have "t \<rightarrow>\<^sub>s t'" by (rule lred_imp_sred)
- then have "t \<rightarrow>\<^sub>\<beta>\<^sup>* t'" by (rule lemma2_2)
- with NF show "\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'" by iprover
-next
- assume "\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
- then obtain t' where t': "t \<rightarrow>\<^sub>\<beta>\<^sup>* t'" and NF: "NF t'"
- by iprover
- from t' have "t \<rightarrow>\<^sub>s t'" by (rule rtrancl_beta_sred)
- then have "t \<rightarrow>\<^sub>l t'" using NF by (rule lemma7)
- then show "WN t" by (rule lemma5)
-qed
-
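
Iterating the leftmost-outermost step function from the Lambda.thy sketch above yields a normalization procedure; in the spirit of the leftmost-reduction results of this section (the classical leftmost-reduction theorem), such a strategy reaches a normal form exactly on the weakly normalizing terms. A two-line illustrative sketch (normalize is a hypothetical name):

-- Reduce leftmost-outermost until no redex remains; terminates iff the input satisfies WN.
normalize :: DB -> DB
normalize t = maybe t normalize (step t)
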
-end
--- a/src/HOL/Lambda/StrongNorm.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,286 +0,0 @@
-(* Title: HOL/Lambda/StrongNorm.thy
- Author: Stefan Berghofer
- Copyright 2000 TU Muenchen
-*)
-
-header {* Strong normalization for simply-typed lambda calculus *}
-
-theory StrongNorm imports Type InductTermi begin
-
-text {*
-Formalization by Stefan Berghofer. Partly based on a paper proof by
-Felix Joachimski and Ralph Matthes \cite{Matthes-Joachimski-AML}.
-*}
-
-
-subsection {* Properties of @{text IT} *}
-
-lemma lift_IT [intro!]: "IT t \<Longrightarrow> IT (lift t i)"
- apply (induct arbitrary: i set: IT)
- apply (simp (no_asm))
- apply (rule conjI)
- apply
- (rule impI,
- rule IT.Var,
- erule listsp.induct,
- simp (no_asm),
- rule listsp.Nil,
- simp (no_asm),
- rule listsp.Cons,
- blast,
- assumption)+
- apply auto
- done
-
-lemma lifts_IT: "listsp IT ts \<Longrightarrow> listsp IT (map (\<lambda>t. lift t 0) ts)"
- by (induct ts) auto
-
-lemma subst_Var_IT: "IT r \<Longrightarrow> IT (r[Var i/j])"
- apply (induct arbitrary: i j set: IT)
- txt {* Case @{term Var}: *}
- apply (simp (no_asm) add: subst_Var)
- apply
- ((rule conjI impI)+,
- rule IT.Var,
- erule listsp.induct,
- simp (no_asm),
- rule listsp.Nil,
- simp (no_asm),
- rule listsp.Cons,
- fast,
- assumption)+
- txt {* Case @{term Lambda}: *}
- apply atomize
- apply simp
- apply (rule IT.Lambda)
- apply fast
- txt {* Case @{term Beta}: *}
- apply atomize
- apply (simp (no_asm_use) add: subst_subst [symmetric])
- apply (rule IT.Beta)
- apply auto
- done
-
-lemma Var_IT: "IT (Var n)"
- apply (subgoal_tac "IT (Var n \<degree>\<degree> [])")
- apply simp
- apply (rule IT.Var)
- apply (rule listsp.Nil)
- done
-
-lemma app_Var_IT: "IT t \<Longrightarrow> IT (t \<degree> Var i)"
- apply (induct set: IT)
- apply (subst app_last)
- apply (rule IT.Var)
- apply simp
- apply (rule listsp.Cons)
- apply (rule Var_IT)
- apply (rule listsp.Nil)
- apply (rule IT.Beta [where ?ss = "[]", unfolded foldl_Nil [THEN eq_reflection]])
- apply (erule subst_Var_IT)
- apply (rule Var_IT)
- apply (subst app_last)
- apply (rule IT.Beta)
- apply (subst app_last [symmetric])
- apply assumption
- apply assumption
- done
-
-
-subsection {* Well-typed substitution preserves termination *}
-
-lemma subst_type_IT:
- "\<And>t e T u i. IT t \<Longrightarrow> e\<langle>i:U\<rangle> \<turnstile> t : T \<Longrightarrow>
- IT u \<Longrightarrow> e \<turnstile> u : U \<Longrightarrow> IT (t[u/i])"
- (is "PROP ?P U" is "\<And>t e T u i. _ \<Longrightarrow> PROP ?Q t e T u i U")
-proof (induct U)
- fix T t
- assume MI1: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T1"
- assume MI2: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T2"
- assume "IT t"
- thus "\<And>e T' u i. PROP ?Q t e T' u i T"
- proof induct
- fix e T' u i
- assume uIT: "IT u"
- assume uT: "e \<turnstile> u : T"
- {
- case (Var rs n e_ T'_ u_ i_)
- assume nT: "e\<langle>i:T\<rangle> \<turnstile> Var n \<degree>\<degree> rs : T'"
- let ?ty = "\<lambda>t. \<exists>T'. e\<langle>i:T\<rangle> \<turnstile> t : T'"
- let ?R = "\<lambda>t. \<forall>e T' u i.
- e\<langle>i:T\<rangle> \<turnstile> t : T' \<longrightarrow> IT u \<longrightarrow> e \<turnstile> u : T \<longrightarrow> IT (t[u/i])"
- show "IT ((Var n \<degree>\<degree> rs)[u/i])"
- proof (cases "n = i")
- case True
- show ?thesis
- proof (cases rs)
- case Nil
- with uIT True show ?thesis by simp
- next
- case (Cons a as)
- with nT have "e\<langle>i:T\<rangle> \<turnstile> Var n \<degree> a \<degree>\<degree> as : T'" by simp
- then obtain Ts
- where headT: "e\<langle>i:T\<rangle> \<turnstile> Var n \<degree> a : Ts \<Rrightarrow> T'"
- and argsT: "e\<langle>i:T\<rangle> \<tturnstile> as : Ts"
- by (rule list_app_typeE)
- from headT obtain T''
- where varT: "e\<langle>i:T\<rangle> \<turnstile> Var n : T'' \<Rightarrow> Ts \<Rrightarrow> T'"
- and argT: "e\<langle>i:T\<rangle> \<turnstile> a : T''"
- by cases simp_all
- from varT True have T: "T = T'' \<Rightarrow> Ts \<Rrightarrow> T'"
- by cases auto
- with uT have uT': "e \<turnstile> u : T'' \<Rightarrow> Ts \<Rrightarrow> T'" by simp
- from T have "IT ((Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0)
- (map (\<lambda>t. t[u/i]) as))[(u \<degree> a[u/i])/0])"
- proof (rule MI2)
- from T have "IT ((lift u 0 \<degree> Var 0)[a[u/i]/0])"
- proof (rule MI1)
- have "IT (lift u 0)" by (rule lift_IT [OF uIT])
- thus "IT (lift u 0 \<degree> Var 0)" by (rule app_Var_IT)
- show "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 \<degree> Var 0 : Ts \<Rrightarrow> T'"
- proof (rule typing.App)
- show "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 : T'' \<Rightarrow> Ts \<Rrightarrow> T'"
- by (rule lift_type) (rule uT')
- show "e\<langle>0:T''\<rangle> \<turnstile> Var 0 : T''"
- by (rule typing.Var) simp
- qed
- from Var have "?R a" by cases (simp_all add: Cons)
- with argT uIT uT show "IT (a[u/i])" by simp
- from argT uT show "e \<turnstile> a[u/i] : T''"
- by (rule subst_lemma) simp
- qed
- thus "IT (u \<degree> a[u/i])" by simp
- from Var have "listsp ?R as"
- by cases (simp_all add: Cons)
- moreover from argsT have "listsp ?ty as"
- by (rule lists_typings)
- ultimately have "listsp (\<lambda>t. ?R t \<and> ?ty t) as"
- by simp
- hence "listsp IT (map (\<lambda>t. lift t 0) (map (\<lambda>t. t[u/i]) as))"
- (is "listsp IT (?ls as)")
- proof induct
- case Nil
- show ?case by fastsimp
- next
- case (Cons b bs)
- hence I: "?R b" by simp
- from Cons obtain U where "e\<langle>i:T\<rangle> \<turnstile> b : U" by fast
- with uT uIT I have "IT (b[u/i])" by simp
- hence "IT (lift (b[u/i]) 0)" by (rule lift_IT)
- hence "listsp IT (lift (b[u/i]) 0 # ?ls bs)"
- by (rule listsp.Cons) (rule Cons)
- thus ?case by simp
- qed
- thus "IT (Var 0 \<degree>\<degree> ?ls as)" by (rule IT.Var)
- have "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 : Ts \<Rrightarrow> T'"
- by (rule typing.Var) simp
- moreover from uT argsT have "e \<tturnstile> map (\<lambda>t. t[u/i]) as : Ts"
- by (rule substs_lemma)
- hence "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<tturnstile> ?ls as : Ts"
- by (rule lift_types)
- ultimately show "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 \<degree>\<degree> ?ls as : T'"
- by (rule list_app_typeI)
- from argT uT have "e \<turnstile> a[u/i] : T''"
- by (rule subst_lemma) (rule refl)
- with uT' show "e \<turnstile> u \<degree> a[u/i] : Ts \<Rrightarrow> T'"
- by (rule typing.App)
- qed
- with Cons True show ?thesis
- by (simp add: comp_def)
- qed
- next
- case False
- from Var have "listsp ?R rs" by simp
- moreover from nT obtain Ts where "e\<langle>i:T\<rangle> \<tturnstile> rs : Ts"
- by (rule list_app_typeE)
- hence "listsp ?ty rs" by (rule lists_typings)
- ultimately have "listsp (\<lambda>t. ?R t \<and> ?ty t) rs"
- by simp
- hence "listsp IT (map (\<lambda>x. x[u/i]) rs)"
- proof induct
- case Nil
- show ?case by fastsimp
- next
- case (Cons a as)
- hence I: "?R a" by simp
- from Cons obtain U where "e\<langle>i:T\<rangle> \<turnstile> a : U" by fast
- with uT uIT I have "IT (a[u/i])" by simp
- hence "listsp IT (a[u/i] # map (\<lambda>t. t[u/i]) as)"
- by (rule listsp.Cons) (rule Cons)
- thus ?case by simp
- qed
- with False show ?thesis by (auto simp add: subst_Var)
- qed
- next
- case (Lambda r e_ T'_ u_ i_)
- assume "e\<langle>i:T\<rangle> \<turnstile> Abs r : T'"
- and "\<And>e T' u i. PROP ?Q r e T' u i T"
- with uIT uT show "IT (Abs r[u/i])"
- by fastsimp
- next
- case (Beta r a as e_ T'_ u_ i_)
- assume T: "e\<langle>i:T\<rangle> \<turnstile> Abs r \<degree> a \<degree>\<degree> as : T'"
- assume SI1: "\<And>e T' u i. PROP ?Q (r[a/0] \<degree>\<degree> as) e T' u i T"
- assume SI2: "\<And>e T' u i. PROP ?Q a e T' u i T"
- have "IT (Abs (r[lift u 0/Suc i]) \<degree> a[u/i] \<degree>\<degree> map (\<lambda>t. t[u/i]) as)"
- proof (rule IT.Beta)
- have "Abs r \<degree> a \<degree>\<degree> as \<rightarrow>\<^sub>\<beta> r[a/0] \<degree>\<degree> as"
- by (rule apps_preserves_beta) (rule beta.beta)
- with T have "e\<langle>i:T\<rangle> \<turnstile> r[a/0] \<degree>\<degree> as : T'"
- by (rule subject_reduction)
- hence "IT ((r[a/0] \<degree>\<degree> as)[u/i])"
- using uIT uT by (rule SI1)
- thus "IT (r[lift u 0/Suc i][a[u/i]/0] \<degree>\<degree> map (\<lambda>t. t[u/i]) as)"
- by (simp del: subst_map add: subst_subst subst_map [symmetric])
- from T obtain U where "e\<langle>i:T\<rangle> \<turnstile> Abs r \<degree> a : U"
- by (rule list_app_typeE) fast
- then obtain T'' where "e\<langle>i:T\<rangle> \<turnstile> a : T''" by cases simp_all
- thus "IT (a[u/i])" using uIT uT by (rule SI2)
- qed
- thus "IT ((Abs r \<degree> a \<degree>\<degree> as)[u/i])" by simp
- }
- qed
-qed
-
-
-subsection {* Well-typed terms are strongly normalizing *}
-
-lemma type_implies_IT:
- assumes "e \<turnstile> t : T"
- shows "IT t"
- using assms
-proof induct
- case Var
- show ?case by (rule Var_IT)
-next
- case Abs
- show ?case by (rule IT.Lambda) (rule Abs)
-next
- case (App e s T U t)
- have "IT ((Var 0 \<degree> lift t 0)[s/0])"
- proof (rule subst_type_IT)
- have "IT (lift t 0)" using `IT t` by (rule lift_IT)
- hence "listsp IT [lift t 0]" by (rule listsp.Cons) (rule listsp.Nil)
- hence "IT (Var 0 \<degree>\<degree> [lift t 0])" by (rule IT.Var)
- also have "Var 0 \<degree>\<degree> [lift t 0] = Var 0 \<degree> lift t 0" by simp
- finally show "IT \<dots>" .
- have "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 : T \<Rightarrow> U"
- by (rule typing.Var) simp
- moreover have "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> lift t 0 : T"
- by (rule lift_type) (rule App.hyps)
- ultimately show "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 \<degree> lift t 0 : U"
- by (rule typing.App)
- show "IT s" by fact
- show "e \<turnstile> s : T \<Rightarrow> U" by fact
- qed
- thus ?case by simp
-qed
-
-theorem type_implies_termi: "e \<turnstile> t : T \<Longrightarrow> termip beta t"
-proof -
- assume "e \<turnstile> t : T"
- hence "IT t" by (rule type_implies_IT)
- thus ?thesis by (rule IT_implies_termi)
-qed
-
-end
--- a/src/HOL/Lambda/Type.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,365 +0,0 @@
-(* Title: HOL/Lambda/Type.thy
- Author: Stefan Berghofer
- Copyright 2000 TU Muenchen
-*)
-
-header {* Simply-typed lambda terms *}
-
-theory Type imports ListApplication begin
-
-
-subsection {* Environments *}
-
-definition
- shift :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> 'a" ("_<_:_>" [90, 0, 0] 91) where
- "e<i:a> = (\<lambda>j. if j < i then e j else if j = i then a else e (j - 1))"
-
-notation (xsymbols)
- shift ("_\<langle>_:_\<rangle>" [90, 0, 0] 91)
-
-notation (HTML output)
- shift ("_\<langle>_:_\<rangle>" [90, 0, 0] 91)
-
-lemma shift_eq [simp]: "i = j \<Longrightarrow> (e\<langle>i:T\<rangle>) j = T"
- by (simp add: shift_def)
-
-lemma shift_gt [simp]: "j < i \<Longrightarrow> (e\<langle>i:T\<rangle>) j = e j"
- by (simp add: shift_def)
-
-lemma shift_lt [simp]: "i < j \<Longrightarrow> (e\<langle>i:T\<rangle>) j = e (j - 1)"
- by (simp add: shift_def)
-
-lemma shift_commute [simp]: "e\<langle>i:U\<rangle>\<langle>0:T\<rangle> = e\<langle>0:T\<rangle>\<langle>Suc i:U\<rangle>"
- apply (rule ext)
- apply (case_tac x)
- apply simp
- apply (case_tac nat)
- apply (simp_all add: shift_def)
- done
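(Editorial illustration, not part of the original changeset: a minimal sketch, assuming the surrounding theory context, showing how the shift update behaves at the updated index and above it; both goals follow directly from shift_def.)

lemma "(e\<langle>0:T\<rangle>) 0 = T" and "(e\<langle>0:T\<rangle>) (Suc n) = e n"
  by (simp_all add: shift_def)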
-
-
-subsection {* Types and typing rules *}
-
-datatype type =
- Atom nat
- | Fun type type (infixr "\<Rightarrow>" 200)
-
-inductive typing :: "(nat \<Rightarrow> type) \<Rightarrow> dB \<Rightarrow> type \<Rightarrow> bool" ("_ \<turnstile> _ : _" [50, 50, 50] 50)
- where
- Var [intro!]: "env x = T \<Longrightarrow> env \<turnstile> Var x : T"
- | Abs [intro!]: "env\<langle>0:T\<rangle> \<turnstile> t : U \<Longrightarrow> env \<turnstile> Abs t : (T \<Rightarrow> U)"
- | App [intro!]: "env \<turnstile> s : T \<Rightarrow> U \<Longrightarrow> env \<turnstile> t : T \<Longrightarrow> env \<turnstile> (s \<degree> t) : U"
-
-inductive_cases typing_elims [elim!]:
- "e \<turnstile> Var i : T"
- "e \<turnstile> t \<degree> u : T"
- "e \<turnstile> Abs t : T"
-
-primrec
- typings :: "(nat \<Rightarrow> type) \<Rightarrow> dB list \<Rightarrow> type list \<Rightarrow> bool"
-where
- "typings e [] Ts = (Ts = [])"
- | "typings e (t # ts) Ts =
- (case Ts of
- [] \<Rightarrow> False
- | T # Ts \<Rightarrow> e \<turnstile> t : T \<and> typings e ts Ts)"
-
-abbreviation
- typings_rel :: "(nat \<Rightarrow> type) \<Rightarrow> dB list \<Rightarrow> type list \<Rightarrow> bool"
- ("_ ||- _ : _" [50, 50, 50] 50) where
- "env ||- ts : Ts == typings env ts Ts"
-
-notation (latex)
- typings_rel ("_ \<tturnstile> _ : _" [50, 50, 50] 50)
-
-abbreviation
- funs :: "type list \<Rightarrow> type \<Rightarrow> type" (infixr "=>>" 200) where
- "Ts =>> T == foldr Fun Ts T"
-
-notation (latex)
- funs (infixr "\<Rrightarrow>" 200)
-
-
-subsection {* Some examples *}
-
-schematic_lemma "e \<turnstile> Abs (Abs (Abs (Var 1 \<degree> (Var 2 \<degree> Var 1 \<degree> Var 0)))) : ?T"
- by force
-
-schematic_lemma "e \<turnstile> Abs (Abs (Abs (Var 2 \<degree> Var 0 \<degree> (Var 1 \<degree> Var 0)))) : ?T"
- by force
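(Editorial addition: one further, smaller instance in the same style as the two examples above -- the identity combinator. The schematic type ?T is found by the same force call; this assumes the surrounding theory context.)

schematic_lemma "e \<turnstile> Abs (Var 0) : ?T"
  by force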
-
-
-subsection {* Lists of types *}
-
-lemma lists_typings:
- "e \<tturnstile> ts : Ts \<Longrightarrow> listsp (\<lambda>t. \<exists>T. e \<turnstile> t : T) ts"
- apply (induct ts arbitrary: Ts)
- apply (case_tac Ts)
- apply simp
- apply (rule listsp.Nil)
- apply simp
- apply (case_tac Ts)
- apply simp
- apply simp
- apply (rule listsp.Cons)
- apply blast
- apply blast
- done
-
-lemma types_snoc: "e \<tturnstile> ts : Ts \<Longrightarrow> e \<turnstile> t : T \<Longrightarrow> e \<tturnstile> ts @ [t] : Ts @ [T]"
- apply (induct ts arbitrary: Ts)
- apply simp
- apply (case_tac Ts)
- apply simp+
- done
-
-lemma types_snoc_eq: "e \<tturnstile> ts @ [t] : Ts @ [T] =
- (e \<tturnstile> ts : Ts \<and> e \<turnstile> t : T)"
- apply (induct ts arbitrary: Ts)
- apply (case_tac Ts)
- apply simp+
- apply (case_tac Ts)
- apply (case_tac "ts @ [t]")
- apply simp+
- done
-
-lemma rev_exhaust2 [extraction_expand]:
- obtains (Nil) "xs = []" | (snoc) ys y where "xs = ys @ [y]"
- -- {* Cannot use @{text rev_exhaust} from the @{text List}
- theory, since it is not constructive *}
- apply (subgoal_tac "\<forall>ys. xs = rev ys \<longrightarrow> thesis")
- apply (erule_tac x="rev xs" in allE)
- apply simp
- apply (rule allI)
- apply (rule impI)
- apply (case_tac ys)
- apply simp
- apply simp
- apply atomize
- apply (erule allE)+
- apply (erule mp, rule conjI)
- apply (rule refl)+
- done
-
-lemma types_snocE: "e \<tturnstile> ts @ [t] : Ts \<Longrightarrow>
- (\<And>Us U. Ts = Us @ [U] \<Longrightarrow> e \<tturnstile> ts : Us \<Longrightarrow> e \<turnstile> t : U \<Longrightarrow> P) \<Longrightarrow> P"
- apply (cases Ts rule: rev_exhaust2)
- apply simp
- apply (case_tac "ts @ [t]")
- apply (simp add: types_snoc_eq)+
- apply iprover
- done
-
-
-subsection {* n-ary function types *}
-
-lemma list_app_typeD:
- "e \<turnstile> t \<degree>\<degree> ts : T \<Longrightarrow> \<exists>Ts. e \<turnstile> t : Ts \<Rrightarrow> T \<and> e \<tturnstile> ts : Ts"
- apply (induct ts arbitrary: t T)
- apply simp
- apply atomize
- apply simp
- apply (erule_tac x = "t \<degree> a" in allE)
- apply (erule_tac x = T in allE)
- apply (erule impE)
- apply assumption
- apply (elim exE conjE)
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply (rule_tac x = "Ta # Ts" in exI)
- apply simp
- done
-
-lemma list_app_typeE:
- "e \<turnstile> t \<degree>\<degree> ts : T \<Longrightarrow> (\<And>Ts. e \<turnstile> t : Ts \<Rrightarrow> T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow> C) \<Longrightarrow> C"
- by (insert list_app_typeD) fast
-
-lemma list_app_typeI:
- "e \<turnstile> t : Ts \<Rrightarrow> T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow> e \<turnstile> t \<degree>\<degree> ts : T"
- apply (induct ts arbitrary: t T Ts)
- apply simp
- apply atomize
- apply (case_tac Ts)
- apply simp
- apply simp
- apply (erule_tac x = "t \<degree> a" in allE)
- apply (erule_tac x = T in allE)
- apply (erule_tac x = list in allE)
- apply (erule impE)
- apply (erule conjE)
- apply (erule typing.App)
- apply assumption
- apply blast
- done
-
-text {*
-For the specific case where the head of the term is a variable,
-the following theorems make it possible to infer the types of the
-arguments without analyzing the typing derivation. This is crucial
-for program extraction.
-*}
-
-theorem var_app_type_eq:
- "e \<turnstile> Var i \<degree>\<degree> ts : T \<Longrightarrow> e \<turnstile> Var i \<degree>\<degree> ts : U \<Longrightarrow> T = U"
- apply (induct ts arbitrary: T U rule: rev_induct)
- apply simp
- apply (ind_cases "e \<turnstile> Var i : T" for T)
- apply (ind_cases "e \<turnstile> Var i : T" for T)
- apply simp
- apply simp
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply atomize
- apply (erule_tac x="Ta \<Rightarrow> T" in allE)
- apply (erule_tac x="Tb \<Rightarrow> U" in allE)
- apply (erule impE)
- apply assumption
- apply (erule impE)
- apply assumption
- apply simp
- done
-
-lemma var_app_types: "e \<turnstile> Var i \<degree>\<degree> ts \<degree>\<degree> us : T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow>
- e \<turnstile> Var i \<degree>\<degree> ts : U \<Longrightarrow> \<exists>Us. U = Us \<Rrightarrow> T \<and> e \<tturnstile> us : Us"
- apply (induct us arbitrary: ts Ts U)
- apply simp
- apply (erule var_app_type_eq)
- apply assumption
- apply simp
- apply atomize
- apply (case_tac U)
- apply (rule FalseE)
- apply simp
- apply (erule list_app_typeE)
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply (drule_tac T="Atom nat" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
- apply assumption
- apply simp
- apply (erule_tac x="ts @ [a]" in allE)
- apply (erule_tac x="Ts @ [type1]" in allE)
- apply (erule_tac x="type2" in allE)
- apply simp
- apply (erule impE)
- apply (rule types_snoc)
- apply assumption
- apply (erule list_app_typeE)
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply (drule_tac T="type1 \<Rightarrow> type2" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
- apply assumption
- apply simp
- apply (erule impE)
- apply (rule typing.App)
- apply assumption
- apply (erule list_app_typeE)
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply (frule_tac T="type1 \<Rightarrow> type2" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
- apply assumption
- apply simp
- apply (erule exE)
- apply (rule_tac x="type1 # Us" in exI)
- apply simp
- apply (erule list_app_typeE)
- apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
- apply (frule_tac T="type1 \<Rightarrow> Us \<Rrightarrow> T" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
- apply assumption
- apply simp
- done
-
-lemma var_app_typesE: "e \<turnstile> Var i \<degree>\<degree> ts : T \<Longrightarrow>
- (\<And>Ts. e \<turnstile> Var i : Ts \<Rrightarrow> T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow> P) \<Longrightarrow> P"
- apply (drule var_app_types [of _ _ "[]", simplified])
- apply (iprover intro: typing.Var)+
- done
-
-lemma abs_typeE: "e \<turnstile> Abs t : T \<Longrightarrow> (\<And>U V. e\<langle>0:U\<rangle> \<turnstile> t : V \<Longrightarrow> P) \<Longrightarrow> P"
- apply (cases T)
- apply (rule FalseE)
- apply (erule typing.cases)
- apply simp_all
- apply atomize
- apply (erule_tac x="type1" in allE)
- apply (erule_tac x="type2" in allE)
- apply (erule mp)
- apply (erule typing.cases)
- apply simp_all
- done
-
-
-subsection {* Lifting preserves well-typedness *}
-
-lemma lift_type [intro!]: "e \<turnstile> t : T \<Longrightarrow> e\<langle>i:U\<rangle> \<turnstile> lift t i : T"
- by (induct arbitrary: i U set: typing) auto
-
-lemma lift_types:
- "e \<tturnstile> ts : Ts \<Longrightarrow> e\<langle>i:U\<rangle> \<tturnstile> (map (\<lambda>t. lift t i) ts) : Ts"
- apply (induct ts arbitrary: Ts)
- apply simp
- apply (case_tac Ts)
- apply auto
- done
-
-
-subsection {* Substitution lemmas *}
-
-lemma subst_lemma:
- "e \<turnstile> t : T \<Longrightarrow> e' \<turnstile> u : U \<Longrightarrow> e = e'\<langle>i:U\<rangle> \<Longrightarrow> e' \<turnstile> t[u/i] : T"
- apply (induct arbitrary: e' i U u set: typing)
- apply (rule_tac x = x and y = i in linorder_cases)
- apply auto
- apply blast
- done
-
-lemma substs_lemma:
- "e \<turnstile> u : T \<Longrightarrow> e\<langle>i:T\<rangle> \<tturnstile> ts : Ts \<Longrightarrow>
- e \<tturnstile> (map (\<lambda>t. t[u/i]) ts) : Ts"
- apply (induct ts arbitrary: Ts)
- apply (case_tac Ts)
- apply simp
- apply simp
- apply atomize
- apply (case_tac Ts)
- apply simp
- apply simp
- apply (erule conjE)
- apply (erule (1) subst_lemma)
- apply (rule refl)
- done
-
-
-subsection {* Subject reduction *}
-
-lemma subject_reduction: "e \<turnstile> t : T \<Longrightarrow> t \<rightarrow>\<^sub>\<beta> t' \<Longrightarrow> e \<turnstile> t' : T"
- apply (induct arbitrary: t' set: typing)
- apply blast
- apply blast
- apply atomize
- apply (ind_cases "s \<degree> t \<rightarrow>\<^sub>\<beta> t'" for s t t')
- apply hypsubst
- apply (ind_cases "env \<turnstile> Abs t : T \<Rightarrow> U" for env t T U)
- apply (rule subst_lemma)
- apply assumption
- apply assumption
- apply (rule ext)
- apply (case_tac x)
- apply auto
- done
-
-theorem subject_reduction': "t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> e \<turnstile> t : T \<Longrightarrow> e \<turnstile> t' : T"
- by (induct set: rtranclp) (iprover intro: subject_reduction)+
-
-
-subsection {* Alternative induction rule for types *}
-
-lemma type_induct [induct type]:
- assumes
- "(\<And>T. (\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> P T1) \<Longrightarrow>
- (\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> P T2) \<Longrightarrow> P T)"
- shows "P T"
-proof (induct T)
- case Atom
- show ?case by (rule assms) simp_all
-next
- case Fun
- show ?case by (rule assms) (insert Fun, simp_all)
-qed
-
-end
--- a/src/HOL/Lambda/WeakNorm.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,515 +0,0 @@
-(* Title: HOL/Lambda/WeakNorm.thy
- Author: Stefan Berghofer
- Copyright 2003 TU Muenchen
-*)
-
-header {* Weak normalization for simply-typed lambda calculus *}
-
-theory WeakNorm
-imports Type NormalForm Code_Integer
-begin
-
-text {*
-Formalization by Stefan Berghofer. Partly based on a paper proof by
-Felix Joachimski and Ralph Matthes \cite{Matthes-Joachimski-AML}.
-*}
-
-
-subsection {* Main theorems *}
-
-lemma norm_list:
- assumes f_compat: "\<And>t t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> f t \<rightarrow>\<^sub>\<beta>\<^sup>* f t'"
- and f_NF: "\<And>t. NF t \<Longrightarrow> NF (f t)"
- and uNF: "NF u" and uT: "e \<turnstile> u : T"
- shows "\<And>Us. e\<langle>i:T\<rangle> \<tturnstile> as : Us \<Longrightarrow>
- listall (\<lambda>t. \<forall>e T' u i. e\<langle>i:T\<rangle> \<turnstile> t : T' \<longrightarrow>
- NF u \<longrightarrow> e \<turnstile> u : T \<longrightarrow> (\<exists>t'. t[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t')) as \<Longrightarrow>
- \<exists>as'. \<forall>j. Var j \<degree>\<degree> map (\<lambda>t. f (t[u/i])) as \<rightarrow>\<^sub>\<beta>\<^sup>*
- Var j \<degree>\<degree> map f as' \<and> NF (Var j \<degree>\<degree> map f as')"
- (is "\<And>Us. _ \<Longrightarrow> listall ?R as \<Longrightarrow> \<exists>as'. ?ex Us as as'")
-proof (induct as rule: rev_induct)
- case (Nil Us)
- with Var_NF have "?ex Us [] []" by simp
- thus ?case ..
-next
- case (snoc b bs Us)
- have "e\<langle>i:T\<rangle> \<tturnstile> bs @ [b] : Us" by fact
- then obtain Vs W where Us: "Us = Vs @ [W]"
- and bs: "e\<langle>i:T\<rangle> \<tturnstile> bs : Vs" and bT: "e\<langle>i:T\<rangle> \<turnstile> b : W"
- by (rule types_snocE)
- from snoc have "listall ?R bs" by simp
- with bs have "\<exists>bs'. ?ex Vs bs bs'" by (rule snoc)
- then obtain bs' where
- bsred: "\<And>j. Var j \<degree>\<degree> map (\<lambda>t. f (t[u/i])) bs \<rightarrow>\<^sub>\<beta>\<^sup>* Var j \<degree>\<degree> map f bs'"
- and bsNF: "\<And>j. NF (Var j \<degree>\<degree> map f bs')" by iprover
- from snoc have "?R b" by simp
- with bT and uNF and uT have "\<exists>b'. b[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* b' \<and> NF b'"
- by iprover
- then obtain b' where bred: "b[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* b'" and bNF: "NF b'"
- by iprover
- from bsNF [of 0] have "listall NF (map f bs')"
- by (rule App_NF_D)
- moreover have "NF (f b')" using bNF by (rule f_NF)
- ultimately have "listall NF (map f (bs' @ [b']))"
- by simp
- hence "\<And>j. NF (Var j \<degree>\<degree> map f (bs' @ [b']))" by (rule NF.App)
- moreover from bred have "f (b[u/i]) \<rightarrow>\<^sub>\<beta>\<^sup>* f b'"
- by (rule f_compat)
- with bsred have
- "\<And>j. (Var j \<degree>\<degree> map (\<lambda>t. f (t[u/i])) bs) \<degree> f (b[u/i]) \<rightarrow>\<^sub>\<beta>\<^sup>*
- (Var j \<degree>\<degree> map f bs') \<degree> f b'" by (rule rtrancl_beta_App)
- ultimately have "?ex Us (bs @ [b]) (bs' @ [b'])" by simp
- thus ?case ..
-qed
-
-lemma subst_type_NF:
- "\<And>t e T u i. NF t \<Longrightarrow> e\<langle>i:U\<rangle> \<turnstile> t : T \<Longrightarrow> NF u \<Longrightarrow> e \<turnstile> u : U \<Longrightarrow> \<exists>t'. t[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
- (is "PROP ?P U" is "\<And>t e T u i. _ \<Longrightarrow> PROP ?Q t e T u i U")
-proof (induct U)
- fix T t
- let ?R = "\<lambda>t. \<forall>e T' u i.
- e\<langle>i:T\<rangle> \<turnstile> t : T' \<longrightarrow> NF u \<longrightarrow> e \<turnstile> u : T \<longrightarrow> (\<exists>t'. t[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t')"
- assume MI1: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T1"
- assume MI2: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T2"
- assume "NF t"
- thus "\<And>e T' u i. PROP ?Q t e T' u i T"
- proof induct
- fix e T' u i assume uNF: "NF u" and uT: "e \<turnstile> u : T"
- {
- case (App ts x e_ T'_ u_ i_)
- assume "e\<langle>i:T\<rangle> \<turnstile> Var x \<degree>\<degree> ts : T'"
- then obtain Us
- where varT: "e\<langle>i:T\<rangle> \<turnstile> Var x : Us \<Rrightarrow> T'"
- and argsT: "e\<langle>i:T\<rangle> \<tturnstile> ts : Us"
- by (rule var_app_typesE)
- from nat_eq_dec show "\<exists>t'. (Var x \<degree>\<degree> ts)[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
- proof
- assume eq: "x = i"
- show ?thesis
- proof (cases ts)
- case Nil
- with eq have "(Var x \<degree>\<degree> [])[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* u" by simp
- with Nil and uNF show ?thesis by simp iprover
- next
- case (Cons a as)
- with argsT obtain T'' Ts where Us: "Us = T'' # Ts"
- by (cases Us) (rule FalseE, simp+, erule that)
- from varT and Us have varT: "e\<langle>i:T\<rangle> \<turnstile> Var x : T'' \<Rightarrow> Ts \<Rrightarrow> T'"
- by simp
- from varT eq have T: "T = T'' \<Rightarrow> Ts \<Rrightarrow> T'" by cases auto
- with uT have uT': "e \<turnstile> u : T'' \<Rightarrow> Ts \<Rrightarrow> T'" by simp
- from argsT Us Cons have argsT': "e\<langle>i:T\<rangle> \<tturnstile> as : Ts" by simp
- from argsT Us Cons have argT: "e\<langle>i:T\<rangle> \<turnstile> a : T''" by simp
- from argT uT refl have aT: "e \<turnstile> a[u/i] : T''" by (rule subst_lemma)
- from App and Cons have "listall ?R as" by simp (iprover dest: listall_conj2)
- with lift_preserves_beta' lift_NF uNF uT argsT'
- have "\<exists>as'. \<forall>j. Var j \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as \<rightarrow>\<^sub>\<beta>\<^sup>*
- Var j \<degree>\<degree> map (\<lambda>t. lift t 0) as' \<and>
- NF (Var j \<degree>\<degree> map (\<lambda>t. lift t 0) as')" by (rule norm_list)
- then obtain as' where
- asred: "Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as \<rightarrow>\<^sub>\<beta>\<^sup>*
- Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as'"
- and asNF: "NF (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')" by iprover
- from App and Cons have "?R a" by simp
- with argT and uNF and uT have "\<exists>a'. a[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* a' \<and> NF a'"
- by iprover
- then obtain a' where ared: "a[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* a'" and aNF: "NF a'" by iprover
- from uNF have "NF (lift u 0)" by (rule lift_NF)
- hence "\<exists>u'. lift u 0 \<degree> Var 0 \<rightarrow>\<^sub>\<beta>\<^sup>* u' \<and> NF u'" by (rule app_Var_NF)
- then obtain u' where ured: "lift u 0 \<degree> Var 0 \<rightarrow>\<^sub>\<beta>\<^sup>* u'" and u'NF: "NF u'"
- by iprover
- from T and u'NF have "\<exists>ua. u'[a'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* ua \<and> NF ua"
- proof (rule MI1)
- have "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 \<degree> Var 0 : Ts \<Rrightarrow> T'"
- proof (rule typing.App)
- from uT' show "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 : T'' \<Rightarrow> Ts \<Rrightarrow> T'" by (rule lift_type)
- show "e\<langle>0:T''\<rangle> \<turnstile> Var 0 : T''" by (rule typing.Var) simp
- qed
- with ured show "e\<langle>0:T''\<rangle> \<turnstile> u' : Ts \<Rrightarrow> T'" by (rule subject_reduction')
- from ared aT show "e \<turnstile> a' : T''" by (rule subject_reduction')
- show "NF a'" by fact
- qed
- then obtain ua where uared: "u'[a'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* ua" and uaNF: "NF ua"
- by iprover
- from ared have "(lift u 0 \<degree> Var 0)[a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>* (lift u 0 \<degree> Var 0)[a'/0]"
- by (rule subst_preserves_beta2')
- also from ured have "(lift u 0 \<degree> Var 0)[a'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* u'[a'/0]"
- by (rule subst_preserves_beta')
- also note uared
- finally have "(lift u 0 \<degree> Var 0)[a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>* ua" .
- hence uared': "u \<degree> a[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* ua" by simp
- from T asNF _ uaNF have "\<exists>r. (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[ua/0] \<rightarrow>\<^sub>\<beta>\<^sup>* r \<and> NF r"
- proof (rule MI2)
- have "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as : T'"
- proof (rule list_app_typeI)
- show "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 : Ts \<Rrightarrow> T'" by (rule typing.Var) simp
- from uT argsT' have "e \<tturnstile> map (\<lambda>t. t[u/i]) as : Ts"
- by (rule substs_lemma)
- hence "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<tturnstile> map (\<lambda>t. lift t 0) (map (\<lambda>t. t[u/i]) as) : Ts"
- by (rule lift_types)
- thus "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<tturnstile> map (\<lambda>t. lift (t[u/i]) 0) as : Ts"
- by (simp_all add: o_def)
- qed
- with asred show "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as' : T'"
- by (rule subject_reduction')
- from argT uT refl have "e \<turnstile> a[u/i] : T''" by (rule subst_lemma)
- with uT' have "e \<turnstile> u \<degree> a[u/i] : Ts \<Rrightarrow> T'" by (rule typing.App)
- with uared' show "e \<turnstile> ua : Ts \<Rrightarrow> T'" by (rule subject_reduction')
- qed
- then obtain r where rred: "(Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[ua/0] \<rightarrow>\<^sub>\<beta>\<^sup>* r"
- and rnf: "NF r" by iprover
- from asred have
- "(Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as)[u \<degree> a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>*
- (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[u \<degree> a[u/i]/0]"
- by (rule subst_preserves_beta')
- also from uared' have "(Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[u \<degree> a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>*
- (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[ua/0]" by (rule subst_preserves_beta2')
- also note rred
- finally have "(Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as)[u \<degree> a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>* r" .
- with rnf Cons eq show ?thesis
- by (simp add: o_def) iprover
- qed
- next
- assume neq: "x \<noteq> i"
- from App have "listall ?R ts" by (iprover dest: listall_conj2)
- with TrueI TrueI uNF uT argsT
- have "\<exists>ts'. \<forall>j. Var j \<degree>\<degree> map (\<lambda>t. t[u/i]) ts \<rightarrow>\<^sub>\<beta>\<^sup>* Var j \<degree>\<degree> ts' \<and>
- NF (Var j \<degree>\<degree> ts')" (is "\<exists>ts'. ?ex ts'")
- by (rule norm_list [of "\<lambda>t. t", simplified])
- then obtain ts' where NF: "?ex ts'" ..
- from nat_le_dec show ?thesis
- proof
- assume "i < x"
- with NF show ?thesis by simp iprover
- next
- assume "\<not> (i < x)"
- with NF neq show ?thesis by (simp add: subst_Var) iprover
- qed
- qed
- next
- case (Abs r e_ T'_ u_ i_)
- assume absT: "e\<langle>i:T\<rangle> \<turnstile> Abs r : T'"
- then obtain R S where "e\<langle>0:R\<rangle>\<langle>Suc i:T\<rangle> \<turnstile> r : S" by (rule abs_typeE) simp
- moreover have "NF (lift u 0)" using `NF u` by (rule lift_NF)
- moreover have "e\<langle>0:R\<rangle> \<turnstile> lift u 0 : T" using uT by (rule lift_type)
- ultimately have "\<exists>t'. r[lift u 0/Suc i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'" by (rule Abs)
- thus "\<exists>t'. Abs r[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
- by simp (iprover intro: rtrancl_beta_Abs NF.Abs)
- }
- qed
-qed
-
-
--- {* A computationally relevant copy of @{term "e \<turnstile> t : T"} *}
-inductive rtyping :: "(nat \<Rightarrow> type) \<Rightarrow> dB \<Rightarrow> type \<Rightarrow> bool" ("_ \<turnstile>\<^sub>R _ : _" [50, 50, 50] 50)
- where
- Var: "e x = T \<Longrightarrow> e \<turnstile>\<^sub>R Var x : T"
- | Abs: "e\<langle>0:T\<rangle> \<turnstile>\<^sub>R t : U \<Longrightarrow> e \<turnstile>\<^sub>R Abs t : (T \<Rightarrow> U)"
- | App: "e \<turnstile>\<^sub>R s : T \<Rightarrow> U \<Longrightarrow> e \<turnstile>\<^sub>R t : T \<Longrightarrow> e \<turnstile>\<^sub>R (s \<degree> t) : U"
-
-lemma rtyping_imp_typing: "e \<turnstile>\<^sub>R t : T \<Longrightarrow> e \<turnstile> t : T"
- apply (induct set: rtyping)
- apply (erule typing.Var)
- apply (erule typing.Abs)
- apply (erule typing.App)
- apply assumption
- done
-
-
-theorem type_NF:
- assumes "e \<turnstile>\<^sub>R t : T"
- shows "\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'" using assms
-proof induct
- case Var
- show ?case by (iprover intro: Var_NF)
-next
- case Abs
- thus ?case by (iprover intro: rtrancl_beta_Abs NF.Abs)
-next
- case (App e s T U t)
- from App obtain s' t' where
- sred: "s \<rightarrow>\<^sub>\<beta>\<^sup>* s'" and "NF s'"
- and tred: "t \<rightarrow>\<^sub>\<beta>\<^sup>* t'" and tNF: "NF t'" by iprover
- have "\<exists>u. (Var 0 \<degree> lift t' 0)[s'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* u \<and> NF u"
- proof (rule subst_type_NF)
- have "NF (lift t' 0)" using tNF by (rule lift_NF)
- hence "listall NF [lift t' 0]" by (rule listall_cons) (rule listall_nil)
- hence "NF (Var 0 \<degree>\<degree> [lift t' 0])" by (rule NF.App)
- thus "NF (Var 0 \<degree> lift t' 0)" by simp
- show "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 \<degree> lift t' 0 : U"
- proof (rule typing.App)
- show "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 : T \<Rightarrow> U"
- by (rule typing.Var) simp
- from tred have "e \<turnstile> t' : T"
- by (rule subject_reduction') (rule rtyping_imp_typing, rule App.hyps)
- thus "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> lift t' 0 : T"
- by (rule lift_type)
- qed
- from sred show "e \<turnstile> s' : T \<Rightarrow> U"
- by (rule subject_reduction') (rule rtyping_imp_typing, rule App.hyps)
- show "NF s'" by fact
- qed
- then obtain u where ured: "s' \<degree> t' \<rightarrow>\<^sub>\<beta>\<^sup>* u" and unf: "NF u" by simp iprover
- from sred tred have "s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s' \<degree> t'" by (rule rtrancl_beta_App)
- hence "s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* u" using ured by (rule rtranclp_trans)
- with unf show ?case by iprover
-qed
-
-
-subsection {* Extracting the program *}
-
-declare NF.induct [ind_realizer]
-declare rtranclp.induct [ind_realizer irrelevant]
-declare rtyping.induct [ind_realizer]
-lemmas [extraction_expand] = conj_assoc listall_cons_eq
-
-extract type_NF
-
-lemma rtranclR_rtrancl_eq: "rtranclpR r a b = r\<^sup>*\<^sup>* a b"
- apply (rule iffI)
- apply (erule rtranclpR.induct)
- apply (rule rtranclp.rtrancl_refl)
- apply (erule rtranclp.rtrancl_into_rtrancl)
- apply assumption
- apply (erule rtranclp.induct)
- apply (rule rtranclpR.rtrancl_refl)
- apply (erule rtranclpR.rtrancl_into_rtrancl)
- apply assumption
- done
-
-lemma NFR_imp_NF: "NFR nf t \<Longrightarrow> NF t"
- apply (erule NFR.induct)
- apply (rule NF.intros)
- apply (simp add: listall_def)
- apply (erule NF.intros)
- done
-
-text_raw {*
-\begin{figure}
-\renewcommand{\isastyle}{\scriptsize\it}%
-@{thm [display,eta_contract=false,margin=100] subst_type_NF_def}
-\renewcommand{\isastyle}{\small\it}%
-\caption{Program extracted from @{text subst_type_NF}}
-\label{fig:extr-subst-type-nf}
-\end{figure}
-
-\begin{figure}
-\renewcommand{\isastyle}{\scriptsize\it}%
-@{thm [display,margin=100] subst_Var_NF_def}
-@{thm [display,margin=100] app_Var_NF_def}
-@{thm [display,margin=100] lift_NF_def}
-@{thm [display,eta_contract=false,margin=100] type_NF_def}
-\renewcommand{\isastyle}{\small\it}%
-\caption{Program extracted from lemmas and main theorem}
-\label{fig:extr-type-nf}
-\end{figure}
-*}
-
-text {*
-The program corresponding to the proof of the central lemma, which
-performs substitution and normalization, is shown in Figure
-\ref{fig:extr-subst-type-nf}. The correctness
-theorem corresponding to the program @{text "subst_type_NF"} is
-@{thm [display,margin=100] subst_type_NF_correctness
- [simplified rtranclR_rtrancl_eq Collect_mem_eq, no_vars]}
-where @{text NFR} is the realizability predicate corresponding to
-the datatype @{text NFT}, which is inductively defined by the rules
-\pagebreak
-@{thm [display,margin=90] NFR.App [of ts nfs x] NFR.Abs [of nf t]}
-
-The programs corresponding to the main theorem @{text "type_NF"}, as
-well as to some lemmas, are shown in Figure \ref{fig:extr-type-nf}.
-The correctness statement for the main function @{text "type_NF"} is
-@{thm [display,margin=100] type_NF_correctness
- [simplified rtranclR_rtrancl_eq Collect_mem_eq, no_vars]}
-where the realizability predicate @{text "rtypingR"} corresponding to the
-computationally relevant version of the typing judgement is inductively
-defined by the rules
-@{thm [display,margin=100] rtypingR.Var [no_vars]
- rtypingR.Abs [of ty, no_vars] rtypingR.App [of ty e s T U ty' t]}
-*}
-
-subsection {* Generating executable code *}
-
-instantiation NFT :: default
-begin
-
-definition "default = Dummy ()"
-
-instance ..
-
-end
-
-instantiation dB :: default
-begin
-
-definition "default = dB.Var 0"
-
-instance ..
-
-end
-
-instantiation prod :: (default, default) default
-begin
-
-definition "default = (default, default)"
-
-instance ..
-
-end
-
-instantiation list :: (type) default
-begin
-
-definition "default = []"
-
-instance ..
-
-end
-
-instantiation "fun" :: (type, default) default
-begin
-
-definition "default = (\<lambda>x. default)"
-
-instance ..
-
-end
-
-definition int_of_nat :: "nat \<Rightarrow> int" where
- "int_of_nat = of_nat"
-
-text {*
-  The following functions convert between Isabelle's built-in {\tt term}
-  datatype and the generated {\tt dB} datatype. This makes it possible to
-  generate example terms using Isabelle's parser and to inspect
-  normalized terms using Isabelle's pretty printer.
-*}
-
-ML {*
-fun dBtype_of_typ (Type ("fun", [T, U])) =
- @{code Fun} (dBtype_of_typ T, dBtype_of_typ U)
- | dBtype_of_typ (TFree (s, _)) = (case explode s of
- ["'", a] => @{code Atom} (@{code nat} (ord a - 97))
- | _ => error "dBtype_of_typ: variable name")
- | dBtype_of_typ _ = error "dBtype_of_typ: bad type";
-
-fun dB_of_term (Bound i) = @{code dB.Var} (@{code nat} i)
- | dB_of_term (t $ u) = @{code dB.App} (dB_of_term t, dB_of_term u)
- | dB_of_term (Abs (_, _, t)) = @{code dB.Abs} (dB_of_term t)
- | dB_of_term _ = error "dB_of_term: bad term";
-
-fun term_of_dB Ts (Type ("fun", [T, U])) (@{code dB.Abs} dBt) =
- Abs ("x", T, term_of_dB (T :: Ts) U dBt)
- | term_of_dB Ts _ dBt = term_of_dB' Ts dBt
-and term_of_dB' Ts (@{code dB.Var} n) = Bound (@{code int_of_nat} n)
- | term_of_dB' Ts (@{code dB.App} (dBt, dBu)) =
- let val t = term_of_dB' Ts dBt
- in case fastype_of1 (Ts, t) of
- Type ("fun", [T, U]) => t $ term_of_dB Ts T dBu
- | _ => error "term_of_dB: function type expected"
- end
- | term_of_dB' _ _ = error "term_of_dB: term not in normal form";
-
-fun typing_of_term Ts e (Bound i) =
- @{code Var} (e, @{code nat} i, dBtype_of_typ (nth Ts i))
- | typing_of_term Ts e (t $ u) = (case fastype_of1 (Ts, t) of
- Type ("fun", [T, U]) => @{code App} (e, dB_of_term t,
- dBtype_of_typ T, dBtype_of_typ U, dB_of_term u,
- typing_of_term Ts e t, typing_of_term Ts e u)
- | _ => error "typing_of_term: function type expected")
- | typing_of_term Ts e (Abs (s, T, t)) =
- let val dBT = dBtype_of_typ T
- in @{code Abs} (e, dBT, dB_of_term t,
- dBtype_of_typ (fastype_of1 (T :: Ts, t)),
- typing_of_term (T :: Ts) (@{code shift} e @{code "0::nat"} dBT) t)
- end
- | typing_of_term _ _ _ = error "typing_of_term: bad term";
-
-fun dummyf _ = error "dummy";
-
-val ct1 = @{cterm "%f. ((%f x. f (f (f x))) ((%f x. f (f (f (f x)))) f))"};
-val (dB1, _) = @{code type_NF} (typing_of_term [] dummyf (term_of ct1));
-val ct1' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct1)) dB1);
-
-val ct2 = @{cterm "%f x. (%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) x)))))"};
-val (dB2, _) = @{code type_NF} (typing_of_term [] dummyf (term_of ct2));
-val ct2' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct2)) dB2);
-*}
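(Editorial sketch, not part of the original changeset: assuming the ML environment established by the block above, the normalized terms bound to ct1' and ct2' can be inspected with Isabelle's pretty printer, e.g.)

ML {*
(* display the normal forms computed above; ct1' and ct2' are cterms *)
Pretty.writeln (Syntax.pretty_term @{context} (term_of ct1'));
Pretty.writeln (Syntax.pretty_term @{context} (term_of ct2'));
*}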
-
-text {*
-The same conversion functions once more, this time for the SML code generator.
-*}
-
-consts_code
- "default" ("(error \"default\")")
- "default :: 'a \<Rightarrow> 'b::default" ("(fn '_ => error \"default\")")
-
-code_module Norm
-contains
- test = "type_NF"
-
-ML {*
-fun nat_of_int 0 = Norm.zero
- | nat_of_int n = Norm.Suc (nat_of_int (n-1));
-
-fun int_of_nat Norm.zero = 0
- | int_of_nat (Norm.Suc n) = 1 + int_of_nat n;
-
-fun dBtype_of_typ (Type ("fun", [T, U])) =
- Norm.Fun (dBtype_of_typ T, dBtype_of_typ U)
- | dBtype_of_typ (TFree (s, _)) = (case explode s of
- ["'", a] => Norm.Atom (nat_of_int (ord a - 97))
- | _ => error "dBtype_of_typ: variable name")
- | dBtype_of_typ _ = error "dBtype_of_typ: bad type";
-
-fun dB_of_term (Bound i) = Norm.dB_Var (nat_of_int i)
- | dB_of_term (t $ u) = Norm.App (dB_of_term t, dB_of_term u)
- | dB_of_term (Abs (_, _, t)) = Norm.Abs (dB_of_term t)
- | dB_of_term _ = error "dB_of_term: bad term";
-
-fun term_of_dB Ts (Type ("fun", [T, U])) (Norm.Abs dBt) =
- Abs ("x", T, term_of_dB (T :: Ts) U dBt)
- | term_of_dB Ts _ dBt = term_of_dB' Ts dBt
-and term_of_dB' Ts (Norm.dB_Var n) = Bound (int_of_nat n)
- | term_of_dB' Ts (Norm.App (dBt, dBu)) =
- let val t = term_of_dB' Ts dBt
- in case fastype_of1 (Ts, t) of
- Type ("fun", [T, U]) => t $ term_of_dB Ts T dBu
- | _ => error "term_of_dB: function type expected"
- end
- | term_of_dB' _ _ = error "term_of_dB: term not in normal form";
-
-fun typing_of_term Ts e (Bound i) =
- Norm.Var (e, nat_of_int i, dBtype_of_typ (List.nth (Ts, i)))
- | typing_of_term Ts e (t $ u) = (case fastype_of1 (Ts, t) of
- Type ("fun", [T, U]) => Norm.rtypingT_App (e, dB_of_term t,
- dBtype_of_typ T, dBtype_of_typ U, dB_of_term u,
- typing_of_term Ts e t, typing_of_term Ts e u)
- | _ => error "typing_of_term: function type expected")
- | typing_of_term Ts e (Abs (s, T, t)) =
- let val dBT = dBtype_of_typ T
- in Norm.rtypingT_Abs (e, dBT, dB_of_term t,
- dBtype_of_typ (fastype_of1 (T :: Ts, t)),
- typing_of_term (T :: Ts) (Norm.shift e Norm.zero dBT) t)
- end
- | typing_of_term _ _ _ = error "typing_of_term: bad term";
-
-fun dummyf _ = error "dummy";
-*}
-
-text {*
-We now try out the extracted program @{text "type_NF"} on some example terms.
-*}
-
-ML {*
-val ct1 = @{cterm "%f. ((%f x. f (f (f x))) ((%f x. f (f (f (f x)))) f))"};
-val (dB1, _) = Norm.type_NF (typing_of_term [] dummyf (term_of ct1));
-val ct1' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct1)) dB1);
-
-val ct2 = @{cterm "%f x. (%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) x)))))"};
-val (dB2, _) = Norm.type_NF (typing_of_term [] dummyf (term_of ct2));
-val ct2' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct2)) dB2);
-*}
-
-end
--- a/src/HOL/Lambda/document/root.bib Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-@TechReport{Loader1998,
- author = {Ralph Loader},
- title = {{N}otes on {S}imply {T}yped {L}ambda {C}alculus},
- institution = {Laboratory for Foundations of Computer Science,
- School of Informatics, University of Edinburgh},
- year = 1998,
- number = {ECS-LFCS-98-381}
-}
-
-@InProceedings{Matthes-ESSLLI2000,
- author = {Ralph Matthes},
- title = {{L}ambda {C}alculus: {A} {C}ase for {I}nductive
- {D}efinitions},
- booktitle = {Lecture notes of the 12th European Summer School in
- Logic, Language and Information (ESSLLI 2000)},
- year = 2000,
- month = {August},
- publisher = {School of Computer Science, University of
- Birmingham}
-}
-
-@Article{Matthes-Joachimski-AML,
- author = {Felix Joachimski and Ralph Matthes},
- title = {Short Proofs of Normalization for the simply-typed
- $\lambda$-calculus, permutative conversions and
- {G}{\"o}del's {T}},
- journal = {Archive for Mathematical Logic},
- year = 2003,
- volume = 42,
- number = 1,
- pages = {59--87}
-}
-
-@Article{Takahashi-IandC,
- author = {Masako Takahashi},
- title = {Parallel reductions in $\lambda$-calculus},
- journal = {Information and Computation},
- year = 1995,
- volume = 118,
- number = 1,
- pages = {120--127},
- month = {April}
-}
--- a/src/HOL/Lambda/document/root.tex Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-\documentclass[11pt,a4paper]{article}
-\usepackage{graphicx}
-\usepackage[english]{babel}
-\usepackage[latin1]{inputenc}
-\usepackage{amssymb}
-\usepackage{isabelle,isabellesym,pdfsetup}
-
-\isabellestyle{it}
-\renewcommand{\isamarkupsubsubsection}[1]{\subsubsection*{#1}}
-\newcommand{\isasymRrightarrow}{\isamath{\Rrightarrow}}
-
-\begin{document}
-
-\title{Fundamental Properties of Lambda-calculus}
-\author{Tobias Nipkow \\ Stefan Berghofer}
-\maketitle
-
-\tableofcontents
-
-\begin{center}
- \includegraphics[scale=0.7]{session_graph}
-\end{center}
-
-\newpage
-
-\parindent 0pt \parskip 0.5ex
-
-\input{session}
-
-\bibliographystyle{abbrv}
-\bibliography{root}
-
-\end{document}
--- a/src/HOL/MicroJava/J/JTypeSafe.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/MicroJava/J/JTypeSafe.thy Mon Sep 06 19:23:54 2010 +0200
@@ -201,7 +201,7 @@
-- "several simplifications, XcptE, XcptEs, XcptS, Skip, Nil??"
apply( simp_all)
apply( tactic "ALLGOALS strip_tac")
-apply( tactic {* ALLGOALS (eresolve_tac [thm "ty_expr.cases", thm "ty_exprs.cases", thm "wt_stmt.cases"]
+apply( tactic {* ALLGOALS (eresolve_tac [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
THEN_ALL_NEW (full_simp_tac (global_simpset_of @{theory Conform}))) *})
apply(tactic "ALLGOALS (EVERY' [REPEAT o (etac conjE), REPEAT o hyp_subst_tac])")
@@ -241,7 +241,7 @@
apply( fast elim: conforms_localD [THEN lconfD])
-- "for FAss"
-apply( tactic {* EVERY'[eresolve_tac [thm "ty_expr.cases", thm "ty_exprs.cases", thm "wt_stmt.cases"]
+apply( tactic {* EVERY'[eresolve_tac [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
THEN_ALL_NEW (full_simp_tac @{simpset}), REPEAT o (etac conjE), hyp_subst_tac] 3*})
-- "for if"
@@ -277,7 +277,7 @@
-- "7 LAss"
apply (fold fun_upd_def)
-apply( tactic {* (eresolve_tac [thm "ty_expr.cases", thm "ty_exprs.cases", thm "wt_stmt.cases"]
+apply( tactic {* (eresolve_tac [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
THEN_ALL_NEW (full_simp_tac @{simpset})) 1 *})
apply (intro strip)
apply (case_tac E)
--- a/src/HOL/Mirabelle/Mirabelle.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Mirabelle/Mirabelle.thy Mon Sep 06 19:23:54 2010 +0200
@@ -7,7 +7,7 @@
uses "Tools/mirabelle.ML"
begin
-(* no multithreading, no parallel proofs *)
+(* no multithreading, no parallel proofs *) (* FIXME *)
ML {* Multithreading.max_threads := 1 *}
ML {* Goal.parallel_proofs := 0 *}
--- a/src/HOL/NSA/NSA.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/NSA/NSA.thy Mon Sep 06 19:23:54 2010 +0200
@@ -663,9 +663,9 @@
***)
(*reorientation simprules using ==, for the following simproc*)
-val meta_zero_approx_reorient = thm "zero_approx_reorient" RS eq_reflection;
-val meta_one_approx_reorient = thm "one_approx_reorient" RS eq_reflection;
-val meta_number_of_approx_reorient = thm "number_of_approx_reorient" RS eq_reflection
+val meta_zero_approx_reorient = @{thm zero_approx_reorient} RS eq_reflection;
+val meta_one_approx_reorient = @{thm one_approx_reorient} RS eq_reflection;
+val meta_number_of_approx_reorient = @{thm number_of_approx_reorient} RS eq_reflection
(*reorientation simplification procedure: reorients (polymorphic)
0 = x, 1 = x, nnn = x provided x isn't 0, 1 or a numeral.*)
--- a/src/HOL/Nominal/nominal_datatype.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Nominal/nominal_datatype.ML Mon Sep 06 19:23:54 2010 +0200
@@ -24,18 +24,18 @@
structure NominalDatatype : NOMINAL_DATATYPE =
struct
-val finite_emptyI = thm "finite.emptyI";
-val finite_Diff = thm "finite_Diff";
-val finite_Un = thm "finite_Un";
-val Un_iff = thm "Un_iff";
-val In0_eq = thm "In0_eq";
-val In1_eq = thm "In1_eq";
-val In0_not_In1 = thm "In0_not_In1";
-val In1_not_In0 = thm "In1_not_In0";
-val Un_assoc = thm "Un_assoc";
-val Collect_disj_eq = thm "Collect_disj_eq";
+val finite_emptyI = @{thm finite.emptyI};
+val finite_Diff = @{thm finite_Diff};
+val finite_Un = @{thm finite_Un};
+val Un_iff = @{thm Un_iff};
+val In0_eq = @{thm In0_eq};
+val In1_eq = @{thm In1_eq};
+val In0_not_In1 = @{thm In0_not_In1};
+val In1_not_In0 = @{thm In1_not_In0};
+val Un_assoc = @{thm Un_assoc};
+val Collect_disj_eq = @{thm Collect_disj_eq};
val Collect_False_empty = @{thm empty_def [THEN sym, THEN eq_reflection]};
-val empty_iff = thm "empty_iff";
+val empty_iff = @{thm empty_iff};
open Datatype_Aux;
open NominalAtoms;
@@ -119,7 +119,7 @@
(** simplification procedure for sorting permutations **)
-val dj_cp = thm "dj_cp";
+val dj_cp = @{thm dj_cp};
fun dest_permT (Type ("fun", [Type ("List.list", [Type (@{type_name Product_Type.prod}, [T, _])]),
Type ("fun", [_, U])])) = (T, U);
@@ -148,23 +148,21 @@
val perm_simproc =
Simplifier.simproc_global @{theory} "perm_simp" ["pi1 \<bullet> (pi2 \<bullet> x)"] perm_simproc';
-val meta_spec = thm "meta_spec";
-
fun projections rule =
Project_Rule.projections (ProofContext.init_global (Thm.theory_of_thm rule)) rule
|> map (Drule.export_without_context #> Rule_Cases.save rule);
-val supp_prod = thm "supp_prod";
-val fresh_prod = thm "fresh_prod";
-val supports_fresh = thm "supports_fresh";
-val supports_def = thm "Nominal.supports_def";
-val fresh_def = thm "fresh_def";
-val supp_def = thm "supp_def";
-val rev_simps = thms "rev.simps";
-val app_simps = thms "append.simps";
-val at_fin_set_supp = thm "at_fin_set_supp";
-val at_fin_set_fresh = thm "at_fin_set_fresh";
-val abs_fun_eq1 = thm "abs_fun_eq1";
+val supp_prod = @{thm supp_prod};
+val fresh_prod = @{thm fresh_prod};
+val supports_fresh = @{thm supports_fresh};
+val supports_def = @{thm Nominal.supports_def};
+val fresh_def = @{thm fresh_def};
+val supp_def = @{thm supp_def};
+val rev_simps = @{thms rev.simps};
+val app_simps = @{thms append.simps};
+val at_fin_set_supp = @{thm at_fin_set_supp};
+val at_fin_set_fresh = @{thm at_fin_set_fresh};
+val abs_fun_eq1 = @{thm abs_fun_eq1};
val collect_simp = rewrite_rule [mk_meta_eq mem_Collect_eq];
@@ -1406,7 +1404,7 @@
[rtac induct_aux' 1,
REPEAT (resolve_tac fs_atoms 1),
REPEAT ((resolve_tac prems THEN_ALL_NEW
- (etac meta_spec ORELSE' full_simp_tac (HOL_basic_ss addsimps [fresh_def]))) 1)])
+ (etac @{thm meta_spec} ORELSE' full_simp_tac (HOL_basic_ss addsimps [fresh_def]))) 1)])
val (_, thy10) = thy9 |>
Sign.add_path big_name |>
--- a/src/HOL/Nominal/nominal_inductive.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Nominal/nominal_inductive.ML Mon Sep 06 19:23:54 2010 +0200
@@ -30,10 +30,10 @@
fun preds_of ps t = inter (op = o apfst dest_Free) (Term.add_frees t []) ps;
-val fresh_prod = thm "fresh_prod";
+val fresh_prod = @{thm fresh_prod};
-val perm_bool = mk_meta_eq (thm "perm_bool");
-val perm_boolI = thm "perm_boolI";
+val perm_bool = mk_meta_eq @{thm perm_bool};
+val perm_boolI = @{thm perm_boolI};
val (_, [perm_boolI_pi, _]) = Drule.strip_comb (snd (Thm.dest_comb
(Drule.strip_imp_concl (cprop_of perm_boolI))));
--- a/src/HOL/Nominal/nominal_inductive2.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Nominal/nominal_inductive2.ML Mon Sep 06 19:23:54 2010 +0200
@@ -36,8 +36,8 @@
fun preds_of ps t = inter (op = o apfst dest_Free) (Term.add_frees t []) ps;
-val perm_bool = mk_meta_eq (thm "perm_bool");
-val perm_boolI = thm "perm_boolI";
+val perm_bool = mk_meta_eq @{thm perm_bool};
+val perm_boolI = @{thm perm_boolI};
val (_, [perm_boolI_pi, _]) = Drule.strip_comb (snd (Thm.dest_comb
(Drule.strip_imp_concl (cprop_of perm_boolI))));
--- a/src/HOL/Old_Number_Theory/Chinese.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Old_Number_Theory/Chinese.thy Mon Sep 06 19:23:54 2010 +0200
@@ -169,11 +169,11 @@
"0 < n ==> i \<le> n ==> m_cond n mf ==> km_cond n kf mf
==> \<exists>!x. 0 \<le> x \<and> x < mf i \<and> [kf i * mhf mf n i * x = bf i] (mod mf i)"
apply (rule zcong_lineq_unique)
- apply (tactic {* stac (thm "zgcd_zmult_cancel") 2 *})
+ apply (tactic {* stac @{thm zgcd_zmult_cancel} 2 *})
apply (unfold m_cond_def km_cond_def mhf_def)
apply (simp_all (no_asm_simp))
apply safe
- apply (tactic {* stac (thm "zgcd_zmult_cancel") 3 *})
+ apply (tactic {* stac @{thm zgcd_zmult_cancel} 3 *})
apply (rule_tac [!] funprod_zgcd)
apply safe
apply simp_all
@@ -231,12 +231,12 @@
apply (rule_tac x = "x_sol n kf bf mf mod funprod mf 0 n" in exI)
apply (unfold lincong_sol_def)
apply safe
- apply (tactic {* stac (thm "zcong_zmod") 3 *})
- apply (tactic {* stac (thm "mod_mult_eq") 3 *})
- apply (tactic {* stac (thm "mod_mod_cancel") 3 *})
- apply (tactic {* stac (thm "x_sol_lin") 4 *})
- apply (tactic {* stac (thm "mod_mult_eq" RS sym) 6 *})
- apply (tactic {* stac (thm "zcong_zmod" RS sym) 6 *})
+ apply (tactic {* stac @{thm zcong_zmod} 3 *})
+ apply (tactic {* stac @{thm mod_mult_eq} 3 *})
+ apply (tactic {* stac @{thm mod_mod_cancel} 3 *})
+ apply (tactic {* stac @{thm x_sol_lin} 4 *})
+ apply (tactic {* stac (@{thm mod_mult_eq} RS sym) 6 *})
+ apply (tactic {* stac (@{thm zcong_zmod} RS sym) 6 *})
apply (subgoal_tac [6]
"0 \<le> xilin_sol i n kf bf mf \<and> xilin_sol i n kf bf mf < mf i
\<and> [kf i * mhf mf n i * xilin_sol i n kf bf mf = bf i] (mod mf i)")
--- a/src/HOL/Old_Number_Theory/IntPrimes.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Old_Number_Theory/IntPrimes.thy Mon Sep 06 19:23:54 2010 +0200
@@ -399,7 +399,7 @@
zgcd a n = 1 ==> \<exists>!x. 0 \<le> x \<and> x < n \<and> [a * x = b] (mod n)"
apply auto
apply (rule_tac [2] zcong_zless_imp_eq)
- apply (tactic {* stac (thm "zcong_cancel2" RS sym) 6 *})
+ apply (tactic {* stac (@{thm zcong_cancel2} RS sym) 6 *})
apply (rule_tac [8] zcong_trans)
apply (simp_all (no_asm_simp))
prefer 2
--- a/src/HOL/Old_Number_Theory/WilsonBij.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Old_Number_Theory/WilsonBij.thy Mon Sep 06 19:23:54 2010 +0200
@@ -145,9 +145,9 @@
apply (unfold inj_on_def)
apply auto
apply (rule zcong_zless_imp_eq)
- apply (tactic {* stac (thm "zcong_cancel" RS sym) 5 *})
+ apply (tactic {* stac (@{thm zcong_cancel} RS sym) 5 *})
apply (rule_tac [7] zcong_trans)
- apply (tactic {* stac (thm "zcong_sym") 8 *})
+ apply (tactic {* stac @{thm zcong_sym} 8 *})
apply (erule_tac [7] inv_is_inv)
apply (tactic "asm_simp_tac @{simpset} 9")
apply (erule_tac [9] inv_is_inv)
@@ -198,15 +198,15 @@
apply (unfold reciR_def uniqP_def)
apply auto
apply (rule zcong_zless_imp_eq)
- apply (tactic {* stac (thm "zcong_cancel2" RS sym) 5 *})
+ apply (tactic {* stac (@{thm zcong_cancel2} RS sym) 5 *})
apply (rule_tac [7] zcong_trans)
- apply (tactic {* stac (thm "zcong_sym") 8 *})
+ apply (tactic {* stac @{thm zcong_sym} 8 *})
apply (rule_tac [6] zless_zprime_imp_zrelprime)
apply auto
apply (rule zcong_zless_imp_eq)
- apply (tactic {* stac (thm "zcong_cancel" RS sym) 5 *})
+ apply (tactic {* stac (@{thm zcong_cancel} RS sym) 5 *})
apply (rule_tac [7] zcong_trans)
- apply (tactic {* stac (thm "zcong_sym") 8 *})
+ apply (tactic {* stac @{thm zcong_sym} 8 *})
apply (rule_tac [6] zless_zprime_imp_zrelprime)
apply auto
done
--- a/src/HOL/Old_Number_Theory/WilsonRuss.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Old_Number_Theory/WilsonRuss.thy Mon Sep 06 19:23:54 2010 +0200
@@ -257,7 +257,7 @@
apply (subst wset.simps)
apply (auto, unfold Let_def, auto)
apply (subst setprod_insert)
- apply (tactic {* stac (thm "setprod_insert") 3 *})
+ apply (tactic {* stac @{thm setprod_insert} 3 *})
apply (subgoal_tac [5]
"zcong (a * inv p a * (\<Prod>x\<in>wset (a - 1) p. x)) (1 * 1) p")
prefer 5
--- a/src/HOL/Option.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Option.thy Mon Sep 06 19:23:54 2010 +0200
@@ -47,7 +47,7 @@
by simp
declaration {* fn _ =>
- Classical.map_cs (fn cs => cs addSD2 ("ospec", thm "ospec"))
+ Classical.map_cs (fn cs => cs addSD2 ("ospec", @{thm ospec}))
*}
lemma elem_set [iff]: "(x : set xo) = (xo = Some x)"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/Euclid.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,279 @@
+(* Title: HOL/Proofs/Extraction/Euclid.thy
+ Author: Markus Wenzel, TU Muenchen
+ Author: Freek Wiedijk, Radboud University Nijmegen
+ Author: Stefan Berghofer, TU Muenchen
+*)
+
+header {* Euclid's theorem *}
+
+theory Euclid
+imports "~~/src/HOL/Number_Theory/UniqueFactorization" Util Efficient_Nat
+begin
+
+text {*
+This is a constructive version of the proof of Euclid's theorem by
+Markus Wenzel and Freek Wiedijk \cite{Wenzel-Wiedijk-JAR2002}.
+*}
+
+lemma factor_greater_one1: "n = m * k \<Longrightarrow> m < n \<Longrightarrow> k < n \<Longrightarrow> Suc 0 < m"
+ by (induct m) auto
+
+lemma factor_greater_one2: "n = m * k \<Longrightarrow> m < n \<Longrightarrow> k < n \<Longrightarrow> Suc 0 < k"
+ by (induct k) auto
+
+lemma prod_mn_less_k:
+ "(0::nat) < n ==> 0 < k ==> Suc 0 < m ==> m * n = k ==> n < k"
+ by (induct m) auto
+
+lemma prime_eq: "prime (p::nat) = (1 < p \<and> (\<forall>m. m dvd p \<longrightarrow> 1 < m \<longrightarrow> m = p))"
+ apply (simp add: prime_nat_def)
+ apply (rule iffI)
+ apply blast
+ apply (erule conjE)
+ apply (rule conjI)
+ apply assumption
+ apply (rule allI impI)+
+ apply (erule allE)
+ apply (erule impE)
+ apply assumption
+ apply (case_tac "m=0")
+ apply simp
+ apply (case_tac "m=Suc 0")
+ apply simp
+ apply simp
+ done
+
+lemma prime_eq': "prime (p::nat) = (1 < p \<and> (\<forall>m k. p = m * k \<longrightarrow> 1 < m \<longrightarrow> m = p))"
+ by (simp add: prime_eq dvd_def HOL.all_simps [symmetric] del: HOL.all_simps)
+
+lemma not_prime_ex_mk:
+ assumes n: "Suc 0 < n"
+ shows "(\<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k) \<or> prime n"
+proof -
+ {
+ fix k
+ from nat_eq_dec
+ have "(\<exists>m<n. n = m * k) \<or> \<not> (\<exists>m<n. n = m * k)"
+ by (rule search)
+ }
+ hence "(\<exists>k<n. \<exists>m<n. n = m * k) \<or> \<not> (\<exists>k<n. \<exists>m<n. n = m * k)"
+ by (rule search)
+ thus ?thesis
+ proof
+ assume "\<exists>k<n. \<exists>m<n. n = m * k"
+ then obtain k m where k: "k<n" and m: "m<n" and nmk: "n = m * k"
+ by iprover
+ from nmk m k have "Suc 0 < m" by (rule factor_greater_one1)
+ moreover from nmk m k have "Suc 0 < k" by (rule factor_greater_one2)
+ ultimately show ?thesis using k m nmk by iprover
+ next
+ assume "\<not> (\<exists>k<n. \<exists>m<n. n = m * k)"
+ hence A: "\<forall>k<n. \<forall>m<n. n \<noteq> m * k" by iprover
+ have "\<forall>m k. n = m * k \<longrightarrow> Suc 0 < m \<longrightarrow> m = n"
+ proof (intro allI impI)
+ fix m k
+ assume nmk: "n = m * k"
+ assume m: "Suc 0 < m"
+ from n m nmk have k: "0 < k"
+ by (cases k) auto
+ moreover from n have n: "0 < n" by simp
+ moreover note m
+ moreover from nmk have "m * k = n" by simp
+ ultimately have kn: "k < n" by (rule prod_mn_less_k)
+ show "m = n"
+ proof (cases "k = Suc 0")
+ case True
+ with nmk show ?thesis by (simp only: mult_Suc_right)
+ next
+ case False
+ from m have "0 < m" by simp
+ moreover note n
+ moreover from False n nmk k have "Suc 0 < k" by auto
+ moreover from nmk have "k * m = n" by (simp only: mult_ac)
+ ultimately have mn: "m < n" by (rule prod_mn_less_k)
+ with kn A nmk show ?thesis by iprover
+ qed
+ qed
+ with n have "prime n"
+ by (simp only: prime_eq' One_nat_def simp_thms)
+ thus ?thesis ..
+ qed
+qed
+
+lemma dvd_factorial: "0 < m \<Longrightarrow> m \<le> n \<Longrightarrow> m dvd fact (n::nat)"
+proof (induct n rule: nat_induct)
+ case 0
+ then show ?case by simp
+next
+ case (Suc n)
+ from `m \<le> Suc n` show ?case
+ proof (rule le_SucE)
+ assume "m \<le> n"
+ with `0 < m` have "m dvd fact n" by (rule Suc)
+ then have "m dvd (fact n * Suc n)" by (rule dvd_mult2)
+ then show ?thesis by (simp add: mult_commute)
+ next
+ assume "m = Suc n"
+ then have "m dvd (fact n * Suc n)"
+ by (auto intro: dvdI simp: mult_ac)
+ then show ?thesis by (simp add: mult_commute)
+ qed
+qed
+
+lemma dvd_prod [iff]: "n dvd (PROD m\<Colon>nat:#multiset_of (n # ns). m)"
+ by (simp add: msetprod_Un msetprod_singleton)
+
+definition all_prime :: "nat list \<Rightarrow> bool" where
+ "all_prime ps \<longleftrightarrow> (\<forall>p\<in>set ps. prime p)"
+
+lemma all_prime_simps:
+ "all_prime []"
+ "all_prime (p # ps) \<longleftrightarrow> prime p \<and> all_prime ps"
+ by (simp_all add: all_prime_def)
+
+lemma all_prime_append:
+ "all_prime (ps @ qs) \<longleftrightarrow> all_prime ps \<and> all_prime qs"
+ by (simp add: all_prime_def ball_Un)
+
+lemma split_all_prime:
+ assumes "all_prime ms" and "all_prime ns"
+ shows "\<exists>qs. all_prime qs \<and> (PROD m\<Colon>nat:#multiset_of qs. m) =
+ (PROD m\<Colon>nat:#multiset_of ms. m) * (PROD m\<Colon>nat:#multiset_of ns. m)" (is "\<exists>qs. ?P qs \<and> ?Q qs")
+proof -
+ from assms have "all_prime (ms @ ns)"
+ by (simp add: all_prime_append)
+ moreover from assms have "(PROD m\<Colon>nat:#multiset_of (ms @ ns). m) =
+ (PROD m\<Colon>nat:#multiset_of ms. m) * (PROD m\<Colon>nat:#multiset_of ns. m)"
+ by (simp add: msetprod_Un)
+ ultimately have "?P (ms @ ns) \<and> ?Q (ms @ ns)" ..
+ then show ?thesis ..
+qed
+
+lemma all_prime_nempty_g_one:
+ assumes "all_prime ps" and "ps \<noteq> []"
+ shows "Suc 0 < (PROD m\<Colon>nat:#multiset_of ps. m)"
+ using `ps \<noteq> []` `all_prime ps` unfolding One_nat_def [symmetric] by (induct ps rule: list_nonempty_induct)
+ (simp_all add: all_prime_simps msetprod_singleton msetprod_Un prime_gt_1_nat less_1_mult del: One_nat_def)
+
+lemma factor_exists: "Suc 0 < n \<Longrightarrow> (\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) = n)"
+proof (induct n rule: nat_wf_ind)
+ case (1 n)
+ from `Suc 0 < n`
+ have "(\<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k) \<or> prime n"
+ by (rule not_prime_ex_mk)
+ then show ?case
+ proof
+ assume "\<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k"
+ then obtain m k where m: "Suc 0 < m" and k: "Suc 0 < k" and mn: "m < n"
+ and kn: "k < n" and nmk: "n = m * k" by iprover
+ from mn and m have "\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) = m" by (rule 1)
+ then obtain ps1 where "all_prime ps1" and prod_ps1_m: "(PROD m\<Colon>nat:#multiset_of ps1. m) = m"
+ by iprover
+ from kn and k have "\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) = k" by (rule 1)
+ then obtain ps2 where "all_prime ps2" and prod_ps2_k: "(PROD m\<Colon>nat:#multiset_of ps2. m) = k"
+ by iprover
+ from `all_prime ps1` `all_prime ps2`
+ have "\<exists>ps. all_prime ps \<and> (PROD m\<Colon>nat:#multiset_of ps. m) =
+ (PROD m\<Colon>nat:#multiset_of ps1. m) * (PROD m\<Colon>nat:#multiset_of ps2. m)"
+ by (rule split_all_prime)
+ with prod_ps1_m prod_ps2_k nmk show ?thesis by simp
+ next
+ assume "prime n" then have "all_prime [n]" by (simp add: all_prime_simps)
+ moreover have "(PROD m\<Colon>nat:#multiset_of [n]. m) = n" by (simp add: msetprod_singleton)
+ ultimately have "all_prime [n] \<and> (PROD m\<Colon>nat:#multiset_of [n]. m) = n" ..
+ then show ?thesis ..
+ qed
+qed
+
+lemma prime_factor_exists:
+ assumes N: "(1::nat) < n"
+ shows "\<exists>p. prime p \<and> p dvd n"
+proof -
+ from N obtain ps where "all_prime ps"
+ and prod_ps: "n = (PROD m\<Colon>nat:#multiset_of ps. m)" using factor_exists
+ by simp iprover
+ with N have "ps \<noteq> []"
+ by (auto simp add: all_prime_nempty_g_one msetprod_empty)
+ then obtain p qs where ps: "ps = p # qs" by (cases ps) simp
+ with `all_prime ps` have "prime p" by (simp add: all_prime_simps)
+ moreover from `all_prime ps` ps prod_ps
+ have "p dvd n" by (simp only: dvd_prod)
+ ultimately show ?thesis by iprover
+qed
+
+text {*
+Euclid's theorem: there are infinitely many primes.
+*}
+
+lemma Euclid: "\<exists>p::nat. prime p \<and> n < p"
+proof -
+ let ?k = "fact n + 1"
+ have "1 < fact n + 1" by simp
+ then obtain p where prime: "prime p" and dvd: "p dvd ?k" using prime_factor_exists by iprover
+ have "n < p"
+ proof -
+ have "\<not> p \<le> n"
+ proof
+ assume pn: "p \<le> n"
+ from `prime p` have "0 < p" by (rule prime_gt_0_nat)
+ then have "p dvd fact n" using pn by (rule dvd_factorial)
+ with dvd have "p dvd ?k - fact n" by (rule dvd_diff_nat)
+ then have "p dvd 1" by simp
+ with prime show False by auto
+ qed
+ then show ?thesis by simp
+ qed
+ with prime show ?thesis by iprover
+qed
+
+extract Euclid
+
+text {*
+The program extracted from the proof of Euclid's theorem looks as follows.
+@{thm [display] Euclid_def}
+The program corresponding to the proof of the factorization theorem is
+@{thm [display] factor_exists_def}
+*}
+
+instantiation nat :: default
+begin
+
+definition "default = (0::nat)"
+
+instance ..
+
+end
+
+instantiation list :: (type) default
+begin
+
+definition "default = []"
+
+instance ..
+
+end
+
+primrec iterate :: "nat \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a list" where
+ "iterate 0 f x = []"
+ | "iterate (Suc n) f x = (let y = f x in y # iterate n f y)"
+
+lemma "factor_exists 1007 = [53, 19]" by eval
+lemma "factor_exists 567 = [7, 3, 3, 3, 3]" by eval
+lemma "factor_exists 345 = [23, 5, 3]" by eval
+lemma "factor_exists 999 = [37, 3, 3, 3]" by eval
+lemma "factor_exists 876 = [73, 3, 2, 2]" by eval
+
+lemma "iterate 4 Euclid 0 = [2, 3, 7, 71]" by eval
+
+consts_code
+ default ("(error \"default\")")
+
+lemma "factor_exists 1007 = [53, 19]" by evaluation
+lemma "factor_exists 567 = [7, 3, 3, 3, 3]" by evaluation
+lemma "factor_exists 345 = [23, 5, 3]" by evaluation
+lemma "factor_exists 999 = [37, 3, 3, 3]" by evaluation
+lemma "factor_exists 876 = [73, 3, 2, 2]" by evaluation
+
+lemma "iterate 4 Euclid 0 = [2, 3, 7, 71]" by evaluation
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/Greatest_Common_Divisor.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,96 @@
+(* Title: HOL/Proofs/Extraction/Greatest_Common_Divisor.thy
+ Author: Stefan Berghofer, TU Muenchen
+ Author: Helmut Schwichtenberg, LMU Muenchen
+*)
+
+header {* Greatest common divisor *}
+
+theory Greatest_Common_Divisor
+imports QuotRem
+begin
+
+theorem greatest_common_divisor:
+ "\<And>n::nat. Suc m < n \<Longrightarrow> \<exists>k n1 m1. k * n1 = n \<and> k * m1 = Suc m \<and>
+ (\<forall>l l1 l2. l * l1 = n \<longrightarrow> l * l2 = Suc m \<longrightarrow> l \<le> k)"
+proof (induct m rule: nat_wf_ind)
+ case (1 m n)
+ from division obtain r q where h1: "n = Suc m * q + r" and h2: "r \<le> m"
+ by iprover
+ show ?case
+ proof (cases r)
+ case 0
+ with h1 have "Suc m * q = n" by simp
+ moreover have "Suc m * 1 = Suc m" by simp
+ moreover {
+ fix l2 have "\<And>l l1. l * l1 = n \<Longrightarrow> l * l2 = Suc m \<Longrightarrow> l \<le> Suc m"
+ by (cases l2) simp_all }
+ ultimately show ?thesis by iprover
+ next
+ case (Suc nat)
+ with h2 have h: "nat < m" by simp
+ moreover from h have "Suc nat < Suc m" by simp
+ ultimately have "\<exists>k m1 r1. k * m1 = Suc m \<and> k * r1 = Suc nat \<and>
+ (\<forall>l l1 l2. l * l1 = Suc m \<longrightarrow> l * l2 = Suc nat \<longrightarrow> l \<le> k)"
+ by (rule 1)
+ then obtain k m1 r1 where
+ h1': "k * m1 = Suc m"
+ and h2': "k * r1 = Suc nat"
+ and h3': "\<And>l l1 l2. l * l1 = Suc m \<Longrightarrow> l * l2 = Suc nat \<Longrightarrow> l \<le> k"
+ by iprover
+ have mn: "Suc m < n" by (rule 1)
+ from h1 h1' h2' Suc have "k * (m1 * q + r1) = n"
+ by (simp add: add_mult_distrib2 nat_mult_assoc [symmetric])
+ moreover have "\<And>l l1 l2. l * l1 = n \<Longrightarrow> l * l2 = Suc m \<Longrightarrow> l \<le> k"
+ proof -
+ fix l l1 l2
+ assume ll1n: "l * l1 = n"
+ assume ll2m: "l * l2 = Suc m"
+ moreover have "l * (l1 - l2 * q) = Suc nat"
+ by (simp add: diff_mult_distrib2 h1 Suc [symmetric] mn ll1n ll2m [symmetric])
+ ultimately show "l \<le> k" by (rule h3')
+ qed
+ ultimately show ?thesis using h1' by iprover
+ qed
+qed
+
+extract greatest_common_divisor
+
+text {*
+The extracted program for computing the greatest common divisor is
+@{thm [display] greatest_common_divisor_def}
+*}
+
+instantiation nat :: default
+begin
+
+definition "default = (0::nat)"
+
+instance ..
+
+end
+
+instantiation prod :: (default, default) default
+begin
+
+definition "default = (default, default)"
+
+instance ..
+
+end
+
+instantiation "fun" :: (type, default) default
+begin
+
+definition "default = (\<lambda>x. default)"
+
+instance ..
+
+end
+
+consts_code
+ default ("(error \"default\")")
+
+lemma "greatest_common_divisor 7 12 = (4, 3, 2)" by evaluation
+lemma "greatest_common_divisor 7 12 = (4, 3, 2)" by eval
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/Higman.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,462 @@
+(* Title: HOL/Proofs/Extraction/Higman.thy
+ Author: Stefan Berghofer, TU Muenchen
+ Author: Monika Seisenberger, LMU Muenchen
+*)
+
+header {* Higman's lemma *}
+
+theory Higman
+imports Main State_Monad Random
+begin
+
+text {*
+ Formalization by Stefan Berghofer and Monika Seisenberger,
+ based on Coquand and Fridlender \cite{Coquand93}.
+*}
+
+datatype letter = A | B
+
+inductive emb :: "letter list \<Rightarrow> letter list \<Rightarrow> bool"
+where
+ emb0 [Pure.intro]: "emb [] bs"
+ | emb1 [Pure.intro]: "emb as bs \<Longrightarrow> emb as (b # bs)"
+ | emb2 [Pure.intro]: "emb as bs \<Longrightarrow> emb (a # as) (a # bs)"
+
+inductive L :: "letter list \<Rightarrow> letter list list \<Rightarrow> bool"
+ for v :: "letter list"
+where
+ L0 [Pure.intro]: "emb w v \<Longrightarrow> L v (w # ws)"
+ | L1 [Pure.intro]: "L v ws \<Longrightarrow> L v (w # ws)"
+
+inductive good :: "letter list list \<Rightarrow> bool"
+where
+ good0 [Pure.intro]: "L w ws \<Longrightarrow> good (w # ws)"
+ | good1 [Pure.intro]: "good ws \<Longrightarrow> good (w # ws)"
+
+inductive R :: "letter \<Rightarrow> letter list list \<Rightarrow> letter list list \<Rightarrow> bool"
+ for a :: letter
+where
+ R0 [Pure.intro]: "R a [] []"
+ | R1 [Pure.intro]: "R a vs ws \<Longrightarrow> R a (w # vs) ((a # w) # ws)"
+
+inductive T :: "letter \<Rightarrow> letter list list \<Rightarrow> letter list list \<Rightarrow> bool"
+ for a :: letter
+where
+ T0 [Pure.intro]: "a \<noteq> b \<Longrightarrow> R b ws zs \<Longrightarrow> T a (w # zs) ((a # w) # zs)"
+ | T1 [Pure.intro]: "T a ws zs \<Longrightarrow> T a (w # ws) ((a # w) # zs)"
+ | T2 [Pure.intro]: "a \<noteq> b \<Longrightarrow> T a ws zs \<Longrightarrow> T a ws ((b # w) # zs)"
+
+inductive bar :: "letter list list \<Rightarrow> bool"
+where
+ bar1 [Pure.intro]: "good ws \<Longrightarrow> bar ws"
+ | bar2 [Pure.intro]: "(\<And>w. bar (w # ws)) \<Longrightarrow> bar ws"
+
+theorem prop1: "bar ([] # ws)" by iprover
+
+theorem lemma1: "L as ws \<Longrightarrow> L (a # as) ws"
+ by (erule L.induct, iprover+)
+
+lemma lemma2': "R a vs ws \<Longrightarrow> L as vs \<Longrightarrow> L (a # as) ws"
+ apply (induct set: R)
+ apply (erule L.cases)
+ apply simp+
+ apply (erule L.cases)
+ apply simp_all
+ apply (rule L0)
+ apply (erule emb2)
+ apply (erule L1)
+ done
+
+lemma lemma2: "R a vs ws \<Longrightarrow> good vs \<Longrightarrow> good ws"
+ apply (induct set: R)
+ apply iprover
+ apply (erule good.cases)
+ apply simp_all
+ apply (rule good0)
+ apply (erule lemma2')
+ apply assumption
+ apply (erule good1)
+ done
+
+lemma lemma3': "T a vs ws \<Longrightarrow> L as vs \<Longrightarrow> L (a # as) ws"
+ apply (induct set: T)
+ apply (erule L.cases)
+ apply simp_all
+ apply (rule L0)
+ apply (erule emb2)
+ apply (rule L1)
+ apply (erule lemma1)
+ apply (erule L.cases)
+ apply simp_all
+ apply iprover+
+ done
+
+lemma lemma3: "T a ws zs \<Longrightarrow> good ws \<Longrightarrow> good zs"
+ apply (induct set: T)
+ apply (erule good.cases)
+ apply simp_all
+ apply (rule good0)
+ apply (erule lemma1)
+ apply (erule good1)
+ apply (erule good.cases)
+ apply simp_all
+ apply (rule good0)
+ apply (erule lemma3')
+ apply iprover+
+ done
+
+lemma lemma4: "R a ws zs \<Longrightarrow> ws \<noteq> [] \<Longrightarrow> T a ws zs"
+ apply (induct set: R)
+ apply iprover
+ apply (case_tac vs)
+ apply (erule R.cases)
+ apply simp
+ apply (case_tac a)
+ apply (rule_tac b=B in T0)
+ apply simp
+ apply (rule R0)
+ apply (rule_tac b=A in T0)
+ apply simp
+ apply (rule R0)
+ apply simp
+ apply (rule T1)
+ apply simp
+ done
+
+lemma letter_neq: "(a::letter) \<noteq> b \<Longrightarrow> c \<noteq> a \<Longrightarrow> c = b"
+ apply (case_tac a)
+ apply (case_tac b)
+ apply (case_tac c, simp, simp)
+ apply (case_tac c, simp, simp)
+ apply (case_tac b)
+ apply (case_tac c, simp, simp)
+ apply (case_tac c, simp, simp)
+ done
+
+lemma letter_eq_dec: "(a::letter) = b \<or> a \<noteq> b"
+ apply (case_tac a)
+ apply (case_tac b)
+ apply simp
+ apply simp
+ apply (case_tac b)
+ apply simp
+ apply simp
+ done
+
+theorem prop2:
+ assumes ab: "a \<noteq> b" and bar: "bar xs"
+ shows "\<And>ys zs. bar ys \<Longrightarrow> T a xs zs \<Longrightarrow> T b ys zs \<Longrightarrow> bar zs" using bar
+proof induct
+ fix xs zs assume "T a xs zs" and "good xs"
+ hence "good zs" by (rule lemma3)
+ then show "bar zs" by (rule bar1)
+next
+ fix xs ys
+ assume I: "\<And>w ys zs. bar ys \<Longrightarrow> T a (w # xs) zs \<Longrightarrow> T b ys zs \<Longrightarrow> bar zs"
+ assume "bar ys"
+ thus "\<And>zs. T a xs zs \<Longrightarrow> T b ys zs \<Longrightarrow> bar zs"
+ proof induct
+ fix ys zs assume "T b ys zs" and "good ys"
+ then have "good zs" by (rule lemma3)
+ then show "bar zs" by (rule bar1)
+ next
+ fix ys zs assume I': "\<And>w zs. T a xs zs \<Longrightarrow> T b (w # ys) zs \<Longrightarrow> bar zs"
+ and ys: "\<And>w. bar (w # ys)" and Ta: "T a xs zs" and Tb: "T b ys zs"
+ show "bar zs"
+ proof (rule bar2)
+ fix w
+ show "bar (w # zs)"
+ proof (cases w)
+ case Nil
+ thus ?thesis by simp (rule prop1)
+ next
+ case (Cons c cs)
+ from letter_eq_dec show ?thesis
+ proof
+ assume ca: "c = a"
+ from ab have "bar ((a # cs) # zs)" by (iprover intro: I ys Ta Tb)
+ thus ?thesis by (simp add: Cons ca)
+ next
+ assume "c \<noteq> a"
+ with ab have cb: "c = b" by (rule letter_neq)
+ from ab have "bar ((b # cs) # zs)" by (iprover intro: I' Ta Tb)
+ thus ?thesis by (simp add: Cons cb)
+ qed
+ qed
+ qed
+ qed
+qed
+
+theorem prop3:
+ assumes bar: "bar xs"
+ shows "\<And>zs. xs \<noteq> [] \<Longrightarrow> R a xs zs \<Longrightarrow> bar zs" using bar
+proof induct
+ fix xs zs
+ assume "R a xs zs" and "good xs"
+ then have "good zs" by (rule lemma2)
+ then show "bar zs" by (rule bar1)
+next
+ fix xs zs
+ assume I: "\<And>w zs. w # xs \<noteq> [] \<Longrightarrow> R a (w # xs) zs \<Longrightarrow> bar zs"
+ and xsb: "\<And>w. bar (w # xs)" and xsn: "xs \<noteq> []" and R: "R a xs zs"
+ show "bar zs"
+ proof (rule bar2)
+ fix w
+ show "bar (w # zs)"
+ proof (induct w)
+ case Nil
+ show ?case by (rule prop1)
+ next
+ case (Cons c cs)
+ from letter_eq_dec show ?case
+ proof
+ assume "c = a"
+ thus ?thesis by (iprover intro: I [simplified] R)
+ next
+ from R xsn have T: "T a xs zs" by (rule lemma4)
+ assume "c \<noteq> a"
+ thus ?thesis by (iprover intro: prop2 Cons xsb xsn R T)
+ qed
+ qed
+ qed
+qed
+
+theorem higman: "bar []"
+proof (rule bar2)
+ fix w
+ show "bar [w]"
+ proof (induct w)
+ show "bar [[]]" by (rule prop1)
+ next
+ fix c cs assume "bar [cs]"
+ thus "bar [c # cs]" by (rule prop3) (simp, iprover)
+ qed
+qed
+
+primrec
+ is_prefix :: "'a list \<Rightarrow> (nat \<Rightarrow> 'a) \<Rightarrow> bool"
+where
+ "is_prefix [] f = True"
+ | "is_prefix (x # xs) f = (x = f (length xs) \<and> is_prefix xs f)"
+
+theorem L_idx:
+ assumes L: "L w ws"
+ shows "is_prefix ws f \<Longrightarrow> \<exists>i. emb (f i) w \<and> i < length ws" using L
+proof induct
+ case (L0 v ws)
+ hence "emb (f (length ws)) w" by simp
+ moreover have "length ws < length (v # ws)" by simp
+ ultimately show ?case by iprover
+next
+ case (L1 ws v)
+ then obtain i where emb: "emb (f i) w" and "i < length ws"
+ by simp iprover
+ hence "i < length (v # ws)" by simp
+ with emb show ?case by iprover
+qed
+
+theorem good_idx:
+ assumes good: "good ws"
+ shows "is_prefix ws f \<Longrightarrow> \<exists>i j. emb (f i) (f j) \<and> i < j" using good
+proof induct
+ case (good0 w ws)
+ hence "w = f (length ws)" and "is_prefix ws f" by simp_all
+ with good0 show ?case by (iprover dest: L_idx)
+next
+ case (good1 ws w)
+ thus ?case by simp
+qed
+
+theorem bar_idx:
+ assumes bar: "bar ws"
+ shows "is_prefix ws f \<Longrightarrow> \<exists>i j. emb (f i) (f j) \<and> i < j" using bar
+proof induct
+ case (bar1 ws)
+ thus ?case by (rule good_idx)
+next
+ case (bar2 ws)
+ hence "is_prefix (f (length ws) # ws) f" by simp
+ thus ?case by (rule bar2)
+qed
+
+text {*
+Strong version: yields indices of words that can be embedded into each other.
+*}
+
+theorem higman_idx: "\<exists>(i::nat) j. emb (f i) (f j) \<and> i < j"
+proof (rule bar_idx)
+ show "bar []" by (rule higman)
+ show "is_prefix [] f" by simp
+qed
+
+text {*
+Weak version: only yields a sequence containing words
+that can be embedded into each other.
+*}
+
+theorem good_prefix_lemma:
+ assumes bar: "bar ws"
+ shows "is_prefix ws f \<Longrightarrow> \<exists>vs. is_prefix vs f \<and> good vs" using bar
+proof induct
+ case bar1
+ thus ?case by iprover
+next
+ case (bar2 ws)
+ from bar2.prems have "is_prefix (f (length ws) # ws) f" by simp
+ thus ?case by (iprover intro: bar2)
+qed
+
+theorem good_prefix: "\<exists>vs. is_prefix vs f \<and> good vs"
+ using higman
+ by (rule good_prefix_lemma) simp+
+
+subsection {* Extracting the program *}
+
+declare R.induct [ind_realizer]
+declare T.induct [ind_realizer]
+declare L.induct [ind_realizer]
+declare good.induct [ind_realizer]
+declare bar.induct [ind_realizer]
+
+extract higman_idx
+
+text {*
+ Program extracted from the proof of @{text higman_idx}:
+ @{thm [display] higman_idx_def [no_vars]}
+ Corresponding correctness theorem:
+ @{thm [display] higman_idx_correctness [no_vars]}
+ Program extracted from the proof of @{text higman}:
+ @{thm [display] higman_def [no_vars]}
+ Program extracted from the proof of @{text prop1}:
+ @{thm [display] prop1_def [no_vars]}
+ Program extracted from the proof of @{text prop2}:
+ @{thm [display] prop2_def [no_vars]}
+ Program extracted from the proof of @{text prop3}:
+ @{thm [display] prop3_def [no_vars]}
+*}
+
+
+subsection {* Some examples *}
+
+instantiation LT and TT :: default
+begin
+
+definition "default = L0 [] []"
+
+definition "default = T0 A [] [] [] R0"
+
+instance ..
+
+end
+
+function mk_word_aux :: "nat \<Rightarrow> Random.seed \<Rightarrow> letter list \<times> Random.seed" where
+ "mk_word_aux k = exec {
+ i \<leftarrow> Random.range 10;
+ (if i > 7 \<and> k > 2 \<or> k > 1000 then return []
+ else exec {
+ let l = (if i mod 2 = 0 then A else B);
+ ls \<leftarrow> mk_word_aux (Suc k);
+ return (l # ls)
+ })}"
+by pat_completeness auto
+termination by (relation "measure ((op -) 1001)") auto
+
+definition mk_word :: "Random.seed \<Rightarrow> letter list \<times> Random.seed" where
+ "mk_word = mk_word_aux 0"
+
+primrec mk_word_s :: "nat \<Rightarrow> Random.seed \<Rightarrow> letter list \<times> Random.seed" where
+ "mk_word_s 0 = mk_word"
+ | "mk_word_s (Suc n) = exec {
+ _ \<leftarrow> mk_word;
+ mk_word_s n
+ }"
+
+definition g1 :: "nat \<Rightarrow> letter list" where
+ "g1 s = fst (mk_word_s s (20000, 1))"
+
+definition g2 :: "nat \<Rightarrow> letter list" where
+ "g2 s = fst (mk_word_s s (50000, 1))"
+
+fun f1 :: "nat \<Rightarrow> letter list" where
+ "f1 0 = [A, A]"
+ | "f1 (Suc 0) = [B]"
+ | "f1 (Suc (Suc 0)) = [A, B]"
+ | "f1 _ = []"
+
+fun f2 :: "nat \<Rightarrow> letter list" where
+ "f2 0 = [A, A]"
+ | "f2 (Suc 0) = [B]"
+ | "f2 (Suc (Suc 0)) = [B, A]"
+ | "f2 _ = []"
+
+ML {*
+local
+ val higman_idx = @{code higman_idx};
+ val g1 = @{code g1};
+ val g2 = @{code g2};
+ val f1 = @{code f1};
+ val f2 = @{code f2};
+in
+ val (i1, j1) = higman_idx g1;
+ val (v1, w1) = (g1 i1, g1 j1);
+ val (i2, j2) = higman_idx g2;
+ val (v2, w2) = (g2 i2, g2 j2);
+ val (i3, j3) = higman_idx f1;
+ val (v3, w3) = (f1 i3, f1 j3);
+ val (i4, j4) = higman_idx f2;
+ val (v4, w4) = (f2 i4, f2 j4);
+end;
+*}
+
+code_module Higman
+contains
+ higman = higman_idx
+
+ML {*
+local open Higman in
+
+val a = 16807.0;
+val m = 2147483647.0;
+
+fun nextRand seed =
+ let val t = a*seed
+ in t - m * real (Real.floor(t/m)) end;
+
+fun mk_word seed l =
+ let
+ val r = nextRand seed;
+ val i = Real.round (r / m * 10.0);
+ in if i > 7 andalso l > 2 then (r, []) else
+ apsnd (cons (if i mod 2 = 0 then A else B)) (mk_word r (l+1))
+ end;
+
+fun f s zero = mk_word s 0
+ | f s (Suc n) = f (fst (mk_word s 0)) n;
+
+val g1 = snd o (f 20000.0);
+
+val g2 = snd o (f 50000.0);
+
+fun f1 zero = [A,A]
+ | f1 (Suc zero) = [B]
+ | f1 (Suc (Suc zero)) = [A,B]
+ | f1 _ = [];
+
+fun f2 zero = [A,A]
+ | f2 (Suc zero) = [B]
+ | f2 (Suc (Suc zero)) = [B,A]
+ | f2 _ = [];
+
+val (i1, j1) = higman g1;
+val (v1, w1) = (g1 i1, g1 j1);
+val (i2, j2) = higman g2;
+val (v2, w2) = (g2 i2, g2 j2);
+val (i3, j3) = higman f1;
+val (v3, w3) = (f1 i3, f1 j3);
+val (i4, j4) = higman f2;
+val (v4, w4) = (f2 i4, f2 j4);
+
+end;
+*}
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/Pigeonhole.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,275 @@
+(* Title: HOL/Proofs/Extraction/Pigeonhole.thy
+ Author: Stefan Berghofer, TU Muenchen
+*)
+
+header {* The pigeonhole principle *}
+
+theory Pigeonhole
+imports Util Efficient_Nat
+begin
+
+text {*
+We formalize two proofs of the pigeonhole principle, which lead
+to extracted programs of quite different complexity. The original
+formalization of these proofs in {\sc Nuprl} is due to
+Aleksey Nogin \cite{Nogin-ENTCS-2000}.
+
+This proof yields a polynomial program.
+*}
+
+theorem pigeonhole:
+ "\<And>f. (\<And>i. i \<le> Suc n \<Longrightarrow> f i \<le> n) \<Longrightarrow> \<exists>i j. i \<le> Suc n \<and> j < i \<and> f i = f j"
+proof (induct n)
+ case 0
+ hence "Suc 0 \<le> Suc 0 \<and> 0 < Suc 0 \<and> f (Suc 0) = f 0" by simp
+ thus ?case by iprover
+next
+ case (Suc n)
+ {
+ fix k
+ have
+ "k \<le> Suc (Suc n) \<Longrightarrow>
+ (\<And>i j. Suc k \<le> i \<Longrightarrow> i \<le> Suc (Suc n) \<Longrightarrow> j < i \<Longrightarrow> f i \<noteq> f j) \<Longrightarrow>
+ (\<exists>i j. i \<le> k \<and> j < i \<and> f i = f j)"
+ proof (induct k)
+ case 0
+ let ?f = "\<lambda>i. if f i = Suc n then f (Suc (Suc n)) else f i"
+ have "\<not> (\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j)"
+ proof
+ assume "\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j"
+ then obtain i j where i: "i \<le> Suc n" and j: "j < i"
+ and f: "?f i = ?f j" by iprover
+ from j have i_nz: "Suc 0 \<le> i" by simp
+ from i have iSSn: "i \<le> Suc (Suc n)" by simp
+ have S0SSn: "Suc 0 \<le> Suc (Suc n)" by simp
+ show False
+ proof cases
+ assume fi: "f i = Suc n"
+ show False
+ proof cases
+ assume fj: "f j = Suc n"
+ from i_nz and iSSn and j have "f i \<noteq> f j" by (rule 0)
+ moreover from fi have "f i = f j"
+ by (simp add: fj [symmetric])
+ ultimately show ?thesis ..
+ next
+ from i and j have "j < Suc (Suc n)" by simp
+ with S0SSn and le_refl have "f (Suc (Suc n)) \<noteq> f j"
+ by (rule 0)
+ moreover assume "f j \<noteq> Suc n"
+ with fi and f have "f (Suc (Suc n)) = f j" by simp
+ ultimately show False ..
+ qed
+ next
+ assume fi: "f i \<noteq> Suc n"
+ show False
+ proof cases
+ from i have "i < Suc (Suc n)" by simp
+ with S0SSn and le_refl have "f (Suc (Suc n)) \<noteq> f i"
+ by (rule 0)
+ moreover assume "f j = Suc n"
+ with fi and f have "f (Suc (Suc n)) = f i" by simp
+ ultimately show False ..
+ next
+ from i_nz and iSSn and j
+ have "f i \<noteq> f j" by (rule 0)
+ moreover assume "f j \<noteq> Suc n"
+ with fi and f have "f i = f j" by simp
+ ultimately show False ..
+ qed
+ qed
+ qed
+ moreover have "\<And>i. i \<le> Suc n \<Longrightarrow> ?f i \<le> n"
+ proof -
+ fix i assume "i \<le> Suc n"
+ hence i: "i < Suc (Suc n)" by simp
+ have "f (Suc (Suc n)) \<noteq> f i"
+ by (rule 0) (simp_all add: i)
+ moreover have "f (Suc (Suc n)) \<le> Suc n"
+ by (rule Suc) simp
+ moreover from i have "i \<le> Suc (Suc n)" by simp
+ hence "f i \<le> Suc n" by (rule Suc)
+ ultimately show "?thesis i"
+ by simp
+ qed
+ hence "\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j"
+ by (rule Suc)
+ ultimately show ?case ..
+ next
+ case (Suc k)
+ from search [OF nat_eq_dec] show ?case
+ proof
+ assume "\<exists>j<Suc k. f (Suc k) = f j"
+ thus ?case by (iprover intro: le_refl)
+ next
+ assume nex: "\<not> (\<exists>j<Suc k. f (Suc k) = f j)"
+ have "\<exists>i j. i \<le> k \<and> j < i \<and> f i = f j"
+ proof (rule Suc)
+ from Suc show "k \<le> Suc (Suc n)" by simp
+ fix i j assume k: "Suc k \<le> i" and i: "i \<le> Suc (Suc n)"
+ and j: "j < i"
+ show "f i \<noteq> f j"
+ proof cases
+ assume eq: "i = Suc k"
+ show ?thesis
+ proof
+ assume "f i = f j"
+ hence "f (Suc k) = f j" by (simp add: eq)
+ with nex and j and eq show False by iprover
+ qed
+ next
+ assume "i \<noteq> Suc k"
+ with k have "Suc (Suc k) \<le> i" by simp
+ thus ?thesis using i and j by (rule Suc)
+ qed
+ qed
+ thus ?thesis by (iprover intro: le_SucI)
+ qed
+ qed
+ }
+ note r = this
+ show ?case by (rule r) simp_all
+qed
+
+text {*
+The following proof, although quite elegant from a mathematical point of view,
+leads to an exponential program:
+*}
+
+theorem pigeonhole_slow:
+ "\<And>f. (\<And>i. i \<le> Suc n \<Longrightarrow> f i \<le> n) \<Longrightarrow> \<exists>i j. i \<le> Suc n \<and> j < i \<and> f i = f j"
+proof (induct n)
+ case 0
+ have "Suc 0 \<le> Suc 0" ..
+ moreover have "0 < Suc 0" ..
+ moreover from 0 have "f (Suc 0) = f 0" by simp
+ ultimately show ?case by iprover
+next
+ case (Suc n)
+ from search [OF nat_eq_dec] show ?case
+ proof
+ assume "\<exists>j < Suc (Suc n). f (Suc (Suc n)) = f j"
+ thus ?case by (iprover intro: le_refl)
+ next
+ assume "\<not> (\<exists>j < Suc (Suc n). f (Suc (Suc n)) = f j)"
+ hence nex: "\<forall>j < Suc (Suc n). f (Suc (Suc n)) \<noteq> f j" by iprover
+ let ?f = "\<lambda>i. if f i = Suc n then f (Suc (Suc n)) else f i"
+ have "\<And>i. i \<le> Suc n \<Longrightarrow> ?f i \<le> n"
+ proof -
+ fix i assume i: "i \<le> Suc n"
+ show "?thesis i"
+ proof (cases "f i = Suc n")
+ case True
+ from i and nex have "f (Suc (Suc n)) \<noteq> f i" by simp
+ with True have "f (Suc (Suc n)) \<noteq> Suc n" by simp
+ moreover from Suc have "f (Suc (Suc n)) \<le> Suc n" by simp
+ ultimately have "f (Suc (Suc n)) \<le> n" by simp
+ with True show ?thesis by simp
+ next
+ case False
+ from Suc and i have "f i \<le> Suc n" by simp
+ with False show ?thesis by simp
+ qed
+ qed
+ hence "\<exists>i j. i \<le> Suc n \<and> j < i \<and> ?f i = ?f j" by (rule Suc)
+ then obtain i j where i: "i \<le> Suc n" and ji: "j < i" and f: "?f i = ?f j"
+ by iprover
+ have "f i = f j"
+ proof (cases "f i = Suc n")
+ case True
+ show ?thesis
+ proof (cases "f j = Suc n")
+ assume "f j = Suc n"
+ with True show ?thesis by simp
+ next
+ assume "f j \<noteq> Suc n"
+ moreover from i ji nex have "f (Suc (Suc n)) \<noteq> f j" by simp
+ ultimately show ?thesis using True f by simp
+ qed
+ next
+ case False
+ show ?thesis
+ proof (cases "f j = Suc n")
+ assume "f j = Suc n"
+ moreover from i nex have "f (Suc (Suc n)) \<noteq> f i" by simp
+ ultimately show ?thesis using False f by simp
+ next
+ assume "f j \<noteq> Suc n"
+ with False f show ?thesis by simp
+ qed
+ qed
+ moreover from i have "i \<le> Suc (Suc n)" by simp
+ ultimately show ?thesis using ji by iprover
+ qed
+qed
+
+extract pigeonhole pigeonhole_slow
+
+text {*
+The programs extracted from the above proofs look as follows:
+@{thm [display] pigeonhole_def}
+@{thm [display] pigeonhole_slow_def}
+The program for searching for an element in an array is
+@{thm [display,eta_contract=false] search_def}
+The correctness statement for @{term "pigeonhole"} is
+@{thm [display] pigeonhole_correctness [no_vars]}
+
+In order to analyze the speed of the above programs,
+we generate ML code from them.
+*}
+
+instantiation nat :: default
+begin
+
+definition "default = (0::nat)"
+
+instance ..
+
+end
+
+instantiation prod :: (default, default) default
+begin
+
+definition "default = (default, default)"
+
+instance ..
+
+end
+
+definition
+ "test n u = pigeonhole n (\<lambda>m. m - 1)"
+definition
+ "test' n u = pigeonhole_slow n (\<lambda>m. m - 1)"
+definition
+ "test'' u = pigeonhole 8 (op ! [0, 1, 2, 3, 4, 5, 6, 3, 7, 8])"
+
+ML "timeit (@{code test} 10)"
+ML "timeit (@{code test'} 10)"
+ML "timeit (@{code test} 20)"
+ML "timeit (@{code test'} 20)"
+ML "timeit (@{code test} 25)"
+ML "timeit (@{code test'} 25)"
+ML "timeit (@{code test} 500)"
+ML "timeit @{code test''}"
+
+consts_code
+ "default :: nat" ("{* 0::nat *}")
+ "default :: nat \<times> nat" ("{* (0::nat, 0::nat) *}")
+
+code_module PH
+contains
+ test = test
+ test' = test'
+ test'' = test''
+
+ML "timeit (PH.test 10)"
+ML "timeit (PH.test' 10)"
+ML "timeit (PH.test 20)"
+ML "timeit (PH.test' 20)"
+ML "timeit (PH.test 25)"
+ML "timeit (PH.test' 25)"
+ML "timeit (PH.test 500)"
+ML "timeit PH.test''"
+
+end
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/QuotRem.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,46 @@
+(* Title: HOL/Proofs/Extraction/QuotRem.thy
+ Author: Stefan Berghofer, TU Muenchen
+*)
+
+header {* Quotient and remainder *}
+
+theory QuotRem
+imports Util
+begin
+
+text {* Derivation of quotient and remainder using program extraction. *}
+
+theorem division: "\<exists>r q. a = Suc b * q + r \<and> r \<le> b"
+proof (induct a)
+ case 0
+ have "0 = Suc b * 0 + 0 \<and> 0 \<le> b" by simp
+ thus ?case by iprover
+next
+ case (Suc a)
+ then obtain r q where I: "a = Suc b * q + r" and "r \<le> b" by iprover
+ from nat_eq_dec show ?case
+ proof
+ assume "r = b"
+ with I have "Suc a = Suc b * (Suc q) + 0 \<and> 0 \<le> b" by simp
+ thus ?case by iprover
+ next
+ assume "r \<noteq> b"
+ with `r \<le> b` have "r < b" by (simp add: order_less_le)
+ with I have "Suc a = Suc b * q + (Suc r) \<and> (Suc r) \<le> b" by simp
+ thus ?case by iprover
+ qed
+qed
+
+extract division
+
+text {*
+ The program extracted from the above proof looks as follows:
+ @{thm [display] division_def [no_vars]}
+ The corresponding correctness theorem is
+ @{thm [display] division_correctness [no_vars]}
+*}
+
+lemma "division 9 2 = (0, 3)" by evaluation
+lemma "division 9 2 = (0, 3)" by eval
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/ROOT.ML Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,6 @@
+(* Examples for program extraction in Higher-Order Logic *)
+
+no_document use_thys ["Efficient_Nat", "Monad_Syntax", "~~/src/HOL/Number_Theory/Primes"];
+share_common_data ();
+no_document use_thys ["~~/src/HOL/Number_Theory/UniqueFactorization"];
+use_thys ["Greatest_Common_Divisor", "Warshall", "Higman", "Pigeonhole", "Euclid"];
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/Util.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,97 @@
+(* Title: HOL/Proofs/Extraction/Util.thy
+ Author: Stefan Berghofer, TU Muenchen
+*)
+
+header {* Auxiliary lemmas used in program extraction examples *}
+
+theory Util
+imports Main
+begin
+
+text {*
+Decidability of equality on natural numbers.
+*}
+
+lemma nat_eq_dec: "\<And>n::nat. m = n \<or> m \<noteq> n"
+ apply (induct m)
+ apply (case_tac n)
+ apply (case_tac [3] n)
+ apply (simp only: nat.simps, iprover?)+
+ done
+
+text {*
+Well-founded induction on natural numbers, derived using the standard
+structural induction rule.
+*}
+
+lemma nat_wf_ind:
+ assumes R: "\<And>x::nat. (\<And>y. y < x \<Longrightarrow> P y) \<Longrightarrow> P x"
+ shows "P z"
+proof (rule R)
+ show "\<And>y. y < z \<Longrightarrow> P y"
+ proof (induct z)
+ case 0
+ thus ?case by simp
+ next
+ case (Suc n y)
+ from nat_eq_dec show ?case
+ proof
+ assume ny: "n = y"
+ have "P n"
+ by (rule R) (rule Suc)
+ with ny show ?case by simp
+ next
+ assume "n \<noteq> y"
+ with Suc have "y < n" by simp
+ thus ?case by (rule Suc)
+ qed
+ qed
+qed
+
+text {*
+Bounded search for a natural number satisfying a decidable predicate.
+*}
+
+lemma search:
+ assumes dec: "\<And>x::nat. P x \<or> \<not> P x"
+ shows "(\<exists>x<y. P x) \<or> \<not> (\<exists>x<y. P x)"
+proof (induct y)
+ case 0 show ?case by simp
+next
+ case (Suc z)
+ thus ?case
+ proof
+ assume "\<exists>x<z. P x"
+ then obtain x where le: "x < z" and P: "P x" by iprover
+ from le have "x < Suc z" by simp
+ with P show ?case by iprover
+ next
+ assume nex: "\<not> (\<exists>x<z. P x)"
+ from dec show ?case
+ proof
+ assume P: "P z"
+ have "z < Suc z" by simp
+ with P show ?thesis by iprover
+ next
+ assume nP: "\<not> P z"
+ have "\<not> (\<exists>x<Suc z. P x)"
+ proof
+ assume "\<exists>x<Suc z. P x"
+ then obtain x where le: "x < Suc z" and P: "P x" by iprover
+ have "x < z"
+ proof (cases "x = z")
+ case True
+ with nP and P show ?thesis by simp
+ next
+ case False
+ with le show ?thesis by simp
+ qed
+ with P have "\<exists>x<z. P x" by iprover
+ with nex show False ..
+ qed
+ thus ?case by iprover
+ qed
+ qed
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/Warshall.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,261 @@
+(* Title: HOL/Proofs/Extraction/Warshall.thy
+ Author: Stefan Berghofer, TU Muenchen
+*)
+
+header {* Warshall's algorithm *}
+
+theory Warshall
+imports Main
+begin
+
+text {*
+ Derivation of Warshall's algorithm using program extraction,
+ based on Berger, Schwichtenberg and Seisenberger \cite{Berger-JAR-2001}.
+*}
+
+datatype b = T | F
+
+primrec
+ is_path' :: "('a \<Rightarrow> 'a \<Rightarrow> b) \<Rightarrow> 'a \<Rightarrow> 'a list \<Rightarrow> 'a \<Rightarrow> bool"
+where
+ "is_path' r x [] z = (r x z = T)"
+ | "is_path' r x (y # ys) z = (r x y = T \<and> is_path' r y ys z)"
+
+definition
+ is_path :: "(nat \<Rightarrow> nat \<Rightarrow> b) \<Rightarrow> (nat * nat list * nat) \<Rightarrow>
+ nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
+where
+ "is_path r p i j k \<longleftrightarrow> fst p = j \<and> snd (snd p) = k \<and>
+ list_all (\<lambda>x. x < i) (fst (snd p)) \<and>
+ is_path' r (fst p) (fst (snd p)) (snd (snd p))"
+
+definition
+ conc :: "('a * 'a list * 'a) \<Rightarrow> ('a * 'a list * 'a) \<Rightarrow> ('a * 'a list * 'a)"
+where
+ "conc p q = (fst p, fst (snd p) @ fst q # fst (snd q), snd (snd q))"
+
+theorem is_path'_snoc [simp]:
+ "\<And>x. is_path' r x (ys @ [y]) z = (is_path' r x ys y \<and> r y z = T)"
+ by (induct ys) simp+
+
+theorem list_all_scoc [simp]: "list_all P (xs @ [x]) \<longleftrightarrow> P x \<and> list_all P xs"
+ by (induct xs, simp+, iprover)
+
+theorem list_all_lemma:
+ "list_all P xs \<Longrightarrow> (\<And>x. P x \<Longrightarrow> Q x) \<Longrightarrow> list_all Q xs"
+proof -
+ assume PQ: "\<And>x. P x \<Longrightarrow> Q x"
+ show "list_all P xs \<Longrightarrow> list_all Q xs"
+ proof (induct xs)
+ case Nil
+ show ?case by simp
+ next
+ case (Cons y ys)
+ hence Py: "P y" by simp
+ from Cons have Pys: "list_all P ys" by simp
+ show ?case
+ by simp (rule conjI PQ Py Cons Pys)+
+ qed
+qed
+
+theorem lemma1: "\<And>p. is_path r p i j k \<Longrightarrow> is_path r p (Suc i) j k"
+ apply (unfold is_path_def)
+ apply (simp cong add: conj_cong add: split_paired_all)
+ apply (erule conjE)+
+ apply (erule list_all_lemma)
+ apply simp
+ done
+
+theorem lemma2: "\<And>p. is_path r p 0 j k \<Longrightarrow> r j k = T"
+ apply (unfold is_path_def)
+ apply (simp cong add: conj_cong add: split_paired_all)
+ apply (case_tac "aa")
+ apply simp+
+ done
+
+theorem is_path'_conc: "is_path' r j xs i \<Longrightarrow> is_path' r i ys k \<Longrightarrow>
+ is_path' r j (xs @ i # ys) k"
+proof -
+ assume pys: "is_path' r i ys k"
+ show "\<And>j. is_path' r j xs i \<Longrightarrow> is_path' r j (xs @ i # ys) k"
+ proof (induct xs)
+ case (Nil j)
+ hence "r j i = T" by simp
+ with pys show ?case by simp
+ next
+ case (Cons z zs j)
+ hence jzr: "r j z = T" by simp
+ from Cons have pzs: "is_path' r z zs i" by simp
+ show ?case
+ by simp (rule conjI jzr Cons pzs)+
+ qed
+qed
+
+theorem lemma3:
+ "\<And>p q. is_path r p i j i \<Longrightarrow> is_path r q i i k \<Longrightarrow>
+ is_path r (conc p q) (Suc i) j k"
+ apply (unfold is_path_def conc_def)
+ apply (simp cong add: conj_cong add: split_paired_all)
+ apply (erule conjE)+
+ apply (rule conjI)
+ apply (erule list_all_lemma)
+ apply simp
+ apply (rule conjI)
+ apply (erule list_all_lemma)
+ apply simp
+ apply (rule is_path'_conc)
+ apply assumption+
+ done
+
+theorem lemma5:
+ "\<And>p. is_path r p (Suc i) j k \<Longrightarrow> ~ is_path r p i j k \<Longrightarrow>
+ (\<exists>q. is_path r q i j i) \<and> (\<exists>q'. is_path r q' i i k)"
+proof (simp cong add: conj_cong add: split_paired_all is_path_def, (erule conjE)+)
+ fix xs
+ assume asms:
+ "list_all (\<lambda>x. x < Suc i) xs"
+ "is_path' r j xs k"
+ "\<not> list_all (\<lambda>x. x < i) xs"
+ show "(\<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r j ys i) \<and>
+ (\<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r i ys k)"
+ proof
+ show "\<And>j. list_all (\<lambda>x. x < Suc i) xs \<Longrightarrow> is_path' r j xs k \<Longrightarrow>
+ \<not> list_all (\<lambda>x. x < i) xs \<Longrightarrow>
+ \<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r j ys i" (is "PROP ?ih xs")
+ proof (induct xs)
+ case Nil
+ thus ?case by simp
+ next
+ case (Cons a as j)
+ show ?case
+ proof (cases "a=i")
+ case True
+ show ?thesis
+ proof
+ from True and Cons have "r j i = T" by simp
+ thus "list_all (\<lambda>x. x < i) [] \<and> is_path' r j [] i" by simp
+ qed
+ next
+ case False
+ have "PROP ?ih as" by (rule Cons)
+ then obtain ys where ys: "list_all (\<lambda>x. x < i) ys \<and> is_path' r a ys i"
+ proof
+ from Cons show "list_all (\<lambda>x. x < Suc i) as" by simp
+ from Cons show "is_path' r a as k" by simp
+ from Cons and False show "\<not> list_all (\<lambda>x. x < i) as" by (simp)
+ qed
+ show ?thesis
+ proof
+ from Cons False ys
+ show "list_all (\<lambda>x. x<i) (a#ys) \<and> is_path' r j (a#ys) i" by simp
+ qed
+ qed
+ qed
+ show "\<And>k. list_all (\<lambda>x. x < Suc i) xs \<Longrightarrow> is_path' r j xs k \<Longrightarrow>
+ \<not> list_all (\<lambda>x. x < i) xs \<Longrightarrow>
+ \<exists>ys. list_all (\<lambda>x. x < i) ys \<and> is_path' r i ys k" (is "PROP ?ih xs")
+ proof (induct xs rule: rev_induct)
+ case Nil
+ thus ?case by simp
+ next
+ case (snoc a as k)
+ show ?case
+ proof (cases "a=i")
+ case True
+ show ?thesis
+ proof
+ from True and snoc have "r i k = T" by simp
+ thus "list_all (\<lambda>x. x < i) [] \<and> is_path' r i [] k" by simp
+ qed
+ next
+ case False
+ have "PROP ?ih as" by (rule snoc)
+ then obtain ys where ys: "list_all (\<lambda>x. x < i) ys \<and> is_path' r i ys a"
+ proof
+ from snoc show "list_all (\<lambda>x. x < Suc i) as" by simp
+ from snoc show "is_path' r j as a" by simp
+ from snoc and False show "\<not> list_all (\<lambda>x. x < i) as" by simp
+ qed
+ show ?thesis
+ proof
+ from snoc False ys
+ show "list_all (\<lambda>x. x < i) (ys @ [a]) \<and> is_path' r i (ys @ [a]) k"
+ by simp
+ qed
+ qed
+ qed
+ qed (rule asms)+
+qed
+
+theorem lemma5':
+ "\<And>p. is_path r p (Suc i) j k \<Longrightarrow> \<not> is_path r p i j k \<Longrightarrow>
+ \<not> (\<forall>q. \<not> is_path r q i j i) \<and> \<not> (\<forall>q'. \<not> is_path r q' i i k)"
+ by (iprover dest: lemma5)
+
+theorem warshall:
+ "\<And>j k. \<not> (\<exists>p. is_path r p i j k) \<or> (\<exists>p. is_path r p i j k)"
+proof (induct i)
+ case (0 j k)
+ show ?case
+ proof (cases "r j k")
+ assume "r j k = T"
+ hence "is_path r (j, [], k) 0 j k"
+ by (simp add: is_path_def)
+ hence "\<exists>p. is_path r p 0 j k" ..
+ thus ?thesis ..
+ next
+ assume "r j k = F"
+ hence "r j k ~= T" by simp
+ hence "\<not> (\<exists>p. is_path r p 0 j k)"
+ by (iprover dest: lemma2)
+ thus ?thesis ..
+ qed
+next
+ case (Suc i j k)
+ thus ?case
+ proof
+ assume h1: "\<not> (\<exists>p. is_path r p i j k)"
+ from Suc show ?case
+ proof
+ assume "\<not> (\<exists>p. is_path r p i j i)"
+ with h1 have "\<not> (\<exists>p. is_path r p (Suc i) j k)"
+ by (iprover dest: lemma5')
+ thus ?case ..
+ next
+ assume "\<exists>p. is_path r p i j i"
+ then obtain p where h2: "is_path r p i j i" ..
+ from Suc show ?case
+ proof
+ assume "\<not> (\<exists>p. is_path r p i i k)"
+ with h1 have "\<not> (\<exists>p. is_path r p (Suc i) j k)"
+ by (iprover dest: lemma5')
+ thus ?case ..
+ next
+ assume "\<exists>q. is_path r q i i k"
+ then obtain q where "is_path r q i i k" ..
+ with h2 have "is_path r (conc p q) (Suc i) j k"
+ by (rule lemma3)
+ hence "\<exists>pq. is_path r pq (Suc i) j k" ..
+ thus ?case ..
+ qed
+ qed
+ next
+ assume "\<exists>p. is_path r p i j k"
+ hence "\<exists>p. is_path r p (Suc i) j k"
+ by (iprover intro: lemma1)
+ thus ?case ..
+ qed
+qed
+
+extract warshall
+
+text {*
+ The program extracted from the above proof looks as follows:
+ @{thm [display, eta_contract=false] warshall_def [no_vars]}
+ The corresponding correctness theorem is
+ @{thm [display] warshall_correctness [no_vars]}
+*}
+
+ML "@{code warshall}"
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/document/root.bib Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,39 @@
+@Article{Berger-JAR-2001,
+ author = {Ulrich Berger and Helmut Schwichtenberg and Monika Seisenberger},
+ title = {The {Warshall} Algorithm and {Dickson's} Lemma: Two
+ Examples of Realistic Program Extraction},
+ journal = {Journal of Automated Reasoning},
+ publisher = {Kluwer Academic Publishers},
+ year = 2001,
+ volume = 26,
+ pages = {205--221}
+}
+
+@TechReport{Coquand93,
+ author = {Thierry Coquand and Daniel Fridlender},
+ title = {A proof of {Higman's} lemma by structural induction},
+ institution = {Chalmers University},
+ year = 1993,
+ month = {November}
+}
+
+@InProceedings{Nogin-ENTCS-2000,
+ author = {Aleksey Nogin},
+ title = {Writing constructive proofs yielding efficient extracted programs},
+ booktitle = {Proceedings of the Workshop on Type-Theoretic Languages: Proof Search and Semantics},
+ year = 2000,
+ editor = {Didier Galmiche},
+ volume = 37,
+ series = {Electronic Notes in Theoretical Computer Science},
+ publisher = {Elsevier Science Publishers}
+}
+
+@Article{Wenzel-Wiedijk-JAR2002,
+ author = {Markus Wenzel and Freek Wiedijk},
+ title = {A comparison of the mathematical proof languages {M}izar and {I}sar},
+ journal = {Journal of Automated Reasoning},
+ year = 2002,
+ volume = 29,
+ number = {3--4},
+ pages = {389--411}
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Extraction/document/root.tex Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,27 @@
+\documentclass[11pt,a4paper]{article}
+\usepackage{isabelle,isabellesym}
+\usepackage{pdfsetup}
+
+\urlstyle{rm}
+\isabellestyle{it}
+
+\begin{document}
+
+\title{Examples for program extraction in Higher-Order Logic}
+\author{Stefan Berghofer}
+\maketitle
+
+\nocite{Berger-JAR-2001,Coquand93}
+
+\tableofcontents
+\newcommand{\isasymbind}{\isamath{\mathbin{>\!\!\!>\mkern-6.7mu=}}}
+\newcommand{\isasymthen}{\isamath{\mathbin{>\!\!\!>}}}
+
+\parindent 0pt\parskip 0.5ex
+
+\input{session}
+
+\bibliographystyle{abbrv}
+\bibliography{root}
+
+\end{document}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/Commutation.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,268 @@
+(* Title: HOL/Proofs/Lambda/Commutation.thy
+ Author: Tobias Nipkow
+ Copyright 1995 TU Muenchen
+*)
+
+header {* Abstract commutation and confluence notions *}
+
+theory Commutation imports Main begin
+
+declare [[syntax_ambiguity_level = 100]]
+
+
+subsection {* Basic definitions *}
+
+definition
+ square :: "['a => 'a => bool, 'a => 'a => bool, 'a => 'a => bool, 'a => 'a => bool] => bool" where
+ "square R S T U =
+ (\<forall>x y. R x y --> (\<forall>z. S x z --> (\<exists>u. T y u \<and> U z u)))"
+
+definition
+ commute :: "['a => 'a => bool, 'a => 'a => bool] => bool" where
+ "commute R S = square R S S R"
+
+definition
+ diamond :: "('a => 'a => bool) => bool" where
+ "diamond R = commute R R"
+
+definition
+ Church_Rosser :: "('a => 'a => bool) => bool" where
+ "Church_Rosser R =
+ (\<forall>x y. (sup R (R^--1))^** x y --> (\<exists>z. R^** x z \<and> R^** y z))"
+
+abbreviation
+ confluent :: "('a => 'a => bool) => bool" where
+ "confluent R == diamond (R^**)"
+
+
+subsection {* Basic lemmas *}
+
+subsubsection {* @{text "square"} *}
+
+lemma square_sym: "square R S T U ==> square S R U T"
+ apply (unfold square_def)
+ apply blast
+ done
+
+lemma square_subset:
+ "[| square R S T U; T \<le> T' |] ==> square R S T' U"
+ apply (unfold square_def)
+ apply (blast dest: predicate2D)
+ done
+
+lemma square_reflcl:
+ "[| square R S T (R^==); S \<le> T |] ==> square (R^==) S T (R^==)"
+ apply (unfold square_def)
+ apply (blast dest: predicate2D)
+ done
+
+lemma square_rtrancl:
+ "square R S S T ==> square (R^**) S S (T^**)"
+ apply (unfold square_def)
+ apply (intro strip)
+ apply (erule rtranclp_induct)
+ apply blast
+ apply (blast intro: rtranclp.rtrancl_into_rtrancl)
+ done
+
+lemma square_rtrancl_reflcl_commute:
+ "square R S (S^**) (R^==) ==> commute (R^**) (S^**)"
+ apply (unfold commute_def)
+ apply (fastsimp dest: square_reflcl square_sym [THEN square_rtrancl])
+ done
+
+
+subsubsection {* @{text "commute"} *}
+
+lemma commute_sym: "commute R S ==> commute S R"
+ apply (unfold commute_def)
+ apply (blast intro: square_sym)
+ done
+
+lemma commute_rtrancl: "commute R S ==> commute (R^**) (S^**)"
+ apply (unfold commute_def)
+ apply (blast intro: square_rtrancl square_sym)
+ done
+
+lemma commute_Un:
+ "[| commute R T; commute S T |] ==> commute (sup R S) T"
+ apply (unfold commute_def square_def)
+ apply blast
+ done
+
+
+subsubsection {* @{text "diamond"}, @{text "confluence"}, and @{text "union"} *}
+
+lemma diamond_Un:
+ "[| diamond R; diamond S; commute R S |] ==> diamond (sup R S)"
+ apply (unfold diamond_def)
+ apply (blast intro: commute_Un commute_sym)
+ done
+
+lemma diamond_confluent: "diamond R ==> confluent R"
+ apply (unfold diamond_def)
+ apply (erule commute_rtrancl)
+ done
+
+lemma square_reflcl_confluent:
+ "square R R (R^==) (R^==) ==> confluent R"
+ apply (unfold diamond_def)
+ apply (fast intro: square_rtrancl_reflcl_commute elim: square_subset)
+ done
+
+lemma confluent_Un:
+ "[| confluent R; confluent S; commute (R^**) (S^**) |] ==> confluent (sup R S)"
+ apply (rule rtranclp_sup_rtranclp [THEN subst])
+ apply (blast dest: diamond_Un intro: diamond_confluent)
+ done
+
+lemma diamond_to_confluence:
+ "[| diamond R; T \<le> R; R \<le> T^** |] ==> confluent T"
+ apply (force intro: diamond_confluent
+ dest: rtranclp_subset [symmetric])
+ done
+
+
+subsection {* Church-Rosser *}
+
+lemma Church_Rosser_confluent: "Church_Rosser R = confluent R"
+ apply (unfold square_def commute_def diamond_def Church_Rosser_def)
+ apply (tactic {* safe_tac HOL_cs *})
+ apply (tactic {*
+ blast_tac (HOL_cs addIs
+ [@{thm sup_ge2} RS @{thm rtranclp_mono} RS @{thm predicate2D} RS @{thm rtranclp_trans},
+ @{thm rtranclp_converseI}, @{thm conversepI},
+ @{thm sup_ge1} RS @{thm rtranclp_mono} RS @{thm predicate2D}]) 1 *})
+ apply (erule rtranclp_induct)
+ apply blast
+ apply (blast del: rtranclp.rtrancl_refl intro: rtranclp_trans)
+ done
+
+
+subsection {* Newman's lemma *}
+
+text {* Proof by Stefan Berghofer *}
+
+theorem newman:
+ assumes wf: "wfP (R\<inverse>\<inverse>)"
+ and lc: "\<And>a b c. R a b \<Longrightarrow> R a c \<Longrightarrow>
+ \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
+ shows "\<And>b c. R\<^sup>*\<^sup>* a b \<Longrightarrow> R\<^sup>*\<^sup>* a c \<Longrightarrow>
+ \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
+ using wf
+proof induct
+ case (less x b c)
+ have xc: "R\<^sup>*\<^sup>* x c" by fact
+ have xb: "R\<^sup>*\<^sup>* x b" by fact thus ?case
+ proof (rule converse_rtranclpE)
+ assume "x = b"
+ with xc have "R\<^sup>*\<^sup>* b c" by simp
+ thus ?thesis by iprover
+ next
+ fix y
+ assume xy: "R x y"
+ assume yb: "R\<^sup>*\<^sup>* y b"
+ from xc show ?thesis
+ proof (rule converse_rtranclpE)
+ assume "x = c"
+ with xb have "R\<^sup>*\<^sup>* c b" by simp
+ thus ?thesis by iprover
+ next
+ fix y'
+ assume y'c: "R\<^sup>*\<^sup>* y' c"
+ assume xy': "R x y'"
+ with xy have "\<exists>u. R\<^sup>*\<^sup>* y u \<and> R\<^sup>*\<^sup>* y' u" by (rule lc)
+ then obtain u where yu: "R\<^sup>*\<^sup>* y u" and y'u: "R\<^sup>*\<^sup>* y' u" by iprover
+ from xy have "R\<inverse>\<inverse> y x" ..
+ from this and yb yu have "\<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* u d" by (rule less)
+ then obtain v where bv: "R\<^sup>*\<^sup>* b v" and uv: "R\<^sup>*\<^sup>* u v" by iprover
+ from xy' have "R\<inverse>\<inverse> y' x" ..
+ moreover from y'u and uv have "R\<^sup>*\<^sup>* y' v" by (rule rtranclp_trans)
+ moreover note y'c
+ ultimately have "\<exists>d. R\<^sup>*\<^sup>* v d \<and> R\<^sup>*\<^sup>* c d" by (rule less)
+ then obtain w where vw: "R\<^sup>*\<^sup>* v w" and cw: "R\<^sup>*\<^sup>* c w" by iprover
+ from bv vw have "R\<^sup>*\<^sup>* b w" by (rule rtranclp_trans)
+ with cw show ?thesis by iprover
+ qed
+ qed
+qed
+
+text {*
+ Alternative version. Partly automated by Tobias
+ Nipkow. Takes 2 minutes (2002).
+
+ This is the maximal amount of automation possible using @{text blast}.
+*}
+
+theorem newman':
+ assumes wf: "wfP (R\<inverse>\<inverse>)"
+ and lc: "\<And>a b c. R a b \<Longrightarrow> R a c \<Longrightarrow>
+ \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
+ shows "\<And>b c. R\<^sup>*\<^sup>* a b \<Longrightarrow> R\<^sup>*\<^sup>* a c \<Longrightarrow>
+ \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
+ using wf
+proof induct
+ case (less x b c)
+ note IH = `\<And>y b c. \<lbrakk>R\<inverse>\<inverse> y x; R\<^sup>*\<^sup>* y b; R\<^sup>*\<^sup>* y c\<rbrakk>
+ \<Longrightarrow> \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d`
+ have xc: "R\<^sup>*\<^sup>* x c" by fact
+ have xb: "R\<^sup>*\<^sup>* x b" by fact
+ thus ?case
+ proof (rule converse_rtranclpE)
+ assume "x = b"
+ with xc have "R\<^sup>*\<^sup>* b c" by simp
+ thus ?thesis by iprover
+ next
+ fix y
+ assume xy: "R x y"
+ assume yb: "R\<^sup>*\<^sup>* y b"
+ from xc show ?thesis
+ proof (rule converse_rtranclpE)
+ assume "x = c"
+ with xb have "R\<^sup>*\<^sup>* c b" by simp
+ thus ?thesis by iprover
+ next
+ fix y'
+ assume y'c: "R\<^sup>*\<^sup>* y' c"
+ assume xy': "R x y'"
+ with xy obtain u where u: "R\<^sup>*\<^sup>* y u" "R\<^sup>*\<^sup>* y' u"
+ by (blast dest: lc)
+ from yb u y'c show ?thesis
+ by (blast del: rtranclp.rtrancl_refl
+ intro: rtranclp_trans
+ dest: IH [OF conversepI, OF xy] IH [OF conversepI, OF xy'])
+ qed
+ qed
+qed
+
+text {*
+ Using the coherent logic prover, the proof of the induction step
+ is completely automatic.
+*}
+
+lemma eq_imp_rtranclp: "x = y \<Longrightarrow> r\<^sup>*\<^sup>* x y"
+ by simp
+
+theorem newman'':
+ assumes wf: "wfP (R\<inverse>\<inverse>)"
+ and lc: "\<And>a b c. R a b \<Longrightarrow> R a c \<Longrightarrow>
+ \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
+ shows "\<And>b c. R\<^sup>*\<^sup>* a b \<Longrightarrow> R\<^sup>*\<^sup>* a c \<Longrightarrow>
+ \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d"
+ using wf
+proof induct
+ case (less x b c)
+ note IH = `\<And>y b c. \<lbrakk>R\<inverse>\<inverse> y x; R\<^sup>*\<^sup>* y b; R\<^sup>*\<^sup>* y c\<rbrakk>
+ \<Longrightarrow> \<exists>d. R\<^sup>*\<^sup>* b d \<and> R\<^sup>*\<^sup>* c d`
+ show ?case
+ by (coherent
+ `R\<^sup>*\<^sup>* x c` `R\<^sup>*\<^sup>* x b`
+ refl [where 'a='a] sym
+ eq_imp_rtranclp
+ r_into_rtranclp [of R]
+ rtranclp_trans
+ lc IH [OF conversepI]
+ converse_rtranclpE)
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/Eta.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,394 @@
+(* Title: HOL/Proofs/Lambda/Eta.thy
+ Author: Tobias Nipkow and Stefan Berghofer
+ Copyright 1995, 2005 TU Muenchen
+*)
+
+header {* Eta-reduction *}
+
+theory Eta imports ParRed begin
+
+
+subsection {* Definition of eta-reduction and relatives *}
+
+primrec
+ free :: "dB => nat => bool"
+where
+ "free (Var j) i = (j = i)"
+ | "free (s \<degree> t) i = (free s i \<or> free t i)"
+ | "free (Abs s) i = free s (i + 1)"
+
+inductive
+ eta :: "[dB, dB] => bool" (infixl "\<rightarrow>\<^sub>\<eta>" 50)
+where
+ eta [simp, intro]: "\<not> free s 0 ==> Abs (s \<degree> Var 0) \<rightarrow>\<^sub>\<eta> s[dummy/0]"
+ | appL [simp, intro]: "s \<rightarrow>\<^sub>\<eta> t ==> s \<degree> u \<rightarrow>\<^sub>\<eta> t \<degree> u"
+ | appR [simp, intro]: "s \<rightarrow>\<^sub>\<eta> t ==> u \<degree> s \<rightarrow>\<^sub>\<eta> u \<degree> t"
+ | abs [simp, intro]: "s \<rightarrow>\<^sub>\<eta> t ==> Abs s \<rightarrow>\<^sub>\<eta> Abs t"
+
+abbreviation
+ eta_reds :: "[dB, dB] => bool" (infixl "-e>>" 50) where
+ "s -e>> t == eta^** s t"
+
+abbreviation
+ eta_red0 :: "[dB, dB] => bool" (infixl "-e>=" 50) where
+ "s -e>= t == eta^== s t"
+
+notation (xsymbols)
+ eta_reds (infixl "\<rightarrow>\<^sub>\<eta>\<^sup>*" 50) and
+ eta_red0 (infixl "\<rightarrow>\<^sub>\<eta>\<^sup>=" 50)
+
+inductive_cases eta_cases [elim!]:
+ "Abs s \<rightarrow>\<^sub>\<eta> z"
+ "s \<degree> t \<rightarrow>\<^sub>\<eta> u"
+ "Var i \<rightarrow>\<^sub>\<eta> t"
+
+
+subsection {* Properties of @{text "eta"}, @{text "subst"} and @{text "free"} *}
+
+lemma subst_not_free [simp]: "\<not> free s i \<Longrightarrow> s[t/i] = s[u/i]"
+ by (induct s arbitrary: i t u) (simp_all add: subst_Var)
+
+lemma free_lift [simp]:
+ "free (lift t k) i = (i < k \<and> free t i \<or> k < i \<and> free t (i - 1))"
+ apply (induct t arbitrary: i k)
+ apply (auto cong: conj_cong)
+ done
+
+lemma free_subst [simp]:
+ "free (s[t/k]) i =
+ (free s k \<and> free t i \<or> free s (if i < k then i else i + 1))"
+ apply (induct s arbitrary: i k t)
+ prefer 2
+ apply simp
+ apply blast
+ prefer 2
+ apply simp
+ apply (simp add: diff_Suc subst_Var split: nat.split)
+ done
+
+lemma free_eta: "s \<rightarrow>\<^sub>\<eta> t ==> free t i = free s i"
+ by (induct arbitrary: i set: eta) (simp_all cong: conj_cong)
+
+lemma not_free_eta:
+ "[| s \<rightarrow>\<^sub>\<eta> t; \<not> free s i |] ==> \<not> free t i"
+ by (simp add: free_eta)
+
+lemma eta_subst [simp]:
+ "s \<rightarrow>\<^sub>\<eta> t ==> s[u/i] \<rightarrow>\<^sub>\<eta> t[u/i]"
+ by (induct arbitrary: u i set: eta) (simp_all add: subst_subst [symmetric])
+
+theorem lift_subst_dummy: "\<not> free s i \<Longrightarrow> lift (s[dummy/i]) i = s"
+ by (induct s arbitrary: i dummy) simp_all
+
+
+subsection {* Confluence of @{text "eta"} *}
+
+lemma square_eta: "square eta eta (eta^==) (eta^==)"
+ apply (unfold square_def id_def)
+ apply (rule impI [THEN allI [THEN allI]])
+ apply (erule eta.induct)
+ apply (slowsimp intro: subst_not_free eta_subst free_eta [THEN iffD1])
+ apply safe
+ prefer 5
+ apply (blast intro!: eta_subst intro: free_eta [THEN iffD1])
+ apply blast+
+ done
+
+theorem eta_confluent: "confluent eta"
+ apply (rule square_eta [THEN square_reflcl_confluent])
+ done
+
+
+subsection {* Congruence rules for @{text "eta\<^sup>*"} *}
+
+lemma rtrancl_eta_Abs: "s \<rightarrow>\<^sub>\<eta>\<^sup>* s' ==> Abs s \<rightarrow>\<^sub>\<eta>\<^sup>* Abs s'"
+ by (induct set: rtranclp)
+ (blast intro: rtranclp.rtrancl_into_rtrancl)+
+
+lemma rtrancl_eta_AppL: "s \<rightarrow>\<^sub>\<eta>\<^sup>* s' ==> s \<degree> t \<rightarrow>\<^sub>\<eta>\<^sup>* s' \<degree> t"
+ by (induct set: rtranclp)
+ (blast intro: rtranclp.rtrancl_into_rtrancl)+
+
+lemma rtrancl_eta_AppR: "t \<rightarrow>\<^sub>\<eta>\<^sup>* t' ==> s \<degree> t \<rightarrow>\<^sub>\<eta>\<^sup>* s \<degree> t'"
+ by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
+
+lemma rtrancl_eta_App:
+ "[| s \<rightarrow>\<^sub>\<eta>\<^sup>* s'; t \<rightarrow>\<^sub>\<eta>\<^sup>* t' |] ==> s \<degree> t \<rightarrow>\<^sub>\<eta>\<^sup>* s' \<degree> t'"
+ by (blast intro!: rtrancl_eta_AppL rtrancl_eta_AppR intro: rtranclp_trans)
+
+
+subsection {* Commutation of @{text "beta"} and @{text "eta"} *}
+
+lemma free_beta:
+ "s \<rightarrow>\<^sub>\<beta> t ==> free t i \<Longrightarrow> free s i"
+ by (induct arbitrary: i set: beta) auto
+
+lemma beta_subst [intro]: "s \<rightarrow>\<^sub>\<beta> t ==> s[u/i] \<rightarrow>\<^sub>\<beta> t[u/i]"
+ by (induct arbitrary: u i set: beta) (simp_all add: subst_subst [symmetric])
+
+lemma subst_Var_Suc [simp]: "t[Var i/i] = t[Var(i)/i + 1]"
+ by (induct t arbitrary: i) (auto elim!: linorder_neqE simp: subst_Var)
+
+lemma eta_lift [simp]: "s \<rightarrow>\<^sub>\<eta> t ==> lift s i \<rightarrow>\<^sub>\<eta> lift t i"
+ by (induct arbitrary: i set: eta) simp_all
+
+lemma rtrancl_eta_subst: "s \<rightarrow>\<^sub>\<eta> t \<Longrightarrow> u[s/i] \<rightarrow>\<^sub>\<eta>\<^sup>* u[t/i]"
+ apply (induct u arbitrary: s t i)
+ apply (simp_all add: subst_Var)
+ apply blast
+ apply (blast intro: rtrancl_eta_App)
+ apply (blast intro!: rtrancl_eta_Abs eta_lift)
+ done
+
+lemma rtrancl_eta_subst':
+ fixes s t :: dB
+ assumes eta: "s \<rightarrow>\<^sub>\<eta>\<^sup>* t"
+ shows "s[u/i] \<rightarrow>\<^sub>\<eta>\<^sup>* t[u/i]" using eta
+ by induct (iprover intro: eta_subst)+
+
+lemma rtrancl_eta_subst'':
+ fixes s t :: dB
+ assumes eta: "s \<rightarrow>\<^sub>\<eta>\<^sup>* t"
+ shows "u[s/i] \<rightarrow>\<^sub>\<eta>\<^sup>* u[t/i]" using eta
+ by induct (iprover intro: rtrancl_eta_subst rtranclp_trans)+
+
+lemma square_beta_eta: "square beta eta (eta^**) (beta^==)"
+ apply (unfold square_def)
+ apply (rule impI [THEN allI [THEN allI]])
+ apply (erule beta.induct)
+ apply (slowsimp intro: rtrancl_eta_subst eta_subst)
+ apply (blast intro: rtrancl_eta_AppL)
+ apply (blast intro: rtrancl_eta_AppR)
+ apply simp
+ apply (slowsimp intro: rtrancl_eta_Abs free_beta
+ iff del: dB.distinct simp: dB.distinct) (*23 seconds?*)
+ done
+
+lemma confluent_beta_eta: "confluent (sup beta eta)"
+ apply (assumption |
+ rule square_rtrancl_reflcl_commute confluent_Un
+ beta_confluent eta_confluent square_beta_eta)+
+ done
+
+
+subsection {* Implicit definition of @{text "eta"} *}
+
+text {* @{term "Abs (lift s 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> s"} *}
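+
+text {*
+  A concrete instance of this scheme (an editorial sketch, not part of
+  the original development), taking @{text s} to be @{text "Var 0"}:
+*}
+
+lemma "Abs (Var 1 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> Var 0"
+proof -
+  have "\<not> free (Var 1) 0" by simp
+  hence "Abs (Var 1 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> (Var 1)[Var 0/0]" by (rule eta.eta)
+  thus ?thesis by (simp add: subst_Var)
+qed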
+
+lemma not_free_iff_lifted:
+ "(\<not> free s i) = (\<exists>t. s = lift t i)"
+ apply (induct s arbitrary: i)
+ apply simp
+ apply (rule iffI)
+ apply (erule linorder_neqE)
+ apply (rule_tac x = "Var nat" in exI)
+ apply simp
+ apply (rule_tac x = "Var (nat - 1)" in exI)
+ apply simp
+ apply clarify
+ apply (rule notE)
+ prefer 2
+ apply assumption
+ apply (erule thin_rl)
+ apply (case_tac t)
+ apply simp
+ apply simp
+ apply simp
+ apply simp
+ apply (erule thin_rl)
+ apply (erule thin_rl)
+ apply (rule iffI)
+ apply (elim conjE exE)
+ apply (rename_tac u1 u2)
+ apply (rule_tac x = "u1 \<degree> u2" in exI)
+ apply simp
+ apply (erule exE)
+ apply (erule rev_mp)
+ apply (case_tac t)
+ apply simp
+ apply simp
+ apply blast
+ apply simp
+ apply simp
+ apply (erule thin_rl)
+ apply (rule iffI)
+ apply (erule exE)
+ apply (rule_tac x = "Abs t" in exI)
+ apply simp
+ apply (erule exE)
+ apply (erule rev_mp)
+ apply (case_tac t)
+ apply simp
+ apply simp
+ apply simp
+ apply blast
+ done
+
+theorem explicit_is_implicit:
+ "(\<forall>s u. (\<not> free s 0) --> R (Abs (s \<degree> Var 0)) (s[u/0])) =
+ (\<forall>s. R (Abs (lift s 0 \<degree> Var 0)) s)"
+ by (auto simp add: not_free_iff_lifted)
+
+
+subsection {* Eta-postponement theorem *}
+
+text {*
+ Based on a paper proof due to Andreas Abel.
+ Unlike the proof by Masako Takahashi \cite{Takahashi-IandC}, it does not
+ use parallel eta reduction, which only seems to complicate matters.
+*}
+
+theorem eta_case:
+ fixes s :: dB
+ assumes free: "\<not> free s 0"
+ and s: "s[dummy/0] => u"
+ shows "\<exists>t'. Abs (s \<degree> Var 0) => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u"
+proof -
+ from s have "lift (s[dummy/0]) 0 => lift u 0" by (simp del: lift_subst)
+ with free have "s => lift u 0" by (simp add: lift_subst_dummy del: lift_subst)
+ hence "Abs (s \<degree> Var 0) => Abs (lift u 0 \<degree> Var 0)" by simp
+ moreover have "\<not> free (lift u 0) 0" by simp
+ hence "Abs (lift u 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> lift u 0[dummy/0]"
+ by (rule eta.eta)
+ hence "Abs (lift u 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta>\<^sup>* u" by simp
+ ultimately show ?thesis by iprover
+qed
+
+theorem eta_par_beta:
+ assumes st: "s \<rightarrow>\<^sub>\<eta> t"
+ and tu: "t => u"
+ shows "\<exists>t'. s => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u" using tu st
+proof (induct arbitrary: s)
+ case (var n)
+ thus ?case by (iprover intro: par_beta_refl)
+next
+ case (abs s' t)
+ note abs' = this
+ from `s \<rightarrow>\<^sub>\<eta> Abs s'` show ?case
+ proof cases
+ case (eta s'' dummy)
+ from abs have "Abs s' => Abs t" by simp
+ with eta have "s''[dummy/0] => Abs t" by simp
+ with `\<not> free s'' 0` have "\<exists>t'. Abs (s'' \<degree> Var 0) => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* Abs t"
+ by (rule eta_case)
+ with eta show ?thesis by simp
+ next
+ case (abs r)
+ from `r \<rightarrow>\<^sub>\<eta> s'`
+ obtain t' where r: "r => t'" and t': "t' \<rightarrow>\<^sub>\<eta>\<^sup>* t" by (iprover dest: abs')
+ from r have "Abs r => Abs t'" ..
+ moreover from t' have "Abs t' \<rightarrow>\<^sub>\<eta>\<^sup>* Abs t" by (rule rtrancl_eta_Abs)
+ ultimately show ?thesis using abs by simp iprover
+ qed
+next
+ case (app u u' t t')
+ from `s \<rightarrow>\<^sub>\<eta> u \<degree> t` show ?case
+ proof cases
+ case (eta s' dummy)
+ from app have "u \<degree> t => u' \<degree> t'" by simp
+ with eta have "s'[dummy/0] => u' \<degree> t'" by simp
+ with `\<not> free s' 0` have "\<exists>r. Abs (s' \<degree> Var 0) => r \<and> r \<rightarrow>\<^sub>\<eta>\<^sup>* u' \<degree> t'"
+ by (rule eta_case)
+ with eta show ?thesis by simp
+ next
+ case (appL s')
+ from `s' \<rightarrow>\<^sub>\<eta> u`
+ obtain r where s': "s' => r" and r: "r \<rightarrow>\<^sub>\<eta>\<^sup>* u'" by (iprover dest: app)
+ from s' and app have "s' \<degree> t => r \<degree> t'" by simp
+ moreover from r have "r \<degree> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u' \<degree> t'" by (simp add: rtrancl_eta_AppL)
+ ultimately show ?thesis using appL by simp iprover
+ next
+ case (appR s')
+ from `s' \<rightarrow>\<^sub>\<eta> t`
+ obtain r where s': "s' => r" and r: "r \<rightarrow>\<^sub>\<eta>\<^sup>* t'" by (iprover dest: app)
+ from s' and app have "u \<degree> s' => u' \<degree> r" by simp
+ moreover from r have "u' \<degree> r \<rightarrow>\<^sub>\<eta>\<^sup>* u' \<degree> t'" by (simp add: rtrancl_eta_AppR)
+ ultimately show ?thesis using appR by simp iprover
+ qed
+next
+ case (beta u u' t t')
+ from `s \<rightarrow>\<^sub>\<eta> Abs u \<degree> t` show ?case
+ proof cases
+ case (eta s' dummy)
+ from beta have "Abs u \<degree> t => u'[t'/0]" by simp
+ with eta have "s'[dummy/0] => u'[t'/0]" by simp
+ with `\<not> free s' 0` have "\<exists>r. Abs (s' \<degree> Var 0) => r \<and> r \<rightarrow>\<^sub>\<eta>\<^sup>* u'[t'/0]"
+ by (rule eta_case)
+ with eta show ?thesis by simp
+ next
+ case (appL s')
+ from `s' \<rightarrow>\<^sub>\<eta> Abs u` show ?thesis
+ proof cases
+ case (eta s'' dummy)
+ have "Abs (lift u 1) = lift (Abs u) 0" by simp
+ also from eta have "\<dots> = s''" by (simp add: lift_subst_dummy del: lift_subst)
+ finally have s: "s = Abs (Abs (lift u 1) \<degree> Var 0) \<degree> t" using appL and eta by simp
+ from beta have "lift u 1 => lift u' 1" by simp
+ hence "Abs (lift u 1) \<degree> Var 0 => lift u' 1[Var 0/0]"
+ using par_beta.var ..
+ hence "Abs (Abs (lift u 1) \<degree> Var 0) \<degree> t => lift u' 1[Var 0/0][t'/0]"
+ using `t => t'` ..
+ with s have "s => u'[t'/0]" by simp
+ thus ?thesis by iprover
+ next
+ case (abs r)
+ from `r \<rightarrow>\<^sub>\<eta> u`
+ obtain r'' where r: "r => r''" and r'': "r'' \<rightarrow>\<^sub>\<eta>\<^sup>* u'" by (iprover dest: beta)
+ from r and beta have "Abs r \<degree> t => r''[t'/0]" by simp
+ moreover from r'' have "r''[t'/0] \<rightarrow>\<^sub>\<eta>\<^sup>* u'[t'/0]"
+ by (rule rtrancl_eta_subst')
+ ultimately show ?thesis using abs and appL by simp iprover
+ qed
+ next
+ case (appR s')
+ from `s' \<rightarrow>\<^sub>\<eta> t`
+ obtain r where s': "s' => r" and r: "r \<rightarrow>\<^sub>\<eta>\<^sup>* t'" by (iprover dest: beta)
+ from s' and beta have "Abs u \<degree> s' => u'[r/0]" by simp
+ moreover from r have "u'[r/0] \<rightarrow>\<^sub>\<eta>\<^sup>* u'[t'/0]"
+ by (rule rtrancl_eta_subst'')
+ ultimately show ?thesis using appR by simp iprover
+ qed
+qed
+
+theorem eta_postponement':
+ assumes eta: "s \<rightarrow>\<^sub>\<eta>\<^sup>* t" and beta: "t => u"
+ shows "\<exists>t'. s => t' \<and> t' \<rightarrow>\<^sub>\<eta>\<^sup>* u" using eta beta
+proof (induct arbitrary: u)
+ case base
+ thus ?case by blast
+next
+ case (step s' s'' s''')
+ then obtain t' where s': "s' => t'" and t': "t' \<rightarrow>\<^sub>\<eta>\<^sup>* s'''"
+ by (auto dest: eta_par_beta)
+ from s' obtain t'' where s: "s => t''" and t'': "t'' \<rightarrow>\<^sub>\<eta>\<^sup>* t'" using step
+ by blast
+ from t'' and t' have "t'' \<rightarrow>\<^sub>\<eta>\<^sup>* s'''" by (rule rtranclp_trans)
+ with s show ?case by iprover
+qed
+
+theorem eta_postponement:
+ assumes "(sup beta eta)\<^sup>*\<^sup>* s t"
+ shows "(beta\<^sup>*\<^sup>* OO eta\<^sup>*\<^sup>*) s t" using assms
+proof induct
+ case base
+ show ?case by blast
+next
+ case (step s' s'')
+ from step(3) obtain t' where s: "s \<rightarrow>\<^sub>\<beta>\<^sup>* t'" and t': "t' \<rightarrow>\<^sub>\<eta>\<^sup>* s'" by blast
+ from step(2) show ?case
+ proof
+ assume "s' \<rightarrow>\<^sub>\<beta> s''"
+ with beta_subset_par_beta have "s' => s''" ..
+ with t' obtain t'' where st: "t' => t''" and tu: "t'' \<rightarrow>\<^sub>\<eta>\<^sup>* s''"
+ by (auto dest: eta_postponement')
+ from par_beta_subset_beta st have "t' \<rightarrow>\<^sub>\<beta>\<^sup>* t''" ..
+ with s have "s \<rightarrow>\<^sub>\<beta>\<^sup>* t''" by (rule rtranclp_trans)
+ thus ?thesis using tu ..
+ next
+ assume "s' \<rightarrow>\<^sub>\<eta> s''"
+ with t' have "t' \<rightarrow>\<^sub>\<eta>\<^sup>* s''" ..
+ with s show ?thesis ..
+ qed
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/InductTermi.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,108 @@
+(* Title: HOL/Proofs/Lambda/InductTermi.thy
+ Author: Tobias Nipkow
+ Copyright 1998 TU Muenchen
+
+Inductive characterization of terminating lambda terms. Goes back to
+van Raamsdonk & Severi, On normalization, CWI TR CS-R9545, 1995. Also
+rediscovered by Matthes and Joachimski.
+*)
+
+header {* Inductive characterization of terminating lambda terms *}
+
+theory InductTermi imports ListBeta begin
+
+subsection {* Terminating lambda terms *}
+
+inductive IT :: "dB => bool"
+ where
+ Var [intro]: "listsp IT rs ==> IT (Var n \<degree>\<degree> rs)"
+ | Lambda [intro]: "IT r ==> IT (Abs r)"
+ | Beta [intro]: "IT ((r[s/0]) \<degree>\<degree> ss) ==> IT s ==> IT ((Abs r \<degree> s) \<degree>\<degree> ss)"
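+
+text {*
+  A first example (an illustrative sketch added for exposition, not
+  part of the original theory): variables are terminating.
+*}
+
+lemma "IT (Var n)"
+proof -
+  have "IT (Var n \<degree>\<degree> [])" by (rule IT.Var) (rule listsp.Nil)
+  thus ?thesis by simp
+qed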
+
+
+subsection {* Every term in @{text "IT"} terminates *}
+
+lemma double_induction_lemma [rule_format]:
+ "termip beta s ==> \<forall>t. termip beta t -->
+ (\<forall>r ss. t = r[s/0] \<degree>\<degree> ss --> termip beta (Abs r \<degree> s \<degree>\<degree> ss))"
+ apply (erule accp_induct)
+ apply (rule allI)
+ apply (rule impI)
+ apply (erule thin_rl)
+ apply (erule accp_induct)
+ apply clarify
+ apply (rule accp.accI)
+ apply (safe elim!: apps_betasE)
+ apply (blast intro: subst_preserves_beta apps_preserves_beta)
+ apply (blast intro: apps_preserves_beta2 subst_preserves_beta2 rtranclp_converseI
+ dest: accp_downwards) (* FIXME: acc_downwards can be replaced by acc(R ^* ) = acc(r) *)
+ apply (blast dest: apps_preserves_betas)
+ done
+
+lemma IT_implies_termi: "IT t ==> termip beta t"
+ apply (induct set: IT)
+ apply (drule rev_predicate1D [OF _ listsp_mono [where B="termip beta"]])
+ apply (fast intro!: predicate1I)
+ apply (drule lists_accD)
+ apply (erule accp_induct)
+ apply (rule accp.accI)
+ apply (blast dest: head_Var_reduction)
+ apply (erule accp_induct)
+ apply (rule accp.accI)
+ apply blast
+ apply (blast intro: double_induction_lemma)
+ done
+
+
+subsection {* Every terminating term is in @{text "IT"} *}
+
+declare Var_apps_neq_Abs_apps [symmetric, simp]
+
+lemma [simp, THEN not_sym, simp]: "Var n \<degree>\<degree> ss \<noteq> Abs r \<degree> s \<degree>\<degree> ts"
+ by (simp add: foldl_Cons [symmetric] del: foldl_Cons)
+
+lemma [simp]:
+ "(Abs r \<degree> s \<degree>\<degree> ss = Abs r' \<degree> s' \<degree>\<degree> ss') = (r = r' \<and> s = s' \<and> ss = ss')"
+ by (simp add: foldl_Cons [symmetric] del: foldl_Cons)
+
+inductive_cases [elim!]:
+ "IT (Var n \<degree>\<degree> ss)"
+ "IT (Abs t)"
+ "IT (Abs r \<degree> s \<degree>\<degree> ts)"
+
+theorem termi_implies_IT: "termip beta r ==> IT r"
+ apply (erule accp_induct)
+ apply (rename_tac r)
+ apply (erule thin_rl)
+ apply (erule rev_mp)
+ apply simp
+ apply (rule_tac t = r in Apps_dB_induct)
+ apply clarify
+ apply (rule IT.intros)
+ apply clarify
+ apply (drule bspec, assumption)
+ apply (erule mp)
+ apply clarify
+ apply (drule_tac r=beta in conversepI)
+ apply (drule_tac r="beta^--1" in ex_step1I, assumption)
+ apply clarify
+ apply (rename_tac us)
+ apply (erule_tac x = "Var n \<degree>\<degree> us" in allE)
+ apply force
+ apply (rename_tac u ts)
+ apply (case_tac ts)
+ apply simp
+ apply blast
+ apply (rename_tac s ss)
+ apply simp
+ apply clarify
+ apply (rule IT.intros)
+ apply (blast intro: apps_preserves_beta)
+ apply (erule mp)
+ apply clarify
+ apply (rename_tac t)
+ apply (erule_tac x = "Abs u \<degree> t \<degree>\<degree> ss" in allE)
+ apply force
+ done
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/Lambda.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,190 @@
+(* Title: HOL/Proofs/Lambda/Lambda.thy
+ Author: Tobias Nipkow
+ Copyright 1995 TU Muenchen
+*)
+
+header {* Basic definitions of Lambda-calculus *}
+
+theory Lambda imports Main begin
+
+declare [[syntax_ambiguity_level = 100]]
+
+
+subsection {* Lambda-terms in de Bruijn notation and substitution *}
+
+datatype dB =
+ Var nat
+ | App dB dB (infixl "\<degree>" 200)
+ | Abs dB
+
+primrec
+ lift :: "[dB, nat] => dB"
+where
+ "lift (Var i) k = (if i < k then Var i else Var (i + 1))"
+ | "lift (s \<degree> t) k = lift s k \<degree> lift t k"
+ | "lift (Abs s) k = Abs (lift s (k + 1))"
+
+primrec
+ subst :: "[dB, dB, nat] => dB" ("_[_'/_]" [300, 0, 0] 300)
+where (* FIXME base names *)
+ subst_Var: "(Var i)[s/k] =
+ (if k < i then Var (i - 1) else if i = k then s else Var i)"
+ | subst_App: "(t \<degree> u)[s/k] = t[s/k] \<degree> u[s/k]"
+ | subst_Abs: "(Abs t)[s/k] = Abs (t[lift s 0 / k+1])"
+
+declare subst_Var [simp del]
+
+text {* Optimized versions of @{term subst} and @{term lift}. *}
+
+primrec
+ liftn :: "[nat, dB, nat] => dB"
+where
+ "liftn n (Var i) k = (if i < k then Var i else Var (i + n))"
+ | "liftn n (s \<degree> t) k = liftn n s k \<degree> liftn n t k"
+ | "liftn n (Abs s) k = Abs (liftn n s (k + 1))"
+
+primrec
+ substn :: "[dB, dB, nat] => dB"
+where
+ "substn (Var i) s k =
+ (if k < i then Var (i - 1) else if i = k then liftn k s 0 else Var i)"
+ | "substn (t \<degree> u) s k = substn t s k \<degree> substn u s k"
+ | "substn (Abs t) s k = Abs (substn t s (k + 1))"
+
+
+subsection {* Beta-reduction *}
+
+inductive beta :: "[dB, dB] => bool" (infixl "\<rightarrow>\<^sub>\<beta>" 50)
+ where
+ beta [simp, intro!]: "Abs s \<degree> t \<rightarrow>\<^sub>\<beta> s[t/0]"
+ | appL [simp, intro!]: "s \<rightarrow>\<^sub>\<beta> t ==> s \<degree> u \<rightarrow>\<^sub>\<beta> t \<degree> u"
+ | appR [simp, intro!]: "s \<rightarrow>\<^sub>\<beta> t ==> u \<degree> s \<rightarrow>\<^sub>\<beta> u \<degree> t"
+ | abs [simp, intro!]: "s \<rightarrow>\<^sub>\<beta> t ==> Abs s \<rightarrow>\<^sub>\<beta> Abs t"
+
+abbreviation
+ beta_reds :: "[dB, dB] => bool" (infixl "->>" 50) where
+ "s ->> t == beta^** s t"
+
+notation (latex)
+ beta_reds (infixl "\<rightarrow>\<^sub>\<beta>\<^sup>*" 50)
+
+inductive_cases beta_cases [elim!]:
+ "Var i \<rightarrow>\<^sub>\<beta> t"
+ "Abs r \<rightarrow>\<^sub>\<beta> s"
+ "s \<degree> t \<rightarrow>\<^sub>\<beta> u"
+
+declare if_not_P [simp] not_less_eq [simp]
+ -- {* don't add @{text "r_into_rtrancl[intro!]"} *}
+
+
+subsection {* Congruence rules *}
+
+lemma rtrancl_beta_Abs [intro!]:
+ "s \<rightarrow>\<^sub>\<beta>\<^sup>* s' ==> Abs s \<rightarrow>\<^sub>\<beta>\<^sup>* Abs s'"
+ by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
+
+lemma rtrancl_beta_AppL:
+ "s \<rightarrow>\<^sub>\<beta>\<^sup>* s' ==> s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s' \<degree> t"
+ by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
+
+lemma rtrancl_beta_AppR:
+ "t \<rightarrow>\<^sub>\<beta>\<^sup>* t' ==> s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s \<degree> t'"
+ by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
+
+lemma rtrancl_beta_App [intro]:
+ "[| s \<rightarrow>\<^sub>\<beta>\<^sup>* s'; t \<rightarrow>\<^sub>\<beta>\<^sup>* t' |] ==> s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s' \<degree> t'"
+ by (blast intro!: rtrancl_beta_AppL rtrancl_beta_AppR intro: rtranclp_trans)
+
+
+subsection {* Substitution-lemmas *}
+
+lemma subst_eq [simp]: "(Var k)[u/k] = u"
+ by (simp add: subst_Var)
+
+lemma subst_gt [simp]: "i < j ==> (Var j)[u/i] = Var (j - 1)"
+ by (simp add: subst_Var)
+
+lemma subst_lt [simp]: "j < i ==> (Var j)[u/i] = Var j"
+ by (simp add: subst_Var)
+
+lemma lift_lift:
+ "i < k + 1 \<Longrightarrow> lift (lift t i) (Suc k) = lift (lift t k) i"
+ by (induct t arbitrary: i k) auto
+
+lemma lift_subst [simp]:
+ "j < i + 1 \<Longrightarrow> lift (t[s/j]) i = (lift t (i + 1)) [lift s i / j]"
+ by (induct t arbitrary: i j s)
+ (simp_all add: diff_Suc subst_Var lift_lift split: nat.split)
+
+lemma lift_subst_lt:
+ "i < j + 1 \<Longrightarrow> lift (t[s/j]) i = (lift t i) [lift s i / j + 1]"
+ by (induct t arbitrary: i j s) (simp_all add: subst_Var lift_lift)
+
+lemma subst_lift [simp]:
+ "(lift t k)[s/k] = t"
+ by (induct t arbitrary: k s) simp_all
+
+lemma subst_subst:
+ "i < j + 1 \<Longrightarrow> t[lift v i / Suc j][u[v/j]/i] = t[u/i][v/j]"
+ by (induct t arbitrary: i j u v)
+ (simp_all add: diff_Suc subst_Var lift_lift [symmetric] lift_subst_lt
+ split: nat.split)
+
+
+subsection {* Equivalence proof for optimized substitution *}
+
+lemma liftn_0 [simp]: "liftn 0 t k = t"
+ by (induct t arbitrary: k) (simp_all add: subst_Var)
+
+lemma liftn_lift [simp]: "liftn (Suc n) t k = lift (liftn n t k) k"
+ by (induct t arbitrary: k) (simp_all add: subst_Var)
+
+lemma substn_subst_n [simp]: "substn t s n = t[liftn n s 0 / n]"
+ by (induct t arbitrary: n) (simp_all add: subst_Var)
+
+theorem substn_subst_0: "substn t s 0 = t[s/0]"
+ by simp
+
+
+subsection {* Preservation theorems *}
+
+text {* Not used in Church-Rosser proof, but in Strong
+ Normalization. \medskip *}
+
+theorem subst_preserves_beta [simp]:
+ "r \<rightarrow>\<^sub>\<beta> s ==> r[t/i] \<rightarrow>\<^sub>\<beta> s[t/i]"
+ by (induct arbitrary: t i set: beta) (simp_all add: subst_subst [symmetric])
+
+theorem subst_preserves_beta': "r \<rightarrow>\<^sub>\<beta>\<^sup>* s ==> r[t/i] \<rightarrow>\<^sub>\<beta>\<^sup>* s[t/i]"
+ apply (induct set: rtranclp)
+ apply (rule rtranclp.rtrancl_refl)
+ apply (erule rtranclp.rtrancl_into_rtrancl)
+ apply (erule subst_preserves_beta)
+ done
+
+theorem lift_preserves_beta [simp]:
+ "r \<rightarrow>\<^sub>\<beta> s ==> lift r i \<rightarrow>\<^sub>\<beta> lift s i"
+ by (induct arbitrary: i set: beta) auto
+
+theorem lift_preserves_beta': "r \<rightarrow>\<^sub>\<beta>\<^sup>* s ==> lift r i \<rightarrow>\<^sub>\<beta>\<^sup>* lift s i"
+ apply (induct set: rtranclp)
+ apply (rule rtranclp.rtrancl_refl)
+ apply (erule rtranclp.rtrancl_into_rtrancl)
+ apply (erule lift_preserves_beta)
+ done
+
+theorem subst_preserves_beta2 [simp]: "r \<rightarrow>\<^sub>\<beta> s ==> t[r/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t[s/i]"
+ apply (induct t arbitrary: r s i)
+ apply (simp add: subst_Var r_into_rtranclp)
+ apply (simp add: rtrancl_beta_App)
+ apply (simp add: rtrancl_beta_Abs)
+ done
+
+theorem subst_preserves_beta2': "r \<rightarrow>\<^sub>\<beta>\<^sup>* s ==> t[r/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t[s/i]"
+ apply (induct set: rtranclp)
+ apply (rule rtranclp.rtrancl_refl)
+ apply (erule rtranclp_trans)
+ apply (erule subst_preserves_beta2)
+ done
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/ListApplication.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,145 @@
+(* Title: HOL/Proofs/Lambda/ListApplication.thy
+ Author: Tobias Nipkow
+ Copyright 1998 TU Muenchen
+*)
+
+header {* Application of a term to a list of terms *}
+
+theory ListApplication imports Lambda begin
+
+abbreviation
+ list_application :: "dB => dB list => dB" (infixl "\<degree>\<degree>" 150) where
+ "t \<degree>\<degree> ts == foldl (op \<degree>) t ts"
+
+lemma apps_eq_tail_conv [iff]: "(r \<degree>\<degree> ts = s \<degree>\<degree> ts) = (r = s)"
+ by (induct ts rule: rev_induct) auto
+
+lemma Var_eq_apps_conv [iff]: "(Var m = s \<degree>\<degree> ss) = (Var m = s \<and> ss = [])"
+ by (induct ss arbitrary: s) auto
+
+lemma Var_apps_eq_Var_apps_conv [iff]:
+ "(Var m \<degree>\<degree> rs = Var n \<degree>\<degree> ss) = (m = n \<and> rs = ss)"
+ apply (induct rs arbitrary: ss rule: rev_induct)
+ apply simp
+ apply blast
+ apply (induct_tac ss rule: rev_induct)
+ apply auto
+ done
+
+lemma App_eq_foldl_conv:
+ "(r \<degree> s = t \<degree>\<degree> ts) =
+ (if ts = [] then r \<degree> s = t
+ else (\<exists>ss. ts = ss @ [s] \<and> r = t \<degree>\<degree> ss))"
+ apply (rule_tac xs = ts in rev_exhaust)
+ apply auto
+ done
+
+lemma Abs_eq_apps_conv [iff]:
+ "(Abs r = s \<degree>\<degree> ss) = (Abs r = s \<and> ss = [])"
+ by (induct ss rule: rev_induct) auto
+
+lemma apps_eq_Abs_conv [iff]: "(s \<degree>\<degree> ss = Abs r) = (s = Abs r \<and> ss = [])"
+ by (induct ss rule: rev_induct) auto
+
+lemma Abs_apps_eq_Abs_apps_conv [iff]:
+ "(Abs r \<degree>\<degree> rs = Abs s \<degree>\<degree> ss) = (r = s \<and> rs = ss)"
+ apply (induct rs arbitrary: ss rule: rev_induct)
+ apply simp
+ apply blast
+ apply (induct_tac ss rule: rev_induct)
+ apply auto
+ done
+
+lemma Abs_App_neq_Var_apps [iff]:
+ "Abs s \<degree> t \<noteq> Var n \<degree>\<degree> ss"
+ by (induct ss arbitrary: s t rule: rev_induct) auto
+
+lemma Var_apps_neq_Abs_apps [iff]:
+ "Var n \<degree>\<degree> ts \<noteq> Abs r \<degree>\<degree> ss"
+ apply (induct ss arbitrary: ts rule: rev_induct)
+ apply simp
+ apply (induct_tac ts rule: rev_induct)
+ apply auto
+ done
+
+lemma ex_head_tail:
+ "\<exists>ts h. t = h \<degree>\<degree> ts \<and> ((\<exists>n. h = Var n) \<or> (\<exists>u. h = Abs u))"
+ apply (induct t)
+ apply (rule_tac x = "[]" in exI)
+ apply simp
+ apply clarify
+ apply (rename_tac ts1 ts2 h1 h2)
+ apply (rule_tac x = "ts1 @ [h2 \<degree>\<degree> ts2]" in exI)
+ apply simp
+ apply simp
+ done
+
+lemma size_apps [simp]:
+ "size (r \<degree>\<degree> rs) = size r + foldl (op +) 0 (map size rs) + length rs"
+ by (induct rs rule: rev_induct) auto
+
+lemma lem0: "[| (0::nat) < k; m <= n |] ==> m < n + k"
+ by simp
+
+lemma lift_map [simp]:
+ "lift (t \<degree>\<degree> ts) i = lift t i \<degree>\<degree> map (\<lambda>t. lift t i) ts"
+ by (induct ts arbitrary: t) simp_all
+
+lemma subst_map [simp]:
+ "subst (t \<degree>\<degree> ts) u i = subst t u i \<degree>\<degree> map (\<lambda>t. subst t u i) ts"
+ by (induct ts arbitrary: t) simp_all
+
+lemma app_last: "(t \<degree>\<degree> ts) \<degree> u = t \<degree>\<degree> (ts @ [u])"
+ by simp
+
+
+text {* \medskip A customized induction schema for @{text "\<degree>\<degree>"}. *}
+
+lemma lem:
+ assumes "!!n ts. \<forall>t \<in> set ts. P t ==> P (Var n \<degree>\<degree> ts)"
+ and "!!u ts. [| P u; \<forall>t \<in> set ts. P t |] ==> P (Abs u \<degree>\<degree> ts)"
+ shows "size t = n \<Longrightarrow> P t"
+ apply (induct n arbitrary: t rule: nat_less_induct)
+ apply (cut_tac t = t in ex_head_tail)
+ apply clarify
+ apply (erule disjE)
+ apply clarify
+ apply (rule assms)
+ apply clarify
+ apply (erule allE, erule impE)
+ prefer 2
+ apply (erule allE, erule mp, rule refl)
+ apply simp
+ apply (rule lem0)
+ apply force
+ apply (rule elem_le_sum)
+ apply force
+ apply clarify
+ apply (rule assms)
+ apply (erule allE, erule impE)
+ prefer 2
+ apply (erule allE, erule mp, rule refl)
+ apply simp
+ apply clarify
+ apply (erule allE, erule impE)
+ prefer 2
+ apply (erule allE, erule mp, rule refl)
+ apply simp
+ apply (rule le_imp_less_Suc)
+ apply (rule trans_le_add1)
+ apply (rule trans_le_add2)
+ apply (rule elem_le_sum)
+ apply force
+ done
+
+theorem Apps_dB_induct:
+ assumes "!!n ts. \<forall>t \<in> set ts. P t ==> P (Var n \<degree>\<degree> ts)"
+ and "!!u ts. [| P u; \<forall>t \<in> set ts. P t |] ==> P (Abs u \<degree>\<degree> ts)"
+ shows "P t"
+ apply (rule_tac t = t in lem)
+ prefer 3
+ apply (rule refl)
+ using assms apply iprover+
+ done
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/ListBeta.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,90 @@
+(* Title: HOL/Proofs/Lambda/ListBeta.thy
+ Author: Tobias Nipkow
+ Copyright 1998 TU Muenchen
+*)
+
+header {* Lifting beta-reduction to lists *}
+
+theory ListBeta imports ListApplication ListOrder begin
+
+text {*
+ Lifting beta-reduction to lists of terms, reducing exactly one element.
+*}
+
+abbreviation
+ list_beta :: "dB list => dB list => bool" (infixl "=>" 50) where
+ "rs => ss == step1 beta rs ss"
+
+lemma head_Var_reduction:
+ "Var n \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> v \<Longrightarrow> \<exists>ss. rs => ss \<and> v = Var n \<degree>\<degree> ss"
+ apply (induct u == "Var n \<degree>\<degree> rs" v arbitrary: rs set: beta)
+ apply simp
+ apply (rule_tac xs = rs in rev_exhaust)
+ apply simp
+ apply (atomize, force intro: append_step1I)
+ apply (rule_tac xs = rs in rev_exhaust)
+ apply simp
+ apply (auto 0 3 intro: disjI2 [THEN append_step1I])
+ done
+
+lemma apps_betasE [elim!]:
+ assumes major: "r \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> s"
+ and cases: "!!r'. [| r \<rightarrow>\<^sub>\<beta> r'; s = r' \<degree>\<degree> rs |] ==> R"
+ "!!rs'. [| rs => rs'; s = r \<degree>\<degree> rs' |] ==> R"
+ "!!t u us. [| r = Abs t; rs = u # us; s = t[u/0] \<degree>\<degree> us |] ==> R"
+ shows R
+proof -
+ from major have
+ "(\<exists>r'. r \<rightarrow>\<^sub>\<beta> r' \<and> s = r' \<degree>\<degree> rs) \<or>
+ (\<exists>rs'. rs => rs' \<and> s = r \<degree>\<degree> rs') \<or>
+ (\<exists>t u us. r = Abs t \<and> rs = u # us \<and> s = t[u/0] \<degree>\<degree> us)"
+ apply (induct u == "r \<degree>\<degree> rs" s arbitrary: r rs set: beta)
+ apply (case_tac r)
+ apply simp
+ apply (simp add: App_eq_foldl_conv)
+ apply (split split_if_asm)
+ apply simp
+ apply blast
+ apply simp
+ apply (simp add: App_eq_foldl_conv)
+ apply (split split_if_asm)
+ apply simp
+ apply simp
+ apply (drule App_eq_foldl_conv [THEN iffD1])
+ apply (split split_if_asm)
+ apply simp
+ apply blast
+ apply (force intro!: disjI1 [THEN append_step1I])
+ apply (drule App_eq_foldl_conv [THEN iffD1])
+ apply (split split_if_asm)
+ apply simp
+ apply blast
+ apply (clarify, auto 0 3 intro!: exI intro: append_step1I)
+ done
+ with cases show ?thesis by blast
+qed
+
+lemma apps_preserves_beta [simp]:
+ "r \<rightarrow>\<^sub>\<beta> s ==> r \<degree>\<degree> ss \<rightarrow>\<^sub>\<beta> s \<degree>\<degree> ss"
+ by (induct ss rule: rev_induct) auto
+
+lemma apps_preserves_beta2 [simp]:
+ "r ->> s ==> r \<degree>\<degree> ss ->> s \<degree>\<degree> ss"
+ apply (induct set: rtranclp)
+ apply blast
+ apply (blast intro: apps_preserves_beta rtranclp.rtrancl_into_rtrancl)
+ done
+
+lemma apps_preserves_betas [simp]:
+ "rs => ss \<Longrightarrow> r \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> r \<degree>\<degree> ss"
+ apply (induct rs arbitrary: ss rule: rev_induct)
+ apply simp
+ apply simp
+ apply (rule_tac xs = ss in rev_exhaust)
+ apply simp
+ apply simp
+ apply (drule Snoc_step1_SnocD)
+ apply blast
+ done
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/ListOrder.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,124 @@
+(* Title: HOL/Proofs/Lambda/ListOrder.thy
+ Author: Tobias Nipkow
+ Copyright 1998 TU Muenchen
+*)
+
+header {* Lifting an order to lists of elements *}
+
+theory ListOrder imports Main begin
+
+declare [[syntax_ambiguity_level = 100]]
+
+
+text {*
+ Lifting an order to lists of elements, relating exactly one
+ element.
+*}
+
+definition
+ step1 :: "('a => 'a => bool) => 'a list => 'a list => bool" where
+ "step1 r =
+ (\<lambda>ys xs. \<exists>us z z' vs. xs = us @ z # vs \<and> r z' z \<and> ys =
+ us @ z' # vs)"
+
+
+lemma step1_converse [simp]: "step1 (r^--1) = (step1 r)^--1"
+ apply (unfold step1_def)
+ apply (blast intro!: order_antisym)
+ done
+
+lemma in_step1_converse [iff]: "(step1 (r^--1) x y) = ((step1 r)^--1 x y)"
+ apply auto
+ done
+
+lemma not_Nil_step1 [iff]: "\<not> step1 r [] xs"
+ apply (unfold step1_def)
+ apply blast
+ done
+
+lemma not_step1_Nil [iff]: "\<not> step1 r xs []"
+ apply (unfold step1_def)
+ apply blast
+ done
+
+lemma Cons_step1_Cons [iff]:
+ "(step1 r (y # ys) (x # xs)) =
+ (r y x \<and> xs = ys \<or> x = y \<and> step1 r ys xs)"
+ apply (unfold step1_def)
+ apply (rule iffI)
+ apply (erule exE)
+ apply (rename_tac ts)
+ apply (case_tac ts)
+ apply fastsimp
+ apply force
+ apply (erule disjE)
+ apply blast
+ apply (blast intro: Cons_eq_appendI)
+ done
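+
+text {*
+  A tiny concrete instance (an editorial sketch, not part of the
+  original theory): two lists related by @{text step1} differ in
+  exactly one position.
+*}
+
+lemma "step1 (op <) [1::nat, 5] [3, 5]"
+  by simp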
+
+lemma append_step1I:
+ "step1 r ys xs \<and> vs = us \<or> ys = xs \<and> step1 r vs us
+ ==> step1 r (ys @ vs) (xs @ us)"
+ apply (unfold step1_def)
+ apply auto
+ apply blast
+ apply (blast intro: append_eq_appendI)
+ done
+
+lemma Cons_step1E [elim!]:
+ assumes "step1 r ys (x # xs)"
+ and "!!y. ys = y # xs \<Longrightarrow> r y x \<Longrightarrow> R"
+ and "!!zs. ys = x # zs \<Longrightarrow> step1 r zs xs \<Longrightarrow> R"
+ shows R
+ using assms
+ apply (cases ys)
+ apply (simp add: step1_def)
+ apply blast
+ done
+
+lemma Snoc_step1_SnocD:
+ "step1 r (ys @ [y]) (xs @ [x])
+ ==> (step1 r ys xs \<and> y = x \<or> ys = xs \<and> r y x)"
+ apply (unfold step1_def)
+ apply (clarify del: disjCI)
+ apply (rename_tac vs)
+ apply (rule_tac xs = vs in rev_exhaust)
+ apply force
+ apply simp
+ apply blast
+ done
+
+lemma Cons_acc_step1I [intro!]:
+ "accp r x ==> accp (step1 r) xs \<Longrightarrow> accp (step1 r) (x # xs)"
+ apply (induct arbitrary: xs set: accp)
+ apply (erule thin_rl)
+ apply (erule accp_induct)
+ apply (rule accp.accI)
+ apply blast
+ done
+
+lemma lists_accD: "listsp (accp r) xs ==> accp (step1 r) xs"
+ apply (induct set: listsp)
+ apply (rule accp.accI)
+ apply simp
+ apply (rule accp.accI)
+ apply (fast dest: accp_downward)
+ done
+
+lemma ex_step1I:
+ "[| x \<in> set xs; r y x |]
+ ==> \<exists>ys. step1 r ys xs \<and> y \<in> set ys"
+ apply (unfold step1_def)
+ apply (drule in_set_conv_decomp [THEN iffD1])
+ apply force
+ done
+
+lemma lists_accI: "accp (step1 r) xs ==> listsp (accp r) xs"
+ apply (induct set: accp)
+ apply clarify
+ apply (rule accp.accI)
+ apply (drule_tac r=r in ex_step1I, assumption)
+ apply blast
+ done
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/NormalForm.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,247 @@
+(* Title: HOL/Proofs/Lambda/NormalForm.thy
+ Author: Stefan Berghofer, TU Muenchen, 2003
+*)
+
+header {* Inductive characterization of lambda terms in normal form *}
+
+theory NormalForm
+imports ListBeta
+begin
+
+subsection {* Terms in normal form *}
+
+definition
+ listall :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> bool" where
+ "listall P xs \<equiv> (\<forall>i. i < length xs \<longrightarrow> P (xs ! i))"
+
+declare listall_def [extraction_expand_def]
+
+theorem listall_nil: "listall P []"
+ by (simp add: listall_def)
+
+theorem listall_nil_eq [simp]: "listall P [] = True"
+ by (iprover intro: listall_nil)
+
+theorem listall_cons: "P x \<Longrightarrow> listall P xs \<Longrightarrow> listall P (x # xs)"
+ apply (simp add: listall_def)
+ apply (rule allI impI)+
+ apply (case_tac i)
+ apply simp+
+ done
+
+theorem listall_cons_eq [simp]: "listall P (x # xs) = (P x \<and> listall P xs)"
+ apply (rule iffI)
+ prefer 2
+ apply (erule conjE)
+ apply (erule listall_cons)
+ apply assumption
+ apply (unfold listall_def)
+ apply (rule conjI)
+ apply (erule_tac x=0 in allE)
+ apply simp
+ apply simp
+ apply (rule allI)
+ apply (erule_tac x="Suc i" in allE)
+ apply simp
+ done
+
+lemma listall_conj1: "listall (\<lambda>x. P x \<and> Q x) xs \<Longrightarrow> listall P xs"
+ by (induct xs) simp_all
+
+lemma listall_conj2: "listall (\<lambda>x. P x \<and> Q x) xs \<Longrightarrow> listall Q xs"
+ by (induct xs) simp_all
+
+lemma listall_app: "listall P (xs @ ys) = (listall P xs \<and> listall P ys)"
+ apply (induct xs)
+ apply (rule iffI, simp, simp)
+ apply (rule iffI, simp, simp)
+ done
+
+lemma listall_snoc [simp]: "listall P (xs @ [x]) = (listall P xs \<and> P x)"
+ apply (rule iffI)
+ apply (simp add: listall_app)+
+ done
+
+lemma listall_cong [cong, extraction_expand]:
+ "xs = ys \<Longrightarrow> listall P xs = listall P ys"
+ -- {* Currently needed for strange technical reasons *}
+ by (unfold listall_def) simp
+
+text {*
+@{term "listsp"} is equivalent to @{term "listall"}, but cannot be
+used for program extraction.
+*}
+
+lemma listall_listsp_eq: "listall P xs = listsp P xs"
+ by (induct xs) (auto intro: listsp.intros)
+
+inductive NF :: "dB \<Rightarrow> bool"
+where
+ App: "listall NF ts \<Longrightarrow> NF (Var x \<degree>\<degree> ts)"
+| Abs: "NF t \<Longrightarrow> NF (Abs t)"
+monos listall_def
+
+lemma nat_eq_dec: "\<And>n::nat. m = n \<or> m \<noteq> n"
+ apply (induct m)
+ apply (case_tac n)
+ apply (case_tac [3] n)
+ apply (simp only: nat.simps, iprover?)+
+ done
+
+lemma nat_le_dec: "\<And>n::nat. m < n \<or> \<not> (m < n)"
+ apply (induct m)
+ apply (case_tac n)
+ apply (case_tac [3] n)
+ apply (simp del: simp_thms, iprover?)+
+ done
+
+lemma App_NF_D: assumes NF: "NF (Var n \<degree>\<degree> ts)"
+ shows "listall NF ts" using NF
+ by cases simp_all
+
+
+subsection {* Properties of @{text NF} *}
+
+lemma Var_NF: "NF (Var n)"
+ apply (subgoal_tac "NF (Var n \<degree>\<degree> [])")
+ apply simp
+ apply (rule NF.App)
+ apply simp
+ done
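+
+text {*
+  A minimal consequence (an illustrative sketch, not part of the
+  original theory): abstractions over variables are in normal form.
+*}
+
+lemma "NF (Abs (Var n))"
+  by (rule NF.Abs [OF Var_NF])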
+
+lemma Abs_NF:
+ assumes NF: "NF (Abs t \<degree>\<degree> ts)"
+ shows "ts = []" using NF
+proof cases
+ case (App us i)
+ thus ?thesis by (simp add: Var_apps_neq_Abs_apps [THEN not_sym])
+next
+ case (Abs u)
+ thus ?thesis by simp
+qed
+
+lemma subst_terms_NF: "listall NF ts \<Longrightarrow>
+ listall (\<lambda>t. \<forall>i j. NF (t[Var i/j])) ts \<Longrightarrow>
+ listall NF (map (\<lambda>t. t[Var i/j]) ts)"
+ by (induct ts) simp_all
+
+lemma subst_Var_NF: "NF t \<Longrightarrow> NF (t[Var i/j])"
+ apply (induct arbitrary: i j set: NF)
+ apply simp
+ apply (frule listall_conj1)
+ apply (drule listall_conj2)
+ apply (drule_tac i=i and j=j in subst_terms_NF)
+ apply assumption
+ apply (rule_tac m=x and n=j in nat_eq_dec [THEN disjE, standard])
+ apply simp
+ apply (erule NF.App)
+ apply (rule_tac m=j and n=x in nat_le_dec [THEN disjE, standard])
+ apply simp
+ apply (iprover intro: NF.App)
+ apply simp
+ apply (iprover intro: NF.App)
+ apply simp
+ apply (iprover intro: NF.Abs)
+ done
+
+lemma app_Var_NF: "NF t \<Longrightarrow> \<exists>t'. t \<degree> Var i \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
+ apply (induct set: NF)
+ apply (simplesubst app_last) --{*Using @{text subst} makes extraction fail*}
+ apply (rule exI)
+ apply (rule conjI)
+ apply (rule rtranclp.rtrancl_refl)
+ apply (rule NF.App)
+ apply (drule listall_conj1)
+ apply (simp add: listall_app)
+ apply (rule Var_NF)
+ apply (rule exI)
+ apply (rule conjI)
+ apply (rule rtranclp.rtrancl_into_rtrancl)
+ apply (rule rtranclp.rtrancl_refl)
+ apply (rule beta)
+ apply (erule subst_Var_NF)
+ done
+
+lemma lift_terms_NF: "listall NF ts \<Longrightarrow>
+ listall (\<lambda>t. \<forall>i. NF (lift t i)) ts \<Longrightarrow>
+ listall NF (map (\<lambda>t. lift t i) ts)"
+ by (induct ts) simp_all
+
+lemma lift_NF: "NF t \<Longrightarrow> NF (lift t i)"
+ apply (induct arbitrary: i set: NF)
+ apply (frule listall_conj1)
+ apply (drule listall_conj2)
+ apply (drule_tac i=i in lift_terms_NF)
+ apply assumption
+ apply (rule_tac m=x and n=i in nat_le_dec [THEN disjE, standard])
+ apply simp
+ apply (rule NF.App)
+ apply assumption
+ apply simp
+ apply (rule NF.App)
+ apply assumption
+ apply simp
+ apply (rule NF.Abs)
+ apply simp
+ done
+
+text {*
+@{term NF} characterizes exactly the terms that are in normal form.
+*}
+
+lemma NF_eq: "NF t = (\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t')"
+proof
+ assume "NF t"
+ then have "\<And>t'. \<not> t \<rightarrow>\<^sub>\<beta> t'"
+ proof induct
+ case (App ts t)
+ show ?case
+ proof
+ assume "Var t \<degree>\<degree> ts \<rightarrow>\<^sub>\<beta> t'"
+ then obtain rs where "ts => rs"
+ by (iprover dest: head_Var_reduction)
+ with App show False
+ by (induct rs arbitrary: ts) auto
+ qed
+ next
+ case (Abs t)
+ show ?case
+ proof
+ assume "Abs t \<rightarrow>\<^sub>\<beta> t'"
+ then show False using Abs by cases simp_all
+ qed
+ qed
+ then show "\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t'" ..
+next
+ assume H: "\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t'"
+ then show "NF t"
+ proof (induct t rule: Apps_dB_induct)
+ case (1 n ts)
+ then have "\<forall>ts'. \<not> ts => ts'"
+ by (iprover intro: apps_preserves_betas)
+ with 1(1) have "listall NF ts"
+ by (induct ts) auto
+ then show ?case by (rule NF.App)
+ next
+ case (2 u ts)
+ show ?case
+ proof (cases ts)
+ case Nil
+ from 2 have "\<forall>u'. \<not> u \<rightarrow>\<^sub>\<beta> u'"
+ by (auto intro: apps_preserves_beta)
+ then have "NF u" by (rule 2)
+ then have "NF (Abs u)" by (rule NF.Abs)
+ with Nil show ?thesis by simp
+ next
+ case (Cons r rs)
+ have "Abs u \<degree> r \<rightarrow>\<^sub>\<beta> u[r/0]" ..
+ then have "Abs u \<degree> r \<degree>\<degree> rs \<rightarrow>\<^sub>\<beta> u[r/0] \<degree>\<degree> rs"
+ by (rule apps_preserves_beta)
+ with Cons have "Abs u \<degree>\<degree> ts \<rightarrow>\<^sub>\<beta> u[r/0] \<degree>\<degree> rs"
+ by simp
+ with 2 show ?thesis by iprover
+ qed
+ qed
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/ParRed.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,115 @@
+(* Title: HOL/Proofs/Lambda/ParRed.thy
+ Author: Tobias Nipkow
+ Copyright 1995 TU Muenchen
+
+Properties of => and "cd", in particular the diamond property of => and
+confluence of beta.
+*)
+
+header {* Parallel reduction and a complete development *}
+
+theory ParRed imports Lambda Commutation begin
+
+
+subsection {* Parallel reduction *}
+
+inductive par_beta :: "[dB, dB] => bool" (infixl "=>" 50)
+ where
+ var [simp, intro!]: "Var n => Var n"
+ | abs [simp, intro!]: "s => t ==> Abs s => Abs t"
+ | app [simp, intro!]: "[| s => s'; t => t' |] ==> s \<degree> t => s' \<degree> t'"
+ | beta [simp, intro!]: "[| s => s'; t => t' |] ==> (Abs s) \<degree> t => s'[t'/0]"
+
+inductive_cases par_beta_cases [elim!]:
+ "Var n => t"
+ "Abs s => Abs t"
+ "(Abs s) \<degree> t => u"
+ "s \<degree> t => u"
+ "Abs s => t"
+
+
+subsection {* Inclusions *}
+
+text {* @{text "beta \<subseteq> par_beta \<subseteq> beta^*"} \medskip *}
+
+lemma par_beta_varL [simp]:
+ "(Var n => t) = (t = Var n)"
+ by blast
+
+lemma par_beta_refl [simp]: "t => t" (* par_beta_refl [intro!] causes search to blow up *)
+ by (induct t) simp_all
+
+lemma beta_subset_par_beta: "beta <= par_beta"
+ apply (rule predicate2I)
+ apply (erule beta.induct)
+ apply (blast intro!: par_beta_refl)+
+ done
+
+lemma par_beta_subset_beta: "par_beta <= beta^**"
+ apply (rule predicate2I)
+ apply (erule par_beta.induct)
+ apply blast
+ apply (blast del: rtranclp.rtrancl_refl intro: rtranclp.rtrancl_into_rtrancl)+
+ -- {* @{thm[source] rtrancl_refl} complicates the proof by increasing the branching factor *}
+ done
+
+
+subsection {* Misc properties of @{text "par_beta"} *}
+
+lemma par_beta_lift [simp]:
+ "t => t' \<Longrightarrow> lift t n => lift t' n"
+ by (induct t arbitrary: t' n) fastsimp+
+
+lemma par_beta_subst:
+ "s => s' \<Longrightarrow> t => t' \<Longrightarrow> t[s/n] => t'[s'/n]"
+ apply (induct t arbitrary: s s' t' n)
+ apply (simp add: subst_Var)
+ apply (erule par_beta_cases)
+ apply simp
+ apply (simp add: subst_subst [symmetric])
+ apply (fastsimp intro!: par_beta_lift)
+ apply fastsimp
+ done
+
+
+subsection {* Confluence (directly) *}
+
+lemma diamond_par_beta: "diamond par_beta"
+ apply (unfold diamond_def commute_def square_def)
+ apply (rule impI [THEN allI [THEN allI]])
+ apply (erule par_beta.induct)
+ apply (blast intro!: par_beta_subst)+
+ done
+
+
+subsection {* Complete developments *}
+
+fun
+ "cd" :: "dB => dB"
+where
+ "cd (Var n) = Var n"
+| "cd (Var n \<degree> t) = Var n \<degree> cd t"
+| "cd ((s1 \<degree> s2) \<degree> t) = cd (s1 \<degree> s2) \<degree> cd t"
+| "cd (Abs u \<degree> t) = (cd u)[cd t/0]"
+| "cd (Abs s) = Abs (cd s)"
+
+lemma par_beta_cd: "s => t \<Longrightarrow> t => cd s"
+ apply (induct s arbitrary: t rule: cd.induct)
+ apply auto
+ apply (fast intro!: par_beta_subst)
+ done
+
+
+subsection {* Confluence (via complete developments) *}
+
+lemma diamond_par_beta2: "diamond par_beta"
+ apply (unfold diamond_def commute_def square_def)
+ apply (blast intro: par_beta_cd)
+ done
+
+theorem beta_confluent: "confluent beta"
+ apply (rule diamond_par_beta2 diamond_to_confluence
+ par_beta_subset_beta beta_subset_par_beta)+
+ done
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/README.html Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,29 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+<HTML>
+
+<HEAD>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <TITLE>HOL/Lambda</TITLE>
+</HEAD>
+
+<BODY>
+
+<H1>Lambda Calculus in de Bruijn's Notation</H1>
+
+This theory defines lambda-calculus terms with de Bruijn indices and proves
+confluence of beta, eta and beta+eta.
+<P>
+
+
+The paper
+<A HREF="http://www.in.tum.de/~nipkow/pubs/jar2001.html">
+More Church-Rosser Proofs (in Isabelle/HOL)</A>
+describes the whole theory.
+
+<HR>
+
+<P>Last modified 20.5.2000
+
+</BODY>
+</HTML>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/ROOT.ML Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,2 @@
+no_document use_thys ["Code_Integer"];
+use_thys ["Eta", "StrongNorm", "Standardization", "WeakNorm"];
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/Standardization.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,360 @@
+(* Title: HOL/Proofs/Lambda/Standardization.thy
+ Author: Stefan Berghofer
+ Copyright 2005 TU Muenchen
+*)
+
+header {* Standardization *}
+
+theory Standardization
+imports NormalForm
+begin
+
+text {*
+Based on lecture notes by Ralph Matthes \cite{Matthes-ESSLLI2000},
+original proof idea due to Ralph Loader \cite{Loader1998}.
+*}
+
+
+subsection {* Standard reduction relation *}
+
+declare listrel_mono [mono_set]
+
+inductive
+ sred :: "dB \<Rightarrow> dB \<Rightarrow> bool" (infixl "\<rightarrow>\<^sub>s" 50)
+ and sredlist :: "dB list \<Rightarrow> dB list \<Rightarrow> bool" (infixl "[\<rightarrow>\<^sub>s]" 50)
+where
+ "s [\<rightarrow>\<^sub>s] t \<equiv> listrelp op \<rightarrow>\<^sub>s s t"
+| Var: "rs [\<rightarrow>\<^sub>s] rs' \<Longrightarrow> Var x \<degree>\<degree> rs \<rightarrow>\<^sub>s Var x \<degree>\<degree> rs'"
+| Abs: "r \<rightarrow>\<^sub>s r' \<Longrightarrow> ss [\<rightarrow>\<^sub>s] ss' \<Longrightarrow> Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> ss'"
+| Beta: "r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>s t \<Longrightarrow> Abs r \<degree> s \<degree>\<degree> ss \<rightarrow>\<^sub>s t"
+
+lemma refl_listrelp: "\<forall>x\<in>set xs. R x x \<Longrightarrow> listrelp R xs xs"
+ by (induct xs) (auto intro: listrelp.intros)
+
+lemma refl_sred: "t \<rightarrow>\<^sub>s t"
+ by (induct t rule: Apps_dB_induct) (auto intro: refl_listrelp sred.intros)
+
+lemma refl_sreds: "ts [\<rightarrow>\<^sub>s] ts"
+ by (simp add: refl_sred refl_listrelp)
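+
+text {*
+  A small instance of the @{text Beta} rule (an editorial sketch, not
+  part of the original theory):
+*}
+
+lemma "Abs (Var 0) \<degree> t \<rightarrow>\<^sub>s t"
+proof -
+  have "(Var 0)[t/0] \<degree>\<degree> [] \<rightarrow>\<^sub>s t" by (simp add: refl_sred)
+  hence "Abs (Var 0) \<degree> t \<degree>\<degree> [] \<rightarrow>\<^sub>s t" by (rule sred.Beta)
+  thus ?thesis by simp
+qed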
+
+lemma listrelp_conj1: "listrelp (\<lambda>x y. R x y \<and> S x y) x y \<Longrightarrow> listrelp R x y"
+ by (erule listrelp.induct) (auto intro: listrelp.intros)
+
+lemma listrelp_conj2: "listrelp (\<lambda>x y. R x y \<and> S x y) x y \<Longrightarrow> listrelp S x y"
+ by (erule listrelp.induct) (auto intro: listrelp.intros)
+
+lemma listrelp_app:
+ assumes xsys: "listrelp R xs ys"
+ shows "listrelp R xs' ys' \<Longrightarrow> listrelp R (xs @ xs') (ys @ ys')" using xsys
+ by (induct arbitrary: xs' ys') (auto intro: listrelp.intros)
+
+lemma lemma1:
+ assumes r: "r \<rightarrow>\<^sub>s r'" and s: "s \<rightarrow>\<^sub>s s'"
+ shows "r \<degree> s \<rightarrow>\<^sub>s r' \<degree> s'" using r
+proof induct
+ case (Var rs rs' x)
+ then have "rs [\<rightarrow>\<^sub>s] rs'" by (rule listrelp_conj1)
+ moreover have "[s] [\<rightarrow>\<^sub>s] [s']" by (iprover intro: s listrelp.intros)
+ ultimately have "rs @ [s] [\<rightarrow>\<^sub>s] rs' @ [s']" by (rule listrelp_app)
+ hence "Var x \<degree>\<degree> (rs @ [s]) \<rightarrow>\<^sub>s Var x \<degree>\<degree> (rs' @ [s'])" by (rule sred.Var)
+ thus ?case by (simp only: app_last)
+next
+ case (Abs r r' ss ss')
+ from Abs(3) have "ss [\<rightarrow>\<^sub>s] ss'" by (rule listrelp_conj1)
+ moreover have "[s] [\<rightarrow>\<^sub>s] [s']" by (iprover intro: s listrelp.intros)
+ ultimately have "ss @ [s] [\<rightarrow>\<^sub>s] ss' @ [s']" by (rule listrelp_app)
+ with `r \<rightarrow>\<^sub>s r'` have "Abs r \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> (ss' @ [s'])"
+ by (rule sred.Abs)
+ thus ?case by (simp only: app_last)
+next
+ case (Beta r u ss t)
+ hence "r[u/0] \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s t \<degree> s'" by (simp only: app_last)
+ hence "Abs r \<degree> u \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s t \<degree> s'" by (rule sred.Beta)
+ thus ?case by (simp only: app_last)
+qed
+
+lemma lemma1':
+ assumes ts: "ts [\<rightarrow>\<^sub>s] ts'"
+ shows "r \<rightarrow>\<^sub>s r' \<Longrightarrow> r \<degree>\<degree> ts \<rightarrow>\<^sub>s r' \<degree>\<degree> ts'" using ts
+ by (induct arbitrary: r r') (auto intro: lemma1)
+
+lemma lemma2_1:
+ assumes beta: "t \<rightarrow>\<^sub>\<beta> u"
+ shows "t \<rightarrow>\<^sub>s u" using beta
+proof induct
+ case (beta s t)
+ have "Abs s \<degree> t \<degree>\<degree> [] \<rightarrow>\<^sub>s s[t/0] \<degree>\<degree> []" by (iprover intro: sred.Beta refl_sred)
+ thus ?case by simp
+next
+ case (appL s t u)
+ thus ?case by (iprover intro: lemma1 refl_sred)
+next
+ case (appR s t u)
+ thus ?case by (iprover intro: lemma1 refl_sred)
+next
+ case (abs s t)
+ hence "Abs s \<degree>\<degree> [] \<rightarrow>\<^sub>s Abs t \<degree>\<degree> []" by (iprover intro: sred.Abs listrelp.Nil)
+ thus ?case by simp
+qed
+
+lemma listrelp_betas:
+ assumes ts: "listrelp op \<rightarrow>\<^sub>\<beta>\<^sup>* ts ts'"
+ shows "\<And>t t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> t \<degree>\<degree> ts \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<degree>\<degree> ts'" using ts
+ by induct auto
+
+lemma lemma2_2:
+ assumes t: "t \<rightarrow>\<^sub>s u"
+ shows "t \<rightarrow>\<^sub>\<beta>\<^sup>* u" using t
+ by induct (auto dest: listrelp_conj2
+ intro: listrelp_betas apps_preserves_beta converse_rtranclp_into_rtranclp)
+
+lemma sred_lift:
+ assumes s: "s \<rightarrow>\<^sub>s t"
+ shows "lift s i \<rightarrow>\<^sub>s lift t i" using s
+proof (induct arbitrary: i)
+ case (Var rs rs' x)
+ hence "map (\<lambda>t. lift t i) rs [\<rightarrow>\<^sub>s] map (\<lambda>t. lift t i) rs'"
+ by induct (auto intro: listrelp.intros)
+ thus ?case by (cases "x < i") (auto intro: sred.Var)
+next
+ case (Abs r r' ss ss')
+ from Abs(3) have "map (\<lambda>t. lift t i) ss [\<rightarrow>\<^sub>s] map (\<lambda>t. lift t i) ss'"
+ by induct (auto intro: listrelp.intros)
+ thus ?case by (auto intro: sred.Abs Abs)
+next
+ case (Beta r s ss t)
+ thus ?case by (auto intro: sred.Beta)
+qed
+
+lemma lemma3:
+ assumes r: "r \<rightarrow>\<^sub>s r'"
+ shows "s \<rightarrow>\<^sub>s s' \<Longrightarrow> r[s/x] \<rightarrow>\<^sub>s r'[s'/x]" using r
+proof (induct arbitrary: s s' x)
+ case (Var rs rs' y)
+ hence "map (\<lambda>t. t[s/x]) rs [\<rightarrow>\<^sub>s] map (\<lambda>t. t[s'/x]) rs'"
+ by induct (auto intro: listrelp.intros Var)
+ moreover have "Var y[s/x] \<rightarrow>\<^sub>s Var y[s'/x]"
+ proof (cases "y < x")
+ case True thus ?thesis by simp (rule refl_sred)
+ next
+ case False
+ thus ?thesis
+ by (cases "y = x") (auto simp add: Var intro: refl_sred)
+ qed
+ ultimately show ?case by simp (rule lemma1')
+next
+ case (Abs r r' ss ss')
+ from Abs(4) have "lift s 0 \<rightarrow>\<^sub>s lift s' 0" by (rule sred_lift)
+ hence "r[lift s 0/Suc x] \<rightarrow>\<^sub>s r'[lift s' 0/Suc x]" by (fast intro: Abs.hyps)
+ moreover from Abs(3) have "map (\<lambda>t. t[s/x]) ss [\<rightarrow>\<^sub>s] map (\<lambda>t. t[s'/x]) ss'"
+ by induct (auto intro: listrelp.intros Abs)
+ ultimately show ?case by simp (rule sred.Abs)
+next
+ case (Beta r u ss t)
+ thus ?case by (auto simp add: subst_subst intro: sred.Beta)
+qed
+
+lemma lemma4_aux:
+ assumes rs: "listrelp (\<lambda>t u. t \<rightarrow>\<^sub>s u \<and> (\<forall>r. u \<rightarrow>\<^sub>\<beta> r \<longrightarrow> t \<rightarrow>\<^sub>s r)) rs rs'"
+ shows "rs' => ss \<Longrightarrow> rs [\<rightarrow>\<^sub>s] ss" using rs
+proof (induct arbitrary: ss)
+ case Nil
+ thus ?case by cases (auto intro: listrelp.Nil)
+next
+ case (Cons x y xs ys)
+ note Cons' = Cons
+ show ?case
+ proof (cases ss)
+ case Nil with Cons show ?thesis by simp
+ next
+ case (Cons y' ys')
+ hence ss: "ss = y' # ys'" by simp
+ from Cons Cons' have "y \<rightarrow>\<^sub>\<beta> y' \<and> ys' = ys \<or> y' = y \<and> ys => ys'" by simp
+ hence "x # xs [\<rightarrow>\<^sub>s] y' # ys'"
+ proof
+ assume H: "y \<rightarrow>\<^sub>\<beta> y' \<and> ys' = ys"
+ with Cons' have "x \<rightarrow>\<^sub>s y'" by blast
+ moreover from Cons' have "xs [\<rightarrow>\<^sub>s] ys" by (iprover dest: listrelp_conj1)
+ ultimately have "x # xs [\<rightarrow>\<^sub>s] y' # ys" by (rule listrelp.Cons)
+ with H show ?thesis by simp
+ next
+ assume H: "y' = y \<and> ys => ys'"
+ with Cons' have "x \<rightarrow>\<^sub>s y'" by blast
+ moreover from H have "xs [\<rightarrow>\<^sub>s] ys'" by (blast intro: Cons')
+ ultimately show ?thesis by (rule listrelp.Cons)
+ qed
+ with ss show ?thesis by simp
+ qed
+qed
+
+lemma lemma4:
+ assumes r: "r \<rightarrow>\<^sub>s r'"
+ shows "r' \<rightarrow>\<^sub>\<beta> r'' \<Longrightarrow> r \<rightarrow>\<^sub>s r''" using r
+proof (induct arbitrary: r'')
+ case (Var rs rs' x)
+ then obtain ss where rs: "rs' => ss" and r'': "r'' = Var x \<degree>\<degree> ss"
+ by (blast dest: head_Var_reduction)
+ from Var(1) rs have "rs [\<rightarrow>\<^sub>s] ss" by (rule lemma4_aux)
+ hence "Var x \<degree>\<degree> rs \<rightarrow>\<^sub>s Var x \<degree>\<degree> ss" by (rule sred.Var)
+ with r'' show ?case by simp
+next
+ case (Abs r r' ss ss')
+ from `Abs r' \<degree>\<degree> ss' \<rightarrow>\<^sub>\<beta> r''` show ?case
+ proof
+ fix s
+ assume r'': "r'' = s \<degree>\<degree> ss'"
+ assume "Abs r' \<rightarrow>\<^sub>\<beta> s"
+ then obtain r''' where s: "s = Abs r'''" and r''': "r' \<rightarrow>\<^sub>\<beta> r'''" by cases auto
+ from r''' have "r \<rightarrow>\<^sub>s r'''" by (blast intro: Abs)
+ moreover from Abs have "ss [\<rightarrow>\<^sub>s] ss'" by (iprover dest: listrelp_conj1)
+ ultimately have "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s Abs r''' \<degree>\<degree> ss'" by (rule sred.Abs)
+ with r'' s show "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by simp
+ next
+ fix rs'
+ assume "ss' => rs'"
+ with Abs(3) have "ss [\<rightarrow>\<^sub>s] rs'" by (rule lemma4_aux)
+ with `r \<rightarrow>\<^sub>s r'` have "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> rs'" by (rule sred.Abs)
+ moreover assume "r'' = Abs r' \<degree>\<degree> rs'"
+ ultimately show "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by simp
+ next
+ fix t u' us'
+ assume "ss' = u' # us'"
+ with Abs(3) obtain u us where
+ ss: "ss = u # us" and u: "u \<rightarrow>\<^sub>s u'" and us: "us [\<rightarrow>\<^sub>s] us'"
+ by cases (auto dest!: listrelp_conj1)
+ have "r[u/0] \<rightarrow>\<^sub>s r'[u'/0]" using Abs(1) and u by (rule lemma3)
+ with us have "r[u/0] \<degree>\<degree> us \<rightarrow>\<^sub>s r'[u'/0] \<degree>\<degree> us'" by (rule lemma1')
+ hence "Abs r \<degree> u \<degree>\<degree> us \<rightarrow>\<^sub>s r'[u'/0] \<degree>\<degree> us'" by (rule sred.Beta)
+ moreover assume "Abs r' = Abs t" and "r'' = t[u'/0] \<degree>\<degree> us'"
+ ultimately show "Abs r \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" using ss by simp
+ qed
+next
+ case (Beta r s ss t)
+ show ?case
+ by (rule sred.Beta) (rule Beta)+
+qed
+
+lemma rtrancl_beta_sred:
+ assumes r: "r \<rightarrow>\<^sub>\<beta>\<^sup>* r'"
+ shows "r \<rightarrow>\<^sub>s r'" using r
+ by induct (iprover intro: refl_sred lemma4)+
+
+
+subsection {* Leftmost reduction and weakly normalizing terms *}
+
+inductive
+ lred :: "dB \<Rightarrow> dB \<Rightarrow> bool" (infixl "\<rightarrow>\<^sub>l" 50)
+ and lredlist :: "dB list \<Rightarrow> dB list \<Rightarrow> bool" (infixl "[\<rightarrow>\<^sub>l]" 50)
+where
+ "s [\<rightarrow>\<^sub>l] t \<equiv> listrelp op \<rightarrow>\<^sub>l s t"
+| Var: "rs [\<rightarrow>\<^sub>l] rs' \<Longrightarrow> Var x \<degree>\<degree> rs \<rightarrow>\<^sub>l Var x \<degree>\<degree> rs'"
+| Abs: "r \<rightarrow>\<^sub>l r' \<Longrightarrow> Abs r \<rightarrow>\<^sub>l Abs r'"
+| Beta: "r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>l t \<Longrightarrow> Abs r \<degree> s \<degree>\<degree> ss \<rightarrow>\<^sub>l t"
+
+lemma lred_imp_sred:
+ assumes lred: "s \<rightarrow>\<^sub>l t"
+ shows "s \<rightarrow>\<^sub>s t" using lred
+proof induct
+ case (Var rs rs' x)
+ then have "rs [\<rightarrow>\<^sub>s] rs'"
+ by induct (iprover intro: listrelp.intros)+
+ then show ?case by (rule sred.Var)
+next
+ case (Abs r r')
+ from `r \<rightarrow>\<^sub>s r'`
+ have "Abs r \<degree>\<degree> [] \<rightarrow>\<^sub>s Abs r' \<degree>\<degree> []" using listrelp.Nil
+ by (rule sred.Abs)
+ then show ?case by simp
+next
+ case (Beta r s ss t)
+ from `r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>s t`
+ show ?case by (rule sred.Beta)
+qed
+
+inductive WN :: "dB => bool"
+ where
+ Var: "listsp WN rs \<Longrightarrow> WN (Var n \<degree>\<degree> rs)"
+ | Lambda: "WN r \<Longrightarrow> WN (Abs r)"
+ | Beta: "WN ((r[s/0]) \<degree>\<degree> ss) \<Longrightarrow> WN ((Abs r \<degree> s) \<degree>\<degree> ss)"
+
+lemma listrelp_imp_listsp1:
+ assumes H: "listrelp (\<lambda>x y. P x) xs ys"
+ shows "listsp P xs" using H
+ by induct auto
+
+lemma listrelp_imp_listsp2:
+ assumes H: "listrelp (\<lambda>x y. P y) xs ys"
+ shows "listsp P ys" using H
+ by induct auto
+
+lemma lemma5:
+ assumes lred: "r \<rightarrow>\<^sub>l r'"
+ shows "WN r" and "NF r'" using lred
+ by induct
+ (iprover dest: listrelp_conj1 listrelp_conj2
+ listrelp_imp_listsp1 listrelp_imp_listsp2 intro: WN.intros
+ NF.intros [simplified listall_listsp_eq])+
+
+lemma lemma6:
+ assumes wn: "WN r"
+ shows "\<exists>r'. r \<rightarrow>\<^sub>l r'" using wn
+proof induct
+ case (Var rs n)
+ then have "\<exists>rs'. rs [\<rightarrow>\<^sub>l] rs'"
+ by induct (iprover intro: listrelp.intros)+
+ then show ?case by (iprover intro: lred.Var)
+qed (iprover intro: lred.intros)+
+
+lemma lemma7:
+ assumes r: "r \<rightarrow>\<^sub>s r'"
+ shows "NF r' \<Longrightarrow> r \<rightarrow>\<^sub>l r'" using r
+proof induct
+ case (Var rs rs' x)
+ from `NF (Var x \<degree>\<degree> rs')` have "listall NF rs'"
+ by cases simp_all
+ with Var(1) have "rs [\<rightarrow>\<^sub>l] rs'"
+ proof induct
+ case Nil
+ show ?case by (rule listrelp.Nil)
+ next
+ case (Cons x y xs ys)
+ hence "x \<rightarrow>\<^sub>l y" and "xs [\<rightarrow>\<^sub>l] ys" by simp_all
+ thus ?case by (rule listrelp.Cons)
+ qed
+ thus ?case by (rule lred.Var)
+next
+ case (Abs r r' ss ss')
+ from `NF (Abs r' \<degree>\<degree> ss')`
+ have ss': "ss' = []" by (rule Abs_NF)
+ from Abs(3) have ss: "ss = []" using ss'
+ by cases simp_all
+ from ss' Abs have "NF (Abs r')" by simp
+ hence "NF r'" by cases simp_all
+ with Abs have "r \<rightarrow>\<^sub>l r'" by simp
+ hence "Abs r \<rightarrow>\<^sub>l Abs r'" by (rule lred.Abs)
+ with ss ss' show ?case by simp
+next
+ case (Beta r s ss t)
+ hence "r[s/0] \<degree>\<degree> ss \<rightarrow>\<^sub>l t" by simp
+ thus ?case by (rule lred.Beta)
+qed
+
+lemma WN_eq: "WN t = (\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t')"
+proof
+ assume "WN t"
+ then have "\<exists>t'. t \<rightarrow>\<^sub>l t'" by (rule lemma6)
+ then obtain t' where t': "t \<rightarrow>\<^sub>l t'" ..
+ then have NF: "NF t'" by (rule lemma5)
+ from t' have "t \<rightarrow>\<^sub>s t'" by (rule lred_imp_sred)
+ then have "t \<rightarrow>\<^sub>\<beta>\<^sup>* t'" by (rule lemma2_2)
+ with NF show "\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'" by iprover
+next
+ assume "\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
+ then obtain t' where t': "t \<rightarrow>\<^sub>\<beta>\<^sup>* t'" and NF: "NF t'"
+ by iprover
+ from t' have "t \<rightarrow>\<^sub>s t'" by (rule rtrancl_beta_sred)
+ then have "t \<rightarrow>\<^sub>l t'" using NF by (rule lemma7)
+ then show "WN t" by (rule lemma5)
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/StrongNorm.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,286 @@
+(* Title: HOL/Proofs/Lambda/StrongNorm.thy
+ Author: Stefan Berghofer
+ Copyright 2000 TU Muenchen
+*)
+
+header {* Strong normalization for simply-typed lambda calculus *}
+
+theory StrongNorm imports Type InductTermi begin
+
+text {*
+Formalization by Stefan Berghofer. Partly based on a paper proof by
+Felix Joachimski and Ralph Matthes \cite{Matthes-Joachimski-AML}.
+*}
+
+
+subsection {* Properties of @{text IT} *}
+
+lemma lift_IT [intro!]: "IT t \<Longrightarrow> IT (lift t i)"
+ apply (induct arbitrary: i set: IT)
+ apply (simp (no_asm))
+ apply (rule conjI)
+ apply
+ (rule impI,
+ rule IT.Var,
+ erule listsp.induct,
+ simp (no_asm),
+ rule listsp.Nil,
+ simp (no_asm),
+ rule listsp.Cons,
+ blast,
+ assumption)+
+ apply auto
+ done
+
+lemma lifts_IT: "listsp IT ts \<Longrightarrow> listsp IT (map (\<lambda>t. lift t 0) ts)"
+ by (induct ts) auto
+
+lemma subst_Var_IT: "IT r \<Longrightarrow> IT (r[Var i/j])"
+ apply (induct arbitrary: i j set: IT)
+ txt {* Case @{term Var}: *}
+ apply (simp (no_asm) add: subst_Var)
+ apply
+ ((rule conjI impI)+,
+ rule IT.Var,
+ erule listsp.induct,
+ simp (no_asm),
+ rule listsp.Nil,
+ simp (no_asm),
+ rule listsp.Cons,
+ fast,
+ assumption)+
+ txt {* Case @{term Lambda}: *}
+ apply atomize
+ apply simp
+ apply (rule IT.Lambda)
+ apply fast
+ txt {* Case @{term Beta}: *}
+ apply atomize
+ apply (simp (no_asm_use) add: subst_subst [symmetric])
+ apply (rule IT.Beta)
+ apply auto
+ done
+
+lemma Var_IT: "IT (Var n)"
+ apply (subgoal_tac "IT (Var n \<degree>\<degree> [])")
+ apply simp
+ apply (rule IT.Var)
+ apply (rule listsp.Nil)
+ done
+
+lemma app_Var_IT: "IT t \<Longrightarrow> IT (t \<degree> Var i)"
+ apply (induct set: IT)
+ apply (subst app_last)
+ apply (rule IT.Var)
+ apply simp
+ apply (rule listsp.Cons)
+ apply (rule Var_IT)
+ apply (rule listsp.Nil)
+ apply (rule IT.Beta [where ?ss = "[]", unfolded foldl_Nil [THEN eq_reflection]])
+ apply (erule subst_Var_IT)
+ apply (rule Var_IT)
+ apply (subst app_last)
+ apply (rule IT.Beta)
+ apply (subst app_last [symmetric])
+ apply assumption
+ apply assumption
+ done
+
+
+subsection {* Well-typed substitution preserves termination *}
+
+lemma subst_type_IT:
+ "\<And>t e T u i. IT t \<Longrightarrow> e\<langle>i:U\<rangle> \<turnstile> t : T \<Longrightarrow>
+ IT u \<Longrightarrow> e \<turnstile> u : U \<Longrightarrow> IT (t[u/i])"
+ (is "PROP ?P U" is "\<And>t e T u i. _ \<Longrightarrow> PROP ?Q t e T u i U")
+proof (induct U)
+ fix T t
+ assume MI1: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T1"
+ assume MI2: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T2"
+ assume "IT t"
+ thus "\<And>e T' u i. PROP ?Q t e T' u i T"
+ proof induct
+ fix e T' u i
+ assume uIT: "IT u"
+ assume uT: "e \<turnstile> u : T"
+ {
+ case (Var rs n e_ T'_ u_ i_)
+ assume nT: "e\<langle>i:T\<rangle> \<turnstile> Var n \<degree>\<degree> rs : T'"
+ let ?ty = "\<lambda>t. \<exists>T'. e\<langle>i:T\<rangle> \<turnstile> t : T'"
+ let ?R = "\<lambda>t. \<forall>e T' u i.
+ e\<langle>i:T\<rangle> \<turnstile> t : T' \<longrightarrow> IT u \<longrightarrow> e \<turnstile> u : T \<longrightarrow> IT (t[u/i])"
+ show "IT ((Var n \<degree>\<degree> rs)[u/i])"
+ proof (cases "n = i")
+ case True
+ show ?thesis
+ proof (cases rs)
+ case Nil
+ with uIT True show ?thesis by simp
+ next
+ case (Cons a as)
+ with nT have "e\<langle>i:T\<rangle> \<turnstile> Var n \<degree> a \<degree>\<degree> as : T'" by simp
+ then obtain Ts
+ where headT: "e\<langle>i:T\<rangle> \<turnstile> Var n \<degree> a : Ts \<Rrightarrow> T'"
+ and argsT: "e\<langle>i:T\<rangle> \<tturnstile> as : Ts"
+ by (rule list_app_typeE)
+ from headT obtain T''
+ where varT: "e\<langle>i:T\<rangle> \<turnstile> Var n : T'' \<Rightarrow> Ts \<Rrightarrow> T'"
+ and argT: "e\<langle>i:T\<rangle> \<turnstile> a : T''"
+ by cases simp_all
+ from varT True have T: "T = T'' \<Rightarrow> Ts \<Rrightarrow> T'"
+ by cases auto
+ with uT have uT': "e \<turnstile> u : T'' \<Rightarrow> Ts \<Rrightarrow> T'" by simp
+ from T have "IT ((Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0)
+ (map (\<lambda>t. t[u/i]) as))[(u \<degree> a[u/i])/0])"
+ proof (rule MI2)
+ from T have "IT ((lift u 0 \<degree> Var 0)[a[u/i]/0])"
+ proof (rule MI1)
+ have "IT (lift u 0)" by (rule lift_IT [OF uIT])
+ thus "IT (lift u 0 \<degree> Var 0)" by (rule app_Var_IT)
+ show "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 \<degree> Var 0 : Ts \<Rrightarrow> T'"
+ proof (rule typing.App)
+ show "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 : T'' \<Rightarrow> Ts \<Rrightarrow> T'"
+ by (rule lift_type) (rule uT')
+ show "e\<langle>0:T''\<rangle> \<turnstile> Var 0 : T''"
+ by (rule typing.Var) simp
+ qed
+ from Var have "?R a" by cases (simp_all add: Cons)
+ with argT uIT uT show "IT (a[u/i])" by simp
+ from argT uT show "e \<turnstile> a[u/i] : T''"
+ by (rule subst_lemma) simp
+ qed
+ thus "IT (u \<degree> a[u/i])" by simp
+ from Var have "listsp ?R as"
+ by cases (simp_all add: Cons)
+ moreover from argsT have "listsp ?ty as"
+ by (rule lists_typings)
+ ultimately have "listsp (\<lambda>t. ?R t \<and> ?ty t) as"
+ by simp
+ hence "listsp IT (map (\<lambda>t. lift t 0) (map (\<lambda>t. t[u/i]) as))"
+ (is "listsp IT (?ls as)")
+ proof induct
+ case Nil
+ show ?case by fastsimp
+ next
+ case (Cons b bs)
+ hence I: "?R b" by simp
+ from Cons obtain U where "e\<langle>i:T\<rangle> \<turnstile> b : U" by fast
+ with uT uIT I have "IT (b[u/i])" by simp
+ hence "IT (lift (b[u/i]) 0)" by (rule lift_IT)
+ hence "listsp IT (lift (b[u/i]) 0 # ?ls bs)"
+ by (rule listsp.Cons) (rule Cons)
+ thus ?case by simp
+ qed
+ thus "IT (Var 0 \<degree>\<degree> ?ls as)" by (rule IT.Var)
+ have "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 : Ts \<Rrightarrow> T'"
+ by (rule typing.Var) simp
+ moreover from uT argsT have "e \<tturnstile> map (\<lambda>t. t[u/i]) as : Ts"
+ by (rule substs_lemma)
+ hence "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<tturnstile> ?ls as : Ts"
+ by (rule lift_types)
+ ultimately show "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 \<degree>\<degree> ?ls as : T'"
+ by (rule list_app_typeI)
+ from argT uT have "e \<turnstile> a[u/i] : T''"
+ by (rule subst_lemma) (rule refl)
+ with uT' show "e \<turnstile> u \<degree> a[u/i] : Ts \<Rrightarrow> T'"
+ by (rule typing.App)
+ qed
+ with Cons True show ?thesis
+ by (simp add: comp_def)
+ qed
+ next
+ case False
+ from Var have "listsp ?R rs" by simp
+ moreover from nT obtain Ts where "e\<langle>i:T\<rangle> \<tturnstile> rs : Ts"
+ by (rule list_app_typeE)
+ hence "listsp ?ty rs" by (rule lists_typings)
+ ultimately have "listsp (\<lambda>t. ?R t \<and> ?ty t) rs"
+ by simp
+ hence "listsp IT (map (\<lambda>x. x[u/i]) rs)"
+ proof induct
+ case Nil
+ show ?case by fastsimp
+ next
+ case (Cons a as)
+ hence I: "?R a" by simp
+ from Cons obtain U where "e\<langle>i:T\<rangle> \<turnstile> a : U" by fast
+ with uT uIT I have "IT (a[u/i])" by simp
+ hence "listsp IT (a[u/i] # map (\<lambda>t. t[u/i]) as)"
+ by (rule listsp.Cons) (rule Cons)
+ thus ?case by simp
+ qed
+ with False show ?thesis by (auto simp add: subst_Var)
+ qed
+ next
+ case (Lambda r e_ T'_ u_ i_)
+ assume "e\<langle>i:T\<rangle> \<turnstile> Abs r : T'"
+ and "\<And>e T' u i. PROP ?Q r e T' u i T"
+ with uIT uT show "IT (Abs r[u/i])"
+ by fastsimp
+ next
+ case (Beta r a as e_ T'_ u_ i_)
+ assume T: "e\<langle>i:T\<rangle> \<turnstile> Abs r \<degree> a \<degree>\<degree> as : T'"
+ assume SI1: "\<And>e T' u i. PROP ?Q (r[a/0] \<degree>\<degree> as) e T' u i T"
+ assume SI2: "\<And>e T' u i. PROP ?Q a e T' u i T"
+ have "IT (Abs (r[lift u 0/Suc i]) \<degree> a[u/i] \<degree>\<degree> map (\<lambda>t. t[u/i]) as)"
+ proof (rule IT.Beta)
+ have "Abs r \<degree> a \<degree>\<degree> as \<rightarrow>\<^sub>\<beta> r[a/0] \<degree>\<degree> as"
+ by (rule apps_preserves_beta) (rule beta.beta)
+ with T have "e\<langle>i:T\<rangle> \<turnstile> r[a/0] \<degree>\<degree> as : T'"
+ by (rule subject_reduction)
+ hence "IT ((r[a/0] \<degree>\<degree> as)[u/i])"
+ using uIT uT by (rule SI1)
+ thus "IT (r[lift u 0/Suc i][a[u/i]/0] \<degree>\<degree> map (\<lambda>t. t[u/i]) as)"
+ by (simp del: subst_map add: subst_subst subst_map [symmetric])
+ from T obtain U where "e\<langle>i:T\<rangle> \<turnstile> Abs r \<degree> a : U"
+ by (rule list_app_typeE) fast
+ then obtain T'' where "e\<langle>i:T\<rangle> \<turnstile> a : T''" by cases simp_all
+ thus "IT (a[u/i])" using uIT uT by (rule SI2)
+ qed
+ thus "IT ((Abs r \<degree> a \<degree>\<degree> as)[u/i])" by simp
+ }
+ qed
+qed
+
+
+subsection {* Well-typed terms are strongly normalizing *}
+
+lemma type_implies_IT:
+ assumes "e \<turnstile> t : T"
+ shows "IT t"
+ using assms
+proof induct
+ case Var
+ show ?case by (rule Var_IT)
+next
+ case Abs
+ show ?case by (rule IT.Lambda) (rule Abs)
+next
+ case (App e s T U t)
+ have "IT ((Var 0 \<degree> lift t 0)[s/0])"
+ proof (rule subst_type_IT)
+ have "IT (lift t 0)" using `IT t` by (rule lift_IT)
+ hence "listsp IT [lift t 0]" by (rule listsp.Cons) (rule listsp.Nil)
+ hence "IT (Var 0 \<degree>\<degree> [lift t 0])" by (rule IT.Var)
+ also have "Var 0 \<degree>\<degree> [lift t 0] = Var 0 \<degree> lift t 0" by simp
+ finally show "IT \<dots>" .
+ have "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 : T \<Rightarrow> U"
+ by (rule typing.Var) simp
+ moreover have "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> lift t 0 : T"
+ by (rule lift_type) (rule App.hyps)
+ ultimately show "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 \<degree> lift t 0 : U"
+ by (rule typing.App)
+ show "IT s" by fact
+ show "e \<turnstile> s : T \<Rightarrow> U" by fact
+ qed
+ thus ?case by simp
+qed
+
+theorem type_implies_termi: "e \<turnstile> t : T \<Longrightarrow> termip beta t"
+proof -
+ assume "e \<turnstile> t : T"
+ hence "IT t" by (rule type_implies_IT)
+ thus ?thesis by (rule IT_implies_termi)
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/Type.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,365 @@
+(* Title: HOL/Proofs/Lambda/Type.thy
+ Author: Stefan Berghofer
+ Copyright 2000 TU Muenchen
+*)
+
+header {* Simply-typed lambda terms *}
+
+theory Type imports ListApplication begin
+
+
+subsection {* Environments *}
+
+definition
+ shift :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> 'a" ("_<_:_>" [90, 0, 0] 91) where
+ "e<i:a> = (\<lambda>j. if j < i then e j else if j = i then a else e (j - 1))"
+
+notation (xsymbols)
+ shift ("_\<langle>_:_\<rangle>" [90, 0, 0] 91)
+
+notation (HTML output)
+ shift ("_\<langle>_:_\<rangle>" [90, 0, 0] 91)
+
+lemma shift_eq [simp]: "i = j \<Longrightarrow> (e\<langle>i:T\<rangle>) j = T"
+ by (simp add: shift_def)
+
+lemma shift_gt [simp]: "j < i \<Longrightarrow> (e\<langle>i:T\<rangle>) j = e j"
+ by (simp add: shift_def)
+
+lemma shift_lt [simp]: "i < j \<Longrightarrow> (e\<langle>i:T\<rangle>) j = e (j - 1)"
+ by (simp add: shift_def)
+
+lemma shift_commute [simp]: "e\<langle>i:U\<rangle>\<langle>0:T\<rangle> = e\<langle>0:T\<rangle>\<langle>Suc i:U\<rangle>"
+ apply (rule ext)
+ apply (case_tac x)
+ apply simp
+ apply (case_tac nat)
+ apply (simp_all add: shift_def)
+ done
+
+
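+text {*
+  A quick sanity check of the @{text shift} operation (a minimal
+  illustrative sketch, not used in the development below): position
+  @{text 0} of the shifted environment holds the newly inserted entry,
+  while all higher positions refer to the original environment.
+*}
+
+lemma "(e\<langle>0:T\<rangle>) 0 = T \<and> (e\<langle>0:T\<rangle>) (Suc n) = e n"
+  by (simp add: shift_def)
+
+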
+subsection {* Types and typing rules *}
+
+datatype type =
+ Atom nat
+ | Fun type type (infixr "\<Rightarrow>" 200)
+
+inductive typing :: "(nat \<Rightarrow> type) \<Rightarrow> dB \<Rightarrow> type \<Rightarrow> bool" ("_ \<turnstile> _ : _" [50, 50, 50] 50)
+ where
+ Var [intro!]: "env x = T \<Longrightarrow> env \<turnstile> Var x : T"
+ | Abs [intro!]: "env\<langle>0:T\<rangle> \<turnstile> t : U \<Longrightarrow> env \<turnstile> Abs t : (T \<Rightarrow> U)"
+ | App [intro!]: "env \<turnstile> s : T \<Rightarrow> U \<Longrightarrow> env \<turnstile> t : T \<Longrightarrow> env \<turnstile> (s \<degree> t) : U"
+
+inductive_cases typing_elims [elim!]:
+ "e \<turnstile> Var i : T"
+ "e \<turnstile> t \<degree> u : T"
+ "e \<turnstile> Abs t : T"
+
+primrec
+ typings :: "(nat \<Rightarrow> type) \<Rightarrow> dB list \<Rightarrow> type list \<Rightarrow> bool"
+where
+ "typings e [] Ts = (Ts = [])"
+ | "typings e (t # ts) Ts =
+ (case Ts of
+ [] \<Rightarrow> False
+ | T # Ts \<Rightarrow> e \<turnstile> t : T \<and> typings e ts Ts)"
+
+abbreviation
+ typings_rel :: "(nat \<Rightarrow> type) \<Rightarrow> dB list \<Rightarrow> type list \<Rightarrow> bool"
+ ("_ ||- _ : _" [50, 50, 50] 50) where
+ "env ||- ts : Ts == typings env ts Ts"
+
+notation (latex)
+ typings_rel ("_ \<tturnstile> _ : _" [50, 50, 50] 50)
+
+abbreviation
+ funs :: "type list \<Rightarrow> type \<Rightarrow> type" (infixr "=>>" 200) where
+ "Ts =>> T == foldr Fun Ts T"
+
+notation (latex)
+ funs (infixr "\<Rrightarrow>" 200)
+
+
+subsection {* Some examples *}
+
+schematic_lemma "e \<turnstile> Abs (Abs (Abs (Var 1 \<degree> (Var 2 \<degree> Var 1 \<degree> Var 0)))) : ?T"
+ by force
+
+schematic_lemma "e \<turnstile> Abs (Abs (Abs (Var 2 \<degree> Var 0 \<degree> (Var 1 \<degree> Var 0)))) : ?T"
+ by force
+
+
+subsection {* Lists of types *}
+
+lemma lists_typings:
+ "e \<tturnstile> ts : Ts \<Longrightarrow> listsp (\<lambda>t. \<exists>T. e \<turnstile> t : T) ts"
+ apply (induct ts arbitrary: Ts)
+ apply (case_tac Ts)
+ apply simp
+ apply (rule listsp.Nil)
+ apply simp
+ apply (case_tac Ts)
+ apply simp
+ apply simp
+ apply (rule listsp.Cons)
+ apply blast
+ apply blast
+ done
+
+lemma types_snoc: "e \<tturnstile> ts : Ts \<Longrightarrow> e \<turnstile> t : T \<Longrightarrow> e \<tturnstile> ts @ [t] : Ts @ [T]"
+ apply (induct ts arbitrary: Ts)
+ apply simp
+ apply (case_tac Ts)
+ apply simp+
+ done
+
+lemma types_snoc_eq: "e \<tturnstile> ts @ [t] : Ts @ [T] =
+ (e \<tturnstile> ts : Ts \<and> e \<turnstile> t : T)"
+ apply (induct ts arbitrary: Ts)
+ apply (case_tac Ts)
+ apply simp+
+ apply (case_tac Ts)
+ apply (case_tac "ts @ [t]")
+ apply simp+
+ done
+
+lemma rev_exhaust2 [extraction_expand]:
+ obtains (Nil) "xs = []" | (snoc) ys y where "xs = ys @ [y]"
+ -- {* Cannot use @{text rev_exhaust} from the @{text List}
+ theory, since it is not constructive *}
+ apply (subgoal_tac "\<forall>ys. xs = rev ys \<longrightarrow> thesis")
+ apply (erule_tac x="rev xs" in allE)
+ apply simp
+ apply (rule allI)
+ apply (rule impI)
+ apply (case_tac ys)
+ apply simp
+ apply simp
+ apply atomize
+ apply (erule allE)+
+ apply (erule mp, rule conjI)
+ apply (rule refl)+
+ done
+
+lemma types_snocE: "e \<tturnstile> ts @ [t] : Ts \<Longrightarrow>
+ (\<And>Us U. Ts = Us @ [U] \<Longrightarrow> e \<tturnstile> ts : Us \<Longrightarrow> e \<turnstile> t : U \<Longrightarrow> P) \<Longrightarrow> P"
+ apply (cases Ts rule: rev_exhaust2)
+ apply simp
+ apply (case_tac "ts @ [t]")
+ apply (simp add: types_snoc_eq)+
+ apply iprover
+ done
+
+
+subsection {* n-ary function types *}
+
+lemma list_app_typeD:
+ "e \<turnstile> t \<degree>\<degree> ts : T \<Longrightarrow> \<exists>Ts. e \<turnstile> t : Ts \<Rrightarrow> T \<and> e \<tturnstile> ts : Ts"
+ apply (induct ts arbitrary: t T)
+ apply simp
+ apply atomize
+ apply simp
+ apply (erule_tac x = "t \<degree> a" in allE)
+ apply (erule_tac x = T in allE)
+ apply (erule impE)
+ apply assumption
+ apply (elim exE conjE)
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply (rule_tac x = "Ta # Ts" in exI)
+ apply simp
+ done
+
+lemma list_app_typeE:
+ "e \<turnstile> t \<degree>\<degree> ts : T \<Longrightarrow> (\<And>Ts. e \<turnstile> t : Ts \<Rrightarrow> T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow> C) \<Longrightarrow> C"
+ by (insert list_app_typeD) fast
+
+lemma list_app_typeI:
+ "e \<turnstile> t : Ts \<Rrightarrow> T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow> e \<turnstile> t \<degree>\<degree> ts : T"
+ apply (induct ts arbitrary: t T Ts)
+ apply simp
+ apply atomize
+ apply (case_tac Ts)
+ apply simp
+ apply simp
+ apply (erule_tac x = "t \<degree> a" in allE)
+ apply (erule_tac x = T in allE)
+ apply (erule_tac x = list in allE)
+ apply (erule impE)
+ apply (erule conjE)
+ apply (erule typing.App)
+ apply assumption
+ apply blast
+ done
+
+text {*
+For the specific case where the head of the term is a variable,
+the following theorems make it possible to infer the types of the
+arguments without analyzing the typing derivation. This is crucial
+for program extraction.
+*}
+
+theorem var_app_type_eq:
+ "e \<turnstile> Var i \<degree>\<degree> ts : T \<Longrightarrow> e \<turnstile> Var i \<degree>\<degree> ts : U \<Longrightarrow> T = U"
+ apply (induct ts arbitrary: T U rule: rev_induct)
+ apply simp
+ apply (ind_cases "e \<turnstile> Var i : T" for T)
+ apply (ind_cases "e \<turnstile> Var i : T" for T)
+ apply simp
+ apply simp
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply atomize
+ apply (erule_tac x="Ta \<Rightarrow> T" in allE)
+ apply (erule_tac x="Tb \<Rightarrow> U" in allE)
+ apply (erule impE)
+ apply assumption
+ apply (erule impE)
+ apply assumption
+ apply simp
+ done
+
+lemma var_app_types: "e \<turnstile> Var i \<degree>\<degree> ts \<degree>\<degree> us : T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow>
+ e \<turnstile> Var i \<degree>\<degree> ts : U \<Longrightarrow> \<exists>Us. U = Us \<Rrightarrow> T \<and> e \<tturnstile> us : Us"
+ apply (induct us arbitrary: ts Ts U)
+ apply simp
+ apply (erule var_app_type_eq)
+ apply assumption
+ apply simp
+ apply atomize
+ apply (case_tac U)
+ apply (rule FalseE)
+ apply simp
+ apply (erule list_app_typeE)
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply (drule_tac T="Atom nat" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
+ apply assumption
+ apply simp
+ apply (erule_tac x="ts @ [a]" in allE)
+ apply (erule_tac x="Ts @ [type1]" in allE)
+ apply (erule_tac x="type2" in allE)
+ apply simp
+ apply (erule impE)
+ apply (rule types_snoc)
+ apply assumption
+ apply (erule list_app_typeE)
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply (drule_tac T="type1 \<Rightarrow> type2" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
+ apply assumption
+ apply simp
+ apply (erule impE)
+ apply (rule typing.App)
+ apply assumption
+ apply (erule list_app_typeE)
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply (frule_tac T="type1 \<Rightarrow> type2" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
+ apply assumption
+ apply simp
+ apply (erule exE)
+ apply (rule_tac x="type1 # Us" in exI)
+ apply simp
+ apply (erule list_app_typeE)
+ apply (ind_cases "e \<turnstile> t \<degree> u : T" for t u T)
+ apply (frule_tac T="type1 \<Rightarrow> Us \<Rrightarrow> T" and U="Ta \<Rightarrow> Tsa \<Rrightarrow> T" in var_app_type_eq)
+ apply assumption
+ apply simp
+ done
+
+lemma var_app_typesE: "e \<turnstile> Var i \<degree>\<degree> ts : T \<Longrightarrow>
+ (\<And>Ts. e \<turnstile> Var i : Ts \<Rrightarrow> T \<Longrightarrow> e \<tturnstile> ts : Ts \<Longrightarrow> P) \<Longrightarrow> P"
+ apply (drule var_app_types [of _ _ "[]", simplified])
+ apply (iprover intro: typing.Var)+
+ done
+
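+text {*
+  A small usage sketch of @{text var_app_typesE} (illustration only,
+  not part of the development below): the typings of the arguments of a
+  variable-headed term are recovered directly from the typing of the
+  whole term.
+*}
+
+lemma "e \<turnstile> Var i \<degree>\<degree> ts : T \<Longrightarrow> \<exists>Ts. e \<tturnstile> ts : Ts"
+  by (erule var_app_typesE) iprover
+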
+lemma abs_typeE: "e \<turnstile> Abs t : T \<Longrightarrow> (\<And>U V. e\<langle>0:U\<rangle> \<turnstile> t : V \<Longrightarrow> P) \<Longrightarrow> P"
+ apply (cases T)
+ apply (rule FalseE)
+ apply (erule typing.cases)
+ apply simp_all
+ apply atomize
+ apply (erule_tac x="type1" in allE)
+ apply (erule_tac x="type2" in allE)
+ apply (erule mp)
+ apply (erule typing.cases)
+ apply simp_all
+ done
+
+
+subsection {* Lifting preserves well-typedness *}
+
+lemma lift_type [intro!]: "e \<turnstile> t : T \<Longrightarrow> e\<langle>i:U\<rangle> \<turnstile> lift t i : T"
+ by (induct arbitrary: i U set: typing) auto
+
+lemma lift_types:
+ "e \<tturnstile> ts : Ts \<Longrightarrow> e\<langle>i:U\<rangle> \<tturnstile> (map (\<lambda>t. lift t i) ts) : Ts"
+ apply (induct ts arbitrary: Ts)
+ apply simp
+ apply (case_tac Ts)
+ apply auto
+ done
+
+
+subsection {* Substitution lemmas *}
+
+lemma subst_lemma:
+ "e \<turnstile> t : T \<Longrightarrow> e' \<turnstile> u : U \<Longrightarrow> e = e'\<langle>i:U\<rangle> \<Longrightarrow> e' \<turnstile> t[u/i] : T"
+ apply (induct arbitrary: e' i U u set: typing)
+ apply (rule_tac x = x and y = i in linorder_cases)
+ apply auto
+ apply blast
+ done
+
+lemma substs_lemma:
+ "e \<turnstile> u : T \<Longrightarrow> e\<langle>i:T\<rangle> \<tturnstile> ts : Ts \<Longrightarrow>
+ e \<tturnstile> (map (\<lambda>t. t[u/i]) ts) : Ts"
+ apply (induct ts arbitrary: Ts)
+ apply (case_tac Ts)
+ apply simp
+ apply simp
+ apply atomize
+ apply (case_tac Ts)
+ apply simp
+ apply simp
+ apply (erule conjE)
+ apply (erule (1) subst_lemma)
+ apply (rule refl)
+ done
+
+
+subsection {* Subject reduction *}
+
+lemma subject_reduction: "e \<turnstile> t : T \<Longrightarrow> t \<rightarrow>\<^sub>\<beta> t' \<Longrightarrow> e \<turnstile> t' : T"
+ apply (induct arbitrary: t' set: typing)
+ apply blast
+ apply blast
+ apply atomize
+ apply (ind_cases "s \<degree> t \<rightarrow>\<^sub>\<beta> t'" for s t t')
+ apply hypsubst
+ apply (ind_cases "env \<turnstile> Abs t : T \<Rightarrow> U" for env t T U)
+ apply (rule subst_lemma)
+ apply assumption
+ apply assumption
+ apply (rule ext)
+ apply (case_tac x)
+ apply auto
+ done
+
+theorem subject_reduction': "t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> e \<turnstile> t : T \<Longrightarrow> e \<turnstile> t' : T"
+ by (induct set: rtranclp) (iprover intro: subject_reduction)+
+
+
+subsection {* Alternative induction rule for types *}
+
+lemma type_induct [induct type]:
+ assumes
+ "(\<And>T. (\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> P T1) \<Longrightarrow>
+ (\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> P T2) \<Longrightarrow> P T)"
+ shows "P T"
+proof (induct T)
+ case Atom
+ show ?case by (rule assms) simp_all
+next
+ case Fun
+ show ?case by (rule assms) (insert Fun, simp_all)
+qed
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/WeakNorm.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,515 @@
+(* Title: HOL/Proofs/Lambda/WeakNorm.thy
+ Author: Stefan Berghofer
+ Copyright 2003 TU Muenchen
+*)
+
+header {* Weak normalization for simply-typed lambda calculus *}
+
+theory WeakNorm
+imports Type NormalForm Code_Integer
+begin
+
+text {*
+Formalization by Stefan Berghofer. Partly based on a paper proof by
+Felix Joachimski and Ralph Matthes \cite{Matthes-Joachimski-AML}.
+*}
+
+
+subsection {* Main theorems *}
+
+lemma norm_list:
+ assumes f_compat: "\<And>t t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> f t \<rightarrow>\<^sub>\<beta>\<^sup>* f t'"
+ and f_NF: "\<And>t. NF t \<Longrightarrow> NF (f t)"
+ and uNF: "NF u" and uT: "e \<turnstile> u : T"
+ shows "\<And>Us. e\<langle>i:T\<rangle> \<tturnstile> as : Us \<Longrightarrow>
+ listall (\<lambda>t. \<forall>e T' u i. e\<langle>i:T\<rangle> \<turnstile> t : T' \<longrightarrow>
+ NF u \<longrightarrow> e \<turnstile> u : T \<longrightarrow> (\<exists>t'. t[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t')) as \<Longrightarrow>
+ \<exists>as'. \<forall>j. Var j \<degree>\<degree> map (\<lambda>t. f (t[u/i])) as \<rightarrow>\<^sub>\<beta>\<^sup>*
+ Var j \<degree>\<degree> map f as' \<and> NF (Var j \<degree>\<degree> map f as')"
+ (is "\<And>Us. _ \<Longrightarrow> listall ?R as \<Longrightarrow> \<exists>as'. ?ex Us as as'")
+proof (induct as rule: rev_induct)
+ case (Nil Us)
+ with Var_NF have "?ex Us [] []" by simp
+ thus ?case ..
+next
+ case (snoc b bs Us)
+ have "e\<langle>i:T\<rangle> \<tturnstile> bs @ [b] : Us" by fact
+ then obtain Vs W where Us: "Us = Vs @ [W]"
+ and bs: "e\<langle>i:T\<rangle> \<tturnstile> bs : Vs" and bT: "e\<langle>i:T\<rangle> \<turnstile> b : W"
+ by (rule types_snocE)
+ from snoc have "listall ?R bs" by simp
+ with bs have "\<exists>bs'. ?ex Vs bs bs'" by (rule snoc)
+ then obtain bs' where
+ bsred: "\<And>j. Var j \<degree>\<degree> map (\<lambda>t. f (t[u/i])) bs \<rightarrow>\<^sub>\<beta>\<^sup>* Var j \<degree>\<degree> map f bs'"
+ and bsNF: "\<And>j. NF (Var j \<degree>\<degree> map f bs')" by iprover
+ from snoc have "?R b" by simp
+ with bT and uNF and uT have "\<exists>b'. b[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* b' \<and> NF b'"
+ by iprover
+ then obtain b' where bred: "b[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* b'" and bNF: "NF b'"
+ by iprover
+ from bsNF [of 0] have "listall NF (map f bs')"
+ by (rule App_NF_D)
+ moreover have "NF (f b')" using bNF by (rule f_NF)
+ ultimately have "listall NF (map f (bs' @ [b']))"
+ by simp
+ hence "\<And>j. NF (Var j \<degree>\<degree> map f (bs' @ [b']))" by (rule NF.App)
+ moreover from bred have "f (b[u/i]) \<rightarrow>\<^sub>\<beta>\<^sup>* f b'"
+ by (rule f_compat)
+ with bsred have
+ "\<And>j. (Var j \<degree>\<degree> map (\<lambda>t. f (t[u/i])) bs) \<degree> f (b[u/i]) \<rightarrow>\<^sub>\<beta>\<^sup>*
+ (Var j \<degree>\<degree> map f bs') \<degree> f b'" by (rule rtrancl_beta_App)
+ ultimately have "?ex Us (bs @ [b]) (bs' @ [b'])" by simp
+ thus ?case ..
+qed
+
+lemma subst_type_NF:
+ "\<And>t e T u i. NF t \<Longrightarrow> e\<langle>i:U\<rangle> \<turnstile> t : T \<Longrightarrow> NF u \<Longrightarrow> e \<turnstile> u : U \<Longrightarrow> \<exists>t'. t[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
+ (is "PROP ?P U" is "\<And>t e T u i. _ \<Longrightarrow> PROP ?Q t e T u i U")
+proof (induct U)
+ fix T t
+ let ?R = "\<lambda>t. \<forall>e T' u i.
+ e\<langle>i:T\<rangle> \<turnstile> t : T' \<longrightarrow> NF u \<longrightarrow> e \<turnstile> u : T \<longrightarrow> (\<exists>t'. t[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t')"
+ assume MI1: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T1"
+ assume MI2: "\<And>T1 T2. T = T1 \<Rightarrow> T2 \<Longrightarrow> PROP ?P T2"
+ assume "NF t"
+ thus "\<And>e T' u i. PROP ?Q t e T' u i T"
+ proof induct
+ fix e T' u i assume uNF: "NF u" and uT: "e \<turnstile> u : T"
+ {
+ case (App ts x e_ T'_ u_ i_)
+ assume "e\<langle>i:T\<rangle> \<turnstile> Var x \<degree>\<degree> ts : T'"
+ then obtain Us
+ where varT: "e\<langle>i:T\<rangle> \<turnstile> Var x : Us \<Rrightarrow> T'"
+ and argsT: "e\<langle>i:T\<rangle> \<tturnstile> ts : Us"
+ by (rule var_app_typesE)
+ from nat_eq_dec show "\<exists>t'. (Var x \<degree>\<degree> ts)[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
+ proof
+ assume eq: "x = i"
+ show ?thesis
+ proof (cases ts)
+ case Nil
+ with eq have "(Var x \<degree>\<degree> [])[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* u" by simp
+ with Nil and uNF show ?thesis by simp iprover
+ next
+ case (Cons a as)
+ with argsT obtain T'' Ts where Us: "Us = T'' # Ts"
+ by (cases Us) (rule FalseE, simp+, erule that)
+ from varT and Us have varT: "e\<langle>i:T\<rangle> \<turnstile> Var x : T'' \<Rightarrow> Ts \<Rrightarrow> T'"
+ by simp
+ from varT eq have T: "T = T'' \<Rightarrow> Ts \<Rrightarrow> T'" by cases auto
+ with uT have uT': "e \<turnstile> u : T'' \<Rightarrow> Ts \<Rrightarrow> T'" by simp
+ from argsT Us Cons have argsT': "e\<langle>i:T\<rangle> \<tturnstile> as : Ts" by simp
+ from argsT Us Cons have argT: "e\<langle>i:T\<rangle> \<turnstile> a : T''" by simp
+ from argT uT refl have aT: "e \<turnstile> a[u/i] : T''" by (rule subst_lemma)
+ from App and Cons have "listall ?R as" by simp (iprover dest: listall_conj2)
+ with lift_preserves_beta' lift_NF uNF uT argsT'
+ have "\<exists>as'. \<forall>j. Var j \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as \<rightarrow>\<^sub>\<beta>\<^sup>*
+ Var j \<degree>\<degree> map (\<lambda>t. lift t 0) as' \<and>
+ NF (Var j \<degree>\<degree> map (\<lambda>t. lift t 0) as')" by (rule norm_list)
+ then obtain as' where
+ asred: "Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as \<rightarrow>\<^sub>\<beta>\<^sup>*
+ Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as'"
+ and asNF: "NF (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')" by iprover
+ from App and Cons have "?R a" by simp
+ with argT and uNF and uT have "\<exists>a'. a[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* a' \<and> NF a'"
+ by iprover
+ then obtain a' where ared: "a[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* a'" and aNF: "NF a'" by iprover
+ from uNF have "NF (lift u 0)" by (rule lift_NF)
+ hence "\<exists>u'. lift u 0 \<degree> Var 0 \<rightarrow>\<^sub>\<beta>\<^sup>* u' \<and> NF u'" by (rule app_Var_NF)
+ then obtain u' where ured: "lift u 0 \<degree> Var 0 \<rightarrow>\<^sub>\<beta>\<^sup>* u'" and u'NF: "NF u'"
+ by iprover
+ from T and u'NF have "\<exists>ua. u'[a'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* ua \<and> NF ua"
+ proof (rule MI1)
+ have "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 \<degree> Var 0 : Ts \<Rrightarrow> T'"
+ proof (rule typing.App)
+ from uT' show "e\<langle>0:T''\<rangle> \<turnstile> lift u 0 : T'' \<Rightarrow> Ts \<Rrightarrow> T'" by (rule lift_type)
+ show "e\<langle>0:T''\<rangle> \<turnstile> Var 0 : T''" by (rule typing.Var) simp
+ qed
+ with ured show "e\<langle>0:T''\<rangle> \<turnstile> u' : Ts \<Rrightarrow> T'" by (rule subject_reduction')
+ from ared aT show "e \<turnstile> a' : T''" by (rule subject_reduction')
+ show "NF a'" by fact
+ qed
+ then obtain ua where uared: "u'[a'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* ua" and uaNF: "NF ua"
+ by iprover
+ from ared have "(lift u 0 \<degree> Var 0)[a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>* (lift u 0 \<degree> Var 0)[a'/0]"
+ by (rule subst_preserves_beta2')
+ also from ured have "(lift u 0 \<degree> Var 0)[a'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* u'[a'/0]"
+ by (rule subst_preserves_beta')
+ also note uared
+ finally have "(lift u 0 \<degree> Var 0)[a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>* ua" .
+ hence uared': "u \<degree> a[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* ua" by simp
+ from T asNF _ uaNF have "\<exists>r. (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[ua/0] \<rightarrow>\<^sub>\<beta>\<^sup>* r \<and> NF r"
+ proof (rule MI2)
+ have "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as : T'"
+ proof (rule list_app_typeI)
+ show "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 : Ts \<Rrightarrow> T'" by (rule typing.Var) simp
+ from uT argsT' have "e \<tturnstile> map (\<lambda>t. t[u/i]) as : Ts"
+ by (rule substs_lemma)
+ hence "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<tturnstile> map (\<lambda>t. lift t 0) (map (\<lambda>t. t[u/i]) as) : Ts"
+ by (rule lift_types)
+ thus "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<tturnstile> map (\<lambda>t. lift (t[u/i]) 0) as : Ts"
+ by (simp_all add: o_def)
+ qed
+ with asred show "e\<langle>0:Ts \<Rrightarrow> T'\<rangle> \<turnstile> Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as' : T'"
+ by (rule subject_reduction')
+ from argT uT refl have "e \<turnstile> a[u/i] : T''" by (rule subst_lemma)
+ with uT' have "e \<turnstile> u \<degree> a[u/i] : Ts \<Rrightarrow> T'" by (rule typing.App)
+ with uared' show "e \<turnstile> ua : Ts \<Rrightarrow> T'" by (rule subject_reduction')
+ qed
+ then obtain r where rred: "(Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[ua/0] \<rightarrow>\<^sub>\<beta>\<^sup>* r"
+ and rnf: "NF r" by iprover
+ from asred have
+ "(Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as)[u \<degree> a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>*
+ (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[u \<degree> a[u/i]/0]"
+ by (rule subst_preserves_beta')
+ also from uared' have "(Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[u \<degree> a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>*
+ (Var 0 \<degree>\<degree> map (\<lambda>t. lift t 0) as')[ua/0]" by (rule subst_preserves_beta2')
+ also note rred
+ finally have "(Var 0 \<degree>\<degree> map (\<lambda>t. lift (t[u/i]) 0) as)[u \<degree> a[u/i]/0] \<rightarrow>\<^sub>\<beta>\<^sup>* r" .
+ with rnf Cons eq show ?thesis
+ by (simp add: o_def) iprover
+ qed
+ next
+ assume neq: "x \<noteq> i"
+ from App have "listall ?R ts" by (iprover dest: listall_conj2)
+ with TrueI TrueI uNF uT argsT
+ have "\<exists>ts'. \<forall>j. Var j \<degree>\<degree> map (\<lambda>t. t[u/i]) ts \<rightarrow>\<^sub>\<beta>\<^sup>* Var j \<degree>\<degree> ts' \<and>
+ NF (Var j \<degree>\<degree> ts')" (is "\<exists>ts'. ?ex ts'")
+ by (rule norm_list [of "\<lambda>t. t", simplified])
+ then obtain ts' where NF: "?ex ts'" ..
+ from nat_le_dec show ?thesis
+ proof
+ assume "i < x"
+ with NF show ?thesis by simp iprover
+ next
+ assume "\<not> (i < x)"
+ with NF neq show ?thesis by (simp add: subst_Var) iprover
+ qed
+ qed
+ next
+ case (Abs r e_ T'_ u_ i_)
+ assume absT: "e\<langle>i:T\<rangle> \<turnstile> Abs r : T'"
+ then obtain R S where "e\<langle>0:R\<rangle>\<langle>Suc i:T\<rangle> \<turnstile> r : S" by (rule abs_typeE) simp
+ moreover have "NF (lift u 0)" using `NF u` by (rule lift_NF)
+ moreover have "e\<langle>0:R\<rangle> \<turnstile> lift u 0 : T" using uT by (rule lift_type)
+ ultimately have "\<exists>t'. r[lift u 0/Suc i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'" by (rule Abs)
+ thus "\<exists>t'. Abs r[u/i] \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'"
+ by simp (iprover intro: rtrancl_beta_Abs NF.Abs)
+ }
+ qed
+qed
+
+
+-- {* A computationally relevant copy of @{term "e \<turnstile> t : T"} *}
+inductive rtyping :: "(nat \<Rightarrow> type) \<Rightarrow> dB \<Rightarrow> type \<Rightarrow> bool" ("_ \<turnstile>\<^sub>R _ : _" [50, 50, 50] 50)
+ where
+ Var: "e x = T \<Longrightarrow> e \<turnstile>\<^sub>R Var x : T"
+ | Abs: "e\<langle>0:T\<rangle> \<turnstile>\<^sub>R t : U \<Longrightarrow> e \<turnstile>\<^sub>R Abs t : (T \<Rightarrow> U)"
+ | App: "e \<turnstile>\<^sub>R s : T \<Rightarrow> U \<Longrightarrow> e \<turnstile>\<^sub>R t : T \<Longrightarrow> e \<turnstile>\<^sub>R (s \<degree> t) : U"
+
+lemma rtyping_imp_typing: "e \<turnstile>\<^sub>R t : T \<Longrightarrow> e \<turnstile> t : T"
+ apply (induct set: rtyping)
+ apply (erule typing.Var)
+ apply (erule typing.Abs)
+ apply (erule typing.App)
+ apply assumption
+ done
+
+
+theorem type_NF:
+ assumes "e \<turnstile>\<^sub>R t : T"
+ shows "\<exists>t'. t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<and> NF t'" using assms
+proof induct
+ case Var
+ show ?case by (iprover intro: Var_NF)
+next
+ case Abs
+ thus ?case by (iprover intro: rtrancl_beta_Abs NF.Abs)
+next
+ case (App e s T U t)
+ from App obtain s' t' where
+ sred: "s \<rightarrow>\<^sub>\<beta>\<^sup>* s'" and "NF s'"
+ and tred: "t \<rightarrow>\<^sub>\<beta>\<^sup>* t'" and tNF: "NF t'" by iprover
+ have "\<exists>u. (Var 0 \<degree> lift t' 0)[s'/0] \<rightarrow>\<^sub>\<beta>\<^sup>* u \<and> NF u"
+ proof (rule subst_type_NF)
+ have "NF (lift t' 0)" using tNF by (rule lift_NF)
+ hence "listall NF [lift t' 0]" by (rule listall_cons) (rule listall_nil)
+ hence "NF (Var 0 \<degree>\<degree> [lift t' 0])" by (rule NF.App)
+ thus "NF (Var 0 \<degree> lift t' 0)" by simp
+ show "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 \<degree> lift t' 0 : U"
+ proof (rule typing.App)
+ show "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> Var 0 : T \<Rightarrow> U"
+ by (rule typing.Var) simp
+ from tred have "e \<turnstile> t' : T"
+ by (rule subject_reduction') (rule rtyping_imp_typing, rule App.hyps)
+ thus "e\<langle>0:T \<Rightarrow> U\<rangle> \<turnstile> lift t' 0 : T"
+ by (rule lift_type)
+ qed
+ from sred show "e \<turnstile> s' : T \<Rightarrow> U"
+ by (rule subject_reduction') (rule rtyping_imp_typing, rule App.hyps)
+ show "NF s'" by fact
+ qed
+ then obtain u where ured: "s' \<degree> t' \<rightarrow>\<^sub>\<beta>\<^sup>* u" and unf: "NF u" by simp iprover
+ from sred tred have "s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* s' \<degree> t'" by (rule rtrancl_beta_App)
+ hence "s \<degree> t \<rightarrow>\<^sub>\<beta>\<^sup>* u" using ured by (rule rtranclp_trans)
+ with unf show ?case by iprover
+qed
+
+
+subsection {* Extracting the program *}
+
+declare NF.induct [ind_realizer]
+declare rtranclp.induct [ind_realizer irrelevant]
+declare rtyping.induct [ind_realizer]
+lemmas [extraction_expand] = conj_assoc listall_cons_eq
+
+extract type_NF
+
+lemma rtranclR_rtrancl_eq: "rtranclpR r a b = r\<^sup>*\<^sup>* a b"
+ apply (rule iffI)
+ apply (erule rtranclpR.induct)
+ apply (rule rtranclp.rtrancl_refl)
+ apply (erule rtranclp.rtrancl_into_rtrancl)
+ apply assumption
+ apply (erule rtranclp.induct)
+ apply (rule rtranclpR.rtrancl_refl)
+ apply (erule rtranclpR.rtrancl_into_rtrancl)
+ apply assumption
+ done
+
+lemma NFR_imp_NF: "NFR nf t \<Longrightarrow> NF t"
+ apply (erule NFR.induct)
+ apply (rule NF.intros)
+ apply (simp add: listall_def)
+ apply (erule NF.intros)
+ done
+
+text_raw {*
+\begin{figure}
+\renewcommand{\isastyle}{\scriptsize\it}%
+@{thm [display,eta_contract=false,margin=100] subst_type_NF_def}
+\renewcommand{\isastyle}{\small\it}%
+\caption{Program extracted from @{text subst_type_NF}}
+\label{fig:extr-subst-type-nf}
+\end{figure}
+
+\begin{figure}
+\renewcommand{\isastyle}{\scriptsize\it}%
+@{thm [display,margin=100] subst_Var_NF_def}
+@{thm [display,margin=100] app_Var_NF_def}
+@{thm [display,margin=100] lift_NF_def}
+@{thm [display,eta_contract=false,margin=100] type_NF_def}
+\renewcommand{\isastyle}{\small\it}%
+\caption{Program extracted from lemmas and main theorem}
+\label{fig:extr-type-nf}
+\end{figure}
+*}
+
+text {*
+The program corresponding to the proof of the central lemma, which
+performs substitution and normalization, is shown in Figure
+\ref{fig:extr-subst-type-nf}. The correctness
+theorem corresponding to the program @{text "subst_type_NF"} is
+@{thm [display,margin=100] subst_type_NF_correctness
+ [simplified rtranclR_rtrancl_eq Collect_mem_eq, no_vars]}
+where @{text NFR} is the realizability predicate corresponding to
+the datatype @{text NFT}, which is inductively defined by the rules
+\pagebreak
+@{thm [display,margin=90] NFR.App [of ts nfs x] NFR.Abs [of nf t]}
+
+The programs corresponding to the main theorem @{text "type_NF"}, as
+well as to some lemmas, are shown in Figure \ref{fig:extr-type-nf}.
+The correctness statement for the main function @{text "type_NF"} is
+@{thm [display,margin=100] type_NF_correctness
+ [simplified rtranclR_rtrancl_eq Collect_mem_eq, no_vars]}
+where the realizability predicate @{text "rtypingR"} corresponding to the
+computationally relevant version of the typing judgement is inductively
+defined by the rules
+@{thm [display,margin=100] rtypingR.Var [no_vars]
+ rtypingR.Abs [of ty, no_vars] rtypingR.App [of ty e s T U ty' t]}
+*}
+
+subsection {* Generating executable code *}
+
+instantiation NFT :: default
+begin
+
+definition "default = Dummy ()"
+
+instance ..
+
+end
+
+instantiation dB :: default
+begin
+
+definition "default = dB.Var 0"
+
+instance ..
+
+end
+
+instantiation prod :: (default, default) default
+begin
+
+definition "default = (default, default)"
+
+instance ..
+
+end
+
+instantiation list :: (type) default
+begin
+
+definition "default = []"
+
+instance ..
+
+end
+
+instantiation "fun" :: (type, default) default
+begin
+
+definition "default = (\<lambda>x. default)"
+
+instance ..
+
+end
+
+definition int_of_nat :: "nat \<Rightarrow> int" where
+ "int_of_nat = of_nat"
+
+text {*
+  The following functions convert between Isabelle's built-in {\tt term}
+  datatype and the generated {\tt dB} datatype. This makes it possible to
+  generate example terms using Isabelle's parser and to inspect
+  normalized terms using Isabelle's pretty printer.
+*}
+
+ML {*
+fun dBtype_of_typ (Type ("fun", [T, U])) =
+ @{code Fun} (dBtype_of_typ T, dBtype_of_typ U)
+ | dBtype_of_typ (TFree (s, _)) = (case explode s of
+ ["'", a] => @{code Atom} (@{code nat} (ord a - 97))
+ | _ => error "dBtype_of_typ: variable name")
+ | dBtype_of_typ _ = error "dBtype_of_typ: bad type";
+
+fun dB_of_term (Bound i) = @{code dB.Var} (@{code nat} i)
+ | dB_of_term (t $ u) = @{code dB.App} (dB_of_term t, dB_of_term u)
+ | dB_of_term (Abs (_, _, t)) = @{code dB.Abs} (dB_of_term t)
+ | dB_of_term _ = error "dB_of_term: bad term";
+
+fun term_of_dB Ts (Type ("fun", [T, U])) (@{code dB.Abs} dBt) =
+ Abs ("x", T, term_of_dB (T :: Ts) U dBt)
+ | term_of_dB Ts _ dBt = term_of_dB' Ts dBt
+and term_of_dB' Ts (@{code dB.Var} n) = Bound (@{code int_of_nat} n)
+ | term_of_dB' Ts (@{code dB.App} (dBt, dBu)) =
+ let val t = term_of_dB' Ts dBt
+ in case fastype_of1 (Ts, t) of
+ Type ("fun", [T, U]) => t $ term_of_dB Ts T dBu
+ | _ => error "term_of_dB: function type expected"
+ end
+ | term_of_dB' _ _ = error "term_of_dB: term not in normal form";
+
+fun typing_of_term Ts e (Bound i) =
+ @{code Var} (e, @{code nat} i, dBtype_of_typ (nth Ts i))
+ | typing_of_term Ts e (t $ u) = (case fastype_of1 (Ts, t) of
+ Type ("fun", [T, U]) => @{code App} (e, dB_of_term t,
+ dBtype_of_typ T, dBtype_of_typ U, dB_of_term u,
+ typing_of_term Ts e t, typing_of_term Ts e u)
+ | _ => error "typing_of_term: function type expected")
+ | typing_of_term Ts e (Abs (s, T, t)) =
+ let val dBT = dBtype_of_typ T
+ in @{code Abs} (e, dBT, dB_of_term t,
+ dBtype_of_typ (fastype_of1 (T :: Ts, t)),
+ typing_of_term (T :: Ts) (@{code shift} e @{code "0::nat"} dBT) t)
+ end
+ | typing_of_term _ _ _ = error "typing_of_term: bad term";
+
+fun dummyf _ = error "dummy";
+
+val ct1 = @{cterm "%f. ((%f x. f (f (f x))) ((%f x. f (f (f (f x)))) f))"};
+val (dB1, _) = @{code type_NF} (typing_of_term [] dummyf (term_of ct1));
+val ct1' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct1)) dB1);
+
+val ct2 = @{cterm "%f x. (%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) x)))))"};
+val (dB2, _) = @{code type_NF} (typing_of_term [] dummyf (term_of ct2));
+val ct2' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct2)) dB2);
+*}
+
+text {*
+The same story again for the SML code generator.
+*}
+
+consts_code
+ "default" ("(error \"default\")")
+ "default :: 'a \<Rightarrow> 'b::default" ("(fn '_ => error \"default\")")
+
+code_module Norm
+contains
+ test = "type_NF"
+
+ML {*
+fun nat_of_int 0 = Norm.zero
+ | nat_of_int n = Norm.Suc (nat_of_int (n-1));
+
+fun int_of_nat Norm.zero = 0
+ | int_of_nat (Norm.Suc n) = 1 + int_of_nat n;
+
+fun dBtype_of_typ (Type ("fun", [T, U])) =
+ Norm.Fun (dBtype_of_typ T, dBtype_of_typ U)
+ | dBtype_of_typ (TFree (s, _)) = (case explode s of
+ ["'", a] => Norm.Atom (nat_of_int (ord a - 97))
+ | _ => error "dBtype_of_typ: variable name")
+ | dBtype_of_typ _ = error "dBtype_of_typ: bad type";
+
+fun dB_of_term (Bound i) = Norm.dB_Var (nat_of_int i)
+ | dB_of_term (t $ u) = Norm.App (dB_of_term t, dB_of_term u)
+ | dB_of_term (Abs (_, _, t)) = Norm.Abs (dB_of_term t)
+ | dB_of_term _ = error "dB_of_term: bad term";
+
+fun term_of_dB Ts (Type ("fun", [T, U])) (Norm.Abs dBt) =
+ Abs ("x", T, term_of_dB (T :: Ts) U dBt)
+ | term_of_dB Ts _ dBt = term_of_dB' Ts dBt
+and term_of_dB' Ts (Norm.dB_Var n) = Bound (int_of_nat n)
+ | term_of_dB' Ts (Norm.App (dBt, dBu)) =
+ let val t = term_of_dB' Ts dBt
+ in case fastype_of1 (Ts, t) of
+ Type ("fun", [T, U]) => t $ term_of_dB Ts T dBu
+ | _ => error "term_of_dB: function type expected"
+ end
+ | term_of_dB' _ _ = error "term_of_dB: term not in normal form";
+
+fun typing_of_term Ts e (Bound i) =
+ Norm.Var (e, nat_of_int i, dBtype_of_typ (List.nth (Ts, i)))
+ | typing_of_term Ts e (t $ u) = (case fastype_of1 (Ts, t) of
+ Type ("fun", [T, U]) => Norm.rtypingT_App (e, dB_of_term t,
+ dBtype_of_typ T, dBtype_of_typ U, dB_of_term u,
+ typing_of_term Ts e t, typing_of_term Ts e u)
+ | _ => error "typing_of_term: function type expected")
+ | typing_of_term Ts e (Abs (s, T, t)) =
+ let val dBT = dBtype_of_typ T
+ in Norm.rtypingT_Abs (e, dBT, dB_of_term t,
+ dBtype_of_typ (fastype_of1 (T :: Ts, t)),
+ typing_of_term (T :: Ts) (Norm.shift e Norm.zero dBT) t)
+ end
+ | typing_of_term _ _ _ = error "typing_of_term: bad term";
+
+fun dummyf _ = error "dummy";
+*}
+
+text {*
+We now try out the extracted program @{text "type_NF"} on some example terms.
+*}
+
+ML {*
+val ct1 = @{cterm "%f. ((%f x. f (f (f x))) ((%f x. f (f (f (f x)))) f))"};
+val (dB1, _) = Norm.type_NF (typing_of_term [] dummyf (term_of ct1));
+val ct1' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct1)) dB1);
+
+val ct2 = @{cterm "%f x. (%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) ((%x. f x x) x)))))"};
+val (dB2, _) = Norm.type_NF (typing_of_term [] dummyf (term_of ct2));
+val ct2' = cterm_of @{theory} (term_of_dB [] (#T (rep_cterm ct2)) dB2);
+*}
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/document/root.bib Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,43 @@
+@TechReport{Loader1998,
+ author = {Ralph Loader},
+ title = {{N}otes on {S}imply {T}yped {L}ambda {C}alculus},
+ institution = {Laboratory for Foundations of Computer Science,
+ School of Informatics, University of Edinburgh},
+ year = 1998,
+ number = {ECS-LFCS-98-381}
+}
+
+@InProceedings{Matthes-ESSLLI2000,
+ author = {Ralph Matthes},
+ title = {{L}ambda {C}alculus: {A} {C}ase for {I}nductive
+ {D}efinitions},
+ booktitle = {Lecture notes of the 12th European Summer School in
+ Logic, Language and Information (ESSLLI 2000)},
+ year = 2000,
+ month = {August},
+ publisher = {School of Computer Science, University of
+ Birmingham}
+}
+
+@Article{Matthes-Joachimski-AML,
+ author = {Felix Joachimski and Ralph Matthes},
+ title = {Short Proofs of Normalization for the simply-typed
+ $\lambda$-calculus, permutative conversions and
+ {G}{\"o}del's {T}},
+ journal = {Archive for Mathematical Logic},
+ year = 2003,
+ volume = 42,
+ number = 1,
+ pages = {59--87}
+}
+
+@Article{Takahashi-IandC,
+ author = {Masako Takahashi},
+ title = {Parallel reductions in $\lambda$-calculus},
+ journal = {Information and Computation},
+ year = 1995,
+ volume = 118,
+ number = 1,
+ pages = {120--127},
+ month = {April}
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/Lambda/document/root.tex Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,33 @@
+\documentclass[11pt,a4paper]{article}
+\usepackage{graphicx}
+\usepackage[english]{babel}
+\usepackage[latin1]{inputenc}
+\usepackage{amssymb}
+\usepackage{isabelle,isabellesym,pdfsetup}
+
+\isabellestyle{it}
+\renewcommand{\isamarkupsubsubsection}[1]{\subsubsection*{#1}}
+\newcommand{\isasymRrightarrow}{\isamath{\Rrightarrow}}
+
+\begin{document}
+
+\title{Fundamental Properties of Lambda-calculus}
+\author{Tobias Nipkow \\ Stefan Berghofer}
+\maketitle
+
+\tableofcontents
+
+\begin{center}
+ \includegraphics[scale=0.7]{session_graph}
+\end{center}
+
+\newpage
+
+\parindent 0pt \parskip 0.5ex
+
+\input{session}
+
+\bibliographystyle{abbrv}
+\bibliography{root}
+
+\end{document}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/ex/Hilbert_Classical.thy Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,160 @@
+(* Title: HOL/Proofs/ex/Hilbert_Classical.thy
+ Author: Stefan Berghofer and Markus Wenzel, TU Muenchen
+*)
+
+header {* Hilbert's choice and classical logic *}
+
+theory Hilbert_Classical
+imports Main
+begin
+
+text {*
+ Derivation of the classical law of tertium-non-datur by means of
+ Hilbert's choice operator (due to M. J. Beeson and J. Harrison).
+*}
+
+
+subsection {* Proof text *}
+
+theorem tnd: "A \<or> \<not> A"
+proof -
+ let ?P = "\<lambda>X. X = False \<or> X = True \<and> A"
+ let ?Q = "\<lambda>X. X = False \<and> A \<or> X = True"
+
+ have a: "?P (Eps ?P)"
+ proof (rule someI)
+ have "False = False" ..
+ thus "?P False" ..
+ qed
+ have b: "?Q (Eps ?Q)"
+ proof (rule someI)
+ have "True = True" ..
+ thus "?Q True" ..
+ qed
+
+ from a show ?thesis
+ proof
+ assume "Eps ?P = True \<and> A"
+ hence A ..
+ thus ?thesis ..
+ next
+ assume P: "Eps ?P = False"
+ from b show ?thesis
+ proof
+ assume "Eps ?Q = False \<and> A"
+ hence A ..
+ thus ?thesis ..
+ next
+ assume Q: "Eps ?Q = True"
+ have neq: "?P \<noteq> ?Q"
+ proof
+ assume "?P = ?Q"
+ hence "Eps ?P = Eps ?Q" by (rule arg_cong)
+ also note P
+ also note Q
+ finally show False by (rule False_neq_True)
+ qed
+ have "\<not> A"
+ proof
+ assume a: A
+ have "?P = ?Q"
+ proof (rule ext)
+ fix x show "?P x = ?Q x"
+ proof
+ assume "?P x"
+ thus "?Q x"
+ proof
+ assume "x = False"
+ from this and a have "x = False \<and> A" ..
+ thus "?Q x" ..
+ next
+ assume "x = True \<and> A"
+ hence "x = True" ..
+ thus "?Q x" ..
+ qed
+ next
+ assume "?Q x"
+ thus "?P x"
+ proof
+ assume "x = False \<and> A"
+ hence "x = False" ..
+ thus "?P x" ..
+ next
+ assume "x = True"
+ from this and a have "x = True \<and> A" ..
+ thus "?P x" ..
+ qed
+ qed
+ qed
+ with neq show False by contradiction
+ qed
+ thus ?thesis ..
+ qed
+ qed
+qed
+
+
+subsection {* Proof term of text *}
+
+prf tnd
+
+
+subsection {* Proof script *}
+
+theorem tnd': "A \<or> \<not> A"
+ apply (subgoal_tac
+ "(((SOME x. x = False \<or> x = True \<and> A) = False) \<or>
+ ((SOME x. x = False \<or> x = True \<and> A) = True) \<and> A) \<and>
+ (((SOME x. x = False \<and> A \<or> x = True) = False) \<and> A \<or>
+ ((SOME x. x = False \<and> A \<or> x = True) = True))")
+ prefer 2
+ apply (rule conjI)
+ apply (rule someI)
+ apply (rule disjI1)
+ apply (rule refl)
+ apply (rule someI)
+ apply (rule disjI2)
+ apply (rule refl)
+ apply (erule conjE)
+ apply (erule disjE)
+ apply (erule disjE)
+ apply (erule conjE)
+ apply (erule disjI1)
+ prefer 2
+ apply (erule conjE)
+ apply (erule disjI1)
+ apply (subgoal_tac
+ "(\<lambda>x. (x = False) \<or> (x = True) \<and> A) \<noteq>
+ (\<lambda>x. (x = False) \<and> A \<or> (x = True))")
+ prefer 2
+ apply (rule notI)
+ apply (drule_tac f = "\<lambda>y. SOME x. y x" in arg_cong)
+ apply (drule trans, assumption)
+ apply (drule sym)
+ apply (drule trans, assumption)
+ apply (erule False_neq_True)
+ apply (rule disjI2)
+ apply (rule notI)
+ apply (erule notE)
+ apply (rule ext)
+ apply (rule iffI)
+ apply (erule disjE)
+ apply (rule disjI1)
+ apply (erule conjI)
+ apply assumption
+ apply (erule conjE)
+ apply (erule disjI2)
+ apply (erule disjE)
+ apply (erule conjE)
+ apply (erule disjI1)
+ apply (rule disjI2)
+ apply (erule conjI)
+ apply assumption
+ done
+
+
+subsection {* Proof term of script *}
+
+prf tnd'
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Proofs/ex/ROOT.ML Mon Sep 06 19:23:54 2010 +0200
@@ -0,0 +1,1 @@
+use_thys ["Hilbert_Classical"];
--- a/src/HOL/Statespace/distinct_tree_prover.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Statespace/distinct_tree_prover.ML Mon Sep 06 19:23:54 2010 +0200
@@ -23,18 +23,19 @@
structure DistinctTreeProver : DISTINCT_TREE_PROVER =
struct
-val all_distinct_left = thm "DistinctTreeProver.all_distinct_left";
-val all_distinct_right = thm "DistinctTreeProver.all_distinct_right";
+
+val all_distinct_left = @{thm DistinctTreeProver.all_distinct_left};
+val all_distinct_right = @{thm DistinctTreeProver.all_distinct_right};
-val distinct_left = thm "DistinctTreeProver.distinct_left";
-val distinct_right = thm "DistinctTreeProver.distinct_right";
-val distinct_left_right = thm "DistinctTreeProver.distinct_left_right";
-val in_set_root = thm "DistinctTreeProver.in_set_root";
-val in_set_left = thm "DistinctTreeProver.in_set_left";
-val in_set_right = thm "DistinctTreeProver.in_set_right";
+val distinct_left = @{thm DistinctTreeProver.distinct_left};
+val distinct_right = @{thm DistinctTreeProver.distinct_right};
+val distinct_left_right = @{thm DistinctTreeProver.distinct_left_right};
+val in_set_root = @{thm DistinctTreeProver.in_set_root};
+val in_set_left = @{thm DistinctTreeProver.in_set_left};
+val in_set_right = @{thm DistinctTreeProver.in_set_right};
-val swap_neq = thm "DistinctTreeProver.swap_neq";
-val neq_to_eq_False = thm "DistinctTreeProver.neq_to_eq_False"
+val swap_neq = @{thm DistinctTreeProver.swap_neq};
+val neq_to_eq_False = @{thm DistinctTreeProver.neq_to_eq_False};
datatype Direction = Left | Right
@@ -273,9 +274,9 @@
end
-val delete_root = thm "DistinctTreeProver.delete_root";
-val delete_left = thm "DistinctTreeProver.delete_left";
-val delete_right = thm "DistinctTreeProver.delete_right";
+val delete_root = @{thm DistinctTreeProver.delete_root};
+val delete_left = @{thm DistinctTreeProver.delete_left};
+val delete_right = @{thm DistinctTreeProver.delete_right};
fun deleteProver dist_thm [] = delete_root OF [dist_thm]
| deleteProver dist_thm (p::ps) =
@@ -286,10 +287,10 @@
val del = deleteProver dist_thm' ps;
in discharge [dist_thm, del] del_rule end;
-val subtract_Tip = thm "DistinctTreeProver.subtract_Tip";
-val subtract_Node = thm "DistinctTreeProver.subtract_Node";
-val delete_Some_all_distinct = thm "DistinctTreeProver.delete_Some_all_distinct";
-val subtract_Some_all_distinct_res = thm "DistinctTreeProver.subtract_Some_all_distinct_res";
+val subtract_Tip = @{thm DistinctTreeProver.subtract_Tip};
+val subtract_Node = @{thm DistinctTreeProver.subtract_Node};
+val delete_Some_all_distinct = @{thm DistinctTreeProver.delete_Some_all_distinct};
+val subtract_Some_all_distinct_res = @{thm DistinctTreeProver.subtract_Some_all_distinct_res};
local
val (alpha,v) =
@@ -320,7 +321,7 @@
in discharge [del_tree, sub_l, sub_r] subtract_Node end
end
-val subtract_Some_all_distinct = thm "DistinctTreeProver.subtract_Some_all_distinct";
+val subtract_Some_all_distinct = @{thm DistinctTreeProver.subtract_Some_all_distinct};
fun distinct_implProver dist_thm ct =
let
val ctree = ct |> Thm.dest_comb |> #2 |> Thm.dest_comb |> #2;
--- a/src/HOL/Statespace/state_fun.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Statespace/state_fun.ML Mon Sep 06 19:23:54 2010 +0200
@@ -38,10 +38,10 @@
local
-val conj1_False = thm "conj1_False";
-val conj2_False = thm "conj2_False";
-val conj_True = thm "conj_True";
-val conj_cong = thm "conj_cong";
+val conj1_False = @{thm conj1_False};
+val conj2_False = @{thm conj2_False};
+val conj_True = @{thm conj_True};
+val conj_cong = @{thm conj_cong};
fun isFalse (Const (@{const_name False},_)) = true
| isFalse _ = false;
@@ -255,7 +255,7 @@
local
-val swap_ex_eq = thm "StateFun.swap_ex_eq";
+val swap_ex_eq = @{thm StateFun.swap_ex_eq};
fun is_selector thy T sel =
let
val (flds,more) = Record.get_recT_fields thy T
--- a/src/HOL/TLA/Memory/Memory.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/TLA/Memory/Memory.thy Mon Sep 06 19:23:54 2010 +0200
@@ -181,10 +181,10 @@
|- Calling ch p & (rs!p ~= #NotAResult)
--> Enabled (<MemReturn ch rs p>_(rtrner ch ! p, rs!p))"
apply (tactic
- {* action_simp_tac @{simpset} [thm "MemReturn_change" RSN (2, thm "enabled_mono") ] [] 1 *})
+ {* action_simp_tac @{simpset} [@{thm MemReturn_change} RSN (2, @{thm enabled_mono}) ] [] 1 *})
apply (tactic
- {* action_simp_tac (@{simpset} addsimps [thm "MemReturn_def", thm "Return_def",
- thm "rtrner_def"]) [exI] [thm "base_enabled", thm "Pair_inject"] 1 *})
+ {* action_simp_tac (@{simpset} addsimps [@{thm MemReturn_def}, @{thm Return_def},
+ @{thm rtrner_def}]) [exI] [@{thm base_enabled}, @{thm Pair_inject}] 1 *})
done
lemma ReadInner_enabled: "!!p. basevars (rtrner ch ! p, rs!p) ==>
@@ -227,13 +227,13 @@
--> Enabled (<RNext ch mm rs p>_(rtrner ch ! p, rs!p))"
apply (auto simp: enabled_disj [try_rewrite] intro!: RWRNext_enabled [temp_use])
apply (case_tac "arg (ch w p)")
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "Read_def",
- temp_rewrite (thm "enabled_ex")]) [thm "ReadInner_enabled", exI] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm Read_def},
+ temp_rewrite @{thm enabled_ex}]) [@{thm ReadInner_enabled}, exI] [] 1 *})
apply (force dest: base_pair [temp_use])
apply (erule contrapos_np)
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "Write_def",
- temp_rewrite (thm "enabled_ex")])
- [thm "WriteInner_enabled", exI] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm Write_def},
+ temp_rewrite @{thm enabled_ex}])
+ [@{thm WriteInner_enabled}, exI] [] 1 *})
done
end
--- a/src/HOL/TLA/Memory/MemoryImplementation.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/TLA/Memory/MemoryImplementation.thy Mon Sep 06 19:23:54 2010 +0200
@@ -255,10 +255,10 @@
apply (rule historyI)
apply assumption+
apply (rule MI_base)
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "HInit_def"]) [] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm HInit_def}]) [] [] 1 *})
apply (erule fun_cong)
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "HNext_def"])
- [thm "busy_squareI"] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm HNext_def}])
+ [@{thm busy_squareI}] [] 1 *})
apply (erule fun_cong)
done
@@ -346,22 +346,22 @@
caller_def rtrner_def MVNROKBA_def S_def S1_def S2_def Calling_def)
lemma S1ClerkUnch: "|- [MClkNext memCh crCh cst p]_(c p) & $(S1 rmhist p) --> unchanged (c p)"
- by (tactic {* auto_tac (MI_fast_css addSDs2 [temp_use (thm "MClkidle")]
- addsimps2 [thm "S_def", thm "S1_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addSDs2 [temp_use @{thm MClkidle}]
+ addsimps2 [@{thm S_def}, @{thm S1_def}]) *})
lemma S1RPCUnch: "|- [RPCNext crCh rmCh rst p]_(r p) & $(S1 rmhist p) --> unchanged (r p)"
- by (tactic {* auto_tac (MI_fast_css addSDs2 [temp_use (thm "RPCidle")]
- addsimps2 [thm "S_def", thm "S1_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addSDs2 [temp_use @{thm RPCidle}]
+ addsimps2 [@{thm S_def}, @{thm S1_def}]) *})
lemma S1MemUnch: "|- [RNext rmCh mm ires p]_(m p) & $(S1 rmhist p) --> unchanged (m p)"
- by (tactic {* auto_tac (MI_fast_css addSDs2 [temp_use (thm "Memoryidle")]
- addsimps2 [thm "S_def", thm "S1_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addSDs2 [temp_use @{thm Memoryidle}]
+ addsimps2 [@{thm S_def}, @{thm S1_def}]) *})
lemma S1Hist: "|- [HNext rmhist p]_(c p,r p,m p,rmhist!p) & $(S1 rmhist p)
--> unchanged (rmhist!p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "HNext_def", thm "S_def",
- thm "S1_def", thm "MemReturn_def", thm "RPCFail_def", thm "MClkReply_def",
- thm "Return_def"]) [] [temp_use (thm "squareE")] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm HNext_def}, @{thm S_def},
+ @{thm S1_def}, @{thm MemReturn_def}, @{thm RPCFail_def}, @{thm MClkReply_def},
+ @{thm Return_def}]) [] [temp_use @{thm squareE}] 1 *})
(* ------------------------------ State S2 ---------------------------------------- *)
@@ -375,9 +375,9 @@
lemma S2Forward: "|- $(S2 rmhist p) & MClkFwd memCh crCh cst p
& unchanged (e p, r p, m p, rmhist!p)
--> (S3 rmhist p)$"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "MClkFwd_def",
- thm "Call_def", thm "e_def", thm "r_def", thm "m_def", thm "caller_def",
- thm "rtrner_def", thm "S_def", thm "S2_def", thm "S3_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm MClkFwd_def},
+ @{thm Call_def}, @{thm e_def}, @{thm r_def}, @{thm m_def}, @{thm caller_def},
+ @{thm rtrner_def}, @{thm S_def}, @{thm S2_def}, @{thm S3_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S2RPCUnch: "|- [RPCNext crCh rmCh rst p]_(r p) & $(S2 rmhist p) --> unchanged (r p)"
by (auto simp: S_def S2_def dest!: RPCidle [temp_use])
@@ -387,8 +387,8 @@
lemma S2Hist: "|- [HNext rmhist p]_(c p,r p,m p,rmhist!p) & $(S2 rmhist p)
--> unchanged (rmhist!p)"
- by (tactic {* auto_tac (MI_fast_css addsimps2 [thm "HNext_def", thm "MemReturn_def",
- thm "RPCFail_def", thm "MClkReply_def", thm "Return_def", thm "S_def", thm "S2_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addsimps2 [@{thm HNext_def}, @{thm MemReturn_def},
+ @{thm RPCFail_def}, @{thm MClkReply_def}, @{thm Return_def}, @{thm S_def}, @{thm S2_def}]) *})
(* ------------------------------ State S3 ---------------------------------------- *)
@@ -411,19 +411,19 @@
lemma S3Forward: "|- RPCFwd crCh rmCh rst p & HNext rmhist p & $(S3 rmhist p)
& unchanged (e p, c p, m p)
--> (S4 rmhist p)$ & unchanged (rmhist!p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "RPCFwd_def",
- thm "HNext_def", thm "MemReturn_def", thm "RPCFail_def",
- thm "MClkReply_def", thm "Return_def", thm "Call_def", thm "e_def",
- thm "c_def", thm "m_def", thm "caller_def", thm "rtrner_def", thm "S_def",
- thm "S3_def", thm "S4_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm RPCFwd_def},
+ @{thm HNext_def}, @{thm MemReturn_def}, @{thm RPCFail_def},
+ @{thm MClkReply_def}, @{thm Return_def}, @{thm Call_def}, @{thm e_def},
+ @{thm c_def}, @{thm m_def}, @{thm caller_def}, @{thm rtrner_def}, @{thm S_def},
+ @{thm S3_def}, @{thm S4_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S3Fail: "|- RPCFail crCh rmCh rst p & $(S3 rmhist p) & HNext rmhist p
& unchanged (e p, c p, m p)
--> (S6 rmhist p)$"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "HNext_def",
- thm "RPCFail_def", thm "Return_def", thm "e_def", thm "c_def",
- thm "m_def", thm "caller_def", thm "rtrner_def", thm "MVOKBARF_def",
- thm "S_def", thm "S3_def", thm "S6_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm HNext_def},
+ @{thm RPCFail_def}, @{thm Return_def}, @{thm e_def}, @{thm c_def},
+ @{thm m_def}, @{thm caller_def}, @{thm rtrner_def}, @{thm MVOKBARF_def},
+ @{thm S_def}, @{thm S3_def}, @{thm S6_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S3MemUnch: "|- [RNext rmCh mm ires p]_(m p) & $(S3 rmhist p) --> unchanged (m p)"
by (auto simp: S_def S3_def dest!: Memoryidle [temp_use])
@@ -441,18 +441,18 @@
by (auto simp: S_def S4_def dest!: MClkbusy [temp_use])
lemma S4RPCUnch: "|- [RPCNext crCh rmCh rst p]_(r p) & $(S4 rmhist p) --> unchanged (r p)"
- by (tactic {* auto_tac (MI_fast_css addsimps2 [thm "S_def", thm "S4_def"]
- addSDs2 [temp_use (thm "RPCbusy")]) *})
+ by (tactic {* auto_tac (MI_fast_css addsimps2 [@{thm S_def}, @{thm S4_def}]
+ addSDs2 [temp_use @{thm RPCbusy}]) *})
lemma S4ReadInner: "|- ReadInner rmCh mm ires p l & $(S4 rmhist p) & unchanged (e p, c p, r p)
& HNext rmhist p & $(MemInv mm l)
--> (S4 rmhist p)$ & unchanged (rmhist!p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "ReadInner_def",
- thm "GoodRead_def", thm "BadRead_def", thm "HNext_def", thm "MemReturn_def",
- thm "RPCFail_def", thm "MClkReply_def", thm "Return_def", thm "e_def",
- thm "c_def", thm "r_def", thm "rtrner_def", thm "caller_def",
- thm "MVNROKBA_def", thm "S_def", thm "S4_def", thm "RdRequest_def",
- thm "Calling_def", thm "MemInv_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ReadInner_def},
+ @{thm GoodRead_def}, @{thm BadRead_def}, @{thm HNext_def}, @{thm MemReturn_def},
+ @{thm RPCFail_def}, @{thm MClkReply_def}, @{thm Return_def}, @{thm e_def},
+ @{thm c_def}, @{thm r_def}, @{thm rtrner_def}, @{thm caller_def},
+ @{thm MVNROKBA_def}, @{thm S_def}, @{thm S4_def}, @{thm RdRequest_def},
+ @{thm Calling_def}, @{thm MemInv_def}]) [] [] 1 *})
lemma S4Read: "|- Read rmCh mm ires p & $(S4 rmhist p) & unchanged (e p, c p, r p)
& HNext rmhist p & (!l. $MemInv mm l)
@@ -461,11 +461,11 @@
lemma S4WriteInner: "|- WriteInner rmCh mm ires p l v & $(S4 rmhist p) & unchanged (e p, c p, r p) & HNext rmhist p
--> (S4 rmhist p)$ & unchanged (rmhist!p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "WriteInner_def",
- thm "GoodWrite_def", thm "BadWrite_def", thm "HNext_def", thm "MemReturn_def",
- thm "RPCFail_def", thm "MClkReply_def", thm "Return_def", thm "e_def",
- thm "c_def", thm "r_def", thm "rtrner_def", thm "caller_def", thm "MVNROKBA_def",
- thm "S_def", thm "S4_def", thm "WrRequest_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm WriteInner_def},
+ @{thm GoodWrite_def}, @{thm BadWrite_def}, @{thm HNext_def}, @{thm MemReturn_def},
+ @{thm RPCFail_def}, @{thm MClkReply_def}, @{thm Return_def}, @{thm e_def},
+ @{thm c_def}, @{thm r_def}, @{thm rtrner_def}, @{thm caller_def}, @{thm MVNROKBA_def},
+ @{thm S_def}, @{thm S4_def}, @{thm WrRequest_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S4Write: "|- Write rmCh mm ires p l & $(S4 rmhist p) & unchanged (e p, c p, r p)
& (HNext rmhist p)
@@ -500,26 +500,26 @@
lemma S5Reply: "|- RPCReply crCh rmCh rst p & $(S5 rmhist p) & unchanged (e p, c p, m p,rmhist!p)
--> (S6 rmhist p)$"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "RPCReply_def",
- thm "Return_def", thm "e_def", thm "c_def", thm "m_def", thm "MVOKBA_def",
- thm "MVOKBARF_def", thm "caller_def", thm "rtrner_def", thm "S_def",
- thm "S5_def", thm "S6_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm RPCReply_def},
+ @{thm Return_def}, @{thm e_def}, @{thm c_def}, @{thm m_def}, @{thm MVOKBA_def},
+ @{thm MVOKBARF_def}, @{thm caller_def}, @{thm rtrner_def}, @{thm S_def},
+ @{thm S5_def}, @{thm S6_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S5Fail: "|- RPCFail crCh rmCh rst p & $(S5 rmhist p) & unchanged (e p, c p, m p,rmhist!p)
--> (S6 rmhist p)$"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "RPCFail_def",
- thm "Return_def", thm "e_def", thm "c_def", thm "m_def",
- thm "MVOKBARF_def", thm "caller_def", thm "rtrner_def",
- thm "S_def", thm "S5_def", thm "S6_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm RPCFail_def},
+ @{thm Return_def}, @{thm e_def}, @{thm c_def}, @{thm m_def},
+ @{thm MVOKBARF_def}, @{thm caller_def}, @{thm rtrner_def},
+ @{thm S_def}, @{thm S5_def}, @{thm S6_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S5MemUnch: "|- [RNext rmCh mm ires p]_(m p) & $(S5 rmhist p) --> unchanged (m p)"
by (auto simp: S_def S5_def dest!: Memoryidle [temp_use])
lemma S5Hist: "|- [HNext rmhist p]_(c p, r p, m p, rmhist!p) & $(S5 rmhist p)
--> (rmhist!p)$ = $(rmhist!p)"
- by (tactic {* auto_tac (MI_fast_css addsimps2 [thm "HNext_def",
- thm "MemReturn_def", thm "RPCFail_def", thm "MClkReply_def", thm "Return_def",
- thm "S_def", thm "S5_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addsimps2 [@{thm HNext_def},
+ @{thm MemReturn_def}, @{thm RPCFail_def}, @{thm MClkReply_def}, @{thm Return_def},
+ @{thm S_def}, @{thm S5_def}]) *})
(* ------------------------------ State S6 ---------------------------------------- *)
@@ -533,18 +533,18 @@
lemma S6Retry: "|- MClkRetry memCh crCh cst p & HNext rmhist p & $S6 rmhist p
& unchanged (e p,r p,m p)
--> (S3 rmhist p)$ & unchanged (rmhist!p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "HNext_def",
- thm "MClkReply_def", thm "MClkRetry_def", thm "Call_def", thm "Return_def",
- thm "e_def", thm "r_def", thm "m_def", thm "caller_def", thm "rtrner_def",
- thm "S_def", thm "S6_def", thm "S3_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm HNext_def},
+ @{thm MClkReply_def}, @{thm MClkRetry_def}, @{thm Call_def}, @{thm Return_def},
+ @{thm e_def}, @{thm r_def}, @{thm m_def}, @{thm caller_def}, @{thm rtrner_def},
+ @{thm S_def}, @{thm S6_def}, @{thm S3_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S6Reply: "|- MClkReply memCh crCh cst p & HNext rmhist p & $S6 rmhist p
& unchanged (e p,r p,m p)
--> (S1 rmhist p)$"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "HNext_def",
- thm "MemReturn_def", thm "RPCFail_def", thm "Return_def", thm "MClkReply_def",
- thm "e_def", thm "r_def", thm "m_def", thm "caller_def", thm "rtrner_def",
- thm "S_def", thm "S6_def", thm "S1_def", thm "Calling_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm HNext_def},
+ @{thm MemReturn_def}, @{thm RPCFail_def}, @{thm Return_def}, @{thm MClkReply_def},
+ @{thm e_def}, @{thm r_def}, @{thm m_def}, @{thm caller_def}, @{thm rtrner_def},
+ @{thm S_def}, @{thm S6_def}, @{thm S1_def}, @{thm Calling_def}]) [] [] 1 *})
lemma S6RPCUnch: "|- [RPCNext crCh rmCh rst p]_(r p) & $S6 rmhist p --> unchanged (r p)"
by (auto simp: S_def S6_def dest!: RPCidle [temp_use])
@@ -563,9 +563,9 @@
(* The implementation's initial condition implies the state predicate S1 *)
lemma Step1_1: "|- ImpInit p & HInit rmhist p --> S1 rmhist p"
- by (tactic {* auto_tac (MI_fast_css addsimps2 [thm "MVNROKBA_def",
- thm "MClkInit_def", thm "RPCInit_def", thm "PInit_def", thm "HInit_def",
- thm "ImpInit_def", thm "S_def", thm "S1_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addsimps2 [@{thm MVNROKBA_def},
+ @{thm MClkInit_def}, @{thm RPCInit_def}, @{thm PInit_def}, @{thm HInit_def},
+ @{thm ImpInit_def}, @{thm S_def}, @{thm S1_def}]) *})
(* ========== Step 1.2 ================================================== *)
(* Figure 16 is a predicate-action diagram for the implementation. *)
@@ -573,29 +573,29 @@
lemma Step1_2_1: "|- [HNext rmhist p]_(c p,r p,m p, rmhist!p) & ImpNext p
& ~unchanged (e p, c p, r p, m p, rmhist!p) & $S1 rmhist p
--> (S2 rmhist p)$ & ENext p & unchanged (c p, r p, m p)"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ImpNext_def"]) []
- (map temp_elim [thm "S1ClerkUnch", thm "S1RPCUnch", thm "S1MemUnch", thm "S1Hist"]) 1 *})
- apply (tactic {* auto_tac (MI_fast_css addSIs2 [temp_use (thm "S1Env")]) *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ImpNext_def}]) []
+ (map temp_elim [@{thm S1ClerkUnch}, @{thm S1RPCUnch}, @{thm S1MemUnch}, @{thm S1Hist}]) 1 *})
+ apply (tactic {* auto_tac (MI_fast_css addSIs2 [temp_use @{thm S1Env}]) *})
done
lemma Step1_2_2: "|- [HNext rmhist p]_(c p,r p,m p, rmhist!p) & ImpNext p
& ~unchanged (e p, c p, r p, m p, rmhist!p) & $S2 rmhist p
--> (S3 rmhist p)$ & MClkFwd memCh crCh cst p
& unchanged (e p, r p, m p, rmhist!p)"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ImpNext_def"]) []
- (map temp_elim [thm "S2EnvUnch", thm "S2RPCUnch", thm "S2MemUnch", thm "S2Hist"]) 1 *})
- apply (tactic {* auto_tac (MI_fast_css addSIs2 [temp_use (thm "S2Clerk"),
- temp_use (thm "S2Forward")]) *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ImpNext_def}]) []
+ (map temp_elim [@{thm S2EnvUnch}, @{thm S2RPCUnch}, @{thm S2MemUnch}, @{thm S2Hist}]) 1 *})
+ apply (tactic {* auto_tac (MI_fast_css addSIs2 [temp_use @{thm S2Clerk},
+ temp_use @{thm S2Forward}]) *})
done
lemma Step1_2_3: "|- [HNext rmhist p]_(c p,r p,m p, rmhist!p) & ImpNext p
& ~unchanged (e p, c p, r p, m p, rmhist!p) & $S3 rmhist p
--> ((S4 rmhist p)$ & RPCFwd crCh rmCh rst p & unchanged (e p, c p, m p, rmhist!p))
| ((S6 rmhist p)$ & RPCFail crCh rmCh rst p & unchanged (e p, c p, m p))"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ImpNext_def"]) []
- (map temp_elim [thm "S3EnvUnch", thm "S3ClerkUnch", thm "S3MemUnch"]) 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ImpNext_def}]) []
+ (map temp_elim [@{thm S3EnvUnch}, @{thm S3ClerkUnch}, @{thm S3MemUnch}]) 1 *})
apply (tactic {* action_simp_tac @{simpset} []
- (thm "squareE" :: map temp_elim [thm "S3RPC", thm "S3Forward", thm "S3Fail"]) 1 *})
+ (@{thm squareE} :: map temp_elim [@{thm S3RPC}, @{thm S3Forward}, @{thm S3Fail}]) 1 *})
apply (auto dest!: S3Hist [temp_use])
done
@@ -605,10 +605,10 @@
--> ((S4 rmhist p)$ & Read rmCh mm ires p & unchanged (e p, c p, r p, rmhist!p))
| ((S4 rmhist p)$ & (? l. Write rmCh mm ires p l) & unchanged (e p, c p, r p, rmhist!p))
| ((S5 rmhist p)$ & MemReturn rmCh ires p & unchanged (e p, c p, r p))"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ImpNext_def"]) []
- (map temp_elim [thm "S4EnvUnch", thm "S4ClerkUnch", thm "S4RPCUnch"]) 1 *})
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "RNext_def"]) []
- (thm "squareE" :: map temp_elim [thm "S4Read", thm "S4Write", thm "S4Return"]) 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ImpNext_def}]) []
+ (map temp_elim [@{thm S4EnvUnch}, @{thm S4ClerkUnch}, @{thm S4RPCUnch}]) 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm RNext_def}]) []
+ (@{thm squareE} :: map temp_elim [@{thm S4Read}, @{thm S4Write}, @{thm S4Return}]) 1 *})
apply (auto dest!: S4Hist [temp_use])
done
@@ -616,21 +616,21 @@
& ~unchanged (e p, c p, r p, m p, rmhist!p) & $S5 rmhist p
--> ((S6 rmhist p)$ & RPCReply crCh rmCh rst p & unchanged (e p, c p, m p))
| ((S6 rmhist p)$ & RPCFail crCh rmCh rst p & unchanged (e p, c p, m p))"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ImpNext_def"]) []
- (map temp_elim [thm "S5EnvUnch", thm "S5ClerkUnch", thm "S5MemUnch", thm "S5Hist"]) 1 *})
- apply (tactic {* action_simp_tac @{simpset} [] [thm "squareE", temp_elim (thm "S5RPC")] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ImpNext_def}]) []
+ (map temp_elim [@{thm S5EnvUnch}, @{thm S5ClerkUnch}, @{thm S5MemUnch}, @{thm S5Hist}]) 1 *})
+ apply (tactic {* action_simp_tac @{simpset} [] [@{thm squareE}, temp_elim @{thm S5RPC}] 1 *})
apply (tactic {* auto_tac (MI_fast_css addSDs2
- [temp_use (thm "S5Reply"), temp_use (thm "S5Fail")]) *})
+ [temp_use @{thm S5Reply}, temp_use @{thm S5Fail}]) *})
done
lemma Step1_2_6: "|- [HNext rmhist p]_(c p,r p,m p, rmhist!p) & ImpNext p
& ~unchanged (e p, c p, r p, m p, rmhist!p) & $S6 rmhist p
--> ((S1 rmhist p)$ & MClkReply memCh crCh cst p & unchanged (e p, r p, m p))
| ((S3 rmhist p)$ & MClkRetry memCh crCh cst p & unchanged (e p,r p,m p,rmhist!p))"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ImpNext_def"]) []
- (map temp_elim [thm "S6EnvUnch", thm "S6RPCUnch", thm "S6MemUnch"]) 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ImpNext_def}]) []
+ (map temp_elim [@{thm S6EnvUnch}, @{thm S6RPCUnch}, @{thm S6MemUnch}]) 1 *})
apply (tactic {* action_simp_tac @{simpset} []
- (thm "squareE" :: map temp_elim [thm "S6Clerk", thm "S6Retry", thm "S6Reply"]) 1 *})
+ (@{thm squareE} :: map temp_elim [@{thm S6Clerk}, @{thm S6Retry}, @{thm S6Reply}]) 1 *})
apply (auto dest: S6Hist [temp_use])
done
@@ -641,8 +641,8 @@
section "Initialization (Step 1.3)"
lemma Step1_3: "|- S1 rmhist p --> PInit (resbar rmhist) p"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "resbar_def",
- thm "PInit_def", thm "S_def", thm "S1_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm resbar_def},
+ @{thm PInit_def}, @{thm S_def}, @{thm S1_def}]) [] [] 1 *})
(* ----------------------------------------------------------------------
Step 1.4: Implementation's next-state relation simulates specification's
@@ -653,23 +653,23 @@
lemma Step1_4_1: "|- ENext p & $S1 rmhist p & (S2 rmhist p)$ & unchanged (c p, r p, m p)
--> unchanged (rtrner memCh!p, resbar rmhist!p)"
- by (tactic {* auto_tac (MI_fast_css addsimps2 [thm "c_def", thm "r_def",
- thm "m_def", thm "resbar_def"]) *})
+ by (tactic {* auto_tac (MI_fast_css addsimps2 [@{thm c_def}, @{thm r_def},
+ @{thm m_def}, @{thm resbar_def}]) *})
lemma Step1_4_2: "|- MClkFwd memCh crCh cst p & $S2 rmhist p & (S3 rmhist p)$
& unchanged (e p, r p, m p, rmhist!p)
--> unchanged (rtrner memCh!p, resbar rmhist!p)"
by (tactic {* action_simp_tac
- (@{simpset} addsimps [thm "MClkFwd_def", thm "e_def", thm "r_def", thm "m_def",
- thm "resbar_def", thm "S_def", thm "S2_def", thm "S3_def"]) [] [] 1 *})
+ (@{simpset} addsimps [@{thm MClkFwd_def}, @{thm e_def}, @{thm r_def}, @{thm m_def},
+ @{thm resbar_def}, @{thm S_def}, @{thm S2_def}, @{thm S3_def}]) [] [] 1 *})
lemma Step1_4_3a: "|- RPCFwd crCh rmCh rst p & $S3 rmhist p & (S4 rmhist p)$
& unchanged (e p, c p, m p, rmhist!p)
--> unchanged (rtrner memCh!p, resbar rmhist!p)"
apply clarsimp
apply (drule S3_excl [temp_use] S4_excl [temp_use])+
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "e_def",
- thm "c_def", thm "m_def", thm "resbar_def", thm "S_def", thm "S3_def"]) [] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm e_def},
+ @{thm c_def}, @{thm m_def}, @{thm resbar_def}, @{thm S_def}, @{thm S3_def}]) [] [] 1 *})
done
lemma Step1_4_3b: "|- RPCFail crCh rmCh rst p & $S3 rmhist p & (S6 rmhist p)$
@@ -687,13 +687,13 @@
--> ReadInner memCh mm (resbar rmhist) p l"
apply clarsimp
apply (drule S4_excl [temp_use])+
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "ReadInner_def",
- thm "GoodRead_def", thm "BadRead_def", thm "e_def", thm "c_def", thm "m_def"]) [] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm ReadInner_def},
+ @{thm GoodRead_def}, @{thm BadRead_def}, @{thm e_def}, @{thm c_def}, @{thm m_def}]) [] [] 1 *})
apply (auto simp: resbar_def)
apply (tactic {* ALLGOALS (action_simp_tac
- (@{simpset} addsimps [thm "RPCRelayArg_def", thm "MClkRelayArg_def",
- thm "S_def", thm "S4_def", thm "RdRequest_def", thm "MemInv_def"])
- [] [thm "impE", thm "MemValNotAResultE"]) *})
+ (@{simpset} addsimps [@{thm RPCRelayArg_def}, @{thm MClkRelayArg_def},
+ @{thm S_def}, @{thm S4_def}, @{thm RdRequest_def}, @{thm MemInv_def}])
+ [] [@{thm impE}, @{thm MemValNotAResultE}]) *})
done
lemma Step1_4_4a: "|- Read rmCh mm ires p & $S4 rmhist p & (S4 rmhist p)$
@@ -707,12 +707,12 @@
apply clarsimp
apply (drule S4_excl [temp_use])+
apply (tactic {* action_simp_tac (@{simpset} addsimps
- [thm "WriteInner_def", thm "GoodWrite_def", thm "BadWrite_def", thm "e_def",
- thm "c_def", thm "m_def"]) [] [] 1 *})
+ [@{thm WriteInner_def}, @{thm GoodWrite_def}, @{thm BadWrite_def}, @{thm e_def},
+ @{thm c_def}, @{thm m_def}]) [] [] 1 *})
apply (auto simp: resbar_def)
apply (tactic {* ALLGOALS (action_simp_tac (@{simpset} addsimps
- [thm "RPCRelayArg_def", thm "MClkRelayArg_def", thm "S_def",
- thm "S4_def", thm "WrRequest_def"]) [] []) *})
+ [@{thm RPCRelayArg_def}, @{thm MClkRelayArg_def}, @{thm S_def},
+ @{thm S4_def}, @{thm WrRequest_def}]) [] []) *})
done
lemma Step1_4_4b: "|- Write rmCh mm ires p l & $S4 rmhist p & (S4 rmhist p)$
@@ -723,10 +723,10 @@
lemma Step1_4_4c: "|- MemReturn rmCh ires p & $S4 rmhist p & (S5 rmhist p)$
& unchanged (e p, c p, r p)
--> unchanged (rtrner memCh!p, resbar rmhist!p)"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "e_def",
- thm "c_def", thm "r_def", thm "resbar_def"]) [] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm e_def},
+ @{thm c_def}, @{thm r_def}, @{thm resbar_def}]) [] [] 1 *})
apply (drule S4_excl [temp_use] S5_excl [temp_use])+
- apply (tactic {* auto_tac (MI_fast_css addsimps2 [thm "MemReturn_def", thm "Return_def"]) *})
+ apply (tactic {* auto_tac (MI_fast_css addsimps2 [@{thm MemReturn_def}, @{thm Return_def}]) *})
done
lemma Step1_4_5a: "|- RPCReply crCh rmCh rst p & $S5 rmhist p & (S6 rmhist p)$
@@ -752,12 +752,12 @@
--> MemReturn memCh (resbar rmhist) p"
apply clarsimp
apply (drule S6_excl [temp_use])+
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "e_def",
- thm "r_def", thm "m_def", thm "MClkReply_def", thm "MemReturn_def",
- thm "Return_def", thm "resbar_def"]) [] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm e_def},
+ @{thm r_def}, @{thm m_def}, @{thm MClkReply_def}, @{thm MemReturn_def},
+ @{thm Return_def}, @{thm resbar_def}]) [] [] 1 *})
apply simp_all (* simplify if-then-else *)
apply (tactic {* ALLGOALS (action_simp_tac (@{simpset} addsimps
- [thm "MClkReplyVal_def", thm "S6_def", thm "S_def"]) [] [thm "MVOKBARFnotNR"]) *})
+ [@{thm MClkReplyVal_def}, @{thm S6_def}, @{thm S_def}]) [] [@{thm MVOKBARFnotNR}]) *})
done
lemma Step1_4_6b: "|- MClkRetry memCh crCh cst p & $S6 rmhist p & (S3 rmhist p)$
@@ -765,8 +765,8 @@
--> MemFail memCh (resbar rmhist) p"
apply clarsimp
apply (drule S3_excl [temp_use])+
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "e_def", thm "r_def",
- thm "m_def", thm "MClkRetry_def", thm "MemFail_def", thm "resbar_def"]) [] [] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm e_def}, @{thm r_def},
+ @{thm m_def}, @{thm MClkRetry_def}, @{thm MemFail_def}, @{thm resbar_def}]) [] [] 1 *})
apply (auto simp: S6_def S_def)
done
@@ -816,7 +816,7 @@
lemma unchanged_safe: "|- (~unchanged (e p, c p, r p, m p, rmhist!p)
--> [UNext memCh mm (resbar rmhist) p]_(rtrner memCh!p, resbar rmhist!p))
--> [UNext memCh mm (resbar rmhist) p]_(rtrner memCh!p, resbar rmhist!p)"
- apply (tactic {* split_idle_tac @{context} [thm "square_def"] 1 *})
+ apply (tactic {* split_idle_tac @{context} [@{thm square_def}] 1 *})
apply force
done
(* turn into (unsafe, looping!) introduction rule *)
@@ -898,15 +898,15 @@
lemma S1_RNextdisabled: "|- S1 rmhist p -->
~Enabled (<RNext memCh mm (resbar rmhist) p>_(rtrner memCh!p, resbar rmhist!p))"
- apply (tactic {* action_simp_tac (@{simpset} addsimps [thm "angle_def",
- thm "S_def", thm "S1_def"]) [notI] [thm "enabledE", temp_elim (thm "Memoryidle")] 1 *})
+ apply (tactic {* action_simp_tac (@{simpset} addsimps [@{thm angle_def},
+ @{thm S_def}, @{thm S1_def}]) [notI] [@{thm enabledE}, temp_elim @{thm Memoryidle}] 1 *})
apply force
done
lemma S1_Returndisabled: "|- S1 rmhist p -->
~Enabled (<MemReturn memCh (resbar rmhist) p>_(rtrner memCh!p, resbar rmhist!p))"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "angle_def", thm "MemReturn_def",
- thm "Return_def", thm "S_def", thm "S1_def"]) [notI] [thm "enabledE"] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm angle_def}, @{thm MemReturn_def},
+ @{thm Return_def}, @{thm S_def}, @{thm S1_def}]) [notI] [@{thm enabledE}] 1 *})
lemma RNext_fair: "|- []<>S1 rmhist p
--> WF(RNext memCh mm (resbar rmhist) p)_(rtrner memCh!p, resbar rmhist!p)"
@@ -986,7 +986,7 @@
& ImpNext p & [HNext rmhist p]_(c p,r p,m p,rmhist!p) & (ALL l. $MemInv mm l)
--> (S4 rmhist p & ires!p = #NotAResult)$
| ((S4 rmhist p & ires!p ~= #NotAResult) | S5 rmhist p)$"
- apply (tactic {* split_idle_tac @{context} [thm "m_def"] 1 *})
+ apply (tactic {* split_idle_tac @{context} [@{thm m_def}] 1 *})
apply (auto dest!: Step1_2_4 [temp_use])
done
@@ -1017,7 +1017,7 @@
lemma S4b_successors: "|- $(S4 rmhist p & ires!p ~= #NotAResult)
& ImpNext p & [HNext rmhist p]_(c p,r p,m p, rmhist!p) & (ALL l. $MemInv mm l)
--> (S4 rmhist p & ires!p ~= #NotAResult)$ | (S5 rmhist p)$"
- apply (tactic {* split_idle_tac @{context} [thm "m_def"] 1 *})
+ apply (tactic {* split_idle_tac @{context} [@{thm m_def}] 1 *})
apply (auto dest!: WriteResult [temp_use] Step1_2_4 [temp_use] ReadResult [temp_use])
done
@@ -1084,16 +1084,16 @@
lemma MClkReplyS6:
"|- $ImpInv rmhist p & <MClkReply memCh crCh cst p>_(c p) --> $S6 rmhist p"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "angle_def",
- thm "MClkReply_def", thm "Return_def", thm "ImpInv_def", thm "S_def",
- thm "S1_def", thm "S2_def", thm "S3_def", thm "S4_def", thm "S5_def"]) [] [] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm angle_def},
+ @{thm MClkReply_def}, @{thm Return_def}, @{thm ImpInv_def}, @{thm S_def},
+ @{thm S1_def}, @{thm S2_def}, @{thm S3_def}, @{thm S4_def}, @{thm S5_def}]) [] [] 1 *})
lemma S6MClkReply_enabled: "|- S6 rmhist p --> Enabled (<MClkReply memCh crCh cst p>_(c p))"
apply (auto simp: c_def intro!: MClkReply_enabled [temp_use])
apply (cut_tac MI_base)
apply (blast dest: base_pair)
apply (tactic {* ALLGOALS (action_simp_tac (@{simpset}
- addsimps [thm "S_def", thm "S6_def"]) [] []) *})
+ addsimps [@{thm S_def}, @{thm S6_def}]) [] []) *})
done
lemma S6_live: "|- [](ImpNext p & [HNext rmhist p]_(c p,r p,m p, rmhist!p) & $(ImpInv rmhist p))
@@ -1104,7 +1104,7 @@
apply (erule InfiniteEnsures)
apply assumption
apply (tactic {* action_simp_tac @{simpset} []
- (map temp_elim [thm "MClkReplyS6", thm "S6MClkReply_successors"]) 1 *})
+ (map temp_elim [@{thm MClkReplyS6}, @{thm S6MClkReply_successors}]) 1 *})
apply (auto simp: SF_def)
apply (erule contrapos_np)
apply (auto intro!: S6MClkReply_enabled [temp_use] elim!: STL4E [temp_use] DmdImplE [temp_use])
@@ -1191,7 +1191,7 @@
==> sigma |= []<>S1 rmhist p"
apply (rule classical)
apply (tactic {* asm_lr_simp_tac (@{simpset} addsimps
- [temp_use (thm "NotBox"), temp_rewrite (thm "NotDmd")]) 1 *})
+ [temp_use @{thm NotBox}, temp_rewrite @{thm NotDmd}]) 1 *})
apply (auto elim!: leadsto_infinite [temp_use] mp dest!: DBImplBD [temp_use])
done
--- a/src/HOL/TLA/Memory/RPC.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/TLA/Memory/RPC.thy Mon Sep 06 19:23:54 2010 +0200
@@ -103,15 +103,15 @@
(* Enabledness of some actions *)
lemma RPCFail_enabled: "!!p. basevars (rtrner send!p, caller rcv!p, rst!p) ==>
|- ~Calling rcv p & Calling send p --> Enabled (RPCFail send rcv rst p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "RPCFail_def",
- thm "Return_def", thm "caller_def", thm "rtrner_def"]) [exI]
- [thm "base_enabled", thm "Pair_inject"] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm RPCFail_def},
+ @{thm Return_def}, @{thm caller_def}, @{thm rtrner_def}]) [exI]
+ [@{thm base_enabled}, @{thm Pair_inject}] 1 *})
lemma RPCReply_enabled: "!!p. basevars (rtrner send!p, caller rcv!p, rst!p) ==>
|- ~Calling rcv p & Calling send p & rst!p = #rpcB
--> Enabled (RPCReply send rcv rst p)"
- by (tactic {* action_simp_tac (@{simpset} addsimps [thm "RPCReply_def",
- thm "Return_def", thm "caller_def", thm "rtrner_def"]) [exI]
- [thm "base_enabled", thm "Pair_inject"] 1 *})
+ by (tactic {* action_simp_tac (@{simpset} addsimps [@{thm RPCReply_def},
+ @{thm Return_def}, @{thm caller_def}, @{thm rtrner_def}]) [exI]
+ [@{thm base_enabled}, @{thm Pair_inject}] 1 *})
end
--- a/src/HOL/Tools/Nitpick/nitpick_isar.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/Nitpick/nitpick_isar.ML Mon Sep 06 19:23:54 2010 +0200
@@ -348,7 +348,7 @@
|>> curry (op =) "genuine")
in
if auto orelse blocking then go ()
- else (Toplevel.thread true (tap go); (false, state))
+ else (Toplevel.thread true (tap go); (false, state)) (* FIXME no threads in user-space *)
end
fun nitpick_trans (params, i) =
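The FIXME added in the hunk above flags the control flow used here (and again in the Sledgehammer hunk further down): run the check synchronously when blocking is requested, otherwise fork a fire-and-forget thread from user-space code. A minimal sketch of that dispatch, assuming the same Isabelle/ML combinators the hunk itself uses (dispatch and check are illustrative names, not part of the changeset):

(* run check now when blocking; otherwise fork it and return immediately.
   tap runs check for its side effect and passes its argument through unchanged *)
fun dispatch blocking (check: unit -> unit) =
  if blocking then check ()
  else (Toplevel.thread true (tap check); ());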
--- a/src/HOL/Tools/Qelim/cooper.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/Qelim/cooper.ML Mon Sep 06 19:23:54 2010 +0200
@@ -423,7 +423,7 @@
val emptyIS = @{cterm "{}::int set"};
val insert_tm = @{cterm "insert :: int => _"};
fun mkISet cts = fold_rev (Thm.capply insert_tm #> Thm.capply) cts emptyIS;
-val eqelem_imp_imp = (thm"eqelem_imp_iff") RS iffD1;
+val eqelem_imp_imp = @{thm eqelem_imp_iff} RS iffD1;
val [A_tm,B_tm] = map (fn th => cprop_of th |> funpow 2 Thm.dest_arg |> Thm.dest_abs NONE |> snd |> Thm.dest_arg1 |> Thm.dest_arg
|> Thm.dest_abs NONE |> snd |> Thm.dest_fun |> Thm.dest_arg)
[asetP,bsetP];
--- a/src/HOL/Tools/Sledgehammer/sledgehammer.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer.ML Mon Sep 06 19:23:54 2010 +0200
@@ -452,6 +452,7 @@
else
()
end
+ (* FIXME no threads in user-space *)
in if blocking then run () else Toplevel.thread true (tap run) |> K () end
val setup =
--- a/src/HOL/Tools/TFL/casesplit.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/TFL/casesplit.ML Mon Sep 06 19:23:54 2010 +0200
@@ -32,9 +32,9 @@
val dest_Trueprop = HOLogic.dest_Trueprop;
val mk_Trueprop = HOLogic.mk_Trueprop;
-val atomize = thms "induct_atomize";
-val rulify = thms "induct_rulify";
-val rulify_fallback = thms "induct_rulify_fallback";
+val atomize = @{thms induct_atomize};
+val rulify = @{thms induct_rulify};
+val rulify_fallback = @{thms induct_rulify_fallback};
end;
--- a/src/HOL/Tools/TFL/tfl.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/TFL/tfl.ML Mon Sep 06 19:23:54 2010 +0200
@@ -564,7 +564,7 @@
val body_th = R.LIST_CONJ (map R.ASSUME full_rqt_prop)
val SELECT_AX = (*in this way we hope to avoid a STATIC dependence upon
theory Hilbert_Choice*)
- thm "Hilbert_Choice.tfl_some"
+ ML_Context.thm "Hilbert_Choice.tfl_some"
handle ERROR msg => cat_error msg
"defer_recdef requires theory Main or at least Hilbert_Choice as parent"
val bar = R.MP (R.ISPECL[tych R'abs, tych R1] SELECT_AX) body_th
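The comment in the hunk above explains why this occurrence is not converted to an antiquotation: @{thm ...} is resolved when the enclosing ML is compiled, which would force a static dependence on theory Hilbert_Choice, whereas a lookup by name defers resolution to run time. A minimal sketch of the two styles, assuming the Isabelle/ML environment of this changeset and an illustrative theorem name foo:

(* static: the antiquotation is checked and inlined when this ML block is
   compiled, so the surrounding theory must already provide foo *)
val foo_static = @{thm foo};

(* dynamic: the name is looked up only when the code runs, so the theorem's
   home theory need not be a parent of the current theory *)
val foo_dynamic = ML_Context.thm "foo";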
--- a/src/HOL/Tools/TFL/thms.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/TFL/thms.ML Mon Sep 06 19:23:54 2010 +0200
@@ -1,20 +1,19 @@
(* Title: HOL/Tools/TFL/thms.ML
- ID: $Id$
Author: Konrad Slind, Cambridge University Computer Laboratory
Copyright 1997 University of Cambridge
*)
structure Thms =
struct
- val WFREC_COROLLARY = thm "tfl_wfrec";
- val WF_INDUCTION_THM = thm "tfl_wf_induct";
- val CUT_DEF = thm "cut_def";
- val eqT = thm "tfl_eq_True";
- val rev_eq_mp = thm "tfl_rev_eq_mp";
- val simp_thm = thm "tfl_simp_thm";
- val P_imp_P_iff_True = thm "tfl_P_imp_P_iff_True";
- val imp_trans = thm "tfl_imp_trans";
- val disj_assoc = thm "tfl_disj_assoc";
- val tfl_disjE = thm "tfl_disjE";
- val choose_thm = thm "tfl_exE";
+ val WFREC_COROLLARY = @{thm tfl_wfrec};
+ val WF_INDUCTION_THM = @{thm tfl_wf_induct};
+ val CUT_DEF = @{thm cut_def};
+ val eqT = @{thm tfl_eq_True};
+ val rev_eq_mp = @{thm tfl_rev_eq_mp};
+ val simp_thm = @{thm tfl_simp_thm};
+ val P_imp_P_iff_True = @{thm tfl_P_imp_P_iff_True};
+ val imp_trans = @{thm tfl_imp_trans};
+ val disj_assoc = @{thm tfl_disj_assoc};
+ val tfl_disjE = @{thm tfl_disjE};
+ val choose_thm = @{thm tfl_exE};
end;
--- a/src/HOL/Tools/meson.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Tools/meson.ML Mon Sep 06 19:23:54 2010 +0200
@@ -68,24 +68,24 @@
val ex_forward = @{thm ex_forward};
val choice = @{thm choice};
-val not_conjD = thm "meson_not_conjD";
-val not_disjD = thm "meson_not_disjD";
-val not_notD = thm "meson_not_notD";
-val not_allD = thm "meson_not_allD";
-val not_exD = thm "meson_not_exD";
-val imp_to_disjD = thm "meson_imp_to_disjD";
-val not_impD = thm "meson_not_impD";
-val iff_to_disjD = thm "meson_iff_to_disjD";
-val not_iffD = thm "meson_not_iffD";
-val conj_exD1 = thm "meson_conj_exD1";
-val conj_exD2 = thm "meson_conj_exD2";
-val disj_exD = thm "meson_disj_exD";
-val disj_exD1 = thm "meson_disj_exD1";
-val disj_exD2 = thm "meson_disj_exD2";
-val disj_assoc = thm "meson_disj_assoc";
-val disj_comm = thm "meson_disj_comm";
-val disj_FalseD1 = thm "meson_disj_FalseD1";
-val disj_FalseD2 = thm "meson_disj_FalseD2";
+val not_conjD = @{thm meson_not_conjD};
+val not_disjD = @{thm meson_not_disjD};
+val not_notD = @{thm meson_not_notD};
+val not_allD = @{thm meson_not_allD};
+val not_exD = @{thm meson_not_exD};
+val imp_to_disjD = @{thm meson_imp_to_disjD};
+val not_impD = @{thm meson_not_impD};
+val iff_to_disjD = @{thm meson_iff_to_disjD};
+val not_iffD = @{thm meson_not_iffD};
+val conj_exD1 = @{thm meson_conj_exD1};
+val conj_exD2 = @{thm meson_conj_exD2};
+val disj_exD = @{thm meson_disj_exD};
+val disj_exD1 = @{thm meson_disj_exD1};
+val disj_exD2 = @{thm meson_disj_exD2};
+val disj_assoc = @{thm meson_disj_assoc};
+val disj_comm = @{thm meson_disj_comm};
+val disj_FalseD1 = @{thm meson_disj_FalseD1};
+val disj_FalseD2 = @{thm meson_disj_FalseD2};
(**** Operators for forward proof ****)
@@ -203,7 +203,7 @@
(*They are typically functional reflexivity axioms and are the converses of
injectivity equivalences*)
-val not_refl_disj_D = thm"meson_not_refl_disj_D";
+val not_refl_disj_D = @{thm meson_not_refl_disj_D};
(*Is either term a Var that does not properly occur in the other term?*)
fun eliminable (t as Var _, u) = t aconv u orelse not (Logic.occs(t,u))
--- a/src/HOL/UNITY/Comp/Alloc.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/UNITY/Comp/Alloc.thy Mon Sep 06 19:23:54 2010 +0200
@@ -426,7 +426,7 @@
apply (rule fst_o_funPair)
done
-ML {* bind_thms ("fst_o_client_map'", make_o_equivs (thm "fst_o_client_map")) *}
+ML {* bind_thms ("fst_o_client_map'", make_o_equivs @{thm fst_o_client_map}) *}
declare fst_o_client_map' [simp]
lemma snd_o_client_map: "snd o client_map = clientState_d.dummy"
@@ -434,7 +434,7 @@
apply (rule snd_o_funPair)
done
-ML {* bind_thms ("snd_o_client_map'", make_o_equivs (thm "snd_o_client_map")) *}
+ML {* bind_thms ("snd_o_client_map'", make_o_equivs @{thm snd_o_client_map}) *}
declare snd_o_client_map' [simp]
@@ -444,28 +444,28 @@
apply record_auto
done
-ML {* bind_thms ("client_o_sysOfAlloc'", make_o_equivs (thm "client_o_sysOfAlloc")) *}
+ML {* bind_thms ("client_o_sysOfAlloc'", make_o_equivs @{thm client_o_sysOfAlloc}) *}
declare client_o_sysOfAlloc' [simp]
lemma allocGiv_o_sysOfAlloc_eq: "allocGiv o sysOfAlloc = allocGiv"
apply record_auto
done
-ML {* bind_thms ("allocGiv_o_sysOfAlloc_eq'", make_o_equivs (thm "allocGiv_o_sysOfAlloc_eq")) *}
+ML {* bind_thms ("allocGiv_o_sysOfAlloc_eq'", make_o_equivs @{thm allocGiv_o_sysOfAlloc_eq}) *}
declare allocGiv_o_sysOfAlloc_eq' [simp]
lemma allocAsk_o_sysOfAlloc_eq: "allocAsk o sysOfAlloc = allocAsk"
apply record_auto
done
-ML {* bind_thms ("allocAsk_o_sysOfAlloc_eq'", make_o_equivs (thm "allocAsk_o_sysOfAlloc_eq")) *}
+ML {* bind_thms ("allocAsk_o_sysOfAlloc_eq'", make_o_equivs @{thm allocAsk_o_sysOfAlloc_eq}) *}
declare allocAsk_o_sysOfAlloc_eq' [simp]
lemma allocRel_o_sysOfAlloc_eq: "allocRel o sysOfAlloc = allocRel"
apply record_auto
done
-ML {* bind_thms ("allocRel_o_sysOfAlloc_eq'", make_o_equivs (thm "allocRel_o_sysOfAlloc_eq")) *}
+ML {* bind_thms ("allocRel_o_sysOfAlloc_eq'", make_o_equivs @{thm allocRel_o_sysOfAlloc_eq}) *}
declare allocRel_o_sysOfAlloc_eq' [simp]
@@ -475,49 +475,49 @@
apply record_auto
done
-ML {* bind_thms ("client_o_sysOfClient'", make_o_equivs (thm "client_o_sysOfClient")) *}
+ML {* bind_thms ("client_o_sysOfClient'", make_o_equivs @{thm client_o_sysOfClient}) *}
declare client_o_sysOfClient' [simp]
lemma allocGiv_o_sysOfClient_eq: "allocGiv o sysOfClient = allocGiv o snd "
apply record_auto
done
-ML {* bind_thms ("allocGiv_o_sysOfClient_eq'", make_o_equivs (thm "allocGiv_o_sysOfClient_eq")) *}
+ML {* bind_thms ("allocGiv_o_sysOfClient_eq'", make_o_equivs @{thm allocGiv_o_sysOfClient_eq}) *}
declare allocGiv_o_sysOfClient_eq' [simp]
lemma allocAsk_o_sysOfClient_eq: "allocAsk o sysOfClient = allocAsk o snd "
apply record_auto
done
-ML {* bind_thms ("allocAsk_o_sysOfClient_eq'", make_o_equivs (thm "allocAsk_o_sysOfClient_eq")) *}
+ML {* bind_thms ("allocAsk_o_sysOfClient_eq'", make_o_equivs @{thm allocAsk_o_sysOfClient_eq}) *}
declare allocAsk_o_sysOfClient_eq' [simp]
lemma allocRel_o_sysOfClient_eq: "allocRel o sysOfClient = allocRel o snd "
apply record_auto
done
-ML {* bind_thms ("allocRel_o_sysOfClient_eq'", make_o_equivs (thm "allocRel_o_sysOfClient_eq")) *}
+ML {* bind_thms ("allocRel_o_sysOfClient_eq'", make_o_equivs @{thm allocRel_o_sysOfClient_eq}) *}
declare allocRel_o_sysOfClient_eq' [simp]
lemma allocGiv_o_inv_sysOfAlloc_eq: "allocGiv o inv sysOfAlloc = allocGiv"
apply (simp add: o_def)
done
-ML {* bind_thms ("allocGiv_o_inv_sysOfAlloc_eq'", make_o_equivs (thm "allocGiv_o_inv_sysOfAlloc_eq")) *}
+ML {* bind_thms ("allocGiv_o_inv_sysOfAlloc_eq'", make_o_equivs @{thm allocGiv_o_inv_sysOfAlloc_eq}) *}
declare allocGiv_o_inv_sysOfAlloc_eq' [simp]
lemma allocAsk_o_inv_sysOfAlloc_eq: "allocAsk o inv sysOfAlloc = allocAsk"
apply (simp add: o_def)
done
-ML {* bind_thms ("allocAsk_o_inv_sysOfAlloc_eq'", make_o_equivs (thm "allocAsk_o_inv_sysOfAlloc_eq")) *}
+ML {* bind_thms ("allocAsk_o_inv_sysOfAlloc_eq'", make_o_equivs @{thm allocAsk_o_inv_sysOfAlloc_eq}) *}
declare allocAsk_o_inv_sysOfAlloc_eq' [simp]
lemma allocRel_o_inv_sysOfAlloc_eq: "allocRel o inv sysOfAlloc = allocRel"
apply (simp add: o_def)
done
-ML {* bind_thms ("allocRel_o_inv_sysOfAlloc_eq'", make_o_equivs (thm "allocRel_o_inv_sysOfAlloc_eq")) *}
+ML {* bind_thms ("allocRel_o_inv_sysOfAlloc_eq'", make_o_equivs @{thm allocRel_o_inv_sysOfAlloc_eq}) *}
declare allocRel_o_inv_sysOfAlloc_eq' [simp]
lemma rel_inv_client_map_drop_map: "(rel o inv client_map o drop_map i o inv sysOfClient) =
@@ -525,7 +525,7 @@
apply (simp add: o_def drop_map_def)
done
-ML {* bind_thms ("rel_inv_client_map_drop_map'", make_o_equivs (thm "rel_inv_client_map_drop_map")) *}
+ML {* bind_thms ("rel_inv_client_map_drop_map'", make_o_equivs @{thm rel_inv_client_map_drop_map}) *}
declare rel_inv_client_map_drop_map [simp]
lemma ask_inv_client_map_drop_map: "(ask o inv client_map o drop_map i o inv sysOfClient) =
@@ -533,7 +533,7 @@
apply (simp add: o_def drop_map_def)
done
-ML {* bind_thms ("ask_inv_client_map_drop_map'", make_o_equivs (thm "ask_inv_client_map_drop_map")) *}
+ML {* bind_thms ("ask_inv_client_map_drop_map'", make_o_equivs @{thm ask_inv_client_map_drop_map}) *}
declare ask_inv_client_map_drop_map [simp]
@@ -812,7 +812,7 @@
ML {*
-bind_thms ("System_Increasing'", list_of_Int (thm "System_Increasing"))
+bind_thms ("System_Increasing'", list_of_Int @{thm System_Increasing})
*}
declare System_Increasing' [intro!]
@@ -903,9 +903,9 @@
text{*safety (1), step 4 (final result!) *}
theorem System_safety: "System : system_safety"
apply (unfold system_safety_def)
- apply (tactic {* rtac (Always_Int_rule [thm "System_sum_bounded",
- thm "Always_tokens_giv_le_allocGiv", thm "Always_tokens_allocRel_le_rel"] RS
- thm "Always_weaken") 1 *})
+ apply (tactic {* rtac (Always_Int_rule [@{thm System_sum_bounded},
+ @{thm Always_tokens_giv_le_allocGiv}, @{thm Always_tokens_allocRel_le_rel}] RS
+ @{thm Always_weaken}) 1 *})
apply auto
apply (rule setsum_fun_mono [THEN order_trans])
apply (drule_tac [2] order_trans)
@@ -946,8 +946,8 @@
lemma System_Bounded_allocAsk: "System : Always {s. ALL i<Nclients.
ALL elt : set ((sub i o allocAsk) s). elt \<le> NbT}"
apply (auto simp add: Collect_all_imp_eq)
- apply (tactic {* rtac (Always_Int_rule [thm "Always_allocAsk_le_ask",
- thm "System_Bounded_ask"] RS thm "Always_weaken") 1 *})
+ apply (tactic {* rtac (Always_Int_rule [@{thm Always_allocAsk_le_ask},
+ @{thm System_Bounded_ask}] RS @{thm Always_weaken}) 1 *})
apply (auto dest: set_mono)
done
--- a/src/HOL/Unix/ROOT.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/Unix/ROOT.ML Mon Sep 06 19:23:54 2010 +0200
@@ -1,2 +1,1 @@
-setmp_noncritical print_mode (! print_mode @ ["no_brackets", "no_type_brackets"])
- use_thys ["Unix"];
+use_thys ["Unix"];
--- a/src/HOL/ex/Hilbert_Classical.thy Mon Sep 06 15:01:37 2010 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,163 +0,0 @@
-(* Title: HOL/ex/Hilbert_Classical.thy
- ID: $Id$
- Author: Stefan Berghofer and Markus Wenzel, TU Muenchen
-*)
-
-header {* Hilbert's choice and classical logic *}
-
-theory Hilbert_Classical imports Main begin
-
-text {*
- Derivation of the classical law of tertium-non-datur by means of
- Hilbert's choice operator (due to M. J. Beeson and J. Harrison).
-*}
-
-
-subsection {* Proof text *}
-
-theorem tnd: "A \<or> \<not> A"
-proof -
- let ?P = "\<lambda>X. X = False \<or> X = True \<and> A"
- let ?Q = "\<lambda>X. X = False \<and> A \<or> X = True"
-
- have a: "?P (Eps ?P)"
- proof (rule someI)
- have "False = False" ..
- thus "?P False" ..
- qed
- have b: "?Q (Eps ?Q)"
- proof (rule someI)
- have "True = True" ..
- thus "?Q True" ..
- qed
-
- from a show ?thesis
- proof
- assume "Eps ?P = True \<and> A"
- hence A ..
- thus ?thesis ..
- next
- assume P: "Eps ?P = False"
- from b show ?thesis
- proof
- assume "Eps ?Q = False \<and> A"
- hence A ..
- thus ?thesis ..
- next
- assume Q: "Eps ?Q = True"
- have neq: "?P \<noteq> ?Q"
- proof
- assume "?P = ?Q"
- hence "Eps ?P = Eps ?Q" by (rule arg_cong)
- also note P
- also note Q
- finally show False by (rule False_neq_True)
- qed
- have "\<not> A"
- proof
- assume a: A
- have "?P = ?Q"
- proof (rule ext)
- fix x show "?P x = ?Q x"
- proof
- assume "?P x"
- thus "?Q x"
- proof
- assume "x = False"
- from this and a have "x = False \<and> A" ..
- thus "?Q x" ..
- next
- assume "x = True \<and> A"
- hence "x = True" ..
- thus "?Q x" ..
- qed
- next
- assume "?Q x"
- thus "?P x"
- proof
- assume "x = False \<and> A"
- hence "x = False" ..
- thus "?P x" ..
- next
- assume "x = True"
- from this and a have "x = True \<and> A" ..
- thus "?P x" ..
- qed
- qed
- qed
- with neq show False by contradiction
- qed
- thus ?thesis ..
- qed
- qed
-qed
-
-
-subsection {* Proof term of text *}
-
-text {*
- {\small @{prf [display, margin = 80] tnd}}
-*}
-
-
-subsection {* Proof script *}
-
-theorem tnd': "A \<or> \<not> A"
- apply (subgoal_tac
- "(((SOME x. x = False \<or> x = True \<and> A) = False) \<or>
- ((SOME x. x = False \<or> x = True \<and> A) = True) \<and> A) \<and>
- (((SOME x. x = False \<and> A \<or> x = True) = False) \<and> A \<or>
- ((SOME x. x = False \<and> A \<or> x = True) = True))")
- prefer 2
- apply (rule conjI)
- apply (rule someI)
- apply (rule disjI1)
- apply (rule refl)
- apply (rule someI)
- apply (rule disjI2)
- apply (rule refl)
- apply (erule conjE)
- apply (erule disjE)
- apply (erule disjE)
- apply (erule conjE)
- apply (erule disjI1)
- prefer 2
- apply (erule conjE)
- apply (erule disjI1)
- apply (subgoal_tac
- "(\<lambda>x. (x = False) \<or> (x = True) \<and> A) \<noteq>
- (\<lambda>x. (x = False) \<and> A \<or> (x = True))")
- prefer 2
- apply (rule notI)
- apply (drule_tac f = "\<lambda>y. SOME x. y x" in arg_cong)
- apply (drule trans, assumption)
- apply (drule sym)
- apply (drule trans, assumption)
- apply (erule False_neq_True)
- apply (rule disjI2)
- apply (rule notI)
- apply (erule notE)
- apply (rule ext)
- apply (rule iffI)
- apply (erule disjE)
- apply (rule disjI1)
- apply (erule conjI)
- apply assumption
- apply (erule conjE)
- apply (erule disjI2)
- apply (erule disjE)
- apply (erule conjE)
- apply (erule disjI1)
- apply (rule disjI2)
- apply (erule conjI)
- apply assumption
- done
-
-
-subsection {* Proof term of script *}
-
-text {*
- {\small @{prf [display, margin = 80] tnd'}}
-*}
-
-end
--- a/src/HOL/ex/ROOT.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOL/ex/ROOT.ML Mon Sep 06 19:23:54 2010 +0200
@@ -70,9 +70,6 @@
HTML.with_charset "utf-8" (no_document use_thys)
["Hebrew", "Chinese", "Serbian"];
-(setmp_noncritical proofs 2 (setmp_noncritical Multithreading.max_threads 1 use_thy))
- "Hilbert_Classical";
-
use_thy "SVC_Oracle";
if getenv "SVC_HOME" = "" then ()
else use_thy "svc_test";
--- a/src/HOLCF/FOCUS/Fstream.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOLCF/FOCUS/Fstream.thy Mon Sep 06 19:23:54 2010 +0200
@@ -156,12 +156,12 @@
lemma slen_fscons_eq_rev:
"(#x < Fin (Suc (Suc n))) = (!a y. x ~= a~> y | #y < Fin (Suc n))"
apply (simp add: fscons_def2 slen_scons_eq_rev)
-apply (tactic {* step_tac (HOL_cs addSEs [thm "DefE"]) 1 *})
-apply (tactic {* step_tac (HOL_cs addSEs [thm "DefE"]) 1 *})
-apply (tactic {* step_tac (HOL_cs addSEs [thm "DefE"]) 1 *})
-apply (tactic {* step_tac (HOL_cs addSEs [thm "DefE"]) 1 *})
-apply (tactic {* step_tac (HOL_cs addSEs [thm "DefE"]) 1 *})
-apply (tactic {* step_tac (HOL_cs addSEs [thm "DefE"]) 1 *})
+apply (tactic {* step_tac (HOL_cs addSEs @{thms DefE}) 1 *})
+apply (tactic {* step_tac (HOL_cs addSEs @{thms DefE}) 1 *})
+apply (tactic {* step_tac (HOL_cs addSEs @{thms DefE}) 1 *})
+apply (tactic {* step_tac (HOL_cs addSEs @{thms DefE}) 1 *})
+apply (tactic {* step_tac (HOL_cs addSEs @{thms DefE}) 1 *})
+apply (tactic {* step_tac (HOL_cs addSEs @{thms DefE}) 1 *})
apply (erule contrapos_np)
apply (fast dest: not_Undef_is_Def [THEN iffD1] elim: DefE)
done
--- a/src/HOLCF/IOA/ABP/Correctness.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOLCF/IOA/ABP/Correctness.thy Mon Sep 06 19:23:54 2010 +0200
@@ -164,16 +164,16 @@
lemma sender_abstraction: "is_weak_ref_map reduce srch_ioa srch_fin_ioa"
apply (tactic {*
- simp_tac (HOL_ss addsimps [thm "srch_fin_ioa_def", thm "rsch_fin_ioa_def",
- thm "srch_ioa_def", thm "rsch_ioa_def", thm "rename_through_pmap",
- thm "channel_abstraction"]) 1 *})
+ simp_tac (HOL_ss addsimps [@{thm srch_fin_ioa_def}, @{thm rsch_fin_ioa_def},
+ @{thm srch_ioa_def}, @{thm rsch_ioa_def}, @{thm rename_through_pmap},
+ @{thm channel_abstraction}]) 1 *})
done
lemma receiver_abstraction: "is_weak_ref_map reduce rsch_ioa rsch_fin_ioa"
apply (tactic {*
- simp_tac (HOL_ss addsimps [thm "srch_fin_ioa_def", thm "rsch_fin_ioa_def",
- thm "srch_ioa_def", thm "rsch_ioa_def", thm "rename_through_pmap",
- thm "channel_abstraction"]) 1 *})
+ simp_tac (HOL_ss addsimps [@{thm srch_fin_ioa_def}, @{thm rsch_fin_ioa_def},
+ @{thm srch_ioa_def}, @{thm rsch_ioa_def}, @{thm rename_through_pmap},
+ @{thm channel_abstraction}]) 1 *})
done
--- a/src/HOLCF/IOA/meta_theory/CompoScheds.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOLCF/IOA/meta_theory/CompoScheds.thy Mon Sep 06 19:23:54 2010 +0200
@@ -289,9 +289,9 @@
ML {*
local
- val defs = [thm "Filter_def", thm "Forall_def", thm "sforall_def", thm "mkex_def",
- thm "stutter_def"]
- val asigs = [thm "asig_of_par", thm "actions_asig_comp"]
+ val defs = [@{thm Filter_def}, @{thm Forall_def}, @{thm sforall_def}, @{thm mkex_def},
+ @{thm stutter_def}]
+ val asigs = [@{thm asig_of_par}, @{thm actions_asig_comp}]
in
fun mkex_induct_tac ctxt sch exA exB =
--- a/src/HOLCF/IOA/meta_theory/SimCorrectness.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOLCF/IOA/meta_theory/SimCorrectness.thy Mon Sep 06 19:23:54 2010 +0200
@@ -171,7 +171,7 @@
!s s'. reachable C s & is_exec_frag C (s,ex) & (s,s'): R -->
mk_trace C$ex = mk_trace A$((corresp_ex_simC A R$ex) s')"
-apply (tactic {* pair_induct_tac @{context} "ex" [thm "is_exec_frag_def"] 1 *})
+apply (tactic {* pair_induct_tac @{context} "ex" [@{thm is_exec_frag_def}] 1 *})
(* cons case *)
apply auto
apply (rename_tac ex a t s s')
--- a/src/HOLCF/ex/Focus_ex.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOLCF/ex/Focus_ex.thy Mon Sep 06 19:23:54 2010 +0200
@@ -180,7 +180,7 @@
done
lemma lemma3: "def_g(g) --> is_g(g)"
-apply (tactic {* simp_tac (HOL_ss addsimps [thm "def_g_def", thm "lemma1", thm "lemma2"]) 1 *})
+apply (tactic {* simp_tac (HOL_ss addsimps [@{thm def_g_def}, @{thm lemma1}, @{thm lemma2}]) 1 *})
apply (rule impI)
apply (erule exE)
apply (rule_tac x = "f" in exI)
@@ -205,7 +205,7 @@
lemma lemma4: "is_g(g) --> def_g(g)"
apply (tactic {* simp_tac (HOL_ss delsimps (@{thms HOL.ex_simps} @ @{thms HOL.all_simps})
- addsimps [thm "lemma1", thm "lemma2", thm "def_g_def"]) 1 *})
+ addsimps [@{thm lemma1}, @{thm lemma2}, @{thm def_g_def}]) 1 *})
apply (rule impI)
apply (erule exE)
apply (rule_tac x = "f" in exI)
--- a/src/HOLCF/ex/Loop.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/HOLCF/ex/Loop.thy Mon Sep 06 19:23:54 2010 +0200
@@ -104,12 +104,12 @@
apply (simp (no_asm) add: step_def2)
apply (rule_tac p = "b$ (iterate n$ (step$b$g) $x) " in trE)
apply (erule notE)
-apply (tactic {* asm_simp_tac (HOLCF_ss addsimps [thm "step_def2"]) 1 *})
+apply (tactic {* asm_simp_tac (HOLCF_ss addsimps [@{thm step_def2}]) 1 *})
apply (tactic "asm_simp_tac HOLCF_ss 1")
apply (rule mp)
apply (erule spec)
-apply (tactic {* asm_simp_tac (HOLCF_ss delsimps [thm "iterate_Suc"]
- addsimps [thm "loop_lemma2"]) 1 *})
+apply (tactic {* asm_simp_tac (HOLCF_ss delsimps [@{thm iterate_Suc}]
+ addsimps [@{thm loop_lemma2}]) 1 *})
apply (rule_tac s = "iterate (Suc n) $ (step$b$g) $x"
and t = "g$ (iterate n$ (step$b$g) $x) " in ssubst)
prefer 2 apply (assumption)
--- a/src/Provers/quasi.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Provers/quasi.ML Mon Sep 06 19:23:54 2010 +0200
@@ -149,7 +149,7 @@
| "~=" => if (x aconv y) then
raise Contr (Thm ([(Thm ([(Thm ([], Less.le_refl)) ,(Asm n)], Less.le_neq_trans))], Less.less_reflE))
else [ NotEq (x, y, Asm n),
- NotEq (y, x,Thm ( [Asm n], thm "not_sym"))]
+ NotEq (y, x,Thm ( [Asm n], @{thm not_sym}))]
| _ => error ("quasi_tac: unknown relation symbol ``" ^ rel ^
"''returned by decomp_quasi."))
| NONE => [];
@@ -300,7 +300,7 @@
let
val leEdge = Le (x,y, Thm ([p], Less.less_imp_le))
val neqEdges = [ NotEq (x,y, Thm ([p], Less.less_imp_neq)),
- NotEq (y,x, Thm ( [Thm ([p], Less.less_imp_neq)], thm "not_sym"))]
+ NotEq (y,x, Thm ( [Thm ([p], Less.less_imp_neq)], @{thm not_sym}))]
in
buildGraphs (ls, addEdge(y,[],(addEdge (x,[(y,leEdge)],leG))), neqEdges@neqE)
end
@@ -459,9 +459,10 @@
let
fun findParallelNeq [] = NONE
| findParallelNeq (e::es) =
- if (x aconv (lower e) andalso y aconv (upper e)) then SOME e
- else if (y aconv (lower e) andalso x aconv (upper e)) then SOME (NotEq (x,y, (Thm ([getprf e], thm "not_sym"))))
- else findParallelNeq es ;
+ if (x aconv (lower e) andalso y aconv (upper e)) then SOME e
+ else if (y aconv (lower e) andalso x aconv (upper e))
+ then SOME (NotEq (x,y, (Thm ([getprf e], @{thm not_sym}))))
+ else findParallelNeq es;
in
(* test if there is a edge x ~= y respectivly y ~= x and *)
(* if it possible to find a path x <= y in leG, thus we can conclude x < y *)
@@ -483,7 +484,7 @@
(case (Library.find_first (fn fact => fact subsumes subgoal) neqE, subgoal) of
(SOME (NotEq (x, y, p)), NotEq (x', y', _)) =>
if (x aconv x' andalso y aconv y') then p
- else Thm ([p], thm "not_sym")
+ else Thm ([p], @{thm not_sym})
| _ => raise Cannot
)
--- a/src/Sequents/ILL.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Sequents/ILL.thy Mon Sep 06 19:23:54 2010 +0200
@@ -126,24 +126,16 @@
ML {*
-
val lazy_cs = empty_pack
- add_safes [thm "tensl", thm "conjr", thm "disjl", thm "promote0",
- thm "context2", thm "context3"]
- add_unsafes [thm "identity", thm "zerol", thm "conjll", thm "conjlr",
- thm "disjrl", thm "disjrr", thm "impr", thm "tensr", thm "impl",
- thm "derelict", thm "weaken", thm "promote1", thm "promote2",
- thm "context1", thm "context4a", thm "context4b"];
+ add_safes [@{thm tensl}, @{thm conjr}, @{thm disjl}, @{thm promote0},
+ @{thm context2}, @{thm context3}]
+ add_unsafes [@{thm identity}, @{thm zerol}, @{thm conjll}, @{thm conjlr},
+ @{thm disjrl}, @{thm disjrr}, @{thm impr}, @{thm tensr}, @{thm impl},
+ @{thm derelict}, @{thm weaken}, @{thm promote1}, @{thm promote2},
+ @{thm context1}, @{thm context4a}, @{thm context4b}];
-local
- val promote0 = thm "promote0"
- val promote1 = thm "promote1"
- val promote2 = thm "promote2"
-in
-
-fun prom_tac n = REPEAT (resolve_tac [promote0,promote1,promote2] n)
-
-end
+fun prom_tac n =
+ REPEAT (resolve_tac [@{thm promote0}, @{thm promote1}, @{thm promote2}] n)
*}
method_setup best_lazy =
@@ -272,13 +264,12 @@
done
ML {*
+val safe_cs = lazy_cs add_safes [@{thm conj_lemma}, @{thm ll_mp}, @{thm contrad1},
+ @{thm contrad2}, @{thm mp_rule1}, @{thm mp_rule2}, @{thm o_a_rule},
+ @{thm a_not_a_rule}]
+ add_unsafes [@{thm aux_impl}];
-val safe_cs = lazy_cs add_safes [thm "conj_lemma", thm "ll_mp", thm "contrad1",
- thm "contrad2", thm "mp_rule1", thm "mp_rule2", thm "o_a_rule",
- thm "a_not_a_rule"]
- add_unsafes [thm "aux_impl"];
-
-val power_cs = safe_cs add_unsafes [thm "impr_contr_der"];
+val power_cs = safe_cs add_unsafes [@{thm impr_contr_der}];
*}
--- a/src/Sequents/LK0.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Sequents/LK0.thy Mon Sep 06 19:23:54 2010 +0200
@@ -210,27 +210,19 @@
ML {*
val prop_pack = empty_pack add_safes
- [thm "basic", thm "refl", thm "TrueR", thm "FalseL",
- thm "conjL", thm "conjR", thm "disjL", thm "disjR", thm "impL", thm "impR",
- thm "notL", thm "notR", thm "iffL", thm "iffR"];
+ [@{thm basic}, @{thm refl}, @{thm TrueR}, @{thm FalseL},
+ @{thm conjL}, @{thm conjR}, @{thm disjL}, @{thm disjR}, @{thm impL}, @{thm impR},
+ @{thm notL}, @{thm notR}, @{thm iffL}, @{thm iffR}];
-val LK_pack = prop_pack add_safes [thm "allR", thm "exL"]
- add_unsafes [thm "allL_thin", thm "exR_thin", thm "the_equality"];
+val LK_pack = prop_pack add_safes [@{thm allR}, @{thm exL}]
+ add_unsafes [@{thm allL_thin}, @{thm exR_thin}, @{thm the_equality}];
-val LK_dup_pack = prop_pack add_safes [thm "allR", thm "exL"]
- add_unsafes [thm "allL", thm "exR", thm "the_equality"];
+val LK_dup_pack = prop_pack add_safes [@{thm allR}, @{thm exL}]
+ add_unsafes [@{thm allL}, @{thm exR}, @{thm the_equality}];
-local
- val thinR = thm "thinR"
- val thinL = thm "thinL"
- val cut = thm "cut"
-in
-
fun lemma_tac th i =
- rtac (thinR RS cut) i THEN REPEAT (rtac thinL i) THEN rtac th i;
-
-end;
+ rtac (@{thm thinR} RS @{thm cut}) i THEN REPEAT (rtac @{thm thinL} i) THEN rtac th i;
*}
method_setup fast_prop =
@@ -321,10 +313,10 @@
(** Equality **)
lemma sym: "|- a=b --> b=a"
- by (tactic {* safe_tac (LK_pack add_safes [thm "subst"]) 1 *})
+ by (tactic {* safe_tac (LK_pack add_safes [@{thm subst}]) 1 *})
lemma trans: "|- a=b --> b=c --> a=c"
- by (tactic {* safe_tac (LK_pack add_safes [thm "subst"]) 1 *})
+ by (tactic {* safe_tac (LK_pack add_safes [@{thm subst}]) 1 *})
(* Symmetry of equality in hypotheses *)
lemmas symL = sym [THEN L_of_imp, standard]
--- a/src/Sequents/S4.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Sequents/S4.thy Mon Sep 06 19:23:54 2010 +0200
@@ -34,12 +34,12 @@
ML {*
structure S4_Prover = Modal_ProverFun
(
- val rewrite_rls = thms "rewrite_rls"
- val safe_rls = thms "safe_rls"
- val unsafe_rls = thms "unsafe_rls" @ [thm "boxR", thm "diaL"]
- val bound_rls = thms "bound_rls" @ [thm "boxL", thm "diaR"]
- val aside_rls = [thm "lstar0", thm "lstar1", thm "lstar2", thm "rstar0",
- thm "rstar1", thm "rstar2"]
+ val rewrite_rls = @{thms rewrite_rls}
+ val safe_rls = @{thms safe_rls}
+ val unsafe_rls = @{thms unsafe_rls} @ [@{thm boxR}, @{thm diaL}]
+ val bound_rls = @{thms bound_rls} @ [@{thm boxL}, @{thm diaR}]
+ val aside_rls = [@{thm lstar0}, @{thm lstar1}, @{thm lstar2}, @{thm rstar0},
+ @{thm rstar1}, @{thm rstar2}]
)
*}
--- a/src/Sequents/S43.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Sequents/S43.thy Mon Sep 06 19:23:54 2010 +0200
@@ -79,12 +79,12 @@
ML {*
structure S43_Prover = Modal_ProverFun
(
- val rewrite_rls = thms "rewrite_rls"
- val safe_rls = thms "safe_rls"
- val unsafe_rls = thms "unsafe_rls" @ [thm "pi1", thm "pi2"]
- val bound_rls = thms "bound_rls" @ [thm "boxL", thm "diaR"]
- val aside_rls = [thm "lstar0", thm "lstar1", thm "lstar2", thm "rstar0",
- thm "rstar1", thm "rstar2", thm "S43pi0", thm "S43pi1", thm "S43pi2"]
+ val rewrite_rls = @{thms rewrite_rls}
+ val safe_rls = @{thms safe_rls}
+ val unsafe_rls = @{thms unsafe_rls} @ [@{thm pi1}, @{thm pi2}]
+ val bound_rls = @{thms bound_rls} @ [@{thm boxL}, @{thm diaR}]
+ val aside_rls = [@{thm lstar0}, @{thm lstar1}, @{thm lstar2}, @{thm rstar0},
+ @{thm rstar1}, @{thm rstar2}, @{thm S43pi0}, @{thm S43pi1}, @{thm S43pi2}]
)
*}
--- a/src/Sequents/T.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Sequents/T.thy Mon Sep 06 19:23:54 2010 +0200
@@ -33,12 +33,12 @@
ML {*
structure T_Prover = Modal_ProverFun
(
- val rewrite_rls = thms "rewrite_rls"
- val safe_rls = thms "safe_rls"
- val unsafe_rls = thms "unsafe_rls" @ [thm "boxR", thm "diaL"]
- val bound_rls = thms "bound_rls" @ [thm "boxL", thm "diaR"]
- val aside_rls = [thm "lstar0", thm "lstar1", thm "lstar2", thm "rstar0",
- thm "rstar1", thm "rstar2"]
+ val rewrite_rls = @{thms rewrite_rls}
+ val safe_rls = @{thms safe_rls}
+ val unsafe_rls = @{thms unsafe_rls} @ [@{thm boxR}, @{thm diaL}]
+ val bound_rls = @{thms bound_rls} @ [@{thm boxL}, @{thm diaR}]
+ val aside_rls = [@{thm lstar0}, @{thm lstar1}, @{thm lstar2}, @{thm rstar0},
+ @{thm rstar1}, @{thm rstar2}]
)
*}
--- a/src/Sequents/Washing.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Sequents/Washing.thy Mon Sep 06 19:23:54 2010 +0200
@@ -33,27 +33,27 @@
(* "activate" definitions for use in proof *)
-ML {* bind_thms ("changeI", [thm "context1"] RL ([thm "change"] RLN (2,[thm "cut"]))) *}
-ML {* bind_thms ("load1I", [thm "context1"] RL ([thm "load1"] RLN (2,[thm "cut"]))) *}
-ML {* bind_thms ("washI", [thm "context1"] RL ([thm "wash"] RLN (2,[thm "cut"]))) *}
-ML {* bind_thms ("dryI", [thm "context1"] RL ([thm "dry"] RLN (2,[thm "cut"]))) *}
+ML {* bind_thms ("changeI", [@{thm context1}] RL ([@{thm change}] RLN (2,[@{thm cut}]))) *}
+ML {* bind_thms ("load1I", [@{thm context1}] RL ([@{thm load1}] RLN (2,[@{thm cut}]))) *}
+ML {* bind_thms ("washI", [@{thm context1}] RL ([@{thm wash}] RLN (2,[@{thm cut}]))) *}
+ML {* bind_thms ("dryI", [@{thm context1}] RL ([@{thm dry}] RLN (2,[@{thm cut}]))) *}
(* a load of dirty clothes and two dollars gives you clean clothes *)
lemma "dollar , dollar , dirty |- clean"
- apply (tactic {* best_tac (lazy_cs add_safes (thms "changeI" @ thms "load1I" @ thms "washI" @ thms "dryI")) 1 *})
+ apply (tactic {* best_tac (lazy_cs add_safes (@{thms changeI} @ @{thms load1I} @ @{thms washI} @ @{thms dryI})) 1 *})
done
(* order of premises doesn't matter *)
lemma "dollar , dirty , dollar |- clean"
- apply (tactic {* best_tac (lazy_cs add_safes (thms "changeI" @ thms "load1I" @ thms "washI" @ thms "dryI")) 1 *})
+ apply (tactic {* best_tac (lazy_cs add_safes (@{thms changeI} @ @{thms load1I} @ @{thms washI} @ @{thms dryI})) 1 *})
done
(* alternative formulation *)
lemma "dollar , dollar |- dirty -o clean"
- apply (tactic {* best_tac (lazy_cs add_safes (thms "changeI" @ thms "load1I" @ thms "washI" @ thms "dryI")) 1 *})
+ apply (tactic {* best_tac (lazy_cs add_safes (@{thms changeI} @ @{thms load1I} @ @{thms washI} @ @{thms dryI})) 1 *})
done
end
--- a/src/Tools/WWW_Find/scgi_server.ML Mon Sep 06 15:01:37 2010 +0200
+++ b/src/Tools/WWW_Find/scgi_server.ML Mon Sep 06 19:23:54 2010 +0200
@@ -50,7 +50,7 @@
else (tracing ("Waiting for a free thread...");
ConditionVar.wait (thread_wait, thread_wait_mutex));
add_thread
- (Thread.fork
+ (Thread.fork (* FIXME avoid low-level Poly/ML thread primitives *)
(fn () => exception_trace threadf,
[Thread.EnableBroadcastInterrupt true,
Thread.InterruptState
--- a/src/ZF/Bool.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/ZF/Bool.thy Mon Sep 06 19:23:54 2010 +0200
@@ -172,45 +172,44 @@
ML
{*
-val bool_def = thm "bool_def";
-
-val bool_defs = thms "bool_defs";
-val singleton_0 = thm "singleton_0";
-val bool_1I = thm "bool_1I";
-val bool_0I = thm "bool_0I";
-val one_not_0 = thm "one_not_0";
-val one_neq_0 = thm "one_neq_0";
-val boolE = thm "boolE";
-val cond_1 = thm "cond_1";
-val cond_0 = thm "cond_0";
-val cond_type = thm "cond_type";
-val cond_simple_type = thm "cond_simple_type";
-val def_cond_1 = thm "def_cond_1";
-val def_cond_0 = thm "def_cond_0";
-val not_1 = thm "not_1";
-val not_0 = thm "not_0";
-val and_1 = thm "and_1";
-val and_0 = thm "and_0";
-val or_1 = thm "or_1";
-val or_0 = thm "or_0";
-val xor_1 = thm "xor_1";
-val xor_0 = thm "xor_0";
-val not_type = thm "not_type";
-val and_type = thm "and_type";
-val or_type = thm "or_type";
-val xor_type = thm "xor_type";
-val bool_typechecks = thms "bool_typechecks";
-val not_not = thm "not_not";
-val not_and = thm "not_and";
-val not_or = thm "not_or";
-val and_absorb = thm "and_absorb";
-val and_commute = thm "and_commute";
-val and_assoc = thm "and_assoc";
-val and_or_distrib = thm "and_or_distrib";
-val or_absorb = thm "or_absorb";
-val or_commute = thm "or_commute";
-val or_assoc = thm "or_assoc";
-val or_and_distrib = thm "or_and_distrib";
+val bool_def = @{thm bool_def};
+val bool_defs = @{thms bool_defs};
+val singleton_0 = @{thm singleton_0};
+val bool_1I = @{thm bool_1I};
+val bool_0I = @{thm bool_0I};
+val one_not_0 = @{thm one_not_0};
+val one_neq_0 = @{thm one_neq_0};
+val boolE = @{thm boolE};
+val cond_1 = @{thm cond_1};
+val cond_0 = @{thm cond_0};
+val cond_type = @{thm cond_type};
+val cond_simple_type = @{thm cond_simple_type};
+val def_cond_1 = @{thm def_cond_1};
+val def_cond_0 = @{thm def_cond_0};
+val not_1 = @{thm not_1};
+val not_0 = @{thm not_0};
+val and_1 = @{thm and_1};
+val and_0 = @{thm and_0};
+val or_1 = @{thm or_1};
+val or_0 = @{thm or_0};
+val xor_1 = @{thm xor_1};
+val xor_0 = @{thm xor_0};
+val not_type = @{thm not_type};
+val and_type = @{thm and_type};
+val or_type = @{thm or_type};
+val xor_type = @{thm xor_type};
+val bool_typechecks = @{thms bool_typechecks};
+val not_not = @{thm not_not};
+val not_and = @{thm not_and};
+val not_or = @{thm not_or};
+val and_absorb = @{thm and_absorb};
+val and_commute = @{thm and_commute};
+val and_assoc = @{thm and_assoc};
+val and_or_distrib = @{thm and_or_distrib};
+val or_absorb = @{thm or_absorb};
+val or_commute = @{thm or_commute};
+val or_assoc = @{thm or_assoc};
+val or_and_distrib = @{thm or_and_distrib};
*}
end
--- a/src/ZF/Cardinal.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/ZF/Cardinal.thy Mon Sep 06 19:23:54 2010 +0200
@@ -1044,134 +1044,134 @@
ML
{*
-val Least_def = thm "Least_def";
-val eqpoll_def = thm "eqpoll_def";
-val lepoll_def = thm "lepoll_def";
-val lesspoll_def = thm "lesspoll_def";
-val cardinal_def = thm "cardinal_def";
-val Finite_def = thm "Finite_def";
-val Card_def = thm "Card_def";
-val eq_imp_not_mem = thm "eq_imp_not_mem";
-val decomp_bnd_mono = thm "decomp_bnd_mono";
-val Banach_last_equation = thm "Banach_last_equation";
-val decomposition = thm "decomposition";
-val schroeder_bernstein = thm "schroeder_bernstein";
-val bij_imp_eqpoll = thm "bij_imp_eqpoll";
-val eqpoll_refl = thm "eqpoll_refl";
-val eqpoll_sym = thm "eqpoll_sym";
-val eqpoll_trans = thm "eqpoll_trans";
-val subset_imp_lepoll = thm "subset_imp_lepoll";
-val lepoll_refl = thm "lepoll_refl";
-val le_imp_lepoll = thm "le_imp_lepoll";
-val eqpoll_imp_lepoll = thm "eqpoll_imp_lepoll";
-val lepoll_trans = thm "lepoll_trans";
-val eqpollI = thm "eqpollI";
-val eqpollE = thm "eqpollE";
-val eqpoll_iff = thm "eqpoll_iff";
-val lepoll_0_is_0 = thm "lepoll_0_is_0";
-val empty_lepollI = thm "empty_lepollI";
-val lepoll_0_iff = thm "lepoll_0_iff";
-val Un_lepoll_Un = thm "Un_lepoll_Un";
-val eqpoll_0_is_0 = thm "eqpoll_0_is_0";
-val eqpoll_0_iff = thm "eqpoll_0_iff";
-val eqpoll_disjoint_Un = thm "eqpoll_disjoint_Un";
-val lesspoll_not_refl = thm "lesspoll_not_refl";
-val lesspoll_irrefl = thm "lesspoll_irrefl";
-val lesspoll_imp_lepoll = thm "lesspoll_imp_lepoll";
-val lepoll_well_ord = thm "lepoll_well_ord";
-val lepoll_iff_leqpoll = thm "lepoll_iff_leqpoll";
-val inj_not_surj_succ = thm "inj_not_surj_succ";
-val lesspoll_trans = thm "lesspoll_trans";
-val lesspoll_trans1 = thm "lesspoll_trans1";
-val lesspoll_trans2 = thm "lesspoll_trans2";
-val Least_equality = thm "Least_equality";
-val LeastI = thm "LeastI";
-val Least_le = thm "Least_le";
-val less_LeastE = thm "less_LeastE";
-val LeastI2 = thm "LeastI2";
-val Least_0 = thm "Least_0";
-val Ord_Least = thm "Ord_Least";
-val Least_cong = thm "Least_cong";
-val cardinal_cong = thm "cardinal_cong";
-val well_ord_cardinal_eqpoll = thm "well_ord_cardinal_eqpoll";
-val Ord_cardinal_eqpoll = thm "Ord_cardinal_eqpoll";
-val well_ord_cardinal_eqE = thm "well_ord_cardinal_eqE";
-val well_ord_cardinal_eqpoll_iff = thm "well_ord_cardinal_eqpoll_iff";
-val Ord_cardinal_le = thm "Ord_cardinal_le";
-val Card_cardinal_eq = thm "Card_cardinal_eq";
-val CardI = thm "CardI";
-val Card_is_Ord = thm "Card_is_Ord";
-val Card_cardinal_le = thm "Card_cardinal_le";
-val Ord_cardinal = thm "Ord_cardinal";
-val Card_iff_initial = thm "Card_iff_initial";
-val lt_Card_imp_lesspoll = thm "lt_Card_imp_lesspoll";
-val Card_0 = thm "Card_0";
-val Card_Un = thm "Card_Un";
-val Card_cardinal = thm "Card_cardinal";
-val cardinal_mono = thm "cardinal_mono";
-val cardinal_lt_imp_lt = thm "cardinal_lt_imp_lt";
-val Card_lt_imp_lt = thm "Card_lt_imp_lt";
-val Card_lt_iff = thm "Card_lt_iff";
-val Card_le_iff = thm "Card_le_iff";
-val well_ord_lepoll_imp_Card_le = thm "well_ord_lepoll_imp_Card_le";
-val lepoll_cardinal_le = thm "lepoll_cardinal_le";
-val lepoll_Ord_imp_eqpoll = thm "lepoll_Ord_imp_eqpoll";
-val lesspoll_imp_eqpoll = thm "lesspoll_imp_eqpoll";
-val cardinal_subset_Ord = thm "cardinal_subset_Ord";
-val cons_lepoll_consD = thm "cons_lepoll_consD";
-val cons_eqpoll_consD = thm "cons_eqpoll_consD";
-val succ_lepoll_succD = thm "succ_lepoll_succD";
-val nat_lepoll_imp_le = thm "nat_lepoll_imp_le";
-val nat_eqpoll_iff = thm "nat_eqpoll_iff";
-val nat_into_Card = thm "nat_into_Card";
-val cardinal_0 = thm "cardinal_0";
-val cardinal_1 = thm "cardinal_1";
-val succ_lepoll_natE = thm "succ_lepoll_natE";
-val n_lesspoll_nat = thm "n_lesspoll_nat";
-val nat_lepoll_imp_ex_eqpoll_n = thm "nat_lepoll_imp_ex_eqpoll_n";
-val lepoll_imp_lesspoll_succ = thm "lepoll_imp_lesspoll_succ";
-val lesspoll_succ_imp_lepoll = thm "lesspoll_succ_imp_lepoll";
-val lesspoll_succ_iff = thm "lesspoll_succ_iff";
-val lepoll_succ_disj = thm "lepoll_succ_disj";
-val lesspoll_cardinal_lt = thm "lesspoll_cardinal_lt";
-val lt_not_lepoll = thm "lt_not_lepoll";
-val Ord_nat_eqpoll_iff = thm "Ord_nat_eqpoll_iff";
-val Card_nat = thm "Card_nat";
-val nat_le_cardinal = thm "nat_le_cardinal";
-val cons_lepoll_cong = thm "cons_lepoll_cong";
-val cons_eqpoll_cong = thm "cons_eqpoll_cong";
-val cons_lepoll_cons_iff = thm "cons_lepoll_cons_iff";
-val cons_eqpoll_cons_iff = thm "cons_eqpoll_cons_iff";
-val singleton_eqpoll_1 = thm "singleton_eqpoll_1";
-val cardinal_singleton = thm "cardinal_singleton";
-val not_0_is_lepoll_1 = thm "not_0_is_lepoll_1";
-val succ_eqpoll_cong = thm "succ_eqpoll_cong";
-val sum_eqpoll_cong = thm "sum_eqpoll_cong";
-val prod_eqpoll_cong = thm "prod_eqpoll_cong";
-val inj_disjoint_eqpoll = thm "inj_disjoint_eqpoll";
-val Diff_sing_lepoll = thm "Diff_sing_lepoll";
-val lepoll_Diff_sing = thm "lepoll_Diff_sing";
-val Diff_sing_eqpoll = thm "Diff_sing_eqpoll";
-val lepoll_1_is_sing = thm "lepoll_1_is_sing";
-val Un_lepoll_sum = thm "Un_lepoll_sum";
-val well_ord_Un = thm "well_ord_Un";
-val disj_Un_eqpoll_sum = thm "disj_Un_eqpoll_sum";
-val Finite_0 = thm "Finite_0";
-val lepoll_nat_imp_Finite = thm "lepoll_nat_imp_Finite";
-val lesspoll_nat_is_Finite = thm "lesspoll_nat_is_Finite";
-val lepoll_Finite = thm "lepoll_Finite";
-val subset_Finite = thm "subset_Finite";
-val Finite_Diff = thm "Finite_Diff";
-val Finite_cons = thm "Finite_cons";
-val Finite_succ = thm "Finite_succ";
-val nat_le_infinite_Ord = thm "nat_le_infinite_Ord";
-val Finite_imp_well_ord = thm "Finite_imp_well_ord";
-val nat_wf_on_converse_Memrel = thm "nat_wf_on_converse_Memrel";
-val nat_well_ord_converse_Memrel = thm "nat_well_ord_converse_Memrel";
-val well_ord_converse = thm "well_ord_converse";
-val ordertype_eq_n = thm "ordertype_eq_n";
-val Finite_well_ord_converse = thm "Finite_well_ord_converse";
-val nat_into_Finite = thm "nat_into_Finite";
+val Least_def = @{thm Least_def};
+val eqpoll_def = @{thm eqpoll_def};
+val lepoll_def = @{thm lepoll_def};
+val lesspoll_def = @{thm lesspoll_def};
+val cardinal_def = @{thm cardinal_def};
+val Finite_def = @{thm Finite_def};
+val Card_def = @{thm Card_def};
+val eq_imp_not_mem = @{thm eq_imp_not_mem};
+val decomp_bnd_mono = @{thm decomp_bnd_mono};
+val Banach_last_equation = @{thm Banach_last_equation};
+val decomposition = @{thm decomposition};
+val schroeder_bernstein = @{thm schroeder_bernstein};
+val bij_imp_eqpoll = @{thm bij_imp_eqpoll};
+val eqpoll_refl = @{thm eqpoll_refl};
+val eqpoll_sym = @{thm eqpoll_sym};
+val eqpoll_trans = @{thm eqpoll_trans};
+val subset_imp_lepoll = @{thm subset_imp_lepoll};
+val lepoll_refl = @{thm lepoll_refl};
+val le_imp_lepoll = @{thm le_imp_lepoll};
+val eqpoll_imp_lepoll = @{thm eqpoll_imp_lepoll};
+val lepoll_trans = @{thm lepoll_trans};
+val eqpollI = @{thm eqpollI};
+val eqpollE = @{thm eqpollE};
+val eqpoll_iff = @{thm eqpoll_iff};
+val lepoll_0_is_0 = @{thm lepoll_0_is_0};
+val empty_lepollI = @{thm empty_lepollI};
+val lepoll_0_iff = @{thm lepoll_0_iff};
+val Un_lepoll_Un = @{thm Un_lepoll_Un};
+val eqpoll_0_is_0 = @{thm eqpoll_0_is_0};
+val eqpoll_0_iff = @{thm eqpoll_0_iff};
+val eqpoll_disjoint_Un = @{thm eqpoll_disjoint_Un};
+val lesspoll_not_refl = @{thm lesspoll_not_refl};
+val lesspoll_irrefl = @{thm lesspoll_irrefl};
+val lesspoll_imp_lepoll = @{thm lesspoll_imp_lepoll};
+val lepoll_well_ord = @{thm lepoll_well_ord};
+val lepoll_iff_leqpoll = @{thm lepoll_iff_leqpoll};
+val inj_not_surj_succ = @{thm inj_not_surj_succ};
+val lesspoll_trans = @{thm lesspoll_trans};
+val lesspoll_trans1 = @{thm lesspoll_trans1};
+val lesspoll_trans2 = @{thm lesspoll_trans2};
+val Least_equality = @{thm Least_equality};
+val LeastI = @{thm LeastI};
+val Least_le = @{thm Least_le};
+val less_LeastE = @{thm less_LeastE};
+val LeastI2 = @{thm LeastI2};
+val Least_0 = @{thm Least_0};
+val Ord_Least = @{thm Ord_Least};
+val Least_cong = @{thm Least_cong};
+val cardinal_cong = @{thm cardinal_cong};
+val well_ord_cardinal_eqpoll = @{thm well_ord_cardinal_eqpoll};
+val Ord_cardinal_eqpoll = @{thm Ord_cardinal_eqpoll};
+val well_ord_cardinal_eqE = @{thm well_ord_cardinal_eqE};
+val well_ord_cardinal_eqpoll_iff = @{thm well_ord_cardinal_eqpoll_iff};
+val Ord_cardinal_le = @{thm Ord_cardinal_le};
+val Card_cardinal_eq = @{thm Card_cardinal_eq};
+val CardI = @{thm CardI};
+val Card_is_Ord = @{thm Card_is_Ord};
+val Card_cardinal_le = @{thm Card_cardinal_le};
+val Ord_cardinal = @{thm Ord_cardinal};
+val Card_iff_initial = @{thm Card_iff_initial};
+val lt_Card_imp_lesspoll = @{thm lt_Card_imp_lesspoll};
+val Card_0 = @{thm Card_0};
+val Card_Un = @{thm Card_Un};
+val Card_cardinal = @{thm Card_cardinal};
+val cardinal_mono = @{thm cardinal_mono};
+val cardinal_lt_imp_lt = @{thm cardinal_lt_imp_lt};
+val Card_lt_imp_lt = @{thm Card_lt_imp_lt};
+val Card_lt_iff = @{thm Card_lt_iff};
+val Card_le_iff = @{thm Card_le_iff};
+val well_ord_lepoll_imp_Card_le = @{thm well_ord_lepoll_imp_Card_le};
+val lepoll_cardinal_le = @{thm lepoll_cardinal_le};
+val lepoll_Ord_imp_eqpoll = @{thm lepoll_Ord_imp_eqpoll};
+val lesspoll_imp_eqpoll = @{thm lesspoll_imp_eqpoll};
+val cardinal_subset_Ord = @{thm cardinal_subset_Ord};
+val cons_lepoll_consD = @{thm cons_lepoll_consD};
+val cons_eqpoll_consD = @{thm cons_eqpoll_consD};
+val succ_lepoll_succD = @{thm succ_lepoll_succD};
+val nat_lepoll_imp_le = @{thm nat_lepoll_imp_le};
+val nat_eqpoll_iff = @{thm nat_eqpoll_iff};
+val nat_into_Card = @{thm nat_into_Card};
+val cardinal_0 = @{thm cardinal_0};
+val cardinal_1 = @{thm cardinal_1};
+val succ_lepoll_natE = @{thm succ_lepoll_natE};
+val n_lesspoll_nat = @{thm n_lesspoll_nat};
+val nat_lepoll_imp_ex_eqpoll_n = @{thm nat_lepoll_imp_ex_eqpoll_n};
+val lepoll_imp_lesspoll_succ = @{thm lepoll_imp_lesspoll_succ};
+val lesspoll_succ_imp_lepoll = @{thm lesspoll_succ_imp_lepoll};
+val lesspoll_succ_iff = @{thm lesspoll_succ_iff};
+val lepoll_succ_disj = @{thm lepoll_succ_disj};
+val lesspoll_cardinal_lt = @{thm lesspoll_cardinal_lt};
+val lt_not_lepoll = @{thm lt_not_lepoll};
+val Ord_nat_eqpoll_iff = @{thm Ord_nat_eqpoll_iff};
+val Card_nat = @{thm Card_nat};
+val nat_le_cardinal = @{thm nat_le_cardinal};
+val cons_lepoll_cong = @{thm cons_lepoll_cong};
+val cons_eqpoll_cong = @{thm cons_eqpoll_cong};
+val cons_lepoll_cons_iff = @{thm cons_lepoll_cons_iff};
+val cons_eqpoll_cons_iff = @{thm cons_eqpoll_cons_iff};
+val singleton_eqpoll_1 = @{thm singleton_eqpoll_1};
+val cardinal_singleton = @{thm cardinal_singleton};
+val not_0_is_lepoll_1 = @{thm not_0_is_lepoll_1};
+val succ_eqpoll_cong = @{thm succ_eqpoll_cong};
+val sum_eqpoll_cong = @{thm sum_eqpoll_cong};
+val prod_eqpoll_cong = @{thm prod_eqpoll_cong};
+val inj_disjoint_eqpoll = @{thm inj_disjoint_eqpoll};
+val Diff_sing_lepoll = @{thm Diff_sing_lepoll};
+val lepoll_Diff_sing = @{thm lepoll_Diff_sing};
+val Diff_sing_eqpoll = @{thm Diff_sing_eqpoll};
+val lepoll_1_is_sing = @{thm lepoll_1_is_sing};
+val Un_lepoll_sum = @{thm Un_lepoll_sum};
+val well_ord_Un = @{thm well_ord_Un};
+val disj_Un_eqpoll_sum = @{thm disj_Un_eqpoll_sum};
+val Finite_0 = @{thm Finite_0};
+val lepoll_nat_imp_Finite = @{thm lepoll_nat_imp_Finite};
+val lesspoll_nat_is_Finite = @{thm lesspoll_nat_is_Finite};
+val lepoll_Finite = @{thm lepoll_Finite};
+val subset_Finite = @{thm subset_Finite};
+val Finite_Diff = @{thm Finite_Diff};
+val Finite_cons = @{thm Finite_cons};
+val Finite_succ = @{thm Finite_succ};
+val nat_le_infinite_Ord = @{thm nat_le_infinite_Ord};
+val Finite_imp_well_ord = @{thm Finite_imp_well_ord};
+val nat_wf_on_converse_Memrel = @{thm nat_wf_on_converse_Memrel};
+val nat_well_ord_converse_Memrel = @{thm nat_well_ord_converse_Memrel};
+val well_ord_converse = @{thm well_ord_converse};
+val ordertype_eq_n = @{thm ordertype_eq_n};
+val Finite_well_ord_converse = @{thm Finite_well_ord_converse};
+val nat_into_Finite = @{thm nat_into_Finite};
*}
end
--- a/src/ZF/CardinalArith.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/ZF/CardinalArith.thy Mon Sep 06 19:23:54 2010 +0200
@@ -911,89 +911,89 @@
ML{*
-val InfCard_def = thm "InfCard_def"
-val cmult_def = thm "cmult_def"
-val cadd_def = thm "cadd_def"
-val jump_cardinal_def = thm "jump_cardinal_def"
-val csucc_def = thm "csucc_def"
+val InfCard_def = @{thm InfCard_def};
+val cmult_def = @{thm cmult_def};
+val cadd_def = @{thm cadd_def};
+val jump_cardinal_def = @{thm jump_cardinal_def};
+val csucc_def = @{thm csucc_def};
-val sum_commute_eqpoll = thm "sum_commute_eqpoll";
-val cadd_commute = thm "cadd_commute";
-val sum_assoc_eqpoll = thm "sum_assoc_eqpoll";
-val well_ord_cadd_assoc = thm "well_ord_cadd_assoc";
-val sum_0_eqpoll = thm "sum_0_eqpoll";
-val cadd_0 = thm "cadd_0";
-val sum_lepoll_self = thm "sum_lepoll_self";
-val cadd_le_self = thm "cadd_le_self";
-val sum_lepoll_mono = thm "sum_lepoll_mono";
-val cadd_le_mono = thm "cadd_le_mono";
-val eq_imp_not_mem = thm "eq_imp_not_mem";
-val sum_succ_eqpoll = thm "sum_succ_eqpoll";
-val nat_cadd_eq_add = thm "nat_cadd_eq_add";
-val prod_commute_eqpoll = thm "prod_commute_eqpoll";
-val cmult_commute = thm "cmult_commute";
-val prod_assoc_eqpoll = thm "prod_assoc_eqpoll";
-val well_ord_cmult_assoc = thm "well_ord_cmult_assoc";
-val sum_prod_distrib_eqpoll = thm "sum_prod_distrib_eqpoll";
-val well_ord_cadd_cmult_distrib = thm "well_ord_cadd_cmult_distrib";
-val prod_0_eqpoll = thm "prod_0_eqpoll";
-val cmult_0 = thm "cmult_0";
-val prod_singleton_eqpoll = thm "prod_singleton_eqpoll";
-val cmult_1 = thm "cmult_1";
-val prod_lepoll_self = thm "prod_lepoll_self";
-val cmult_le_self = thm "cmult_le_self";
-val prod_lepoll_mono = thm "prod_lepoll_mono";
-val cmult_le_mono = thm "cmult_le_mono";
-val prod_succ_eqpoll = thm "prod_succ_eqpoll";
-val nat_cmult_eq_mult = thm "nat_cmult_eq_mult";
-val cmult_2 = thm "cmult_2";
-val sum_lepoll_prod = thm "sum_lepoll_prod";
-val lepoll_imp_sum_lepoll_prod = thm "lepoll_imp_sum_lepoll_prod";
-val nat_cons_lepoll = thm "nat_cons_lepoll";
-val nat_cons_eqpoll = thm "nat_cons_eqpoll";
-val nat_succ_eqpoll = thm "nat_succ_eqpoll";
-val InfCard_nat = thm "InfCard_nat";
-val InfCard_is_Card = thm "InfCard_is_Card";
-val InfCard_Un = thm "InfCard_Un";
-val InfCard_is_Limit = thm "InfCard_is_Limit";
-val ordermap_eqpoll_pred = thm "ordermap_eqpoll_pred";
-val ordermap_z_lt = thm "ordermap_z_lt";
-val InfCard_le_cmult_eq = thm "InfCard_le_cmult_eq";
-val InfCard_cmult_eq = thm "InfCard_cmult_eq";
-val InfCard_cdouble_eq = thm "InfCard_cdouble_eq";
-val InfCard_le_cadd_eq = thm "InfCard_le_cadd_eq";
-val InfCard_cadd_eq = thm "InfCard_cadd_eq";
-val Ord_jump_cardinal = thm "Ord_jump_cardinal";
-val jump_cardinal_iff = thm "jump_cardinal_iff";
-val K_lt_jump_cardinal = thm "K_lt_jump_cardinal";
-val Card_jump_cardinal = thm "Card_jump_cardinal";
-val csucc_basic = thm "csucc_basic";
-val Card_csucc = thm "Card_csucc";
-val lt_csucc = thm "lt_csucc";
-val Ord_0_lt_csucc = thm "Ord_0_lt_csucc";
-val csucc_le = thm "csucc_le";
-val lt_csucc_iff = thm "lt_csucc_iff";
-val Card_lt_csucc_iff = thm "Card_lt_csucc_iff";
-val InfCard_csucc = thm "InfCard_csucc";
-val Finite_into_Fin = thm "Finite_into_Fin";
-val Fin_into_Finite = thm "Fin_into_Finite";
-val Finite_Fin_iff = thm "Finite_Fin_iff";
-val Finite_Un = thm "Finite_Un";
-val Finite_Union = thm "Finite_Union";
-val Finite_induct = thm "Finite_induct";
-val Fin_imp_not_cons_lepoll = thm "Fin_imp_not_cons_lepoll";
-val Finite_imp_cardinal_cons = thm "Finite_imp_cardinal_cons";
-val Finite_imp_succ_cardinal_Diff = thm "Finite_imp_succ_cardinal_Diff";
-val Finite_imp_cardinal_Diff = thm "Finite_imp_cardinal_Diff";
-val nat_implies_well_ord = thm "nat_implies_well_ord";
-val nat_sum_eqpoll_sum = thm "nat_sum_eqpoll_sum";
-val Diff_sing_Finite = thm "Diff_sing_Finite";
-val Diff_Finite = thm "Diff_Finite";
-val Ord_subset_natD = thm "Ord_subset_natD";
-val Ord_nat_subset_into_Card = thm "Ord_nat_subset_into_Card";
-val Finite_cardinal_in_nat = thm "Finite_cardinal_in_nat";
-val Finite_Diff_sing_eq_diff_1 = thm "Finite_Diff_sing_eq_diff_1";
-val cardinal_lt_imp_Diff_not_0 = thm "cardinal_lt_imp_Diff_not_0";
+val sum_commute_eqpoll = @{thm sum_commute_eqpoll};
+val cadd_commute = @{thm cadd_commute};
+val sum_assoc_eqpoll = @{thm sum_assoc_eqpoll};
+val well_ord_cadd_assoc = @{thm well_ord_cadd_assoc};
+val sum_0_eqpoll = @{thm sum_0_eqpoll};
+val cadd_0 = @{thm cadd_0};
+val sum_lepoll_self = @{thm sum_lepoll_self};
+val cadd_le_self = @{thm cadd_le_self};
+val sum_lepoll_mono = @{thm sum_lepoll_mono};
+val cadd_le_mono = @{thm cadd_le_mono};
+val eq_imp_not_mem = @{thm eq_imp_not_mem};
+val sum_succ_eqpoll = @{thm sum_succ_eqpoll};
+val nat_cadd_eq_add = @{thm nat_cadd_eq_add};
+val prod_commute_eqpoll = @{thm prod_commute_eqpoll};
+val cmult_commute = @{thm cmult_commute};
+val prod_assoc_eqpoll = @{thm prod_assoc_eqpoll};
+val well_ord_cmult_assoc = @{thm well_ord_cmult_assoc};
+val sum_prod_distrib_eqpoll = @{thm sum_prod_distrib_eqpoll};
+val well_ord_cadd_cmult_distrib = @{thm well_ord_cadd_cmult_distrib};
+val prod_0_eqpoll = @{thm prod_0_eqpoll};
+val cmult_0 = @{thm cmult_0};
+val prod_singleton_eqpoll = @{thm prod_singleton_eqpoll};
+val cmult_1 = @{thm cmult_1};
+val prod_lepoll_self = @{thm prod_lepoll_self};
+val cmult_le_self = @{thm cmult_le_self};
+val prod_lepoll_mono = @{thm prod_lepoll_mono};
+val cmult_le_mono = @{thm cmult_le_mono};
+val prod_succ_eqpoll = @{thm prod_succ_eqpoll};
+val nat_cmult_eq_mult = @{thm nat_cmult_eq_mult};
+val cmult_2 = @{thm cmult_2};
+val sum_lepoll_prod = @{thm sum_lepoll_prod};
+val lepoll_imp_sum_lepoll_prod = @{thm lepoll_imp_sum_lepoll_prod};
+val nat_cons_lepoll = @{thm nat_cons_lepoll};
+val nat_cons_eqpoll = @{thm nat_cons_eqpoll};
+val nat_succ_eqpoll = @{thm nat_succ_eqpoll};
+val InfCard_nat = @{thm InfCard_nat};
+val InfCard_is_Card = @{thm InfCard_is_Card};
+val InfCard_Un = @{thm InfCard_Un};
+val InfCard_is_Limit = @{thm InfCard_is_Limit};
+val ordermap_eqpoll_pred = @{thm ordermap_eqpoll_pred};
+val ordermap_z_lt = @{thm ordermap_z_lt};
+val InfCard_le_cmult_eq = @{thm InfCard_le_cmult_eq};
+val InfCard_cmult_eq = @{thm InfCard_cmult_eq};
+val InfCard_cdouble_eq = @{thm InfCard_cdouble_eq};
+val InfCard_le_cadd_eq = @{thm InfCard_le_cadd_eq};
+val InfCard_cadd_eq = @{thm InfCard_cadd_eq};
+val Ord_jump_cardinal = @{thm Ord_jump_cardinal};
+val jump_cardinal_iff = @{thm jump_cardinal_iff};
+val K_lt_jump_cardinal = @{thm K_lt_jump_cardinal};
+val Card_jump_cardinal = @{thm Card_jump_cardinal};
+val csucc_basic = @{thm csucc_basic};
+val Card_csucc = @{thm Card_csucc};
+val lt_csucc = @{thm lt_csucc};
+val Ord_0_lt_csucc = @{thm Ord_0_lt_csucc};
+val csucc_le = @{thm csucc_le};
+val lt_csucc_iff = @{thm lt_csucc_iff};
+val Card_lt_csucc_iff = @{thm Card_lt_csucc_iff};
+val InfCard_csucc = @{thm InfCard_csucc};
+val Finite_into_Fin = @{thm Finite_into_Fin};
+val Fin_into_Finite = @{thm Fin_into_Finite};
+val Finite_Fin_iff = @{thm Finite_Fin_iff};
+val Finite_Un = @{thm Finite_Un};
+val Finite_Union = @{thm Finite_Union};
+val Finite_induct = @{thm Finite_induct};
+val Fin_imp_not_cons_lepoll = @{thm Fin_imp_not_cons_lepoll};
+val Finite_imp_cardinal_cons = @{thm Finite_imp_cardinal_cons};
+val Finite_imp_succ_cardinal_Diff = @{thm Finite_imp_succ_cardinal_Diff};
+val Finite_imp_cardinal_Diff = @{thm Finite_imp_cardinal_Diff};
+val nat_implies_well_ord = @{thm nat_implies_well_ord};
+val nat_sum_eqpoll_sum = @{thm nat_sum_eqpoll_sum};
+val Diff_sing_Finite = @{thm Diff_sing_Finite};
+val Diff_Finite = @{thm Diff_Finite};
+val Ord_subset_natD = @{thm Ord_subset_natD};
+val Ord_nat_subset_into_Card = @{thm Ord_nat_subset_into_Card};
+val Finite_cardinal_in_nat = @{thm Finite_cardinal_in_nat};
+val Finite_Diff_sing_eq_diff_1 = @{thm Finite_Diff_sing_eq_diff_1};
+val cardinal_lt_imp_Diff_not_0 = @{thm cardinal_lt_imp_Diff_not_0};
*}
end
--- a/src/ZF/Cardinal_AC.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/ZF/Cardinal_AC.thy Mon Sep 06 19:23:54 2010 +0200
@@ -193,8 +193,8 @@
ML
{*
-val cardinal_0_iff_0 = thm "cardinal_0_iff_0";
-val cardinal_lt_iff_lesspoll = thm "cardinal_lt_iff_lesspoll";
+val cardinal_0_iff_0 = @{thm cardinal_0_iff_0};
+val cardinal_lt_iff_lesspoll = @{thm cardinal_lt_iff_lesspoll};
*}
end
--- a/src/ZF/Epsilon.thy Mon Sep 06 15:01:37 2010 +0200
+++ b/src/ZF/Epsilon.thy Mon Sep 06 19:23:54 2010 +0200
@@ -398,56 +398,56 @@
ML
{*
-val arg_subset_eclose = thm "arg_subset_eclose";
-val arg_into_eclose = thm "arg_into_eclose";
-val Transset_eclose = thm "Transset_eclose";
-val eclose_subset = thm "eclose_subset";
-val ecloseD = thm "ecloseD";
-val arg_in_eclose_sing = thm "arg_in_eclose_sing";
-val arg_into_eclose_sing = thm "arg_into_eclose_sing";
-val eclose_induct = thm "eclose_induct";
-val eps_induct = thm "eps_induct";
-val eclose_least = thm "eclose_least";
-val eclose_induct_down = thm "eclose_induct_down";
-val Transset_eclose_eq_arg = thm "Transset_eclose_eq_arg";
-val mem_eclose_trans = thm "mem_eclose_trans";
-val mem_eclose_sing_trans = thm "mem_eclose_sing_trans";
-val under_Memrel = thm "under_Memrel";
-val under_Memrel_eclose = thm "under_Memrel_eclose";
-val wfrec_ssubst = thm "wfrec_ssubst";
-val wfrec_eclose_eq = thm "wfrec_eclose_eq";
-val wfrec_eclose_eq2 = thm "wfrec_eclose_eq2";
-val transrec = thm "transrec";
-val def_transrec = thm "def_transrec";
-val transrec_type = thm "transrec_type";
-val eclose_sing_Ord = thm "eclose_sing_Ord";
-val Ord_transrec_type = thm "Ord_transrec_type";
-val rank = thm "rank";
-val Ord_rank = thm "Ord_rank";
-val rank_of_Ord = thm "rank_of_Ord";
-val rank_lt = thm "rank_lt";
-val eclose_rank_lt = thm "eclose_rank_lt";
-val rank_mono = thm "rank_mono";
-val rank_Pow = thm "rank_Pow";
-val rank_0 = thm "rank_0";
-val rank_succ = thm "rank_succ";
-val rank_Union = thm "rank_Union";
-val rank_eclose = thm "rank_eclose";
-val rank_pair1 = thm "rank_pair1";
-val rank_pair2 = thm "rank_pair2";
-val the_equality_if = thm "the_equality_if";
-val rank_apply = thm "rank_apply";
-val mem_eclose_subset = thm "mem_eclose_subset";
-val eclose_mono = thm "eclose_mono";
-val eclose_idem = thm "eclose_idem";
-val transrec2_0 = thm "transrec2_0";
-val transrec2_succ = thm "transrec2_succ";
-val transrec2_Limit = thm "transrec2_Limit";
-val recursor_0 = thm "recursor_0";
-val recursor_succ = thm "recursor_succ";
-val rec_0 = thm "rec_0";
-val rec_succ = thm "rec_succ";
-val rec_type = thm "rec_type";
+val arg_subset_eclose = @{thm arg_subset_eclose};
+val arg_into_eclose = @{thm arg_into_eclose};
+val Transset_eclose = @{thm Transset_eclose};
+val eclose_subset = @{thm eclose_subset};
+val ecloseD = @{thm ecloseD};
+val arg_in_eclose_sing = @{thm arg_in_eclose_sing};
+val arg_into_eclose_sing = @{thm arg_into_eclose_sing};
+val eclose_induct = @{thm eclose_induct};
+val eps_induct = @{thm eps_induct};
+val eclose_least = @{thm eclose_least};
+val eclose_induct_down = @{thm eclose_induct_down};
+val Transset_eclose_eq_arg = @{thm Transset_eclose_eq_arg};
+val mem_eclose_trans = @{thm mem_eclose_trans};
+val mem_eclose_sing_trans = @{thm mem_eclose_sing_trans};
+val under_Memrel = @{thm under_Memrel};
+val under_Memrel_eclose = @{thm under_Memrel_eclose};
+val wfrec_ssubst = @{thm wfrec_ssubst};
+val wfrec_eclose_eq = @{thm wfrec_eclose_eq};
+val wfrec_eclose_eq2 = @{thm wfrec_eclose_eq2};
+val transrec = @{thm transrec};
+val def_transrec = @{thm def_transrec};
+val transrec_type = @{thm transrec_type};
+val eclose_sing_Ord = @{thm eclose_sing_Ord};
+val Ord_transrec_type = @{thm Ord_transrec_type};
+val rank = @{thm rank};
+val Ord_rank = @{thm Ord_rank};
+val rank_of_Ord = @{thm rank_of_Ord};
+val rank_lt = @{thm rank_lt};
+val eclose_rank_lt = @{thm eclose_rank_lt};
+val rank_mono = @{thm rank_mono};
+val rank_Pow = @{thm rank_Pow};
+val rank_0 = @{thm rank_0};
+val rank_succ = @{thm rank_succ};
+val rank_Union = @{thm rank_Union};
+val rank_eclose = @{thm rank_eclose};
+val rank_pair1 = @{thm rank_pair1};
+val rank_pair2 = @{thm rank_pair2};
+val the_equality_if = @{thm the_equality_if};
+val rank_apply = @{thm rank_apply};
+val mem_eclose_subset = @{thm mem_eclose_subset};
+val eclose_mono = @{thm eclose_mono};
+val eclose_idem = @{thm eclose_idem};
+val transrec2_0 = @{thm transrec2_0};
+val transrec2_succ = @{thm transrec2_succ};
+val transrec2_Limit = @{thm transrec2_Limit};
+val recursor_0 = @{thm recursor_0};
+val recursor_succ = @{thm recursor_succ};
+val rec_0 = @{thm rec_0};
+val rec_succ = @{thm rec_succ};
+val rec_type = @{thm rec_type};
*}
end