misc tuning and modernization;
author wenzelm
Sat, 01 Feb 2014 17:56:03 +0100
changeset 55228 901a6696cdd8
parent 55227 653de351d21c
child 55229 08f2ebb65078
misc tuning and modernization;
src/Sequents/ILL.thy
src/Sequents/LK.thy
src/Sequents/LK/Hard_Quantifiers.thy
src/Sequents/LK/Nat.thy
src/Sequents/LK0.thy
src/Sequents/Sequents.thy
src/Sequents/Washing.thy
src/Sequents/prover.ML
src/Sequents/simpdata.ML
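
Across these files, the old theorem packs -- plain ML values built with the add_safes/add_unsafes infixes and passed explicitly to fast_tac/best_tac -- are replaced by context data managed by the new Cla structure in prover.ML: packs live in the proof context, rules are declared with Cla.add_safe/Cla.add_unsafe or the [safe]/[unsafe] attributes, and the sequent provers become Isar methods (safe, step, pc, fast, best) with "add:"/"add!:" sections. A minimal sketch of the new idiom, assuming a theory that imports LK0 so the rules basic and allL are in scope:

  ML {*
    val my_pack =
      Cla.put_pack Cla.empty_pack @{context}
      |> Cla.add_safe @{thm basic}     (* safe rule: tried eagerly *)
      |> Cla.add_unsafe @{thm allL}    (* unsafe rule: tried only when no safe rule applies *)
      |> Cla.get_pack;
  *}
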
--- a/src/Sequents/ILL.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/ILL.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -126,23 +126,25 @@
 
 
 ML {*
-val lazy_cs = empty_pack
-  add_safes [@{thm tensl}, @{thm conjr}, @{thm disjl}, @{thm promote0},
-    @{thm context2}, @{thm context3}]
-  add_unsafes [@{thm identity}, @{thm zerol}, @{thm conjll}, @{thm conjlr},
-    @{thm disjrl}, @{thm disjrr}, @{thm impr}, @{thm tensr}, @{thm impl},
-    @{thm derelict}, @{thm weaken}, @{thm promote1}, @{thm promote2},
-    @{thm context1}, @{thm context4a}, @{thm context4b}];
-
-fun prom_tac n =
-  REPEAT (resolve_tac [@{thm promote0}, @{thm promote1}, @{thm promote2}] n)
+  val lazy_pack =
+    Cla.put_pack Cla.empty_pack @{context}
+    |> fold_rev Cla.add_safe @{thms tensl conjr disjl promote0 context2 context3}
+    |> fold_rev Cla.add_unsafe @{thms
+      identity zerol conjll conjlr
+      disjrl disjrr impr tensr impl
+      derelict weaken promote1 promote2
+      context1 context4a context4b}
+    |> Cla.get_pack;
+
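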
+  fun promote_tac i =
+    REPEAT (resolve_tac @{thms promote0 promote1 promote2} i);
 *}
 
-method_setup best_lazy =
-  {* Scan.succeed (K (SIMPLE_METHOD' (best_tac lazy_cs))) *}
-  "lazy classical reasoning"
+method_setup best_lazy = {*
+  Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack lazy_pack ctxt)))
+*} "lazy classical reasoning"
 
-lemma aux_impl: "$F, $G |- A ==> $F, !(A -o B), $G |- B"
+lemma aux_impl: "$F, $G |- A \<Longrightarrow> $F, !(A -o B), $G |- B"
   apply (rule derelict)
   apply (rule impl)
   apply (rule_tac [2] identity)
@@ -150,7 +152,7 @@
   apply assumption
   done
 
-lemma conj_lemma: " $F, !A, !B, $G |- C ==> $F, !(A && B), $G |- C"
+lemma conj_lemma: " $F, !A, !B, $G |- C \<Longrightarrow> $F, !(A && B), $G |- C"
   apply (rule contract)
   apply (rule_tac A = " (!A) >< (!B) " in cut)
   apply (rule_tac [2] tensr)
@@ -170,13 +172,13 @@
   apply (rule context1)
   done
 
-lemma impr_contract: "!A, !A, $G |- B ==> $G |- (!A) -o B"
+lemma impr_contract: "!A, !A, $G |- B \<Longrightarrow> $G |- (!A) -o B"
   apply (rule impr)
   apply (rule contract)
   apply assumption
   done
 
-lemma impr_contr_der: "A, !A, $G |- B ==> $G |- (!A) -o B"
+lemma impr_contr_der: "A, !A, $G |- B \<Longrightarrow> $G |- (!A) -o B"
   apply (rule impr)
   apply (rule contract)
   apply (rule derelict)
@@ -207,7 +209,7 @@
   apply (rule context1)
   done
 
-lemma mp_rule1: "$F, B, $G, $H |- C ==> $F, A, $G, A -o B, $H |- C"
+lemma mp_rule1: "$F, B, $G, $H |- C \<Longrightarrow> $F, A, $G, A -o B, $H |- C"
   apply (rule_tac A = "B" in cut)
   apply (rule_tac [2] ll_mp)
   prefer 2 apply (assumption)
@@ -216,7 +218,7 @@
   apply (rule context1)
   done
 
-lemma mp_rule2: "$F, B, $G, $H |- C ==> $F, A -o B, $G, A, $H |- C"
+lemma mp_rule2: "$F, B, $G, $H |- C \<Longrightarrow> $F, A -o B, $G, A, $H |- C"
   apply (rule_tac A = "B" in cut)
   apply (rule_tac [2] ll_mp)
   prefer 2 apply (assumption)
@@ -228,7 +230,7 @@
 lemma or_to_and: "!((!(A ++ B)) -o 0) |- !( ((!A) -o 0) && ((!B) -o 0))"
   by best_lazy
 
-lemma o_a_rule: "$F, !( ((!A) -o 0) && ((!B) -o 0)), $G |- C ==>  
+lemma o_a_rule: "$F, !( ((!A) -o 0) && ((!B) -o 0)), $G |- C \<Longrightarrow>
           $F, !((!(A ++ B)) -o 0), $G |- C"
   apply (rule cut)
   apply (rule_tac [2] or_to_and)
@@ -256,7 +258,7 @@
   apply best_lazy
   done
 
-lemma a_not_a_rule: "$J1, !A -o 0, $J2 |- B ==> $J1, !A -o (!A -o 0), $J2 |- B"
+lemma a_not_a_rule: "$J1, !A -o 0, $J2 |- B \<Longrightarrow> $J1, !A -o (!A -o 0), $J2 |- B"
   apply (rule_tac A = "!A -o 0" in cut)
   apply (rule_tac [2] a_not_a)
   prefer 2 apply (assumption)
@@ -264,20 +266,25 @@
   done
 
 ML {*
-val safe_cs = lazy_cs add_safes [@{thm conj_lemma}, @{thm ll_mp}, @{thm contrad1},
-                                 @{thm contrad2}, @{thm mp_rule1}, @{thm mp_rule2}, @{thm o_a_rule},
-                                 @{thm a_not_a_rule}]
-                      add_unsafes [@{thm aux_impl}];
+  val safe_pack =
+    Cla.put_pack lazy_pack @{context}
+    |> fold_rev Cla.add_safe @{thms conj_lemma ll_mp contrad1
+        contrad2 mp_rule1 mp_rule2 o_a_rule a_not_a_rule}
+    |> Cla.add_unsafe @{thm aux_impl}
+    |> Cla.get_pack;
 
-val power_cs = safe_cs add_unsafes [@{thm impr_contr_der}];
+  val power_pack =
+    Cla.put_pack safe_pack @{context}
+    |> Cla.add_unsafe @{thm impr_contr_der}
+    |> Cla.get_pack;
 *}
 
 
 method_setup best_safe =
-  {* Scan.succeed (K (SIMPLE_METHOD' (best_tac safe_cs))) *}
+  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack safe_pack ctxt))) *}
 
 method_setup best_power =
-  {* Scan.succeed (K (SIMPLE_METHOD' (best_tac power_cs))) *}
+  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack power_pack ctxt))) *}
 
 
 (* Some examples from Troelstra and van Dalen *)
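
The new method_setup wrappers above are thin: each installs its stored pack into the current proof context and runs the corresponding search tactic. The equivalent ML-level tactic, as a sketch assuming lazy_pack is in scope (for instance inside this theory):

  ML {*
    (* best-first search over the lazy pack; mirrors the best_lazy method *)
    fun best_lazy_tac ctxt = Cla.best_tac (Cla.put_pack lazy_pack ctxt);
  *}
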
--- a/src/Sequents/LK.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/LK.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -35,8 +35,7 @@
   "|- P & ~P <-> False"
   "|- ~P & P <-> False"
   "|- (P & Q) & R <-> P & (Q & R)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma disj_simps:
   "|- P | True <-> True"
@@ -46,14 +45,12 @@
   "|- P | P <-> P"
   "|- P | P | Q <-> P | Q"
   "|- (P | Q) | R <-> P | (Q | R)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma not_simps:
   "|- ~ False <-> True"
   "|- ~ True <-> False"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma imp_simps:
   "|- (P --> False) <-> ~P"
@@ -62,8 +59,7 @@
   "|- (True --> P) <-> P"
   "|- (P --> P) <-> True"
   "|- (P --> ~P) <-> ~P"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma iff_simps:
   "|- (True <-> P) <-> P"
@@ -71,8 +67,7 @@
   "|- (P <-> P) <-> True"
   "|- (False <-> P) <-> ~P"
   "|- (P <-> False) <-> ~P"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma quant_simps:
   "!!P. |- (ALL x. P) <-> P"
@@ -81,8 +76,7 @@
   "!!P. |- (EX x. P) <-> P"
   "!!P. |- (EX x. x=t & P(x)) <-> P(t)"
   "!!P. |- (EX x. t=x & P(x)) <-> P(t)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 
 subsection {* Miniscoping: pushing quantifiers in *}
@@ -101,8 +95,7 @@
   "!!P Q. |- (EX x. P | Q(x)) <-> P | (EX x. Q(x))"
   "!!P Q. |- (EX x. P(x) --> Q) <-> (ALL x. P(x)) --> Q"
   "!!P Q. |- (EX x. P --> Q(x)) <-> P --> (EX x. Q(x))"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 text {*universal miniscoping*}
 lemma all_simps:
@@ -112,27 +105,25 @@
   "!!P Q. |- (ALL x. P --> Q(x)) <-> P --> (ALL x. Q(x))"
   "!!P Q. |- (ALL x. P(x) | Q) <-> (ALL x. P(x)) | Q"
   "!!P Q. |- (ALL x. P | Q(x)) <-> P | (ALL x. Q(x))"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 text {*These are NOT supplied by default!*}
 lemma distrib_simps:
   "|- P & (Q | R) <-> P&Q | P&R"
   "|- (Q | R) & P <-> Q&P | R&P"
   "|- (P | Q --> R) <-> (P --> R) & (Q --> R)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma P_iff_F: "|- ~P ==> |- (P <-> False)"
   apply (erule thinR [THEN cut])
-  apply (tactic {* fast_tac LK_pack 1 *})
+  apply fast
   done
 
 lemmas iff_reflection_F = P_iff_F [THEN iff_reflection]
 
 lemma P_iff_T: "|- P ==> |- (P <-> True)"
   apply (erule thinR [THEN cut])
-  apply (tactic {* fast_tac LK_pack 1 *})
+  apply fast
   done
 
 lemmas iff_reflection_T = P_iff_T [THEN iff_reflection]
@@ -144,23 +135,20 @@
   "|- ~ ~ P <-> P"
   "|- (~P --> P) <-> P"
   "|- (~P <-> ~Q) <-> (P<->Q)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 
 subsection {* Named rewrite rules *}
 
 lemma conj_commute: "|- P&Q <-> Q&P"
   and conj_left_commute: "|- P&(Q&R) <-> Q&(P&R)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemmas conj_comms = conj_commute conj_left_commute
 
 lemma disj_commute: "|- P|Q <-> Q|P"
   and disj_left_commute: "|- P|(Q|R) <-> Q|(P|R)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemmas disj_comms = disj_commute disj_left_commute
 
@@ -181,20 +169,19 @@
   and de_Morgan_conj: "|- (~(P & Q)) <-> (~P | ~Q)"
 
   and not_iff: "|- ~(P <-> Q) <-> (P <-> ~Q)"
-  apply (tactic {* ALLGOALS (fast_tac (LK_pack add_safes @{thms subst})) *})
-  done
+  by (fast add!: subst)+
 
 lemma imp_cong:
   assumes p1: "|- P <-> P'"
     and p2: "|- P' ==> |- Q <-> Q'"
   shows "|- (P-->Q) <-> (P'-->Q')"
   apply (tactic {* lemma_tac @{thm p1} 1 *})
-  apply (tactic {* safe_tac LK_pack 1 *})
+  apply safe
    apply (tactic {*
      REPEAT (rtac @{thm cut} 1 THEN
        DEPTH_SOLVE_1
          (resolve_tac [@{thm thinL}, @{thm thinR}, @{thm p2} COMP @{thm monotonic}] 1) THEN
-           safe_tac LK_pack 1) *})
+           Cla.safe_tac @{context} 1) *})
   done
 
 lemma conj_cong:
@@ -202,17 +189,16 @@
     and p2: "|- P' ==> |- Q <-> Q'"
   shows "|- (P&Q) <-> (P'&Q')"
   apply (tactic {* lemma_tac @{thm p1} 1 *})
-  apply (tactic {* safe_tac LK_pack 1 *})
+  apply safe
    apply (tactic {*
      REPEAT (rtac @{thm cut} 1 THEN
        DEPTH_SOLVE_1
          (resolve_tac [@{thm thinL}, @{thm thinR}, @{thm p2} COMP @{thm monotonic}] 1) THEN
-           safe_tac LK_pack 1) *})
+           Cla.safe_tac @{context} 1) *})
   done
 
 lemma eq_sym_conv: "|- (x=y) <-> (y=x)"
-  apply (tactic {* fast_tac (LK_pack add_safes @{thms subst}) 1 *})
-  done
+  by (fast add!: subst)
 
 ML_file "simpdata.ML"
 setup {* map_theory_simpset (put_simpset LK_ss) *}
@@ -229,17 +215,17 @@
    apply (tactic {* simp_tac (@{context} addsimps @{thms if_P}) 2 *})
   apply (rule_tac P = "~Q" in cut)
    apply (tactic {* simp_tac (@{context} addsimps @{thms if_not_P}) 2 *})
-  apply (tactic {* fast_tac LK_pack 1 *})
+  apply fast
   done
 
 lemma if_cancel: "|- (if P then x else x) = x"
   apply (tactic {* lemma_tac @{thm split_if} 1 *})
-  apply (tactic {* fast_tac LK_pack 1 *})
+  apply fast
   done
 
 lemma if_eq_cancel: "|- (if x=y then y else x) = x"
   apply (tactic {* lemma_tac @{thm split_if} 1 *})
-  apply (tactic {* safe_tac LK_pack 1 *})
+  apply safe
   apply (rule symL)
   apply (rule basic)
   done
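
In the rewritten proofs above, the method arguments follow the sections set up in prover.ML: "add!:" inserts the listed theorems as safe rules for that invocation, while "add:" inserts them as unsafe rules. For illustration only, a restatement of LK0's sym lemma (whose own proof uses safe add!: subst) with the new syntax:

  lemma "|- a=b --> b=a"   (* restates LK0's sym, for illustration only *)
    by (fast add!: subst)  (* "add!:" = safe rules for this call; "add:" = unsafe *)
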
--- a/src/Sequents/LK/Hard_Quantifiers.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/LK/Hard_Quantifiers.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -73,7 +73,7 @@
 lemma "|- ~(EX x. S(x)&Q(x)) & (ALL x. P(x) --> Q(x)|R(x)) &   
      ~(EX x. P(x)) --> (EX x. Q(x)) & (ALL x. Q(x)|R(x) --> S(x))   
     --> (EX x. P(x)&R(x))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 25"
 lemma "|- (EX x. P(x)) &   
@@ -87,7 +87,7 @@
 lemma "|- ((EX x. p(x)) <-> (EX x. q(x))) &        
       (ALL x. ALL y. p(x) & q(y) --> (r(x) <-> s(y)))    
   --> ((ALL x. p(x)-->r(x)) <-> (ALL x. q(x)-->s(x)))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 27"
 lemma "|- (EX x. P(x) & ~Q(x)) &    
@@ -95,20 +95,20 @@
               (ALL x. M(x) & L(x) --> P(x)) &    
               ((EX x. R(x) & ~ Q(x)) --> (ALL x. L(x) --> ~ R(x)))   
           --> (ALL x. M(x) --> ~L(x))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 28.  AMENDED"
 lemma "|- (ALL x. P(x) --> (ALL x. Q(x))) &    
         ((ALL x. Q(x)|R(x)) --> (EX x. Q(x)&S(x))) &   
         ((EX x. S(x)) --> (ALL x. L(x) --> M(x)))   
     --> (ALL x. P(x) & L(x) --> M(x))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 29.  Essentially the same as Principia Mathematica *11.71"
 lemma "|- (EX x. P(x)) & (EX y. Q(y))   
     --> ((ALL x. P(x)-->R(x)) & (ALL y. Q(y)-->S(y))   <->      
          (ALL x y. P(x) & Q(y) --> R(x) & S(y)))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 30"
 lemma "|- (ALL x. P(x) | Q(x) --> ~ R(x)) &  
@@ -161,7 +161,7 @@
         (ALL x z. ~P(x,z) --> (EX y. Q(y,z))) &  
         ((EX x y. Q(x,y)) --> (ALL x. R(x,x)))   
     --> (ALL x. EX y. R(x,y))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 38"
 lemma "|- (ALL x. p(a) & (p(x) --> (EX y. p(y) & r(x,y))) -->         
@@ -169,7 +169,7 @@
          (ALL x. (~p(a) | p(x) | (EX z. EX w. p(z) & r(x,w) & r(w,z))) &     
                  (~p(a) | ~(EX y. p(y) & r(x,y)) |                           
                  (EX z. EX w. p(z) & r(x,w) & r(w,z))))"
-  by (tactic "pc_tac LK_pack 1")
+  by pc
 
 text "Problem 39"
 lemma "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))"
@@ -215,7 +215,7 @@
 
 text "Problem 48"
 lemma "|- (a=b | c=d) & (a=c | b=d) --> a=d | b=c"
-  by (tactic {* fast_tac (LK_pack add_safes @{thms subst}) 1 *})
+  by (fast add!: subst)
 
 text "Problem 50"
 lemma "|- (ALL x. P(a,x) | (ALL y. P(x,y))) --> (EX x. ALL y. P(x,y))"
@@ -224,16 +224,16 @@
 text "Problem 51"
 lemma "|- (EX z w. ALL x y. P(x,y) <->  (x=z & y=w)) -->   
          (EX z. ALL x. EX w. (ALL y. P(x,y) <-> y=w) <-> x=z)"
-  by (tactic {* fast_tac (LK_pack add_safes @{thms subst}) 1 *})
+  by (fast add!: subst)
 
 text "Problem 52"  (*Almost the same as 51. *)
 lemma "|- (EX z w. ALL x y. P(x,y) <->  (x=z & y=w)) -->
          (EX w. ALL y. EX z. (ALL x. P(x,y) <-> x=z) <-> y=w)"
-  by (tactic {* fast_tac (LK_pack add_safes @{thms subst}) 1 *})
+  by (fast add!: subst)
 
 text "Problem 56"
 lemma "|- (ALL x.(EX y. P(y) & x=f(y)) --> P(x)) <-> (ALL x. P(x) --> P(f(x)))"
-  by (tactic {* best_tac (LK_pack add_unsafes [@{thm symL}, @{thm subst}]) 1 *})
+  by (best add: symL subst)
   (*requires tricker to orient the equality properly*)
 
 text "Problem 57"
@@ -243,7 +243,7 @@
 
 text "Problem 58!"
 lemma "|- (ALL x y. f(x)=g(y)) --> (ALL x y. f(f(x))=f(g(y)))"
-  by (tactic {* fast_tac (LK_pack add_safes @{thms subst}) 1 *})
+  by (fast add!: subst)
 
 text "Problem 59"
 (*Unification works poorly here -- the abstraction %sobj prevents efficient
--- a/src/Sequents/LK/Nat.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/LK/Nat.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -37,7 +37,7 @@
 lemma Suc_n_not_n: "|- Suc(k) ~= k"
   apply (rule_tac n = k in induct)
   apply (tactic {* simp_tac (put_simpset LK_ss @{context} addsimps @{thms Suc_neq_0}) 1 *})
-  apply (tactic {* fast_tac (LK_pack add_safes @{thms Suc_inject_rule}) 1 *})
+  apply (fast add!: Suc_inject_rule)
   done
 
 lemma add_0: "|- 0+n = n"
--- a/src/Sequents/LK0.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/LK0.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -162,13 +162,13 @@
 
 
 (** If-and-only-if rules **)
-lemma iffR: 
+lemma iffR:
     "[| $H,P |- $E,Q,$F;  $H,Q |- $E,P,$F |] ==> $H |- $E, P <-> Q, $F"
   apply (unfold iff_def)
   apply (assumption | rule conjR impR)+
   done
 
-lemma iffL: 
+lemma iffL:
     "[| $H,$G |- $E,P,Q;  $H,Q,P,$G |- $E |] ==> $H, P <-> Q, $G |- $E"
   apply (unfold iff_def)
   apply (assumption | rule conjL impL basic)+
@@ -210,28 +210,44 @@
 
 (*The rules of LK*)
 
-ML {*
-val prop_pack = empty_pack add_safes
-                [@{thm basic}, @{thm refl}, @{thm TrueR}, @{thm FalseL},
-                 @{thm conjL}, @{thm conjR}, @{thm disjL}, @{thm disjR}, @{thm impL}, @{thm impR},
-                 @{thm notL}, @{thm notR}, @{thm iffL}, @{thm iffR}];
+lemmas [safe] =
+  iffR iffL
+  notR notL
+  impR impL
+  disjR disjL
+  conjR conjL
+  FalseL TrueR
+  refl basic
+ML {* val prop_pack = Cla.get_pack @{context} *}
+
+lemmas [safe] = exL allR
+lemmas [unsafe] = the_equality exR_thin allL_thin
+ML {* val LK_pack = Cla.get_pack @{context} *}
 
-val LK_pack = prop_pack add_safes   [@{thm allR}, @{thm exL}]
-                        add_unsafes [@{thm allL_thin}, @{thm exR_thin}, @{thm the_equality}];
+ML {*
+  val LK_dup_pack =
+    Cla.put_pack prop_pack @{context}
+    |> fold_rev Cla.add_safe @{thms allR exL}
+    |> fold_rev Cla.add_unsafe @{thms allL exR the_equality}
+    |> Cla.get_pack;
+*}
 
-val LK_dup_pack = prop_pack add_safes   [@{thm allR}, @{thm exL}]
-                            add_unsafes [@{thm allL}, @{thm exR}, @{thm the_equality}];
+ML {*
+  fun lemma_tac th i =
+    rtac (@{thm thinR} RS @{thm cut}) i THEN
+    REPEAT (rtac @{thm thinL} i) THEN
+    rtac th i;
+*}
 
 
-fun lemma_tac th i =
-    rtac (@{thm thinR} RS @{thm cut}) i THEN REPEAT (rtac @{thm thinL} i) THEN rtac th i;
-*}
+method_setup fast_prop =
+  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.fast_tac (Cla.put_pack prop_pack ctxt))) *}
 
-method_setup fast_prop = {* Scan.succeed (K (SIMPLE_METHOD' (fast_tac prop_pack))) *}
-method_setup fast = {* Scan.succeed (K (SIMPLE_METHOD' (fast_tac LK_pack))) *}
-method_setup fast_dup = {* Scan.succeed (K (SIMPLE_METHOD' (fast_tac LK_dup_pack))) *}
-method_setup best = {* Scan.succeed (K (SIMPLE_METHOD' (best_tac LK_pack))) *}
-method_setup best_dup = {* Scan.succeed (K (SIMPLE_METHOD' (best_tac LK_dup_pack))) *}
+method_setup fast_dup =
+  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.fast_tac (Cla.put_pack LK_dup_pack ctxt))) *}
+
+method_setup best_dup =
+  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack LK_dup_pack ctxt))) *}
 
 
 lemma mp_R:
@@ -239,7 +255,7 @@
     and minor: "$H |- $E, $F, P"
   shows "$H |- $E, Q, $F"
   apply (rule thinRS [THEN cut], rule major)
-  apply (tactic "step_tac LK_pack 1")
+  apply step
   apply (rule thinR, rule minor)
   done
 
@@ -248,7 +264,7 @@
     and minor: "$H, $G, Q |- $E"
   shows "$H, P, $G |- $E"
   apply (rule thinL [THEN cut], rule major)
-  apply (tactic "step_tac LK_pack 1")
+  apply step
   apply (rule thinL, rule minor)
   done
 
@@ -301,10 +317,10 @@
 (** Equality **)
 
 lemma sym: "|- a=b --> b=a"
-  by (tactic {* safe_tac (LK_pack add_safes [@{thm subst}]) 1 *})
+  by (safe add!: subst)
 
 lemma trans: "|- a=b --> b=c --> a=c"
-  by (tactic {* safe_tac (LK_pack add_safes [@{thm subst}]) 1 *})
+  by (safe add!: subst)
 
 (* Symmetry of equality in hypotheses *)
 lemmas symL = sym [THEN L_of_imp]
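
LK0 now seeds the context pack through the [safe]/[unsafe] declarations above and snapshots the result into prop_pack, LK_pack and LK_dup_pack via Cla.get_pack. A descendant theory can extend the ambient pack persistently with the same attributes instead of passing rules at each call; a hypothetical sketch (not part of this changeset), assuming a theory importing LK0:

  lemmas [safe] = subst
  (* a plain "by fast" now sees subst as a safe rule, where previously
     "by (fast add!: subst)" was needed at each call site *)
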
--- a/src/Sequents/Sequents.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/Sequents.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -7,6 +7,7 @@
 
 theory Sequents
 imports Pure
+keywords "print_pack" :: diag
 begin
 
 setup Pure_Thy.old_appl_syntax_setup
@@ -15,7 +16,8 @@
 
 typedecl o
 
-(* Sequences *)
+
+subsection {* Sequences *}
 
 typedecl
  seq'
@@ -25,7 +27,7 @@
  Seq1'         :: "o=>seq'"
 
 
-(* concrete syntax *)
+subsection {* Concrete syntax *}
 
 nonterminal seq and seqobj and seqcont
 
@@ -141,6 +143,9 @@
 
 parse_translation {* [(@{syntax_const "_Side"}, K side_tr)] *}
 
+
+subsection {* Proof tools *}
+
 ML_file "prover.ML"
 
 end
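
The print_pack keyword declared here is implemented in prover.ML (below) as a diagnostic command that prints the safe and unsafe rules of the current context. A usage sketch, assuming a theory importing LK0; the extra declaration is hypothetical:

  lemmas [unsafe] = thinL thinR   (* hypothetical: add the weakening rules as unsafe *)
  print_pack                      (* prints the pack's safe and unsafe rules *)
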
--- a/src/Sequents/Washing.thy	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/Washing.thy	Sat Feb 01 17:56:03 2014 +0100
@@ -40,19 +40,16 @@
 (* a load of dirty clothes and two dollars gives you clean clothes *)
 
 lemma "dollar , dollar , dirty |- clean"
-  apply (tactic {* best_tac (lazy_cs add_safes (@{thms changeI} @ @{thms load1I} @ @{thms washI} @ @{thms dryI})) 1 *})
-  done
+  by (tactic {* Cla.best_tac (Cla.put_pack lazy_pack @{context} |> fold Cla.add_safe @{thms changeI load1I washI dryI}) 1 *})
 
 (* order of premises doesn't matter *)
 
 lemma "dollar , dirty , dollar |- clean"
-  apply (tactic {* best_tac (lazy_cs add_safes (@{thms changeI} @ @{thms load1I} @ @{thms washI} @ @{thms dryI})) 1 *})
-  done
+  by (tactic {* Cla.best_tac (Cla.put_pack lazy_pack @{context} |> fold Cla.add_safe @{thms changeI load1I washI dryI}) 1 *})
 
 (* alternative formulation *)
 
 lemma "dollar , dollar |- dirty -o clean"
-  apply (tactic {* best_tac (lazy_cs add_safes (@{thms changeI} @ @{thms load1I} @ @{thms washI} @ @{thms dryI})) 1 *})
-  done
+  by (tactic {* Cla.best_tac (Cla.put_pack lazy_pack @{context} |> fold Cla.add_safe @{thms changeI load1I washI dryI}) 1 *})
 
 end
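
The three lemmas above extend lazy_pack with the same four safe rules inline. The extension could equally be computed once and reused; a sketch, assuming it is placed in this theory where lazy_pack and the rules are in scope:

  ML {*
    (* lazy_pack extended with the washing rules, computed once *)
    val washing_pack =
      Cla.put_pack lazy_pack @{context}
      |> fold Cla.add_safe @{thms changeI load1I washI dryI}
      |> Cla.get_pack;
  *}

Each proof then reduces to  by (tactic {* Cla.best_tac (Cla.put_pack washing_pack @{context}) 1 *}).
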
--- a/src/Sequents/prover.ML	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/prover.ML	Sat Feb 01 17:56:03 2014 +0100
@@ -5,54 +5,114 @@
 Simple classical reasoner for the sequent calculus, based on "theorem packs".
 *)
 
+signature CLA =
+sig
+  type pack
+  val empty_pack: pack
+  val get_pack: Proof.context -> pack
+  val put_pack: pack -> Proof.context -> Proof.context
+  val pretty_pack: Proof.context -> Pretty.T
+  val add_safe: thm -> Proof.context -> Proof.context
+  val add_unsafe: thm -> Proof.context -> Proof.context
+  val safe_add: attribute
+  val unsafe_add: attribute
+  val method: (Proof.context -> int -> tactic) -> (Proof.context -> Proof.method) context_parser
+  val trace: bool Config.T
+  val forms_of_seq: term -> term list
+  val safe_tac: Proof.context -> int -> tactic
+  val step_tac: Proof.context -> int -> tactic
+  val pc_tac: Proof.context -> int -> tactic
+  val fast_tac: Proof.context -> int -> tactic
+  val best_tac: Proof.context -> int -> tactic
+end;
 
-(*Higher precedence than := facilitates use of references*)
-infix 4 add_safes add_unsafes;
-
-structure Cla =
+structure Cla: CLA =
 struct
 
-datatype pack = Pack of thm list * thm list;
-
-val trace = Unsynchronized.ref false;
+(** rule declarations **)
 
 (*A theorem pack has the form  (safe rules, unsafe rules)
   An unsafe rule is incomplete or introduces variables in subgoals,
   and is tried only when the safe rules are not applicable.  *)
 
-fun less (rl1,rl2) = (nprems_of rl1) < (nprems_of rl2);
+fun less (rl1, rl2) = Thm.nprems_of rl1 < Thm.nprems_of rl2;
+val sort_rules = sort (make_ord less);
 
-val empty_pack = Pack([],[]);
+datatype pack = Pack of thm list * thm list;
+
+val empty_pack = Pack ([], []);
 
-fun warn_duplicates [] = []
-  | warn_duplicates dups =
-      (warning (cat_lines ("Ignoring duplicate theorems:" ::
-          map Display.string_of_thm_without_context dups));
-       dups);
+structure Pack = Generic_Data
+(
+  type T = pack;
+  val empty = empty_pack;
+  val extend = I;
+  fun merge (Pack (safes, unsafes), Pack (safes', unsafes')) =
+    Pack
+     (sort_rules (Thm.merge_thms (safes, safes')),
+      sort_rules (Thm.merge_thms (unsafes, unsafes')));
+);
 
-fun (Pack(safes,unsafes)) add_safes ths   = 
-    let val dups = warn_duplicates (inter Thm.eq_thm_prop ths safes)
-        val ths' = subtract Thm.eq_thm_prop dups ths
-    in
-        Pack(sort (make_ord less) (ths'@safes), unsafes)
-    end;
+val put_pack = Context.proof_map o Pack.put;
+val get_pack = Pack.get o Context.Proof;
+fun get_rules ctxt = let val Pack rules = get_pack ctxt in rules end;
+
+
+(* print pack *)
 
-fun (Pack(safes,unsafes)) add_unsafes ths = 
-    let val dups = warn_duplicates (inter Thm.eq_thm_prop unsafes ths)
-        val ths' = subtract Thm.eq_thm_prop dups ths
-    in
-        Pack(safes, sort (make_ord less) (ths'@unsafes))
-    end;
+fun pretty_pack ctxt =
+  let val (safes, unsafes) = get_rules ctxt in
+    Pretty.chunks
+     [Pretty.big_list "safe rules:" (map (Display.pretty_thm ctxt) safes),
+      Pretty.big_list "unsafe rules:" (map (Display.pretty_thm ctxt) unsafes)]
+  end;
 
-fun merge_pack (Pack(safes,unsafes), Pack(safes',unsafes')) =
-        Pack(sort (make_ord less) (safes@safes'), 
-             sort (make_ord less) (unsafes@unsafes'));
+val _ =
+  Outer_Syntax.command @{command_spec "print_pack"} "print pack of classical rules"
+    (Scan.succeed (Toplevel.keep (Pretty.writeln o pretty_pack o Toplevel.context_of)));
 
 
-fun print_pack (Pack(safes,unsafes)) =
-  writeln (cat_lines
-   (["Safe rules:"] @ map Display.string_of_thm_without_context safes @
-    ["Unsafe rules:"] @ map Display.string_of_thm_without_context unsafes));
+(* declare rules *)
+
+fun add_rule which th context = context |> Pack.map (fn Pack rules =>
+  Pack (rules |> which (fn ths =>
+    if member Thm.eq_thm_prop ths th then
+      let
+        val ctxt = Context.proof_of context;
+        val _ =
+          if Context_Position.is_visible ctxt then
+            warning ("Ignoring duplicate theorems:\n" ^ Display.string_of_thm ctxt th)
+          else ();
+      in ths end
+    else sort_rules (th :: ths))));
+
+val add_safe = Context.proof_map o add_rule apfst;
+val add_unsafe = Context.proof_map o add_rule apsnd;
+
+
+(* attributes *)
+
+val safe_add = Thm.declaration_attribute (add_rule apfst);
+val unsafe_add = Thm.declaration_attribute (add_rule apsnd);
+
+val _ = Theory.setup
+  (Attrib.setup @{binding safe} (Scan.succeed safe_add) "" #>
+   Attrib.setup @{binding unsafe} (Scan.succeed unsafe_add) "");
+
+
+(* proof method syntax *)
+
+fun method tac =
+  Method.sections
+   [Args.$$$ "add" -- Args.bang_colon >> K (I, safe_add),
+    Args.$$$ "add" -- Args.colon >> K (I, unsafe_add)]
+  >> K (SIMPLE_METHOD' o tac);
+
+
+(** tactics **)
+
+val trace = Attrib.setup_config_bool @{binding cla_trace} (K false);
+
 
 (*Returns the list of all formulas in the sequent*)
 fun forms_of_seq (Const(@{const_name "SeqO'"},_) $ P $ u) = P :: forms_of_seq u
@@ -65,11 +125,10 @@
   -- checks that each concl formula looks like some subgoal formula.
   It SHOULD check order as well, using recursion rather than forall/exists*)
 fun could_res (seqp,seqc) =
-      forall (fn Qc => exists (fn Qp => Term.could_unify (Qp,Qc)) 
+      forall (fn Qc => exists (fn Qp => Term.could_unify (Qp,Qc))
                               (forms_of_seq seqp))
              (forms_of_seq seqc);
 
-
 (*Tests whether two sequents or pairs of sequents could be resolved*)
 fun could_resolve_seq (prem,conc) =
   case (prem,conc) of
@@ -102,7 +161,7 @@
   The list of rules is partitioned into 0, 1, 2 premises.
   The resulting tactic, gtac, tries to resolve with rules.
   If successful, it recursively applies nextac to the new subgoals only.
-  Else fails.  (Treatment of goals due to Ph. de Groote) 
+  Else fails.  (Treatment of goals due to Ph. de Groote)
   Bind (RESOLVE_THEN rules) to a variable: it preprocesses the rules. *)
 
 (*Takes rule lists separated in to 0, 1, 2, >2 premises.
@@ -112,7 +171,7 @@
 fun RESOLVE_THEN rules =
   let val [rls0,rls1,rls2] = partition_list has_prems 0 2 rules;
       fun tac nextac i state = state |>
-             (filseq_resolve_tac rls0 9999 i 
+             (filseq_resolve_tac rls0 9999 i
               ORELSE
               (DETERM(filseq_resolve_tac rls1 9999 i) THEN  TRY(nextac i))
               ORELSE
@@ -123,50 +182,54 @@
 
 
 (*repeated resolution applied to the designated goal*)
-fun reresolve_tac rules = 
-  let val restac = RESOLVE_THEN rules;  (*preprocessing done now*)
-      fun gtac i = restac gtac i
-  in  gtac  end; 
+fun reresolve_tac rules =
+  let
+    val restac = RESOLVE_THEN rules;  (*preprocessing done now*)
+    fun gtac i = restac gtac i;
+  in gtac end;
 
 (*tries the safe rules repeatedly before the unsafe rules. *)
-fun repeat_goal_tac (Pack(safes,unsafes)) = 
-  let val restac  =    RESOLVE_THEN safes
-      and lastrestac = RESOLVE_THEN unsafes;
-      fun gtac i = restac gtac i  
-                   ORELSE  (if !trace then
-                                (print_tac "" THEN lastrestac gtac i)
-                            else lastrestac gtac i)
-  in  gtac  end; 
+fun repeat_goal_tac ctxt =
+  let
+    val (safes, unsafes) = get_rules ctxt;
+    val restac = RESOLVE_THEN safes;
+    val lastrestac = RESOLVE_THEN unsafes;
+    fun gtac i =
+      restac gtac i ORELSE
+       (if Config.get ctxt trace then print_tac "" THEN lastrestac gtac i
+        else lastrestac gtac i)
+  in gtac end;
 
 
 (*Tries safe rules only*)
-fun safe_tac (Pack(safes,unsafes)) = reresolve_tac safes;
-
-val safe_goal_tac = safe_tac;   (*backwards compatibility*)
+fun safe_tac ctxt = reresolve_tac (#1 (get_rules ctxt));
 
 (*Tries a safe rule or else a unsafe rule.  Single-step for tracing. *)
-fun step_tac (pack as Pack(safes,unsafes)) =
-    safe_tac pack  ORELSE'
-    filseq_resolve_tac unsafes 9999;
+fun step_tac ctxt =
+  safe_tac ctxt ORELSE' filseq_resolve_tac (#2 (get_rules ctxt)) 9999;
 
 
 (* Tactic for reducing a goal, using Predicate Calculus rules.
    A decision procedure for Propositional Calculus, it is incomplete
-   for Predicate-Calculus because of allL_thin and exR_thin.  
+   for Predicate-Calculus because of allL_thin and exR_thin.
    Fails if it can do nothing.      *)
-fun pc_tac pack = SELECT_GOAL (DEPTH_SOLVE (repeat_goal_tac pack 1));
+fun pc_tac ctxt = SELECT_GOAL (DEPTH_SOLVE (repeat_goal_tac ctxt 1));
 
 
-(*The following two tactics are analogous to those provided by 
+(*The following two tactics are analogous to those provided by
   Provers/classical.  In fact, pc_tac is usually FASTER than fast_tac!*)
-fun fast_tac pack =
-  SELECT_GOAL (DEPTH_SOLVE (step_tac pack 1));
+fun fast_tac ctxt =
+  SELECT_GOAL (DEPTH_SOLVE (step_tac ctxt 1));
+
+fun best_tac ctxt  =
+  SELECT_GOAL (BEST_FIRST (has_fewer_prems 1, size_of_thm) (step_tac ctxt 1));
 
-fun best_tac pack  = 
-  SELECT_GOAL (BEST_FIRST (has_fewer_prems 1, size_of_thm) 
-               (step_tac pack 1));
+val _ = Theory.setup
+  (Method.setup @{binding safe} (method safe_tac) "" #>
+   Method.setup @{binding step} (method step_tac) "" #>
+   Method.setup @{binding pc} (method pc_tac) "" #>
+   Method.setup @{binding fast} (method fast_tac) "" #>
+   Method.setup @{binding best} (method best_tac) "");
 
 end;
 
-
-open Cla;
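
Besides moving the pack into context data, the structure replaces the Unsynchronized.ref trace flag with the configuration option cla_trace, and the Cla.method combinator packages any pack-based tactic as an Isar method with "add:" (unsafe) and "add!:" (safe) sections, which is how safe, step, pc, fast and best are registered above. A sketch of reusing it for a further method, assuming an ML block in a theory based on Sequents (the binding my_best is hypothetical):

  ML {*
    val _ =
      Theory.setup
        (Method.setup @{binding my_best} (Cla.method Cla.best_tac)
          "best-first sequent search");
  *}
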
--- a/src/Sequents/simpdata.ML	Sat Feb 01 00:32:32 2014 +0000
+++ b/src/Sequents/simpdata.ML	Sat Feb 01 17:56:03 2014 +0100
@@ -14,7 +14,7 @@
 fun atomize r =
  case concl_of r of
    Const(@{const_name Trueprop},_) $ Abs(_,_,a) $ Abs(_,_,c) =>
-     (case (forms_of_seq a, forms_of_seq c) of
+     (case (Cla.forms_of_seq a, Cla.forms_of_seq c) of
         ([], [p]) =>
           (case p of
                Const(@{const_name imp},_)$_$_ => atomize(r RS @{thm mp_R})
@@ -28,18 +28,17 @@
  | _ => [r];
 
 (*Make meta-equalities.*)
-fun mk_meta_eq th = case concl_of th of
+fun mk_meta_eq ctxt th = case concl_of th of
     Const("==",_)$_$_           => th
   | Const(@{const_name Trueprop},_) $ Abs(_,_,a) $ Abs(_,_,c) =>
-        (case (forms_of_seq a, forms_of_seq c) of
+        (case (Cla.forms_of_seq a, Cla.forms_of_seq c) of
              ([], [p]) =>
                  (case p of
                       (Const(@{const_name equal},_)$_$_)   => th RS @{thm eq_reflection}
                     | (Const(@{const_name iff},_)$_$_) => th RS @{thm iff_reflection}
                     | (Const(@{const_name Not},_)$_)      => th RS @{thm iff_reflection_F}
                     | _                       => th RS @{thm iff_reflection_T})
-           | _ => error ("addsimps: unable to use theorem\n" ^
-                         Display.string_of_thm_without_context th));
+           | _ => error ("addsimps: unable to use theorem\n" ^ Display.string_of_thm ctxt th));
 
 (*Replace premises x=y, X<->Y by X==Y*)
 fun mk_meta_prems ctxt =
@@ -48,7 +47,7 @@
 
 (*Congruence rules for = or <-> (instead of ==)*)
 fun mk_meta_cong ctxt rl =
-  Drule.zero_var_indexes (mk_meta_eq (mk_meta_prems ctxt rl))
+  Drule.zero_var_indexes (mk_meta_eq ctxt (mk_meta_prems ctxt rl))
     handle THM _ =>
       error("Premises and conclusion of congruence rules must use =-equality or <->");
 
@@ -71,7 +70,7 @@
   setSSolver (mk_solver "safe" safe_solver)
   setSolver (mk_solver "unsafe" unsafe_solver)
   |> Simplifier.set_subgoaler asm_simp_tac
-  |> Simplifier.set_mksimps (K (map mk_meta_eq o atomize o gen_all))
+  |> Simplifier.set_mksimps (fn ctxt => map (mk_meta_eq ctxt) o atomize o gen_all)
   |> Simplifier.set_mkcong mk_meta_cong
   |> simpset_of;