merged
author bulwahn
Wed, 28 Apr 2010 19:46:09 +0200
changeset 36512 875218f3f97c
parent 36511 db2953f5887a (current diff)
parent 36501 6c7ba330ab42 (diff)
child 36517 0dcd03d521ec
child 36581 bbea7f52e8e1
--- a/src/HOL/Metis_Examples/BT.thy	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Metis_Examples/BT.thy	Wed Apr 28 19:46:09 2010 +0200
@@ -1,5 +1,6 @@
 (*  Title:      HOL/MetisTest/BT.thy
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
+    Author:     Jasmin Blanchette, TU Muenchen
 
 Testing the metis method
 *)
@@ -10,7 +11,6 @@
 imports Main
 begin
 
-
 datatype 'a bt =
     Lf
   | Br 'a  "'a bt"  "'a bt"
@@ -66,178 +66,223 @@
 text {* \medskip BT simplification *}
 
 declare [[ atp_problem_prefix = "BT__n_leaves_reflect" ]]
+
 lemma n_leaves_reflect: "n_leaves (reflect t) = n_leaves t"
-  apply (induct t)
-  apply (metis add_right_cancel n_leaves.simps(1) reflect.simps(1))
-  apply (metis add_commute n_leaves.simps(2) reflect.simps(2))
-  done
+proof (induct t)
+  case Lf thus ?case
+  proof -
+    let "?p\<^isub>1 x\<^isub>1" = "x\<^isub>1 \<noteq> n_leaves (reflect (Lf::'a bt))"
+    have "\<not> ?p\<^isub>1 (Suc 0)" by (metis reflect.simps(1) n_leaves.simps(1))
+    hence "\<not> ?p\<^isub>1 (n_leaves (Lf::'a bt))" by (metis n_leaves.simps(1))
+    thus "n_leaves (reflect (Lf::'a bt)) = n_leaves (Lf::'a bt)" by metis
+  qed
+next
+  case (Br a t1 t2) thus ?case
+    by (metis n_leaves.simps(2) nat_add_commute reflect.simps(2))
+qed
 
 declare [[ atp_problem_prefix = "BT__n_nodes_reflect" ]]
+
 lemma n_nodes_reflect: "n_nodes (reflect t) = n_nodes t"
-  apply (induct t)
-  apply (metis reflect.simps(1))
-  apply (metis n_nodes.simps(2) nat_add_commute reflect.simps(2))
-  done
+proof (induct t)
+  case Lf thus ?case by (metis reflect.simps(1))
+next
+  case (Br a t1 t2) thus ?case
+    by (metis class_semiring.semiring_rules(24) n_nodes.simps(2) reflect.simps(2))
+qed
 
 declare [[ atp_problem_prefix = "BT__depth_reflect" ]]
+
 lemma depth_reflect: "depth (reflect t) = depth t"
-  apply (induct t)
-  apply (metis depth.simps(1) reflect.simps(1))
-  apply (metis depth.simps(2) min_max.sup_commute reflect.simps(2))
-  done
+apply (induct t)
+ apply (metis depth.simps(1) reflect.simps(1))
+by (metis depth.simps(2) min_max.inf_sup_aci(5) reflect.simps(2))
 
 text {*
-  The famous relationship between the numbers of leaves and nodes.
+The famous relationship between the numbers of leaves and nodes.
 *}
 
 declare [[ atp_problem_prefix = "BT__n_leaves_nodes" ]]
+
 lemma n_leaves_nodes: "n_leaves t = Suc (n_nodes t)"
-  apply (induct t)
-  apply (metis n_leaves.simps(1) n_nodes.simps(1))
-  apply auto
-  done
+apply (induct t)
+ apply (metis n_leaves.simps(1) n_nodes.simps(1))
+by auto
 
 declare [[ atp_problem_prefix = "BT__reflect_reflect_ident" ]]
+
 lemma reflect_reflect_ident: "reflect (reflect t) = t"
-  apply (induct t)
-  apply (metis add_right_cancel reflect.simps(1));
-  apply (metis reflect.simps(2))
-  done
+apply (induct t)
+ apply (metis reflect.simps(1))
+proof -
+  fix a :: 'a and t1 :: "'a bt" and t2 :: "'a bt"
+  assume A1: "reflect (reflect t1) = t1"
+  assume A2: "reflect (reflect t2) = t2"
+  have "\<And>V U. reflect (Br U V (reflect t1)) = Br U t1 (reflect V)"
+    using A1 by (metis reflect.simps(2))
+  hence "\<And>V U. Br U t1 (reflect (reflect V)) = reflect (reflect (Br U t1 V))"
+    by (metis reflect.simps(2))
+  hence "\<And>U. reflect (reflect (Br U t1 t2)) = Br U t1 t2"
+    using A2 by metis
+  thus "reflect (reflect (Br a t1 t2)) = Br a t1 t2" by blast
+qed
 
 declare [[ atp_problem_prefix = "BT__bt_map_ident" ]]
+
 lemma bt_map_ident: "bt_map (%x. x) = (%y. y)"
 apply (rule ext) 
 apply (induct_tac y)
-  apply (metis bt_map.simps(1))
-txt{*BUG involving flex-flex pairs*}
-(*  apply (metis bt_map.simps(2)) *)
-apply auto
-done
-
+ apply (metis bt_map.simps(1))
+by (metis COMBI_def bt_map.simps(2))
 
 declare [[ atp_problem_prefix = "BT__bt_map_appnd" ]]
+
 lemma bt_map_appnd: "bt_map f (appnd t u) = appnd (bt_map f t) (bt_map f u)"
 apply (induct t)
-  apply (metis appnd.simps(1) bt_map.simps(1))
-  apply (metis appnd.simps(2) bt_map.simps(2))  (*slow!!*)
-done
-
+ apply (metis appnd.simps(1) bt_map.simps(1))
+by (metis appnd.simps(2) bt_map.simps(2))
 
 declare [[ atp_problem_prefix = "BT__bt_map_compose" ]]
+
 lemma bt_map_compose: "bt_map (f o g) t = bt_map f (bt_map g t)"
-apply (induct t) 
-  apply (metis bt_map.simps(1))
-txt{*Metis runs forever*}
-(*  apply (metis bt_map.simps(2) o_apply)*)
-apply auto
-done
-
+apply (induct t)
+ apply (metis bt_map.simps(1))
+by (metis bt_map.simps(2) o_eq_dest_lhs)
 
 declare [[ atp_problem_prefix = "BT__bt_map_reflect" ]]
+
 lemma bt_map_reflect: "bt_map f (reflect t) = reflect (bt_map f t)"
-  apply (induct t)
-  apply (metis add_right_cancel bt_map.simps(1) reflect.simps(1))
-  apply (metis add_right_cancel bt_map.simps(2) reflect.simps(2))
-  done
+apply (induct t)
+ apply (metis bt_map.simps(1) reflect.simps(1))
+by (metis bt_map.simps(2) reflect.simps(2))
 
 declare [[ atp_problem_prefix = "BT__preorder_bt_map" ]]
+
 lemma preorder_bt_map: "preorder (bt_map f t) = map f (preorder t)"
-  apply (induct t)
-  apply (metis bt_map.simps(1) map.simps(1) preorder.simps(1))
-   apply simp
-  done
+apply (induct t)
+ apply (metis bt_map.simps(1) map.simps(1) preorder.simps(1))
+by simp
 
 declare [[ atp_problem_prefix = "BT__inorder_bt_map" ]]
+
 lemma inorder_bt_map: "inorder (bt_map f t) = map f (inorder t)"
-  apply (induct t)
-  apply (metis bt_map.simps(1) inorder.simps(1) map.simps(1))
-  apply simp
-  done
+proof (induct t)
+  case Lf thus ?case
+  proof -
+    have "map f [] = []" by (metis map.simps(1))
+    hence "map f [] = inorder Lf" by (metis inorder.simps(1))
+    hence "inorder (bt_map f Lf) = map f []" by (metis bt_map.simps(1))
+    thus "inorder (bt_map f Lf) = map f (inorder Lf)" by (metis inorder.simps(1))
+  qed
+next
+  case (Br a t1 t2) thus ?case by simp
+qed
 
 declare [[ atp_problem_prefix = "BT__postorder_bt_map" ]]
+
 lemma postorder_bt_map: "postorder (bt_map f t) = map f (postorder t)"
-  apply (induct t)
-  apply (metis bt_map.simps(1) map.simps(1) postorder.simps(1))
-   apply simp
-  done
+apply (induct t)
+ apply (metis Nil_is_map_conv bt_map.simps(1) postorder.simps(1))
+by simp
 
 declare [[ atp_problem_prefix = "BT__depth_bt_map" ]]
+
 lemma depth_bt_map [simp]: "depth (bt_map f t) = depth t"
-  apply (induct t)
-  apply (metis bt_map.simps(1) depth.simps(1))
-   apply simp
-  done
+apply (induct t)
+ apply (metis bt_map.simps(1) depth.simps(1))
+by simp
 
 declare [[ atp_problem_prefix = "BT__n_leaves_bt_map" ]]
+
 lemma n_leaves_bt_map [simp]: "n_leaves (bt_map f t) = n_leaves t"
-  apply (induct t)
-  apply (metis One_nat_def Suc_eq_plus1 bt_map.simps(1) less_add_one less_antisym linorder_neq_iff n_leaves.simps(1))
-  apply (metis bt_map.simps(2) n_leaves.simps(2))
-  done
-
+apply (induct t)
+ apply (metis bt_map.simps(1) n_leaves.simps(1))
+proof -
+  fix a :: 'b and t1 :: "'b bt" and t2 :: "'b bt"
+  assume A1: "n_leaves (bt_map f t1) = n_leaves t1"
+  assume A2: "n_leaves (bt_map f t2) = n_leaves t2"
+  have "\<And>V U. n_leaves (Br U (bt_map f t1) V) = n_leaves t1 + n_leaves V"
+    using A1 by (metis n_leaves.simps(2))
+  hence "\<And>V U. n_leaves (bt_map f (Br U t1 V)) = n_leaves t1 + n_leaves (bt_map f V)"
+    by (metis bt_map.simps(2))
+  hence F1: "\<And>U. n_leaves (bt_map f (Br U t1 t2)) = n_leaves t1 + n_leaves t2"
+    using A2 by metis
+  have "n_leaves t1 + n_leaves t2 = n_leaves (Br a t1 t2)"
+    by (metis n_leaves.simps(2))
+  thus "n_leaves (bt_map f (Br a t1 t2)) = n_leaves (Br a t1 t2)"
+    using F1 by metis
+qed
 
 declare [[ atp_problem_prefix = "BT__preorder_reflect" ]]
+
 lemma preorder_reflect: "preorder (reflect t) = rev (postorder t)"
-  apply (induct t)
-  apply (metis postorder.simps(1) preorder.simps(1) reflect.simps(1) rev_is_Nil_conv)
-  apply (metis append_Nil Cons_eq_append_conv postorder.simps(2) preorder.simps(2) reflect.simps(2) rev.simps(2) rev_append rev_rev_ident)
-  done
+apply (induct t)
+ apply (metis Nil_is_rev_conv postorder.simps(1) preorder.simps(1)
+              reflect.simps(1))
+by (metis append.simps(1) append.simps(2) postorder.simps(2) preorder.simps(2)
+          reflect.simps(2) rev.simps(2) rev_append rev_swap)
 
 declare [[ atp_problem_prefix = "BT__inorder_reflect" ]]
+
 lemma inorder_reflect: "inorder (reflect t) = rev (inorder t)"
-  apply (induct t)
-  apply (metis inorder.simps(1) reflect.simps(1) rev.simps(1))
-  apply simp
-  done
+apply (induct t)
+ apply (metis Nil_is_rev_conv inorder.simps(1) reflect.simps(1))
+by simp
+(* Slow:
+by (metis append.simps(1) append_eq_append_conv2 inorder.simps(2)
+          reflect.simps(2) rev.simps(2) rev_append)
+*)
 
 declare [[ atp_problem_prefix = "BT__postorder_reflect" ]]
+
 lemma postorder_reflect: "postorder (reflect t) = rev (preorder t)"
-  apply (induct t)
-  apply (metis postorder.simps(1) preorder.simps(1) reflect.simps(1) rev.simps(1))
-  apply (metis Cons_eq_appendI postorder.simps(2) preorder.simps(2) reflect.simps(2) rev.simps(2) rev_append self_append_conv2)
-  done
+apply (induct t)
+ apply (metis Nil_is_rev_conv postorder.simps(1) preorder.simps(1)
+              reflect.simps(1))
+by (metis preorder_reflect reflect_reflect_ident rev_swap)
 
 text {*
- Analogues of the standard properties of the append function for lists.
+Analogues of the standard properties of the append function for lists.
 *}
 
 declare [[ atp_problem_prefix = "BT__appnd_assoc" ]]
-lemma appnd_assoc [simp]:
-     "appnd (appnd t1 t2) t3 = appnd t1 (appnd t2 t3)"
-  apply (induct t1)
-  apply (metis appnd.simps(1))
-  apply (metis appnd.simps(2))
-  done
+
+lemma appnd_assoc [simp]: "appnd (appnd t1 t2) t3 = appnd t1 (appnd t2 t3)"
+apply (induct t1)
+ apply (metis appnd.simps(1))
+by (metis appnd.simps(2))
 
 declare [[ atp_problem_prefix = "BT__appnd_Lf2" ]]
+
 lemma appnd_Lf2 [simp]: "appnd t Lf = t"
-  apply (induct t)
-  apply (metis appnd.simps(1))
-  apply (metis appnd.simps(2))
-  done
+apply (induct t)
+ apply (metis appnd.simps(1))
+by (metis appnd.simps(2))
+
+declare max_add_distrib_left [simp]
 
 declare [[ atp_problem_prefix = "BT__depth_appnd" ]]
-  declare max_add_distrib_left [simp]
+
 lemma depth_appnd [simp]: "depth (appnd t1 t2) = depth t1 + depth t2"
-  apply (induct t1)
-  apply (metis add_0 appnd.simps(1) depth.simps(1))
-apply (simp add: ); 
-  done
+apply (induct t1)
+ apply (metis appnd.simps(1) depth.simps(1) plus_nat.simps(1))
+by simp
 
 declare [[ atp_problem_prefix = "BT__n_leaves_appnd" ]]
+
 lemma n_leaves_appnd [simp]:
      "n_leaves (appnd t1 t2) = n_leaves t1 * n_leaves t2"
-  apply (induct t1)
-  apply (metis One_nat_def appnd.simps(1) less_irrefl less_linear n_leaves.simps(1) nat_mult_1) 
-  apply (simp add: left_distrib)
-  done
+apply (induct t1)
+ apply (metis appnd.simps(1) n_leaves.simps(1) nat_mult_1 plus_nat.simps(1)
+              semiring_norm(111))
+by (simp add: left_distrib)
 
 declare [[ atp_problem_prefix = "BT__bt_map_appnd" ]]
+
 lemma (*bt_map_appnd:*)
      "bt_map f (appnd t1 t2) = appnd (bt_map f t1) (bt_map f t2)"
-  apply (induct t1)
-  apply (metis appnd.simps(1) bt_map_appnd)
-  apply (metis bt_map_appnd)
-  done
+apply (induct t1)
+ apply (metis appnd.simps(1) bt_map.simps(1))
+by (metis bt_map_appnd)
 
 end
--- a/src/HOL/Metis_Examples/BigO.thy	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Metis_Examples/BigO.thy	Wed Apr 28 19:46:09 2010 +0200
@@ -206,8 +206,6 @@
 qed
 
 
-sledgehammer_params [sorts]
-
 lemma bigo_alt_def: "O(f) = 
     {h. EX c. (0 < c & (ALL x. abs (h x) <= c * abs (f x)))}"
 by (auto simp add: bigo_def bigo_pos_const)
--- a/src/HOL/Metis_Examples/TransClosure.thy	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Metis_Examples/TransClosure.thy	Wed Apr 28 19:46:09 2010 +0200
@@ -17,45 +17,45 @@
   | Intg int    -- "integer value" 
   | Addr addr   -- "addresses of objects in the heap"
 
-consts R::"(addr \<times> addr) set"
-
-consts f:: "addr \<Rightarrow> val"
+consts R :: "(addr \<times> addr) set"
 
-declare [[ atp_problem_prefix = "TransClosure__test" ]]
-lemma "\<lbrakk> f c = Intg x; \<forall> y. f b = Intg y \<longrightarrow> y \<noteq> x; (a,b) \<in> R\<^sup>*; (b,c) \<in> R\<^sup>* \<rbrakk> 
-   \<Longrightarrow> \<exists> c. (b,c) \<in> R \<and> (a,c) \<in> R\<^sup>*"  
-by (metis Transitive_Closure.rtrancl_into_rtrancl converse_rtranclE trancl_reflcl)
+consts f :: "addr \<Rightarrow> val"
 
-lemma "\<lbrakk> f c = Intg x; \<forall> y. f b = Intg y \<longrightarrow> y \<noteq> x; (a,b) \<in> R\<^sup>*; (b,c) \<in> R\<^sup>* \<rbrakk> 
-   \<Longrightarrow> \<exists> c. (b,c) \<in> R \<and> (a,c) \<in> R\<^sup>*"
-proof (neg_clausify)
-assume 0: "f c = Intg x"
-assume 1: "(a, b) \<in> R\<^sup>*"
-assume 2: "(b, c) \<in> R\<^sup>*"
-assume 3: "f b \<noteq> Intg x"
-assume 4: "\<And>A. (b, A) \<notin> R \<or> (a, A) \<notin> R\<^sup>*"
-have 5: "b = c \<or> b \<in> Domain R"
-  by (metis Not_Domain_rtrancl 2)
-have 6: "\<And>X1. (a, X1) \<in> R\<^sup>* \<or> (b, X1) \<notin> R"
-  by (metis Transitive_Closure.rtrancl_into_rtrancl 1)
-have 7: "\<And>X1. (b, X1) \<notin> R"
-  by (metis 6 4)
-have 8: "b \<notin> Domain R"
-  by (metis 7 DomainE)
-have 9: "c = b"
-  by (metis 5 8)
-have 10: "f b = Intg x"
-  by (metis 0 9)
-show "False"
-  by (metis 10 3)
+lemma "\<lbrakk>f c = Intg x; \<forall>y. f b = Intg y \<longrightarrow> y \<noteq> x; (a, b) \<in> R\<^sup>*; (b, c) \<in> R\<^sup>*\<rbrakk>
+       \<Longrightarrow> \<exists>c. (b, c) \<in> R \<and> (a, c) \<in> R\<^sup>*"
+(* sledgehammer *)
+proof -
+  assume A1: "f c = Intg x"
+  assume A2: "\<forall>y. f b = Intg y \<longrightarrow> y \<noteq> x"
+  assume A3: "(a, b) \<in> R\<^sup>*"
+  assume A4: "(b, c) \<in> R\<^sup>*"
+  have F1: "f c \<noteq> f b" using A2 A1 by metis
+  have F2: "\<forall>u. (b, u) \<in> R \<longrightarrow> (a, u) \<in> R\<^sup>*" using A3 by (metis transitive_closure_trans(6))
+  have F3: "\<exists>x. (b, x R b c) \<in> R \<or> c = b" using A4 by (metis converse_rtranclE)
+  have "c \<noteq> b" using F1 by metis
+  hence "\<exists>u. (b, u) \<in> R" using F3 by metis
+  thus "\<exists>c. (b, c) \<in> R \<and> (a, c) \<in> R\<^sup>*" using F2 by metis
 qed
 
-declare [[ atp_problem_prefix = "TransClosure__test_simpler" ]]
-lemma "\<lbrakk> f c = Intg x; \<forall> y. f b = Intg y \<longrightarrow> y \<noteq> x; (a,b) \<in> R\<^sup>*; (b,c) \<in> R\<^sup>* \<rbrakk> 
-   \<Longrightarrow> \<exists> c. (b,c) \<in> R \<and> (a,c) \<in> R\<^sup>*"
-apply (erule_tac x="b" in converse_rtranclE)
-apply (metis rel_pow_0_E rel_pow_0_I)
-apply (metis DomainE Domain_iff Transitive_Closure.rtrancl_into_rtrancl)
-done
+lemma "\<lbrakk>f c = Intg x; \<forall>y. f b = Intg y \<longrightarrow> y \<noteq> x; (a, b) \<in> R\<^sup>*; (b,c) \<in> R\<^sup>*\<rbrakk>
+       \<Longrightarrow> \<exists>c. (b, c) \<in> R \<and> (a, c) \<in> R\<^sup>*"
+(* sledgehammer [isar_proof, shrink_factor = 2] *)
+proof -
+  assume A1: "f c = Intg x"
+  assume A2: "\<forall>y. f b = Intg y \<longrightarrow> y \<noteq> x"
+  assume A3: "(a, b) \<in> R\<^sup>*"
+  assume A4: "(b, c) \<in> R\<^sup>*"
+  have "(R\<^sup>*) (a, b)" using A3 by (metis mem_def)
+  hence F1: "(a, b) \<in> R\<^sup>*" by (metis mem_def)
+  have "b \<noteq> c" using A1 A2 by metis
+  hence "\<exists>x\<^isub>1. (b, x\<^isub>1) \<in> R" using A4 by (metis converse_rtranclE)
+  thus "\<exists>c. (b, c) \<in> R \<and> (a, c) \<in> R\<^sup>*" using F1 by (metis transitive_closure_trans(6))
+qed
+
+lemma "\<lbrakk>f c = Intg x; \<forall>y. f b = Intg y \<longrightarrow> y \<noteq> x; (a, b) \<in> R\<^sup>*; (b, c) \<in> R\<^sup>*\<rbrakk> 
+       \<Longrightarrow> \<exists>c. (b, c) \<in> R \<and> (a, c) \<in> R\<^sup>*"
+apply (erule_tac x = b in converse_rtranclE)
+ apply metis
+by (metis transitive_closure_trans(6))
 
 end
--- a/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -377,6 +377,8 @@
 end
 
 
+val subgoal_count = Logic.count_prems o prop_of o #goal o Proof.goal
+
 fun run_minimize args named_thms id ({pre=st, log, ...}: Mirabelle.run_args) =
   let
     open Sledgehammer_Fact_Minimizer
@@ -390,7 +392,7 @@
       |> the_default 5
     val params = default_params thy
       [("atps", prover_name), ("minimize_timeout", Int.toString timeout)]
-    val minimize = minimize_theorems params 1
+    val minimize = minimize_theorems params 1 (subgoal_count st)
     val _ = log separator
   in
     case minimize st (these (!named_thms)) of
--- a/src/HOL/Tools/ATP_Manager/atp_manager.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/ATP_Manager/atp_manager.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -22,11 +22,9 @@
      relevance_threshold: real,
      convergence: real,
      theory_relevant: bool option,
-     higher_order: bool option,
      follow_defs: bool,
      isar_proof: bool,
      shrink_factor: int,
-     sorts: bool,
      timeout: Time.time,
      minimize_timeout: Time.time}
   type problem =
@@ -59,7 +57,7 @@
   val get_prover: theory -> string -> prover
   val available_atps: theory -> unit
   val start_prover_thread:
-    params -> Time.time -> Time.time -> int -> relevance_override
+    params -> Time.time -> Time.time -> int -> int -> relevance_override
     -> (string -> minimize_command) -> Proof.state -> string -> unit
 end;
 
@@ -83,11 +81,9 @@
    relevance_threshold: real,
    convergence: real,
    theory_relevant: bool option,
-   higher_order: bool option,
    follow_defs: bool,
    isar_proof: bool,
    shrink_factor: int,
-   sorts: bool,
    timeout: Time.time,
    minimize_timeout: Time.time}
 
@@ -268,6 +264,8 @@
     let
       val killing = map (fn (th, (_, _, desc)) => (th, (Time.now (), desc))) active;
       val state' = make_state manager timeout_heap [] (killing @ cancelling) messages store;
+      val _ = if null active then ()
+              else priority "Killed active Sledgehammer threads."
     in state' end);
 
 
@@ -338,12 +336,11 @@
 
 (* start prover thread *)
 
-fun start_prover_thread (params as {timeout, ...}) birth_time death_time i
+fun start_prover_thread (params as {timeout, ...}) birth_time death_time i n
                         relevance_override minimize_command proof_state name =
   let
     val prover = get_prover (Proof.theory_of proof_state) name
     val {context = ctxt, facts, goal} = Proof.goal proof_state;
-    val n = Logic.count_prems (prop_of goal)
     val desc =
       "ATP " ^ quote name ^ " for subgoal " ^ string_of_int i ^ ":\n" ^
       Syntax.string_of_term ctxt (Thm.term_of (Thm.cprem_of goal i));
--- a/src/HOL/Tools/ATP_Manager/atp_systems.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/ATP_Manager/atp_systems.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -110,8 +110,8 @@
 
 fun generic_prover overlord get_facts prepare write_file home executable args
         proof_delims known_failures name
-        ({debug, full_types, explicit_apply, isar_proof, shrink_factor, sorts,
-         ...} : params) minimize_command
+        ({debug, full_types, explicit_apply, isar_proof, shrink_factor, ...}
+         : params) minimize_command
         ({subgoal, goal, relevance_override, axiom_clauses, filtered_clauses}
          : problem) =
   let
@@ -161,8 +161,7 @@
                             File.shell_path probfile, "2>&1"]) ^
       (if overlord then
          " | sed 's/,/, /g' \
-         \| sed 's/\\([^!=]\\)\\([=|]\\)\\([^=]\\)/\\1 \\2 \\3/g' \
-         \| sed 's/! =/ !=/g' \
+         \| sed 's/\\([^!=<]\\)\\([=|]\\)\\([^=>]\\)/\\1 \\2 \\3/g' \
          \| sed 's/  / /g' | sed 's/| |/||/g' \
          \| sed 's/ = = =/===/g' \
          \| sed 's/= = /== /g'"
@@ -214,7 +213,7 @@
       case outcome of
         NONE =>
         proof_text isar_proof
-                   (pool, debug, shrink_factor, sorts, ctxt, conjecture_shape)
+                   (pool, debug, shrink_factor, ctxt, conjecture_shape)
                    (minimize_command, proof, internal_thm_names, th, subgoal)
       | SOME failure => (string_for_failure failure ^ "\n", [])
   in
@@ -233,14 +232,13 @@
         (name, {home, executable, arguments, proof_delims, known_failures,
                 max_axiom_clauses, prefers_theory_relevant})
         (params as {debug, overlord, respect_no_atp, relevance_threshold,
-                    convergence, theory_relevant, higher_order, follow_defs,
-                    isar_proof, ...})
+                    convergence, theory_relevant, follow_defs, isar_proof, ...})
         minimize_command timeout =
   generic_prover overlord
       (get_relevant_facts respect_no_atp relevance_threshold convergence
-                          higher_order follow_defs max_axiom_clauses
+                          follow_defs max_axiom_clauses
                           (the_default prefers_theory_relevant theory_relevant))
-      (prepare_clauses higher_order false)
+      (prepare_clauses false)
       (write_tptp_file (debug andalso overlord)) home
       executable (arguments timeout) proof_delims known_failures name params
       minimize_command
@@ -305,13 +303,13 @@
         (name, {home, executable, arguments, proof_delims, known_failures,
                 max_axiom_clauses, prefers_theory_relevant})
         (params as {overlord, respect_no_atp, relevance_threshold, convergence,
-                    theory_relevant, higher_order, follow_defs, ...})
+                    theory_relevant, follow_defs, ...})
         minimize_command timeout =
   generic_prover overlord
       (get_relevant_facts respect_no_atp relevance_threshold convergence
-                          higher_order follow_defs max_axiom_clauses
+                          follow_defs max_axiom_clauses
                           (the_default prefers_theory_relevant theory_relevant))
-      (prepare_clauses higher_order true) write_dfg_file home executable
+      (prepare_clauses true) write_dfg_file home executable
       (arguments timeout) proof_delims known_failures name params
       minimize_command
 
--- a/src/HOL/Tools/Nitpick/nitpick_util.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Nitpick/nitpick_util.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -248,20 +248,11 @@
 
 val pstrs = Pretty.breaks o map Pretty.str o space_explode " "
 
-fun plain_string_from_xml_tree t =
-  Buffer.empty |> XML.add_content t |> Buffer.content
-val unyxml = plain_string_from_xml_tree o YXML.parse
-
-val is_long_identifier = forall Syntax.is_identifier o space_explode "."
-fun maybe_quote y =
-  let val s = unyxml y in
-    y |> ((not (is_long_identifier (perhaps (try (unprefix "'")) s)) andalso
-           not (is_long_identifier (perhaps (try (unprefix "?")) s))) orelse
-           OuterKeyword.is_keyword s) ? quote
-  end
+val unyxml = Sledgehammer_Util.unyxml
+val maybe_quote = Sledgehammer_Util.maybe_quote
 
 (* This hash function is recommended in Compilers: Principles, Techniques, and
-   Tools, by Aho, Sethi and Ullman. The hashpjw function, which they
+   Tools, by Aho, Sethi, and Ullman. The "hashpjw" function, which they
    particularly recommend, triggers a bug in versions of Poly/ML up to 4.2.0. *)
 fun hashw (u, w) = Word.+ (u, Word.* (0w65599, w))
 fun hashw_char (c, w) = hashw (Word.fromInt (Char.ord c), w)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -10,6 +10,7 @@
   type axiom_name = Sledgehammer_HOL_Clause.axiom_name
   type hol_clause = Sledgehammer_HOL_Clause.hol_clause
   type hol_clause_id = Sledgehammer_HOL_Clause.hol_clause_id
+
   type relevance_override =
     {add: Facts.ref list,
      del: Facts.ref list,
@@ -19,15 +20,15 @@
   val tfree_classes_of_terms : term list -> string list
   val type_consts_of_terms : theory -> term list -> string list
   val get_relevant_facts :
-    bool -> real -> real -> bool option -> bool -> int -> bool
-    -> relevance_override -> Proof.context * (thm list * 'a) -> thm list
+    bool -> real -> real -> bool -> int -> bool -> relevance_override
+    -> Proof.context * (thm list * 'a) -> thm list
     -> (thm * (string * int)) list
-  val prepare_clauses : bool option -> bool -> thm list -> thm list ->
-    (thm * (axiom_name * hol_clause_id)) list ->
-    (thm * (axiom_name * hol_clause_id)) list -> theory ->
-    axiom_name vector *
-      (hol_clause list * hol_clause list * hol_clause list *
-      hol_clause list * classrel_clause list * arity_clause list)
+  val prepare_clauses :
+    bool -> thm list -> thm list -> (thm * (axiom_name * hol_clause_id)) list
+    -> (thm * (axiom_name * hol_clause_id)) list -> theory
+    -> axiom_name vector
+       * (hol_clause list * hol_clause list * hol_clause list *
+          hol_clause list * classrel_clause list * arity_clause list)
 end;
 
 structure Sledgehammer_Fact_Filter : SLEDGEHAMMER_FACT_FILTER =
@@ -500,13 +501,10 @@
   likely to lead to unsound proofs.*)
 fun remove_unwanted_clauses cls = filter (not o unwanted o prop_of o fst) cls;
 
-fun is_first_order thy higher_order goal_cls =
-  case higher_order of
-    NONE => forall (Meson.is_fol_term thy) (map prop_of goal_cls)
-  | SOME b => not b
+fun is_first_order thy = forall (Meson.is_fol_term thy) o map prop_of
 
 fun get_relevant_facts respect_no_atp relevance_threshold convergence
-                       higher_order follow_defs max_new theory_relevant
+                       follow_defs max_new theory_relevant
                        (relevance_override as {add, only, ...})
                        (ctxt, (chain_ths, th)) goal_cls =
   if (only andalso null add) orelse relevance_threshold > 1.0 then
@@ -514,7 +512,7 @@
   else
     let
       val thy = ProofContext.theory_of ctxt
-      val is_FO = is_first_order thy higher_order goal_cls
+      val is_FO = is_first_order thy goal_cls
       val included_cls = get_all_lemmas respect_no_atp ctxt
         |> cnf_rules_pairs thy |> make_unique
         |> restrict_to_logic thy is_FO
@@ -527,7 +525,7 @@
 
 (* prepare for passing to writer,
    create additional clauses based on the information from extra_cls *)
-fun prepare_clauses higher_order dfg goal_cls chain_ths axcls extra_cls thy =
+fun prepare_clauses dfg goal_cls chain_ths axcls extra_cls thy =
   let
     (* add chain thms *)
     val chain_cls =
@@ -535,7 +533,7 @@
                                   (map (`Thm.get_name_hint) chain_ths))
     val axcls = chain_cls @ axcls
     val extra_cls = chain_cls @ extra_cls
-    val is_FO = is_first_order thy higher_order goal_cls
+    val is_FO = is_first_order thy goal_cls
     val ccls = subtract_cls extra_cls goal_cls
     val _ = app (fn th => trace_msg (fn _ => Display.string_of_thm_global thy th)) ccls
     val ccltms = map prop_of ccls
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_minimizer.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact_minimizer.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -11,7 +11,7 @@
   type prover_result = ATP_Manager.prover_result
 
   val minimize_theorems :
-    params -> int -> Proof.state -> (string * thm list) list
+    params -> int -> int -> Proof.state -> (string * thm list) list
     -> (string * thm list) list option * string
 end;
 
@@ -68,15 +68,14 @@
 (* minimalization of thms *)
 
 fun minimize_theorems (params as {debug, atps, minimize_timeout, isar_proof,
-                                  shrink_factor, sorts, ...})
-                      i state name_thms_pairs =
+                                  shrink_factor, ...})
+                      i n state name_thms_pairs =
   let
     val thy = Proof.theory_of state
     val prover = case atps of
                    [atp_name] => get_prover thy atp_name
                  | _ => error "Expected a single ATP."
     val msecs = Time.toMilliseconds minimize_timeout
-    val n = length name_thms_pairs
     val _ =
       priority ("Sledgehammer minimizer: ATP " ^ quote (the_single atps) ^
                 " with a time limit of " ^ string_of_int msecs ^ " ms.")
@@ -88,7 +87,6 @@
       | _ => NONE
 
     val {context = ctxt, facts, goal} = Proof.goal state;
-    val n = Logic.count_prems (prop_of goal)
   in
     (* try prove first to check result and get used theorems *)
     (case test_thms_fun NONE name_thms_pairs of
@@ -105,14 +103,13 @@
           val (min_thms, {proof, internal_thm_names, ...}) =
             linear_minimize (test_thms (SOME filtered_clauses)) to_use
                             ([], result)
-          val n = length min_thms
+          val m = length min_thms
           val _ = priority (cat_lines
-            ["Minimized: " ^ string_of_int n ^ " theorem" ^ plural_s n] ^ ".")
+            ["Minimized: " ^ string_of_int m ^ " theorem" ^ plural_s m] ^ ".")
         in
           (SOME min_thms,
            proof_text isar_proof
-                      (pool, debug, shrink_factor, sorts, ctxt,
-                       conjecture_shape)
+                      (pool, debug, shrink_factor, ctxt, conjecture_shape)
                       (K "", proof, internal_thm_names, goal, i) |> fst)
         end
     | {outcome = SOME TimedOut, ...} =>
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -10,6 +10,7 @@
   val trace: bool Unsynchronized.ref
   val trace_msg: (unit -> string) -> unit
   val skolem_prefix: string
+  val skolem_infix: string
   val cnf_axiom: theory -> thm -> thm list
   val multi_base_blacklist: string list
   val bad_for_atp: thm -> bool
@@ -17,7 +18,7 @@
   val cnf_rules_pairs: theory -> (string * thm) list -> (thm * (string * int)) list
   val suppress_endtheory: bool Unsynchronized.ref
     (*for emergency use where endtheory causes problems*)
-  val strip_subgoal : thm -> int -> term list * term list * term
+  val strip_subgoal : thm -> int -> (string * typ) list * term list * term
   val neg_clausify: thm -> thm list
   val neg_conjecture_clauses:
     Proof.context -> thm -> int -> thm list list * (string * typ) list
@@ -34,6 +35,7 @@
 fun trace_msg msg = if !trace then tracing (msg ()) else ();
 
 val skolem_prefix = "sko_"
+val skolem_infix = "$"
 
 fun freeze_thm th = #1 (Drule.legacy_freeze_thaw th);
 
@@ -65,6 +67,13 @@
 
 (**** SKOLEMIZATION BY INFERENCE (lcp) ****)
 
+(*Keep the full complexity of the original name*)
+fun flatten_name s = space_implode "_X" (Long_Name.explode s);
+
+fun skolem_name thm_name nref var_name =
+  skolem_prefix ^ thm_name ^ "_" ^ Int.toString (Unsynchronized.inc nref) ^
+  skolem_infix ^ (if var_name = "" then "g" else flatten_name var_name)
+
 fun rhs_extra_types lhsT rhs =
   let val lhs_vars = Term.add_tfreesT lhsT []
       fun add_new_TFrees (TFree v) =
@@ -78,10 +87,10 @@
 fun declare_skofuns s th =
   let
     val nref = Unsynchronized.ref 0    (* FIXME ??? *)
-    fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs (_, T, p))) (axs, thy) =
+    fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs (s', T, p))) (axs, thy) =
           (*Existential: declare a Skolem function, then insert into body and continue*)
           let
-            val cname = skolem_prefix ^ s ^ "_" ^ Int.toString (Unsynchronized.inc nref)
+            val cname = skolem_name s nref s'
             val args0 = OldTerm.term_frees xtp  (*get the formal parameter list*)
             val Ts = map type_of args0
             val extraTs = rhs_extra_types (Ts ---> T) xtp
@@ -110,13 +119,13 @@
 (*Traverse a theorem, accumulating Skolem function definitions.*)
 fun assume_skofuns s th =
   let val sko_count = Unsynchronized.ref 0   (* FIXME ??? *)
-      fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs(_,T,p))) defs =
+      fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs (s', T, p))) defs =
             (*Existential: declare a Skolem function, then insert into body and continue*)
             let val skos = map (#1 o Logic.dest_equals) defs  (*existing sko fns*)
                 val args = subtract (op =) skos (OldTerm.term_frees xtp) (*the formal parameters*)
                 val Ts = map type_of args
                 val cT = Ts ---> T
-                val id = skolem_prefix ^ s ^ "_" ^ Int.toString (Unsynchronized.inc sko_count)
+                val id = skolem_name s sko_count s'
                 val c = Free (id, cT)
                 val rhs = list_abs_free (map dest_Free args,
                                          HOLogic.choice_const T $ xtp)
@@ -337,9 +346,6 @@
   ["defs", "select_defs", "update_defs", "induct", "inducts", "split", "splits",
    "split_asm", "cases", "ext_cases"];
 
-(*Keep the full complexity of the original name*)
-fun flatten_name s = space_implode "_X" (Long_Name.explode s);
-
 fun fake_name th =
   if Thm.has_name_hint th then flatten_name (Thm.get_name_hint th)
   else gensym "unknown_thm_";
@@ -463,7 +469,7 @@
     val (t, frees) = Logic.goal_params (prop_of goal) i
     val hyp_ts = t |> Logic.strip_assums_hyp |> map (curry subst_bounds frees)
     val concl_t = t |> Logic.strip_assums_concl |> curry subst_bounds frees
-  in (rev frees, hyp_ts, concl_t) end
+  in (rev (map dest_Free frees), hyp_ts, concl_t) end
 
 (*** Converting a subgoal into negated conjecture clauses. ***)
 
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fol_clause.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fol_clause.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -107,10 +107,10 @@
 
 fun union_all xss = fold (union (op =)) xss []
 
-(* Provide readable names for the more common symbolic functions *)
+(* Readable names for the more common symbolic functions. Do not mess with the
+   last six entries of the table unless you know what you are doing. *)
 val const_trans_table =
   Symtab.make [(@{const_name "op ="}, "equal"),
-               (@{const_name Orderings.less_eq}, "lessequals"),
                (@{const_name "op &"}, "and"),
                (@{const_name "op |"}, "or"),
                (@{const_name "op -->"}, "implies"),
@@ -120,10 +120,11 @@
                (@{const_name COMBK}, "COMBK"),
                (@{const_name COMBB}, "COMBB"),
                (@{const_name COMBC}, "COMBC"),
-               (@{const_name COMBS}, "COMBS")];
+               (@{const_name COMBS}, "COMBS")]
 
 val type_const_trans_table =
-  Symtab.make [("*", "prod"), ("+", "sum"), ("~=>", "map")];
+  Symtab.make [(@{type_name "*"}, "prod"),
+               (@{type_name "+"}, "sum")]
 
 (*Escaping of special characters.
   Alphanumeric characters are left unchanged.
@@ -191,7 +192,8 @@
       tvar_prefix ^ (ascii_of_indexname (trim_type_var x,i));
 fun make_fixed_type_var x = tfree_prefix ^ (ascii_of (trim_type_var x));
 
-val max_dfg_symbol_length = 63
+val max_dfg_symbol_length =
+  if is_new_spass_version then 1000000 (* arbitrary large number *) else 63
 
 (* HACK because SPASS 3.0 truncates identifiers to 63 characters. *)
 fun controlled_length dfg s =
@@ -263,7 +265,9 @@
     val s' =
       if s' = "" orelse not (Char.isAlpha (String.sub (s', 0))) then "X" ^ s'
       else s'
-    val s' = if s' = "op" then full_name else s'
+    (* Avoid "equal", since it's built into ATPs; and "op" is very ambiguous
+       ("op &", "op |", etc.). *)
+    val s' = if s' = "equal" orelse s' = "op" then full_name else s'
   in
     case (Char.isLower (String.sub (full_name, 0)),
           Char.isLower (String.sub (s', 0))) of
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_hol_clause.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_hol_clause.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -487,8 +487,8 @@
           fold count_constants_clause conjectures (Symtab.empty, Symtab.empty)
        |> fold count_constants_clause extra_clauses
        |> fold count_constants_clause helper_clauses
-     val _ = List.app (display_arity explicit_apply const_needs_hBOOL)
-                      (Symtab.dest (const_min_arity))
+     val _ = app (display_arity explicit_apply const_needs_hBOOL)
+                 (Symtab.dest (const_min_arity))
      in (const_min_arity, const_needs_hBOOL) end
   else (Symtab.empty, Symtab.empty);
 
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -96,11 +96,9 @@
    ("relevance_threshold", "50"),
    ("convergence", "320"),
    ("theory_relevant", "smart"),
-   ("higher_order", "smart"),
    ("follow_defs", "false"),
    ("isar_proof", "false"),
    ("shrink_factor", "1"),
-   ("sorts", "false"),
    ("minimize_timeout", "5 s")]
 
 val alias_params =
@@ -113,14 +111,12 @@
    ("implicit_apply", "explicit_apply"),
    ("ignore_no_atp", "respect_no_atp"),
    ("theory_irrelevant", "theory_relevant"),
-   ("first_order", "higher_order"),
    ("dont_follow_defs", "follow_defs"),
-   ("metis_proof", "isar_proof"),
-   ("no_sorts", "sorts")]
+   ("metis_proof", "isar_proof")]
 
 val params_for_minimize =
   ["debug", "verbose", "overlord", "full_types", "explicit_apply",
-   "higher_order", "isar_proof", "shrink_factor", "sorts", "minimize_timeout"]
+   "isar_proof", "shrink_factor", "minimize_timeout"]
 
 val property_dependent_params = ["atps", "full_types", "timeout"]
 
@@ -200,11 +196,9 @@
       0.01 * Real.fromInt (lookup_int "relevance_threshold")
     val convergence = 0.01 * Real.fromInt (lookup_int "convergence")
     val theory_relevant = lookup_bool_option "theory_relevant"
-    val higher_order = lookup_bool_option "higher_order"
     val follow_defs = lookup_bool "follow_defs"
     val isar_proof = lookup_bool "isar_proof"
     val shrink_factor = Int.max (1, lookup_int "shrink_factor")
-    val sorts = lookup_bool "sorts"
     val timeout = lookup_time "timeout"
     val minimize_timeout = lookup_time "minimize_timeout"
   in
@@ -212,28 +206,33 @@
      full_types = full_types, explicit_apply = explicit_apply,
      respect_no_atp = respect_no_atp, relevance_threshold = relevance_threshold,
      convergence = convergence, theory_relevant = theory_relevant,
-     higher_order = higher_order, follow_defs = follow_defs,
-     isar_proof = isar_proof, shrink_factor = shrink_factor, sorts = sorts,
-     timeout = timeout, minimize_timeout = minimize_timeout}
+     follow_defs = follow_defs, isar_proof = isar_proof,
+     shrink_factor = shrink_factor, timeout = timeout,
+     minimize_timeout = minimize_timeout}
   end
 
 fun get_params thy = extract_params thy (default_raw_params thy)
 fun default_params thy = get_params thy o map (apsnd single)
 
+val subgoal_count = Logic.count_prems o prop_of o #goal o Proof.goal
+
 (* Sledgehammer the given subgoal *)
 
 fun run {atps = [], ...} _ _ _ _ = error "No ATP is set."
   | run (params as {atps, timeout, ...}) i relevance_override minimize_command
-        proof_state =
-    let
-      val birth_time = Time.now ()
-      val death_time = Time.+ (birth_time, timeout)
-      val _ = kill_atps ()  (* race w.r.t. other invocations of Sledgehammer *)
-      val _ = priority "Sledgehammering..."
-      val _ = List.app (start_prover_thread params birth_time death_time i
-                                            relevance_override minimize_command
-                                            proof_state) atps
-    in () end
+        state =
+    case subgoal_count state of
+      0 => priority "No subgoal!"
+    | n =>
+      let
+        val birth_time = Time.now ()
+        val death_time = Time.+ (birth_time, timeout)
+        val _ = kill_atps ()  (* race w.r.t. other Sledgehammer invocations *)
+        val _ = priority "Sledgehammering..."
+        val _ = app (start_prover_thread params birth_time death_time i n
+                                         relevance_override minimize_command
+                                         state) atps
+      in () end
 
 fun minimize override_params i fact_refs state =
   let
@@ -243,8 +242,10 @@
       map o pairf Facts.string_of_ref o ProofContext.get_fact
     val name_thms_pairs = theorems_from_refs ctxt fact_refs
   in
-    priority (#2 (minimize_theorems (get_params thy override_params) i state
-                                    name_thms_pairs))
+    case subgoal_count state of
+      0 => priority "No subgoal!"
+    | n => priority (#2 (minimize_theorems (get_params thy override_params) i n
+                                           state name_thms_pairs))
   end
 
 val sledgehammerN = "sledgehammer"
@@ -265,9 +266,7 @@
 val is_raw_param_relevant_for_minimize =
   member (op =) params_for_minimize o fst o unalias_raw_param
 fun string_for_raw_param (key, values) =
-  key ^ (case space_implode " " values of
-           "" => ""
-         | value => " = " ^ value)
+  key ^ (case space_implode " " values of "" => "" | value => " = " ^ value)
 
 fun minimize_command override_params i atp_name facts =
   sledgehammerN ^ " " ^ minimizeN ^ " [atp = " ^ atp_name ^
@@ -279,7 +278,7 @@
 fun hammer_away override_params subcommand opt_i relevance_override state =
   let
     val thy = Proof.theory_of state
-    val _ = List.app check_raw_param override_params
+    val _ = app check_raw_param override_params
   in
     if subcommand = runN then
       let val i = the_default 1 opt_i in
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -21,11 +21,11 @@
     minimize_command * string * string vector * thm * int
     -> string * string list
   val isar_proof_text:
-    name_pool option * bool * int * bool * Proof.context * int list list
+    name_pool option * bool * int * Proof.context * int list list
     -> minimize_command * string * string vector * thm * int
     -> string * string list
   val proof_text:
-    bool -> name_pool option * bool * int * bool * Proof.context * int list list
+    bool -> name_pool option * bool * int * Proof.context * int list list
     -> minimize_command * string * string vector * thm * int
     -> string * string list
 end;
@@ -33,6 +33,7 @@
 structure Sledgehammer_Proof_Reconstruct : SLEDGEHAMMER_PROOF_RECONSTRUCT =
 struct
 
+open Sledgehammer_Util
 open Sledgehammer_FOL_Clause
 open Sledgehammer_Fact_Preprocessor
 
@@ -49,14 +50,29 @@
       SOME s' => s'
     | NONE => s
 
+fun smart_lambda v t =
+    Abs (case v of
+           Const (s, _) =>
+           List.last (space_explode skolem_infix (Long_Name.base_name s))
+         | Var ((s, _), _) => s
+         | Free (s, _) => s
+         | _ => "", fastype_of v, abstract_over (v, t))
+
+fun forall_of v t = HOLogic.all_const (fastype_of v) $ smart_lambda v t
+fun exists_of v t = HOLogic.exists_const (fastype_of v) $ smart_lambda v t
+
+datatype ('a, 'b, 'c, 'd, 'e) raw_step =
+  Definition of 'a * 'b * 'c |
+  Inference of 'a * 'd * 'e list
+
 (**** PARSING OF TSTP FORMAT ****)
 
 (* Syntax trees, either term list or formulae *)
-datatype stree = SInt of int | SBranch of string * stree list;
+datatype node = IntLeaf of int | StrNode of string * node list
 
-fun atom x = SBranch (x, [])
+fun atom x = StrNode (x, [])
 
-fun scons (x, y) = SBranch ("cons", [x, y])
+fun scons (x, y) = StrNode ("cons", [x, y])
 val slist_of = List.foldl scons (atom "nil")
 
 (*Strings enclosed in single quotes, e.g. filenames*)
@@ -74,54 +90,63 @@
 (* The "x" argument is not strictly necessary, but without it Poly/ML loops
    forever at compile time. *)
 fun parse_term pool x =
-  (parse_quoted >> atom
-   || parse_integer >> SInt
+     (parse_quoted >> atom
+   || parse_integer >> IntLeaf
    || $$ "$" |-- Symbol.scan_id >> (atom o repair_bool_literal)
    || (Symbol.scan_id >> repair_name pool)
-      -- Scan.optional ($$ "(" |-- parse_terms pool --| $$ ")") [] >> SBranch
+      -- Scan.optional ($$ "(" |-- parse_terms pool --| $$ ")") [] >> StrNode
    || $$ "(" |-- parse_term pool --| $$ ")"
    || $$ "[" |-- Scan.optional (parse_terms pool) [] --| $$ "]" >> slist_of) x
 and parse_terms pool x =
   (parse_term pool ::: Scan.repeat ($$ "," |-- parse_term pool)) x
 
-fun negate_stree t = SBranch ("c_Not", [t])
-fun equate_strees t1 t2 = SBranch ("c_equal", [t1, t2]);
+fun negate_node u = StrNode ("c_Not", [u])
+fun equate_nodes u1 u2 = StrNode ("c_equal", [u1, u2])
 
 (* Apply equal or not-equal to a term. *)
-fun repair_predicate_term (t, NONE) = t
-  | repair_predicate_term (t1, SOME (NONE, t2)) = equate_strees t1 t2
-  | repair_predicate_term (t1, SOME (SOME _, t2)) =
-    negate_stree (equate_strees t1 t2)
+fun repair_predicate_term (u, NONE) = u
+  | repair_predicate_term (u1, SOME (NONE, u2)) = equate_nodes u1 u2
+  | repair_predicate_term (u1, SOME (SOME _, u2)) =
+    negate_node (equate_nodes u1 u2)
 fun parse_predicate_term pool =
   parse_term pool -- Scan.option (Scan.option ($$ "!") --| $$ "="
                                   -- parse_term pool)
   >> repair_predicate_term
-(*Literals can involve negation, = and !=.*)
+(* Literals can involve "~", "=", and "!=". *)
 fun parse_literal pool x =
-  ($$ "~" |-- parse_literal pool >> negate_stree || parse_predicate_term pool) x
+  ($$ "~" |-- parse_literal pool >> negate_node || parse_predicate_term pool) x
 
 fun parse_literals pool =
   parse_literal pool ::: Scan.repeat ($$ "|" |-- parse_literal pool)
 
-(* Clause: a list of literals separated by the disjunction sign. *)
+(* Clause: a list of literals separated by disjunction operators ("|"). *)
 fun parse_clause pool =
   $$ "(" |-- parse_literals pool --| $$ ")" || Scan.single (parse_literal pool)
 
-fun ints_of_stree (SInt n) = cons n
-  | ints_of_stree (SBranch (_, ts)) = fold ints_of_stree ts
+fun ints_of_node (IntLeaf n) = cons n
+  | ints_of_node (StrNode (_, us)) = fold ints_of_node us
 val parse_tstp_annotations =
   Scan.optional ($$ "," |-- parse_term NONE
                    --| Scan.option ($$ "," |-- parse_terms NONE)
-                 >> (fn source => ints_of_stree source [])) []
+                 >> (fn source => ints_of_node source [])) []
+
+fun parse_definition pool =
+  $$ "(" |-- parse_literal NONE --| Scan.this_string "<=>"
+  -- parse_clause pool --| $$ ")"
 
-(* <cnf_annotated> ::= cnf(<name>, <formula_role>, <cnf_formula> <annotations>).
-   The <name> could be an identifier, but we assume integers. *)
-fun retuple_tstp_line ((name, ts), deps) = (name, ts, deps)
+(* Syntax: cnf(<num>, <formula_role>, <cnf_formula> <annotations>).
+   The <num> could be an identifier, but we assume integers. *)
+fun finish_tstp_definition_line (num, (u, us)) = Definition (num, u, us)
+fun finish_tstp_inference_line ((num, us), deps) = Inference (num, us, deps)
 fun parse_tstp_line pool =
-  (Scan.this_string "cnf" -- $$ "(") |-- parse_integer --| $$ ","
-   --| Symbol.scan_id --| $$ "," -- parse_clause pool -- parse_tstp_annotations
-   --| $$ ")" --| $$ "."
-  >> retuple_tstp_line
+     ((Scan.this_string "fof" -- $$ "(") |-- parse_integer --| $$ ","
+       --| Scan.this_string "definition" --| $$ "," -- parse_definition pool
+       --| parse_tstp_annotations --| $$ ")" --| $$ "."
+      >> finish_tstp_definition_line)
+  || ((Scan.this_string "cnf" -- $$ "(") |-- parse_integer --| $$ ","
+       --| Symbol.scan_id --| $$ "," -- parse_clause pool
+       -- parse_tstp_annotations --| $$ ")" --| $$ "."
+      >> finish_tstp_inference_line)
 
 (**** PARSING OF SPASS OUTPUT ****)
 
@@ -142,22 +167,22 @@
   Scan.repeat (parse_starred_predicate_term pool) --| $$ "-" --| $$ ">"
   -- Scan.repeat (parse_starred_predicate_term pool)
   >> (fn ([], []) => [atom "c_False"]
-       | (clauses1, clauses2) => map negate_stree clauses1 @ clauses2)
+       | (clauses1, clauses2) => map negate_node clauses1 @ clauses2)
 
-(* Syntax: <name>[0:<inference><annotations>] ||
+(* Syntax: <num>[0:<inference><annotations>] ||
            <cnf_formulas> -> <cnf_formulas>. *)
-fun retuple_spass_line ((name, deps), ts) = (name, ts, deps)
+fun finish_spass_line ((num, deps), us) = Inference (num, us, deps)
 fun parse_spass_line pool =
   parse_integer --| $$ "[" --| $$ "0" --| $$ ":" --| Symbol.scan_id
   -- parse_spass_annotations --| $$ "]" --| $$ "|" --| $$ "|"
   -- parse_horn_clause pool --| $$ "."
-  >> retuple_spass_line
+  >> finish_spass_line
 
 fun parse_line pool = fst o (parse_tstp_line pool || parse_spass_line pool)
 
 (**** INTERPRETATION OF TSTP SYNTAX TREES ****)
 
-exception STREE of stree;
+exception NODE of node
 
 (*If string s has the prefix s1, return the result of deleting it.*)
 fun strip_prefix s1 s =
@@ -180,24 +205,21 @@
 
 (*Type variables are given the basic sort, HOL.type. Some will later be constrained
   by information from type literals, or by type inference.*)
-fun type_of_stree t =
-  case t of
-      SInt _ => raise STREE t
-    | SBranch (a,ts) =>
-        let val Ts = map type_of_stree ts
-        in
-          case strip_prefix tconst_prefix a of
-              SOME b => Type(invert_type_const b, Ts)
-            | NONE =>
-                if not (null ts) then raise STREE t  (*only tconsts have type arguments*)
-                else
-                case strip_prefix tfree_prefix a of
-                    SOME b => TFree("'" ^ b, HOLogic.typeS)
-                  | NONE =>
-                case strip_prefix tvar_prefix a of
-                    SOME b => make_tvar b
-                  | NONE => make_tparam a  (* Variable from the ATP, say "X1" *)
-        end;
+fun type_of_node (u as IntLeaf _) = raise NODE u
+  | type_of_node (u as StrNode (a, us)) =
+    let val Ts = map type_of_node us in
+      case strip_prefix tconst_prefix a of
+        SOME b => Type (invert_type_const b, Ts)
+      | NONE =>
+        if not (null us) then
+          raise NODE u  (*only tconsts have type arguments*)
+        else case strip_prefix tfree_prefix a of
+          SOME b => TFree ("'" ^ b, HOLogic.typeS)
+        | NONE =>
+          case strip_prefix tvar_prefix a of
+            SOME b => make_tvar b
+          | NONE => make_tparam a  (* Variable from the ATP, say "X1" *)
+    end
 
 (*Invert the table of translations between Isabelle and ATPs*)
 val const_trans_table_inv =
@@ -212,62 +234,81 @@
 (*Generates a constant, given its type arguments*)
 fun const_of thy (a,Ts) = Const(a, Sign.const_instance thy (a,Ts));
 
+fun fix_atp_variable_name s =
+  let
+    fun subscript_name s n = s ^ nat_subscript n
+    val s = String.map Char.toLower s
+  in
+    case space_explode "_" s of
+      [_] => (case take_suffix Char.isDigit (String.explode s) of
+                (cs1 as _ :: _, cs2 as _ :: _) =>
+                subscript_name (String.implode cs1)
+                               (the (Int.fromString (String.implode cs2)))
+              | (_, _) => s)
+    | [s1, s2] => (case Int.fromString s2 of
+                     SOME n => subscript_name s1 n
+                   | NONE => s)
+    | _ => s
+  end
+
 (*First-order translation. No types are known for variables. HOLogic.typeT should allow
   them to be inferred.*)
-fun term_of_stree args thy t =
-  case t of
-      SInt _ => raise STREE t
-    | SBranch ("hBOOL", [t]) => term_of_stree [] thy t  (*ignore hBOOL*)
-    | SBranch ("hAPP", [t, u]) => term_of_stree (u::args) thy t
-    | SBranch (a, ts) =>
-        case strip_prefix const_prefix a of
-            SOME "equal" =>
-              list_comb(Const (@{const_name "op ="}, HOLogic.typeT), List.map (term_of_stree [] thy) ts)
-          | SOME b =>
-              let val c = invert_const b
-                  val nterms = length ts - num_typargs thy c
-                  val us = List.map (term_of_stree [] thy) (List.take(ts,nterms) @ args)
-                  (*Extra args from hAPP come AFTER any arguments given directly to the
-                    constant.*)
-                  val Ts = List.map type_of_stree (List.drop(ts,nterms))
-              in  list_comb(const_of thy (c, Ts), us)  end
-          | NONE => (*a variable, not a constant*)
-              let val T = HOLogic.typeT
-                  val opr = (*a Free variable is typically a Skolem function*)
-                    case strip_prefix fixed_var_prefix a of
-                        SOME b => Free(b,T)
-                      | NONE =>
-                    case strip_prefix schematic_var_prefix a of
-                        SOME b => make_var (b,T)
-                      | NONE => make_var (a,T)  (* Variable from the ATP, say "X1" *)
-              in  list_comb (opr, List.map (term_of_stree [] thy) (ts@args))  end;
+fun term_of_node args thy u =
+  case u of
+    IntLeaf _ => raise NODE u
+  | StrNode ("hBOOL", [u]) => term_of_node [] thy u  (* ignore hBOOL *)
+  | StrNode ("hAPP", [u1, u2]) => term_of_node (u2 :: args) thy u1
+  | StrNode (a, us) =>
+    case strip_prefix const_prefix a of
+      SOME "equal" =>
+      list_comb (Const (@{const_name "op ="}, HOLogic.typeT),
+                 map (term_of_node [] thy) us)
+    | SOME b =>
+      let
+        val c = invert_const b
+        val nterms = length us - num_typargs thy c
+        val ts = map (term_of_node [] thy) (take nterms us @ args)
+        (*Extra args from hAPP come AFTER any arguments given directly to the
+          constant.*)
+        val Ts = map type_of_node (drop nterms us)
+      in list_comb(const_of thy (c, Ts), ts) end
+    | NONE => (*a variable, not a constant*)
+      let
+        val opr =
+          (* a Free variable is typically a Skolem function *)
+          case strip_prefix fixed_var_prefix a of
+            SOME b => Free (b, HOLogic.typeT)
+          | NONE =>
+            case strip_prefix schematic_var_prefix a of
+              SOME b => make_var (b, HOLogic.typeT)
+            | NONE =>
+              (* Variable from the ATP, say "X1" *)
+              make_var (fix_atp_variable_name a, HOLogic.typeT)
+      in list_comb (opr, map (term_of_node [] thy) (us @ args)) end
 
 (* Type class literal applied to a type. Returns triple of polarity, class,
    type. *)
-fun constraint_of_stree pol (SBranch ("c_Not", [t])) =
-    constraint_of_stree (not pol) t
-  | constraint_of_stree pol t = case t of
-        SInt _ => raise STREE t
-      | SBranch (a, ts) =>
-            (case (strip_prefix class_prefix a, map type_of_stree ts) of
-                 (SOME b, [T]) => (pol, b, T)
-               | _ => raise STREE t);
+fun constraint_of_node pos (StrNode ("c_Not", [u])) =
+    constraint_of_node (not pos) u
+  | constraint_of_node pos u = case u of
+        IntLeaf _ => raise NODE u
+      | StrNode (a, us) =>
+            (case (strip_prefix class_prefix a, map type_of_node us) of
+                 (SOME b, [T]) => (pos, b, T)
+               | _ => raise NODE u)
 
 (** Accumulate type constraints in a clause: negative type literals **)
 
-fun addix (key,z)  = Vartab.map_default (key,[]) (cons z);
+fun add_var (key, z)  = Vartab.map_default (key, []) (cons z)
 
-fun add_constraint ((false, cl, TFree(a,_)), vt) = addix ((a,~1),cl) vt
-  | add_constraint ((false, cl, TVar(ix,_)), vt) = addix (ix,cl) vt
+fun add_constraint ((false, cl, TFree(a,_)), vt) = add_var ((a,~1),cl) vt
+  | add_constraint ((false, cl, TVar(ix,_)), vt) = add_var (ix,cl) vt
   | add_constraint (_, vt) = vt;
 
-fun is_positive_literal (@{const Trueprop} $ t) = is_positive_literal t
-  | is_positive_literal (@{const Not} $ _) = false
+fun is_positive_literal (@{const Not} $ _) = false
   | is_positive_literal t = true
 
-fun negate_term thy (@{const Trueprop} $ t) =
-    @{const Trueprop} $ negate_term thy t
-  | negate_term thy (Const (@{const_name All}, T) $ Abs (s, T', t')) =
+fun negate_term thy (Const (@{const_name All}, T) $ Abs (s, T', t')) =
     Const (@{const_name Ex}, T) $ Abs (s, T', negate_term thy t')
   | negate_term thy (Const (@{const_name Ex}, T) $ Abs (s, T', t')) =
     Const (@{const_name All}, T) $ Abs (s, T', negate_term thy t')
@@ -277,12 +318,8 @@
     @{const "op |"} $ negate_term thy t1 $ negate_term thy t2
   | negate_term thy (@{const "op |"} $ t1 $ t2) =
     @{const "op &"} $ negate_term thy t1 $ negate_term thy t2
-  | negate_term thy (@{const Not} $ t) = t
-  | negate_term thy t =
-    if fastype_of t = @{typ prop} then
-      HOLogic.mk_Trueprop (negate_term thy (Object_Logic.atomize_term thy t))
-    else
-      @{const Not} $ t
+  | negate_term _ (@{const Not} $ t) = t
+  | negate_term _ t = @{const Not} $ t
 
 fun clause_for_literals _ [] = HOLogic.false_const
   | clause_for_literals _ [lit] = lit
@@ -302,11 +339,10 @@
          |> clause_for_literals thy
 
 (*Accumulate sort constraints in vt, with "real" literals in lits.*)
-fun lits_of_strees thy (vt, lits) [] = (vt, finish_clause thy lits)
-  | lits_of_strees thy (vt, lits) (t :: ts) =
-    lits_of_strees thy (add_constraint (constraint_of_stree true t, vt), lits)
-                   ts
-    handle STREE _ => lits_of_strees thy (vt, term_of_stree [] thy t :: lits) ts
+fun lits_of_nodes thy (vt, lits) [] = (vt, finish_clause thy lits)
+  | lits_of_nodes thy (vt, lits) (u :: us) =
+    lits_of_nodes thy (add_constraint (constraint_of_node true u, vt), lits) us
+    handle NODE _ => lits_of_nodes thy (vt, term_of_node [] thy u :: lits) us
 
 (*Update TVars/TFrees with detected sort constraints.*)
 fun repair_sorts vt =
@@ -318,122 +354,133 @@
         | tmsubst (Var (xi, T)) = Var (xi, tysubst T)
         | tmsubst (t as Bound _) = t
         | tmsubst (Abs (a, T, t)) = Abs (a, tysubst T, tmsubst t)
-        | tmsubst (t $ u) = tmsubst t $ tmsubst u;
+        | tmsubst (t1 $ t2) = tmsubst t1 $ tmsubst t2
   in not (Vartab.is_empty vt) ? tmsubst end;
 
-(*Interpret a list of syntax trees as a clause, given by "real" literals and sort constraints.
-  vt0 holds the initial sort constraints, from the conjecture clauses.*)
-fun clause_of_strees ctxt vt ts =
-  let val (vt, dt) = lits_of_strees (ProofContext.theory_of ctxt) (vt, []) ts in
-    dt |> repair_sorts vt |> TypeInfer.constrain HOLogic.boolT
-       |> Syntax.check_term ctxt
+(* Interpret a list of syntax trees as a clause, given by "real" literals and
+   sort constraints. "vt" holds the initial sort constraints, from the
+   conjecture clauses. *)
+fun clause_of_nodes ctxt vt us =
+  let val (vt, dt) = lits_of_nodes (ProofContext.theory_of ctxt) (vt, []) us in
+    dt |> repair_sorts vt
   end
-
-fun gen_all_vars t = fold_rev Logic.all (OldTerm.term_vars t) t;
+fun check_clause ctxt =
+  TypeInfer.constrain HOLogic.boolT
+  #> Syntax.check_term (ProofContext.set_mode ProofContext.mode_schematic ctxt)
+fun checked_clause_of_nodes ctxt = check_clause ctxt oo clause_of_nodes ctxt
 
-fun decode_line vt0 (name, ts, deps) ctxt =
-  let val cl = clause_of_strees ctxt vt0 ts in
-    ((name, cl, deps), fold Variable.declare_term (OldTerm.term_frees cl) ctxt)
-  end
-
-(** Global sort constraints on TFrees (from tfree_tcs) are positive unit clauses. **)
+(** Global sort constraints on TFrees (from tfree_tcs) are positive unit
+    clauses. **)
 
-fun add_tfree_constraint ((true, cl, TFree(a,_)), vt) = addix ((a,~1),cl) vt
-  | add_tfree_constraint (_, vt) = vt;
-
+fun add_tfree_constraint (true, cl, TFree (a, _)) = add_var ((a, ~1), cl)
+  | add_tfree_constraint _ = I
 fun tfree_constraints_of_clauses vt [] = vt
-  | tfree_constraints_of_clauses vt ([lit]::tss) =
-      (tfree_constraints_of_clauses (add_tfree_constraint (constraint_of_stree true lit, vt)) tss
-       handle STREE _ => (*not a positive type constraint: ignore*)
-       tfree_constraints_of_clauses vt tss)
-  | tfree_constraints_of_clauses vt (_::tss) = tfree_constraints_of_clauses vt tss;
+  | tfree_constraints_of_clauses vt ([lit] :: uss) =
+    (tfree_constraints_of_clauses (add_tfree_constraint
+                                          (constraint_of_node true lit) vt) uss
+     handle NODE _ => (* Not a positive type constraint? Ignore the literal. *)
+     tfree_constraints_of_clauses vt uss)
+  | tfree_constraints_of_clauses vt (_ :: uss) =
+    tfree_constraints_of_clauses vt uss
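+(* Only unit clauses are inspected here: a clause whose single literal is a
+   positive class constraint on a free type variable (e.g. the "tfree_tcs"
+   clause for "'a :: Orderings.linorder") records that class for the TFree in
+   "vt"; all other clauses are skipped. *)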
 
 
 (**** Translation of TSTP files to Isar Proofs ****)
 
-fun decode_lines ctxt tuples =
-  let val vt0 = tfree_constraints_of_clauses Vartab.empty (map #2 tuples) in
-    #1 (fold_map (decode_line vt0) tuples ctxt)
-  end
+fun unvarify_term (Var ((s, 0), T)) = Free (s, T)
+  | unvarify_term t = raise TERM ("unvarify_term: non-Var", [t])
 
-fun unequal t (_, t', _) = not (t aconv t');
+fun clauses_in_lines (Definition (_, u, us)) = u :: us
+  | clauses_in_lines (Inference (_, us, _)) = us
 
-(*No "real" literals means only type information*)
-fun eq_types t = t aconv HOLogic.true_const;
+fun decode_line vt (Definition (num, u, us)) ctxt =
+    let
+      val cl1 = clause_of_nodes ctxt vt [u]
+      val vars = snd (strip_comb cl1)
+      val frees = map unvarify_term vars
+      val unvarify_args = subst_atomic (vars ~~ frees)
+      val cl2 = clause_of_nodes ctxt vt us
+      val (cl1, cl2) =
+        HOLogic.eq_const HOLogic.typeT $ cl1 $ cl2
+        |> unvarify_args |> check_clause ctxt |> HOLogic.dest_eq
+    in
+      (Definition (num, cl1, cl2),
+       fold Variable.declare_term (maps OldTerm.term_frees [cl1, cl2]) ctxt)
+    end
+  | decode_line vt (Inference (num, us, deps)) ctxt =
+    let val cl = us |> clause_of_nodes ctxt vt |> check_clause ctxt in
+      (Inference (num, cl, deps),
+       fold Variable.declare_term (OldTerm.term_frees cl) ctxt)
+    end
+fun decode_lines ctxt lines =
+  let
+    val vt = tfree_constraints_of_clauses Vartab.empty
+                                          (map clauses_in_lines lines)
+  in #1 (fold_map (decode_line vt) lines ctxt) end
 
-fun replace_dep (old, new) dep = if dep = old then new else [dep]
-fun replace_deps p (num, t, deps) =
-  (num, t, fold (union (op =) o replace_dep p) deps [])
+fun aint_inference _ (Definition _) = true
+  | aint_inference t (Inference (_, t', _)) = not (t aconv t')
+
+(* No "real" literals means only type information (tfree_tcs, clsrel, or
+   clsarity). *)
+val is_only_type_information = curry (op aconv) HOLogic.true_const
+
+fun replace_one_dep (old, new) dep = if dep = old then new else [dep]
+fun replace_deps_in_line _ (line as Definition _) = line
+  | replace_deps_in_line p (Inference (num, t, deps)) =
+    Inference (num, t, fold (union (op =) o replace_one_dep p) deps [])
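+(* For example, replace_one_dep (57, [42, 43]) maps the dependency 57 to
+   [42, 43] and leaves any other dependency unchanged; with an empty
+   replacement list, references to the given line simply disappear. *)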
 
 (*Discard axioms; consolidate adjacent lines that prove the same clause, since they differ
   only in type information.*)
-fun add_line thm_names (num, t, []) lines =
-      (* No dependencies: axiom or conjecture clause *)
-      if is_axiom_clause_number thm_names num then
-        (* Axioms are not proof lines *)
-        if eq_types t then
-          (* Must be clsrel/clsarity: type information, so delete refs to it *)
-          map (replace_deps (num, [])) lines
-        else
-          (case take_prefix (unequal t) lines of
-             (_,[]) => lines                  (*no repetition of proof line*)
-           | (pre, (num', _, _) :: post) =>   (*repetition: replace later line by earlier one*)
-               pre @ map (replace_deps (num', [num])) post)
-      else
-        (num, t, []) :: lines
-  | add_line _ (num, t, deps) lines =
-      if eq_types t then (num, t, deps) :: lines
-      (*Type information will be deleted later; skip repetition test.*)
-      else (*FIXME: Doesn't this code risk conflating proofs involving different types??*)
-      case take_prefix (unequal t) lines of
-         (_,[]) => (num, t, deps) :: lines  (*no repetition of proof line*)
-       | (pre, (num', t', _) :: post) =>
-           (num, t', deps) ::               (*repetition: replace later line by earlier one*)
-           (pre @ map (replace_deps (num', [num])) post);
+fun add_line _ (line as Definition _) lines = line :: lines
+  | add_line thm_names (Inference (num, t, [])) lines =
+    (* No dependencies: axiom or conjecture clause *)
+    if is_axiom_clause_number thm_names num then
+      (* Axioms are not proof lines. *)
+      if is_only_type_information t then
+        map (replace_deps_in_line (num, [])) lines
+      (* Is there a repetition? If so, replace later line by earlier one. *)
+      else case take_prefix (aint_inference t) lines of
+        (_, []) => lines (*no repetition of proof line*)
+      | (pre, Inference (num', _, _) :: post) =>
+        pre @ map (replace_deps_in_line (num', [num])) post
+    else
+      Inference (num, t, []) :: lines
+  | add_line _ (Inference (num, t, deps)) lines =
+    (* Type information will be deleted later; skip repetition test. *)
+    if is_only_type_information t then
+      Inference (num, t, deps) :: lines
+    (* Is there a repetition? If so, replace later line by earlier one. *)
+    else case take_prefix (aint_inference t) lines of
+      (* FIXME: Doesn't this code risk conflating proofs involving different
+         types?? *)
+       (_, []) => Inference (num, t, deps) :: lines
+     | (pre, Inference (num', t', _) :: post) =>
+       Inference (num, t', deps) ::
+       pre @ map (replace_deps_in_line (num', [num])) post
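+(* Example: if axiom clause 42 and a later inference line 57 prove the same
+   clause, line 57 is dropped and every later reference to 57 is redirected
+   to 42. *)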
 
-(*Recursively delete empty lines (type information) from the proof.*)
-fun add_nonnull_line (num, t, []) lines = (*no dependencies, so a conjecture clause*)
-    if eq_types t then
-      (*must be type information, tfree_tcs, clsrel, clsarity: delete refs to it*)
-      delete_dep num lines
-    else
-      (num, t, []) :: lines
-  | add_nonnull_line (num, t, deps) lines = (num, t, deps) :: lines
+(* Recursively delete empty lines (type information) from the proof. *)
+fun add_nontrivial_line (Inference (num, t, [])) lines =
+    if is_only_type_information t then delete_dep num lines
+    else Inference (num, t, []) :: lines
+  | add_nontrivial_line line lines = line :: lines
 and delete_dep num lines =
-  fold_rev add_nonnull_line (map (replace_deps (num, [])) lines) []
-
-fun bad_free (Free (a,_)) = String.isPrefix skolem_prefix a
-  | bad_free _ = false;
+  fold_rev add_nontrivial_line (map (replace_deps_in_line (num, [])) lines) []
 
-fun add_desired_line ctxt (num, t, []) (j, lines) =
-    (j, (num, t, []) :: lines)  (* conjecture clauses must be kept *)
-  | add_desired_line ctxt (num, t, deps) (j, lines) =
-    (j + 1,
-     if eq_types t orelse not (null (Term.add_tvars t [])) orelse
-        exists_subterm bad_free t orelse length deps < 2 then
-       map (replace_deps (num, deps)) lines  (* delete line *)
-     else
-       (num, t, deps) :: lines)
+fun is_bad_free (Free (a, _)) = String.isPrefix skolem_prefix a
+  | is_bad_free _ = false
 
-(* ### *)
-(*Replace numeric proof lines by strings, either from thm_names or sequential line numbers*)
-fun stringify_deps thm_names deps_map [] = []
-  | stringify_deps thm_names deps_map ((num, t, deps) :: lines) =
-    if is_axiom_clause_number thm_names num then
-      (Vector.sub (thm_names, num - 1), t, []) ::
-      stringify_deps thm_names deps_map lines
-    else
-      let
-        val label = Int.toString (length deps_map)
-        fun string_for_num num =
-          if is_axiom_clause_number thm_names num then
-            SOME (Vector.sub (thm_names, num - 1))
-          else
-            AList.lookup (op =) deps_map num
-      in
-        (label, t, map_filter string_for_num (distinct (op=) deps)) ::
-        stringify_deps thm_names ((num, label) :: deps_map) lines
-      end
+fun add_desired_line _ _ (line as Definition _) (j, lines) = (j, line :: lines)
+  | add_desired_line ctxt _ (Inference (num, t, [])) (j, lines) =
+    (j, Inference (num, t, []) :: lines)  (* conjecture clauses must be kept *)
+  | add_desired_line ctxt shrink_factor (Inference (num, t, deps)) (j, lines) =
+    (j + 1,
+     if is_only_type_information t orelse
+        not (null (Term.add_tvars t [])) orelse
+        exists_subterm is_bad_free t orelse
+        (length deps < 2 orelse j mod shrink_factor <> 0) then
+       map (replace_deps_in_line (num, deps)) lines  (* delete line *)
+     else
+       Inference (num, t, deps) :: lines)
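+(* E.g. with shrink_factor = 3, at most every third genuine inference is kept
+   as an explicit Isar step; dropped lines have their dependencies inlined
+   into the lines that referred to them. *)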
 
 (** EXTRACTING LEMMAS **)
 
@@ -485,20 +532,22 @@
     val n = Logic.count_prems (prop_of goal)
   in (metis_line i n lemmas ^ minimize_line minimize_command lemmas, lemmas) end
 
-val is_valid_line = String.isPrefix "cnf(" orf String.isSubstring "||"
+val is_valid_line =
+  String.isPrefix "fof(" orf String.isPrefix "cnf(" orf String.isSubstring "||"
 
-(** NEW PROOF RECONSTRUCTION CODE **)
+(** Isar proof construction and manipulation **)
+
+fun merge_fact_sets (ls1, ss1) (ls2, ss2) =
+  (union (op =) ls1 ls2, union (op =) ss1 ss2)
 
 type label = string * int
 type facts = label list * string list
 
-fun merge_fact_sets (ls1, ss1) (ls2, ss2) =
-  (union (op =) ls1 ls2, union (op =) ss1 ss2)
-
 datatype qualifier = Show | Then | Moreover | Ultimately
 
 datatype step =
-  Fix of term list |
+  Fix of (string * typ) list |
+  Let of term * term |
   Assume of label * term |
   Have of qualifier list * label * term * byline
 and byline =
@@ -509,19 +558,20 @@
 val assum_prefix = "A"
 val fact_prefix = "F"
 
-(* ###
-fun add_fact_from_dep s =
-  case Int.fromString s of
-    SOME n => apfst (cons (raw_prefix, n))
-  | NONE => apsnd (cons s)
-*)
+fun add_fact_from_dep thm_names num =
+  if is_axiom_clause_number thm_names num then
+    apsnd (insert (op =) (Vector.sub (thm_names, num - 1)))
+  else
+    apfst (insert (op =) (raw_prefix, num))
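+(* A dependency that refers to an axiom clause contributes the corresponding
+   theorem name from "thm_names" (later passed to metis), whereas any other
+   dependency contributes the label of the proof line it refers to. *)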
 
-val add_fact_from_dep = apfst o cons o pair raw_prefix
+fun forall_vars t = fold_rev forall_of (map Var (Term.add_vars t [])) t
 
-fun step_for_tuple _ (label, t, []) = Assume ((raw_prefix, label), t)
-  | step_for_tuple j (label, t, deps) =
-    Have (if j = 1 then [Show] else [], (raw_prefix, label), t,
-          Facts (fold add_fact_from_dep deps ([], [])))
+fun step_for_line _ _ (Definition (num, t1, t2)) = Let (t1, t2)
+  | step_for_line _ _ (Inference (num, t, [])) = Assume ((raw_prefix, num), t)
+  | step_for_line thm_names j (Inference (num, t, deps)) =
+    Have (if j = 1 then [Show] else [], (raw_prefix, num),
+          forall_vars t,
+          Facts (fold (add_fact_from_dep thm_names) deps ([], [])))
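+(* Proof lines are turned into Isar steps as follows: definitions become
+   "let" steps, lines with no dependencies (conjecture clauses) become
+   "assume" steps, and all other inferences become "have" steps -- the last
+   one (j = 1) becomes "show" -- with their clauses closed universally over
+   the remaining schematic variables. *)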
 
 fun strip_spaces_in_list [] = ""
   | strip_spaces_in_list [c1] = if Char.isSpace c1 then "" else str c1
@@ -540,19 +590,20 @@
       str c1 ^ strip_spaces_in_list (c2 :: c3 :: cs)
 val strip_spaces = strip_spaces_in_list o String.explode
 
-fun proof_from_atp_proof pool ctxt atp_proof thm_names frees =
+fun proof_from_atp_proof pool ctxt shrink_factor atp_proof thm_names frees =
   let
-    val tuples =
-      atp_proof |> split_lines |> map strip_spaces
-                |> filter is_valid_line
-                |> map (parse_line pool o explode)
-                |> decode_lines ctxt
-    val tuples = fold_rev (add_line thm_names) tuples []
-    val tuples = fold_rev add_nonnull_line tuples []
-    val tuples = fold_rev (add_desired_line ctxt) tuples (0, []) |> snd
+    val lines =
+      atp_proof
+      |> split_lines |> map strip_spaces |> filter is_valid_line
+      |> map (parse_line pool o explode)
+      |> decode_lines ctxt
+      |> rpair [] |-> fold_rev (add_line thm_names)
+      |> rpair [] |-> fold_rev add_nontrivial_line
+      |> rpair (0, []) |-> fold_rev (add_desired_line ctxt shrink_factor)
+      |> snd
   in
     (if null frees then [] else [Fix frees]) @
-    map2 step_for_tuple (length tuples downto 1) tuples
+    map2 (step_for_line thm_names) (length lines downto 1) lines
   end
 
 val indent_size = 2
@@ -575,6 +626,7 @@
 and using_of proof = fold (union (op =) o using_of_step) proof []
 
 fun new_labels_of_step (Fix _) = []
+  | new_labels_of_step (Let _) = []
   | new_labels_of_step (Assume (l, _)) = [l]
   | new_labels_of_step (Have (_, l, _, _)) = [l]
 val new_labels_of = maps new_labels_of_step
@@ -607,23 +659,26 @@
 
 val index_in_shape = find_index o exists o curry (op =)
 
-fun direct_proof thy conjecture_shape hyp_ts concl_t proof =
+fun redirect_proof thy conjecture_shape hyp_ts concl_t proof =
   let
     val concl_ls = map (pair raw_prefix) (List.last conjecture_shape)
     fun find_hyp (_, j) = nth hyp_ts (index_in_shape j conjecture_shape)
     fun first_pass ([], contra) = ([], contra)
-      | first_pass ((ps as Fix _) :: proof, contra) =
-        first_pass (proof, contra) |>> cons ps
-      | first_pass ((ps as Assume (l, t)) :: proof, contra) =
+      | first_pass ((step as Fix _) :: proof, contra) =
+        first_pass (proof, contra) |>> cons step
+      | first_pass ((step as Let _) :: proof, contra) =
+        first_pass (proof, contra) |>> cons step
+      | first_pass ((step as Assume (l, t)) :: proof, contra) =
         if member (op =) concl_ls l then
-          first_pass (proof, contra ||> cons ps)
+          first_pass (proof, contra ||> cons step)
         else
           first_pass (proof, contra) |>> cons (Assume (l, find_hyp l))
-      | first_pass ((ps as Have (qs, l, t, Facts (ls, ss))) :: proof, contra) =
+      | first_pass ((step as Have (qs, l, t, Facts (ls, ss))) :: proof,
+                    contra) =
         if exists (member (op =) (fst contra)) ls then
-          first_pass (proof, contra |>> cons l ||> cons ps)
+          first_pass (proof, contra |>> cons l ||> cons step)
         else
-          first_pass (proof, contra) |>> cons ps
+          first_pass (proof, contra) |>> cons step
       | first_pass _ = raise Fail "malformed proof"
     val (proof_top, (contra_ls, contra_proof)) =
       first_pass (proof, (concl_ls, []))
@@ -690,20 +745,18 @@
              end
            | _ => raise Fail "malformed proof")
        | second_pass _ _ = raise Fail "malformed proof"
-    val (proof_bottom, _) =
-      second_pass [Show] (contra_proof, [], ([], ([], [])))
+    val proof_bottom =
+      second_pass [Show] (contra_proof, [], ([], ([], []))) |> fst
   in proof_top @ proof_bottom end
 
 val kill_duplicate_assumptions_in_proof =
   let
     fun relabel_facts subst =
       apfst (map (fn l => AList.lookup (op =) subst l |> the_default l))
-    fun do_step (ps as Fix _) (proof, subst, assums) =
-        (ps :: proof, subst, assums)
-      | do_step (ps as Assume (l, t)) (proof, subst, assums) =
+    fun do_step (step as Assume (l, t)) (proof, subst, assums) =
         (case AList.lookup (op aconv) assums t of
            SOME l' => (proof, (l', l) :: subst, assums)
-         | NONE => (ps :: proof, subst, (t, l) :: assums))
+         | NONE => (step :: proof, subst, (t, l) :: assums))
       | do_step (Have (qs, l, t, by)) (proof, subst, assums) =
         (Have (qs, l, t,
                case by of
@@ -711,17 +764,29 @@
                | CaseSplit (proofs, facts) =>
                  CaseSplit (map do_proof proofs, relabel_facts subst facts)) ::
          proof, subst, assums)
+      | do_step step (proof, subst, assums) = (step :: proof, subst, assums)
     and do_proof proof = fold do_step proof ([], [], []) |> #1 |> rev
   in do_proof end
 
-(* FIXME: implement *)
-fun shrink_proof shrink_factor proof = proof
+
+(* Hack: Could return false positives (e.g., if a user happens to declare a
+   constant called "SomeTheory.sko_means_shoe_in_$wedish"). *)
+val is_skolem_const_name =
+  Long_Name.base_name
+  #> String.isPrefix skolem_prefix andf String.isSubstring skolem_infix
+
+fun unskolemize_term t =
+  fold exists_of (Term.add_consts t []
+                  |> filter (is_skolem_const_name o fst) |> map Const) t
+
+fun unskolemize_step (Have (qs, l, t, by)) =
+    Have (qs, l, unskolemize_term t, by)
+  | unskolemize_step step = step
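+(* In effect, every constant whose name looks Skolem-generated is abstracted
+   out and bound by an existential quantifier, so a "have"/"show" clause
+   mentioning a Skolem constant c becomes roughly "EX c. ..."; "assume" and
+   other steps are left alone. *)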
 
 val then_chain_proof =
   let
     fun aux _ [] = []
-      | aux _ ((ps as Fix _) :: proof) = ps :: aux no_label proof
-      | aux _ ((ps as Assume (l, _)) :: proof) = ps :: aux l proof
+      | aux _ ((step as Assume (l, _)) :: proof) = step :: aux l proof
       | aux l' (Have (qs, l, t, by) :: proof) =
         (case by of
            Facts (ls, ss) =>
@@ -733,20 +798,21 @@
          | CaseSplit (proofs, facts) =>
            Have (qs, l, t, CaseSplit (map (aux no_label) proofs, facts))) ::
         aux l proof
+      | aux _ (step :: proof) = step :: aux no_label proof
   in aux no_label end
 
 fun kill_useless_labels_in_proof proof =
   let
     val used_ls = using_of proof
     fun do_label l = if member (op =) used_ls l then l else no_label
-    fun kill (Fix ts) = Fix ts
-      | kill (Assume (l, t)) = Assume (do_label l, t)
+    fun kill (Assume (l, t)) = Assume (do_label l, t)
       | kill (Have (qs, l, t, by)) =
         Have (qs, do_label l, t,
               case by of
                 CaseSplit (proofs, facts) =>
                 CaseSplit (map (map kill) proofs, facts)
               | _ => by)
+      | kill step = step
   in map kill proof end
 
 fun prefix_for_depth n = replicate_string (n + 1)
@@ -754,8 +820,6 @@
 val relabel_proof =
   let
     fun aux _ _ _ [] = []
-      | aux subst depth nextp ((ps as Fix _) :: proof) =
-        ps :: aux subst depth nextp proof
       | aux subst depth (next_assum, next_fact) (Assume (l, t) :: proof) =
         if l = no_label then
           Assume (l, t) :: aux subst depth (next_assum, next_fact) proof
@@ -773,7 +837,7 @@
               let
                 val l' = (prefix_for_depth depth fact_prefix, next_fact)
               in (l', (l, l') :: subst, next_fact + 1) end
-          val relabel_facts = apfst (map (the o AList.lookup (op =) subst))
+          val relabel_facts = apfst (map_filter (AList.lookup (op =) subst))
           val by =
             case by of
               Facts facts => Facts (relabel_facts facts)
@@ -784,11 +848,19 @@
           Have (qs, l', t, by) ::
           aux subst depth (next_assum, next_fact) proof
         end
+      | aux subst depth nextp (step :: proof) =
+        step :: aux subst depth nextp proof
   in aux [] 0 (1, 1) end
 
-fun string_for_proof ctxt sorts i n =
+fun string_for_proof ctxt i n =
   let
+    fun fix_print_mode f =
+      PrintMode.setmp (filter (curry (op =) Symbol.xsymbolsN)
+                      (print_mode_value ())) f
     fun do_indent ind = replicate_string (ind * indent_size) " "
+    fun do_free (s, T) =
+      maybe_quote s ^ " :: " ^
+      maybe_quote (fix_print_mode (Syntax.string_of_typ ctxt) T)
     fun do_raw_label (s, j) = s ^ string_of_int j
     fun do_label l = if l = no_label then "" else do_raw_label l ^ ": "
     fun do_have qs =
@@ -798,29 +870,26 @@
          if member (op =) qs Show then "thus" else "hence"
        else
          if member (op =) qs Show then "show" else "have")
-    val do_term =
-      quote o PrintMode.setmp (filter (curry (op =) Symbol.xsymbolsN)
-                                      (print_mode_value ()))
-                              (Syntax.string_of_term ctxt)
+    val do_term = maybe_quote o fix_print_mode (Syntax.string_of_term ctxt)
     fun do_using [] = ""
       | do_using ls = "using " ^ (space_implode " " (map do_raw_label ls)) ^ " "
-    fun do_by_facts [] [] = "by blast"
-      | do_by_facts _ [] = "by metis"
-      | do_by_facts _ ss = "by (metis " ^ space_implode " " ss ^ ")"
-    fun do_facts ind (ls, ss) =
-      do_indent (ind + 1) ^ do_using ls ^ do_by_facts ls ss
-    and do_step ind (Fix ts) =
-        do_indent ind ^ "fix " ^ space_implode " and " (map do_term ts) ^ "\n"
+    fun do_by_facts [] = "by metis"
+      | do_by_facts ss = "by (metis " ^ space_implode " " ss ^ ")"
+    fun do_facts (ls, ss) = do_using ls ^ do_by_facts ss
+    and do_step ind (Fix xs) =
+        do_indent ind ^ "fix " ^ space_implode " and " (map do_free xs) ^ "\n"
+      | do_step ind (Let (t1, t2)) =
+        do_indent ind ^ "let " ^ do_term t1 ^ " = " ^ do_term t2 ^ "\n"
       | do_step ind (Assume (l, t)) =
         do_indent ind ^ "assume " ^ do_label l ^ do_term t ^ "\n"
       | do_step ind (Have (qs, l, t, Facts facts)) =
         do_indent ind ^ do_have qs ^ " " ^
-        do_label l ^ do_term t ^ "\n" ^ do_facts ind facts ^ "\n"
+        do_label l ^ do_term t ^ " " ^ do_facts facts ^ "\n"
       | do_step ind (Have (qs, l, t, CaseSplit (proofs, facts))) =
         space_implode (do_indent ind ^ "moreover\n")
                       (map (do_block ind) proofs) ^
-        do_indent ind ^ do_have qs ^ " " ^ do_label l ^ do_term t ^ "\n" ^
-        do_facts ind facts ^ "\n"
+        do_indent ind ^ do_have qs ^ " " ^ do_label l ^ do_term t ^ " " ^
+        do_facts facts ^ "\n"
     and do_steps prefix suffix ind steps =
       let val s = implode (map (do_step ind) steps) in
         replicate_string (ind * indent_size - size prefix) " " ^ prefix ^
@@ -829,14 +898,16 @@
         suffix ^ "\n"
       end
     and do_block ind proof = do_steps "{ " " }" (ind + 1) proof
-    and do_proof proof =
-      (if i <> 1 then "prefer " ^ string_of_int i ^ "\n" else "") ^
-      do_indent 0 ^ "proof -\n" ^
-      do_steps "" "" 1 proof ^
-      do_indent 0 ^ (if n <> 1 then "next" else "qed") ^ "\n"
-  in setmp_CRITICAL show_sorts sorts do_proof end
+    (* One-step proofs are pointless; better use the Metis one-liner. *)
+    and do_proof [_] = ""
+      | do_proof proof =
+        (if i <> 1 then "prefer " ^ string_of_int i ^ "\n" else "") ^
+        do_indent 0 ^ "proof -\n" ^
+        do_steps "" "" 1 proof ^
+        do_indent 0 ^ (if n <> 1 then "next" else "qed") ^ "\n"
+  in do_proof end
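+(* The resulting text has roughly the following shape (sketch only):
+
+     proof -
+       fix x\<^isub>1 :: 'a
+       assume A1: "..."
+       have F1: "..." by (metis ...)
+       hence F2: "..." using A1 by (metis ...)
+       thus "..." by (metis ...)
+     qed
+*)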
 
-fun isar_proof_text (pool, debug, shrink_factor, sorts, ctxt, conjecture_shape)
+fun isar_proof_text (pool, debug, shrink_factor, ctxt, conjecture_shape)
                     (minimize_command, atp_proof, thm_names, goal, i) =
   let
     val thy = ProofContext.theory_of ctxt
@@ -845,14 +916,15 @@
     val (one_line_proof, lemma_names) =
       metis_proof_text (minimize_command, atp_proof, thm_names, goal, i)
     fun isar_proof_for () =
-      case proof_from_atp_proof pool ctxt atp_proof thm_names frees
-           |> direct_proof thy conjecture_shape hyp_ts concl_t
+      case proof_from_atp_proof pool ctxt shrink_factor atp_proof thm_names
+                                frees
+           |> redirect_proof thy conjecture_shape hyp_ts concl_t
            |> kill_duplicate_assumptions_in_proof
-           |> shrink_proof shrink_factor
+           |> map unskolemize_step
            |> then_chain_proof
            |> kill_useless_labels_in_proof
            |> relabel_proof
-           |> string_for_proof ctxt sorts i n of
+           |> string_for_proof ctxt i n of
         "" => ""
       | proof => "\nStructured proof:\n" ^ Markup.markup Markup.sendback proof
     val isar_proof =
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_util.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_util.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -6,6 +6,7 @@
 
 signature SLEDGEHAMMER_UTIL =
 sig
+  val is_new_spass_version : bool
   val pairf : ('a -> 'b) -> ('a -> 'c) -> 'a -> 'b * 'c
   val plural_s : int -> string
   val serial_commas : string -> string list -> string list
@@ -14,11 +15,26 @@
   val timestamp : unit -> string
   val parse_bool_option : bool -> string -> string -> bool option
   val parse_time_option : string -> string -> Time.time option
+  val nat_subscript : int -> string
+  val unyxml : string -> string
+  val maybe_quote : string -> string
 end;
  
 structure Sledgehammer_Util : SLEDGEHAMMER_UTIL =
 struct
 
+val is_new_spass_version =
+  case getenv "SPASS_VERSION" of
+    "" => (case getenv "SPASS_HOME" of
+             "" => false
+           | s =>
+             (* Hack: Preliminary versions of the SPASS 3.7 package don't set
+                "SPASS_VERSION". *)
+             String.isSubstring "/spass-3.7/" s)
+  | s => (case s |> space_explode "." |> map Int.fromString of
+            SOME m :: SOME n :: _ => m > 3 orelse (m = 3 andalso n >= 5)
+          | _ => false)
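+(* Examples: "SPASS_VERSION" values "3.7" or "4.0" count as new, "3.0" does
+   not; if "SPASS_VERSION" is unset, the "SPASS_HOME" path is inspected
+   instead. *)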
+
 fun pairf f g x = (f x, g x)
 
 fun plural_s n = if n = 1 then "" else "s"
@@ -72,4 +88,19 @@
         SOME (Time.fromMilliseconds msecs)
     end
 
+val subscript = implode o map (prefix "\<^isub>") o explode
+val nat_subscript = subscript o string_of_int
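+(* e.g. nat_subscript 42 = "\<^isub>4\<^isub>2" *)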
+
+fun plain_string_from_xml_tree t =
+  Buffer.empty |> XML.add_content t |> Buffer.content
+val unyxml = plain_string_from_xml_tree o YXML.parse
+
+val is_long_identifier = forall Syntax.is_identifier o space_explode "."
+fun maybe_quote y =
+  let val s = unyxml y in
+    y |> ((not (is_long_identifier (perhaps (try (unprefix "'")) s)) andalso
+           not (is_long_identifier (perhaps (try (unprefix "?")) s))) orelse
+           OuterKeyword.is_keyword s) ? quote
+  end
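+(* maybe_quote leaves plain (long) identifiers such as "foo" or "Nat.nat"
+   unchanged, but quotes outer keywords and anything that does not parse as
+   an identifier (modulo a leading "'" or "?"), so that the string can safely
+   be re-read as part of a generated Isar proof. *)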
+
 end;
--- a/src/HOLCF/IOA/meta_theory/Sequence.thy	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/HOLCF/IOA/meta_theory/Sequence.thy	Wed Apr 28 19:46:09 2010 +0200
@@ -10,7 +10,7 @@
 
 default_sort type
 
-types 'a Seq = "'a::type lift seq"
+types 'a Seq = "'a lift seq"
 
 consts
 
--- a/src/Pure/Isar/typedecl.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/Pure/Isar/typedecl.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -82,10 +82,12 @@
 
 (* type abbreviations *)
 
+local
+
 fun gen_abbrev prep_typ (b, vs, mx) raw_rhs lthy =
   let
     val Type (name, _) = global_type lthy (b, map (rpair dummyS) vs);
-    val rhs = prep_typ lthy raw_rhs
+    val rhs = prep_typ b lthy raw_rhs
       handle ERROR msg => cat_error msg ("in type abbreviation " ^ quote (Binding.str_of b));
   in
     lthy
@@ -94,8 +96,23 @@
     |> pair name
   end;
 
-val abbrev = gen_abbrev ProofContext.cert_typ_syntax;
-val abbrev_cmd = gen_abbrev ProofContext.read_typ_syntax;
+fun read_abbrev b ctxt raw_rhs =
+  let
+    val rhs = ProofContext.read_typ_syntax (ctxt |> ProofContext.set_defsort []) raw_rhs;
+    val ignored = Term.fold_atyps_sorts (fn (_, []) => I | (T, _) => insert (op =) T) rhs [];
+    val _ =
+      if null ignored then ()
+      else warning ("Ignoring sort constraints in type variables(s): " ^
+        commas_quote (map (Syntax.string_of_typ ctxt) (rev ignored)) ^
+        "\nin type abbreviation " ^ quote (Binding.str_of b));
+  in rhs end;
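+(* For instance, reading a type abbreviation whose right-hand side is
+   "('a::order) list" triggers the warning above; the "order" constraint has
+   no effect on the resulting abbreviation. *)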
+
+in
+
+val abbrev = gen_abbrev (K ProofContext.cert_typ_syntax);
+val abbrev_cmd = gen_abbrev read_abbrev;
+
+end;
 
 fun abbrev_global decl rhs =
   Theory_Target.init NONE
--- a/src/Pure/axclass.ML	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/Pure/axclass.ML	Wed Apr 28 19:46:09 2010 +0200
@@ -190,9 +190,9 @@
 
 infix 0 RSO;
 
-fun (SOME a RSO SOME b) = SOME (a RS b)
-  | (x RSO NONE) = x
-  | (NONE RSO y) = y;
+fun (SOME a) RSO (SOME b) = SOME (a RS b)
+  | x RSO NONE = x
+  | NONE RSO y = y;
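+(* RSO composes optional theorems: if both operands are present the result is
+   "a RS b"; otherwise the result is whichever operand is SOME (or NONE if
+   neither is). *)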
 
 fun the_classrel thy (c1, c2) =
   (case Symreltab.lookup (proven_classrels_of thy) (c1, c2) of
--- a/src/Tools/jEdit/README_BUILD	Wed Apr 28 16:45:51 2010 +0200
+++ b/src/Tools/jEdit/README_BUILD	Wed Apr 28 19:46:09 2010 +0200
@@ -74,3 +74,17 @@
 releases. (See
 http://java.sun.com/j2se/1.5.0/docs/guide/jpda/conninv.html)
 -----------------------------------------------------------------------
+
+
+Known problems with Mac OS
+==========================
+
+- The MacOSX plugin disrupts regular C-X/C/V operations, e.g. between
+  the editor and the Console plugin, which is a standard Swing text
+  box.  The same holds for search boxes etc.
+
+- Anti-aliasing does not really work as well as for Linux or Windows.
+  (General Apple/Swing problem?)
+
+- Font.createFont mangles the font family of non-regular fonts,
+  e.g. bold.