changeset:   39100:e9467adb8b52
parent:      39079:bddc3d3f6e53
parent:      39099:5c714fd55b1e
child:       39101:606432dd1896
author:      hoelzl
date:        Thu, 02 Sep 2010 20:44:33 +0200
summary:     merged
--- a/Admin/isatest/settings/cygwin-poly-e	Thu Sep 02 20:29:12 2010 +0200
+++ b/Admin/isatest/settings/cygwin-poly-e	Thu Sep 02 20:44:33 2010 +0200
@@ -24,6 +24,4 @@
 
 ISABELLE_USEDIR_OPTIONS="-M 1 -i false -d false"
 
-# Disable while Jasmin is on vacation
-unset KODKODI
-#init_component "$HOME/contrib_devel/kodkodi"
+init_component "$HOME/contrib_devel/kodkodi"
--- a/NEWS	Thu Sep 02 20:29:12 2010 +0200
+++ b/NEWS	Thu Sep 02 20:44:33 2010 +0200
@@ -23,7 +23,7 @@
 at the cost of clarity of file dependencies.  Recall that Isabelle/ML
 files exclusively use the .ML extension.  Minor INCOMPATIBILITY.
 
-* Various options that affect document antiquotations are now properly
+* Various options that affect pretty printing etc. are now properly
 handled within the context via configuration options, instead of
 unsynchronized references.  There are both ML Config.T entities and
 Isar declaration attributes to access these.
@@ -36,15 +36,11 @@
   Thy_Output.source         thy_output_source
   Thy_Output.break          thy_output_break
 
+  show_question_marks       show_question_marks
+
 Note that the corresponding "..._default" references may be only
 changed globally at the ROOT session setup, but *not* within a theory.
 
-* ML structure Unsynchronized never opened, not even in Isar
-interaction mode as before.  Old Unsynchronized.set etc. have been
-discontinued -- use plain := instead.  This should be *rare* anyway,
-since modern tools always work via official context data, notably
-configuration options.
-
 
 *** Pure ***
 
@@ -204,6 +200,9 @@
 derive instantiated and simplified equations for inductive predicates,
 similar to inductive_cases.
 
+* "bij f" is now an abbreviation of "bij_betw f UNIV UNIV". surj_on is a
+generalization of surj, and "surj f" is now an abbreviation of "surj_on f UNIV".
+The theorems bij_def and surj_def are unchanged.
 
 *** FOL ***
 
@@ -217,6 +216,15 @@
 
 *** ML ***
 
+* Configuration option show_question_marks only affects regular pretty
+printing of types and terms, not raw Term.string_of_vname.
+
+* ML structure Unsynchronized never opened, not even in Isar
+interaction mode as before.  Old Unsynchronized.set etc. have been
+discontinued -- use plain := instead.  This should be *rare* anyway,
+since modern tools always work via official context data, notably
+configuration options.
+
 * ML antiquotations @{theory} and @{theory_ref} refer to named
 theories from the ancestry of the current context, not any accidental
 theory loader state as before.  Potential INCOMPATIBILITY, subtle
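
As a minimal sketch (not part of this changeset) of the two access styles announced in the NEWS entries above -- the Isar declaration attribute and the ML Config.T value -- using only the names show_question_marks and show_question_marks_default that occur in the hunks further below; everything else is assumed context:

    (* Isar declaration attribute, scoped to the current theory or proof context *)
    declare [[show_question_marks = false]]

    (* ML access through the corresponding Config.T entity *)
    ML {* Config.get @{context} show_question_marks *}

    (* the "..._default" reference may only be changed globally, e.g. in ROOT.ML *)
    ML {* show_question_marks_default := false *}
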
--- a/doc-src/Classes/Thy/document/Classes.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Classes/Thy/document/Classes.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -1134,26 +1134,27 @@
 \noindent%
 \hspace*{0pt}module Example where {\char123}\\
 \hspace*{0pt}\\
-\hspace*{0pt}data Nat = Zero{\char95}nat | Suc Nat;\\
+\hspace*{0pt}data Nat = Zero{\char95}nat | Suc Example.Nat;\\
 \hspace*{0pt}\\
-\hspace*{0pt}nat{\char95}aux ::~Integer -> Nat -> Nat;\\
-\hspace*{0pt}nat{\char95}aux i n = (if i <= 0 then n else nat{\char95}aux (i - 1) (Suc n));\\
+\hspace*{0pt}nat{\char95}aux ::~Integer -> Example.Nat -> Example.Nat;\\
+\hspace*{0pt}nat{\char95}aux i n =\\
+\hspace*{0pt} ~(if i <= 0 then n else Example.nat{\char95}aux (i - 1) (Example.Suc n));\\
 \hspace*{0pt}\\
-\hspace*{0pt}nat ::~Integer -> Nat;\\
-\hspace*{0pt}nat i = nat{\char95}aux i Zero{\char95}nat;\\
+\hspace*{0pt}nat ::~Integer -> Example.Nat;\\
+\hspace*{0pt}nat i = Example.nat{\char95}aux i Example.Zero{\char95}nat;\\
 \hspace*{0pt}\\
 \hspace*{0pt}class Semigroup a where {\char123}\\
 \hspace*{0pt} ~mult ::~a -> a -> a;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}class (Semigroup a) => Monoidl a where {\char123}\\
+\hspace*{0pt}class (Example.Semigroup a) => Monoidl a where {\char123}\\
 \hspace*{0pt} ~neutral ::~a;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}class (Monoidl a) => Monoid a where {\char123}\\
+\hspace*{0pt}class (Example.Monoidl a) => Monoid a where {\char123}\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}class (Monoid a) => Group a where {\char123}\\
+\hspace*{0pt}class (Example.Monoid a) => Group a where {\char123}\\
 \hspace*{0pt} ~inverse ::~a -> a;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
@@ -1166,32 +1167,32 @@
 \hspace*{0pt}inverse{\char95}int ::~Integer -> Integer;\\
 \hspace*{0pt}inverse{\char95}int i = negate i;\\
 \hspace*{0pt}\\
-\hspace*{0pt}instance Semigroup Integer where {\char123}\\
-\hspace*{0pt} ~mult = mult{\char95}int;\\
+\hspace*{0pt}instance Example.Semigroup Integer where {\char123}\\
+\hspace*{0pt} ~mult = Example.mult{\char95}int;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}instance Monoidl Integer where {\char123}\\
-\hspace*{0pt} ~neutral = neutral{\char95}int;\\
+\hspace*{0pt}instance Example.Monoidl Integer where {\char123}\\
+\hspace*{0pt} ~neutral = Example.neutral{\char95}int;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}instance Monoid Integer where {\char123}\\
+\hspace*{0pt}instance Example.Monoid Integer where {\char123}\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}instance Group Integer where {\char123}\\
-\hspace*{0pt} ~inverse = inverse{\char95}int;\\
+\hspace*{0pt}instance Example.Group Integer where {\char123}\\
+\hspace*{0pt} ~inverse = Example.inverse{\char95}int;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}pow{\char95}nat ::~forall a.~(Monoid a) => Nat -> a -> a;\\
-\hspace*{0pt}pow{\char95}nat Zero{\char95}nat x = neutral;\\
-\hspace*{0pt}pow{\char95}nat (Suc n) x = mult x (pow{\char95}nat n x);\\
+\hspace*{0pt}pow{\char95}nat ::~forall a.~(Example.Monoid a) => Example.Nat -> a -> a;\\
+\hspace*{0pt}pow{\char95}nat Example.Zero{\char95}nat x = Example.neutral;\\
+\hspace*{0pt}pow{\char95}nat (Example.Suc n) x = Example.mult x (Example.pow{\char95}nat n x);\\
 \hspace*{0pt}\\
-\hspace*{0pt}pow{\char95}int ::~forall a.~(Group a) => Integer -> a -> a;\\
+\hspace*{0pt}pow{\char95}int ::~forall a.~(Example.Group a) => Integer -> a -> a;\\
 \hspace*{0pt}pow{\char95}int k x =\\
-\hspace*{0pt} ~(if 0 <= k then pow{\char95}nat (nat k) x\\
-\hspace*{0pt} ~~~else inverse (pow{\char95}nat (nat (negate k)) x));\\
+\hspace*{0pt} ~(if 0 <= k then Example.pow{\char95}nat (Example.nat k) x\\
+\hspace*{0pt} ~~~else Example.inverse (Example.pow{\char95}nat (Example.nat (negate k)) x));\\
 \hspace*{0pt}\\
 \hspace*{0pt}example ::~Integer;\\
-\hspace*{0pt}example = pow{\char95}int 10 (-2);\\
+\hspace*{0pt}example = Example.pow{\char95}int 10 (-2);\\
 \hspace*{0pt}\\
 \hspace*{0pt}{\char125}%
 \end{isamarkuptext}%
@@ -1232,15 +1233,15 @@
 \hspace*{0pt} ~type 'a group\\
 \hspace*{0pt} ~val monoid{\char95}group :~'a group -> 'a monoid\\
 \hspace*{0pt} ~val inverse :~'a group -> 'a -> 'a\\
+\hspace*{0pt} ~val pow{\char95}nat :~'a monoid -> nat -> 'a -> 'a\\
+\hspace*{0pt} ~val pow{\char95}int :~'a group -> IntInf.int -> 'a -> 'a\\
 \hspace*{0pt} ~val mult{\char95}int :~IntInf.int -> IntInf.int -> IntInf.int\\
+\hspace*{0pt} ~val semigroup{\char95}int :~IntInf.int semigroup\\
 \hspace*{0pt} ~val neutral{\char95}int :~IntInf.int\\
-\hspace*{0pt} ~val inverse{\char95}int :~IntInf.int -> IntInf.int\\
-\hspace*{0pt} ~val semigroup{\char95}int :~IntInf.int semigroup\\
 \hspace*{0pt} ~val monoidl{\char95}int :~IntInf.int monoidl\\
 \hspace*{0pt} ~val monoid{\char95}int :~IntInf.int monoid\\
+\hspace*{0pt} ~val inverse{\char95}int :~IntInf.int -> IntInf.int\\
 \hspace*{0pt} ~val group{\char95}int :~IntInf.int group\\
-\hspace*{0pt} ~val pow{\char95}nat :~'a monoid -> nat -> 'a -> 'a\\
-\hspace*{0pt} ~val pow{\char95}int :~'a group -> IntInf.int -> 'a -> 'a\\
 \hspace*{0pt} ~val example :~IntInf.int\\
 \hspace*{0pt}end = struct\\
 \hspace*{0pt}\\
@@ -1266,23 +1267,6 @@
 \hspace*{0pt}val monoid{\char95}group = {\char35}monoid{\char95}group :~'a group -> 'a monoid;\\
 \hspace*{0pt}val inverse = {\char35}inverse :~'a group -> 'a -> 'a;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun mult{\char95}int i j = IntInf.+ (i,~j);\\
-\hspace*{0pt}\\
-\hspace*{0pt}val neutral{\char95}int :~IntInf.int = (0 :~IntInf.int);\\
-\hspace*{0pt}\\
-\hspace*{0pt}fun inverse{\char95}int i = IntInf.{\char126}~i;\\
-\hspace*{0pt}\\
-\hspace*{0pt}val semigroup{\char95}int = {\char123}mult = mult{\char95}int{\char125}~:~IntInf.int semigroup;\\
-\hspace*{0pt}\\
-\hspace*{0pt}val monoidl{\char95}int =\\
-\hspace*{0pt} ~{\char123}semigroup{\char95}monoidl = semigroup{\char95}int,~neutral = neutral{\char95}int{\char125}~:\\
-\hspace*{0pt} ~IntInf.int monoidl;\\
-\hspace*{0pt}\\
-\hspace*{0pt}val monoid{\char95}int = {\char123}monoidl{\char95}monoid = monoidl{\char95}int{\char125}~:~IntInf.int monoid;\\
-\hspace*{0pt}\\
-\hspace*{0pt}val group{\char95}int = {\char123}monoid{\char95}group = monoid{\char95}int,~inverse = inverse{\char95}int{\char125}~:\\
-\hspace*{0pt} ~IntInf.int group;\\
-\hspace*{0pt}\\
 \hspace*{0pt}fun pow{\char95}nat A{\char95}~Zero{\char95}nat x = neutral (monoidl{\char95}monoid A{\char95})\\
 \hspace*{0pt} ~| pow{\char95}nat A{\char95}~(Suc n) x =\\
 \hspace*{0pt} ~~~mult ((semigroup{\char95}monoidl o monoidl{\char95}monoid) A{\char95}) x (pow{\char95}nat A{\char95}~n x);\\
@@ -1292,6 +1276,23 @@
 \hspace*{0pt} ~~~then pow{\char95}nat (monoid{\char95}group A{\char95}) (nat k) x\\
 \hspace*{0pt} ~~~else inverse A{\char95}~(pow{\char95}nat (monoid{\char95}group A{\char95}) (nat (IntInf.{\char126}~k)) x));\\
 \hspace*{0pt}\\
+\hspace*{0pt}fun mult{\char95}int i j = IntInf.+ (i,~j);\\
+\hspace*{0pt}\\
+\hspace*{0pt}val semigroup{\char95}int = {\char123}mult = mult{\char95}int{\char125}~:~IntInf.int semigroup;\\
+\hspace*{0pt}\\
+\hspace*{0pt}val neutral{\char95}int :~IntInf.int = (0 :~IntInf.int);\\
+\hspace*{0pt}\\
+\hspace*{0pt}val monoidl{\char95}int =\\
+\hspace*{0pt} ~{\char123}semigroup{\char95}monoidl = semigroup{\char95}int,~neutral = neutral{\char95}int{\char125}~:\\
+\hspace*{0pt} ~IntInf.int monoidl;\\
+\hspace*{0pt}\\
+\hspace*{0pt}val monoid{\char95}int = {\char123}monoidl{\char95}monoid = monoidl{\char95}int{\char125}~:~IntInf.int monoid;\\
+\hspace*{0pt}\\
+\hspace*{0pt}fun inverse{\char95}int i = IntInf.{\char126}~i;\\
+\hspace*{0pt}\\
+\hspace*{0pt}val group{\char95}int = {\char123}monoid{\char95}group = monoid{\char95}int,~inverse = inverse{\char95}int{\char125}~:\\
+\hspace*{0pt} ~IntInf.int group;\\
+\hspace*{0pt}\\
 \hspace*{0pt}val example :~IntInf.int =\\
 \hspace*{0pt} ~pow{\char95}int group{\char95}int (10 :~IntInf.int) ({\char126}2 :~IntInf.int);\\
 \hspace*{0pt}\\
@@ -1335,9 +1336,6 @@
 \noindent%
 \hspace*{0pt}object Example {\char123}\\
 \hspace*{0pt}\\
-\hspace*{0pt}import /*implicits*/ Example.semigroup{\char95}int,~Example.monoidl{\char95}int,\\
-\hspace*{0pt} ~Example.monoid{\char95}int,~Example.group{\char95}int\\
-\hspace*{0pt}\\
 \hspace*{0pt}abstract sealed class nat\\
 \hspace*{0pt}final case object Zero{\char95}nat extends nat\\
 \hspace*{0pt}final case class Suc(a:~nat) extends nat\\
@@ -1348,23 +1346,23 @@
 \hspace*{0pt}def nat(i:~BigInt):~nat = nat{\char95}aux(i,~Zero{\char95}nat)\\
 \hspace*{0pt}\\
 \hspace*{0pt}trait semigroup[A] {\char123}\\
-\hspace*{0pt} ~val `Example+mult`:~(A,~A) => A\\
+\hspace*{0pt} ~val `Example.mult`:~(A,~A) => A\\
 \hspace*{0pt}{\char125}\\
 \hspace*{0pt}def mult[A](a:~A,~b:~A)(implicit A:~semigroup[A]):~A =\\
-\hspace*{0pt} ~A.`Example+mult`(a,~b)\\
+\hspace*{0pt} ~A.`Example.mult`(a,~b)\\
 \hspace*{0pt}\\
 \hspace*{0pt}trait monoidl[A] extends semigroup[A] {\char123}\\
-\hspace*{0pt} ~val `Example+neutral`:~A\\
+\hspace*{0pt} ~val `Example.neutral`:~A\\
 \hspace*{0pt}{\char125}\\
-\hspace*{0pt}def neutral[A](implicit A:~monoidl[A]):~A = A.`Example+neutral`\\
+\hspace*{0pt}def neutral[A](implicit A:~monoidl[A]):~A = A.`Example.neutral`\\
 \hspace*{0pt}\\
 \hspace*{0pt}trait monoid[A] extends monoidl[A] {\char123}\\
 \hspace*{0pt}{\char125}\\
 \hspace*{0pt}\\
 \hspace*{0pt}trait group[A] extends monoid[A] {\char123}\\
-\hspace*{0pt} ~val `Example+inverse`:~A => A\\
+\hspace*{0pt} ~val `Example.inverse`:~A => A\\
 \hspace*{0pt}{\char125}\\
-\hspace*{0pt}def inverse[A](a:~A)(implicit A:~group[A]):~A = A.`Example+inverse`(a)\\
+\hspace*{0pt}def inverse[A](a:~A)(implicit A:~group[A]):~A = A.`Example.inverse`(a)\\
 \hspace*{0pt}\\
 \hspace*{0pt}def pow{\char95}nat[A:~monoid](xa0:~nat,~x:~A):~A = (xa0,~x) match {\char123}\\
 \hspace*{0pt} ~case (Zero{\char95}nat,~x) => neutral[A]\\
@@ -1378,27 +1376,27 @@
 \hspace*{0pt}def mult{\char95}int(i:~BigInt,~j:~BigInt):~BigInt = i + j\\
 \hspace*{0pt}\\
 \hspace*{0pt}implicit def semigroup{\char95}int:~semigroup[BigInt] = new semigroup[BigInt] {\char123}\\
-\hspace*{0pt} ~val `Example+mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
+\hspace*{0pt} ~val `Example.mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
 \hspace*{0pt}{\char125}\\
 \hspace*{0pt}\\
 \hspace*{0pt}def neutral{\char95}int:~BigInt = BigInt(0)\\
 \hspace*{0pt}\\
 \hspace*{0pt}implicit def monoidl{\char95}int:~monoidl[BigInt] = new monoidl[BigInt] {\char123}\\
-\hspace*{0pt} ~val `Example+neutral` = neutral{\char95}int\\
-\hspace*{0pt} ~val `Example+mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
+\hspace*{0pt} ~val `Example.neutral` = neutral{\char95}int\\
+\hspace*{0pt} ~val `Example.mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
 \hspace*{0pt}{\char125}\\
 \hspace*{0pt}\\
 \hspace*{0pt}implicit def monoid{\char95}int:~monoid[BigInt] = new monoid[BigInt] {\char123}\\
-\hspace*{0pt} ~val `Example+neutral` = neutral{\char95}int\\
-\hspace*{0pt} ~val `Example+mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
+\hspace*{0pt} ~val `Example.neutral` = neutral{\char95}int\\
+\hspace*{0pt} ~val `Example.mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
 \hspace*{0pt}{\char125}\\
 \hspace*{0pt}\\
 \hspace*{0pt}def inverse{\char95}int(i:~BigInt):~BigInt = (- i)\\
 \hspace*{0pt}\\
 \hspace*{0pt}implicit def group{\char95}int:~group[BigInt] = new group[BigInt] {\char123}\\
-\hspace*{0pt} ~val `Example+inverse` = (a:~BigInt) => inverse{\char95}int(a)\\
-\hspace*{0pt} ~val `Example+neutral` = neutral{\char95}int\\
-\hspace*{0pt} ~val `Example+mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
+\hspace*{0pt} ~val `Example.inverse` = (a:~BigInt) => inverse{\char95}int(a)\\
+\hspace*{0pt} ~val `Example.neutral` = neutral{\char95}int\\
+\hspace*{0pt} ~val `Example.mult` = (a:~BigInt,~b:~BigInt) => mult{\char95}int(a,~b)\\
 \hspace*{0pt}{\char125}\\
 \hspace*{0pt}\\
 \hspace*{0pt}def example:~BigInt = pow{\char95}int[BigInt](BigInt(10),~BigInt(- 2))\\
--- a/doc-src/Codegen/Thy/Adaptation.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/Adaptation.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -272,12 +272,12 @@
 
 text {*
   For convenience, the default @{text HOL} setup for @{text Haskell}
-  maps the @{class eq} class to its counterpart in @{text Haskell},
-  giving custom serialisations for the class @{class eq} (by command
-  @{command_def code_class}) and its operation @{const HOL.eq}
+  maps the @{class equal} class to its counterpart in @{text Haskell},
+  giving custom serialisations for the class @{class equal} (by command
+  @{command_def code_class}) and its operation @{const HOL.equal}
 *}
 
-code_class %quotett eq
+code_class %quotett equal
   (Haskell "Eq")
 
 code_const %quotett "op ="
@@ -285,19 +285,19 @@
 
 text {*
   \noindent A problem now occurs whenever a type which is an instance
-  of @{class eq} in @{text HOL} is mapped on a @{text
+  of @{class equal} in @{text HOL} is mapped on a @{text
   Haskell}-built-in type which is also an instance of @{text Haskell}
   @{text Eq}:
 *}
 
 typedecl %quote bar
 
-instantiation %quote bar :: eq
+instantiation %quote bar :: equal
 begin
 
-definition %quote "eq_class.eq (x\<Colon>bar) y \<longleftrightarrow> x = y"
+definition %quote "HOL.equal (x\<Colon>bar) y \<longleftrightarrow> x = y"
 
-instance %quote by default (simp add: eq_bar_def)
+instance %quote by default (simp add: equal_bar_def)
 
 end %quote (*<*)
 
@@ -310,7 +310,7 @@
   suppress this additional instance, use @{command_def "code_instance"}:
 *}
 
-code_instance %quotett bar :: eq
+code_instance %quotett bar :: equal
   (Haskell -)
 
 
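
For reference, the adapted material above assembled into one self-contained sketch (taken from the hunks, with the %quote/%quotett document markup dropped; the elided code_const serialisation for "op =" is omitted):

    code_class equal
      (Haskell "Eq")

    typedecl bar

    instantiation bar :: equal
    begin

    definition "HOL.equal (x\<Colon>bar) y \<longleftrightarrow> x = y"

    instance by default (simp add: equal_bar_def)

    end

    code_instance bar :: equal
      (Haskell -)
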
--- a/doc-src/Codegen/Thy/Evaluation.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/Evaluation.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -96,7 +96,7 @@
   allows to use pattern matching on constructors stemming from compiled
   @{text "datatypes"}.
 
-  For a less simplistic example, theory @{theory Ferrack} is
+  For a less simplistic example, theory @{text Ferrack} is
   a good reference.
 *}
 
--- a/doc-src/Codegen/Thy/Further.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/Further.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -122,7 +122,7 @@
   specific application, you should consider \emph{Imperative
   Functional Programming with Isabelle/HOL}
   \cite{bulwahn-et-al:2008:imperative}; the framework described there
-  is available in session @{theory Imperative_HOL}.
+  is available in session @{text Imperative_HOL}.
 *}
 
 
--- a/doc-src/Codegen/Thy/Inductive_Predicate.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/Inductive_Predicate.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -220,8 +220,8 @@
   "values"} and the number of elements.
 *}
 
-values %quote [mode: i \<Rightarrow> o \<Rightarrow> bool] 20 "{n. tranclp succ 10 n}"
-values %quote [mode: o \<Rightarrow> i \<Rightarrow> bool] 10 "{n. tranclp succ n 10}"
+values %quote [mode: i \<Rightarrow> o \<Rightarrow> bool] 1 "{n. tranclp succ 10 n}" (*FIXME does not terminate for n\<ge>1*)
+values %quote [mode: o \<Rightarrow> i \<Rightarrow> bool] 1 "{n. tranclp succ n 10}"
 
 
 subsection {* Embedding into functional code within Isabelle/HOL *}
--- a/doc-src/Codegen/Thy/Setup.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/Setup.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -1,15 +1,12 @@
 theory Setup
-imports Complex_Main More_List RBT Dlist Mapping
+imports
+  Complex_Main
+  More_List RBT Dlist Mapping
 uses
   "../../antiquote_setup.ML"
   "../../more_antiquote.ML"
 begin
 
-ML {* no_document use_thys
-  ["Efficient_Nat", "Code_Char_chr", "Product_ord",
-   "~~/src/HOL/Imperative_HOL/Imperative_HOL",
-   "~~/src/HOL/Decision_Procs/Ferrack"] *}
-
 setup {*
 let
   val typ = Simple_Syntax.read_typ;
--- a/doc-src/Codegen/Thy/document/Adaptation.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Adaptation.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -499,9 +499,9 @@
 %
 \begin{isamarkuptext}%
 For convenience, the default \isa{HOL} setup for \isa{Haskell}
-  maps the \isa{eq} class to its counterpart in \isa{Haskell},
-  giving custom serialisations for the class \isa{eq} (by command
-  \indexdef{}{command}{code\_class}\hypertarget{command.code-class}{\hyperlink{command.code-class}{\mbox{\isa{\isacommand{code{\isacharunderscore}class}}}}}) and its operation \isa{eq{\isacharunderscore}class{\isachardot}eq}%
+  maps the \isa{equal} class to its counterpart in \isa{Haskell},
+  giving custom serialisations for the class \isa{equal} (by command
+  \indexdef{}{command}{code\_class}\hypertarget{command.code-class}{\hyperlink{command.code-class}{\mbox{\isa{\isacommand{code{\isacharunderscore}class}}}}}) and its operation \isa{equal{\isacharunderscore}class{\isachardot}equal}%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -511,7 +511,7 @@
 %
 \isatagquotett
 \isacommand{code{\isacharunderscore}class}\isamarkupfalse%
-\ eq\isanewline
+\ equal\isanewline
 \ \ {\isacharparenleft}Haskell\ {\isachardoublequoteopen}Eq{\isachardoublequoteclose}{\isacharparenright}\isanewline
 \isanewline
 \isacommand{code{\isacharunderscore}const}\isamarkupfalse%
@@ -526,7 +526,7 @@
 %
 \begin{isamarkuptext}%
 \noindent A problem now occurs whenever a type which is an instance
-  of \isa{eq} in \isa{HOL} is mapped on a \isa{Haskell}-built-in type which is also an instance of \isa{Haskell}
+  of \isa{equal} in \isa{HOL} is mapped on a \isa{Haskell}-built-in type which is also an instance of \isa{Haskell}
   \isa{Eq}:%
 \end{isamarkuptext}%
 \isamarkuptrue%
@@ -540,15 +540,15 @@
 \ bar\isanewline
 \isanewline
 \isacommand{instantiation}\isamarkupfalse%
-\ bar\ {\isacharcolon}{\isacharcolon}\ eq\isanewline
+\ bar\ {\isacharcolon}{\isacharcolon}\ equal\isanewline
 \isakeyword{begin}\isanewline
 \isanewline
 \isacommand{definition}\isamarkupfalse%
-\ {\isachardoublequoteopen}eq{\isacharunderscore}class{\isachardot}eq\ {\isacharparenleft}x{\isasymColon}bar{\isacharparenright}\ y\ {\isasymlongleftrightarrow}\ x\ {\isacharequal}\ y{\isachardoublequoteclose}\isanewline
+\ {\isachardoublequoteopen}HOL{\isachardot}equal\ {\isacharparenleft}x{\isasymColon}bar{\isacharparenright}\ y\ {\isasymlongleftrightarrow}\ x\ {\isacharequal}\ y{\isachardoublequoteclose}\isanewline
 \isanewline
 \isacommand{instance}\isamarkupfalse%
 \ \isacommand{by}\isamarkupfalse%
-\ default\ {\isacharparenleft}simp\ add{\isacharcolon}\ eq{\isacharunderscore}bar{\isacharunderscore}def{\isacharparenright}\isanewline
+\ default\ {\isacharparenleft}simp\ add{\isacharcolon}\ equal{\isacharunderscore}bar{\isacharunderscore}def{\isacharparenright}\isanewline
 \isanewline
 \isacommand{end}\isamarkupfalse%
 %
@@ -587,7 +587,7 @@
 %
 \isatagquotett
 \isacommand{code{\isacharunderscore}instance}\isamarkupfalse%
-\ bar\ {\isacharcolon}{\isacharcolon}\ eq\isanewline
+\ bar\ {\isacharcolon}{\isacharcolon}\ equal\isanewline
 \ \ {\isacharparenleft}Haskell\ {\isacharminus}{\isacharparenright}%
 \endisatagquotett
 {\isafoldquotett}%
--- a/doc-src/Codegen/Thy/document/Evaluation.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Evaluation.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -225,7 +225,7 @@
   allows to use pattern matching on constructors stemming from compiled
   \isa{datatypes}.
 
-  For a less simplistic example, theory \hyperlink{theory.Ferrack}{\mbox{\isa{Ferrack}}} is
+  For a less simplistic example, theory \isa{Ferrack} is
   a good reference.%
 \end{isamarkuptext}%
 \isamarkuptrue%
--- a/doc-src/Codegen/Thy/document/Foundations.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Foundations.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -247,11 +247,11 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}dequeue ::~forall a.~Queue a -> (Maybe a,~Queue a);\\
-\hspace*{0pt}dequeue (AQueue xs (y :~ys)) = (Just y,~AQueue xs ys);\\
-\hspace*{0pt}dequeue (AQueue xs []) =\\
-\hspace*{0pt} ~(if null xs then (Nothing,~AQueue [] [])\\
-\hspace*{0pt} ~~~else dequeue (AQueue [] (reverse xs)));%
+\hspace*{0pt}dequeue ::~forall a.~Example.Queue a -> (Maybe a,~Example.Queue a);\\
+\hspace*{0pt}dequeue (Example.AQueue xs (y :~ys)) = (Just y,~Example.AQueue xs ys);\\
+\hspace*{0pt}dequeue (Example.AQueue xs []) =\\
+\hspace*{0pt} ~(if null xs then (Nothing,~Example.AQueue [] [])\\
+\hspace*{0pt} ~~~else Example.dequeue (Example.AQueue [] (reverse xs)));%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -350,21 +350,21 @@
 \isatypewriter%
 \noindent%
 \hspace*{0pt}structure Example :~sig\\
-\hspace*{0pt} ~type 'a eq\\
-\hspace*{0pt} ~val eq :~'a eq -> 'a -> 'a -> bool\\
-\hspace*{0pt} ~val eqa :~'a eq -> 'a -> 'a -> bool\\
-\hspace*{0pt} ~val member :~'a eq -> 'a list -> 'a -> bool\\
+\hspace*{0pt} ~type 'a equal\\
+\hspace*{0pt} ~val equal :~'a equal -> 'a -> 'a -> bool\\
+\hspace*{0pt} ~val eq :~'a equal -> 'a -> 'a -> bool\\
+\hspace*{0pt} ~val member :~'a equal -> 'a list -> 'a -> bool\\
 \hspace*{0pt} ~val collect{\char95}duplicates :\\
-\hspace*{0pt} ~~~'a eq -> 'a list -> 'a list -> 'a list -> 'a list\\
+\hspace*{0pt} ~~~'a equal -> 'a list -> 'a list -> 'a list -> 'a list\\
 \hspace*{0pt}end = struct\\
 \hspace*{0pt}\\
-\hspace*{0pt}type 'a eq = {\char123}eq :~'a -> 'a -> bool{\char125};\\
-\hspace*{0pt}val eq = {\char35}eq :~'a eq -> 'a -> 'a -> bool;\\
+\hspace*{0pt}type 'a equal = {\char123}equal :~'a -> 'a -> bool{\char125};\\
+\hspace*{0pt}val equal = {\char35}equal :~'a equal -> 'a -> 'a -> bool;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun eqa A{\char95}~a b = eq A{\char95}~a b;\\
+\hspace*{0pt}fun eq A{\char95}~a b = equal A{\char95}~a b;\\
 \hspace*{0pt}\\
 \hspace*{0pt}fun member A{\char95}~[] y = false\\
-\hspace*{0pt} ~| member A{\char95}~(x ::~xs) y = eqa A{\char95}~x y orelse member A{\char95}~xs y;\\
+\hspace*{0pt} ~| member A{\char95}~(x ::~xs) y = eq A{\char95}~x y orelse member A{\char95}~xs y;\\
 \hspace*{0pt}\\
 \hspace*{0pt}fun collect{\char95}duplicates A{\char95}~xs ys [] = xs\\
 \hspace*{0pt} ~| collect{\char95}duplicates A{\char95}~xs ys (z ::~zs) =\\
@@ -387,11 +387,11 @@
 \begin{isamarkuptext}%
 \noindent Obviously, polymorphic equality is implemented the Haskell
   way using a type class.  How is this achieved?  HOL introduces an
-  explicit class \isa{eq} with a corresponding operation \isa{eq{\isacharunderscore}class{\isachardot}eq} such that \isa{eq{\isacharunderscore}class{\isachardot}eq\ {\isacharequal}\ op\ {\isacharequal}}.  The preprocessing
-  framework does the rest by propagating the \isa{eq} constraints
+  explicit class \isa{equal} with a corresponding operation \isa{equal{\isacharunderscore}class{\isachardot}equal} such that \isa{equal{\isacharunderscore}class{\isachardot}equal\ {\isacharequal}\ op\ {\isacharequal}}.  The preprocessing
+  framework does the rest by propagating the \isa{equal} constraints
   through all dependent code equations.  For datatypes, instances of
-  \isa{eq} are implicitly derived when possible.  For other types,
-  you may instantiate \isa{eq} manually like any other type class.%
+  \isa{equal} are implicitly derived when possible.  For other types,
+  you may instantiate \isa{equal} manually like any other type class.%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -444,12 +444,12 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}strict{\char95}dequeue ::~forall a.~Queue a -> (a,~Queue a);\\
-\hspace*{0pt}strict{\char95}dequeue (AQueue xs []) =\\
+\hspace*{0pt}strict{\char95}dequeue ::~forall a.~Example.Queue a -> (a,~Example.Queue a);\\
+\hspace*{0pt}strict{\char95}dequeue (Example.AQueue xs []) =\\
 \hspace*{0pt} ~let {\char123}\\
 \hspace*{0pt} ~~~(y :~ys) = reverse xs;\\
-\hspace*{0pt} ~{\char125}~in (y,~AQueue [] ys);\\
-\hspace*{0pt}strict{\char95}dequeue (AQueue xs (y :~ys)) = (y,~AQueue xs ys);%
+\hspace*{0pt} ~{\char125}~in (y,~Example.AQueue [] ys);\\
+\hspace*{0pt}strict{\char95}dequeue (Example.AQueue xs (y :~ys)) = (y,~Example.AQueue xs ys);%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -538,11 +538,11 @@
 \hspace*{0pt}empty{\char95}queue ::~forall a.~a;\\
 \hspace*{0pt}empty{\char95}queue = error {\char34}empty{\char95}queue{\char34};\\
 \hspace*{0pt}\\
-\hspace*{0pt}strict{\char95}dequeue ::~forall a.~Queue a -> (a,~Queue a);\\
-\hspace*{0pt}strict{\char95}dequeue (AQueue xs (y :~ys)) = (y,~AQueue xs ys);\\
-\hspace*{0pt}strict{\char95}dequeue (AQueue xs []) =\\
-\hspace*{0pt} ~(if null xs then empty{\char95}queue\\
-\hspace*{0pt} ~~~else strict{\char95}dequeue (AQueue [] (reverse xs)));%
+\hspace*{0pt}strict{\char95}dequeue ::~forall a.~Example.Queue a -> (a,~Example.Queue a);\\
+\hspace*{0pt}strict{\char95}dequeue (Example.AQueue xs (y :~ys)) = (y,~Example.AQueue xs ys);\\
+\hspace*{0pt}strict{\char95}dequeue (Example.AQueue xs []) =\\
+\hspace*{0pt} ~(if null xs then Example.empty{\char95}queue\\
+\hspace*{0pt} ~~~else Example.strict{\char95}dequeue (Example.AQueue [] (reverse xs)));%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
--- a/doc-src/Codegen/Thy/document/Further.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Further.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -216,13 +216,13 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}funpow ::~forall a.~Nat -> (a -> a) -> a -> a;\\
-\hspace*{0pt}funpow Zero{\char95}nat f = id;\\
-\hspace*{0pt}funpow (Suc n) f = f .~funpow n f;\\
+\hspace*{0pt}funpow ::~forall a.~Example.Nat -> (a -> a) -> a -> a;\\
+\hspace*{0pt}funpow Example.Zero{\char95}nat f = id;\\
+\hspace*{0pt}funpow (Example.Suc n) f = f .~Example.funpow n f;\\
 \hspace*{0pt}\\
-\hspace*{0pt}funpows ::~forall a.~[Nat] -> (a -> a) -> a -> a;\\
+\hspace*{0pt}funpows ::~forall a.~[Example.Nat] -> (a -> a) -> a -> a;\\
 \hspace*{0pt}funpows [] = id;\\
-\hspace*{0pt}funpows (x :~xs) = funpow x .~funpows xs;%
+\hspace*{0pt}funpows (x :~xs) = Example.funpow x .~Example.funpows xs;%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -242,7 +242,7 @@
   specific application, you should consider \emph{Imperative
   Functional Programming with Isabelle/HOL}
   \cite{bulwahn-et-al:2008:imperative}; the framework described there
-  is available in session \hyperlink{theory.Imperative-HOL}{\mbox{\isa{Imperative{\isacharunderscore}HOL}}}.%
+  is available in session \isa{Imperative{\isacharunderscore}HOL}.%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
--- a/doc-src/Codegen/Thy/document/Inductive_Predicate.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Inductive_Predicate.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -422,9 +422,9 @@
 %
 \isatagquote
 \isacommand{values}\isamarkupfalse%
-\ {\isacharbrackleft}mode{\isacharcolon}\ i\ {\isasymRightarrow}\ o\ {\isasymRightarrow}\ bool{\isacharbrackright}\ {\isadigit{2}}{\isadigit{0}}\ {\isachardoublequoteopen}{\isacharbraceleft}n{\isachardot}\ tranclp\ succ\ {\isadigit{1}}{\isadigit{0}}\ n{\isacharbraceright}{\isachardoublequoteclose}\isanewline
+\ {\isacharbrackleft}mode{\isacharcolon}\ i\ {\isasymRightarrow}\ o\ {\isasymRightarrow}\ bool{\isacharbrackright}\ {\isadigit{1}}\ {\isachardoublequoteopen}{\isacharbraceleft}n{\isachardot}\ tranclp\ succ\ {\isadigit{1}}{\isadigit{0}}\ n{\isacharbraceright}{\isachardoublequoteclose}\ \isanewline
 \isacommand{values}\isamarkupfalse%
-\ {\isacharbrackleft}mode{\isacharcolon}\ o\ {\isasymRightarrow}\ i\ {\isasymRightarrow}\ bool{\isacharbrackright}\ {\isadigit{1}}{\isadigit{0}}\ {\isachardoublequoteopen}{\isacharbraceleft}n{\isachardot}\ tranclp\ succ\ n\ {\isadigit{1}}{\isadigit{0}}{\isacharbraceright}{\isachardoublequoteclose}%
+\ {\isacharbrackleft}mode{\isacharcolon}\ o\ {\isasymRightarrow}\ i\ {\isasymRightarrow}\ bool{\isacharbrackright}\ {\isadigit{1}}\ {\isachardoublequoteopen}{\isacharbraceleft}n{\isachardot}\ tranclp\ succ\ n\ {\isadigit{1}}{\isadigit{0}}{\isacharbraceright}{\isachardoublequoteclose}%
 \endisatagquote
 {\isafoldquote}%
 %
--- a/doc-src/Codegen/Thy/document/Introduction.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Introduction.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -231,19 +231,19 @@
 \hspace*{0pt}\\
 \hspace*{0pt}data Queue a = AQueue [a] [a];\\
 \hspace*{0pt}\\
-\hspace*{0pt}empty ::~forall a.~Queue a;\\
-\hspace*{0pt}empty = AQueue [] [];\\
+\hspace*{0pt}empty ::~forall a.~Example.Queue a;\\
+\hspace*{0pt}empty = Example.AQueue [] [];\\
 \hspace*{0pt}\\
-\hspace*{0pt}dequeue ::~forall a.~Queue a -> (Maybe a,~Queue a);\\
-\hspace*{0pt}dequeue (AQueue [] []) = (Nothing,~AQueue [] []);\\
-\hspace*{0pt}dequeue (AQueue xs (y :~ys)) = (Just y,~AQueue xs ys);\\
-\hspace*{0pt}dequeue (AQueue (v :~va) []) =\\
+\hspace*{0pt}dequeue ::~forall a.~Example.Queue a -> (Maybe a,~Example.Queue a);\\
+\hspace*{0pt}dequeue (Example.AQueue [] []) = (Nothing,~Example.AQueue [] []);\\
+\hspace*{0pt}dequeue (Example.AQueue xs (y :~ys)) = (Just y,~Example.AQueue xs ys);\\
+\hspace*{0pt}dequeue (Example.AQueue (v :~va) []) =\\
 \hspace*{0pt} ~let {\char123}\\
 \hspace*{0pt} ~~~(y :~ys) = reverse (v :~va);\\
-\hspace*{0pt} ~{\char125}~in (Just y,~AQueue [] ys);\\
+\hspace*{0pt} ~{\char125}~in (Just y,~Example.AQueue [] ys);\\
 \hspace*{0pt}\\
-\hspace*{0pt}enqueue ::~forall a.~a -> Queue a -> Queue a;\\
-\hspace*{0pt}enqueue x (AQueue xs ys) = AQueue (x :~xs) ys;\\
+\hspace*{0pt}enqueue ::~forall a.~a -> Example.Queue a -> Example.Queue a;\\
+\hspace*{0pt}enqueue x (Example.AQueue xs ys) = Example.AQueue (x :~xs) ys;\\
 \hspace*{0pt}\\
 \hspace*{0pt}{\char125}%
 \end{isamarkuptext}%
@@ -397,41 +397,41 @@
 \noindent%
 \hspace*{0pt}module Example where {\char123}\\
 \hspace*{0pt}\\
-\hspace*{0pt}data Nat = Zero{\char95}nat | Suc Nat;\\
+\hspace*{0pt}data Nat = Zero{\char95}nat | Suc Example.Nat;\\
 \hspace*{0pt}\\
-\hspace*{0pt}plus{\char95}nat ::~Nat -> Nat -> Nat;\\
-\hspace*{0pt}plus{\char95}nat (Suc m) n = plus{\char95}nat m (Suc n);\\
-\hspace*{0pt}plus{\char95}nat Zero{\char95}nat n = n;\\
+\hspace*{0pt}plus{\char95}nat ::~Example.Nat -> Example.Nat -> Example.Nat;\\
+\hspace*{0pt}plus{\char95}nat (Example.Suc m) n = Example.plus{\char95}nat m (Example.Suc n);\\
+\hspace*{0pt}plus{\char95}nat Example.Zero{\char95}nat n = n;\\
 \hspace*{0pt}\\
 \hspace*{0pt}class Semigroup a where {\char123}\\
 \hspace*{0pt} ~mult ::~a -> a -> a;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}class (Semigroup a) => Monoid a where {\char123}\\
+\hspace*{0pt}class (Example.Semigroup a) => Monoid a where {\char123}\\
 \hspace*{0pt} ~neutral ::~a;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}pow ::~forall a.~(Monoid a) => Nat -> a -> a;\\
-\hspace*{0pt}pow Zero{\char95}nat a = neutral;\\
-\hspace*{0pt}pow (Suc n) a = mult a (pow n a);\\
+\hspace*{0pt}pow ::~forall a.~(Example.Monoid a) => Example.Nat -> a -> a;\\
+\hspace*{0pt}pow Example.Zero{\char95}nat a = Example.neutral;\\
+\hspace*{0pt}pow (Example.Suc n) a = Example.mult a (Example.pow n a);\\
 \hspace*{0pt}\\
-\hspace*{0pt}mult{\char95}nat ::~Nat -> Nat -> Nat;\\
-\hspace*{0pt}mult{\char95}nat Zero{\char95}nat n = Zero{\char95}nat;\\
-\hspace*{0pt}mult{\char95}nat (Suc m) n = plus{\char95}nat n (mult{\char95}nat m n);\\
+\hspace*{0pt}mult{\char95}nat ::~Example.Nat -> Example.Nat -> Example.Nat;\\
+\hspace*{0pt}mult{\char95}nat Example.Zero{\char95}nat n = Example.Zero{\char95}nat;\\
+\hspace*{0pt}mult{\char95}nat (Example.Suc m) n = Example.plus{\char95}nat n (Example.mult{\char95}nat m n);\\
 \hspace*{0pt}\\
-\hspace*{0pt}neutral{\char95}nat ::~Nat;\\
-\hspace*{0pt}neutral{\char95}nat = Suc Zero{\char95}nat;\\
+\hspace*{0pt}neutral{\char95}nat ::~Example.Nat;\\
+\hspace*{0pt}neutral{\char95}nat = Example.Suc Example.Zero{\char95}nat;\\
 \hspace*{0pt}\\
-\hspace*{0pt}instance Semigroup Nat where {\char123}\\
-\hspace*{0pt} ~mult = mult{\char95}nat;\\
+\hspace*{0pt}instance Example.Semigroup Example.Nat where {\char123}\\
+\hspace*{0pt} ~mult = Example.mult{\char95}nat;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}instance Monoid Nat where {\char123}\\
-\hspace*{0pt} ~neutral = neutral{\char95}nat;\\
+\hspace*{0pt}instance Example.Monoid Example.Nat where {\char123}\\
+\hspace*{0pt} ~neutral = Example.neutral{\char95}nat;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}bexp ::~Nat -> Nat;\\
-\hspace*{0pt}bexp n = pow n (Suc (Suc Zero{\char95}nat));\\
+\hspace*{0pt}bexp ::~Example.Nat -> Example.Nat;\\
+\hspace*{0pt}bexp n = Example.pow n (Example.Suc (Example.Suc Example.Zero{\char95}nat));\\
 \hspace*{0pt}\\
 \hspace*{0pt}{\char125}%
 \end{isamarkuptext}%
@@ -470,8 +470,8 @@
 \hspace*{0pt} ~val neutral :~'a monoid -> 'a\\
 \hspace*{0pt} ~val pow :~'a monoid -> nat -> 'a -> 'a\\
 \hspace*{0pt} ~val mult{\char95}nat :~nat -> nat -> nat\\
+\hspace*{0pt} ~val semigroup{\char95}nat :~nat semigroup\\
 \hspace*{0pt} ~val neutral{\char95}nat :~nat\\
-\hspace*{0pt} ~val semigroup{\char95}nat :~nat semigroup\\
 \hspace*{0pt} ~val monoid{\char95}nat :~nat monoid\\
 \hspace*{0pt} ~val bexp :~nat -> nat\\
 \hspace*{0pt}end = struct\\
@@ -494,9 +494,9 @@
 \hspace*{0pt}fun mult{\char95}nat Zero{\char95}nat n = Zero{\char95}nat\\
 \hspace*{0pt} ~| mult{\char95}nat (Suc m) n = plus{\char95}nat n (mult{\char95}nat m n);\\
 \hspace*{0pt}\\
-\hspace*{0pt}val neutral{\char95}nat :~nat = Suc Zero{\char95}nat;\\
+\hspace*{0pt}val semigroup{\char95}nat = {\char123}mult = mult{\char95}nat{\char125}~:~nat semigroup;\\
 \hspace*{0pt}\\
-\hspace*{0pt}val semigroup{\char95}nat = {\char123}mult = mult{\char95}nat{\char125}~:~nat semigroup;\\
+\hspace*{0pt}val neutral{\char95}nat :~nat = Suc Zero{\char95}nat;\\
 \hspace*{0pt}\\
 \hspace*{0pt}val monoid{\char95}nat = {\char123}semigroup{\char95}monoid = semigroup{\char95}nat,~neutral = neutral{\char95}nat{\char125}\\
 \hspace*{0pt} ~:~nat monoid;\\
--- a/doc-src/Codegen/Thy/document/Refinement.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/document/Refinement.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -74,10 +74,11 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}fib ::~Nat -> Nat;\\
-\hspace*{0pt}fib Zero{\char95}nat = Zero{\char95}nat;\\
-\hspace*{0pt}fib (Suc Zero{\char95}nat) = Suc Zero{\char95}nat;\\
-\hspace*{0pt}fib (Suc (Suc n)) = plus{\char95}nat (fib n) (fib (Suc n));%
+\hspace*{0pt}fib ::~Example.Nat -> Example.Nat;\\
+\hspace*{0pt}fib Example.Zero{\char95}nat = Example.Zero{\char95}nat;\\
+\hspace*{0pt}fib (Example.Suc Example.Zero{\char95}nat) = Example.Suc Example.Zero{\char95}nat;\\
+\hspace*{0pt}fib (Example.Suc (Example.Suc n)) =\\
+\hspace*{0pt} ~Example.plus{\char95}nat (Example.fib n) (Example.fib (Example.Suc n));%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -172,15 +173,17 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}fib{\char95}step ::~Nat -> (Nat,~Nat);\\
-\hspace*{0pt}fib{\char95}step (Suc n) = let {\char123}\\
-\hspace*{0pt} ~~~~~~~~~~~~~~~~~~~~(m,~q) = fib{\char95}step n;\\
-\hspace*{0pt} ~~~~~~~~~~~~~~~~~~{\char125}~in (plus{\char95}nat m q,~m);\\
-\hspace*{0pt}fib{\char95}step Zero{\char95}nat = (Suc Zero{\char95}nat,~Zero{\char95}nat);\\
+\hspace*{0pt}fib{\char95}step ::~Example.Nat -> (Example.Nat,~Example.Nat);\\
+\hspace*{0pt}fib{\char95}step (Example.Suc n) =\\
+\hspace*{0pt} ~let {\char123}\\
+\hspace*{0pt} ~~~(m,~q) = Example.fib{\char95}step n;\\
+\hspace*{0pt} ~{\char125}~in (Example.plus{\char95}nat m q,~m);\\
+\hspace*{0pt}fib{\char95}step Example.Zero{\char95}nat =\\
+\hspace*{0pt} ~(Example.Suc Example.Zero{\char95}nat,~Example.Zero{\char95}nat);\\
 \hspace*{0pt}\\
-\hspace*{0pt}fib ::~Nat -> Nat;\\
-\hspace*{0pt}fib (Suc n) = fst (fib{\char95}step n);\\
-\hspace*{0pt}fib Zero{\char95}nat = Zero{\char95}nat;%
+\hspace*{0pt}fib ::~Example.Nat -> Example.Nat;\\
+\hspace*{0pt}fib (Example.Suc n) = fst (Example.fib{\char95}step n);\\
+\hspace*{0pt}fib Example.Zero{\char95}nat = Example.Zero{\char95}nat;%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -590,28 +593,30 @@
 \hspace*{0pt}\\
 \hspace*{0pt}newtype Dlist a = Dlist [a];\\
 \hspace*{0pt}\\
-\hspace*{0pt}empty ::~forall a.~Dlist a;\\
-\hspace*{0pt}empty = Dlist [];\\
+\hspace*{0pt}empty ::~forall a.~Example.Dlist a;\\
+\hspace*{0pt}empty = Example.Dlist [];\\
 \hspace*{0pt}\\
 \hspace*{0pt}member ::~forall a.~(Eq a) => [a] -> a -> Bool;\\
 \hspace*{0pt}member [] y = False;\\
-\hspace*{0pt}member (x :~xs) y = x == y || member xs y;\\
+\hspace*{0pt}member (x :~xs) y = x == y || Example.member xs y;\\
 \hspace*{0pt}\\
 \hspace*{0pt}inserta ::~forall a.~(Eq a) => a -> [a] -> [a];\\
-\hspace*{0pt}inserta x xs = (if member xs x then xs else x :~xs);\\
+\hspace*{0pt}inserta x xs = (if Example.member xs x then xs else x :~xs);\\
 \hspace*{0pt}\\
-\hspace*{0pt}list{\char95}of{\char95}dlist ::~forall a.~Dlist a -> [a];\\
-\hspace*{0pt}list{\char95}of{\char95}dlist (Dlist x) = x;\\
+\hspace*{0pt}list{\char95}of{\char95}dlist ::~forall a.~Example.Dlist a -> [a];\\
+\hspace*{0pt}list{\char95}of{\char95}dlist (Example.Dlist x) = x;\\
 \hspace*{0pt}\\
-\hspace*{0pt}insert ::~forall a.~(Eq a) => a -> Dlist a -> Dlist a;\\
-\hspace*{0pt}insert x dxs = Dlist (inserta x (list{\char95}of{\char95}dlist dxs));\\
+\hspace*{0pt}insert ::~forall a.~(Eq a) => a -> Example.Dlist a -> Example.Dlist a;\\
+\hspace*{0pt}insert x dxs =\\
+\hspace*{0pt} ~Example.Dlist (Example.inserta x (Example.list{\char95}of{\char95}dlist dxs));\\
 \hspace*{0pt}\\
 \hspace*{0pt}remove1 ::~forall a.~(Eq a) => a -> [a] -> [a];\\
 \hspace*{0pt}remove1 x [] = [];\\
-\hspace*{0pt}remove1 x (y :~xs) = (if x == y then xs else y :~remove1 x xs);\\
+\hspace*{0pt}remove1 x (y :~xs) = (if x == y then xs else y :~Example.remove1 x xs);\\
 \hspace*{0pt}\\
-\hspace*{0pt}remove ::~forall a.~(Eq a) => a -> Dlist a -> Dlist a;\\
-\hspace*{0pt}remove x dxs = Dlist (remove1 x (list{\char95}of{\char95}dlist dxs));\\
+\hspace*{0pt}remove ::~forall a.~(Eq a) => a -> Example.Dlist a -> Example.Dlist a;\\
+\hspace*{0pt}remove x dxs =\\
+\hspace*{0pt} ~Example.Dlist (Example.remove1 x (Example.list{\char95}of{\char95}dlist dxs));\\
 \hspace*{0pt}\\
 \hspace*{0pt}{\char125}%
 \end{isamarkuptext}%
--- a/doc-src/Codegen/Thy/examples/Example.hs	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Codegen/Thy/examples/Example.hs	Thu Sep 02 20:44:33 2010 +0200
@@ -4,18 +4,18 @@
 
 data Queue a = AQueue [a] [a];
 
-empty :: forall a. Queue a;
-empty = AQueue [] [];
+empty :: forall a. Example.Queue a;
+empty = Example.AQueue [] [];
 
-dequeue :: forall a. Queue a -> (Maybe a, Queue a);
-dequeue (AQueue [] []) = (Nothing, AQueue [] []);
-dequeue (AQueue xs (y : ys)) = (Just y, AQueue xs ys);
-dequeue (AQueue (v : va) []) =
+dequeue :: forall a. Example.Queue a -> (Maybe a, Example.Queue a);
+dequeue (Example.AQueue [] []) = (Nothing, Example.AQueue [] []);
+dequeue (Example.AQueue xs (y : ys)) = (Just y, Example.AQueue xs ys);
+dequeue (Example.AQueue (v : va) []) =
   let {
     (y : ys) = reverse (v : va);
-  } in (Just y, AQueue [] ys);
+  } in (Just y, Example.AQueue [] ys);
 
-enqueue :: forall a. a -> Queue a -> Queue a;
-enqueue x (AQueue xs ys) = AQueue (x : xs) ys;
+enqueue :: forall a. a -> Example.Queue a -> Example.Queue a;
+enqueue x (Example.AQueue xs ys) = Example.AQueue (x : xs) ys;
 
 }
--- a/doc-src/IsarRef/Thy/Inner_Syntax.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/IsarRef/Thy/Inner_Syntax.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -108,7 +108,7 @@
     @{index_ML Proof.show_main_goal: "bool Unsynchronized.ref"} & default @{ML false} \\
     @{index_ML show_hyps: "bool Unsynchronized.ref"} & default @{ML false} \\
     @{index_ML show_tags: "bool Unsynchronized.ref"} & default @{ML false} \\
-    @{index_ML show_question_marks: "bool Unsynchronized.ref"} & default @{ML true} \\
+    @{index_ML show_question_marks: "bool Config.T"} & default @{ML true} \\
   \end{mldecls}
 
   These global ML variables control the detail of information that is
--- a/doc-src/IsarRef/Thy/document/Inner_Syntax.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/IsarRef/Thy/document/Inner_Syntax.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -130,7 +130,7 @@
     \indexdef{}{ML}{Proof.show\_main\_goal}\verb|Proof.show_main_goal: bool Unsynchronized.ref| & default \verb|false| \\
     \indexdef{}{ML}{show\_hyps}\verb|show_hyps: bool Unsynchronized.ref| & default \verb|false| \\
     \indexdef{}{ML}{show\_tags}\verb|show_tags: bool Unsynchronized.ref| & default \verb|false| \\
-    \indexdef{}{ML}{show\_question\_marks}\verb|show_question_marks: bool Unsynchronized.ref| & default \verb|true| \\
+    \indexdef{}{ML}{show\_question\_marks}\verb|show_question_marks: bool Config.T| & default \verb|true| \\
   \end{mldecls}
 
   These global ML variables control the detail of information that is
--- a/doc-src/LaTeXsugar/Sugar/Sugar.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/LaTeXsugar/Sugar/Sugar.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -132,19 +132,19 @@
 This \verb!no_vars! business can become a bit tedious.
 If you would rather never see question marks, simply put
 \begin{quote}
-@{ML "show_question_marks := false"}\verb!;!
+@{ML "show_question_marks_default := false"}\verb!;!
 \end{quote}
 at the beginning of your file \texttt{ROOT.ML}.
 The rest of this document is produced with this flag set to \texttt{false}.
 
-Hint: Setting \verb!show_question_marks! to \texttt{false} only
+Hint: Setting \verb!show_question_marks_default! to \texttt{false} only
 suppresses question marks; variables that end in digits,
 e.g. @{text"x1"}, are still printed with a trailing @{text".0"},
 e.g. @{text"x1.0"}, their internal index. This can be avoided by
 turning the last digit into a subscript: write \verb!x\<^isub>1! and
 obtain the much nicer @{text"x\<^isub>1"}. *}
 
-(*<*)ML "show_question_marks := false"(*>*)
+(*<*)declare [[show_question_marks = false]](*>*)
 
 subsection {*Qualified names*}
 
--- a/doc-src/LaTeXsugar/Sugar/document/Sugar.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/LaTeXsugar/Sugar/document/Sugar.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -164,7 +164,7 @@
 \begin{isamarkuptext}%
 If you print anything, especially theorems, containing
 schematic variables they are prefixed with a question mark:
-\verb!@!\verb!{thm conjI}! results in \isa{{\isasymlbrakk}P{\isacharsemicolon}\ Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isasymand}\ Q}. Most of the time
+\verb!@!\verb!{thm conjI}! results in \isa{{\isasymlbrakk}{\isacharquery}P{\isacharsemicolon}\ {\isacharquery}Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}P\ {\isasymand}\ {\isacharquery}Q}. Most of the time
 you would rather not see the question marks. There is an attribute
 \verb!no_vars! that you can attach to the theorem that turns its
 schematic into ordinary free variables: \verb!@!\verb!{thm conjI[no_vars]}!
@@ -173,12 +173,12 @@
 This \verb!no_vars! business can become a bit tedious.
 If you would rather never see question marks, simply put
 \begin{quote}
-\verb|show_question_marks := false|\verb!;!
+\verb|show_question_marks_default := false|\verb!;!
 \end{quote}
 at the beginning of your file \texttt{ROOT.ML}.
 The rest of this document is produced with this flag set to \texttt{false}.
 
-Hint: Setting \verb!show_question_marks! to \texttt{false} only
+Hint: Setting \verb!show_question_marks_default! to \texttt{false} only
 suppresses question marks; variables that end in digits,
 e.g. \isa{x{\isadigit{1}}}, are still printed with a trailing \isa{{\isachardot}{\isadigit{0}}},
 e.g. \isa{x{\isadigit{1}}{\isachardot}{\isadigit{0}}}, their internal index. This can be avoided by
@@ -187,19 +187,6 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
-\isadelimML
-%
-\endisadelimML
-%
-\isatagML
-%
-\endisatagML
-{\isafoldML}%
-%
-\isadelimML
-%
-\endisadelimML
-%
 \isamarkupsubsection{Qualified names%
 }
 \isamarkuptrue%
--- a/doc-src/Nitpick/nitpick.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Nitpick/nitpick.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -1921,7 +1921,7 @@
 \end{enum}
 
 Default values are indicated in square brackets. Boolean options have a negated
-counterpart (e.g., \textit{blocking} vs.\ \textit{no\_blocking}). When setting
+counterpart (e.g., \textit{blocking} vs.\ \textit{non\_blocking}). When setting
 Boolean options, ``= \textit{true}'' may be omitted.
 
 \subsection{Mode of Operation}
--- a/doc-src/Sledgehammer/sledgehammer.tex	Thu Sep 02 20:29:12 2010 +0200
+++ b/doc-src/Sledgehammer/sledgehammer.tex	Thu Sep 02 20:44:33 2010 +0200
@@ -333,8 +333,9 @@
 \def\opargboolorsmart#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{bool\_or\_smart}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]}
 
 Sledgehammer's options are categorized as follows:\ mode of operation
-(\S\ref{mode-of-operation}), problem encoding (\S\ref{problem-encoding}), output
-format (\S\ref{output-format}), and timeouts (\S\ref{timeouts}).
+(\S\ref{mode-of-operation}), problem encoding (\S\ref{problem-encoding}),
+relevance filter (\S\ref{relevance-filter}), output format
+(\S\ref{output-format}), and authentication (\S\ref{authentication}).
 
 The descriptions below refer to the following syntactic quantities:
 
@@ -349,19 +350,13 @@
 \end{enum}
 
 Default values are indicated in square brackets. Boolean options have a negated
-counterpart (e.g., \textit{debug} vs.\ \textit{no\_debug}). When setting
+counterpart (e.g., \textit{blocking} vs.\ \textit{non\_blocking}). When setting
 Boolean options, ``= \textit{true}'' may be omitted.
 
 \subsection{Mode of Operation}
 \label{mode-of-operation}
 
 \begin{enum}
-%\optrue{blocking}{non\_blocking}
-%Specifies whether the \textbf{sledgehammer} command should operate synchronously.
-%The asynchronous (non-blocking) mode lets the user start proving the putative
-%theorem while Sledgehammer looks for a counterexample, but it can also be more
-%confusing. For technical reasons, automatic runs currently always block.
-
 \opnodefault{atps}{string}
 Specifies the ATPs (automated theorem provers) to use as a space-separated list
 (e.g., ``\textit{e}~\textit{spass}''). The following ATPs are supported:
@@ -414,6 +409,18 @@
 \opnodefault{atp}{string}
 Alias for \textit{atps}.
 
+\opdefault{timeout}{time}{$\mathbf{60}$ s}
+Specifies the maximum amount of time that the ATPs should spend searching for a
+proof. For historical reasons, the default value of this option can be
+overridden using the option ``Sledgehammer: Time Limit'' from the ``Isabelle''
+menu in Proof General.
+
+\opfalse{blocking}{non\_blocking}
+Specifies whether the \textbf{sledgehammer} command should operate
+synchronously. The asynchronous (non-blocking) mode lets the user start proving
+the putative theorem manually while Sledgehammer looks for a proof, but it can
+also be more confusing.
+
 \opfalse{overlord}{no\_overlord}
 Specifies whether Sledgehammer should put its temporary files in
 \texttt{\$ISA\-BELLE\_\allowbreak HOME\_\allowbreak USER}, which is useful for
@@ -460,11 +467,11 @@
 filter. If the option is set to \textit{smart}, it is set to a value that was
 empirically found to be appropriate for the ATP. A typical value would be 300.
 
-\opsmartx{theory\_relevant}{theory\_irrelevant}
-Specifies whether the theory from which a fact comes should be taken into
-consideration by the relevance filter. If the option is set to \textit{smart},
-it is taken to be \textit{true} for SPASS and \textit{false} for the other ATPs;
-empirical results suggest that these are the best settings.
+%\opsmartx{theory\_relevant}{theory\_irrelevant}
+%Specifies whether the theory from which a fact comes should be taken into
+%consideration by the relevance filter. If the option is set to \textit{smart},
+%it is taken to be \textit{true} for SPASS and \textit{false} for the other ATPs;
+%empirical results suggest that these are the best settings.
 
 %\opfalse{defs\_relevant}{defs\_irrelevant}
 %Specifies whether the definition of constants occurring in the formula to prove
@@ -501,15 +508,25 @@
 
 \end{enum}
 
-\subsection{Timeouts}
-\label{timeouts}
+\subsection{Authentication}
+\label{authentication}
+
+\begin{enum}
+\opnodefault{expect}{string}
+Specifies the expected outcome, which must be one of the following:
 
 \begin{enum}
-\opdefault{timeout}{time}{$\mathbf{60}$ s}
-Specifies the maximum amount of time that the ATPs should spend looking for a
-proof. For historical reasons, the default value of this option can be
-overridden using the option ``Sledgehammer: Time Limit'' from the ``Isabelle''
-menu in Proof General.
+\item[$\bullet$] \textbf{\textit{some}:} Sledgehammer found a (potentially unsound) proof.
+\item[$\bullet$] \textbf{\textit{none}:} Sledgehammer found no proof.
+\item[$\bullet$] \textbf{\textit{unknown}:} Sledgehammer encountered some problem.
+\end{enum}
+
+Sledgehammer emits an error (if \textit{blocking} is enabled) or a warning
+(otherwise) if the actual outcome differs from the expected outcome. This option
+is useful for regression testing.
+
+\nopagebreak
+{\small See also \textit{blocking} (\S\ref{mode-of-operation}).}
 \end{enum}
 
 \let\em=\sl
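
As a hedged usage sketch (not part of the changeset), the relocated timeout option and the newly documented blocking and expect options might be combined in one invocation; the option names come from the hunks above, and omitting ``= true'' for Boolean options follows the convention stated in this manual:

    sledgehammer [atps = e, timeout = 120, blocking, expect = some]
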
--- a/src/HOL/Boogie/Tools/boogie_vcs.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Boogie/Tools/boogie_vcs.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -348,6 +348,6 @@
   let
     fun rewr (_, (t, _)) = vc_of_term (f thy t)
       |> (fn vc => (vc, (t, thm_of thy vc)))
-  in VCs.map (Option.map (fn (vcs, _) => (Symtab.map rewr vcs, g))) thy end
+  in VCs.map (Option.map (fn (vcs, _) => (Symtab.map (K rewr) vcs, g))) thy end
 
 end
--- a/src/HOL/Fun.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Fun.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -117,31 +117,27 @@
 no_notation fcomp (infixl "\<circ>>" 60)
 
 
-subsection {* Injectivity and Surjectivity *}
+subsection {* Injectivity, Surjectivity and Bijectivity *}
+
+definition inj_on :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a set \<Rightarrow> bool" where -- "injective"
+  "inj_on f A \<longleftrightarrow> (\<forall>x\<in>A. \<forall>y\<in>A. f x = f y \<longrightarrow> x = y)"
 
-definition
-  inj_on :: "['a => 'b, 'a set] => bool" where
-  -- "injective"
-  "inj_on f A == ! x:A. ! y:A. f(x)=f(y) --> x=y"
+definition surj_on :: "('a \<Rightarrow> 'b) \<Rightarrow> 'b set \<Rightarrow> bool" where -- "surjective"
+  "surj_on f B \<longleftrightarrow> B \<subseteq> range f"
+
+definition bij_betw :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a set \<Rightarrow> 'b set \<Rightarrow> bool" where -- "bijective"
+  "bij_betw f A B \<longleftrightarrow> inj_on f A \<and> f ` A = B"
 
 text{*A common special case: functions injective over the entire domain type.*}
 
 abbreviation
-  "inj f == inj_on f UNIV"
-
-definition
-  bij_betw :: "('a => 'b) => 'a set => 'b set => bool" where -- "bijective"
-  "bij_betw f A B \<longleftrightarrow> inj_on f A & f ` A = B"
+  "inj f \<equiv> inj_on f UNIV"
 
-definition
-  surj :: "('a => 'b) => bool" where
-  -- "surjective"
-  "surj f == ! y. ? x. y=f(x)"
+abbreviation
+  "surj f \<equiv> surj_on f UNIV"
 
-definition
-  bij :: "('a => 'b) => bool" where
-  -- "bijective"
-  "bij f == inj f & surj f"
+abbreviation
+  "bij f \<equiv> bij_betw f UNIV UNIV"
 
 lemma injI:
   assumes "\<And>x y. f x = f y \<Longrightarrow> x = y"
@@ -173,16 +169,16 @@
 by (simp add: inj_on_eq_iff)
 
 lemma inj_on_id[simp]: "inj_on id A"
-  by (simp add: inj_on_def) 
+  by (simp add: inj_on_def)
 
 lemma inj_on_id2[simp]: "inj_on (%x. x) A"
-by (simp add: inj_on_def) 
+by (simp add: inj_on_def)
 
-lemma surj_id[simp]: "surj id"
-by (simp add: surj_def) 
+lemma surj_id[simp]: "surj_on id A"
+by (simp add: surj_on_def)
 
-lemma bij_id[simp]: "bij id"
-by (simp add: bij_def)
+lemma bij_id[simp]: "bij_betw id A A"
+by (simp add: bij_betw_def)
 
 lemma inj_onI:
     "(!! x y. [|  x:A;  y:A;  f(x) = f(y) |] ==> x=y) ==> inj_on f A"
@@ -242,19 +238,26 @@
 apply (blast)
 done
 
-lemma surjI: "(!! x. g(f x) = x) ==> surj g"
-apply (simp add: surj_def)
-apply (blast intro: sym)
-done
+lemma surj_onI: "(\<And>x. x \<in> B \<Longrightarrow> g (f x) = x) \<Longrightarrow> surj_on g B"
+  by (simp add: surj_on_def) (blast intro: sym)
+
+lemma surj_onD: "surj_on f B \<Longrightarrow> y \<in> B \<Longrightarrow> \<exists>x. y = f x"
+  by (auto simp: surj_on_def)
+
+lemma surj_on_range_iff: "surj_on f B \<longleftrightarrow> (\<exists>A. f ` A = B)"
+  unfolding surj_on_def by (auto intro!: exI[of _ "f -` B"])
 
-lemma surj_range: "surj f ==> range f = UNIV"
-by (auto simp add: surj_def)
+lemma surj_def: "surj f \<longleftrightarrow> (\<forall>y. \<exists>x. y = f x)"
+  by (simp add: surj_on_def subset_eq image_iff)
+
+lemma surjI: "(\<And> x. g (f x) = x) \<Longrightarrow> surj g"
+  by (blast intro: surj_onI)
 
-lemma surjD: "surj f ==> EX x. y = f x"
-by (simp add: surj_def)
+lemma surjD: "surj f \<Longrightarrow> \<exists>x. y = f x"
+  by (simp add: surj_def)
 
-lemma surjE: "surj f ==> (!!x. y = f x ==> C) ==> C"
-by (simp add: surj_def, blast)
+lemma surjE: "surj f \<Longrightarrow> (\<And>x. y = f x \<Longrightarrow> C) \<Longrightarrow> C"
+  by (simp add: surj_def, blast)
 
 lemma comp_surj: "[| surj f;  surj g |] ==> surj (g o f)"
 apply (simp add: comp_def surj_def, clarify)
@@ -262,6 +265,18 @@
 apply (drule_tac x = x in spec, blast)
 done
 
+lemma surj_range: "surj f \<Longrightarrow> range f = UNIV"
+  by (auto simp add: surj_on_def)
+
+lemma surj_range_iff: "surj f \<longleftrightarrow> range f = UNIV"
+  unfolding surj_on_def by auto
+
+lemma bij_betw_imp_surj: "bij_betw f A UNIV \<Longrightarrow> surj f"
+  unfolding bij_betw_def surj_range_iff by auto
+
+lemma bij_def: "bij f \<longleftrightarrow> inj f \<and> surj f"
+  unfolding surj_range_iff bij_betw_def ..
+
 lemma bijI: "[| inj f; surj f |] ==> bij f"
 by (simp add: bij_def)
 
@@ -274,6 +289,9 @@
 lemma bij_betw_imp_inj_on: "bij_betw f A B \<Longrightarrow> inj_on f A"
 by (simp add: bij_betw_def)
 
+lemma bij_betw_imp_surj_on: "bij_betw f A B \<Longrightarrow> surj_on f B"
+by (auto simp: bij_betw_def surj_on_range_iff)
+
 lemma bij_comp: "bij f \<Longrightarrow> bij g \<Longrightarrow> bij (g o f)"
 by(fastsimp intro: comp_inj_on comp_surj simp: bij_def surj_range)
 
@@ -312,6 +330,11 @@
   ultimately show ?thesis by(auto simp:bij_betw_def)
 qed
 
+lemma bij_betw_combine:
+  assumes "bij_betw f A B" "bij_betw f C D" "B \<inter> D = {}"
+  shows "bij_betw f (A \<union> C) (B \<union> D)"
+  using assms unfolding bij_betw_def inj_on_Un image_Un by auto
+
 lemma surj_image_vimage_eq: "surj f ==> f ` (f -` A) = A"
 by (simp add: surj_range)
 
@@ -497,44 +520,46 @@
 lemma comp_swap: "f \<circ> swap a b g = swap a b (f \<circ> g)"
 by (rule ext, simp add: fun_upd_def swap_def)
 
+lemma swap_image_eq [simp]:
+  assumes "a \<in> A" "b \<in> A" shows "swap a b f ` A = f ` A"
+proof -
+  have subset: "\<And>f. swap a b f ` A \<subseteq> f ` A"
+    using assms by (auto simp: image_iff swap_def)
+  then have "swap a b (swap a b f) ` A \<subseteq> (swap a b f) ` A" .
+  with subset[of f] show ?thesis by auto
+qed
+
 lemma inj_on_imp_inj_on_swap:
-  "[|inj_on f A; a \<in> A; b \<in> A|] ==> inj_on (swap a b f) A"
-by (simp add: inj_on_def swap_def, blast)
+  "\<lbrakk>inj_on f A; a \<in> A; b \<in> A\<rbrakk> \<Longrightarrow> inj_on (swap a b f) A"
+  by (simp add: inj_on_def swap_def, blast)
 
 lemma inj_on_swap_iff [simp]:
-  assumes A: "a \<in> A" "b \<in> A" shows "inj_on (swap a b f) A = inj_on f A"
-proof 
+  assumes A: "a \<in> A" "b \<in> A" shows "inj_on (swap a b f) A \<longleftrightarrow> inj_on f A"
+proof
   assume "inj_on (swap a b f) A"
-  with A have "inj_on (swap a b (swap a b f)) A" 
-    by (iprover intro: inj_on_imp_inj_on_swap) 
-  thus "inj_on f A" by simp 
+  with A have "inj_on (swap a b (swap a b f)) A"
+    by (iprover intro: inj_on_imp_inj_on_swap)
+  thus "inj_on f A" by simp
 next
   assume "inj_on f A"
   with A show "inj_on (swap a b f) A" by (iprover intro: inj_on_imp_inj_on_swap)
 qed
 
-lemma surj_imp_surj_swap: "surj f ==> surj (swap a b f)"
-apply (simp add: surj_def swap_def, clarify)
-apply (case_tac "y = f b", blast)
-apply (case_tac "y = f a", auto)
-done
+lemma surj_imp_surj_swap: "surj f \<Longrightarrow> surj (swap a b f)"
+  unfolding surj_range_iff by simp
+
+lemma surj_swap_iff [simp]: "surj (swap a b f) \<longleftrightarrow> surj f"
+  unfolding surj_range_iff by simp
 
-lemma surj_swap_iff [simp]: "surj (swap a b f) = surj f"
-proof 
-  assume "surj (swap a b f)"
-  hence "surj (swap a b (swap a b f))" by (rule surj_imp_surj_swap) 
-  thus "surj f" by simp 
-next
-  assume "surj f"
-  thus "surj (swap a b f)" by (rule surj_imp_surj_swap) 
-qed
+lemma bij_betw_swap_iff [simp]:
+  "\<lbrakk> x \<in> A; y \<in> A \<rbrakk> \<Longrightarrow> bij_betw (swap x y f) A B \<longleftrightarrow> bij_betw f A B"
+  by (auto simp: bij_betw_def)
 
-lemma bij_swap_iff: "bij (swap a b f) = bij f"
-by (simp add: bij_def)
+lemma bij_swap_iff [simp]: "bij (swap a b f) \<longleftrightarrow> bij f"
+  by simp
 
 hide_const (open) swap
 
-
 subsection {* Inversion of injective functions *}
 
 definition the_inv_into :: "'a set => ('a => 'b) => ('b => 'a)" where
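
A minimal usage sketch (not part of the changeset): "surj" is now an abbreviation for "surj_on f UNIV", but "surj_def" above is kept as a derived lemma, so proofs that unfold it still go through, for example:

lemma "surj (\<lambda>n::nat. n)"
  by (simp add: surj_def)
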
--- a/src/HOL/HOL.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/HOL.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -30,7 +30,9 @@
   "~~/src/Tools/induct.ML"
   ("~~/src/Tools/induct_tacs.ML")
   ("Tools/recfun_codegen.ML")
+  "Tools/async_manager.ML"
   "Tools/try.ML"
+  ("Tools/cnf_funcs.ML")
 begin
 
 setup {* Intuitionistic.method_setup @{binding iprover} *}
@@ -1644,7 +1646,6 @@
   "(\<not> \<not>(P)) = P"
 by blast+
 
-
 subsection {* Basic ML bindings *}
 
 ML {*
@@ -1698,6 +1699,7 @@
 val trans = @{thm trans}
 *}
 
+use "Tools/cnf_funcs.ML"
 
 subsection {* Code generator setup *}
 
@@ -1930,6 +1932,10 @@
 code_reserved Scala
   Boolean
 
+code_modulename SML Pure HOL
+code_modulename OCaml Pure HOL
+code_modulename Haskell Pure HOL
+
 text {* using built-in Haskell equality *}
 
 code_class equal
@@ -1995,7 +2001,8 @@
   "solve goal by evaluation"
 
 method_setup normalization = {*
-  Scan.succeed (K (SIMPLE_METHOD' (CONVERSION Nbe.dynamic_eval_conv THEN' (fn k => TRY (rtac TrueI k)))))
+  Scan.succeed (K (SIMPLE_METHOD'
+    (CHANGED_PROP o (CONVERSION Nbe.dynamic_eval_conv THEN' (fn k => TRY (rtac TrueI k))))))
 *} "solve goal by normalization"
 
 
--- a/src/HOL/Hilbert_Choice.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Hilbert_Choice.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -7,7 +7,8 @@
 
 theory Hilbert_Choice
 imports Nat Wellfounded Plain
-uses ("Tools/meson.ML") ("Tools/choice_specification.ML")
+uses ("Tools/meson.ML")
+     ("Tools/choice_specification.ML")
 begin
 
 subsection {* Hilbert's epsilon *}
--- a/src/HOL/Imperative_HOL/Heap_Monad.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Imperative_HOL/Heap_Monad.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -578,7 +578,7 @@
             (map o pairself) imp_monad_bind pats),
               imp_monad_bind t0);
 
-  in (Graph.map_nodes o map_terms_stmt) imp_monad_bind end;
+  in (Graph.map o K o map_terms_stmt) imp_monad_bind end;
 
 in
 
--- a/src/HOL/IsaMakefile	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/IsaMakefile	Thu Sep 02 20:44:33 2010 +0200
@@ -267,7 +267,7 @@
   $(SRC)/Provers/Arith/combine_numerals.ML \
   $(SRC)/Provers/Arith/extract_common_term.ML \
   $(SRC)/Tools/Metis/metis.ML \
-  Tools/ATP/async_manager.ML \
+  Tools/async_manager.ML \
   Tools/ATP/atp_problem.ML \
   Tools/ATP/atp_systems.ML \
   Tools/choice_specification.ML \
@@ -320,10 +320,10 @@
   Tools/Sledgehammer/metis_clauses.ML \
   Tools/Sledgehammer/metis_tactics.ML \
   Tools/Sledgehammer/sledgehammer.ML \
-  Tools/Sledgehammer/sledgehammer_fact_filter.ML \
-  Tools/Sledgehammer/sledgehammer_fact_minimize.ML \
+  Tools/Sledgehammer/sledgehammer_filter.ML \
+  Tools/Sledgehammer/sledgehammer_minimize.ML \
   Tools/Sledgehammer/sledgehammer_isar.ML \
-  Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML \
+  Tools/Sledgehammer/sledgehammer_reconstruct.ML \
   Tools/Sledgehammer/sledgehammer_translate.ML \
   Tools/Sledgehammer/sledgehammer_util.ML \
   Tools/SMT/cvc3_solver.ML \
--- a/src/HOL/Library/Permutation.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Library/Permutation.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -183,4 +183,55 @@
 lemma perm_remdups_iff_eq_set: "remdups x <~~> remdups y = (set x = set y)"
   by (metis List.set_remdups perm_set_eq eq_set_perm_remdups)
 
+lemma permutation_Ex_bij:
+  assumes "xs <~~> ys"
+  shows "\<exists>f. bij_betw f {..<length xs} {..<length ys} \<and> (\<forall>i<length xs. xs ! i = ys ! (f i))"
+using assms proof induct
+  case Nil then show ?case unfolding bij_betw_def by simp
+next
+  case (swap y x l)
+  show ?case
+  proof (intro exI[of _ "Fun.swap 0 1 id"] conjI allI impI)
+    show "bij_betw (Fun.swap 0 1 id) {..<length (y # x # l)} {..<length (x # y # l)}"
+      by (auto simp: bij_betw_def bij_betw_swap_iff)
+    fix i assume "i < length(y#x#l)"
+    show "(y # x # l) ! i = (x # y # l) ! (Fun.swap 0 1 id) i"
+      by (cases i) (auto simp: Fun.swap_def gr0_conv_Suc)
+  qed
+next
+  case (Cons xs ys z)
+  then obtain f where bij: "bij_betw f {..<length xs} {..<length ys}" and
+    perm: "\<forall>i<length xs. xs ! i = ys ! (f i)" by blast
+  let "?f i" = "case i of Suc n \<Rightarrow> Suc (f n) | 0 \<Rightarrow> 0"
+  show ?case
+  proof (intro exI[of _ ?f] allI conjI impI)
+    have *: "{..<length (z#xs)} = {0} \<union> Suc ` {..<length xs}"
+            "{..<length (z#ys)} = {0} \<union> Suc ` {..<length ys}"
+      by (simp_all add: lessThan_Suc_eq_insert_0)
+    show "bij_betw ?f {..<length (z#xs)} {..<length (z#ys)}" unfolding *
+    proof (rule bij_betw_combine)
+      show "bij_betw ?f (Suc ` {..<length xs}) (Suc ` {..<length ys})"
+        using bij unfolding bij_betw_def
+        by (auto intro!: inj_onI imageI dest: inj_onD simp: image_compose[symmetric] comp_def)
+    qed (auto simp: bij_betw_def)
+    fix i assume "i < length (z#xs)"
+    then show "(z # xs) ! i = (z # ys) ! (?f i)"
+      using perm by (cases i) auto
+  qed
+next
+  case (trans xs ys zs)
+  then obtain f g where
+    bij: "bij_betw f {..<length xs} {..<length ys}" "bij_betw g {..<length ys} {..<length zs}" and
+    perm: "\<forall>i<length xs. xs ! i = ys ! (f i)" "\<forall>i<length ys. ys ! i = zs ! (g i)" by blast
+  show ?case
+  proof (intro exI[of _ "g\<circ>f"] conjI allI impI)
+    show "bij_betw (g \<circ> f) {..<length xs} {..<length zs}"
+      using bij by (rule bij_betw_trans)
+    fix i assume "i < length xs"
+    with bij have "f i < length ys" unfolding bij_betw_def by force
+    with `i < length xs` show "xs ! i = zs ! (g \<circ> f) i"
+      using trans(1,3)[THEN perm_length] perm by force
+  qed
+qed
+
 end
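
A hedged usage sketch of the new "permutation_Ex_bij" (the lemma name "perm_nth_bijE" below is hypothetical, not part of the changeset): it can be repackaged as an elimination rule that hands an index bijection to an Isar proof, for example:

lemma perm_nth_bijE:
  assumes "xs <~~> ys"
  obtains f where "bij_betw f {..<length xs} {..<length ys}"
    and "\<And>i. i < length xs \<Longrightarrow> xs ! i = ys ! (f i)"
  using permutation_Ex_bij[OF assms] by blast
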
--- a/src/HOL/Library/Sum_Of_Squares/sum_of_squares.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Library/Sum_Of_Squares/sum_of_squares.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -124,10 +124,10 @@
 fun vector_cmul c (v:vector) =
  let val n = dim v
  in if c =/ rat_0 then vector_0 n
-    else (n,FuncUtil.Intfunc.map (fn x => c */ x) (snd v))
+    else (n,FuncUtil.Intfunc.map (fn _ => fn x => c */ x) (snd v))
  end;
 
-fun vector_neg (v:vector) = (fst v,FuncUtil.Intfunc.map Rat.neg (snd v)) :vector;
+fun vector_neg (v:vector) = (fst v,FuncUtil.Intfunc.map (K Rat.neg) (snd v)) :vector;
 
 fun vector_add (v1:vector) (v2:vector) =
  let val m = dim v1
@@ -167,11 +167,11 @@
 fun matrix_cmul c (m:matrix) =
  let val (i,j) = dimensions m
  in if c =/ rat_0 then matrix_0 (i,j)
-    else ((i,j),FuncUtil.Intpairfunc.map (fn x => c */ x) (snd m))
+    else ((i,j),FuncUtil.Intpairfunc.map (fn _ => fn x => c */ x) (snd m))
  end;
 
 fun matrix_neg (m:matrix) =
-  (dimensions m, FuncUtil.Intpairfunc.map Rat.neg (snd m)) :matrix;
+  (dimensions m, FuncUtil.Intpairfunc.map (K Rat.neg) (snd m)) :matrix;
 
 fun matrix_add (m1:matrix) (m2:matrix) =
  let val d1 = dimensions m1
@@ -229,14 +229,14 @@
 
 fun monomial_pow m k =
   if k = 0 then monomial_1
-  else FuncUtil.Ctermfunc.map (fn x => k * x) m;
+  else FuncUtil.Ctermfunc.map (fn _ => fn x => k * x) m;
 
 fun monomial_divides m1 m2 =
   FuncUtil.Ctermfunc.fold (fn (x, k) => fn a => FuncUtil.Ctermfunc.tryapplyd m2 x 0 >= k andalso a) m1 true;;
 
 fun monomial_div m1 m2 =
  let val m = FuncUtil.Ctermfunc.combine Integer.add
-   (fn x => x = 0) m1 (FuncUtil.Ctermfunc.map (fn x => ~ x) m2)
+   (fn x => x = 0) m1 (FuncUtil.Ctermfunc.map (fn _ => fn x => ~ x) m2)
  in if FuncUtil.Ctermfunc.fold (fn (x, k) => fn a => k >= 0 andalso a) m true then m
   else error "monomial_div: non-divisible"
  end;
@@ -270,9 +270,9 @@
 
 fun poly_cmul c p =
   if c =/ rat_0 then poly_0
-  else FuncUtil.Monomialfunc.map (fn x => c */ x) p;
+  else FuncUtil.Monomialfunc.map (fn _ => fn x => c */ x) p;
 
-fun poly_neg p = FuncUtil.Monomialfunc.map Rat.neg p;;
+fun poly_neg p = FuncUtil.Monomialfunc.map (K Rat.neg) p;;
 
 fun poly_add p1 p2 =
   FuncUtil.Monomialfunc.combine (curry op +/) (fn x => x =/ rat_0) p1 p2;
@@ -282,7 +282,7 @@
 fun poly_cmmul (c,m) p =
  if c =/ rat_0 then poly_0
  else if FuncUtil.Ctermfunc.is_empty m
-      then FuncUtil.Monomialfunc.map (fn d => c */ d) p
+      then FuncUtil.Monomialfunc.map (fn _ => fn d => c */ d) p
       else FuncUtil.Monomialfunc.fold (fn (m', d) => fn a => (FuncUtil.Monomialfunc.update (monomial_mul m m', c */ d) a)) p poly_0;
 
 fun poly_mul p1 p2 =
@@ -596,13 +596,13 @@
  let
   val cd1 = fold_rev (common_denominator FuncUtil.Intpairfunc.fold) mats (rat_1)
   val cd2 = common_denominator FuncUtil.Intfunc.fold (snd obj)  (rat_1)
-  val mats' = map (FuncUtil.Intpairfunc.map (fn x => cd1 */ x)) mats
+  val mats' = map (FuncUtil.Intpairfunc.map (fn _ => fn x => cd1 */ x)) mats
   val obj' = vector_cmul cd2 obj
   val max1 = fold_rev (maximal_element FuncUtil.Intpairfunc.fold) mats' (rat_0)
   val max2 = maximal_element FuncUtil.Intfunc.fold (snd obj') (rat_0)
   val scal1 = pow2 (20 - trunc(Math.ln (float_of_rat max1) / Math.ln 2.0))
   val scal2 = pow2 (20 - trunc(Math.ln (float_of_rat max2) / Math.ln 2.0))
-  val mats'' = map (FuncUtil.Intpairfunc.map (fn x => x */ scal1)) mats'
+  val mats'' = map (FuncUtil.Intpairfunc.map (fn _ => fn x => x */ scal1)) mats'
   val obj'' = vector_cmul scal2 obj'
  in solver obj'' mats''
   end
@@ -629,13 +629,13 @@
  let
   val cd1 = fold_rev (common_denominator Inttriplefunc.fold) mats (rat_1)
   val cd2 = common_denominator FuncUtil.Intfunc.fold (snd obj)  (rat_1)
-  val mats' = map (Inttriplefunc.map (fn x => cd1 */ x)) mats
+  val mats' = map (Inttriplefunc.map (fn _ => fn x => cd1 */ x)) mats
   val obj' = vector_cmul cd2 obj
   val max1 = fold_rev (maximal_element Inttriplefunc.fold) mats' (rat_0)
   val max2 = maximal_element FuncUtil.Intfunc.fold (snd obj') (rat_0)
   val scal1 = pow2 (20 - int_of_float(Math.ln (float_of_rat max1) / Math.ln 2.0))
   val scal2 = pow2 (20 - int_of_float(Math.ln (float_of_rat max2) / Math.ln 2.0))
-  val mats'' = map (Inttriplefunc.map (fn x => x */ scal1)) mats'
+  val mats'' = map (Inttriplefunc.map (fn _ => fn x => x */ scal1)) mats'
   val obj'' = vector_cmul scal2 obj'
  in solver obj'' mats''
   end
@@ -655,7 +655,7 @@
 (* Stuff for "equations" ((int*int*int)->num functions).                         *)
 
 fun tri_equation_cmul c eq =
-  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (fn d => c */ d) eq;
+  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (fn _ => fn d => c */ d) eq;
 
 fun tri_equation_add eq1 eq2 = Inttriplefunc.combine (curry op +/) (fn x => x =/ rat_0) eq1 eq2;
 
@@ -686,7 +686,7 @@
      in if b =/ rat_0 then e else
         tri_equation_add e (tri_equation_cmul (Rat.neg b // a) eq)
      end
-   in eliminate vs (Inttriplefunc.update (v,eq') (Inttriplefunc.map elim dun)) (map elim oeqs)
+   in eliminate vs (Inttriplefunc.update (v,eq') (Inttriplefunc.map (K elim) dun)) (map elim oeqs)
    end)
   handle Failure _ => eliminate vs dun eqs)
 in
@@ -724,7 +724,7 @@
          in if b =/ rat_0 then e
             else tri_equation_add e (tri_equation_cmul (Rat.neg b // a) eq)
          end
-    in eliminate (Inttriplefunc.update(v, eq') (Inttriplefunc.map elim dun))
+    in eliminate (Inttriplefunc.update(v, eq') (Inttriplefunc.map (K elim) dun))
                  (map elim oeqs)
     end
 in fn eqs =>
@@ -744,7 +744,7 @@
             (Inttriplefunc.onefunc(one, Rat.rat_of_int ~1))
   val ass =
     Inttriplefunc.combine (curry op +/) (K false)
-    (Inttriplefunc.map (tri_equation_eval vfn) assigs) vfn
+    (Inttriplefunc.map (K (tri_equation_eval vfn)) assigs) vfn
  in if forall (fn e => tri_equation_eval ass e =/ rat_0) eqs
     then Inttriplefunc.delete_safe one ass else raise Sanity
  end;
@@ -762,7 +762,7 @@
 (* Usual operations on equation-parametrized poly.                           *)
 
 fun tri_epoly_cmul c l =
-  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (tri_equation_cmul c) l;;
+  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (K (tri_equation_cmul c)) l;;
 
 val tri_epoly_neg = tri_epoly_cmul (Rat.rat_of_int ~1);
 
@@ -773,7 +773,7 @@
 (* Stuff for "equations" ((int*int)->num functions).                         *)
 
 fun pi_equation_cmul c eq =
-  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (fn d => c */ d) eq;
+  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (fn _ => fn d => c */ d) eq;
 
 fun pi_equation_add eq1 eq2 = Inttriplefunc.combine (curry op +/) (fn x => x =/ rat_0) eq1 eq2;
 
@@ -804,7 +804,7 @@
      in if b =/ rat_0 then e else
         pi_equation_add e (pi_equation_cmul (Rat.neg b // a) eq)
      end
-   in eliminate vs (Inttriplefunc.update (v,eq') (Inttriplefunc.map elim dun)) (map elim oeqs)
+   in eliminate vs (Inttriplefunc.update (v,eq') (Inttriplefunc.map (K elim) dun)) (map elim oeqs)
    end
   handle Failure _ => eliminate vs dun eqs
 in
@@ -842,7 +842,7 @@
          in if b =/ rat_0 then e
             else pi_equation_add e (pi_equation_cmul (Rat.neg b // a) eq)
          end
-    in eliminate (Inttriplefunc.update(v, eq') (Inttriplefunc.map elim dun))
+    in eliminate (Inttriplefunc.update(v, eq') (Inttriplefunc.map (K elim) dun))
                  (map elim oeqs)
     end
 in fn eqs =>
@@ -862,7 +862,7 @@
             (Inttriplefunc.onefunc(one, Rat.rat_of_int ~1))
   val ass =
     Inttriplefunc.combine (curry op +/) (K false)
-    (Inttriplefunc.map (pi_equation_eval vfn) assigs) vfn
+    (Inttriplefunc.map (K (pi_equation_eval vfn)) assigs) vfn
  in if forall (fn e => pi_equation_eval ass e =/ rat_0) eqs
     then Inttriplefunc.delete_safe one ass else raise Sanity
  end;
@@ -880,7 +880,7 @@
 (* Usual operations on equation-parametrized poly.                           *)
 
 fun pi_epoly_cmul c l =
-  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (pi_equation_cmul c) l;;
+  if c =/ rat_0 then Inttriplefunc.empty else Inttriplefunc.map (K (pi_equation_cmul c)) l;;
 
 val pi_epoly_neg = pi_epoly_cmul (Rat.rat_of_int ~1);
 
@@ -1035,7 +1035,7 @@
 val bmatrix_add = Inttriplefunc.combine (curry op +/) (fn x => x =/ rat_0);
 fun bmatrix_cmul c bm =
   if c =/ rat_0 then Inttriplefunc.empty
-  else Inttriplefunc.map (fn x => c */ x) bm;
+  else Inttriplefunc.map (fn _ => fn x => c */ x) bm;
 
 val bmatrix_neg = bmatrix_cmul (Rat.rat_of_int ~1);
 fun bmatrix_sub m1 m2 = bmatrix_add m1 (bmatrix_neg m2);;
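
The FuncUtil edits in this changeset (and likewise the Inttriplefunc and Graph.map ones) all track a single interface change: the table "map" combinator now passes the key to the mapping function as well, so value-only functions gain an ignored first argument via "fn _ => ..." or "K". A minimal plain-SML sketch of the idea, with hypothetical names rather than the actual FuncUtil code:

(* old style: the mapping function saw only the stored value *)
fun map_values_old f entries = List.map (fn (k, v) => (k, f v)) entries;

(* new style: the mapping function also receives the key *)
fun map_values_new f entries = List.map (fn (k, v) => (k, f k v)) entries;

(* callers that ignore the key wrap their function, as in the surrounding edits *)
fun K x _ = x;
val negated = map_values_new (K (fn x => ~x)) [(1, 2), (2, 5)];  (* [(1, ~2), (2, ~5)] *)
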
--- a/src/HOL/Library/positivstellensatz.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Library/positivstellensatz.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -549,7 +549,7 @@
 (* A linear arithmetic prover *)
 local
   val linear_add = FuncUtil.Ctermfunc.combine (curry op +/) (fn z => z =/ Rat.zero)
-  fun linear_cmul c = FuncUtil.Ctermfunc.map (fn x => c */ x)
+  fun linear_cmul c = FuncUtil.Ctermfunc.map (fn _ => fn x => c */ x)
   val one_tm = @{cterm "1::real"}
   fun contradictory p (e,_) = ((FuncUtil.Ctermfunc.is_empty e) andalso not(p Rat.zero)) orelse
      ((eq_set (op aconvc) (FuncUtil.Ctermfunc.dom e, [one_tm])) andalso
--- a/src/HOL/List.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/List.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -3076,6 +3076,10 @@
   "\<not> P x \<Longrightarrow> remove1 x (filter P xs) = filter P xs"
 by(induct xs) auto
 
+lemma filter_remove1:
+  "filter Q (remove1 x xs) = remove1 x (filter Q xs)"
+by (induct xs) auto
+
 lemma notin_set_remove1[simp]: "x ~: set xs ==> x ~: set(remove1 y xs)"
 apply(insert set_remove1_subset)
 apply fast
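
A concrete sanity check of the new "filter_remove1" lemma (a sketch, not part of the changeset); it is a direct instance, so "rule" suffices:

lemma "filter (\<lambda>n. 2 < n) (remove1 (3::nat) [1, 2, 3, 4]) = remove1 3 (filter (\<lambda>n. 2 < n) [1, 2, 3, 4])"
  by (rule filter_remove1)
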
--- a/src/HOL/Metis_Examples/Abstraction.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Metis_Examples/Abstraction.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -21,7 +21,7 @@
   pset  :: "'a set => 'a set"
   order :: "'a set => ('a * 'a) set"
 
-declare [[ atp_problem_prefix = "Abstraction__Collect_triv" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Collect_triv" ]]
 lemma (*Collect_triv:*) "a \<in> {x. P x} ==> P a"
 proof -
   assume "a \<in> {x. P x}"
@@ -34,11 +34,11 @@
 by (metis mem_Collect_eq)
 
 
-declare [[ atp_problem_prefix = "Abstraction__Collect_mp" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Collect_mp" ]]
 lemma "a \<in> {x. P x --> Q x} ==> a \<in> {x. P x} ==> a \<in> {x. Q x}"
   by (metis Collect_imp_eq ComplD UnE)
 
-declare [[ atp_problem_prefix = "Abstraction__Sigma_triv" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Sigma_triv" ]]
 lemma "(a,b) \<in> Sigma A B ==> a \<in> A & b \<in> B a"
 proof -
   assume A1: "(a, b) \<in> Sigma A B"
@@ -51,7 +51,7 @@
 lemma Sigma_triv: "(a,b) \<in> Sigma A B ==> a \<in> A & b \<in> B a"
 by (metis SigmaD1 SigmaD2)
 
-declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Sigma_Collect" ]]
 lemma "(a, b) \<in> (SIGMA x:A. {y. x = f y}) \<Longrightarrow> a \<in> A \<and> a = f b"
 (* Metis says this is satisfiable!
 by (metis CollectD SigmaD1 SigmaD2)
@@ -85,7 +85,7 @@
 qed
 
 
-declare [[ atp_problem_prefix = "Abstraction__CLF_eq_in_pp" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__CLF_eq_in_pp" ]]
 lemma "(cl,f) \<in> CLF ==> CLF = (SIGMA cl: CL.{f. f \<in> pset cl}) ==> f \<in> pset cl"
 by (metis Collect_mem_eq SigmaD2)
 
@@ -100,7 +100,7 @@
   thus "f \<in> pset cl" by metis
 qed
 
-declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect_Pi" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Sigma_Collect_Pi" ]]
 lemma
     "(cl,f) \<in> (SIGMA cl: CL. {f. f \<in> pset cl \<rightarrow> pset cl}) ==> 
     f \<in> pset cl \<rightarrow> pset cl"
@@ -112,7 +112,7 @@
   thus "f \<in> pset cl \<rightarrow> pset cl" by metis
 qed
 
-declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect_Int" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Sigma_Collect_Int" ]]
 lemma
     "(cl,f) \<in> (SIGMA cl: CL. {f. f \<in> pset cl \<inter> cl}) ==>
    f \<in> pset cl \<inter> cl"
@@ -127,27 +127,27 @@
 qed
 
 
-declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect_Pi_mono" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__Sigma_Collect_Pi_mono" ]]
 lemma
     "(cl,f) \<in> (SIGMA cl: CL. {f. f \<in> pset cl \<rightarrow> pset cl & monotone f (pset cl) (order cl)}) ==>
    (f \<in> pset cl \<rightarrow> pset cl)  &  (monotone f (pset cl) (order cl))"
 by auto
 
-declare [[ atp_problem_prefix = "Abstraction__CLF_subset_Collect_Int" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__CLF_subset_Collect_Int" ]]
 lemma "(cl,f) \<in> CLF ==> 
    CLF \<subseteq> (SIGMA cl: CL. {f. f \<in> pset cl \<inter> cl}) ==>
    f \<in> pset cl \<inter> cl"
 by auto
 
 
-declare [[ atp_problem_prefix = "Abstraction__CLF_eq_Collect_Int" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__CLF_eq_Collect_Int" ]]
 lemma "(cl,f) \<in> CLF ==> 
    CLF = (SIGMA cl: CL. {f. f \<in> pset cl \<inter> cl}) ==>
    f \<in> pset cl \<inter> cl"
 by auto
 
 
-declare [[ atp_problem_prefix = "Abstraction__CLF_subset_Collect_Pi" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__CLF_subset_Collect_Pi" ]]
 lemma 
    "(cl,f) \<in> CLF ==> 
     CLF \<subseteq> (SIGMA cl': CL. {f. f \<in> pset cl' \<rightarrow> pset cl'}) ==> 
@@ -155,7 +155,7 @@
 by fast
 
 
-declare [[ atp_problem_prefix = "Abstraction__CLF_eq_Collect_Pi" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__CLF_eq_Collect_Pi" ]]
 lemma 
   "(cl,f) \<in> CLF ==> 
    CLF = (SIGMA cl: CL. {f. f \<in> pset cl \<rightarrow> pset cl}) ==> 
@@ -163,46 +163,46 @@
 by auto
 
 
-declare [[ atp_problem_prefix = "Abstraction__CLF_eq_Collect_Pi_mono" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__CLF_eq_Collect_Pi_mono" ]]
 lemma 
   "(cl,f) \<in> CLF ==> 
    CLF = (SIGMA cl: CL. {f. f \<in> pset cl \<rightarrow> pset cl & monotone f (pset cl) (order cl)}) ==>
    (f \<in> pset cl \<rightarrow> pset cl)  &  (monotone f (pset cl) (order cl))"
 by auto
 
-declare [[ atp_problem_prefix = "Abstraction__map_eq_zipA" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__map_eq_zipA" ]]
 lemma "map (%x. (f x, g x)) xs = zip (map f xs) (map g xs)"
 apply (induct xs)
  apply (metis map_is_Nil_conv zip.simps(1))
 by auto
 
-declare [[ atp_problem_prefix = "Abstraction__map_eq_zipB" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__map_eq_zipB" ]]
 lemma "map (%w. (w -> w, w \<times> w)) xs = 
        zip (map (%w. w -> w) xs) (map (%w. w \<times> w) xs)"
 apply (induct xs)
  apply (metis Nil_is_map_conv zip_Nil)
 by auto
 
-declare [[ atp_problem_prefix = "Abstraction__image_evenA" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__image_evenA" ]]
 lemma "(%x. Suc(f x)) ` {x. even x} <= A ==> (\<forall>x. even x --> Suc(f x) \<in> A)"
 by (metis Collect_def image_subset_iff mem_def)
 
-declare [[ atp_problem_prefix = "Abstraction__image_evenB" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__image_evenB" ]]
 lemma "(%x. f (f x)) ` ((%x. Suc(f x)) ` {x. even x}) <= A 
        ==> (\<forall>x. even x --> f (f (Suc(f x))) \<in> A)";
 by (metis Collect_def imageI image_image image_subset_iff mem_def)
 
-declare [[ atp_problem_prefix = "Abstraction__image_curry" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__image_curry" ]]
 lemma "f \<in> (%u v. b \<times> u \<times> v) ` A ==> \<forall>u v. P (b \<times> u \<times> v) ==> P(f y)" 
 (*sledgehammer*)
 by auto
 
-declare [[ atp_problem_prefix = "Abstraction__image_TimesA" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__image_TimesA" ]]
 lemma image_TimesA: "(%(x,y). (f x, g y)) ` (A \<times> B) = (f`A) \<times> (g`B)"
 (*sledgehammer*)
 apply (rule equalityI)
 (***Even the two inclusions are far too difficult
-using [[ atp_problem_prefix = "Abstraction__image_TimesA_simpler"]]
+using [[ sledgehammer_problem_prefix = "Abstraction__image_TimesA_simpler"]]
 ***)
 apply (rule subsetI)
 apply (erule imageE)
@@ -225,13 +225,13 @@
 (*Given the difficulty of the previous problem, these two are probably
 impossible*)
 
-declare [[ atp_problem_prefix = "Abstraction__image_TimesB" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__image_TimesB" ]]
 lemma image_TimesB:
     "(%(x,y,z). (f x, g y, h z)) ` (A \<times> B \<times> C) = (f`A) \<times> (g`B) \<times> (h`C)"
 (*sledgehammer*)
 by force
 
-declare [[ atp_problem_prefix = "Abstraction__image_TimesC" ]]
+declare [[ sledgehammer_problem_prefix = "Abstraction__image_TimesC" ]]
 lemma image_TimesC:
     "(%(x,y). (x \<rightarrow> x, y \<times> y)) ` (A \<times> B) = 
      ((%x. x \<rightarrow> x) ` A) \<times> ((%y. y \<times> y) ` B)" 
--- a/src/HOL/Metis_Examples/BT.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Metis_Examples/BT.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -65,7 +65,7 @@
 
 text {* \medskip BT simplification *}
 
-declare [[ atp_problem_prefix = "BT__n_leaves_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__n_leaves_reflect" ]]
 
 lemma n_leaves_reflect: "n_leaves (reflect t) = n_leaves t"
 proof (induct t)
@@ -81,7 +81,7 @@
     by (metis n_leaves.simps(2) nat_add_commute reflect.simps(2))
 qed
 
-declare [[ atp_problem_prefix = "BT__n_nodes_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__n_nodes_reflect" ]]
 
 lemma n_nodes_reflect: "n_nodes (reflect t) = n_nodes t"
 proof (induct t)
@@ -91,7 +91,7 @@
     by (metis add_commute n_nodes.simps(2) reflect.simps(2))
 qed
 
-declare [[ atp_problem_prefix = "BT__depth_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__depth_reflect" ]]
 
 lemma depth_reflect: "depth (reflect t) = depth t"
 apply (induct t)
@@ -102,14 +102,14 @@
 The famous relationship between the numbers of leaves and nodes.
 *}
 
-declare [[ atp_problem_prefix = "BT__n_leaves_nodes" ]]
+declare [[ sledgehammer_problem_prefix = "BT__n_leaves_nodes" ]]
 
 lemma n_leaves_nodes: "n_leaves t = Suc (n_nodes t)"
 apply (induct t)
  apply (metis n_leaves.simps(1) n_nodes.simps(1))
 by auto
 
-declare [[ atp_problem_prefix = "BT__reflect_reflect_ident" ]]
+declare [[ sledgehammer_problem_prefix = "BT__reflect_reflect_ident" ]]
 
 lemma reflect_reflect_ident: "reflect (reflect t) = t"
 apply (induct t)
@@ -127,7 +127,7 @@
   thus "reflect (reflect (Br a t1 t2)) = Br a t1 t2" by blast
 qed
 
-declare [[ atp_problem_prefix = "BT__bt_map_ident" ]]
+declare [[ sledgehammer_problem_prefix = "BT__bt_map_ident" ]]
 
 lemma bt_map_ident: "bt_map (%x. x) = (%y. y)"
 apply (rule ext) 
@@ -135,35 +135,35 @@
  apply (metis bt_map.simps(1))
 by (metis bt_map.simps(2))
 
-declare [[ atp_problem_prefix = "BT__bt_map_appnd" ]]
+declare [[ sledgehammer_problem_prefix = "BT__bt_map_appnd" ]]
 
 lemma bt_map_appnd: "bt_map f (appnd t u) = appnd (bt_map f t) (bt_map f u)"
 apply (induct t)
  apply (metis appnd.simps(1) bt_map.simps(1))
 by (metis appnd.simps(2) bt_map.simps(2))
 
-declare [[ atp_problem_prefix = "BT__bt_map_compose" ]]
+declare [[ sledgehammer_problem_prefix = "BT__bt_map_compose" ]]
 
 lemma bt_map_compose: "bt_map (f o g) t = bt_map f (bt_map g t)"
 apply (induct t)
  apply (metis bt_map.simps(1))
 by (metis bt_map.simps(2) o_eq_dest_lhs)
 
-declare [[ atp_problem_prefix = "BT__bt_map_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__bt_map_reflect" ]]
 
 lemma bt_map_reflect: "bt_map f (reflect t) = reflect (bt_map f t)"
 apply (induct t)
  apply (metis bt_map.simps(1) reflect.simps(1))
 by (metis bt_map.simps(2) reflect.simps(2))
 
-declare [[ atp_problem_prefix = "BT__preorder_bt_map" ]]
+declare [[ sledgehammer_problem_prefix = "BT__preorder_bt_map" ]]
 
 lemma preorder_bt_map: "preorder (bt_map f t) = map f (preorder t)"
 apply (induct t)
  apply (metis bt_map.simps(1) map.simps(1) preorder.simps(1))
 by simp
 
-declare [[ atp_problem_prefix = "BT__inorder_bt_map" ]]
+declare [[ sledgehammer_problem_prefix = "BT__inorder_bt_map" ]]
 
 lemma inorder_bt_map: "inorder (bt_map f t) = map f (inorder t)"
 proof (induct t)
@@ -178,21 +178,21 @@
   case (Br a t1 t2) thus ?case by simp
 qed
 
-declare [[ atp_problem_prefix = "BT__postorder_bt_map" ]]
+declare [[ sledgehammer_problem_prefix = "BT__postorder_bt_map" ]]
 
 lemma postorder_bt_map: "postorder (bt_map f t) = map f (postorder t)"
 apply (induct t)
  apply (metis Nil_is_map_conv bt_map.simps(1) postorder.simps(1))
 by simp
 
-declare [[ atp_problem_prefix = "BT__depth_bt_map" ]]
+declare [[ sledgehammer_problem_prefix = "BT__depth_bt_map" ]]
 
 lemma depth_bt_map [simp]: "depth (bt_map f t) = depth t"
 apply (induct t)
  apply (metis bt_map.simps(1) depth.simps(1))
 by simp
 
-declare [[ atp_problem_prefix = "BT__n_leaves_bt_map" ]]
+declare [[ sledgehammer_problem_prefix = "BT__n_leaves_bt_map" ]]
 
 lemma n_leaves_bt_map [simp]: "n_leaves (bt_map f t) = n_leaves t"
 apply (induct t)
@@ -213,7 +213,7 @@
     using F1 by metis
 qed
 
-declare [[ atp_problem_prefix = "BT__preorder_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__preorder_reflect" ]]
 
 lemma preorder_reflect: "preorder (reflect t) = rev (postorder t)"
 apply (induct t)
@@ -222,7 +222,7 @@
 by (metis append.simps(1) append.simps(2) postorder.simps(2) preorder.simps(2)
           reflect.simps(2) rev.simps(2) rev_append rev_swap)
 
-declare [[ atp_problem_prefix = "BT__inorder_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__inorder_reflect" ]]
 
 lemma inorder_reflect: "inorder (reflect t) = rev (inorder t)"
 apply (induct t)
@@ -233,7 +233,7 @@
           reflect.simps(2) rev.simps(2) rev_append)
 *)
 
-declare [[ atp_problem_prefix = "BT__postorder_reflect" ]]
+declare [[ sledgehammer_problem_prefix = "BT__postorder_reflect" ]]
 
 lemma postorder_reflect: "postorder (reflect t) = rev (preorder t)"
 apply (induct t)
@@ -245,14 +245,14 @@
 Analogues of the standard properties of the append function for lists.
 *}
 
-declare [[ atp_problem_prefix = "BT__appnd_assoc" ]]
+declare [[ sledgehammer_problem_prefix = "BT__appnd_assoc" ]]
 
 lemma appnd_assoc [simp]: "appnd (appnd t1 t2) t3 = appnd t1 (appnd t2 t3)"
 apply (induct t1)
  apply (metis appnd.simps(1))
 by (metis appnd.simps(2))
 
-declare [[ atp_problem_prefix = "BT__appnd_Lf2" ]]
+declare [[ sledgehammer_problem_prefix = "BT__appnd_Lf2" ]]
 
 lemma appnd_Lf2 [simp]: "appnd t Lf = t"
 apply (induct t)
@@ -261,14 +261,14 @@
 
 declare max_add_distrib_left [simp]
 
-declare [[ atp_problem_prefix = "BT__depth_appnd" ]]
+declare [[ sledgehammer_problem_prefix = "BT__depth_appnd" ]]
 
 lemma depth_appnd [simp]: "depth (appnd t1 t2) = depth t1 + depth t2"
 apply (induct t1)
  apply (metis appnd.simps(1) depth.simps(1) plus_nat.simps(1))
 by simp
 
-declare [[ atp_problem_prefix = "BT__n_leaves_appnd" ]]
+declare [[ sledgehammer_problem_prefix = "BT__n_leaves_appnd" ]]
 
 lemma n_leaves_appnd [simp]:
      "n_leaves (appnd t1 t2) = n_leaves t1 * n_leaves t2"
@@ -277,7 +277,7 @@
               semiring_norm(111))
 by (simp add: left_distrib)
 
-declare [[ atp_problem_prefix = "BT__bt_map_appnd" ]]
+declare [[ sledgehammer_problem_prefix = "BT__bt_map_appnd" ]]
 
 lemma (*bt_map_appnd:*)
      "bt_map f (appnd t1 t2) = appnd (bt_map f t1) (bt_map f t2)"
--- a/src/HOL/Metis_Examples/BigO.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Metis_Examples/BigO.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -15,7 +15,7 @@
 definition bigo :: "('a => 'b::linordered_idom) => ('a => 'b) set"    ("(1O'(_'))") where
   "O(f::('a => 'b)) ==   {h. EX c. ALL x. abs (h x) <= c * abs (f x)}"
 
-declare [[ atp_problem_prefix = "BigO__bigo_pos_const" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_pos_const" ]]
 lemma bigo_pos_const: "(EX (c::'a::linordered_idom). 
     ALL x. (abs (h x)) <= (c * (abs (f x))))
       = (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
@@ -124,7 +124,7 @@
     {h. EX c. (0 < c & (ALL x. abs (h x) <= c * abs (f x)))}"
 by (auto simp add: bigo_def bigo_pos_const)
 
-declare [[ atp_problem_prefix = "BigO__bigo_elt_subset" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_elt_subset" ]]
 lemma bigo_elt_subset [intro]: "f : O(g) ==> O(f) <= O(g)"
   apply (auto simp add: bigo_alt_def)
   apply (rule_tac x = "ca * c" in exI)
@@ -142,12 +142,12 @@
 done
 
 
-declare [[ atp_problem_prefix = "BigO__bigo_refl" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_refl" ]]
 lemma bigo_refl [intro]: "f : O(f)"
 apply (auto simp add: bigo_def)
 by (metis mult_1 order_refl)
 
-declare [[ atp_problem_prefix = "BigO__bigo_zero" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_zero" ]]
 lemma bigo_zero: "0 : O(g)"
 apply (auto simp add: bigo_def func_zero)
 by (metis mult_zero_left order_refl)
@@ -230,7 +230,7 @@
   apply (auto del: subsetI simp del: bigo_plus_idemp)
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_plus_eq" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_plus_eq" ]]
 lemma bigo_plus_eq: "ALL x. 0 <= f x ==> ALL x. 0 <= g x ==> 
   O(f + g) = O(f) \<oplus> O(g)"
   apply (rule equalityI)
@@ -253,13 +253,13 @@
   apply (rule abs_triangle_ineq)
   apply (metis add_nonneg_nonneg)
   apply (rule add_mono)
-using [[ atp_problem_prefix = "BigO__bigo_plus_eq_simpler" ]] 
+using [[ sledgehammer_problem_prefix = "BigO__bigo_plus_eq_simpler" ]] 
 (*Found by SPASS; SLOW*)
 apply (metis le_maxI2 linorder_linear linorder_not_le min_max.sup_absorb1 mult_le_cancel_right order_trans)
 apply (metis le_maxI2 linorder_not_le mult_le_cancel_right order_trans)
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_bounded_alt" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_bounded_alt" ]]
 lemma bigo_bounded_alt: "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> 
     f : O(g)" 
   apply (auto simp add: bigo_def)
@@ -277,14 +277,14 @@
 qed
 
 text{*So here is the easier (and more natural) problem using transitivity*}
-declare [[ atp_problem_prefix = "BigO__bigo_bounded_alt_trans" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_bounded_alt_trans" ]]
 lemma "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> f : O(g)" 
 apply (auto simp add: bigo_def)
 (* Version 1: one-line proof *)
 by (metis abs_ge_self abs_mult order_trans)
 
 text{*So here is the easier (and more natural) problem using transitivity*}
-declare [[ atp_problem_prefix = "BigO__bigo_bounded_alt_trans" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_bounded_alt_trans" ]]
 lemma "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> f : O(g)" 
   apply (auto simp add: bigo_def)
 (* Version 2: structured proof *)
@@ -299,7 +299,7 @@
   apply simp
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_bounded2" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_bounded2" ]]
 lemma bigo_bounded2: "ALL x. lb x <= f x ==> ALL x. f x <= lb x + g x ==>
     f : lb +o O(g)"
 apply (rule set_minus_imp_plus)
@@ -314,13 +314,13 @@
   thus "(0\<Colon>'b) \<le> f x + - lb x" by (metis not_leE diff_minus less_iff_diff_less_0 less_le_not_le)
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_abs" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_abs" ]]
 lemma bigo_abs: "(%x. abs(f x)) =o O(f)" 
 apply (unfold bigo_def)
 apply auto
 by (metis mult_1 order_refl)
 
-declare [[ atp_problem_prefix = "BigO__bigo_abs2" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_abs2" ]]
 lemma bigo_abs2: "f =o O(%x. abs(f x))"
 apply (unfold bigo_def)
 apply auto
@@ -383,7 +383,7 @@
     by (simp add: bigo_abs3 [symmetric])
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_mult" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult" ]]
 lemma bigo_mult [intro]: "O(f)\<otimes>O(g) <= O(f * g)"
   apply (rule subsetI)
   apply (subst bigo_def)
@@ -395,7 +395,7 @@
   apply(erule_tac x = x in allE)+
   apply(subgoal_tac "c * ca * abs(f x * g x) = 
       (c * abs(f x)) * (ca * abs(g x))")
-using [[ atp_problem_prefix = "BigO__bigo_mult_simpler" ]]
+using [[ sledgehammer_problem_prefix = "BigO__bigo_mult_simpler" ]]
 prefer 2 
 apply (metis mult_assoc mult_left_commute
   abs_of_pos mult_left_commute
@@ -406,14 +406,14 @@
    abs_mult has just been done *)
 by (metis abs_ge_zero mult_mono')
 
-declare [[ atp_problem_prefix = "BigO__bigo_mult2" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult2" ]]
 lemma bigo_mult2 [intro]: "f *o O(g) <= O(f * g)"
   apply (auto simp add: bigo_def elt_set_times_def func_times abs_mult)
 (*sledgehammer*); 
   apply (rule_tac x = c in exI)
   apply clarify
   apply (drule_tac x = x in spec)
-using [[ atp_problem_prefix = "BigO__bigo_mult2_simpler" ]]
+using [[ sledgehammer_problem_prefix = "BigO__bigo_mult2_simpler" ]]
 (*sledgehammer [no luck]*); 
   apply (subgoal_tac "abs(f x) * abs(b x) <= abs(f x) * (c * abs(g x))")
   apply (simp add: mult_ac)
@@ -421,11 +421,11 @@
   apply (rule abs_ge_zero)
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_mult3" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult3" ]]
 lemma bigo_mult3: "f : O(h) ==> g : O(j) ==> f * g : O(h * j)"
 by (metis bigo_mult set_rev_mp set_times_intro)
 
-declare [[ atp_problem_prefix = "BigO__bigo_mult4" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult4" ]]
 lemma bigo_mult4 [intro]:"f : k +o O(h) ==> g * f : (g * k) +o O(g * h)"
 by (metis bigo_mult2 set_plus_mono_b set_times_intro2 set_times_plus_distrib)
 
@@ -459,13 +459,13 @@
   qed
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_mult6" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult6" ]]
 lemma bigo_mult6: "ALL x. f x ~= 0 ==>
     O(f * g) = (f::'a => ('b::linordered_field)) *o O(g)"
 by (metis bigo_mult2 bigo_mult5 order_antisym)
 
 (*proof requires relaxing relevance: 2007-01-25*)
-declare [[ atp_problem_prefix = "BigO__bigo_mult7" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult7" ]]
   declare bigo_mult6 [simp]
 lemma bigo_mult7: "ALL x. f x ~= 0 ==>
     O(f * g) <= O(f::'a => ('b::linordered_field)) \<otimes> O(g)"
@@ -477,7 +477,7 @@
 done
   declare bigo_mult6 [simp del]
 
-declare [[ atp_problem_prefix = "BigO__bigo_mult8" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_mult8" ]]
   declare bigo_mult7[intro!]
 lemma bigo_mult8: "ALL x. f x ~= 0 ==>
     O(f * g) = O(f::'a => ('b::linordered_field)) \<otimes> O(g)"
@@ -528,7 +528,7 @@
   qed
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_plus_absorb" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_plus_absorb" ]]
 lemma bigo_plus_absorb [simp]: "f : O(g) ==> f +o O(g) = O(g)"
 by (metis bigo_plus_absorb_lemma1 bigo_plus_absorb_lemma2 order_eq_iff);
 
@@ -555,7 +555,7 @@
 lemma bigo_const1: "(%x. c) : O(%x. 1)"
 by (auto simp add: bigo_def mult_ac)
 
-declare [[ atp_problem_prefix = "BigO__bigo_const2" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_const2" ]]
 lemma (*bigo_const2 [intro]:*) "O(%x. c) <= O(%x. 1)"
 by (metis bigo_const1 bigo_elt_subset);
 
@@ -566,7 +566,7 @@
   show "O(\<lambda>x. c) \<subseteq> O(\<lambda>x. 1)" by (metis F1 bigo_elt_subset)
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_const3" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_const3" ]]
 lemma bigo_const3: "(c::'a::linordered_field) ~= 0 ==> (%x. 1) : O(%x. c)"
 apply (simp add: bigo_def)
 by (metis abs_eq_0 left_inverse order_refl)
@@ -578,7 +578,7 @@
     O(%x. c) = O(%x. 1)"
 by (rule equalityI, rule bigo_const2, rule bigo_const4, assumption)
 
-declare [[ atp_problem_prefix = "BigO__bigo_const_mult1" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_const_mult1" ]]
 lemma bigo_const_mult1: "(%x. c * f x) : O(f)"
   apply (simp add: bigo_def abs_mult)
 by (metis le_less)
@@ -586,7 +586,7 @@
 lemma bigo_const_mult2: "O(%x. c * f x) <= O(f)"
 by (rule bigo_elt_subset, rule bigo_const_mult1)
 
-declare [[ atp_problem_prefix = "BigO__bigo_const_mult3" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_const_mult3" ]]
 lemma bigo_const_mult3: "(c::'a::linordered_field) ~= 0 ==> f : O(%x. c * f x)"
   apply (simp add: bigo_def)
 (*sledgehammer [no luck]*)
@@ -604,7 +604,7 @@
     O(%x. c * f x) = O(f)"
 by (rule equalityI, rule bigo_const_mult2, erule bigo_const_mult4)
 
-declare [[ atp_problem_prefix = "BigO__bigo_const_mult5" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_const_mult5" ]]
 lemma bigo_const_mult5 [simp]: "(c::'a::linordered_field) ~= 0 ==> 
     (%x. c) *o O(f) = O(f)"
   apply (auto del: subsetI)
@@ -624,7 +624,7 @@
 done
 
 
-declare [[ atp_problem_prefix = "BigO__bigo_const_mult6" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_const_mult6" ]]
 lemma bigo_const_mult6 [intro]: "(%x. c) *o O(f) <= O(f)"
   apply (auto intro!: subsetI
     simp add: bigo_def elt_set_times_def func_times
@@ -681,7 +681,7 @@
 apply (blast intro: order_trans mult_right_mono abs_ge_self) 
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_setsum1" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_setsum1" ]]
 lemma bigo_setsum1: "ALL x y. 0 <= h x y ==> 
     EX c. ALL x y. abs(f x y) <= c * (h x y) ==>
       (%x. SUM y : A x. f x y) =o O(%x. SUM y : A x. h x y)"
@@ -698,7 +698,7 @@
       (%x. SUM y : A x. f y) =o O(%x. SUM y : A x. h y)"
 by (rule bigo_setsum1, auto)  
 
-declare [[ atp_problem_prefix = "BigO__bigo_setsum3" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_setsum3" ]]
 lemma bigo_setsum3: "f =o O(h) ==>
     (%x. SUM y : A x. (l x y) * f(k x y)) =o
       O(%x. SUM y : A x. abs(l x y * h(k x y)))"
@@ -729,7 +729,7 @@
   apply (erule set_plus_imp_minus)
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_setsum5" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_setsum5" ]]
 lemma bigo_setsum5: "f =o O(h) ==> ALL x y. 0 <= l x y ==> 
     ALL x. 0 <= h x ==>
       (%x. SUM y : A x. (l x y) * f(k x y)) =o
@@ -786,7 +786,7 @@
   apply (simp add: func_times) 
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_fix" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_fix" ]]
 lemma bigo_fix: "(%x. f ((x::nat) + 1)) =o O(%x. h(x + 1)) ==> f 0 = 0 ==>
     f =o O(h)"
   apply (simp add: bigo_alt_def)
@@ -849,7 +849,7 @@
   apply (erule spec)+
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_lesso1" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_lesso1" ]]
 lemma bigo_lesso1: "ALL x. f x <= g x ==> f <o g =o O(h)"
 apply (unfold lesso_def)
 apply (subgoal_tac "(%x. max (f x - g x) 0) = 0")
@@ -863,7 +863,7 @@
   by (simp split: split_max)
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_lesso2" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_lesso2" ]]
 lemma bigo_lesso2: "f =o g +o O(h) ==>
     ALL x. 0 <= k x ==> ALL x. k x <= f x ==>
       k <o g =o O(h)"
@@ -899,7 +899,7 @@
     by (metis abs_ge_zero le_cases min_max.sup_absorb2)
 qed
 
-declare [[ atp_problem_prefix = "BigO__bigo_lesso3" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_lesso3" ]]
 lemma bigo_lesso3: "f =o g +o O(h) ==>
     ALL x. 0 <= k x ==> ALL x. g x <= k x ==>
       f <o k =o O(h)"
@@ -916,7 +916,7 @@
   apply (simp)
   apply (subst abs_of_nonneg)
   apply (drule_tac x = x in spec) back
-using [[ atp_problem_prefix = "BigO__bigo_lesso3_simpler" ]]
+using [[ sledgehammer_problem_prefix = "BigO__bigo_lesso3_simpler" ]]
 apply (metis diff_less_0_iff_less linorder_not_le not_leE uminus_add_conv_diff xt1(12) xt1(6))
 apply (metis add_minus_cancel diff_le_eq le_diff_eq uminus_add_conv_diff)
 apply (metis abs_ge_zero linorder_linear min_max.sup_absorb1 min_max.sup_commute)
@@ -936,7 +936,7 @@
     split: split_max abs_split)
 done
 
-declare [[ atp_problem_prefix = "BigO__bigo_lesso5" ]]
+declare [[ sledgehammer_problem_prefix = "BigO__bigo_lesso5" ]]
 lemma bigo_lesso5: "f <o g =o O(h) ==>
     EX C. ALL x. f x <= g x + C * abs(h x)"
   apply (simp only: lesso_def bigo_alt_def)
--- a/src/HOL/Metis_Examples/Tarski.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Metis_Examples/Tarski.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -409,7 +409,7 @@
 (*never proved, 2007-01-22: Tarski__CLF_unnamed_lemma
   NOT PROVABLE because of the conjunction used in the definition: we don't
   allow reasoning with rules like conjE, which is essential here.*)
-declare [[ atp_problem_prefix = "Tarski__CLF_unnamed_lemma" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_unnamed_lemma" ]]
 lemma (in CLF) [simp]:
     "f: pset cl -> pset cl & monotone f (pset cl) (order cl)" 
 apply (insert f_cl)
@@ -426,7 +426,7 @@
 by (simp add: A_def r_def)
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__CLF_CLF_dual" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_CLF_dual" ]]
 declare (in CLF) CLF_set_def [simp] CL_dualCL [simp] monotone_dual [simp] dualA_iff [simp]
 
 lemma (in CLF) CLF_dual: "(dual cl, f) \<in> CLF_set" 
@@ -454,7 +454,7 @@
 subsection {* lemmas for Tarski, lub *}
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__CLF_lubH_le_flubH" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_lubH_le_flubH" ]]
   declare CL.lub_least[intro] CLF.f_in_funcset[intro] funcset_mem[intro] CL.lub_in_lattice[intro] PO.transE[intro] PO.monotoneE[intro] CLF.monotone_f[intro] CL.lub_upper[intro] 
 lemma (in CLF) lubH_le_flubH:
      "H = {x. (x, f x) \<in> r & x \<in> A} ==> (lub H cl, f (lub H cl)) \<in> r"
@@ -464,7 +464,7 @@
 -- {* @{text "\<forall>x:H. (x, f (lub H r)) \<in> r"} *}
 apply (rule ballI)
 (*never proved, 2007-01-22*)
-using [[ atp_problem_prefix = "Tarski__CLF_lubH_le_flubH_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__CLF_lubH_le_flubH_simpler" ]]
 apply (rule transE)
 -- {* instantiates @{text "(x, ?z) \<in> order cl to (x, f x)"}, *}
 -- {* because of the def of @{text H} *}
@@ -482,7 +482,7 @@
           CLF.monotone_f[rule del] CL.lub_upper[rule del] 
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__CLF_flubH_le_lubH" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_flubH_le_lubH" ]]
   declare CLF.f_in_funcset[intro] funcset_mem[intro] CL.lub_in_lattice[intro]
        PO.monotoneE[intro] CLF.monotone_f[intro] CL.lub_upper[intro] 
        CLF.lubH_le_flubH[simp]
@@ -492,7 +492,7 @@
 apply (rule_tac t = "H" in ssubst, assumption)
 apply (rule CollectI)
 apply (rule conjI)
-using [[ atp_problem_prefix = "Tarski__CLF_flubH_le_lubH_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__CLF_flubH_le_lubH_simpler" ]]
 (*??no longer terminates, with combinators
 apply (metis CO_refl_on lubH_le_flubH monotone_def monotone_f reflD1 reflD2) 
 *)
@@ -506,7 +506,7 @@
 
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__CLF_lubH_is_fixp" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_lubH_is_fixp" ]]
 (* Single-step version fails. The conjecture clauses refer to local abstraction
 functions (Frees). *)
 lemma (in CLF) lubH_is_fixp:
@@ -557,7 +557,7 @@
      "H = {x. (x, f x) \<in> r & x \<in> A} ==> lub H cl \<in> fix f A"
 apply (simp add: fix_def)
 apply (rule conjI)
-using [[ atp_problem_prefix = "Tarski__CLF_lubH_is_fixp_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__CLF_lubH_is_fixp_simpler" ]]
 apply (metis CO_refl_on lubH_le_flubH refl_onD1)
 apply (metis antisymE flubH_le_lubH lubH_le_flubH)
 done
@@ -576,7 +576,7 @@
 apply (simp_all add: P_def)
 done
 
-declare [[ atp_problem_prefix = "Tarski__CLF_lubH_least_fixf" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_lubH_least_fixf" ]]
 lemma (in CLF) lubH_least_fixf:
      "H = {x. (x, f x) \<in> r & x \<in> A}
       ==> \<forall>L. (\<forall>y \<in> fix f A. (y,L) \<in> r) --> (lub H cl, L) \<in> r"
@@ -584,7 +584,7 @@
 done
 
 subsection {* Tarski fixpoint theorem 1, first part *}
-declare [[ atp_problem_prefix = "Tarski__CLF_T_thm_1_lub" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_T_thm_1_lub" ]]
   declare CL.lubI[intro] fix_subset[intro] CL.lub_in_lattice[intro] 
           CLF.fixf_le_lubH[simp] CLF.lubH_least_fixf[simp]
 lemma (in CLF) T_thm_1_lub: "lub P cl = lub {x. (x, f x) \<in> r & x \<in> A} cl"
@@ -592,7 +592,7 @@
 apply (rule sym)
 apply (simp add: P_def)
 apply (rule lubI)
-using [[ atp_problem_prefix = "Tarski__CLF_T_thm_1_lub_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__CLF_T_thm_1_lub_simpler" ]]
 apply (metis P_def fix_subset) 
 apply (metis Collect_conj_eq Collect_mem_eq Int_commute Int_lower1 lub_in_lattice vimage_def)
 (*??no longer terminates, with combinators
@@ -607,7 +607,7 @@
 
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__CLF_glbH_is_fixp" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__CLF_glbH_is_fixp" ]]
   declare glb_dual_lub[simp] PO.dualA_iff[intro] CLF.lubH_is_fixp[intro] 
           PO.dualPO[intro] CL.CL_dualCL[intro] PO.dualr_iff[simp]
 lemma (in CLF) glbH_is_fixp: "H = {x. (f x, x) \<in> r & x \<in> A} ==> glb H cl \<in> P"
@@ -631,13 +631,13 @@
 
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__T_thm_1_glb" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__T_thm_1_glb" ]]  (*ALL THEOREMS*)
 lemma (in CLF) T_thm_1_glb: "glb P cl = glb {x. (f x, x) \<in> r & x \<in> A} cl"
 (*sledgehammer;*)
 apply (simp add: glb_dual_lub P_def A_def r_def)
 apply (rule dualA_iff [THEN subst])
 (*never proved, 2007-01-22*)
-using [[ atp_problem_prefix = "Tarski__T_thm_1_glb_simpler" ]]  (*ALL THEOREMS*)
+using [[ sledgehammer_problem_prefix = "Tarski__T_thm_1_glb_simpler" ]]  (*ALL THEOREMS*)
 (*sledgehammer;*)
 apply (simp add: CLF.T_thm_1_lub [of _ f, OF CLF.intro, OF CL.intro CLF_axioms.intro, OF PO.intro CL_axioms.intro,
   OF dualPO CL_dualCL] dualPO CL_dualCL CLF_dual dualr_iff)
@@ -646,13 +646,13 @@
 subsection {* interval *}
 
 
-declare [[ atp_problem_prefix = "Tarski__rel_imp_elem" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__rel_imp_elem" ]]
   declare (in CLF) CO_refl_on[simp] refl_on_def [simp]
 lemma (in CLF) rel_imp_elem: "(x, y) \<in> r ==> x \<in> A"
 by (metis CO_refl_on refl_onD1)
   declare (in CLF) CO_refl_on[simp del]  refl_on_def [simp del]
 
-declare [[ atp_problem_prefix = "Tarski__interval_subset" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__interval_subset" ]]
   declare (in CLF) rel_imp_elem[intro] 
   declare interval_def [simp]
 lemma (in CLF) interval_subset: "[| a \<in> A; b \<in> A |] ==> interval r a b \<subseteq> A"
@@ -687,7 +687,7 @@
      "[| a \<in> A; b \<in> A; S \<subseteq> interval r a b |]==> S \<subseteq> A"
 by (simp add: subset_trans [OF _ interval_subset])
 
-declare [[ atp_problem_prefix = "Tarski__L_in_interval" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__L_in_interval" ]]  (*ALL THEOREMS*)
 lemma (in CLF) L_in_interval:
      "[| a \<in> A; b \<in> A; S \<subseteq> interval r a b;
          S \<noteq> {}; isLub S cl L; interval r a b \<noteq> {} |] ==> L \<in> interval r a b" 
@@ -706,7 +706,7 @@
 done
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__G_in_interval" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__G_in_interval" ]]  (*ALL THEOREMS*)
 lemma (in CLF) G_in_interval:
      "[| a \<in> A; b \<in> A; interval r a b \<noteq> {}; S \<subseteq> interval r a b; isGlb S cl G;
          S \<noteq> {} |] ==> G \<in> interval r a b"
@@ -715,7 +715,7 @@
                  dualA_iff A_def dualPO CL_dualCL CLF_dual isGlb_dual_isLub)
 done
 
-declare [[ atp_problem_prefix = "Tarski__intervalPO" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__intervalPO" ]]  (*ALL THEOREMS*)
 lemma (in CLF) intervalPO:
      "[| a \<in> A; b \<in> A; interval r a b \<noteq> {} |]
       ==> (| pset = interval r a b, order = induced (interval r a b) r |)
@@ -783,7 +783,7 @@
 lemmas (in CLF) intv_CL_glb = intv_CL_lub [THEN Rdual]
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__interval_is_sublattice" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__interval_is_sublattice" ]]  (*ALL THEOREMS*)
 lemma (in CLF) interval_is_sublattice:
      "[| a \<in> A; b \<in> A; interval r a b \<noteq> {} |]
         ==> interval r a b <<= cl"
@@ -791,7 +791,7 @@
 apply (rule sublatticeI)
 apply (simp add: interval_subset)
 (*never proved, 2007-01-22*)
-using [[ atp_problem_prefix = "Tarski__interval_is_sublattice_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__interval_is_sublattice_simpler" ]]
 (*sledgehammer *)
 apply (rule CompleteLatticeI)
 apply (simp add: intervalPO)
@@ -810,7 +810,7 @@
 lemma (in CLF) Bot_dual_Top: "Bot cl = Top (dual cl)"
 by (simp add: Top_def Bot_def least_def greatest_def dualA_iff dualr_iff)
 
-declare [[ atp_problem_prefix = "Tarski__Bot_in_lattice" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__Bot_in_lattice" ]]  (*ALL THEOREMS*)
 lemma (in CLF) Bot_in_lattice: "Bot cl \<in> A"
 (*sledgehammer; *)
 apply (simp add: Bot_def least_def)
@@ -820,12 +820,12 @@
 done
 
 (*first proved 2007-01-25 after relaxing relevance*)
-declare [[ atp_problem_prefix = "Tarski__Top_in_lattice" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__Top_in_lattice" ]]  (*ALL THEOREMS*)
 lemma (in CLF) Top_in_lattice: "Top cl \<in> A"
 (*sledgehammer;*)
 apply (simp add: Top_dual_Bot A_def)
 (*first proved 2007-01-25 after relaxing relevance*)
-using [[ atp_problem_prefix = "Tarski__Top_in_lattice_simpler" ]]  (*ALL THEOREMS*)
+using [[ sledgehammer_problem_prefix = "Tarski__Top_in_lattice_simpler" ]]  (*ALL THEOREMS*)
 (*sledgehammer*)
 apply (rule dualA_iff [THEN subst])
 apply (blast intro!: CLF.Bot_in_lattice [OF CLF.intro, OF CL.intro CLF_axioms.intro, OF PO.intro CL_axioms.intro] dualPO CL_dualCL CLF_dual)
@@ -840,7 +840,7 @@
 done
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__Bot_prop" ]]  (*ALL THEOREMS*) 
+declare [[ sledgehammer_problem_prefix = "Tarski__Bot_prop" ]]  (*ALL THEOREMS*) 
 lemma (in CLF) Bot_prop: "x \<in> A ==> (Bot cl, x) \<in> r"
 (*sledgehammer*) 
 apply (simp add: Bot_dual_Top r_def)
@@ -849,12 +849,12 @@
                  dualA_iff A_def dualPO CL_dualCL CLF_dual)
 done
 
-declare [[ atp_problem_prefix = "Tarski__Bot_in_lattice" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__Bot_in_lattice" ]]  (*ALL THEOREMS*)
 lemma (in CLF) Top_intv_not_empty: "x \<in> A  ==> interval r x (Top cl) \<noteq> {}" 
 apply (metis Top_in_lattice Top_prop empty_iff intervalI reflE)
 done
 
-declare [[ atp_problem_prefix = "Tarski__Bot_intv_not_empty" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__Bot_intv_not_empty" ]]  (*ALL THEOREMS*)
 lemma (in CLF) Bot_intv_not_empty: "x \<in> A ==> interval r (Bot cl) x \<noteq> {}" 
 apply (metis Bot_prop ex_in_conv intervalI reflE rel_imp_elem)
 done
@@ -866,7 +866,7 @@
 by (simp add: P_def fix_subset po_subset_po)
 
 (*first proved 2007-01-25 after relaxing relevance*)
-declare [[ atp_problem_prefix = "Tarski__Y_subset_A" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__Y_subset_A" ]]
   declare (in Tarski) P_def[simp] Y_ss [simp]
   declare fix_subset [intro] subset_trans [intro]
 lemma (in Tarski) Y_subset_A: "Y \<subseteq> A"
@@ -882,7 +882,7 @@
   by (rule Y_subset_A [THEN lub_in_lattice])
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__lubY_le_flubY" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__lubY_le_flubY" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) lubY_le_flubY: "(lub Y cl, f (lub Y cl)) \<in> r"
 (*sledgehammer*) 
 apply (rule lub_least)
@@ -891,12 +891,12 @@
 apply (rule lubY_in_A)
 -- {* @{text "Y \<subseteq> P ==> f x = x"} *}
 apply (rule ballI)
-using [[ atp_problem_prefix = "Tarski__lubY_le_flubY_simpler" ]]  (*ALL THEOREMS*)
+using [[ sledgehammer_problem_prefix = "Tarski__lubY_le_flubY_simpler" ]]  (*ALL THEOREMS*)
 (*sledgehammer *)
 apply (rule_tac t = "x" in fix_imp_eq [THEN subst])
 apply (erule Y_ss [simplified P_def, THEN subsetD])
 -- {* @{text "reduce (f x, f (lub Y cl)) \<in> r to (x, lub Y cl) \<in> r"} by monotonicity *}
-using [[ atp_problem_prefix = "Tarski__lubY_le_flubY_simplest" ]]  (*ALL THEOREMS*)
+using [[ sledgehammer_problem_prefix = "Tarski__lubY_le_flubY_simplest" ]]  (*ALL THEOREMS*)
 (*sledgehammer*)
 apply (rule_tac f = "f" in monotoneE)
 apply (rule monotone_f)
@@ -906,7 +906,7 @@
 done
 
 (*first proved 2007-01-25 after relaxing relevance*)
-declare [[ atp_problem_prefix = "Tarski__intY1_subset" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__intY1_subset" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) intY1_subset: "intY1 \<subseteq> A"
 (*sledgehammer*) 
 apply (unfold intY1_def)
@@ -918,7 +918,7 @@
 lemmas (in Tarski) intY1_elem = intY1_subset [THEN subsetD]
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__intY1_f_closed" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__intY1_f_closed" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) intY1_f_closed: "x \<in> intY1 \<Longrightarrow> f x \<in> intY1"
 (*sledgehammer*) 
 apply (simp add: intY1_def  interval_def)
@@ -926,7 +926,7 @@
 apply (rule transE)
 apply (rule lubY_le_flubY)
 -- {* @{text "(f (lub Y cl), f x) \<in> r"} *}
-using [[ atp_problem_prefix = "Tarski__intY1_f_closed_simpler" ]]  (*ALL THEOREMS*)
+using [[ sledgehammer_problem_prefix = "Tarski__intY1_f_closed_simpler" ]]  (*ALL THEOREMS*)
 (*sledgehammer [has been proved before now...]*)
 apply (rule_tac f=f in monotoneE)
 apply (rule monotone_f)
@@ -939,13 +939,13 @@
 apply (simp add: intY1_def interval_def  intY1_elem)
 done
 
-declare [[ atp_problem_prefix = "Tarski__intY1_func" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__intY1_func" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) intY1_func: "(%x: intY1. f x) \<in> intY1 -> intY1"
 apply (rule restrict_in_funcset)
 apply (metis intY1_f_closed restrict_in_funcset)
 done
 
-declare [[ atp_problem_prefix = "Tarski__intY1_mono" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__intY1_mono" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) intY1_mono:
      "monotone (%x: intY1. f x) intY1 (induced intY1 r)"
 (*sledgehammer *)
@@ -954,7 +954,7 @@
 done
 
 (*proof requires relaxing relevance: 2007-01-25*)
-declare [[ atp_problem_prefix = "Tarski__intY1_is_cl" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__intY1_is_cl" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) intY1_is_cl:
     "(| pset = intY1, order = induced intY1 r |) \<in> CompleteLattice"
 (*sledgehammer*) 
@@ -967,7 +967,7 @@
 done
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__v_in_P" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__v_in_P" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) v_in_P: "v \<in> P"
 (*sledgehammer*) 
 apply (unfold P_def)
@@ -977,7 +977,7 @@
                  v_def CL_imp_PO intY1_is_cl CLF_set_def intY1_func intY1_mono)
 done
 
-declare [[ atp_problem_prefix = "Tarski__z_in_interval" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__z_in_interval" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) z_in_interval:
      "[| z \<in> P; \<forall>y\<in>Y. (y, z) \<in> induced P r |] ==> z \<in> intY1"
 (*sledgehammer *)
@@ -991,14 +991,14 @@
 apply (simp add: induced_def)
 done
 
-declare [[ atp_problem_prefix = "Tarski__fz_in_int_rel" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__fz_in_int_rel" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) f'z_in_int_rel: "[| z \<in> P; \<forall>y\<in>Y. (y, z) \<in> induced P r |]
       ==> ((%x: intY1. f x) z, z) \<in> induced intY1 r" 
 apply (metis P_def acc_def fix_imp_eq fix_subset indI reflE restrict_apply subset_eq z_in_interval)
 done
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__tarski_full_lemma" ]]  (*ALL THEOREMS*)
+declare [[ sledgehammer_problem_prefix = "Tarski__tarski_full_lemma" ]]  (*ALL THEOREMS*)
 lemma (in Tarski) tarski_full_lemma:
      "\<exists>L. isLub Y (| pset = P, order = induced P r |) L"
 apply (rule_tac x = "v" in exI)
@@ -1028,12 +1028,12 @@
  prefer 2 apply (simp add: v_in_P)
 apply (unfold v_def)
 (*never proved, 2007-01-22*)
-using [[ atp_problem_prefix = "Tarski__tarski_full_lemma_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__tarski_full_lemma_simpler" ]]
 (*sledgehammer*) 
 apply (rule indE)
 apply (rule_tac [2] intY1_subset)
 (*never proved, 2007-01-22*)
-using [[ atp_problem_prefix = "Tarski__tarski_full_lemma_simplest" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__tarski_full_lemma_simplest" ]]
 (*sledgehammer*) 
 apply (rule CL.glb_lower [OF CL.intro, OF PO.intro CL_axioms.intro, OF _ intY1_is_cl, simplified])
   apply (simp add: CL_imp_PO intY1_is_cl)
@@ -1051,7 +1051,7 @@
 
 
 (*never proved, 2007-01-22*)
-declare [[ atp_problem_prefix = "Tarski__Tarski_full" ]]
+declare [[ sledgehammer_problem_prefix = "Tarski__Tarski_full" ]]
   declare (in CLF) fixf_po[intro] P_def [simp] A_def [simp] r_def [simp]
                Tarski.tarski_full_lemma [intro] cl_po [intro] cl_co [intro]
                CompleteLatticeI_simp [intro]
@@ -1061,7 +1061,7 @@
 apply (rule CompleteLatticeI_simp)
 apply (rule fixf_po, clarify)
 (*never proved, 2007-01-22*)
-using [[ atp_problem_prefix = "Tarski__Tarski_full_simpler" ]]
+using [[ sledgehammer_problem_prefix = "Tarski__Tarski_full_simpler" ]]
 (*sledgehammer*) 
 apply (simp add: P_def A_def r_def)
 apply (blast intro!: Tarski.tarski_full_lemma [OF Tarski.intro, OF CLF.intro Tarski_axioms.intro,
--- a/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -290,7 +290,7 @@
     | NONE => get_prover (default_atp_name ()))
   end
 
-type locality = Sledgehammer_Fact_Filter.locality
+type locality = Sledgehammer_Filter.locality
 
 local
 
@@ -299,20 +299,29 @@
   SH_FAIL of int * int |
   SH_ERROR
 
-fun run_sh prover hard_timeout timeout dir st =
+fun run_sh prover_name prover hard_timeout timeout dir st =
   let
-    val {context = ctxt, facts, goal} = Proof.goal st
+    val {context = ctxt, facts = chained_ths, goal} = Proof.goal st
     val thy = ProofContext.theory_of ctxt
+    val i = 1
     val change_dir = (fn SOME d => Config.put Sledgehammer.dest_dir d | _ => I)
     val ctxt' =
       ctxt
       |> change_dir dir
-      |> Config.put Sledgehammer.measure_runtime true
-    val params = Sledgehammer_Isar.default_params thy
-      [("timeout", Int.toString timeout ^ " s")]
+      |> Config.put Sledgehammer.measure_run_time true
+    val params as {full_types, relevance_thresholds, max_relevant, ...} =
+      Sledgehammer_Isar.default_params thy
+          [("timeout", Int.toString timeout ^ " s")]
+    val relevance_override = {add = [], del = [], only = false}
+    val {default_max_relevant, ...} = ATP_Systems.get_prover thy prover_name
+    val (_, hyp_ts, concl_t) = Sledgehammer_Util.strip_subgoal goal i
+    val axioms =
+      Sledgehammer_Filter.relevant_facts ctxt full_types relevance_thresholds
+          (the_default default_max_relevant max_relevant)
+          relevance_override chained_ths hyp_ts concl_t
     val problem =
-      {subgoal = 1, goal = (ctxt', (facts, goal)),
-       relevance_override = {add = [], del = [], only = false}, axioms = NONE}
+      {ctxt = ctxt', goal = goal, subgoal = i,
+       axioms = map (Sledgehammer_Translate.prepare_axiom ctxt) axioms}
     val time_limit =
       (case hard_timeout of
         NONE => I
@@ -352,12 +361,12 @@
     val timeout = Mirabelle.get_int_setting args (prover_timeoutK, 30)
     val hard_timeout = AList.lookup (op =) args prover_hard_timeoutK
       |> Option.map (fst o read_int o explode)
-    val (msg, result) = run_sh prover hard_timeout timeout dir st
+    val (msg, result) = run_sh prover_name prover hard_timeout timeout dir st
   in
     case result of
       SH_OK (time_isa, time_atp, names) =>
         let
-          fun get_thms (name, Sledgehammer_Fact_Filter.Chained) = NONE
+          fun get_thms (name, Sledgehammer_Filter.Chained) = NONE
             | get_thms (name, loc) =
               SOME ((name, loc), thms_of_name (Proof.context_of st) name)
         in
@@ -396,7 +405,7 @@
     val params = Sledgehammer_Isar.default_params thy
       [("atps", prover_name), ("timeout", Int.toString timeout ^ " s")]
     val minimize =
-      Sledgehammer_Fact_Minimize.minimize_theorems params 1 (subgoal_count st)
+      Sledgehammer_Minimize.minimize_theorems params 1 (subgoal_count st)
     val _ = log separator
   in
     case minimize st (these (!named_thms)) of
--- a/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer_filter.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer_filter.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -5,22 +5,26 @@
 structure Mirabelle_Sledgehammer_Filter : MIRABELLE_ACTION =
 struct
 
+structure SF = Sledgehammer_Filter
+
 val relevance_filter_args =
-  [("worse_irrel_freq", Sledgehammer_Fact_Filter.worse_irrel_freq),
-   ("higher_order_irrel_weight",
-    Sledgehammer_Fact_Filter.higher_order_irrel_weight),
-   ("abs_rel_weight", Sledgehammer_Fact_Filter.abs_rel_weight),
-   ("abs_irrel_weight", Sledgehammer_Fact_Filter.abs_irrel_weight),
-   ("skolem_irrel_weight", Sledgehammer_Fact_Filter.skolem_irrel_weight),
-   ("intro_bonus", Sledgehammer_Fact_Filter.intro_bonus),
-   ("elim_bonus", Sledgehammer_Fact_Filter.elim_bonus),
-   ("simp_bonus", Sledgehammer_Fact_Filter.simp_bonus),
-   ("local_bonus", Sledgehammer_Fact_Filter.local_bonus),
-   ("chained_bonus", Sledgehammer_Fact_Filter.chained_bonus),
-   ("max_imperfect", Sledgehammer_Fact_Filter.max_imperfect),
-   ("max_imperfect_exp", Sledgehammer_Fact_Filter.max_imperfect_exp),
-   ("threshold_divisor", Sledgehammer_Fact_Filter.threshold_divisor),
-   ("ridiculous_threshold", Sledgehammer_Fact_Filter.ridiculous_threshold)]
+  [("worse_irrel_freq", SF.worse_irrel_freq),
+   ("higher_order_irrel_weight", SF.higher_order_irrel_weight),
+   ("abs_rel_weight", SF.abs_rel_weight),
+   ("abs_irrel_weight", SF.abs_irrel_weight),
+   ("skolem_irrel_weight", SF.skolem_irrel_weight),
+   ("theory_const_rel_weight", SF.theory_const_rel_weight),
+   ("theory_const_irrel_weight", SF.theory_const_irrel_weight),
+   ("intro_bonus", SF.intro_bonus),
+   ("elim_bonus", SF.elim_bonus),
+   ("simp_bonus", SF.simp_bonus),
+   ("local_bonus", SF.local_bonus),
+   ("assum_bonus", SF.assum_bonus),
+   ("chained_bonus", SF.chained_bonus),
+   ("max_imperfect", SF.max_imperfect),
+   ("max_imperfect_exp", SF.max_imperfect_exp),
+   ("threshold_divisor", SF.threshold_divisor),
+   ("ridiculous_threshold", SF.ridiculous_threshold)]
 
 structure Prooftab =
   Table(type key = int * int val ord = prod_ord int_ord int_ord);
@@ -99,15 +103,14 @@
                            SOME rf => (rf := the (Real.fromString value); false)
                          | NONE => true)
 
-         val {relevance_thresholds, full_types, max_relevant, theory_relevant,
-              ...} = Sledgehammer_Isar.default_params thy args
+         val {relevance_thresholds, full_types, max_relevant, ...} =
+           Sledgehammer_Isar.default_params thy args
          val subgoal = 1
          val (_, hyp_ts, concl_t) = Sledgehammer_Util.strip_subgoal goal subgoal
          val facts =
-           Sledgehammer_Fact_Filter.relevant_facts ctxt full_types
+           SF.relevant_facts ctxt full_types
                relevance_thresholds
                (the_default default_max_relevant max_relevant)
-               (the_default false theory_relevant)
                {add = [], del = [], only = false} facts hyp_ts concl_t
            |> map (fst o fst)
          val (found_facts, lost_facts) =
--- a/src/HOL/Multivariate_Analysis/normarith.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Multivariate_Analysis/normarith.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -32,16 +32,16 @@
                       (Thm.dest_arg t) acc
       | _ => augment_norm true t acc
 
- val cterm_lincomb_neg = FuncUtil.Ctermfunc.map Rat.neg
+ val cterm_lincomb_neg = FuncUtil.Ctermfunc.map (K Rat.neg)
  fun cterm_lincomb_cmul c t =
-    if c =/ Rat.zero then FuncUtil.Ctermfunc.empty else FuncUtil.Ctermfunc.map (fn x => x */ c) t
+    if c =/ Rat.zero then FuncUtil.Ctermfunc.empty else FuncUtil.Ctermfunc.map (fn _ => fn x => x */ c) t
  fun cterm_lincomb_add l r = FuncUtil.Ctermfunc.combine (curry op +/) (fn x => x =/ Rat.zero) l r
  fun cterm_lincomb_sub l r = cterm_lincomb_add l (cterm_lincomb_neg r)
  fun cterm_lincomb_eq l r = FuncUtil.Ctermfunc.is_empty (cterm_lincomb_sub l r)
 
- val int_lincomb_neg = FuncUtil.Intfunc.map Rat.neg
+ val int_lincomb_neg = FuncUtil.Intfunc.map (K Rat.neg)
  fun int_lincomb_cmul c t =
-    if c =/ Rat.zero then FuncUtil.Intfunc.empty else FuncUtil.Intfunc.map (fn x => x */ c) t
+    if c =/ Rat.zero then FuncUtil.Intfunc.empty else FuncUtil.Intfunc.map (fn _ => fn x => x */ c) t
  fun int_lincomb_add l r = FuncUtil.Intfunc.combine (curry op +/) (fn x => x =/ Rat.zero) l r
  fun int_lincomb_sub l r = int_lincomb_add l (int_lincomb_neg r)
  fun int_lincomb_eq l r = FuncUtil.Intfunc.is_empty (int_lincomb_sub l r)
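Editorial note, not part of the changeset: the (K f) pattern in the hunk above (and in the Predicate_Compile, SMT and Z3 hunks below) reflects a change to the table map combinators, which now pass the key to the mapping function as well, so a value-only function is lifted with the K combinator. A minimal, hypothetical Standard ML sketch follows; the names entries and map_values are illustrative only and do not appear in this changeset.

fun K x _ = x;                               (* Isabelle's constant combinator *)
val entries = [("a", 1), ("b", 2)];
(* old style: the mapping function sees only the value *)
val negated_old = map (fn (k, v) => (k, ~ v)) entries;
(* new style: the mapping function also receives the key, so a value-only
   function such as ~ is lifted with K, i.e. (K f) key value = f value *)
fun map_values f = map (fn (k, v) => (k, f k v));
val negated_new = map_values (K ~) entries;  (* [("a", ~1), ("b", ~2)] *)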
--- a/src/HOL/SAT.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/SAT.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -10,7 +10,6 @@
 theory SAT
 imports Refute
 uses
-  "Tools/cnf_funcs.ML"
   "Tools/sat_funcs.ML"
 begin
 
--- a/src/HOL/SetInterval.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/SetInterval.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -304,6 +304,17 @@
 lemma lessThan_Suc: "lessThan (Suc k) = insert k (lessThan k)"
 by (simp add: lessThan_def less_Suc_eq, blast)
 
+text {* The following lemma is convenient in induction proofs where
+new elements get indices at the beginning.  It is used to transform
+@{term "{..<Suc n}"} into @{term "0::nat"} and @{term "{..<n}"}. *}
+
+lemma lessThan_Suc_eq_insert_0: "{..<Suc n} = insert 0 (Suc ` {..<n})"
+proof safe
+  fix x assume "x < Suc n" "x \<notin> Suc ` {..<n}"
+  then have "x \<noteq> Suc (x - 1)" by auto
+  with `x < Suc n` show "x = 0" by auto
+qed
+
 lemma lessThan_Suc_atMost: "lessThan (Suc k) = atMost k"
 by (simp add: lessThan_def atMost_def less_Suc_eq_le)
 
--- a/src/HOL/Sledgehammer.thy	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Sledgehammer.thy	Thu Sep 02 20:44:33 2010 +0200
@@ -10,7 +10,6 @@
 theory Sledgehammer
 imports Plain Hilbert_Choice
 uses
-  ("Tools/ATP/async_manager.ML")
   ("Tools/ATP/atp_problem.ML")
   ("Tools/ATP/atp_systems.ML")
   ("~~/src/Tools/Metis/metis.ML")
@@ -19,14 +18,17 @@
   ("Tools/Sledgehammer/metis_clauses.ML")
   ("Tools/Sledgehammer/metis_tactics.ML")
   ("Tools/Sledgehammer/sledgehammer_util.ML")
-  ("Tools/Sledgehammer/sledgehammer_fact_filter.ML")
+  ("Tools/Sledgehammer/sledgehammer_filter.ML")
   ("Tools/Sledgehammer/sledgehammer_translate.ML")
-  ("Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML")
+  ("Tools/Sledgehammer/sledgehammer_reconstruct.ML")
   ("Tools/Sledgehammer/sledgehammer.ML")
-  ("Tools/Sledgehammer/sledgehammer_fact_minimize.ML")
+  ("Tools/Sledgehammer/sledgehammer_minimize.ML")
   ("Tools/Sledgehammer/sledgehammer_isar.ML")
 begin
 
+lemma TruepropI: "P \<equiv> Q \<Longrightarrow> Trueprop P \<equiv> Trueprop Q"
+by simp
+
 definition skolem_id :: "'a \<Rightarrow> 'a" where
 [no_atp]: "skolem_id = (\<lambda>x. x)"
 
@@ -89,7 +91,6 @@
 apply (simp add: COMBC_def) 
 done
 
-use "Tools/ATP/async_manager.ML"
 use "Tools/ATP/atp_problem.ML"
 use "Tools/ATP/atp_systems.ML"
 setup ATP_Systems.setup
@@ -103,12 +104,12 @@
 use "Tools/Sledgehammer/metis_tactics.ML"
 
 use "Tools/Sledgehammer/sledgehammer_util.ML"
-use "Tools/Sledgehammer/sledgehammer_fact_filter.ML"
+use "Tools/Sledgehammer/sledgehammer_filter.ML"
 use "Tools/Sledgehammer/sledgehammer_translate.ML"
-use "Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML"
+use "Tools/Sledgehammer/sledgehammer_reconstruct.ML"
 use "Tools/Sledgehammer/sledgehammer.ML"
 setup Sledgehammer.setup
-use "Tools/Sledgehammer/sledgehammer_fact_minimize.ML"
+use "Tools/Sledgehammer/sledgehammer_minimize.ML"
 use "Tools/Sledgehammer/sledgehammer_isar.ML"
 setup Metis_Tactics.setup
 
--- a/src/HOL/Tools/ATP/async_manager.ML	Thu Sep 02 20:29:12 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,241 +0,0 @@
-(*  Title:      HOL/Tools/ATP/async_manager.ML
-    Author:     Fabian Immler, TU Muenchen
-    Author:     Makarius
-    Author:     Jasmin Blanchette, TU Muenchen
-
-Central manager for asynchronous diagnosis tool threads.
-*)
-
-signature ASYNC_MANAGER =
-sig
-  val launch :
-    string -> bool -> Time.time -> Time.time -> string -> (unit -> string)
-    -> unit
-  val kill_threads : string -> string -> unit
-  val running_threads : string -> string -> unit
-  val thread_messages : string -> string -> int option -> unit
-end;
-
-structure Async_Manager : ASYNC_MANAGER =
-struct
-
-(** preferences **)
-
-val message_store_limit = 20;
-val message_display_limit = 5;
-
-
-(** thread management **)
-
-(* data structures over threads *)
-
-structure Thread_Heap = Heap
-(
-  type elem = Time.time * Thread.thread;
-  fun ord ((a, _), (b, _)) = Time.compare (a, b);
-);
-
-fun lookup_thread xs = AList.lookup Thread.equal xs;
-fun delete_thread xs = AList.delete Thread.equal xs;
-fun update_thread xs = AList.update Thread.equal xs;
-
-
-(* state of thread manager *)
-
-type state =
-  {manager: Thread.thread option,
-   timeout_heap: Thread_Heap.T,
-   active: (Thread.thread * (string * Time.time * Time.time * string)) list,
-   canceling: (Thread.thread * (string * Time.time * string)) list,
-   messages: (string * string) list,
-   store: (string * string) list}
-
-fun make_state manager timeout_heap active canceling messages store : state =
-  {manager = manager, timeout_heap = timeout_heap, active = active,
-   canceling = canceling, messages = messages, store = store}
-
-val global_state = Synchronized.var "async_manager"
-  (make_state NONE Thread_Heap.empty [] [] [] []);
-
-
-(* unregister thread *)
-
-fun unregister verbose message thread =
-  Synchronized.change global_state
-  (fn state as {manager, timeout_heap, active, canceling, messages, store} =>
-    (case lookup_thread active thread of
-      SOME (tool, birth_time, _, desc) =>
-        let
-          val active' = delete_thread thread active;
-          val now = Time.now ()
-          val canceling' = (thread, (tool, now, desc)) :: canceling
-          val message' =
-            desc ^ "\n" ^ message ^
-            (if verbose then
-               "\nTotal time: " ^ Int.toString (Time.toMilliseconds
-                                            (Time.- (now, birth_time))) ^ " ms."
-             else
-               "")
-          val messages' = (tool, message') :: messages;
-          val store' = (tool, message') ::
-            (if length store <= message_store_limit then store
-             else #1 (chop message_store_limit store));
-        in make_state manager timeout_heap active' canceling' messages' store' end
-    | NONE => state));
-
-
-(* main manager thread -- only one may exist *)
-
-val min_wait_time = Time.fromMilliseconds 300;
-val max_wait_time = Time.fromSeconds 10;
-
-fun replace_all bef aft =
-  let
-    fun aux seen "" = String.implode (rev seen)
-      | aux seen s =
-        if String.isPrefix bef s then
-          aux seen "" ^ aft ^ aux [] (unprefix bef s)
-        else
-          aux (String.sub (s, 0) :: seen) (String.extract (s, 1, NONE))
-  in aux [] end
-
-(* This is a workaround for Proof General's off-by-a-few sendback display bug,
-   whereby "pr" in "proof" is not highlighted. *)
-fun break_into_chunks xs =
-  maps (space_explode "\000" o replace_all "\n\n" "\000" o snd) xs
-
-fun print_new_messages () =
-  case Synchronized.change_result global_state
-         (fn {manager, timeout_heap, active, canceling, messages, store} =>
-             (messages, make_state manager timeout_heap active canceling []
-                                   store)) of
-    [] => ()
-  | msgs as (tool, _) :: _ =>
-    let val ss = break_into_chunks msgs in
-      List.app priority (tool ^ ": " ^ hd ss :: tl ss)
-    end
-
-fun check_thread_manager verbose = Synchronized.change global_state
-  (fn state as {manager, timeout_heap, active, canceling, messages, store} =>
-    if (case manager of SOME thread => Thread.isActive thread | NONE => false) then state
-    else let val manager = SOME (Toplevel.thread false (fn () =>
-      let
-        fun time_limit timeout_heap =
-          (case try Thread_Heap.min timeout_heap of
-            NONE => Time.+ (Time.now (), max_wait_time)
-          | SOME (time, _) => time);
-
-        (*action: find threads whose timeout is reached, and interrupt canceling threads*)
-        fun action {manager, timeout_heap, active, canceling, messages, store} =
-          let val (timeout_threads, timeout_heap') =
-            Thread_Heap.upto (Time.now (), Thread.self ()) timeout_heap;
-          in
-            if null timeout_threads andalso null canceling then
-              NONE
-            else
-              let
-                val _ = List.app (Simple_Thread.interrupt o #1) canceling
-                val canceling' = filter (Thread.isActive o #1) canceling
-                val state' = make_state manager timeout_heap' active canceling' messages store;
-              in SOME (map #2 timeout_threads, state') end
-          end;
-      in
-        while Synchronized.change_result global_state
-          (fn state as {timeout_heap, active, canceling, messages, store, ...} =>
-            if null active andalso null canceling andalso null messages
-            then (false, make_state NONE timeout_heap active canceling messages store)
-            else (true, state))
-        do
-          (Synchronized.timed_access global_state (SOME o time_limit o #timeout_heap) action
-            |> these
-            |> List.app (unregister verbose "Timed out.\n");
-            print_new_messages ();
-            (*give threads some time to respond to interrupt*)
-            OS.Process.sleep min_wait_time)
-      end))
-    in make_state manager timeout_heap active canceling messages store end)
-
-
-(* register thread *)
-
-fun register tool verbose birth_time death_time desc thread =
- (Synchronized.change global_state
-    (fn {manager, timeout_heap, active, canceling, messages, store} =>
-      let
-        val timeout_heap' = Thread_Heap.insert (death_time, thread) timeout_heap;
-        val active' = update_thread (thread, (tool, birth_time, death_time, desc)) active;
-        val state' = make_state manager timeout_heap' active' canceling messages store;
-      in state' end);
-  check_thread_manager verbose);
-
-
-fun launch tool verbose birth_time death_time desc f =
-  (Toplevel.thread true
-       (fn () =>
-           let
-             val self = Thread.self ()
-             val _ = register tool verbose birth_time death_time desc self
-             val message = f ()
-           in unregister verbose message self end);
-   ())
-
-
-(** user commands **)
-
-(* kill threads *)
-
-fun kill_threads tool workers = Synchronized.change global_state
-  (fn {manager, timeout_heap, active, canceling, messages, store} =>
-    let
-      val killing =
-        map_filter (fn (th, (tool', _, _, desc)) =>
-                       if tool' = tool then SOME (th, (tool', Time.now (), desc))
-                       else NONE) active
-      val state' = make_state manager timeout_heap [] (killing @ canceling) messages store;
-      val _ = if null killing then () else priority ("Killed active " ^ workers ^ ".")
-    in state' end);
-
-
-(* running threads *)
-
-fun seconds time = string_of_int (Time.toSeconds time) ^ " s"
-
-fun running_threads tool workers =
-  let
-    val {active, canceling, ...} = Synchronized.value global_state;
-
-    val now = Time.now ();
-    fun running_info (_, (tool', birth_time, death_time, desc)) =
-      if tool' = tool then
-        SOME ("Running: " ^ seconds (Time.- (now, birth_time)) ^ " -- " ^
-              seconds (Time.- (death_time, now)) ^ " to live:\n" ^ desc)
-      else
-        NONE
-    fun canceling_info (_, (tool', death_time, desc)) =
-      if tool' = tool then
-        SOME ("Trying to interrupt thread since " ^
-              seconds (Time.- (now, death_time)) ^ ":\n" ^ desc)
-      else
-        NONE
-    val running =
-      case map_filter running_info active of
-        [] => ["No " ^ workers ^ " running."]
-      | ss => "Running " ^ workers ^ ":" :: ss
-    val interrupting =
-      case map_filter canceling_info canceling of
-        [] => []
-      | ss => "Trying to interrupt the following " ^ workers ^ ":" :: ss
-  in priority (space_implode "\n\n" (running @ interrupting)) end
-
-fun thread_messages tool worker opt_limit =
-  let
-    val limit = the_default message_display_limit opt_limit;
-    val tool_store = Synchronized.value global_state
-                     |> #store |> filter (curry (op =) tool o fst)
-    val header =
-      "Recent " ^ worker ^ " messages" ^
-        (if length tool_store <= limit then ":"
-         else " (" ^ string_of_int limit ^ " displayed):");
-  in List.app priority (header :: break_into_chunks (#1 (chop limit tool_store))) end
-
-end;
--- a/src/HOL/Tools/ATP/atp_systems.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/ATP/atp_systems.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -20,7 +20,6 @@
      proof_delims: (string * string) list,
      known_failures: (failure * string) list,
      default_max_relevant: int,
-     default_theory_relevant: bool,
      explicit_forall: bool,
      use_conjecture_for_hypotheses: bool}
 
@@ -53,7 +52,6 @@
    proof_delims: (string * string) list,
    known_failures: (failure * string) list,
    default_max_relevant: int,
-   default_theory_relevant: bool,
    explicit_forall: bool,
    use_conjecture_for_hypotheses: bool}
 
@@ -160,7 +158,6 @@
       (OutOfResources, "SZS status: ResourceOut"),
       (OutOfResources, "SZS status ResourceOut")],
    default_max_relevant = 500 (* FUDGE *),
-   default_theory_relevant = false,
    explicit_forall = false,
    use_conjecture_for_hypotheses = true}
 
@@ -171,7 +168,7 @@
    counteracting the presence of "hAPP". *)
 val spass_config : prover_config =
   {exec = ("ISABELLE_ATP", "scripts/spass"),
-   required_execs = [("SPASS_HOME", "SPASS")],
+   required_execs = [("SPASS_HOME", "SPASS"), ("SPASS_HOME", "tptp2dfg")],
    arguments = fn complete => fn timeout =>
      ("-Auto -PGiven=0 -PProblem=0 -Splits=0 -FullRed=0 -DocProof \
       \-VarWeight=3 -TimeLimit=" ^ string_of_int (to_secs 0 timeout))
@@ -187,7 +184,6 @@
       (MalformedInput, "Free Variable"),
       (SpassTooOld, "tptp2dfg")],
    default_max_relevant = 350 (* FUDGE *),
-   default_theory_relevant = true,
    explicit_forall = true,
    use_conjecture_for_hypotheses = true}
 
@@ -217,7 +213,6 @@
       (Unprovable, "Termination reason: Satisfiable"),
       (VampireTooOld, "not a valid option")],
    default_max_relevant = 400 (* FUDGE *),
-   default_theory_relevant = false,
    explicit_forall = false,
    use_conjecture_for_hypotheses = true}
 
@@ -252,12 +247,12 @@
 
 fun the_system name versions =
   case get_system name versions of
-    NONE => error ("System " ^ quote name ^ " not available at SystemOnTPTP.")
-  | SOME sys => sys
+    SOME sys => sys
+  | NONE => error ("System " ^ quote name ^ " not available at SystemOnTPTP.")
 
 fun remote_config system_name system_versions proof_delims known_failures
-                  default_max_relevant default_theory_relevant
-                  use_conjecture_for_hypotheses : prover_config =
+                  default_max_relevant use_conjecture_for_hypotheses
+                  : prover_config =
   {exec = ("ISABELLE_ATP", "scripts/remote_atp"),
    required_execs = [],
    arguments = fn _ => fn timeout =>
@@ -269,26 +264,21 @@
      known_failures @ known_perl_failures @
      [(TimedOut, "says Timeout")],
    default_max_relevant = default_max_relevant,
-   default_theory_relevant = default_theory_relevant,
    explicit_forall = true,
    use_conjecture_for_hypotheses = use_conjecture_for_hypotheses}
 
 fun remotify_config system_name system_versions
         ({proof_delims, known_failures, default_max_relevant,
-          default_theory_relevant, use_conjecture_for_hypotheses, ...}
-         : prover_config) : prover_config =
+          use_conjecture_for_hypotheses, ...} : prover_config) : prover_config =
   remote_config system_name system_versions proof_delims known_failures
-                default_max_relevant default_theory_relevant
-                use_conjecture_for_hypotheses
+                default_max_relevant use_conjecture_for_hypotheses
 
 val remotify_name = prefix "remote_"
 fun remote_prover name system_name system_versions proof_delims known_failures
-                  default_max_relevant default_theory_relevant
-                  use_conjecture_for_hypotheses =
+                  default_max_relevant use_conjecture_for_hypotheses =
   (remotify_name name,
    remote_config system_name system_versions proof_delims known_failures
-                 default_max_relevant default_theory_relevant
-                 use_conjecture_for_hypotheses)
+                 default_max_relevant use_conjecture_for_hypotheses)
 fun remotify_prover (name, config) system_name system_versions =
   (remotify_name name, remotify_config system_name system_versions config)
 
@@ -296,10 +286,10 @@
 val remote_vampire = remotify_prover vampire "Vampire" ["9.0", "1.0", "0.6"]
 val remote_sine_e =
   remote_prover "sine_e" "SInE" [] [] [(Unprovable, "says Unknown")]
-                1000 (* FUDGE *) false true
+                800 (* FUDGE *) true
 val remote_snark =
   remote_prover "snark" "SNARK---" [] [("refutation.", "end_refutation.")] []
-                350 (* FUDGE *) false true
+                250 (* FUDGE *) true
 
 (* Setup *)
 
--- a/src/HOL/Tools/Datatype/datatype_realizer.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Datatype/datatype_realizer.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -56,7 +56,7 @@
           val recs = filter (is_rec_type o fst o fst) (cargs ~~ tnames ~~ Ts);
           val frees = tnames ~~ Ts;
 
-          fun mk_prems vs [] = 
+          fun mk_prems vs [] =
                 let
                   val rT = nth (rec_result_Ts) i;
                   val vs' = filter_out is_unit vs;
@@ -67,7 +67,7 @@
                 in (HOLogic.mk_Trueprop (make_pred i rT T (list_comb (f, vs'))
                   (list_comb (Const (cname, Ts ---> T), map Free frees))), f')
                 end
-            | mk_prems vs (((dt, s), T) :: ds) = 
+            | mk_prems vs (((dt, s), T) :: ds) =
                 let
                   val k = body_index dt;
                   val (Us, U) = strip_type T;
@@ -83,7 +83,7 @@
 
         in (apfst (curry list_all_free frees) (mk_prems (map Free frees) recs), j + 1) end)
           constrs) (descr ~~ recTs) 1)));
- 
+
     fun mk_proj j [] t = t
       | mk_proj j (i :: is) t = if null is then t else
           if (j: int) = i then HOLogic.mk_fst t
@@ -107,14 +107,16 @@
     val inst = map (pairself cert) (map head_of (HOLogic.dest_conj
       (HOLogic.dest_Trueprop (concl_of induct))) ~~ ps);
 
-    val thm = OldGoals.simple_prove_goal_cterm (cert (Logic.list_implies (prems, concl)))
+    val thm = Goal.prove_internal (map cert prems) (cert concl)
       (fn prems =>
-         [rewrite_goals_tac (map mk_meta_eq [@{thm fst_conv}, @{thm snd_conv}]),
+         EVERY [
+          rewrite_goals_tac (map mk_meta_eq [@{thm fst_conv}, @{thm snd_conv}]),
           rtac (cterm_instantiate inst induct) 1,
           ALLGOALS Object_Logic.atomize_prems_tac,
           rewrite_goals_tac (@{thm o_def} :: map mk_meta_eq rec_rewrites),
           REPEAT ((resolve_tac prems THEN_ALL_NEW (fn i =>
-            REPEAT (etac allE i) THEN atac i)) 1)]);
+            REPEAT (etac allE i) THEN atac i)) 1)])
+      |> Drule.export_without_context;
 
     val ind_name = Thm.derivation_name induct;
     val vs = map (fn i => List.nth (pnames, i)) is;
@@ -178,14 +180,15 @@
     val y = Var (("y", 0), Logic.varifyT_global T);
     val y' = Free ("y", T);
 
-    val thm = OldGoals.prove_goalw_cterm [] (cert (Logic.list_implies (prems,
-      HOLogic.mk_Trueprop (Free ("P", rT --> HOLogic.boolT) $
-        list_comb (r, rs @ [y'])))))
+    val thm = Goal.prove_internal (map cert prems)
+      (cert (HOLogic.mk_Trueprop (Free ("P", rT --> HOLogic.boolT) $ list_comb (r, rs @ [y']))))
       (fn prems =>
-         [rtac (cterm_instantiate [(cert y, cert y')] exhaust) 1,
+         EVERY [
+          rtac (cterm_instantiate [(cert y, cert y')] exhaust) 1,
           ALLGOALS (EVERY'
             [asm_simp_tac (HOL_basic_ss addsimps case_rewrites),
-             resolve_tac prems, asm_simp_tac HOL_basic_ss])]);
+             resolve_tac prems, asm_simp_tac HOL_basic_ss])])
+      |> Drule.export_without_context;
 
     val exh_name = Thm.derivation_name exhaust;
     val (thm', thy') = thy
@@ -213,15 +216,16 @@
 
 fun add_dt_realizers config names thy =
   if ! Proofterm.proofs < 2 then thy
-  else let
-    val _ = message config "Adding realizers for induction and case analysis ..."
-    val infos = map (Datatype.the_info thy) names;
-    val info :: _ = infos;
-  in
-    thy
-    |> fold_rev (make_ind (#sorts info) info) (subsets 0 (length (#descr info) - 1))
-    |> fold_rev (make_casedists (#sorts info)) infos
-  end;
+  else
+    let
+      val _ = message config "Adding realizers for induction and case analysis ..."
+      val infos = map (Datatype.the_info thy) names;
+      val info :: _ = infos;
+    in
+      thy
+      |> fold_rev (make_ind (#sorts info) info) (subsets 0 (length (#descr info) - 1))
+      |> fold_rev (make_casedists (#sorts info)) infos
+    end;
 
 val setup = Datatype.interpretation add_dt_realizers;
 
--- a/src/HOL/Tools/Nitpick/nitpick_isar.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Nitpick/nitpick_isar.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -348,7 +348,7 @@
                            |>> curry (op =) "genuine")
   in
     if auto orelse blocking then go ()
-    else (Toplevel.thread true (fn () => (go (); ())); (false, state))
+    else (Toplevel.thread true (tap go); (false, state))
   end
 
 fun nitpick_trans (params, i) =
--- a/src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -310,7 +310,7 @@
 val predfun_neg_intro_of = #neg_intro ooo the_predfun_data
 
 val intros_graph_of =
-  Graph.map_nodes (#intros o rep_pred_data) o PredData.get o ProofContext.theory_of
+  Graph.map (K (#intros o rep_pred_data)) o PredData.get o ProofContext.theory_of
 
 (* diagnostic display functions *)
 
--- a/src/HOL/Tools/SMT/smt_monomorph.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/SMT/smt_monomorph.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -133,7 +133,7 @@
     fun complete (vT as (v, _)) subst =
       subst
       |> not (Vartab.defined subst v) ? Vartab.update vT
-      |> Vartab.map (apsnd (Term.map_atyps (replace vT)))
+      |> Vartab.map (K (apsnd (Term.map_atyps (replace vT))))
 
     fun cert (ix, (S, T)) = pairself (Thm.ctyp_of thy) (TVar (ix, S), T)
 
--- a/src/HOL/Tools/SMT/z3_proof_parser.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/SMT/z3_proof_parser.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -191,7 +191,7 @@
     fun cert @{term True} = @{cterm "~False"}
       | cert t = certify ctxt' t
 
-  in (typs, Symtab.map cert terms, Inttab.empty, Inttab.empty, [], ctxt') end
+  in (typs, Symtab.map (K cert) terms, Inttab.empty, Inttab.empty, [], ctxt') end
 
 fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) =
   let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt
--- a/src/HOL/Tools/SMT/z3_proof_reconstruction.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/SMT/z3_proof_reconstruction.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -823,7 +823,7 @@
 
     fun result (p, (cx, _)) = (thm_of p, cx)
   in
-    (fn (idx, ptab) => result (lookup idx (ctxt, Inttab.map Unproved ptab)))
+    (fn (idx, ptab) => result (lookup idx (ctxt, Inttab.map (K Unproved) ptab)))
   end
 
 fun reconstruct (output, {typs, terms, unfolds, assms}) ctxt =
--- a/src/HOL/Tools/Sledgehammer/clausifier.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/clausifier.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -10,7 +10,8 @@
   val extensionalize_theorem : thm -> thm
   val introduce_combinators_in_cterm : cterm -> thm
   val introduce_combinators_in_theorem : thm -> thm
-  val cnf_axiom: theory -> thm -> thm list
+  val to_definitional_cnf_with_quantifiers : theory -> thm -> thm
+  val cnf_axiom : theory -> thm -> thm list
 end;
 
 structure Clausifier : CLAUSIFIER =
@@ -228,19 +229,26 @@
                   |> Meson.make_nnf ctxt
   in (th3, ctxt) end
 
+fun to_definitional_cnf_with_quantifiers thy th =
+  let
+    val eqth = cnf.make_cnfx_thm thy (HOLogic.dest_Trueprop (prop_of th))
+    val eqth = eqth RS @{thm eq_reflection}
+    val eqth = eqth RS @{thm TruepropI}
+  in Thm.equal_elim eqth th end
+
 (* Convert a theorem to CNF, with Skolem functions as additional premises. *)
 fun cnf_axiom thy th =
   let
     val ctxt0 = Variable.global_thm_context th
-    val (nnfth, ctxt) = to_nnf th ctxt0
-    val sko_ths = map (skolem_theorem_of_def thy)
-                      (assume_skolem_funs nnfth)
-    val (cnfs, ctxt) = Meson.make_cnf sko_ths nnfth ctxt
+    val (nnf_th, ctxt) = to_nnf th ctxt0
+    val def_th = to_definitional_cnf_with_quantifiers thy nnf_th
+    val sko_ths = map (skolem_theorem_of_def thy) (assume_skolem_funs def_th)
+    val (cnf_ths, ctxt) = Meson.make_cnf sko_ths def_th ctxt
   in
-    cnfs |> map introduce_combinators_in_theorem
-         |> Variable.export ctxt ctxt0
-         |> Meson.finish_cnf
-         |> map Thm.close_derivation
+    cnf_ths |> map introduce_combinators_in_theorem
+            |> Variable.export ctxt ctxt0
+            |> Meson.finish_cnf
+            |> map Thm.close_derivation
   end
   handle THM _ => []
 
--- a/src/HOL/Tools/Sledgehammer/metis_tactics.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/metis_tactics.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -795,13 +795,15 @@
 
 fun generic_metis_tac mode ctxt ths i st0 =
   let
+    val thy = ProofContext.theory_of ctxt
     val _ = trace_msg (fn () =>
         "Metis called with theorems " ^ cat_lines (map (Display.string_of_thm ctxt) ths))
   in
     if exists_type type_has_top_sort (prop_of st0) then
       (warning ("Metis: Proof state contains the universal sort {}"); Seq.empty)
     else
-      Meson.MESON (maps neg_clausify)
+      Meson.MESON (K all_tac) (* TODO: Try (cnf.cnfx_rewrite_tac ctxt) *)
+                  (maps neg_clausify)
                   (fn cls => resolve_tac (FOL_SOLVE mode ctxt cls ths) 1)
                   ctxt i st0
   end
@@ -814,7 +816,7 @@
    "by (metis X)", to prevent "Subgoal.FOCUS" from freezing the type variables.
    We don't do it for nonschematic facts "X" because this breaks a few proofs
    (in the rare and subtle case where a proof relied on extensionality not being
-   applied) and brings no benefits. *)
+   applied) and brings few benefits. *)
 val has_tvar =
   exists_type (exists_subtype (fn TVar _ => true | _ => false)) o prop_of
 fun method name mode =
--- a/src/HOL/Tools/Sledgehammer/sledgehammer.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -9,11 +9,13 @@
 signature SLEDGEHAMMER =
 sig
   type failure = ATP_Systems.failure
-  type locality = Sledgehammer_Fact_Filter.locality
-  type relevance_override = Sledgehammer_Fact_Filter.relevance_override
-  type minimize_command = Sledgehammer_Proof_Reconstruct.minimize_command
+  type locality = Sledgehammer_Filter.locality
+  type relevance_override = Sledgehammer_Filter.relevance_override
+  type fol_formula = Sledgehammer_Translate.fol_formula
+  type minimize_command = Sledgehammer_Reconstruct.minimize_command
   type params =
-    {debug: bool,
+    {blocking: bool,
+     debug: bool,
      verbose: bool,
      overlord: bool,
      atps: string list,
@@ -21,15 +23,15 @@
      explicit_apply: bool,
      relevance_thresholds: real * real,
      max_relevant: int option,
-     theory_relevant: bool option,
      isar_proof: bool,
      isar_shrink_factor: int,
-     timeout: Time.time}
+     timeout: Time.time,
+     expect: string}
   type problem =
-    {subgoal: int,
-     goal: Proof.context * (thm list * thm),
-     relevance_override: relevance_override,
-     axioms: ((string * locality) * thm) list option}
+    {ctxt: Proof.context,
+     goal: thm,
+     subgoal: int,
+     axioms: (term * ((string * locality) * fol_formula) option) list}
   type prover_result =
     {outcome: failure option,
      message: string,
@@ -44,7 +46,7 @@
 
   val dest_dir : string Config.T
   val problem_prefix : string Config.T
-  val measure_runtime : bool Config.T
+  val measure_run_time : bool Config.T
   val kill_atps: unit -> unit
   val running_atps: unit -> unit
   val messages: int option -> unit
@@ -62,9 +64,9 @@
 open ATP_Systems
 open Metis_Clauses
 open Sledgehammer_Util
-open Sledgehammer_Fact_Filter
+open Sledgehammer_Filter
 open Sledgehammer_Translate
-open Sledgehammer_Proof_Reconstruct
+open Sledgehammer_Reconstruct
 
 
 (** The Sledgehammer **)
@@ -80,7 +82,8 @@
 (** problems, results, provers, etc. **)
 
 type params =
-  {debug: bool,
+  {blocking: bool,
+   debug: bool,
    verbose: bool,
    overlord: bool,
    atps: string list,
@@ -88,16 +91,16 @@
    explicit_apply: bool,
    relevance_thresholds: real * real,
    max_relevant: int option,
-   theory_relevant: bool option,
    isar_proof: bool,
    isar_shrink_factor: int,
-   timeout: Time.time}
+   timeout: Time.time,
+   expect: string}
 
 type problem =
-  {subgoal: int,
-   goal: Proof.context * (thm list * thm),
-   relevance_override: relevance_override,
-   axioms: ((string * locality) * thm) list option}
+  {ctxt: Proof.context,
+   goal: thm,
+   subgoal: int,
+   axioms: (term * ((string * locality) * fol_formula) option) list}
 
 type prover_result =
   {outcome: failure option,
@@ -114,14 +117,15 @@
 
 (* configuration attributes *)
 
-val (dest_dir, dest_dir_setup) = Attrib.config_string "atp_dest_dir" (K "");
-  (*Empty string means create files in Isabelle's temporary files directory.*)
+val (dest_dir, dest_dir_setup) =
+  Attrib.config_string "sledgehammer_dest_dir" (K "")
+  (* Empty string means create files in Isabelle's temporary files directory. *)
 
 val (problem_prefix, problem_prefix_setup) =
-  Attrib.config_string "atp_problem_prefix" (K "prob");
+  Attrib.config_string "sledgehammer_problem_prefix" (K "prob")
 
-val (measure_runtime, measure_runtime_setup) =
-  Attrib.config_bool "atp_measure_runtime" (K false);
+val (measure_run_time, measure_run_time_setup) =
+  Attrib.config_bool "sledgehammer_measure_run_time" (K false)
 
 fun with_path cleanup after f path =
   Exn.capture f path
@@ -172,10 +176,11 @@
   |-- Scan.repeat parse_clause_formula_pair
 val extract_clause_formula_relation =
   Substring.full #> Substring.position set_ClauseFormulaRelationN
-  #> snd #> Substring.string #> strip_spaces_except_between_ident_chars
-  #> explode #> parse_clause_formula_relation #> fst
+  #> snd #> Substring.position "." #> fst #> Substring.string
+  #> explode #> filter_out Symbol.is_blank #> parse_clause_formula_relation
+  #> fst
 
-(* TODO: move to "Sledgehammer_Proof_Reconstruct.ML" *)
+(* TODO: move to "Sledgehammer_Reconstruct" *)
 fun repair_conjecture_shape_and_theorem_names output conjecture_shape
                                               axiom_names =
   if String.isSubstring set_ClauseFormulaRelationN output then
@@ -210,51 +215,31 @@
 
 fun prover_fun atp_name
         {exec, required_execs, arguments, has_incomplete_mode, proof_delims,
-         known_failures, default_max_relevant, default_theory_relevant,
-         explicit_forall, use_conjecture_for_hypotheses}
+         known_failures, default_max_relevant, explicit_forall,
+         use_conjecture_for_hypotheses}
         ({debug, verbose, overlord, full_types, explicit_apply,
-          relevance_thresholds, max_relevant, theory_relevant, isar_proof,
-          isar_shrink_factor, timeout, ...} : params)
-        minimize_command
-        ({subgoal, goal = (ctxt, (chained_ths, th)), relevance_override,
-          axioms} : problem) =
+          max_relevant, isar_proof, isar_shrink_factor, timeout, ...} : params)
+        minimize_command ({ctxt, goal, subgoal, axioms} : problem) =
   let
-    val (_, hyp_ts, concl_t) = strip_subgoal th subgoal
-
-    val print = priority
-    fun print_v f = () |> verbose ? print o f
-    fun print_d f = () |> debug ? print o f
-
-    val the_axioms =
-      case axioms of
-        SOME axioms => axioms
-      | NONE =>
-        (relevant_facts ctxt full_types relevance_thresholds
-                        (the_default default_max_relevant max_relevant)
-                        (the_default default_theory_relevant theory_relevant)
-                        relevance_override chained_ths hyp_ts concl_t
-         |> tap ((fn n => print_v (fn () =>
-                          "Selected " ^ string_of_int n ^ " fact" ^ plural_s n ^
-                          " for " ^ quote atp_name ^ ".")) o length))
-
+    val (_, hyp_ts, concl_t) = strip_subgoal goal subgoal
+    val max_relevant = the_default default_max_relevant max_relevant
+    val axioms = take max_relevant axioms
     (* path to unique problem file *)
     val the_dest_dir = if overlord then getenv "ISABELLE_HOME_USER"
-                       else Config.get ctxt dest_dir;
-    val the_problem_prefix = Config.get ctxt problem_prefix;
-    fun prob_pathname nr =
-      let
-        val probfile =
-          Path.basic ((if overlord then "prob_" ^ atp_name
-                       else the_problem_prefix ^ serial_string ())
-                      ^ "_" ^ string_of_int nr)
-      in
-        if the_dest_dir = "" then File.tmp_path probfile
-        else if File.exists (Path.explode the_dest_dir)
-        then Path.append (Path.explode the_dest_dir) probfile
-        else error ("No such directory: " ^ quote the_dest_dir ^ ".")
-      end;
-
-    val measure_run_time = verbose orelse Config.get ctxt measure_runtime
+                       else Config.get ctxt dest_dir
+    val the_problem_prefix = Config.get ctxt problem_prefix
+    val problem_file_name =
+      Path.basic ((if overlord then "prob_" ^ atp_name
+                   else the_problem_prefix ^ serial_string ())
+                  ^ "_" ^ string_of_int subgoal)
+    val problem_path_name =
+      if the_dest_dir = "" then
+        File.tmp_path problem_file_name
+      else if File.exists (Path.explode the_dest_dir) then
+        Path.append (Path.explode the_dest_dir) problem_file_name
+      else
+        error ("No such directory: " ^ quote the_dest_dir ^ ".")
+    val measure_run_time = verbose orelse Config.get ctxt measure_run_time
     val command = Path.explode (getenv (fst exec) ^ "/" ^ snd exec)
     (* write out problem file and call prover *)
     fun command_line complete timeout probfile =
@@ -262,7 +247,7 @@
         val core = File.shell_path command ^ " " ^ arguments complete timeout ^
                    " " ^ File.shell_path probfile
       in
-        (if measure_run_time then "TIMEFORMAT='%3U'; { time " ^ core ^ " ; }"
+        (if measure_run_time then "TIMEFORMAT='%3R'; { time " ^ core ^ " ; }"
          else "exec " ^ core) ^ " 2>&1"
       end
     fun split_time s =
@@ -300,14 +285,13 @@
             val readable_names = debug andalso overlord
             val (problem, pool, conjecture_offset, axiom_names) =
               prepare_problem ctxt readable_names explicit_forall full_types
-                              explicit_apply hyp_ts concl_t the_axioms
+                              explicit_apply hyp_ts concl_t axioms
             val ss = strings_for_tptp_problem use_conjecture_for_hypotheses
                                               problem
             val _ = File.write_list probfile ss
             val conjecture_shape =
               conjecture_offset + 1 upto conjecture_offset + length hyp_ts + 1
               |> map single
-            val _ = print_d (fn () => "Running " ^ quote atp_name ^ "...")
             val timer = Timer.startRealTimer ()
             val result =
               do_run false (if has_incomplete_mode then
@@ -337,7 +321,7 @@
         File.write (Path.explode (Path.implode probfile ^ "_proof")) output
     val ((pool, conjecture_shape, axiom_names),
          (output, msecs, proof, outcome)) =
-      with_path cleanup export run_on (prob_pathname subgoal)
+      with_path cleanup export run_on problem_path_name
     val (conjecture_shape, axiom_names) =
       repair_conjecture_shape_and_theorem_names output conjecture_shape
                                                 axiom_names
@@ -346,10 +330,10 @@
         NONE =>
         proof_text isar_proof
             (pool, debug, isar_shrink_factor, ctxt, conjecture_shape)
-            (full_types, minimize_command, proof, axiom_names, th, subgoal)
+            (full_types, minimize_command, proof, axiom_names, goal, subgoal)
         |>> (fn message =>
                 message ^ (if verbose then
-                             "\nATP CPU time: " ^ string_of_int msecs ^ " ms."
+                             "\nReal CPU time: " ^ string_of_int msecs ^ " ms."
                            else
                              ""))
       | SOME failure => (string_for_failure failure, [])
@@ -362,49 +346,93 @@
 
 fun get_prover_fun thy name = prover_fun name (get_prover thy name)
 
-fun start_prover_thread (params as {verbose, full_types, timeout, ...}) i n
-                        relevance_override minimize_command proof_state
-                        atp_name =
+fun run_prover ctxt (params as {blocking, timeout, expect, ...}) i n
+               relevance_override minimize_command
+               (problem as {goal, ...}) (prover, atp_name) =
   let
-    val thy = Proof.theory_of proof_state
     val birth_time = Time.now ()
     val death_time = Time.+ (birth_time, timeout)
-    val prover = get_prover_fun thy atp_name
-    val {context = ctxt, facts, goal} = Proof.goal proof_state;
     val desc =
-      "ATP " ^ quote atp_name ^ " for subgoal " ^ string_of_int i ^ ":\n" ^
-      Syntax.string_of_term ctxt (Thm.term_of (Thm.cprem_of goal i));
+      "ATP " ^ quote atp_name ^ " for subgoal " ^ string_of_int i ^ ":" ^
+      (if blocking then
+         ""
+       else
+         "\n" ^ Syntax.string_of_term ctxt (Thm.term_of (Thm.cprem_of goal i)))
+    fun run () =
+      let
+        val (outcome_code, message) =
+          prover_fun atp_name prover params (minimize_command atp_name) problem
+          |> (fn {outcome, message, ...} =>
+                 (if is_some outcome then "none" else "some", message))
+          handle ERROR message => ("unknown", "Error: " ^ message ^ "\n")
+               | exn => ("unknown", "Internal error:\n" ^
+                                    ML_Compiler.exn_message exn ^ "\n")
+      in
+        if expect = "" orelse outcome_code = expect then
+          ()
+        else if blocking then
+          error ("Unexpected outcome: " ^ quote outcome_code ^ ".")
+        else
+          warning ("Unexpected outcome: " ^ quote outcome_code ^ ".");
+        message
+      end
   in
-    Async_Manager.launch das_Tool verbose birth_time death_time desc
-        (fn () =>
-            let
-              val problem =
-                {subgoal = i, goal = (ctxt, (facts, goal)),
-                 relevance_override = relevance_override, axioms = NONE}
-            in
-              prover params (minimize_command atp_name) problem |> #message
-              handle ERROR message => "Error: " ^ message ^ "\n"
-                   | exn => "Internal error: \n" ^ ML_Compiler.exn_message exn ^
-                            "\n"
-            end)
+    if blocking then priority (desc ^ "\n" ^ TimeLimit.timeLimit timeout run ())
+    else Async_Manager.launch das_Tool birth_time death_time desc run
   end
 
-fun run_sledgehammer {atps = [], ...} _ _ _ _ = error "No ATP is set."
-  | run_sledgehammer (params as {atps, ...}) i relevance_override
-                     minimize_command state =
-    case subgoal_count state of
+fun run_sledgehammer (params as {blocking, verbose, atps, full_types,
+                                 relevance_thresholds, max_relevant, ...})
+                     i relevance_override minimize_command state =
+    if null atps then error "No ATP is set."
+    else case subgoal_count state of
       0 => priority "No subgoal!"
     | n =>
       let
-        val _ = kill_atps ()
+        val timer = Timer.startRealTimer ()
+        val _ = () |> not blocking ? kill_atps
         val _ = priority "Sledgehammering..."
-        val _ = app (start_prover_thread params i n relevance_override
-                                         minimize_command state) atps
-      in () end
+        fun run () =
+          let
+            val {context = ctxt, facts = chained_ths, goal} = Proof.goal state
+            val thy = Proof.theory_of state
+            val (_, hyp_ts, concl_t) = strip_subgoal goal i
+            val provers = map (`(get_prover thy)) atps
+            val max_max_relevant =
+              case max_relevant of
+                SOME n => n
+              | NONE => fold (Integer.max o #default_max_relevant o fst)
+                             provers 0
+            val axioms =
+              relevant_facts ctxt full_types relevance_thresholds
+                             max_max_relevant relevance_override chained_ths
+                             hyp_ts concl_t
+            val problem =
+              {ctxt = ctxt, goal = goal, subgoal = i,
+               axioms = map (prepare_axiom ctxt) axioms}
+            val num_axioms = length axioms
+            val _ = if verbose then
+                      priority ("Selected " ^ string_of_int num_axioms ^
+                                " fact" ^ plural_s num_axioms ^ ".")
+                    else
+                      ()
+            val _ =
+              (if blocking then Par_List.map else map)
+                  (run_prover ctxt params i n relevance_override
+                              minimize_command problem) provers
+          in
+            if verbose andalso blocking then
+              priority ("Total time: " ^
+                        signed_string_of_int (Time.toMilliseconds
+                            (Timer.checkRealTimer timer)) ^ " ms.")
+            else
+              ()
+          end
+      in if blocking then run () else Toplevel.thread true (tap run) |> K () end
 
 val setup =
   dest_dir_setup
   #> problem_prefix_setup
-  #> measure_runtime_setup
+  #> measure_run_time_setup
 
 end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML	Thu Sep 02 20:29:12 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,800 +0,0 @@
-(*  Title:      HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML
-    Author:     Jia Meng, Cambridge University Computer Laboratory and NICTA
-    Author:     Jasmin Blanchette, TU Muenchen
-*)
-
-signature SLEDGEHAMMER_FACT_FILTER =
-sig
-  datatype locality = General | Intro | Elim | Simp | Local | Chained
-
-  type relevance_override =
-    {add: Facts.ref list,
-     del: Facts.ref list,
-     only: bool}
-
-  val trace : bool Unsynchronized.ref
-  val worse_irrel_freq : real Unsynchronized.ref
-  val higher_order_irrel_weight : real Unsynchronized.ref
-  val abs_rel_weight : real Unsynchronized.ref
-  val abs_irrel_weight : real Unsynchronized.ref
-  val skolem_irrel_weight : real Unsynchronized.ref
-  val intro_bonus : real Unsynchronized.ref
-  val elim_bonus : real Unsynchronized.ref
-  val simp_bonus : real Unsynchronized.ref
-  val local_bonus : real Unsynchronized.ref
-  val chained_bonus : real Unsynchronized.ref
-  val max_imperfect : real Unsynchronized.ref
-  val max_imperfect_exp : real Unsynchronized.ref
-  val threshold_divisor : real Unsynchronized.ref
-  val ridiculous_threshold : real Unsynchronized.ref
-  val name_thm_pairs_from_ref :
-    Proof.context -> unit Symtab.table -> thm list -> Facts.ref
-    -> ((string * locality) * thm) list
-  val relevant_facts :
-    Proof.context -> bool -> real * real -> int -> bool -> relevance_override
-    -> thm list -> term list -> term -> ((string * locality) * thm) list
-end;
-
-structure Sledgehammer_Fact_Filter : SLEDGEHAMMER_FACT_FILTER =
-struct
-
-open Sledgehammer_Util
-
-val trace = Unsynchronized.ref false
-fun trace_msg msg = if !trace then tracing (msg ()) else ()
-
-(* experimental feature *)
-val term_patterns = false
-
-val respect_no_atp = true
-
-datatype locality = General | Intro | Elim | Simp | Local | Chained
-
-type relevance_override =
-  {add: Facts.ref list,
-   del: Facts.ref list,
-   only: bool}
-
-val sledgehammer_prefix = "Sledgehammer" ^ Long_Name.separator
-
-fun repair_name reserved multi j name =
-  (name |> Symtab.defined reserved name ? quote) ^
-  (if multi then "(" ^ Int.toString j ^ ")" else "")
-
-fun name_thm_pairs_from_ref ctxt reserved chained_ths xref =
-  let
-    val ths = ProofContext.get_fact ctxt xref
-    val name = Facts.string_of_ref xref
-    val multi = length ths > 1
-  in
-    (ths, (1, []))
-    |-> fold (fn th => fn (j, rest) =>
-                 (j + 1, ((repair_name reserved multi j name,
-                          if member Thm.eq_thm chained_ths th then Chained
-                          else General), th) :: rest))
-    |> snd
-  end
-
-(***************************************************************)
-(* Relevance Filtering                                         *)
-(***************************************************************)
-
-(*** constants with types ***)
-
-fun order_of_type (Type (@{type_name fun}, [T1, @{typ bool}])) =
-    order_of_type T1 (* cheat: pretend sets are first-order *)
-  | order_of_type (Type (@{type_name fun}, [T1, T2])) =
-    Int.max (order_of_type T1 + 1, order_of_type T2)
-  | order_of_type (Type (_, Ts)) = fold (Integer.max o order_of_type) Ts 0
-  | order_of_type _ = 0
-
-(* An abstraction of Isabelle types and first-order terms *)
-datatype pattern = PVar | PApp of string * pattern list
-datatype ptype = PType of int * pattern list
-
-fun string_for_pattern PVar = "_"
-  | string_for_pattern (PApp (s, ps)) =
-    if null ps then s else s ^ string_for_patterns ps
-and string_for_patterns ps = "(" ^ commas (map string_for_pattern ps) ^ ")"
-fun string_for_ptype (PType (_, ps)) = string_for_patterns ps
-
-(*Is the second type an instance of the first one?*)
-fun match_pattern (PVar, _) = true
-  | match_pattern (PApp _, PVar) = false
-  | match_pattern (PApp (s, ps), PApp (t, qs)) =
-    s = t andalso match_patterns (ps, qs)
-and match_patterns (_, []) = true
-  | match_patterns ([], _) = false
-  | match_patterns (p :: ps, q :: qs) =
-    match_pattern (p, q) andalso match_patterns (ps, qs)
-fun match_ptype (PType (_, ps), PType (_, qs)) = match_patterns (ps, qs)
-
-(* Is there a unifiable constant? *)
-fun pconst_mem f consts (s, ps) =
-  exists (curry (match_ptype o f) ps)
-         (map snd (filter (curry (op =) s o fst) consts))
-fun pconst_hyper_mem f const_tab (s, ps) =
-  exists (curry (match_ptype o f) ps) (these (Symtab.lookup const_tab s))
-
-fun pattern_for_type (Type (s, Ts)) = PApp (s, map pattern_for_type Ts)
-  | pattern_for_type (TFree (s, _)) = PApp (s, [])
-  | pattern_for_type (TVar _) = PVar
-
-fun pterm thy t =
-  case strip_comb t of
-    (Const x, ts) => PApp (pconst thy true x ts)
-  | (Free x, ts) => PApp (pconst thy false x ts)
-  | (Var x, []) => PVar
-  | _ => PApp ("?", [])  (* equivalence class of higher-order constructs *)
-(* Pairs a constant with the list of its type instantiations. *)
-and ptype thy const x ts =
-  (if const then map pattern_for_type (these (try (Sign.const_typargs thy) x))
-   else []) @
-  (if term_patterns then map (pterm thy) ts else [])
-and pconst thy const (s, T) ts = (s, ptype thy const (s, T) ts)
-and rich_ptype thy const (s, T) ts =
-  PType (order_of_type T, ptype thy const (s, T) ts)
-and rich_pconst thy const (s, T) ts = (s, rich_ptype thy const (s, T) ts)
-
-fun string_for_hyper_pconst (s, ps) =
-  s ^ "{" ^ commas (map string_for_ptype ps) ^ "}"
-
-val abs_name = "Sledgehammer.abs"
-val skolem_prefix = "Sledgehammer.sko"
-
-(* These are typically simplified away by "Meson.presimplify". Equality is
-   handled specially via "fequal". *)
-val boring_consts =
-  [@{const_name False}, @{const_name True}, @{const_name If}, @{const_name Let},
-   @{const_name HOL.eq}]
-
-(* Add a pconstant to the table, but a [] entry means a standard
-   connective, which we ignore.*)
-fun add_pconst_to_table also_skolem (c, p) =
-  if member (op =) boring_consts c orelse
-     (not also_skolem andalso String.isPrefix skolem_prefix c) then
-    I
-  else
-    Symtab.map_default (c, [p]) (insert (op =) p)
-
-fun is_formula_type T = (T = HOLogic.boolT orelse T = propT)
-
-fun pconsts_in_terms thy also_skolems pos ts =
-  let
-    val flip = Option.map not
-    (* We include free variables, as well as constants, to handle locales. For
-       each quantifiers that must necessarily be skolemized by the ATP, we
-       introduce a fresh constant to simulate the effect of Skolemization. *)
-    fun do_const const (s, T) ts =
-      add_pconst_to_table also_skolems (rich_pconst thy const (s, T) ts)
-      #> fold do_term ts
-    and do_term t =
-      case strip_comb t of
-        (Const x, ts) => do_const true x ts
-      | (Free x, ts) => do_const false x ts
-      | (Abs (_, T, t'), ts) =>
-        (null ts
-         ? add_pconst_to_table true (abs_name, PType (order_of_type T + 1, [])))
-        #> fold do_term (t' :: ts)
-      | (_, ts) => fold do_term ts
-    fun do_quantifier will_surely_be_skolemized abs_T body_t =
-      do_formula pos body_t
-      #> (if also_skolems andalso will_surely_be_skolemized then
-            add_pconst_to_table true
-                         (gensym skolem_prefix, PType (order_of_type abs_T, []))
-          else
-            I)
-    and do_term_or_formula T =
-      if is_formula_type T then do_formula NONE else do_term
-    and do_formula pos t =
-      case t of
-        Const (@{const_name all}, _) $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME false) T t'
-      | @{const "==>"} $ t1 $ t2 =>
-        do_formula (flip pos) t1 #> do_formula pos t2
-      | Const (@{const_name "=="}, Type (_, [T, _])) $ t1 $ t2 =>
-        fold (do_term_or_formula T) [t1, t2]
-      | @{const Trueprop} $ t1 => do_formula pos t1
-      | @{const Not} $ t1 => do_formula (flip pos) t1
-      | Const (@{const_name All}, _) $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME false) T t'
-      | Const (@{const_name Ex}, _) $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME true) T t'
-      | @{const HOL.conj} $ t1 $ t2 => fold (do_formula pos) [t1, t2]
-      | @{const HOL.disj} $ t1 $ t2 => fold (do_formula pos) [t1, t2]
-      | @{const HOL.implies} $ t1 $ t2 =>
-        do_formula (flip pos) t1 #> do_formula pos t2
-      | Const (@{const_name HOL.eq}, Type (_, [T, _])) $ t1 $ t2 =>
-        fold (do_term_or_formula T) [t1, t2]
-      | Const (@{const_name If}, Type (_, [_, Type (_, [T, _])]))
-        $ t1 $ t2 $ t3 =>
-        do_formula NONE t1 #> fold (do_term_or_formula T) [t2, t3]
-      | Const (@{const_name Ex1}, _) $ Abs (_, T, t') =>
-        do_quantifier (is_some pos) T t'
-      | Const (@{const_name Ball}, _) $ t1 $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME false) T
-                      (HOLogic.mk_imp (incr_boundvars 1 t1 $ Bound 0, t'))
-      | Const (@{const_name Bex}, _) $ t1 $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME true) T
-                      (HOLogic.mk_conj (incr_boundvars 1 t1 $ Bound 0, t'))
-      | (t0 as Const (_, @{typ bool})) $ t1 =>
-        do_term t0 #> do_formula pos t1  (* theory constant *)
-      | _ => do_term t
-  in Symtab.empty |> fold (do_formula pos) ts end
-
-(*Inserts a dummy "constant" referring to the theory name, so that relevance
-  takes the given theory into account.*)
-fun theory_const_prop_of theory_relevant th =
-  if theory_relevant then
-    let
-      val name = Context.theory_name (theory_of_thm th)
-      val t = Const (name ^ ". 1", @{typ bool})
-    in t $ prop_of th end
-  else
-    prop_of th
-
-(**** Constant / Type Frequencies ****)
-
-(* A two-dimensional symbol table counts frequencies of constants. It's keyed
-   first by constant name and second by its list of type instantiations. For the
-   latter, we need a linear ordering on "pattern list". *)
-
-fun pattern_ord p =
-  case p of
-    (PVar, PVar) => EQUAL
-  | (PVar, PApp _) => LESS
-  | (PApp _, PVar) => GREATER
-  | (PApp q1, PApp q2) =>
-    prod_ord fast_string_ord (dict_ord pattern_ord) (q1, q2)
-fun ptype_ord (PType p, PType q) =
-  prod_ord (dict_ord pattern_ord) int_ord (swap p, swap q)
-
-structure PType_Tab = Table(type key = ptype val ord = ptype_ord)
-
-fun count_axiom_consts theory_relevant thy =
-  let
-    fun do_const const (s, T) ts =
-      (* Two-dimensional table update. Constant maps to types maps to count. *)
-      PType_Tab.map_default (rich_ptype thy const (s, T) ts, 0) (Integer.add 1)
-      |> Symtab.map_default (s, PType_Tab.empty)
-      #> fold do_term ts
-    and do_term t =
-      case strip_comb t of
-        (Const x, ts) => do_const true x ts
-      | (Free x, ts) => do_const false x ts
-      | (Abs (_, _, t'), ts) => fold do_term (t' :: ts)
-      | (_, ts) => fold do_term ts
-  in do_term o theory_const_prop_of theory_relevant o snd end
-
-
-(**** Actual Filtering Code ****)
-
-fun pow_int x 0 = 1.0
-  | pow_int x 1 = x
-  | pow_int x n = if n > 0 then x * pow_int x (n - 1) else pow_int x (n + 1) / x
-
-(*The frequency of a constant is the sum of those of all instances of its type.*)
-fun pconst_freq match const_tab (c, ps) =
-  PType_Tab.fold (fn (qs, m) => match (ps, qs) ? Integer.add m)
-                 (the (Symtab.lookup const_tab c)) 0
-
-
-(* A surprising number of theorems contain only a few significant constants.
-   These include all induction rules, and other general theorems. *)
-
-(* "log" seems best in practice. A constant function of one ignores the constant
-   frequencies. Rare constants give more points if they are relevant than less
-   rare ones. *)
-fun rel_weight_for order freq = 1.0 + 2.0 / Math.ln (Real.fromInt freq + 1.0)
-
-(* FUDGE *)
-val worse_irrel_freq = Unsynchronized.ref 100.0
-val higher_order_irrel_weight = Unsynchronized.ref 1.05
-
-(* Irrelevant constants are treated differently. We associate lower penalties to
-   very rare constants and very common ones -- the former because they can't
-   lead to the inclusion of too many new facts, and the latter because they are
-   so common as to be of little interest. *)
-fun irrel_weight_for order freq =
-  let val (k, x) = !worse_irrel_freq |> `Real.ceil in
-    (if freq < k then Math.ln (Real.fromInt (freq + 1)) / Math.ln x
-     else rel_weight_for order freq / rel_weight_for order k)
-    * pow_int (!higher_order_irrel_weight) (order - 1)
-  end
-
-(* FUDGE *)
-val abs_rel_weight = Unsynchronized.ref 0.5
-val abs_irrel_weight = Unsynchronized.ref 2.0
-val skolem_irrel_weight = Unsynchronized.ref 0.75
-
-(* Computes a constant's weight, as determined by its frequency. *)
-fun generic_pconst_weight abs_weight skolem_weight weight_for f const_tab
-                          (c as (s, PType (m, _))) =
-  if s = abs_name then abs_weight
-  else if String.isPrefix skolem_prefix s then skolem_weight
-  else weight_for m (pconst_freq (match_ptype o f) const_tab c)
-
-fun rel_pconst_weight const_tab =
-  generic_pconst_weight (!abs_rel_weight) 0.0 rel_weight_for I const_tab
-fun irrel_pconst_weight const_tab =
-  generic_pconst_weight (!abs_irrel_weight) (!skolem_irrel_weight)
-                        irrel_weight_for swap const_tab
-
-(* FUDGE *)
-val intro_bonus = Unsynchronized.ref 0.15
-val elim_bonus = Unsynchronized.ref 0.15
-val simp_bonus = Unsynchronized.ref 0.15
-val local_bonus = Unsynchronized.ref 0.55
-val chained_bonus = Unsynchronized.ref 1.5
-
-fun locality_bonus General = 0.0
-  | locality_bonus Intro = !intro_bonus
-  | locality_bonus Elim = !elim_bonus
-  | locality_bonus Simp = !simp_bonus
-  | locality_bonus Local = !local_bonus
-  | locality_bonus Chained = !chained_bonus
-
-fun axiom_weight loc const_tab relevant_consts axiom_consts =
-  case axiom_consts |> List.partition (pconst_hyper_mem I relevant_consts)
-                    ||> filter_out (pconst_hyper_mem swap relevant_consts) of
-    ([], _) => 0.0
-  | (rel, irrel) =>
-    let
-      val irrel = irrel |> filter_out (pconst_mem swap rel)
-      val rel_weight =
-        0.0 |> fold (curry (op +) o rel_pconst_weight const_tab) rel
-      val irrel_weight =
-        ~ (locality_bonus loc)
-        |> fold (curry (op +) o irrel_pconst_weight const_tab) irrel
-      val res = rel_weight / (rel_weight + irrel_weight)
-    in if Real.isFinite res then res else 0.0 end
-
-(* FIXME: experiment
-fun debug_axiom_weight loc const_tab relevant_consts axiom_consts =
-  case axiom_consts |> List.partition (pconst_hyper_mem I relevant_consts)
-                    ||> filter_out (pconst_hyper_mem swap relevant_consts) of
-    ([], _) => 0.0
-  | (rel, irrel) =>
-    let
-      val irrel = irrel |> filter_out (pconst_mem swap rel)
-      val rels_weight =
-        0.0 |> fold (curry (op +) o rel_pconst_weight const_tab) rel
-      val irrels_weight =
-        ~ (locality_bonus loc)
-        |> fold (curry (op +) o irrel_pconst_weight const_tab) irrel
-val _ = tracing (PolyML.makestring ("REL: ", map (`(rel_pconst_weight const_tab)) rel))
-val _ = tracing (PolyML.makestring ("IRREL: ", map (`(irrel_pconst_weight const_tab)) irrel))
-      val res = rels_weight / (rels_weight + irrels_weight)
-    in if Real.isFinite res then res else 0.0 end
-*)
-
-fun pconsts_in_axiom thy t =
-  Symtab.fold (fn (s, pss) => fold (cons o pair s) pss)
-              (pconsts_in_terms thy true (SOME true) [t]) []
-fun pair_consts_axiom theory_relevant thy axiom =
-  case axiom |> snd |> theory_const_prop_of theory_relevant
-             |> pconsts_in_axiom thy of
-    [] => NONE
-  | consts => SOME ((axiom, consts), NONE)
-
-type annotated_thm =
-  (((unit -> string) * locality) * thm) * (string * ptype) list
-
-(* FUDGE *)
-val max_imperfect = Unsynchronized.ref 11.5
-val max_imperfect_exp = Unsynchronized.ref 1.0
-
-fun take_most_relevant max_relevant remaining_max
-                       (candidates : (annotated_thm * real) list) =
-  let
-    val max_imperfect =
-      Real.ceil (Math.pow (!max_imperfect,
-                    Math.pow (Real.fromInt remaining_max
-                              / Real.fromInt max_relevant, !max_imperfect_exp)))
-    val (perfect, imperfect) =
-      candidates |> sort (Real.compare o swap o pairself snd)
-                 |> take_prefix (fn (_, w) => w > 0.99999)
-    val ((accepts, more_rejects), rejects) =
-      chop max_imperfect imperfect |>> append perfect |>> chop remaining_max
-  in
-    trace_msg (fn () =>
-        "Actually passed (" ^ Int.toString (length accepts) ^ " of " ^
-        Int.toString (length candidates) ^ "): " ^
-        (accepts |> map (fn ((((name, _), _), _), weight) =>
-                            name () ^ " [" ^ Real.toString weight ^ "]")
-                 |> commas));
-    (accepts, more_rejects @ rejects)
-  end
-
-fun if_empty_replace_with_locality thy axioms loc tab =
-  if Symtab.is_empty tab then
-    pconsts_in_terms thy false (SOME false)
-        (map_filter (fn ((_, loc'), th) =>
-                        if loc' = loc then SOME (prop_of th) else NONE) axioms)
-  else
-    tab
-
-(* FUDGE *)
-val threshold_divisor = Unsynchronized.ref 2.0
-val ridiculous_threshold = Unsynchronized.ref 0.1
-
-fun relevance_filter ctxt threshold0 decay max_relevant theory_relevant
-                     ({add, del, ...} : relevance_override) axioms goal_ts =
-  let
-    val thy = ProofContext.theory_of ctxt
-    val const_tab =
-      fold (count_axiom_consts theory_relevant thy) axioms Symtab.empty
-    val goal_const_tab =
-      pconsts_in_terms thy false (SOME false) goal_ts
-      |> fold (if_empty_replace_with_locality thy axioms) [Chained, Local]
-    val add_thms = maps (ProofContext.get_fact ctxt) add
-    val del_thms = maps (ProofContext.get_fact ctxt) del
-    fun iter j remaining_max threshold rel_const_tab hopeless hopeful =
-      let
-        fun game_over rejects =
-          (* Add "add:" facts. *)
-          if null add_thms then
-            []
-          else
-            map_filter (fn ((p as (_, th), _), _) =>
-                           if member Thm.eq_thm add_thms th then SOME p
-                           else NONE) rejects
-        fun relevant [] rejects [] =
-            (* Nothing has been added this iteration. *)
-            if j = 0 andalso threshold >= !ridiculous_threshold then
-              (* First iteration? Try again. *)
-              iter 0 max_relevant (threshold / !threshold_divisor) rel_const_tab
-                   hopeless hopeful
-            else
-              game_over (rejects @ hopeless)
-          | relevant candidates rejects [] =
-            let
-              val (accepts, more_rejects) =
-                take_most_relevant max_relevant remaining_max candidates
-              val rel_const_tab' =
-                rel_const_tab
-                |> fold (add_pconst_to_table false) (maps (snd o fst) accepts)
-              fun is_dirty (c, _) =
-                Symtab.lookup rel_const_tab' c <> Symtab.lookup rel_const_tab c
-              val (hopeful_rejects, hopeless_rejects) =
-                 (rejects @ hopeless, ([], []))
-                 |-> fold (fn (ax as (_, consts), old_weight) =>
-                              if exists is_dirty consts then
-                                apfst (cons (ax, NONE))
-                              else
-                                apsnd (cons (ax, old_weight)))
-                 |>> append (more_rejects
-                             |> map (fn (ax as (_, consts), old_weight) =>
-                                        (ax, if exists is_dirty consts then NONE
-                                             else SOME old_weight)))
-              val threshold =
-                1.0 - (1.0 - threshold)
-                      * Math.pow (decay, Real.fromInt (length accepts))
-              val remaining_max = remaining_max - length accepts
-            in
-              trace_msg (fn () => "New or updated constants: " ^
-                  commas (rel_const_tab' |> Symtab.dest
-                          |> subtract (op =) (rel_const_tab |> Symtab.dest)
-                          |> map string_for_hyper_pconst));
-              map (fst o fst) accepts @
-              (if remaining_max = 0 then
-                 game_over (hopeful_rejects @ map (apsnd SOME) hopeless_rejects)
-               else
-                 iter (j + 1) remaining_max threshold rel_const_tab'
-                      hopeless_rejects hopeful_rejects)
-            end
-          | relevant candidates rejects
-                     (((ax as (((_, loc), th), axiom_consts)), cached_weight)
-                      :: hopeful) =
-            let
-              val weight =
-                case cached_weight of
-                  SOME w => w
-                | NONE => axiom_weight loc const_tab rel_const_tab axiom_consts
-(* FIXME: experiment
-val name = fst (fst (fst ax)) ()
-val _ = if String.isSubstring "positive_minus" name orelse String.isSubstring "not_exp_le_zero" name then
-tracing ("*** " ^ name ^ PolyML.makestring (debug_axiom_weight loc const_tab rel_const_tab axiom_consts))
-else
-()
-*)
-            in
-              if weight >= threshold then
-                relevant ((ax, weight) :: candidates) rejects hopeful
-              else
-                relevant candidates ((ax, weight) :: rejects) hopeful
-            end
-        in
-          trace_msg (fn () =>
-              "ITERATION " ^ string_of_int j ^ ": current threshold: " ^
-              Real.toString threshold ^ ", constants: " ^
-              commas (rel_const_tab |> Symtab.dest
-                      |> filter (curry (op <>) [] o snd)
-                      |> map string_for_hyper_pconst));
-          relevant [] [] hopeful
-        end
-  in
-    axioms |> filter_out (member Thm.eq_thm del_thms o snd)
-           |> map_filter (pair_consts_axiom theory_relevant thy)
-           |> iter 0 max_relevant threshold0 goal_const_tab []
-           |> tap (fn res => trace_msg (fn () =>
-                                "Total relevant: " ^ Int.toString (length res)))
-  end
-
-
-(***************************************************************)
-(* Retrieving and filtering lemmas                             *)
-(***************************************************************)
-
-(*** retrieve lemmas and filter them ***)
-
-(*Reject theorems with names like "List.filter.filter_list_def" or
-  "Accessible_Part.acc.defs", as these are definitions arising from packages.*)
-fun is_package_def a =
-  let val names = Long_Name.explode a
-  in
-     length names > 2 andalso
-     not (hd names = "local") andalso
-     String.isSuffix "_def" a  orelse  String.isSuffix "_defs" a
-  end;
-
-fun mk_fact_table f xs =
-  fold (Termtab.update o `(prop_of o f)) xs Termtab.empty
-fun uniquify xs = Termtab.fold (cons o snd) (mk_fact_table snd xs) []
-
-(* FIXME: put other record thms here, or declare as "no_atp" *)
-val multi_base_blacklist =
-  ["defs", "select_defs", "update_defs", "induct", "inducts", "split", "splits",
-   "split_asm", "cases", "ext_cases", "eq.simps", "eq.refl", "nchotomy",
-   "case_cong", "weak_case_cong"]
-  |> map (prefix ".")
-
-val max_lambda_nesting = 3
-
-fun term_has_too_many_lambdas max (t1 $ t2) =
-    exists (term_has_too_many_lambdas max) [t1, t2]
-  | term_has_too_many_lambdas max (Abs (_, _, t)) =
-    max = 0 orelse term_has_too_many_lambdas (max - 1) t
-  | term_has_too_many_lambdas _ _ = false
-
-(* Don't count nested lambdas at the level of formulas, since they are
-   quantifiers. *)
-fun formula_has_too_many_lambdas Ts (Abs (_, T, t)) =
-    formula_has_too_many_lambdas (T :: Ts) t
-  | formula_has_too_many_lambdas Ts t =
-    if is_formula_type (fastype_of1 (Ts, t)) then
-      exists (formula_has_too_many_lambdas Ts) (#2 (strip_comb t))
-    else
-      term_has_too_many_lambdas max_lambda_nesting t
-
-(* The max apply depth of any "metis" call in "Metis_Examples" (on 2007-10-31)
-   was 11. *)
-val max_apply_depth = 15
-
-fun apply_depth (f $ t) = Int.max (apply_depth f, apply_depth t + 1)
-  | apply_depth (Abs (_, _, t)) = apply_depth t
-  | apply_depth _ = 0
-
-fun is_formula_too_complex t =
-  apply_depth t > max_apply_depth orelse formula_has_too_many_lambdas [] t
-
-val exists_sledgehammer_const =
-  exists_Const (fn (s, _) => String.isPrefix sledgehammer_prefix s)
-
-(* FIXME: make more reliable *)
-val exists_low_level_class_const =
-  exists_Const (fn (s, _) =>
-     String.isSubstring (Long_Name.separator ^ "class" ^ Long_Name.separator) s)
-
-fun is_metastrange_theorem th =
-  case head_of (concl_of th) of
-      Const (a, _) => (a <> @{const_name Trueprop} andalso
-                       a <> @{const_name "=="})
-    | _ => false
-
-fun is_that_fact th =
-  String.isSuffix (Long_Name.separator ^ Obtain.thatN) (Thm.get_name_hint th)
-  andalso exists_subterm (fn Free (s, _) => s = Name.skolem Auto_Bind.thesisN
-                           | _ => false) (prop_of th)
-
-val type_has_top_sort =
-  exists_subtype (fn TFree (_, []) => true | TVar (_, []) => true | _ => false)
-
-(**** Predicates to detect unwanted facts (prolific or likely to cause
-      unsoundness) ****)
-
-(* Too general means, positive equality literal with a variable X as one
-   operand, when X does not occur properly in the other operand. This rules out
-   clearly inconsistent facts such as X = a | X = b, though it by no means
-   guarantees soundness. *)
-
-(* Unwanted equalities are those between a (bound or schematic) variable that
-   does not properly occur in the second operand. *)
-val is_exhaustive_finite =
-  let
-    fun is_bad_equal (Var z) t =
-        not (exists_subterm (fn Var z' => z = z' | _ => false) t)
-      | is_bad_equal (Bound j) t = not (loose_bvar1 (t, j))
-      | is_bad_equal _ _ = false
-    fun do_equals t1 t2 = is_bad_equal t1 t2 orelse is_bad_equal t2 t1
-    fun do_formula pos t =
-      case (pos, t) of
-        (_, @{const Trueprop} $ t1) => do_formula pos t1
-      | (true, Const (@{const_name all}, _) $ Abs (_, _, t')) =>
-        do_formula pos t'
-      | (true, Const (@{const_name All}, _) $ Abs (_, _, t')) =>
-        do_formula pos t'
-      | (false, Const (@{const_name Ex}, _) $ Abs (_, _, t')) =>
-        do_formula pos t'
-      | (_, @{const "==>"} $ t1 $ t2) =>
-        do_formula (not pos) t1 andalso
-        (t2 = @{prop False} orelse do_formula pos t2)
-      | (_, @{const HOL.implies} $ t1 $ t2) =>
-        do_formula (not pos) t1 andalso
-        (t2 = @{const False} orelse do_formula pos t2)
-      | (_, @{const Not} $ t1) => do_formula (not pos) t1
-      | (true, @{const HOL.disj} $ t1 $ t2) => forall (do_formula pos) [t1, t2]
-      | (false, @{const HOL.conj} $ t1 $ t2) => forall (do_formula pos) [t1, t2]
-      | (true, Const (@{const_name HOL.eq}, _) $ t1 $ t2) => do_equals t1 t2
-      | (true, Const (@{const_name "=="}, _) $ t1 $ t2) => do_equals t1 t2
-      | _ => false
-  in do_formula true end
-
-fun has_bound_or_var_of_type tycons =
-  exists_subterm (fn Var (_, Type (s, _)) => member (op =) tycons s
-                   | Abs (_, Type (s, _), _) => member (op =) tycons s
-                   | _ => false)
-
-(* Facts are forbidden to contain variables of these types. The typical reason
-   is that they lead to unsoundness. Note that "unit" satisfies numerous
-   equations like "?x = ()". The resulting clauses will have no type constraint,
-   yielding false proofs. Even "bool" leads to many unsound proofs, though only
-   for higher-order problems. *)
-val dangerous_types = [@{type_name unit}, @{type_name bool}, @{type_name prop}];
-
-(* Facts containing variables of type "unit" or "bool" or of the form
-   "ALL x. x = A | x = B | x = C" are likely to lead to unsound proofs if types
-   are omitted. *)
-fun is_dangerous_term full_types t =
-  not full_types andalso
-  let val t = transform_elim_term t in
-    has_bound_or_var_of_type dangerous_types t orelse
-    is_exhaustive_finite t
-  end
-
-fun is_theorem_bad_for_atps full_types thm =
-  let val t = prop_of thm in
-    is_formula_too_complex t orelse exists_type type_has_top_sort t orelse
-    is_dangerous_term full_types t orelse exists_sledgehammer_const t orelse
-    exists_low_level_class_const t orelse is_metastrange_theorem thm orelse
-    is_that_fact thm
-  end
-
-fun clasimpset_rules_of ctxt =
-  let
-    val {safeIs, safeEs, hazIs, hazEs, ...} = ctxt |> claset_of |> rep_cs
-    val intros = safeIs @ hazIs
-    val elims = map Classical.classical_rule (safeEs @ hazEs)
-    val simps = ctxt |> simpset_of |> dest_ss |> #simps |> map snd
-  in (mk_fact_table I intros, mk_fact_table I elims, mk_fact_table I simps) end
-
-fun all_name_thms_pairs ctxt reserved full_types add_thms chained_ths =
-  let
-    val thy = ProofContext.theory_of ctxt
-    val global_facts = PureThy.facts_of thy
-    val local_facts = ProofContext.facts_of ctxt
-    val named_locals = local_facts |> Facts.dest_static []
-    val is_chained = member Thm.eq_thm chained_ths
-    val (intros, elims, simps) =
-      if exists (curry (op <) 0.0) [!intro_bonus, !elim_bonus, !simp_bonus] then
-        clasimpset_rules_of ctxt
-      else
-        (Termtab.empty, Termtab.empty, Termtab.empty)
-    (* Unnamed nonchained formulas with schematic variables are omitted, because
-       they are rejected by the backticks (`...`) parser for some reason. *)
-    fun is_good_unnamed_local th =
-      not (Thm.has_name_hint th) andalso
-      (not (exists_subterm is_Var (prop_of th)) orelse (is_chained th)) andalso
-      forall (fn (_, ths) => not (member Thm.eq_thm ths th)) named_locals
-    val unnamed_locals =
-      union Thm.eq_thm (Facts.props local_facts) chained_ths
-      |> filter is_good_unnamed_local |> map (pair "" o single)
-    val full_space =
-      Name_Space.merge (Facts.space_of global_facts, Facts.space_of local_facts)
-    fun add_facts global foldx facts =
-      foldx (fn (name0, ths) =>
-        if name0 <> "" andalso
-           forall (not o member Thm.eq_thm add_thms) ths andalso
-           (Facts.is_concealed facts name0 orelse
-            (respect_no_atp andalso is_package_def name0) orelse
-            exists (fn s => String.isSuffix s name0) multi_base_blacklist orelse
-            String.isSuffix "_def_raw" (* FIXME: crude hack *) name0) then
-          I
-        else
-          let
-            val multi = length ths > 1
-            fun backquotify th =
-              "`" ^ Print_Mode.setmp [Print_Mode.input]
-                                 (Syntax.string_of_term ctxt) (prop_of th) ^ "`"
-              |> String.translate (fn c => if Char.isPrint c then str c else "")
-              |> simplify_spaces
-            fun check_thms a =
-              case try (ProofContext.get_thms ctxt) a of
-                NONE => false
-              | SOME ths' => Thm.eq_thms (ths, ths')
-          in
-            pair 1
-            #> fold (fn th => fn (j, rest) =>
-                 (j + 1,
-                  if is_theorem_bad_for_atps full_types th andalso
-                     not (member Thm.eq_thm add_thms th) then
-                    rest
-                  else
-                    (((fn () =>
-                          if name0 = "" then
-                            th |> backquotify
-                          else
-                            let
-                              val name1 = Facts.extern facts name0
-                              val name2 = Name_Space.extern full_space name0
-                            in
-                              case find_first check_thms [name1, name2, name0] of
-                                SOME name => repair_name reserved multi j name
-                              | NONE => ""
-                            end),
-                      let val t = prop_of th in
-                        if is_chained th then Chained
-                        else if not global then Local
-                        else if Termtab.defined intros t then Intro
-                        else if Termtab.defined elims t then Elim
-                        else if Termtab.defined simps t then Simp
-                        else General
-                      end),
-                      (multi, th)) :: rest)) ths
-            #> snd
-          end)
-  in
-    [] |> add_facts false fold local_facts (unnamed_locals @ named_locals)
-       |> add_facts true Facts.fold_static global_facts global_facts
-  end
-
-(* The single-name theorems go after the multiple-name ones, so that single
-   names are preferred when both are available. *)
-fun name_thm_pairs ctxt respect_no_atp =
-  List.partition (fst o snd) #> op @ #> map (apsnd snd)
-  #> respect_no_atp ? filter_out (No_ATPs.member ctxt o snd)
-
-(***************************************************************)
-(* ATP invocation methods setup                                *)
-(***************************************************************)
-
-fun relevant_facts ctxt full_types (threshold0, threshold1) max_relevant
-                   theory_relevant (relevance_override as {add, del, only})
-                   chained_ths hyp_ts concl_t =
-  let
-    val decay = Math.pow ((1.0 - threshold1) / (1.0 - threshold0),
-                          1.0 / Real.fromInt (max_relevant + 1))
-    val add_thms = maps (ProofContext.get_fact ctxt) add
-    val reserved = reserved_isar_keyword_table ()
-    val axioms =
-      (if only then
-         maps (map (fn ((name, loc), th) => ((K name, loc), (true, th)))
-               o name_thm_pairs_from_ref ctxt reserved chained_ths) add
-       else
-         all_name_thms_pairs ctxt reserved full_types add_thms chained_ths)
-      |> name_thm_pairs ctxt (respect_no_atp andalso not only)
-      |> uniquify
-  in
-    trace_msg (fn () => "Considering " ^ Int.toString (length axioms) ^
-                        " theorems");
-    (if threshold0 > 1.0 orelse threshold0 > threshold1 then
-       []
-     else if threshold0 < 0.0 then
-       axioms
-     else
-       relevance_filter ctxt threshold0 decay max_relevant theory_relevant
-                        relevance_override axioms (concl_t :: hyp_ts))
-    |> map (apfst (apfst (fn f => f ())))
-  end
-
-end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_minimize.ML	Thu Sep 02 20:29:12 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,162 +0,0 @@
-(*  Title:      HOL/Tools/Sledgehammer/sledgehammer_fact_minimize.ML
-    Author:     Philipp Meyer, TU Muenchen
-    Author:     Jasmin Blanchette, TU Muenchen
-
-Minimization of theorem list for Metis using automatic theorem provers.
-*)
-
-signature SLEDGEHAMMER_FACT_MINIMIZE =
-sig
-  type locality = Sledgehammer_Fact_Filter.locality
-  type params = Sledgehammer.params
-
-  val minimize_theorems :
-    params -> int -> int -> Proof.state -> ((string * locality) * thm list) list
-    -> ((string * locality) * thm list) list option * string
-  val run_minimize : params -> int -> Facts.ref list -> Proof.state -> unit
-end;
-
-structure Sledgehammer_Fact_Minimize : SLEDGEHAMMER_FACT_MINIMIZE =
-struct
-
-open ATP_Systems
-open Sledgehammer_Util
-open Sledgehammer_Fact_Filter
-open Sledgehammer_Proof_Reconstruct
-open Sledgehammer
-
-(* wrapper for calling external prover *)
-
-fun string_for_failure Unprovable = "Unprovable."
-  | string_for_failure TimedOut = "Timed out."
-  | string_for_failure _ = "Unknown error."
-
-fun n_theorems names =
-  let val n = length names in
-    string_of_int n ^ " theorem" ^ plural_s n ^
-    (if n > 0 then
-       ": " ^ (names |> map fst
-                     |> sort_distinct string_ord |> space_implode " ")
-     else
-       "")
-  end
-
-fun test_theorems ({debug, verbose, overlord, atps, full_types, isar_proof,
-                    isar_shrink_factor, ...} : params)
-                  (prover : prover) explicit_apply timeout subgoal state
-                  axioms =
-  let
-    val _ =
-      priority ("Testing " ^ n_theorems (map fst axioms) ^ "...")
-    val params =
-      {debug = debug, verbose = verbose, overlord = overlord, atps = atps,
-       full_types = full_types, explicit_apply = explicit_apply,
-       relevance_thresholds = (1.01, 1.01), max_relevant = NONE,
-       theory_relevant = NONE, isar_proof = isar_proof,
-       isar_shrink_factor = isar_shrink_factor, timeout = timeout}
-    val axioms = maps (fn (n, ths) => map (pair n) ths) axioms
-    val {context = ctxt, facts, goal} = Proof.goal state
-    val problem =
-     {subgoal = subgoal, goal = (ctxt, (facts, goal)),
-      relevance_override = {add = [], del = [], only = false},
-      axioms = SOME axioms}
-    val result as {outcome, used_thm_names, ...} = prover params (K "") problem
-  in
-    priority (case outcome of
-                NONE =>
-                if length used_thm_names = length axioms then
-                  "Found proof."
-                else
-                  "Found proof with " ^ n_theorems used_thm_names ^ "."
-              | SOME failure => string_for_failure failure);
-    result
-  end
-
-(* minimalization of thms *)
-
-fun filter_used_facts used = filter (member (op =) used o fst)
-
-fun sublinear_minimize _ [] p = p
-  | sublinear_minimize test (x :: xs) (seen, result) =
-    case test (xs @ seen) of
-      result as {outcome = NONE, proof, used_thm_names, ...} : prover_result =>
-      sublinear_minimize test (filter_used_facts used_thm_names xs)
-                         (filter_used_facts used_thm_names seen, result)
-    | _ => sublinear_minimize test xs (x :: seen, result)
-
-(* Give the ATP some slack. The ATP gets further slack because the Sledgehammer
-   preprocessing time is included in the estimate below but isn't part of the
-   timeout. *)
-val fudge_msecs = 1000
-
-fun minimize_theorems {atps = [], ...} _ _ _ _ = error "No ATP is set."
-  | minimize_theorems (params as {debug, atps = atp :: _, full_types,
-                                  isar_proof, isar_shrink_factor, timeout, ...})
-                      i n state axioms =
-  let
-    val thy = Proof.theory_of state
-    val prover = get_prover_fun thy atp
-    val msecs = Time.toMilliseconds timeout
-    val _ = priority ("Sledgehammer minimize: ATP " ^ quote atp ^ ".")
-    val {context = ctxt, goal, ...} = Proof.goal state
-    val (_, hyp_ts, concl_t) = strip_subgoal goal i
-    val explicit_apply =
-      not (forall (Meson.is_fol_term thy)
-                  (concl_t :: hyp_ts @ maps (map prop_of o snd) axioms))
-    fun do_test timeout =
-      test_theorems params prover explicit_apply timeout i state
-    val timer = Timer.startRealTimer ()
-  in
-    (case do_test timeout axioms of
-       result as {outcome = NONE, pool, used_thm_names,
-                  conjecture_shape, ...} =>
-       let
-         val time = Timer.checkRealTimer timer
-         val new_timeout =
-           Int.min (msecs, Time.toMilliseconds time + fudge_msecs)
-           |> Time.fromMilliseconds
-         val (min_thms, {proof, axiom_names, ...}) =
-           sublinear_minimize (do_test new_timeout)
-               (filter_used_facts used_thm_names axioms) ([], result)
-         val n = length min_thms
-         val _ = priority (cat_lines
-           ["Minimized: " ^ string_of_int n ^ " theorem" ^ plural_s n] ^
-            (case length (filter (curry (op =) Chained o snd o fst) min_thms) of
-               0 => ""
-             | n => " (including " ^ Int.toString n ^ " chained)") ^ ".")
-       in
-         (SOME min_thms,
-          proof_text isar_proof
-              (pool, debug, isar_shrink_factor, ctxt, conjecture_shape)
-              (full_types, K "", proof, axiom_names, goal, i) |> fst)
-       end
-     | {outcome = SOME TimedOut, ...} =>
-       (NONE, "Timeout: You can increase the time limit using the \"timeout\" \
-              \option (e.g., \"timeout = " ^
-              string_of_int (10 + msecs div 1000) ^ " s\").")
-     | {outcome = SOME UnknownError, ...} =>
-       (* Failure sometimes mean timeout, unfortunately. *)
-       (NONE, "Failure: No proof was found with the current time limit. You \
-              \can increase the time limit using the \"timeout\" \
-              \option (e.g., \"timeout = " ^
-              string_of_int (10 + msecs div 1000) ^ " s\").")
-     | {message, ...} => (NONE, "ATP error: " ^ message))
-    handle ERROR msg => (NONE, "Error: " ^ msg)
-  end
-
-fun run_minimize params i refs state =
-  let
-    val ctxt = Proof.context_of state
-    val reserved = reserved_isar_keyword_table ()
-    val chained_ths = #facts (Proof.goal state)
-    val axioms =
-      maps (map (apsnd single)
-            o name_thm_pairs_from_ref ctxt reserved chained_ths) refs
-  in
-    case subgoal_count state of
-      0 => priority "No subgoal!"
-    | n =>
-      (kill_atps (); priority (#2 (minimize_theorems params i n state axioms)))
-  end
-
-end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_filter.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -0,0 +1,812 @@
+(*  Title:      HOL/Tools/Sledgehammer/sledgehammer_filter.ML
+    Author:     Jia Meng, Cambridge University Computer Laboratory and NICTA
+    Author:     Jasmin Blanchette, TU Muenchen
+*)
+
+signature SLEDGEHAMMER_FILTER =
+sig
+  datatype locality = General | Intro | Elim | Simp | Local | Assum | Chained
+
+  type relevance_override =
+    {add: (Facts.ref * Attrib.src list) list,
+     del: (Facts.ref * Attrib.src list) list,
+     only: bool}
+
+  val trace : bool Unsynchronized.ref
+  val worse_irrel_freq : real Unsynchronized.ref
+  val higher_order_irrel_weight : real Unsynchronized.ref
+  val abs_rel_weight : real Unsynchronized.ref
+  val abs_irrel_weight : real Unsynchronized.ref
+  val skolem_irrel_weight : real Unsynchronized.ref
+  val theory_const_rel_weight : real Unsynchronized.ref
+  val theory_const_irrel_weight : real Unsynchronized.ref
+  val intro_bonus : real Unsynchronized.ref
+  val elim_bonus : real Unsynchronized.ref
+  val simp_bonus : real Unsynchronized.ref
+  val local_bonus : real Unsynchronized.ref
+  val assum_bonus : real Unsynchronized.ref
+  val chained_bonus : real Unsynchronized.ref
+  val max_imperfect : real Unsynchronized.ref
+  val max_imperfect_exp : real Unsynchronized.ref
+  val threshold_divisor : real Unsynchronized.ref
+  val ridiculous_threshold : real Unsynchronized.ref
+  val name_thm_pairs_from_ref :
+    Proof.context -> unit Symtab.table -> thm list
+    -> Facts.ref * Attrib.src list -> ((string * locality) * thm) list
+  val relevant_facts :
+    Proof.context -> bool -> real * real -> int -> relevance_override
+    -> thm list -> term list -> term -> ((string * locality) * thm) list
+end;
+
+structure Sledgehammer_Filter : SLEDGEHAMMER_FILTER =
+struct
+
+open Sledgehammer_Util
+
+val trace = Unsynchronized.ref false
+fun trace_msg msg = if !trace then tracing (msg ()) else ()
+
+(* experimental features *)
+val term_patterns = false
+val respect_no_atp = true
+
+(* FUDGE *)
+val worse_irrel_freq = Unsynchronized.ref 100.0
+val higher_order_irrel_weight = Unsynchronized.ref 1.05
+val abs_rel_weight = Unsynchronized.ref 0.5
+val abs_irrel_weight = Unsynchronized.ref 2.0
+val skolem_irrel_weight = Unsynchronized.ref 0.75
+val theory_const_rel_weight = Unsynchronized.ref 0.5
+val theory_const_irrel_weight = Unsynchronized.ref 0.25
+val intro_bonus = Unsynchronized.ref 0.15
+val elim_bonus = Unsynchronized.ref 0.15
+val simp_bonus = Unsynchronized.ref 0.15
+val local_bonus = Unsynchronized.ref 0.55
+val assum_bonus = Unsynchronized.ref 1.05
+val chained_bonus = Unsynchronized.ref 1.5
+val max_imperfect = Unsynchronized.ref 11.5
+val max_imperfect_exp = Unsynchronized.ref 1.0
+val threshold_divisor = Unsynchronized.ref 2.0
+val ridiculous_threshold = Unsynchronized.ref 0.1
+
+datatype locality = General | Intro | Elim | Simp | Local | Assum | Chained
+
+type relevance_override =
+  {add: (Facts.ref * Attrib.src list) list,
+   del: (Facts.ref * Attrib.src list) list,
+   only: bool}
+
+val sledgehammer_prefix = "Sledgehammer" ^ Long_Name.separator
+val abs_name = "Sledgehammer.abs"
+val skolem_prefix = "Sledgehammer.sko"
+val theory_const_suffix = Long_Name.separator ^ " 1"
+
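+(* Quote names that clash with reserved Isar keywords and, for references that
+   stand for several theorems, append an index such as "(2)". *)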
+fun repair_name reserved multi j name =
+  (name |> Symtab.defined reserved name ? quote) ^
+  (if multi then "(" ^ Int.toString j ^ ")" else "")
+
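+(* Turn a fact reference (together with its attributes) into named
+   theorem-locality pairs; chained facts are marked as such. *)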
+fun name_thm_pairs_from_ref ctxt reserved chained_ths (xthm as (xref, args)) =
+  let
+    val ths = Attrib.eval_thms ctxt [xthm]
+    val bracket =
+      implode (map (fn arg => "[" ^ Pretty.str_of (Args.pretty_src ctxt arg)
+                               ^ "]") args)
+    val space_bracket = if bracket = "" then "" else " " ^ bracket
+    val name =
+      case xref of
+        Facts.Fact s => "`" ^ s ^ "`" ^ space_bracket
+      | Facts.Named (("", _), _) => "[" ^ bracket ^ "]"
+      | _ => Facts.string_of_ref xref ^ space_bracket
+    val multi = length ths > 1
+  in
+    (ths, (1, []))
+    |-> fold (fn th => fn (j, rest) =>
+                 (j + 1, ((repair_name reserved multi j name,
+                          if member Thm.eq_thm chained_ths th then Chained
+                          else General), th) :: rest))
+    |> snd
+  end
+
+(***************************************************************)
+(* Relevance Filtering                                         *)
+(***************************************************************)
+
+(*** constants with types ***)
+
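+(* e.g. order_of_type is 0 for "nat", 1 for "nat => nat", and 2 for
+   "(nat => nat) => nat"; a predicate type such as "nat => bool" simply gets
+   the order of its argument type. *)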
+fun order_of_type (Type (@{type_name fun}, [T1, @{typ bool}])) =
+    order_of_type T1 (* cheat: pretend sets are first-order *)
+  | order_of_type (Type (@{type_name fun}, [T1, T2])) =
+    Int.max (order_of_type T1 + 1, order_of_type T2)
+  | order_of_type (Type (_, Ts)) = fold (Integer.max o order_of_type) Ts 0
+  | order_of_type _ = 0
+
+(* An abstraction of Isabelle types and first-order terms *)
+datatype pattern = PVar | PApp of string * pattern list
+datatype ptype = PType of int * pattern list
+
+fun string_for_pattern PVar = "_"
+  | string_for_pattern (PApp (s, ps)) =
+    if null ps then s else s ^ string_for_patterns ps
+and string_for_patterns ps = "(" ^ commas (map string_for_pattern ps) ^ ")"
+fun string_for_ptype (PType (_, ps)) = string_for_patterns ps
+
+(* Is the second type an instance of the first one? *)
+fun match_pattern (PVar, _) = true
+  | match_pattern (PApp _, PVar) = false
+  | match_pattern (PApp (s, ps), PApp (t, qs)) =
+    s = t andalso match_patterns (ps, qs)
+and match_patterns (_, []) = true
+  | match_patterns ([], _) = false
+  | match_patterns (p :: ps, q :: qs) =
+    match_pattern (p, q) andalso match_patterns (ps, qs)
+fun match_ptype (PType (_, ps), PType (_, qs)) = match_patterns (ps, qs)
+
+(* Is there a unifiable constant? *)
+fun pconst_mem f consts (s, ps) =
+  exists (curry (match_ptype o f) ps)
+         (map snd (filter (curry (op =) s o fst) consts))
+fun pconst_hyper_mem f const_tab (s, ps) =
+  exists (curry (match_ptype o f) ps) (these (Symtab.lookup const_tab s))
+
+fun pattern_for_type (Type (s, Ts)) = PApp (s, map pattern_for_type Ts)
+  | pattern_for_type (TFree (s, _)) = PApp (s, [])
+  | pattern_for_type (TVar _) = PVar
+
+fun pterm thy t =
+  case strip_comb t of
+    (Const x, ts) => PApp (pconst thy true x ts)
+  | (Free x, ts) => PApp (pconst thy false x ts)
+  | (Var x, []) => PVar
+  | _ => PApp ("?", [])  (* equivalence class of higher-order constructs *)
+(* Pairs a constant with the list of its type instantiations. *)
+and ptype thy const x ts =
+  (if const then map pattern_for_type (these (try (Sign.const_typargs thy) x))
+   else []) @
+  (if term_patterns then map (pterm thy) ts else [])
+and pconst thy const (s, T) ts = (s, ptype thy const (s, T) ts)
+and rich_ptype thy const (s, T) ts =
+  PType (order_of_type T, ptype thy const (s, T) ts)
+and rich_pconst thy const (s, T) ts = (s, rich_ptype thy const (s, T) ts)
+
+fun string_for_hyper_pconst (s, ps) =
+  s ^ "{" ^ commas (map string_for_ptype ps) ^ "}"
+
+(* These are typically simplified away by "Meson.presimplify". Equality is
+   handled specially via "fequal". *)
+val boring_consts =
+  [@{const_name False}, @{const_name True}, @{const_name If}, @{const_name Let},
+   @{const_name HOL.eq}]
+
+(* Add a pconstant to the table. An entry of [] means a standard
+   connective, which we ignore. *)
+fun add_pconst_to_table also_skolem (c, p) =
+  if member (op =) boring_consts c orelse
+     (not also_skolem andalso String.isPrefix skolem_prefix c) then
+    I
+  else
+    Symtab.map_default (c, [p]) (insert (op =) p)
+
+fun is_formula_type T = (T = HOLogic.boolT orelse T = propT)
+
+fun pconsts_in_terms thy also_skolems pos ts =
+  let
+    val flip = Option.map not
+    (* We include free variables, as well as constants, to handle locales. For
+       each quantifier that must necessarily be skolemized by the ATP, we
+       introduce a fresh constant to simulate the effect of skolemization. *)
+    fun do_const const (s, T) ts =
+      add_pconst_to_table also_skolems (rich_pconst thy const (s, T) ts)
+      #> fold do_term ts
+    and do_term t =
+      case strip_comb t of
+        (Const x, ts) => do_const true x ts
+      | (Free x, ts) => do_const false x ts
+      | (Abs (_, T, t'), ts) =>
+        (null ts
+         ? add_pconst_to_table true (abs_name, PType (order_of_type T + 1, [])))
+        #> fold do_term (t' :: ts)
+      | (_, ts) => fold do_term ts
+    fun do_quantifier will_surely_be_skolemized abs_T body_t =
+      do_formula pos body_t
+      #> (if also_skolems andalso will_surely_be_skolemized then
+            add_pconst_to_table true
+                         (gensym skolem_prefix, PType (order_of_type abs_T, []))
+          else
+            I)
+    and do_term_or_formula T =
+      if is_formula_type T then do_formula NONE else do_term
+    and do_formula pos t =
+      case t of
+        Const (@{const_name all}, _) $ Abs (_, T, t') =>
+        do_quantifier (pos = SOME false) T t'
+      | @{const "==>"} $ t1 $ t2 =>
+        do_formula (flip pos) t1 #> do_formula pos t2
+      | Const (@{const_name "=="}, Type (_, [T, _])) $ t1 $ t2 =>
+        fold (do_term_or_formula T) [t1, t2]
+      | @{const Trueprop} $ t1 => do_formula pos t1
+      | @{const Not} $ t1 => do_formula (flip pos) t1
+      | Const (@{const_name All}, _) $ Abs (_, T, t') =>
+        do_quantifier (pos = SOME false) T t'
+      | Const (@{const_name Ex}, _) $ Abs (_, T, t') =>
+        do_quantifier (pos = SOME true) T t'
+      | @{const HOL.conj} $ t1 $ t2 => fold (do_formula pos) [t1, t2]
+      | @{const HOL.disj} $ t1 $ t2 => fold (do_formula pos) [t1, t2]
+      | @{const HOL.implies} $ t1 $ t2 =>
+        do_formula (flip pos) t1 #> do_formula pos t2
+      | Const (@{const_name HOL.eq}, Type (_, [T, _])) $ t1 $ t2 =>
+        fold (do_term_or_formula T) [t1, t2]
+      | Const (@{const_name If}, Type (_, [_, Type (_, [T, _])]))
+        $ t1 $ t2 $ t3 =>
+        do_formula NONE t1 #> fold (do_term_or_formula T) [t2, t3]
+      | Const (@{const_name Ex1}, _) $ Abs (_, T, t') =>
+        do_quantifier (is_some pos) T t'
+      | Const (@{const_name Ball}, _) $ t1 $ Abs (_, T, t') =>
+        do_quantifier (pos = SOME false) T
+                      (HOLogic.mk_imp (incr_boundvars 1 t1 $ Bound 0, t'))
+      | Const (@{const_name Bex}, _) $ t1 $ Abs (_, T, t') =>
+        do_quantifier (pos = SOME true) T
+                      (HOLogic.mk_conj (incr_boundvars 1 t1 $ Bound 0, t'))
+      | (t0 as Const (_, @{typ bool})) $ t1 =>
+        do_term t0 #> do_formula pos t1  (* theory constant *)
+      | _ => do_term t
+  in Symtab.empty |> fold (do_formula pos) ts end
+
+(*Inserts a dummy "constant" referring to the theory name, so that relevance
+  takes the given theory into account.*)
+fun theory_const_prop_of th =
+  if exists (curry (op <) 0.0) [!theory_const_rel_weight,
+                                !theory_const_irrel_weight] then
+    let
+      val name = Context.theory_name (theory_of_thm th)
+      val t = Const (name ^ theory_const_suffix, @{typ bool})
+    in t $ prop_of th end
+  else
+    prop_of th
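+(* e.g. a theorem from theory "List" is tagged with the dummy constant
+   "List. 1" (of type bool), provided one of the theory constant weights is
+   positive. *)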
+
+(**** Constant / Type Frequencies ****)
+
+(* A two-dimensional symbol table counts frequencies of constants. It's keyed
+   first by constant name and second by its list of type instantiations. For the
+   latter, we need a linear ordering on "pattern list". *)
+
+fun pattern_ord p =
+  case p of
+    (PVar, PVar) => EQUAL
+  | (PVar, PApp _) => LESS
+  | (PApp _, PVar) => GREATER
+  | (PApp q1, PApp q2) =>
+    prod_ord fast_string_ord (dict_ord pattern_ord) (q1, q2)
+fun ptype_ord (PType p, PType q) =
+  prod_ord (dict_ord pattern_ord) int_ord (swap p, swap q)
+
+structure PType_Tab = Table(type key = ptype val ord = ptype_ord)
+
+fun count_axiom_consts thy =
+  let
+    fun do_const const (s, T) ts =
+      (* Two-dimensional table update: a constant name maps to its type
+         instantiations, which map to a count. *)
+      PType_Tab.map_default (rich_ptype thy const (s, T) ts, 0) (Integer.add 1)
+      |> Symtab.map_default (s, PType_Tab.empty)
+      #> fold do_term ts
+    and do_term t =
+      case strip_comb t of
+        (Const x, ts) => do_const true x ts
+      | (Free x, ts) => do_const false x ts
+      | (Abs (_, _, t'), ts) => fold do_term (t' :: ts)
+      | (_, ts) => fold do_term ts
+  in do_term o theory_const_prop_of o snd end
+
+
+(**** Actual Filtering Code ****)
+
+fun pow_int x 0 = 1.0
+  | pow_int x 1 = x
+  | pow_int x n = if n > 0 then x * pow_int x (n - 1) else pow_int x (n + 1) / x
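+(* e.g. pow_int 2.0 3 = 8.0 and pow_int 2.0 ~2 = 0.25 *)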
+
+(*The frequency of a constant is the sum of those of all instances of its type.*)
+fun pconst_freq match const_tab (c, ps) =
+  PType_Tab.fold (fn (qs, m) => match (ps, qs) ? Integer.add m)
+                 (the (Symtab.lookup const_tab c)) 0
+
+
+(* A surprising number of theorems contain only a few significant constants.
+   These include all induction rules, and other general theorems. *)
+
+(* "log" seems best in practice. A constant function of one ignores the constant
+   frequencies. Rare constants give more points if they are relevant than less
+   rare ones. *)
+fun rel_weight_for order freq = 1.0 + 2.0 / Math.ln (Real.fromInt freq + 1.0)
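+(* e.g. a relevant constant of frequency 1 weighs about 3.9, one of
+   frequency 100 only about 1.4. *)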
+
+(* Irrelevant constants are treated differently. We assign lower penalties to
+   very rare constants and to very common ones -- the former because they
+   cannot lead to the inclusion of too many new facts, and the latter because
+   they are so common as to be of little interest. *)
+fun irrel_weight_for order freq =
+  let val (k, x) = !worse_irrel_freq |> `Real.ceil in
+    (if freq < k then Math.ln (Real.fromInt (freq + 1)) / Math.ln x
+     else rel_weight_for order freq / rel_weight_for order k)
+    * pow_int (!higher_order_irrel_weight) (order - 1)
+  end
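+(* e.g. with the default "worse_irrel_freq" of 100, a first-order irrelevant
+   constant of frequency 9 weighs ln 10 / ln 100 = 0.5. *)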
+
+(* Computes a constant's weight, as determined by its frequency. *)
+fun generic_pconst_weight abs_weight skolem_weight theory_const_weight
+                          weight_for f const_tab (c as (s, PType (m, _))) =
+  if s = abs_name then abs_weight
+  else if String.isPrefix skolem_prefix s then skolem_weight
+  else if String.isSuffix theory_const_suffix s then theory_const_weight
+  else weight_for m (pconst_freq (match_ptype o f) const_tab c)
+
+fun rel_pconst_weight const_tab =
+  generic_pconst_weight (!abs_rel_weight) 0.0 (!theory_const_rel_weight)
+                        rel_weight_for I const_tab
+fun irrel_pconst_weight const_tab =
+  generic_pconst_weight (!abs_irrel_weight) (!skolem_irrel_weight)
+                    (!theory_const_irrel_weight) irrel_weight_for swap const_tab
+
+fun locality_bonus General = 0.0
+  | locality_bonus Intro = !intro_bonus
+  | locality_bonus Elim = !elim_bonus
+  | locality_bonus Simp = !simp_bonus
+  | locality_bonus Local = !local_bonus
+  | locality_bonus Assum = !assum_bonus
+  | locality_bonus Chained = !chained_bonus
+
+fun axiom_weight loc const_tab relevant_consts axiom_consts =
+  case axiom_consts |> List.partition (pconst_hyper_mem I relevant_consts)
+                    ||> filter_out (pconst_hyper_mem swap relevant_consts) of
+    ([], _) => 0.0
+  | (rel, irrel) =>
+    let
+      val irrel = irrel |> filter_out (pconst_mem swap rel)
+      val rel_weight =
+        0.0 |> fold (curry (op +) o rel_pconst_weight const_tab) rel
+      val irrel_weight =
+        ~ (locality_bonus loc)
+        |> fold (curry (op +) o irrel_pconst_weight const_tab) irrel
+      val res = rel_weight / (rel_weight + irrel_weight)
+    in if Real.isFinite res then res else 0.0 end
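+(* e.g. an axiom whose relevant constants weigh 4.0 in total and whose
+   irrelevant ones weigh 1.0 (after the locality bonus) scores
+   4.0 / 5.0 = 0.8. *)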
+
+(* FIXME: experiment
+fun debug_axiom_weight loc const_tab relevant_consts axiom_consts =
+  case axiom_consts |> List.partition (pconst_hyper_mem I relevant_consts)
+                    ||> filter_out (pconst_hyper_mem swap relevant_consts) of
+    ([], _) => 0.0
+  | (rel, irrel) =>
+    let
+      val irrel = irrel |> filter_out (pconst_mem swap rel)
+      val rels_weight =
+        0.0 |> fold (curry (op +) o rel_pconst_weight const_tab) rel
+      val irrels_weight =
+        ~ (locality_bonus loc)
+        |> fold (curry (op +) o irrel_pconst_weight const_tab) irrel
+val _ = tracing (PolyML.makestring ("REL: ", map (`(rel_pconst_weight const_tab)) rel))
+val _ = tracing (PolyML.makestring ("IRREL: ", map (`(irrel_pconst_weight const_tab)) irrel))
+      val res = rels_weight / (rels_weight + irrels_weight)
+    in if Real.isFinite res then res else 0.0 end
+*)
+
+fun pconsts_in_axiom thy t =
+  Symtab.fold (fn (s, pss) => fold (cons o pair s) pss)
+              (pconsts_in_terms thy true (SOME true) [t]) []
+fun pair_consts_axiom thy axiom =
+  case axiom |> snd |> theory_const_prop_of |> pconsts_in_axiom thy of
+    [] => NONE
+  | consts => SOME ((axiom, consts), NONE)
+
+type annotated_thm =
+  (((unit -> string) * locality) * thm) * (string * ptype) list
+
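+(* Keep all candidates whose weight is nearly perfect (> 0.99999), plus a
+   bounded number of imperfect ones, up to the remaining maximum. *)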
+fun take_most_relevant max_relevant remaining_max
+                       (candidates : (annotated_thm * real) list) =
+  let
+    val max_imperfect =
+      Real.ceil (Math.pow (!max_imperfect,
+                    Math.pow (Real.fromInt remaining_max
+                              / Real.fromInt max_relevant, !max_imperfect_exp)))
+    val (perfect, imperfect) =
+      candidates |> sort (Real.compare o swap o pairself snd)
+                 |> take_prefix (fn (_, w) => w > 0.99999)
+    val ((accepts, more_rejects), rejects) =
+      chop max_imperfect imperfect |>> append perfect |>> chop remaining_max
+  in
+    trace_msg (fn () =>
+        "Actually passed (" ^ Int.toString (length accepts) ^ " of " ^
+        Int.toString (length candidates) ^ "): " ^
+        (accepts |> map (fn ((((name, _), _), _), weight) =>
+                            name () ^ " [" ^ Real.toString weight ^ "]")
+                 |> commas));
+    (accepts, more_rejects @ rejects)
+  end
+
+fun if_empty_replace_with_locality thy axioms loc tab =
+  if Symtab.is_empty tab then
+    pconsts_in_terms thy false (SOME false)
+        (map_filter (fn ((_, loc'), th) =>
+                        if loc' = loc then SOME (prop_of th) else NONE) axioms)
+  else
+    tab
+
+fun relevance_filter ctxt threshold0 decay max_relevant
+                     ({add, del, ...} : relevance_override) axioms goal_ts =
+  let
+    val thy = ProofContext.theory_of ctxt
+    val const_tab = fold (count_axiom_consts thy) axioms Symtab.empty
+    val goal_const_tab =
+      pconsts_in_terms thy false (SOME false) goal_ts
+      |> fold (if_empty_replace_with_locality thy axioms)
+              [Chained, Assum, Local]
+    val add_ths = Attrib.eval_thms ctxt add
+    val del_ths = Attrib.eval_thms ctxt del
+    fun iter j remaining_max threshold rel_const_tab hopeless hopeful =
+      let
+        fun game_over rejects =
+          (* Add "add:" facts. *)
+          if null add_ths then
+            []
+          else
+            map_filter (fn ((p as (_, th), _), _) =>
+                           if member Thm.eq_thm add_ths th then SOME p
+                           else NONE) rejects
+        fun relevant [] rejects [] =
+            (* Nothing has been added this iteration. *)
+            if j = 0 andalso threshold >= !ridiculous_threshold then
+              (* First iteration? Try again. *)
+              iter 0 max_relevant (threshold / !threshold_divisor) rel_const_tab
+                   hopeless hopeful
+            else
+              game_over (rejects @ hopeless)
+          | relevant candidates rejects [] =
+            let
+              val (accepts, more_rejects) =
+                take_most_relevant max_relevant remaining_max candidates
+              val rel_const_tab' =
+                rel_const_tab
+                |> fold (add_pconst_to_table false) (maps (snd o fst) accepts)
+              fun is_dirty (c, _) =
+                Symtab.lookup rel_const_tab' c <> Symtab.lookup rel_const_tab c
+              val (hopeful_rejects, hopeless_rejects) =
+                 (rejects @ hopeless, ([], []))
+                 |-> fold (fn (ax as (_, consts), old_weight) =>
+                              if exists is_dirty consts then
+                                apfst (cons (ax, NONE))
+                              else
+                                apsnd (cons (ax, old_weight)))
+                 |>> append (more_rejects
+                             |> map (fn (ax as (_, consts), old_weight) =>
+                                        (ax, if exists is_dirty consts then NONE
+                                             else SOME old_weight)))
+              val threshold =
+                1.0 - (1.0 - threshold)
+                      * Math.pow (decay, Real.fromInt (length accepts))
+              val remaining_max = remaining_max - length accepts
+            in
+              trace_msg (fn () => "New or updated constants: " ^
+                  commas (rel_const_tab' |> Symtab.dest
+                          |> subtract (op =) (rel_const_tab |> Symtab.dest)
+                          |> map string_for_hyper_pconst));
+              map (fst o fst) accepts @
+              (if remaining_max = 0 then
+                 game_over (hopeful_rejects @ map (apsnd SOME) hopeless_rejects)
+               else
+                 iter (j + 1) remaining_max threshold rel_const_tab'
+                      hopeless_rejects hopeful_rejects)
+            end
+          | relevant candidates rejects
+                     (((ax as (((_, loc), th), axiom_consts)), cached_weight)
+                      :: hopeful) =
+            let
+              val weight =
+                case cached_weight of
+                  SOME w => w
+                | NONE => axiom_weight loc const_tab rel_const_tab axiom_consts
+(* FIXME: experiment
+val name = fst (fst (fst ax)) ()
+val _ = if String.isSubstring "positive_minus" name orelse String.isSubstring "not_exp_le_zero" name then
+tracing ("*** " ^ name ^ PolyML.makestring (debug_axiom_weight loc const_tab rel_const_tab axiom_consts))
+else
+()
+*)
+            in
+              if weight >= threshold then
+                relevant ((ax, weight) :: candidates) rejects hopeful
+              else
+                relevant candidates ((ax, weight) :: rejects) hopeful
+            end
+        in
+          trace_msg (fn () =>
+              "ITERATION " ^ string_of_int j ^ ": current threshold: " ^
+              Real.toString threshold ^ ", constants: " ^
+              commas (rel_const_tab |> Symtab.dest
+                      |> filter (curry (op <>) [] o snd)
+                      |> map string_for_hyper_pconst));
+          relevant [] [] hopeful
+        end
+  in
+    axioms |> filter_out (member Thm.eq_thm del_ths o snd)
+           |> map_filter (pair_consts_axiom thy)
+           |> iter 0 max_relevant threshold0 goal_const_tab []
+           |> tap (fn res => trace_msg (fn () =>
+                                "Total relevant: " ^ Int.toString (length res)))
+  end
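+
+(* Illustration (hypothetical decay, not from the original sources): with a
+   starting threshold of 0.45 and a decay of 0.9, an iteration that accepts
+   3 facts raises the threshold to 1.0 - 0.55 * Math.pow (0.9, 3.0), about
+   0.60, so later iterations demand increasingly relevant facts.  If the
+   first iteration accepts nothing and the threshold is still above
+   ridiculous_threshold, the threshold is divided by threshold_divisor and
+   the iteration is retried. *)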
+
+
+(***************************************************************)
+(* Retrieving and filtering lemmas                             *)
+(***************************************************************)
+
+(*** retrieve lemmas and filter them ***)
+
+(*Reject theorems with names like "List.filter.filter_list_def" or
+  "Accessible_Part.acc.defs", as these are definitions arising from packages.*)
+fun is_package_def a =
+  let val names = Long_Name.explode a
+  in
+    (length names > 2 andalso
+     not (hd names = "local") andalso
+     String.isSuffix "_def" a) orelse String.isSuffix "_defs" a
+  end;
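+
+(* Example: "List.filter.filter_list_def" explodes into three components, the
+   head is not "local", and the name ends in "_def", so it is rejected; any
+   name ending in "_defs" is rejected regardless of its length. *)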
+
+fun mk_fact_table f xs =
+  fold (Termtab.update o `(prop_of o f)) xs Termtab.empty
+fun uniquify xs = Termtab.fold (cons o snd) (mk_fact_table snd xs) []
+
+(* FIXME: put other record thms here, or declare as "no_atp" *)
+val multi_base_blacklist =
+  ["defs", "select_defs", "update_defs", "induct", "inducts", "split", "splits",
+   "split_asm", "cases", "ext_cases", "eq.simps", "eq.refl", "nchotomy",
+   "case_cong", "weak_case_cong"]
+  |> map (prefix ".")
+
+val max_lambda_nesting = 3
+
+fun term_has_too_many_lambdas max (t1 $ t2) =
+    exists (term_has_too_many_lambdas max) [t1, t2]
+  | term_has_too_many_lambdas max (Abs (_, _, t)) =
+    max = 0 orelse term_has_too_many_lambdas (max - 1) t
+  | term_has_too_many_lambdas _ _ = false
+
+(* Don't count nested lambdas at the level of formulas, since they are
+   quantifiers. *)
+fun formula_has_too_many_lambdas Ts (Abs (_, T, t)) =
+    formula_has_too_many_lambdas (T :: Ts) t
+  | formula_has_too_many_lambdas Ts t =
+    if is_formula_type (fastype_of1 (Ts, t)) then
+      exists (formula_has_too_many_lambdas Ts) (#2 (strip_comb t))
+    else
+      term_has_too_many_lambdas max_lambda_nesting t
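+
+(* Illustration (not from the original sources): term_has_too_many_lambdas 3
+   returns true for any subterm with more than three lambdas nested along a
+   path, e.g. "%x y z w. g x y z w", while formula_has_too_many_lambdas first
+   descends through quantifiers and connectives, so the outer bindings of
+   "ALL x y z w. P x y z w" are not counted against the limit. *)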
+
+(* The max apply depth of any "metis" call in "Metis_Examples" (on 2007-10-31)
+   was 11. *)
+val max_apply_depth = 15
+
+fun apply_depth (f $ t) = Int.max (apply_depth f, apply_depth t + 1)
+  | apply_depth (Abs (_, _, t)) = apply_depth t
+  | apply_depth _ = 0
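+
+(* Illustration (not from the original sources): apply_depth measures how
+   deeply applications are nested in argument position, so "f x y" has depth
+   1 and "f (g x)" has depth 2; a term whose arguments are nested more than
+   15 levels deep is flagged by is_formula_too_complex below. *)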
+
+fun is_formula_too_complex t =
+  apply_depth t > max_apply_depth orelse formula_has_too_many_lambdas [] t
+
+val exists_sledgehammer_const =
+  exists_Const (fn (s, _) => String.isPrefix sledgehammer_prefix s)
+
+(* FIXME: make more reliable *)
+val exists_low_level_class_const =
+  exists_Const (fn (s, _) =>
+     String.isSubstring (Long_Name.separator ^ "class" ^ Long_Name.separator) s)
+
+fun is_metastrange_theorem th =
+  case head_of (concl_of th) of
+      Const (a, _) => (a <> @{const_name Trueprop} andalso
+                       a <> @{const_name "=="})
+    | _ => false
+
+fun is_that_fact th =
+  String.isSuffix (Long_Name.separator ^ Obtain.thatN) (Thm.get_name_hint th)
+  andalso exists_subterm (fn Free (s, _) => s = Name.skolem Auto_Bind.thesisN
+                           | _ => false) (prop_of th)
+
+val type_has_top_sort =
+  exists_subtype (fn TFree (_, []) => true | TVar (_, []) => true | _ => false)
+
+(**** Predicates to detect unwanted facts (prolific or likely to cause
+      unsoundness) ****)
+
+(* "Too general" means a positive equality literal with a variable X as one
+   operand, where X does not occur properly in the other operand.  This rules
+   out clearly inconsistent facts such as X = a | X = b, though it by no means
+   guarantees soundness. *)
+
+(* Unwanted equalities are those in which one operand is a (bound or
+   schematic) variable that does not properly occur in the other operand. *)
+val is_exhaustive_finite =
+  let
+    fun is_bad_equal (Var z) t =
+        not (exists_subterm (fn Var z' => z = z' | _ => false) t)
+      | is_bad_equal (Bound j) t = not (loose_bvar1 (t, j))
+      | is_bad_equal _ _ = false
+    fun do_equals t1 t2 = is_bad_equal t1 t2 orelse is_bad_equal t2 t1
+    fun do_formula pos t =
+      case (pos, t) of
+        (_, @{const Trueprop} $ t1) => do_formula pos t1
+      | (true, Const (@{const_name all}, _) $ Abs (_, _, t')) =>
+        do_formula pos t'
+      | (true, Const (@{const_name All}, _) $ Abs (_, _, t')) =>
+        do_formula pos t'
+      | (false, Const (@{const_name Ex}, _) $ Abs (_, _, t')) =>
+        do_formula pos t'
+      | (_, @{const "==>"} $ t1 $ t2) =>
+        do_formula (not pos) t1 andalso
+        (t2 = @{prop False} orelse do_formula pos t2)
+      | (_, @{const HOL.implies} $ t1 $ t2) =>
+        do_formula (not pos) t1 andalso
+        (t2 = @{const False} orelse do_formula pos t2)
+      | (_, @{const Not} $ t1) => do_formula (not pos) t1
+      | (true, @{const HOL.disj} $ t1 $ t2) => forall (do_formula pos) [t1, t2]
+      | (false, @{const HOL.conj} $ t1 $ t2) => forall (do_formula pos) [t1, t2]
+      | (true, Const (@{const_name HOL.eq}, _) $ t1 $ t2) => do_equals t1 t2
+      | (true, Const (@{const_name "=="}, _) $ t1 $ t2) => do_equals t1 t2
+      | _ => false
+  in do_formula true end
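+
+(* Illustration (not from the original sources): for "ALL x. x = A | x = B",
+   do_formula strips the quantifier, reaches the disjunction positively, and
+   each disjunct equates the bound variable with a term in which it does not
+   occur, so the fact is classified as an exhaustiveness fact and counts as
+   dangerous below (unless full types are used). *)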
+
+fun has_bound_or_var_of_type tycons =
+  exists_subterm (fn Var (_, Type (s, _)) => member (op =) tycons s
+                   | Abs (_, Type (s, _), _) => member (op =) tycons s
+                   | _ => false)
+
+(* Facts are forbidden to contain variables of these types. The typical reason
+   is that they lead to unsoundness. Note that "unit" satisfies numerous
+   equations like "?x = ()". The resulting clauses will have no type constraint,
+   yielding false proofs. Even "bool" leads to many unsound proofs, though only
+   for higher-order problems. *)
+val dangerous_types = [@{type_name unit}, @{type_name bool}, @{type_name prop}];
+
+(* Facts containing variables of type "unit" or "bool" or of the form
+   "ALL x. x = A | x = B | x = C" are likely to lead to unsound proofs if types
+   are omitted. *)
+fun is_dangerous_term full_types t =
+  not full_types andalso
+  let val t = transform_elim_term t in
+    has_bound_or_var_of_type dangerous_types t orelse
+    is_exhaustive_finite t
+  end
+
+fun is_theorem_bad_for_atps full_types thm =
+  let val t = prop_of thm in
+    is_formula_too_complex t orelse exists_type type_has_top_sort t orelse
+    is_dangerous_term full_types t orelse exists_sledgehammer_const t orelse
+    exists_low_level_class_const t orelse is_metastrange_theorem thm orelse
+    is_that_fact thm
+  end
+
+fun clasimpset_rules_of ctxt =
+  let
+    val {safeIs, safeEs, hazIs, hazEs, ...} = ctxt |> claset_of |> rep_cs
+    val intros = safeIs @ hazIs
+    val elims = map Classical.classical_rule (safeEs @ hazEs)
+    val simps = ctxt |> simpset_of |> dest_ss |> #simps |> map snd
+  in (mk_fact_table I intros, mk_fact_table I elims, mk_fact_table I simps) end
+
+fun all_name_thms_pairs ctxt reserved full_types add_ths chained_ths =
+  let
+    val thy = ProofContext.theory_of ctxt
+    val global_facts = PureThy.facts_of thy
+    val local_facts = ProofContext.facts_of ctxt
+    val named_locals = local_facts |> Facts.dest_static []
+    val assms = Assumption.all_assms_of ctxt
+    fun is_assum th = exists (fn ct => prop_of th aconv term_of ct) assms
+    val is_chained = member Thm.eq_thm chained_ths
+    val (intros, elims, simps) =
+      if exists (curry (op <) 0.0) [!intro_bonus, !elim_bonus, !simp_bonus] then
+        clasimpset_rules_of ctxt
+      else
+        (Termtab.empty, Termtab.empty, Termtab.empty)
+    (* Unnamed nonchained formulas with schematic variables are omitted, because
+       they are rejected by the backticks (`...`) parser for some reason. *)
+    fun is_good_unnamed_local th =
+      not (Thm.has_name_hint th) andalso
+      (not (exists_subterm is_Var (prop_of th)) orelse (is_chained th)) andalso
+      forall (fn (_, ths) => not (member Thm.eq_thm ths th)) named_locals
+    val unnamed_locals =
+      union Thm.eq_thm (Facts.props local_facts) chained_ths
+      |> filter is_good_unnamed_local |> map (pair "" o single)
+    val full_space =
+      Name_Space.merge (Facts.space_of global_facts, Facts.space_of local_facts)
+    fun add_facts global foldx facts =
+      foldx (fn (name0, ths) =>
+        if name0 <> "" andalso
+           forall (not o member Thm.eq_thm add_ths) ths andalso
+           (Facts.is_concealed facts name0 orelse
+            (respect_no_atp andalso is_package_def name0) orelse
+            exists (fn s => String.isSuffix s name0) multi_base_blacklist orelse
+            String.isSuffix "_def_raw" (* FIXME: crude hack *) name0) then
+          I
+        else
+          let
+            val multi = length ths > 1
+            fun backquotify th =
+              "`" ^ Print_Mode.setmp [Print_Mode.input]
+                                 (Syntax.string_of_term ctxt) (prop_of th) ^ "`"
+              |> String.translate (fn c => if Char.isPrint c then str c else "")
+              |> simplify_spaces
+            fun check_thms a =
+              case try (ProofContext.get_thms ctxt) a of
+                NONE => false
+              | SOME ths' => Thm.eq_thms (ths, ths')
+          in
+            pair 1
+            #> fold (fn th => fn (j, rest) =>
+                 (j + 1,
+                  if is_theorem_bad_for_atps full_types th andalso
+                     not (member Thm.eq_thm add_ths th) then
+                    rest
+                  else
+                    (((fn () =>
+                          if name0 = "" then
+                            th |> backquotify
+                          else
+                            let
+                              val name1 = Facts.extern facts name0
+                              val name2 = Name_Space.extern full_space name0
+                            in
+                              case find_first check_thms [name1, name2, name0] of
+                                SOME name => repair_name reserved multi j name
+                              | NONE => ""
+                            end),
+                      let val t = prop_of th in
+                        if is_chained th then Chained
+                        else if global then
+                          if Termtab.defined intros t then Intro
+                          else if Termtab.defined elims t then Elim
+                          else if Termtab.defined simps t then Simp
+                          else General
+                        else
+                          if is_assum th then Assum else Local
+                      end),
+                      (multi, th)) :: rest)) ths
+            #> snd
+          end)
+  in
+    [] |> add_facts false fold local_facts (unnamed_locals @ named_locals)
+       |> add_facts true Facts.fold_static global_facts global_facts
+  end
+
+(* The single-name theorems go after the multiple-name ones, so that single
+   names are preferred when both are available. *)
+fun name_thm_pairs ctxt respect_no_atp =
+  List.partition (fst o snd) #> op @ #> map (apsnd snd)
+  #> respect_no_atp ? filter_out (No_ATPs.member ctxt o snd)
+
+(***************************************************************)
+(* ATP invocation methods setup                                *)
+(***************************************************************)
+
+fun relevant_facts ctxt full_types (threshold0, threshold1) max_relevant
+                   (relevance_override as {add, del, only}) chained_ths hyp_ts
+                   concl_t =
+  let
+    val decay = Math.pow ((1.0 - threshold1) / (1.0 - threshold0),
+                          1.0 / Real.fromInt (max_relevant + 1))
+    val add_ths = Attrib.eval_thms ctxt add
+    val reserved = reserved_isar_keyword_table ()
+    val axioms =
+      (if only then
+         maps (map (fn ((name, loc), th) => ((K name, loc), (true, th)))
+               o name_thm_pairs_from_ref ctxt reserved chained_ths) add
+       else
+         all_name_thms_pairs ctxt reserved full_types add_ths chained_ths)
+      |> name_thm_pairs ctxt (respect_no_atp andalso not only)
+      |> uniquify
+  in
+    trace_msg (fn () => "Considering " ^ Int.toString (length axioms) ^
+                        " theorems");
+    (if threshold0 > 1.0 orelse threshold0 > threshold1 then
+       []
+     else if threshold0 < 0.0 then
+       axioms
+     else
+       relevance_filter ctxt threshold0 decay max_relevant relevance_override
+                        axioms (concl_t :: hyp_ts))
+    |> map (apfst (apfst (fn f => f ())))
+  end
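+
+(* The decay factor is chosen so that, if facts were accepted one at a time,
+   the iteration threshold 1.0 - (1.0 - threshold0) * decay ^ k would reach
+   threshold1 exactly after max_relevant + 1 acceptances.  For instance
+   (hypothetical max_relevant), thresholds of 0.45 and 0.85 with
+   max_relevant = 300 give decay = (0.15 / 0.55) ^ (1 / 301), about 0.996. *)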
+
+end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -20,7 +20,7 @@
 open ATP_Systems
 open Sledgehammer_Util
 open Sledgehammer
-open Sledgehammer_Fact_Minimize
+open Sledgehammer_Minimize
 
 (** Sledgehammer commands **)
 
@@ -63,25 +63,25 @@
 type raw_param = string * string list
 
 val default_default_params =
-  [("debug", "false"),
+  [("blocking", "false"),
+   ("debug", "false"),
    ("verbose", "false"),
    ("overlord", "false"),
    ("explicit_apply", "false"),
    ("relevance_thresholds", "45 85"),
    ("max_relevant", "smart"),
-   ("theory_relevant", "smart"),
    ("isar_proof", "false"),
    ("isar_shrink_factor", "1")]
 
 val alias_params =
   [("atp", "atps")]
 val negated_alias_params =
-  [("no_debug", "debug"),
+  [("non_blocking", "blocking"),
+   ("no_debug", "debug"),
    ("quiet", "verbose"),
    ("no_overlord", "overlord"),
    ("partial_types", "full_types"),
    ("implicit_apply", "explicit_apply"),
-   ("theory_irrelevant", "theory_relevant"),
    ("no_isar_proof", "isar_proof")]
 
 val params_for_minimize =
@@ -94,7 +94,7 @@
   AList.defined (op =) default_default_params s orelse
   AList.defined (op =) alias_params s orelse
   AList.defined (op =) negated_alias_params s orelse
-  member (op =) property_dependent_params s
+  member (op =) property_dependent_params s orelse s = "expect"
 
 fun check_raw_param (s, _) =
   if is_known_raw_param s then ()
@@ -167,6 +167,7 @@
       case lookup name of
         SOME "smart" => NONE
       | _ => SOME (lookup_int name)
+    val blocking = lookup_bool "blocking"
     val debug = lookup_bool "debug"
     val verbose = debug orelse lookup_bool "verbose"
     val overlord = lookup_bool "overlord"
@@ -177,16 +178,16 @@
       lookup_int_pair "relevance_thresholds"
       |> pairself (fn n => 0.01 * Real.fromInt n)
     val max_relevant = lookup_int_option "max_relevant"
-    val theory_relevant = lookup_bool_option "theory_relevant"
     val isar_proof = lookup_bool "isar_proof"
     val isar_shrink_factor = Int.max (1, lookup_int "isar_shrink_factor")
     val timeout = lookup_time "timeout"
+    val expect = lookup_string "expect"
   in
-    {debug = debug, verbose = verbose, overlord = overlord, atps = atps,
-     full_types = full_types, explicit_apply = explicit_apply,
+    {blocking = blocking, debug = debug, verbose = verbose, overlord = overlord,
+     atps = atps, full_types = full_types, explicit_apply = explicit_apply,
      relevance_thresholds = relevance_thresholds, max_relevant = max_relevant,
-     theory_relevant = theory_relevant, isar_proof = isar_proof,
-     isar_shrink_factor = isar_shrink_factor, timeout = timeout}
+     isar_proof = isar_proof, isar_shrink_factor = isar_shrink_factor,
+     timeout = timeout, expect = expect}
   end
 
 fun get_params thy = extract_params (default_raw_params thy)
@@ -267,10 +268,7 @@
 val parse_param = parse_key -- Scan.optional (Parse.$$$ "=" |-- parse_value) []
 val parse_params = Scan.optional (Args.bracks (Parse.list parse_param)) []
 val parse_fact_refs =
-  Scan.repeat1 (Scan.unless (Parse.name -- Args.colon)
-                            (Parse.xname -- Scan.option Attrib.thm_sel)
-                >> (fn (name, interval) =>
-                       Facts.Named ((name, Position.none), interval)))
+  Scan.repeat1 (Scan.unless (Parse.name -- Args.colon) Parse_Spec.xthm)
 val parse_relevance_chunk =
   (Args.add |-- Args.colon |-- parse_fact_refs >> add_to_relevance_override)
   || (Args.del |-- Args.colon |-- parse_fact_refs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_minimize.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -0,0 +1,163 @@
+(*  Title:      HOL/Tools/Sledgehammer/sledgehammer_minimize.ML
+    Author:     Philipp Meyer, TU Muenchen
+    Author:     Jasmin Blanchette, TU Muenchen
+
+Minimization of the theorem list for Metis using automatic theorem provers.
+*)
+
+signature SLEDGEHAMMER_MINIMIZE =
+sig
+  type locality = Sledgehammer_Filter.locality
+  type params = Sledgehammer.params
+
+  val minimize_theorems :
+    params -> int -> int -> Proof.state -> ((string * locality) * thm list) list
+    -> ((string * locality) * thm list) list option * string
+  val run_minimize :
+    params -> int -> (Facts.ref * Attrib.src list) list -> Proof.state -> unit
+end;
+
+structure Sledgehammer_Minimize : SLEDGEHAMMER_MINIMIZE =
+struct
+
+open ATP_Systems
+open Sledgehammer_Util
+open Sledgehammer_Filter
+open Sledgehammer_Translate
+open Sledgehammer_Reconstruct
+open Sledgehammer
+
+(* wrapper for calling external prover *)
+
+fun string_for_failure Unprovable = "Unprovable."
+  | string_for_failure TimedOut = "Timed out."
+  | string_for_failure _ = "Unknown error."
+
+fun n_theorems names =
+  let val n = length names in
+    string_of_int n ^ " theorem" ^ plural_s n ^
+    (if n > 0 then
+       ": " ^ (names |> map fst
+                     |> sort_distinct string_ord |> space_implode " ")
+     else
+       "")
+  end
+
+fun test_theorems ({debug, verbose, overlord, atps, full_types, isar_proof,
+                    isar_shrink_factor, ...} : params)
+                  (prover : prover) explicit_apply timeout subgoal state
+                  axioms =
+  let
+    val _ =
+      priority ("Testing " ^ n_theorems (map fst axioms) ^ "...")
+    val params =
+      {blocking = true, debug = debug, verbose = verbose, overlord = overlord,
+       atps = atps, full_types = full_types, explicit_apply = explicit_apply,
+       relevance_thresholds = (1.01, 1.01),
+       max_relevant = SOME 65536 (* a large number *), isar_proof = isar_proof,
+       isar_shrink_factor = isar_shrink_factor, timeout = timeout, expect = ""}
+    val axioms = maps (fn (n, ths) => map (pair n) ths) axioms
+    val {context = ctxt, goal, ...} = Proof.goal state
+    val problem =
+      {ctxt = ctxt, goal = goal, subgoal = subgoal,
+       axioms = map (prepare_axiom ctxt) axioms}
+    val result as {outcome, used_thm_names, ...} = prover params (K "") problem
+  in
+    priority (case outcome of
+                NONE =>
+                if length used_thm_names = length axioms then
+                  "Found proof."
+                else
+                  "Found proof with " ^ n_theorems used_thm_names ^ "."
+              | SOME failure => string_for_failure failure);
+    result
+  end
+
+(* minimization of thms *)
+
+fun filter_used_facts used = filter (member (op =) used o fst)
+
+fun sublinear_minimize _ [] p = p
+  | sublinear_minimize test (x :: xs) (seen, result) =
+    case test (xs @ seen) of
+      result as {outcome = NONE, proof, used_thm_names, ...} : prover_result =>
+      sublinear_minimize test (filter_used_facts used_thm_names xs)
+                         (filter_used_facts used_thm_names seen, result)
+    | _ => sublinear_minimize test xs (x :: seen, result)
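+
+(* Illustrative trace (not from the original sources): minimizing [a, b, c]
+   when a proof needs only b.  First a is left out and [b, c] is tested; the
+   prover succeeds using only b, so the untested facts are pruned to [b].
+   Then b is left out and [] is tested, which fails, so b is kept.  The
+   result is ([b], last successful prover result). *)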
+
+(* Give the ATP some slack. The ATP gets further slack because the Sledgehammer
+   preprocessing time is included in the estimate below but isn't part of the
+   timeout. *)
+val fudge_msecs = 1000
+
+fun minimize_theorems {atps = [], ...} _ _ _ _ = error "No ATP is set."
+  | minimize_theorems (params as {debug, atps = atp :: _, full_types,
+                                  isar_proof, isar_shrink_factor, timeout, ...})
+                      i n state axioms =
+  let
+    val thy = Proof.theory_of state
+    val prover = get_prover_fun thy atp
+    val msecs = Time.toMilliseconds timeout
+    val _ = priority ("Sledgehammer minimize: ATP " ^ quote atp ^ ".")
+    val {context = ctxt, goal, ...} = Proof.goal state
+    val (_, hyp_ts, concl_t) = strip_subgoal goal i
+    val explicit_apply =
+      not (forall (Meson.is_fol_term thy)
+                  (concl_t :: hyp_ts @ maps (map prop_of o snd) axioms))
+    fun do_test timeout =
+      test_theorems params prover explicit_apply timeout i state
+    val timer = Timer.startRealTimer ()
+  in
+    (case do_test timeout axioms of
+       result as {outcome = NONE, pool, used_thm_names,
+                  conjecture_shape, ...} =>
+       let
+         val time = Timer.checkRealTimer timer
+         val new_timeout =
+           Int.min (msecs, Time.toMilliseconds time + fudge_msecs)
+           |> Time.fromMilliseconds
+         val (min_thms, {proof, axiom_names, ...}) =
+           sublinear_minimize (do_test new_timeout)
+               (filter_used_facts used_thm_names axioms) ([], result)
+         val n = length min_thms
+         val _ = priority (cat_lines
+           ["Minimized: " ^ string_of_int n ^ " theorem" ^ plural_s n] ^
+            (case length (filter (curry (op =) Chained o snd o fst) min_thms) of
+               0 => ""
+             | n => " (including " ^ Int.toString n ^ " chained)") ^ ".")
+       in
+         (SOME min_thms,
+          proof_text isar_proof
+              (pool, debug, isar_shrink_factor, ctxt, conjecture_shape)
+              (full_types, K "", proof, axiom_names, goal, i) |> fst)
+       end
+     | {outcome = SOME TimedOut, ...} =>
+       (NONE, "Timeout: You can increase the time limit using the \"timeout\" \
+              \option (e.g., \"timeout = " ^
+              string_of_int (10 + msecs div 1000) ^ " s\").")
+     | {outcome = SOME UnknownError, ...} =>
+       (* Failure sometimes means timeout, unfortunately. *)
+       (NONE, "Failure: No proof was found with the current time limit. You \
+              \can increase the time limit using the \"timeout\" \
+              \option (e.g., \"timeout = " ^
+              string_of_int (10 + msecs div 1000) ^ " s\").")
+     | {message, ...} => (NONE, "ATP error: " ^ message))
+    handle ERROR msg => (NONE, "Error: " ^ msg)
+  end
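+
+(* Example (not from the original sources): with a 30 s timeout and an
+   initial proof found after 2.5 s, each subsequent test in the minimization
+   loop runs with min (30000, 2500 + 1000) = 3500 ms, i.e. the measured proof
+   time plus the 1 s fudge factor. *)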
+
+fun run_minimize params i refs state =
+  let
+    val ctxt = Proof.context_of state
+    val reserved = reserved_isar_keyword_table ()
+    val chained_ths = #facts (Proof.goal state)
+    val axioms =
+      maps (map (apsnd single)
+            o name_thm_pairs_from_ref ctxt reserved chained_ths) refs
+  in
+    case subgoal_count state of
+      0 => priority "No subgoal!"
+    | n =>
+      (kill_atps (); priority (#2 (minimize_theorems params i n state axioms)))
+  end
+
+end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML	Thu Sep 02 20:29:12 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1038 +0,0 @@
-(*  Title:      HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML
-    Author:     Lawrence C. Paulson, Cambridge University Computer Laboratory
-    Author:     Claire Quigley, Cambridge University Computer Laboratory
-    Author:     Jasmin Blanchette, TU Muenchen
-
-Transfer of proofs from external provers.
-*)
-
-signature SLEDGEHAMMER_PROOF_RECONSTRUCT =
-sig
-  type locality = Sledgehammer_Fact_Filter.locality
-  type minimize_command = string list -> string
-  type metis_params =
-    bool * minimize_command * string * (string * locality) list vector * thm
-    * int
-  type isar_params =
-    string Symtab.table * bool * int * Proof.context * int list list
-  type text_result = string * (string * locality) list
-
-  val metis_proof_text : metis_params -> text_result
-  val isar_proof_text : isar_params -> metis_params -> text_result
-  val proof_text : bool -> isar_params -> metis_params -> text_result
-end;
-
-structure Sledgehammer_Proof_Reconstruct : SLEDGEHAMMER_PROOF_RECONSTRUCT =
-struct
-
-open ATP_Problem
-open Metis_Clauses
-open Sledgehammer_Util
-open Sledgehammer_Fact_Filter
-open Sledgehammer_Translate
-
-type minimize_command = string list -> string
-type metis_params =
-  bool * minimize_command * string * (string * locality) list vector * thm * int
-type isar_params =
-  string Symtab.table * bool * int * Proof.context * int list list
-type text_result = string * (string * locality) list
-
-(* Simple simplifications to ensure that sort annotations don't leave a trail of
-   spurious "True"s. *)
-fun s_not @{const False} = @{const True}
-  | s_not @{const True} = @{const False}
-  | s_not (@{const Not} $ t) = t
-  | s_not t = @{const Not} $ t
-fun s_conj (@{const True}, t2) = t2
-  | s_conj (t1, @{const True}) = t1
-  | s_conj p = HOLogic.mk_conj p
-fun s_disj (@{const False}, t2) = t2
-  | s_disj (t1, @{const False}) = t1
-  | s_disj p = HOLogic.mk_disj p
-fun s_imp (@{const True}, t2) = t2
-  | s_imp (t1, @{const False}) = s_not t1
-  | s_imp p = HOLogic.mk_imp p
-fun s_iff (@{const True}, t2) = t2
-  | s_iff (t1, @{const True}) = t1
-  | s_iff (t1, t2) = HOLogic.eq_const HOLogic.boolT $ t1 $ t2
-
-fun mk_anot (AConn (ANot, [phi])) = phi
-  | mk_anot phi = AConn (ANot, [phi])
-fun mk_aconn c (phi1, phi2) = AConn (c, [phi1, phi2])
-
-fun index_in_shape x = find_index (exists (curry (op =) x))
-fun is_axiom_number axiom_names num =
-  num > 0 andalso num <= Vector.length axiom_names andalso
-  not (null (Vector.sub (axiom_names, num - 1)))
-fun is_conjecture_number conjecture_shape num =
-  index_in_shape num conjecture_shape >= 0
-
-fun negate_term (Const (@{const_name All}, T) $ Abs (s, T', t')) =
-    Const (@{const_name Ex}, T) $ Abs (s, T', negate_term t')
-  | negate_term (Const (@{const_name Ex}, T) $ Abs (s, T', t')) =
-    Const (@{const_name All}, T) $ Abs (s, T', negate_term t')
-  | negate_term (@{const HOL.implies} $ t1 $ t2) =
-    @{const HOL.conj} $ t1 $ negate_term t2
-  | negate_term (@{const HOL.conj} $ t1 $ t2) =
-    @{const HOL.disj} $ negate_term t1 $ negate_term t2
-  | negate_term (@{const HOL.disj} $ t1 $ t2) =
-    @{const HOL.conj} $ negate_term t1 $ negate_term t2
-  | negate_term (@{const Not} $ t) = t
-  | negate_term t = @{const Not} $ t
-
-datatype ('a, 'b, 'c, 'd, 'e) raw_step =
-  Definition of 'a * 'b * 'c |
-  Inference of 'a * 'd * 'e list
-
-fun raw_step_number (Definition (num, _, _)) = num
-  | raw_step_number (Inference (num, _, _)) = num
-
-(**** PARSING OF TSTP FORMAT ****)
-
-(*Strings enclosed in single quotes, e.g. filenames*)
-val scan_quoted = $$ "'" |-- Scan.repeat (~$$ "'") --| $$ "'" >> implode;
-
-val scan_dollar_name =
-  Scan.repeat ($$ "$") -- Symbol.scan_id >> (fn (ss, s) => implode ss ^ s)
-
-fun repair_name _ "$true" = "c_True"
-  | repair_name _ "$false" = "c_False"
-  | repair_name _ "$$e" = "c_equal" (* seen in Vampire proofs *)
-  | repair_name _ "equal" = "c_equal" (* needed by SPASS? *)
-  | repair_name pool s =
-    case Symtab.lookup pool s of
-      SOME s' => s'
-    | NONE =>
-      if String.isPrefix "sQ" s andalso String.isSuffix "_eqProxy" s then
-        "c_equal" (* seen in Vampire proofs *)
-      else
-        s
-(* Generalized first-order terms, which include file names, numbers, etc. *)
-val parse_potential_integer =
-  (scan_dollar_name || scan_quoted) >> K NONE
-  || scan_integer >> SOME
-fun parse_annotation x =
-  ((parse_potential_integer ::: Scan.repeat ($$ " " |-- parse_potential_integer)
-    >> map_filter I) -- Scan.optional parse_annotation []
-     >> uncurry (union (op =))
-   || $$ "(" |-- parse_annotations --| $$ ")"
-   || $$ "[" |-- parse_annotations --| $$ "]") x
-and parse_annotations x =
-  (Scan.optional (parse_annotation
-                  ::: Scan.repeat ($$ "," |-- parse_annotation)) []
-   >> (fn numss => fold (union (op =)) numss [])) x
-
-(* Vampire proof lines sometimes contain needless information such as "(0:3)",
-   which can be hard to disambiguate from function application in an LL(1)
-   parser. As a workaround, we extend the TPTP term syntax with such detritus
-   and ignore it. *)
-fun parse_vampire_detritus x =
-  (scan_integer |-- $$ ":" --| scan_integer >> K []) x
-
-fun parse_term pool x =
-  ((scan_dollar_name >> repair_name pool)
-    -- Scan.optional ($$ "(" |-- (parse_vampire_detritus || parse_terms pool)
-                      --| $$ ")") []
-    --| Scan.optional ($$ "(" |-- parse_vampire_detritus --| $$ ")") []
-   >> ATerm) x
-and parse_terms pool x =
-  (parse_term pool ::: Scan.repeat ($$ "," |-- parse_term pool)) x
-
-fun parse_atom pool =
-  parse_term pool -- Scan.option (Scan.option ($$ "!") --| $$ "="
-                                  -- parse_term pool)
-  >> (fn (u1, NONE) => AAtom u1
-       | (u1, SOME (NONE, u2)) => AAtom (ATerm ("c_equal", [u1, u2]))
-       | (u1, SOME (SOME _, u2)) =>
-         mk_anot (AAtom (ATerm ("c_equal", [u1, u2]))))
-
-fun fo_term_head (ATerm (s, _)) = s
-
-(* TPTP formulas are fully parenthesized, so we don't need to worry about
-   operator precedence. *)
-fun parse_formula pool x =
-  (($$ "(" |-- parse_formula pool --| $$ ")"
-    || ($$ "!" >> K AForall || $$ "?" >> K AExists)
-       --| $$ "[" -- parse_terms pool --| $$ "]" --| $$ ":"
-       -- parse_formula pool
-       >> (fn ((q, ts), phi) => AQuant (q, map fo_term_head ts, phi))
-    || $$ "~" |-- parse_formula pool >> mk_anot
-    || parse_atom pool)
-   -- Scan.option ((Scan.this_string "=>" >> K AImplies
-                    || Scan.this_string "<=>" >> K AIff
-                    || Scan.this_string "<~>" >> K ANotIff
-                    || Scan.this_string "<=" >> K AIf
-                    || $$ "|" >> K AOr || $$ "&" >> K AAnd)
-                   -- parse_formula pool)
-   >> (fn (phi1, NONE) => phi1
-        | (phi1, SOME (c, phi2)) => mk_aconn c (phi1, phi2))) x
-
-val parse_tstp_extra_arguments =
-  Scan.optional ($$ "," |-- parse_annotation
-                 --| Scan.option ($$ "," |-- parse_annotations)) []
-
-(* Syntax: (fof|cnf)\(<num>, <formula_role>, <formula> <extra_arguments>\).
-   The <num> could be an identifier, but we assume integers. *)
- fun parse_tstp_line pool =
-   ((Scan.this_string "fof" || Scan.this_string "cnf") -- $$ "(")
-     |-- scan_integer --| $$ "," -- Symbol.scan_id --| $$ ","
-     -- parse_formula pool -- parse_tstp_extra_arguments --| $$ ")" --| $$ "."
-    >> (fn (((num, role), phi), deps) =>
-           case role of
-             "definition" =>
-             (case phi of
-                AConn (AIff, [phi1 as AAtom _, phi2]) =>
-                Definition (num, phi1, phi2)
-              | AAtom (ATerm ("c_equal", _)) =>
-                Inference (num, phi, deps) (* Vampire's equality proxy axiom *)
-              | _ => raise Fail "malformed definition")
-           | _ => Inference (num, phi, deps))
-
-(**** PARSING OF VAMPIRE OUTPUT ****)
-
-(* Syntax: <num>. <formula> <annotation> *)
-fun parse_vampire_line pool =
-  scan_integer --| $$ "." -- parse_formula pool -- parse_annotation
-  >> (fn ((num, phi), deps) => Inference (num, phi, deps))
-
-(**** PARSING OF SPASS OUTPUT ****)
-
-(* SPASS returns clause references of the form "x.y". We ignore "y", whose role
-   is not clear anyway. *)
-val parse_dot_name = scan_integer --| $$ "." --| scan_integer
-
-val parse_spass_annotations =
-  Scan.optional ($$ ":" |-- Scan.repeat (parse_dot_name
-                                         --| Scan.option ($$ ","))) []
-
-(* It is not clear why some literals are followed by sequences of stars and/or
-   pluses. We ignore them. *)
-fun parse_decorated_atom pool =
-  parse_atom pool --| Scan.repeat ($$ "*" || $$ "+" || $$ " ")
-
-fun mk_horn ([], []) = AAtom (ATerm ("c_False", []))
-  | mk_horn ([], pos_lits) = foldr1 (mk_aconn AOr) pos_lits
-  | mk_horn (neg_lits, []) = mk_anot (foldr1 (mk_aconn AAnd) neg_lits)
-  | mk_horn (neg_lits, pos_lits) =
-    mk_aconn AImplies (foldr1 (mk_aconn AAnd) neg_lits,
-                       foldr1 (mk_aconn AOr) pos_lits)
-
-fun parse_horn_clause pool =
-  Scan.repeat (parse_decorated_atom pool) --| $$ "|" --| $$ "|"
-    -- Scan.repeat (parse_decorated_atom pool) --| $$ "-" --| $$ ">"
-    -- Scan.repeat (parse_decorated_atom pool)
-  >> (mk_horn o apfst (op @))
-
-(* Syntax: <num>[0:<inference><annotations>]
-   <atoms> || <atoms> -> <atoms>. *)
-fun parse_spass_line pool =
-  scan_integer --| $$ "[" --| $$ "0" --| $$ ":" --| Symbol.scan_id
-    -- parse_spass_annotations --| $$ "]" -- parse_horn_clause pool --| $$ "."
-  >> (fn ((num, deps), u) => Inference (num, u, deps))
-
-fun parse_line pool =
-  parse_tstp_line pool || parse_vampire_line pool || parse_spass_line pool
-fun parse_lines pool = Scan.repeat1 (parse_line pool)
-fun parse_proof pool =
-  fst o Scan.finite Symbol.stopper
-            (Scan.error (!! (fn _ => raise Fail "unrecognized ATP output")
-                            (parse_lines pool)))
-  o explode o strip_spaces_except_between_ident_chars
-
-(**** INTERPRETATION OF TSTP SYNTAX TREES ****)
-
-exception FO_TERM of string fo_term list
-exception FORMULA of (string, string fo_term) formula list
-exception SAME of unit
-
-(* Type variables are given the basic sort "HOL.type". Some will later be
-   constrained by information from type literals, or by type inference. *)
-fun type_from_fo_term tfrees (u as ATerm (a, us)) =
-  let val Ts = map (type_from_fo_term tfrees) us in
-    case strip_prefix_and_unascii type_const_prefix a of
-      SOME b => Type (invert_const b, Ts)
-    | NONE =>
-      if not (null us) then
-        raise FO_TERM [u]  (* only "tconst"s have type arguments *)
-      else case strip_prefix_and_unascii tfree_prefix a of
-        SOME b =>
-        let val s = "'" ^ b in
-          TFree (s, AList.lookup (op =) tfrees s |> the_default HOLogic.typeS)
-        end
-      | NONE =>
-        case strip_prefix_and_unascii tvar_prefix a of
-          SOME b => TVar (("'" ^ b, 0), HOLogic.typeS)
-        | NONE =>
-          (* Variable from the ATP, say "X1" *)
-          Type_Infer.param 0 (a, HOLogic.typeS)
-  end
-
-(* Type class literal applied to a type. Returns triple of polarity, class,
-   type. *)
-fun type_constraint_from_term pos tfrees (u as ATerm (a, us)) =
-  case (strip_prefix_and_unascii class_prefix a,
-        map (type_from_fo_term tfrees) us) of
-    (SOME b, [T]) => (pos, b, T)
-  | _ => raise FO_TERM [u]
-
-(** Accumulate type constraints in a formula: negative type literals **)
-fun add_var (key, z)  = Vartab.map_default (key, []) (cons z)
-fun add_type_constraint (false, cl, TFree (a ,_)) = add_var ((a, ~1), cl)
-  | add_type_constraint (false, cl, TVar (ix, _)) = add_var (ix, cl)
-  | add_type_constraint _ = I
-
-fun repair_atp_variable_name f s =
-  let
-    fun subscript_name s n = s ^ nat_subscript n
-    val s = String.map f s
-  in
-    case space_explode "_" s of
-      [_] => (case take_suffix Char.isDigit (String.explode s) of
-                (cs1 as _ :: _, cs2 as _ :: _) =>
-                subscript_name (String.implode cs1)
-                               (the (Int.fromString (String.implode cs2)))
-              | (_, _) => s)
-    | [s1, s2] => (case Int.fromString s2 of
-                     SOME n => subscript_name s1 n
-                   | NONE => s)
-    | _ => s
-  end
-
-(* First-order translation. No types are known for variables. "HOLogic.typeT"
-   should allow them to be inferred. *)
-fun raw_term_from_pred thy full_types tfrees =
-  let
-    fun aux opt_T extra_us u =
-      case u of
-        ATerm ("hBOOL", [u1]) => aux (SOME @{typ bool}) [] u1
-      | ATerm ("hAPP", [u1, u2]) => aux opt_T (u2 :: extra_us) u1
-      | ATerm (a, us) =>
-        if a = type_wrapper_name then
-          case us of
-            [typ_u, term_u] =>
-            aux (SOME (type_from_fo_term tfrees typ_u)) extra_us term_u
-          | _ => raise FO_TERM us
-        else case strip_prefix_and_unascii const_prefix a of
-          SOME "equal" =>
-          list_comb (Const (@{const_name HOL.eq}, HOLogic.typeT),
-                     map (aux NONE []) us)
-        | SOME b =>
-          let
-            val c = invert_const b
-            val num_type_args = num_type_args thy c
-            val (type_us, term_us) =
-              chop (if full_types then 0 else num_type_args) us
-            (* Extra args from "hAPP" come after any arguments given directly to
-               the constant. *)
-            val term_ts = map (aux NONE []) term_us
-            val extra_ts = map (aux NONE []) extra_us
-            val t =
-              Const (c, if full_types then
-                          case opt_T of
-                            SOME T => map fastype_of term_ts ---> T
-                          | NONE =>
-                            if num_type_args = 0 then
-                              Sign.const_instance thy (c, [])
-                            else
-                              raise Fail ("no type information for " ^ quote c)
-                        else
-                          Sign.const_instance thy (c,
-                              map (type_from_fo_term tfrees) type_us))
-          in list_comb (t, term_ts @ extra_ts) end
-        | NONE => (* a free or schematic variable *)
-          let
-            val ts = map (aux NONE []) (us @ extra_us)
-            val T = map fastype_of ts ---> HOLogic.typeT
-            val t =
-              case strip_prefix_and_unascii fixed_var_prefix a of
-                SOME b => Free (b, T)
-              | NONE =>
-                case strip_prefix_and_unascii schematic_var_prefix a of
-                  SOME b => Var ((b, 0), T)
-                | NONE =>
-                  if is_tptp_variable a then
-                    Var ((repair_atp_variable_name Char.toLower a, 0), T)
-                  else
-                    (* Skolem constants? *)
-                    Var ((repair_atp_variable_name Char.toUpper a, 0), T)
-          in list_comb (t, ts) end
-  in aux (SOME HOLogic.boolT) [] end
-
-fun term_from_pred thy full_types tfrees pos (u as ATerm (s, _)) =
-  if String.isPrefix class_prefix s then
-    add_type_constraint (type_constraint_from_term pos tfrees u)
-    #> pair @{const True}
-  else
-    pair (raw_term_from_pred thy full_types tfrees u)
-
-val combinator_table =
-  [(@{const_name COMBI}, @{thm COMBI_def_raw}),
-   (@{const_name COMBK}, @{thm COMBK_def_raw}),
-   (@{const_name COMBB}, @{thm COMBB_def_raw}),
-   (@{const_name COMBC}, @{thm COMBC_def_raw}),
-   (@{const_name COMBS}, @{thm COMBS_def_raw})]
-
-fun uncombine_term (t1 $ t2) = betapply (pairself uncombine_term (t1, t2))
-  | uncombine_term (Abs (s, T, t')) = Abs (s, T, uncombine_term t')
-  | uncombine_term (t as Const (x as (s, _))) =
-    (case AList.lookup (op =) combinator_table s of
-       SOME thm => thm |> prop_of |> specialize_type @{theory} x |> Logic.dest_equals |> snd
-     | NONE => t)
-  | uncombine_term t = t
-
-(* Update schematic type variables with detected sort constraints. It's not
-   totally clear when this code is necessary. *)
-fun repair_tvar_sorts (t, tvar_tab) =
-  let
-    fun do_type (Type (a, Ts)) = Type (a, map do_type Ts)
-      | do_type (TVar (xi, s)) =
-        TVar (xi, the_default s (Vartab.lookup tvar_tab xi))
-      | do_type (TFree z) = TFree z
-    fun do_term (Const (a, T)) = Const (a, do_type T)
-      | do_term (Free (a, T)) = Free (a, do_type T)
-      | do_term (Var (xi, T)) = Var (xi, do_type T)
-      | do_term (t as Bound _) = t
-      | do_term (Abs (a, T, t)) = Abs (a, do_type T, do_term t)
-      | do_term (t1 $ t2) = do_term t1 $ do_term t2
-  in t |> not (Vartab.is_empty tvar_tab) ? do_term end
-
-fun quantify_over_free quant_s free_s body_t =
-  case Term.add_frees body_t [] |> filter (curry (op =) free_s o fst) of
-    [] => body_t
-  | frees as (_, free_T) :: _ =>
-    Abs (free_s, free_T, fold (curry abstract_over) (map Free frees) body_t)
-
-(* Interpret an ATP formula as a HOL term, extracting sort constraints as they
-   appear in the formula. *)
-fun prop_from_formula thy full_types tfrees phi =
-  let
-    fun do_formula pos phi =
-      case phi of
-        AQuant (_, [], phi) => do_formula pos phi
-      | AQuant (q, x :: xs, phi') =>
-        do_formula pos (AQuant (q, xs, phi'))
-        #>> quantify_over_free (case q of
-                                  AForall => @{const_name All}
-                                | AExists => @{const_name Ex})
-                               (repair_atp_variable_name Char.toLower x)
-      | AConn (ANot, [phi']) => do_formula (not pos) phi' #>> s_not
-      | AConn (c, [phi1, phi2]) =>
-        do_formula (pos |> c = AImplies ? not) phi1
-        ##>> do_formula pos phi2
-        #>> (case c of
-               AAnd => s_conj
-             | AOr => s_disj
-             | AImplies => s_imp
-             | AIf => s_imp o swap
-             | AIff => s_iff
-             | ANotIff => s_not o s_iff)
-      | AAtom tm => term_from_pred thy full_types tfrees pos tm
-      | _ => raise FORMULA [phi]
-  in repair_tvar_sorts (do_formula true phi Vartab.empty) end
-
-fun check_formula ctxt =
-  Type_Infer.constrain HOLogic.boolT
-  #> Syntax.check_term (ProofContext.set_mode ProofContext.mode_schematic ctxt)
-
-
-(**** Translation of TSTP files to Isar Proofs ****)
-
-fun unvarify_term (Var ((s, 0), T)) = Free (s, T)
-  | unvarify_term t = raise TERM ("unvarify_term: non-Var", [t])
-
-fun decode_line full_types tfrees (Definition (num, phi1, phi2)) ctxt =
-    let
-      val thy = ProofContext.theory_of ctxt
-      val t1 = prop_from_formula thy full_types tfrees phi1
-      val vars = snd (strip_comb t1)
-      val frees = map unvarify_term vars
-      val unvarify_args = subst_atomic (vars ~~ frees)
-      val t2 = prop_from_formula thy full_types tfrees phi2
-      val (t1, t2) =
-        HOLogic.eq_const HOLogic.typeT $ t1 $ t2
-        |> unvarify_args |> uncombine_term |> check_formula ctxt
-        |> HOLogic.dest_eq
-    in
-      (Definition (num, t1, t2),
-       fold Variable.declare_term (maps OldTerm.term_frees [t1, t2]) ctxt)
-    end
-  | decode_line full_types tfrees (Inference (num, u, deps)) ctxt =
-    let
-      val thy = ProofContext.theory_of ctxt
-      val t = u |> prop_from_formula thy full_types tfrees
-                |> uncombine_term |> check_formula ctxt
-    in
-      (Inference (num, t, deps),
-       fold Variable.declare_term (OldTerm.term_frees t) ctxt)
-    end
-fun decode_lines ctxt full_types tfrees lines =
-  fst (fold_map (decode_line full_types tfrees) lines ctxt)
-
-fun is_same_inference _ (Definition _) = false
-  | is_same_inference t (Inference (_, t', _)) = t aconv t'
-
-(* No "real" literals means only type information (tfree_tcs, clsrel, or
-   clsarity). *)
-val is_only_type_information = curry (op aconv) HOLogic.true_const
-
-fun replace_one_dep (old, new) dep = if dep = old then new else [dep]
-fun replace_deps_in_line _ (line as Definition _) = line
-  | replace_deps_in_line p (Inference (num, t, deps)) =
-    Inference (num, t, fold (union (op =) o replace_one_dep p) deps [])
-
-(* Discard axioms; consolidate adjacent lines that prove the same formula, since
-   they differ only in type information.*)
-fun add_line _ _ (line as Definition _) lines = line :: lines
-  | add_line conjecture_shape axiom_names (Inference (num, t, [])) lines =
-    (* No dependencies: axiom, conjecture, or (for Vampire) internal axioms or
-       definitions. *)
-    if is_axiom_number axiom_names num then
-      (* Axioms are not proof lines. *)
-      if is_only_type_information t then
-        map (replace_deps_in_line (num, [])) lines
-      (* Is there a repetition? If so, replace later line by earlier one. *)
-      else case take_prefix (not o is_same_inference t) lines of
-        (_, []) => lines (*no repetition of proof line*)
-      | (pre, Inference (num', _, _) :: post) =>
-        pre @ map (replace_deps_in_line (num', [num])) post
-    else if is_conjecture_number conjecture_shape num then
-      Inference (num, negate_term t, []) :: lines
-    else
-      map (replace_deps_in_line (num, [])) lines
-  | add_line _ _ (Inference (num, t, deps)) lines =
-    (* Type information will be deleted later; skip repetition test. *)
-    if is_only_type_information t then
-      Inference (num, t, deps) :: lines
-    (* Is there a repetition? If so, replace later line by earlier one. *)
-    else case take_prefix (not o is_same_inference t) lines of
-      (* FIXME: Doesn't this code risk conflating proofs involving different
-         types? *)
-       (_, []) => Inference (num, t, deps) :: lines
-     | (pre, Inference (num', t', _) :: post) =>
-       Inference (num, t', deps) ::
-       pre @ map (replace_deps_in_line (num', [num])) post
-
-(* Recursively delete empty lines (type information) from the proof. *)
-fun add_nontrivial_line (Inference (num, t, [])) lines =
-    if is_only_type_information t then delete_dep num lines
-    else Inference (num, t, []) :: lines
-  | add_nontrivial_line line lines = line :: lines
-and delete_dep num lines =
-  fold_rev add_nontrivial_line (map (replace_deps_in_line (num, [])) lines) []
-
-(* ATPs sometimes reuse free variable names in the strangest ways. Removing
-   offending lines often does the trick. *)
-fun is_bad_free frees (Free x) = not (member (op =) frees x)
-  | is_bad_free _ _ = false
-
-(* Vampire is keen on producing these. *)
-fun is_trivial_formula (@{const Not} $ (Const (@{const_name HOL.eq}, _)
-                                        $ t1 $ t2)) = (t1 aconv t2)
-  | is_trivial_formula _ = false
-
-fun add_desired_line _ _ _ _ (line as Definition (num, _, _)) (j, lines) =
-    (j, line :: map (replace_deps_in_line (num, [])) lines)
-  | add_desired_line isar_shrink_factor conjecture_shape axiom_names frees
-                     (Inference (num, t, deps)) (j, lines) =
-    (j + 1,
-     if is_axiom_number axiom_names num orelse
-        is_conjecture_number conjecture_shape num orelse
-        (not (is_only_type_information t) andalso
-         null (Term.add_tvars t []) andalso
-         not (exists_subterm (is_bad_free frees) t) andalso
-         not (is_trivial_formula t) andalso
-         (null lines orelse (* last line must be kept *)
-          (length deps >= 2 andalso j mod isar_shrink_factor = 0))) then
-       Inference (num, t, deps) :: lines  (* keep line *)
-     else
-       map (replace_deps_in_line (num, deps)) lines)  (* drop line *)
-
-(** EXTRACTING LEMMAS **)
-
-(* Like "split_line", but ignores "\n" that follow a comma (as in SNARK's
-   output). *)
-val split_proof_lines =
-  let
-    fun aux [] [] = []
-      | aux line [] = [implode (rev line)]
-      | aux line ("," :: "\n" :: rest) = aux ("," :: line) rest
-      | aux line ("\n" :: rest) = aux line [] @ aux [] rest
-      | aux line (s :: rest) = aux (s :: line) rest
-  in aux [] o explode end
-
-(* A list consisting of the first number in each line is returned. For TSTP,
-   interesting lines have the form "fof(108, axiom, ...)", where the number
-   (108) is extracted. For SPASS, lines have the form "108[0:Inp] ...", where
-   the first number (108) is extracted. For Vampire, we look for
-   "108. ... [input]". *)
-fun used_facts_in_atp_proof axiom_names atp_proof =
-  let
-    fun axiom_names_at_index num =
-      let val j = Int.fromString num |> the_default ~1 in
-        if is_axiom_number axiom_names j then Vector.sub (axiom_names, j - 1)
-        else []
-      end
-    val tokens_of =
-      String.tokens (fn c => not (Char.isAlphaNum c) andalso c <> #"_")
-    fun do_line (tag :: num :: "axiom" :: (rest as _ :: _)) =
-        if tag = "cnf" orelse tag = "fof" then
-          (case strip_prefix_and_unascii axiom_prefix (List.last rest) of
-             SOME name =>
-             if member (op =) rest "file" then
-               ([(name, name |> find_first_in_list_vector axiom_names |> the)]
-                handle Option.Option =>
-                       error ("No such fact: " ^ quote name ^ "."))
-             else
-               axiom_names_at_index num
-           | NONE => axiom_names_at_index num)
-        else
-          []
-      | do_line (num :: "0" :: "Inp" :: _) = axiom_names_at_index num
-      | do_line (num :: rest) =
-        (case List.last rest of "input" => axiom_names_at_index num | _ => [])
-      | do_line _ = []
-  in atp_proof |> split_proof_lines |> maps (do_line o tokens_of) end
-
-val indent_size = 2
-val no_label = ("", ~1)
-
-val raw_prefix = "X"
-val assum_prefix = "A"
-val fact_prefix = "F"
-
-fun string_for_label (s, num) = s ^ string_of_int num
-
-fun metis_using [] = ""
-  | metis_using ls =
-    "using " ^ space_implode " " (map string_for_label ls) ^ " "
-fun metis_apply _ 1 = "by "
-  | metis_apply 1 _ = "apply "
-  | metis_apply i _ = "prefer " ^ string_of_int i ^ " apply "
-fun metis_name full_types = if full_types then "metisFT" else "metis"
-fun metis_call full_types [] = metis_name full_types
-  | metis_call full_types ss =
-    "(" ^ metis_name full_types ^ " " ^ space_implode " " ss ^ ")"
-fun metis_command full_types i n (ls, ss) =
-  metis_using ls ^ metis_apply i n ^ metis_call full_types ss
-fun metis_line full_types i n ss =
-  "Try this command: " ^
-  Markup.markup Markup.sendback (metis_command full_types i n ([], ss)) ^ "."
-fun minimize_line _ [] = ""
-  | minimize_line minimize_command ss =
-    case minimize_command ss of
-      "" => ""
-    | command =>
-      "\nTo minimize the number of lemmas, try this: " ^
-      Markup.markup Markup.sendback command ^ "."
-
-fun used_facts axiom_names =
-  used_facts_in_atp_proof axiom_names
-  #> List.partition (curry (op =) Chained o snd)
-  #> pairself (sort_distinct (string_ord o pairself fst))
-
-fun metis_proof_text (full_types, minimize_command, atp_proof, axiom_names,
-                      goal, i) =
-  let
-    val (chained_lemmas, other_lemmas) = used_facts axiom_names atp_proof
-    val n = Logic.count_prems (prop_of goal)
-  in
-    (metis_line full_types i n (map fst other_lemmas) ^
-     minimize_line minimize_command (map fst (other_lemmas @ chained_lemmas)),
-     other_lemmas @ chained_lemmas)
-  end
-
-(** Isar proof construction and manipulation **)
-
-fun merge_fact_sets (ls1, ss1) (ls2, ss2) =
-  (union (op =) ls1 ls2, union (op =) ss1 ss2)
-
-type label = string * int
-type facts = label list * string list
-
-datatype qualifier = Show | Then | Moreover | Ultimately
-
-datatype step =
-  Fix of (string * typ) list |
-  Let of term * term |
-  Assume of label * term |
-  Have of qualifier list * label * term * byline
-and byline =
-  ByMetis of facts |
-  CaseSplit of step list list * facts
-
-fun smart_case_split [] facts = ByMetis facts
-  | smart_case_split proofs facts = CaseSplit (proofs, facts)
-
-fun add_fact_from_dep axiom_names num =
-  if is_axiom_number axiom_names num then
-    apsnd (union (op =) (map fst (Vector.sub (axiom_names, num - 1))))
-  else
-    apfst (insert (op =) (raw_prefix, num))
-
-fun forall_of v t = HOLogic.all_const (fastype_of v) $ lambda v t
-fun forall_vars t = fold_rev forall_of (map Var (Term.add_vars t [])) t
-
-fun step_for_line _ _ (Definition (_, t1, t2)) = Let (t1, t2)
-  | step_for_line _ _ (Inference (num, t, [])) = Assume ((raw_prefix, num), t)
-  | step_for_line axiom_names j (Inference (num, t, deps)) =
-    Have (if j = 1 then [Show] else [], (raw_prefix, num),
-          forall_vars t,
-          ByMetis (fold (add_fact_from_dep axiom_names) deps ([], [])))
-
-fun proof_from_atp_proof pool ctxt full_types tfrees isar_shrink_factor
-                         atp_proof conjecture_shape axiom_names params frees =
-  let
-    val lines =
-      atp_proof ^ "$" (* the $ sign acts as a sentinel (FIXME: needed?) *)
-      |> parse_proof pool
-      |> sort (int_ord o pairself raw_step_number)
-      |> decode_lines ctxt full_types tfrees
-      |> rpair [] |-> fold_rev (add_line conjecture_shape axiom_names)
-      |> rpair [] |-> fold_rev add_nontrivial_line
-      |> rpair (0, []) |-> fold_rev (add_desired_line isar_shrink_factor
-                                             conjecture_shape axiom_names frees)
-      |> snd
-  in
-    (if null params then [] else [Fix params]) @
-    map2 (step_for_line axiom_names) (length lines downto 1) lines
-  end
-
-(* When redirecting proofs, we keep information about the labels seen so far in
-   the "backpatches" data structure. The first component indicates which facts
-   should be associated with forthcoming proof steps. The second component is a
-   pair ("assum_ls", "drop_ls"), where "assum_ls" are the labels that should
-   become assumptions and "drop_ls" are the labels that should be dropped in a
-   case split. *)
-type backpatches = (label * facts) list * (label list * label list)
-
-fun used_labels_of_step (Have (_, _, _, by)) =
-    (case by of
-       ByMetis (ls, _) => ls
-     | CaseSplit (proofs, (ls, _)) =>
-       fold (union (op =) o used_labels_of) proofs ls)
-  | used_labels_of_step _ = []
-and used_labels_of proof = fold (union (op =) o used_labels_of_step) proof []
-
-fun new_labels_of_step (Fix _) = []
-  | new_labels_of_step (Let _) = []
-  | new_labels_of_step (Assume (l, _)) = [l]
-  | new_labels_of_step (Have (_, l, _, _)) = [l]
-val new_labels_of = maps new_labels_of_step
-
-val join_proofs =
-  let
-    fun aux _ [] = NONE
-      | aux proof_tail (proofs as (proof1 :: _)) =
-        if exists null proofs then
-          NONE
-        else if forall (curry (op =) (hd proof1) o hd) (tl proofs) then
-          aux (hd proof1 :: proof_tail) (map tl proofs)
-        else case hd proof1 of
-          Have ([], l, t, _) => (* FIXME: should we really ignore the "by"? *)
-          if forall (fn Have ([], l', t', _) :: _ => (l, t) = (l', t')
-                      | _ => false) (tl proofs) andalso
-             not (exists (member (op =) (maps new_labels_of proofs))
-                         (used_labels_of proof_tail)) then
-            SOME (l, t, map rev proofs, proof_tail)
-          else
-            NONE
-        | _ => NONE
-  in aux [] o map rev end
-
-fun case_split_qualifiers proofs =
-  case length proofs of
-    0 => []
-  | 1 => [Then]
-  | _ => [Ultimately]
-
-fun redirect_proof conjecture_shape hyp_ts concl_t proof =
-  let
-    (* The first pass outputs those steps that are independent of the negated
-       conjecture. The second pass flips the proof by contradiction to obtain a
-       direct proof, introducing case splits when an inference depends on
-       several facts that depend on the negated conjecture. *)
-    fun find_hyp num =
-      nth hyp_ts (index_in_shape num conjecture_shape)
-      handle Subscript =>
-             raise Fail ("Cannot find hypothesis " ^ Int.toString num)
-     val concl_ls = map (pair raw_prefix) (List.last conjecture_shape)
-     val canonicalize_labels =
-       map (fn l => if member (op =) concl_ls l then hd concl_ls else l)
-       #> distinct (op =)
-     fun first_pass ([], contra) = ([], contra)
-       | first_pass ((step as Fix _) :: proof, contra) =
-         first_pass (proof, contra) |>> cons step
-       | first_pass ((step as Let _) :: proof, contra) =
-         first_pass (proof, contra) |>> cons step
-       | first_pass ((step as Assume (l as (_, num), _)) :: proof, contra) =
-         if member (op =) concl_ls l then
-           first_pass (proof, contra ||> l = hd concl_ls ? cons step)
-         else
-           first_pass (proof, contra) |>> cons (Assume (l, find_hyp num))
-       | first_pass (Have (qs, l, t, ByMetis (ls, ss)) :: proof, contra) =
-         let
-           val ls = canonicalize_labels ls
-           val step = Have (qs, l, t, ByMetis (ls, ss))
-         in
-           if exists (member (op =) (fst contra)) ls then
-             first_pass (proof, contra |>> cons l ||> cons step)
-           else
-             first_pass (proof, contra) |>> cons step
-         end
-       | first_pass _ = raise Fail "malformed proof"
-    val (proof_top, (contra_ls, contra_proof)) =
-      first_pass (proof, (concl_ls, []))
-    val backpatch_label = the_default ([], []) oo AList.lookup (op =) o fst
-    fun backpatch_labels patches ls =
-      fold merge_fact_sets (map (backpatch_label patches) ls) ([], [])
-    fun second_pass end_qs ([], assums, patches) =
-        ([Have (end_qs, no_label, concl_t,
-                ByMetis (backpatch_labels patches (map snd assums)))], patches)
-      | second_pass end_qs (Assume (l, t) :: proof, assums, patches) =
-        second_pass end_qs (proof, (t, l) :: assums, patches)
-      | second_pass end_qs (Have (qs, l, t, ByMetis (ls, ss)) :: proof, assums,
-                            patches) =
-        if member (op =) (snd (snd patches)) l andalso
-           not (member (op =) (fst (snd patches)) l) andalso
-           not (AList.defined (op =) (fst patches) l) then
-          second_pass end_qs (proof, assums, patches ||> apsnd (append ls))
-        else
-          (case List.partition (member (op =) contra_ls) ls of
-             ([contra_l], co_ls) =>
-             if member (op =) qs Show then
-               second_pass end_qs (proof, assums,
-                                   patches |>> cons (contra_l, (co_ls, ss)))
-             else
-               second_pass end_qs
-                           (proof, assums,
-                            patches |>> cons (contra_l, (l :: co_ls, ss)))
-               |>> cons (if member (op =) (fst (snd patches)) l then
-                           Assume (l, negate_term t)
-                         else
-                           Have (qs, l, negate_term t,
-                                 ByMetis (backpatch_label patches l)))
-           | (contra_ls as _ :: _, co_ls) =>
-             let
-               val proofs =
-                 map_filter
-                     (fn l =>
-                         if member (op =) concl_ls l then
-                           NONE
-                         else
-                           let
-                             val drop_ls = filter (curry (op <>) l) contra_ls
-                           in
-                             second_pass []
-                                 (proof, assums,
-                                  patches ||> apfst (insert (op =) l)
-                                          ||> apsnd (union (op =) drop_ls))
-                             |> fst |> SOME
-                           end) contra_ls
-               val (assumes, facts) =
-                 if member (op =) (fst (snd patches)) l then
-                   ([Assume (l, negate_term t)], (l :: co_ls, ss))
-                 else
-                   ([], (co_ls, ss))
-             in
-               (case join_proofs proofs of
-                  SOME (l, t, proofs, proof_tail) =>
-                  Have (case_split_qualifiers proofs @
-                        (if null proof_tail then end_qs else []), l, t,
-                        smart_case_split proofs facts) :: proof_tail
-                | NONE =>
-                  [Have (case_split_qualifiers proofs @ end_qs, no_label,
-                         concl_t, smart_case_split proofs facts)],
-                patches)
-               |>> append assumes
-             end
-           | _ => raise Fail "malformed proof")
-       | second_pass _ _ = raise Fail "malformed proof"
-    val proof_bottom =
-      second_pass [Show] (contra_proof, [], ([], ([], []))) |> fst
-  in proof_top @ proof_bottom end
-
-(* FIXME: Still needed? Probably not. *)
-val kill_duplicate_assumptions_in_proof =
-  let
-    fun relabel_facts subst =
-      apfst (map (fn l => AList.lookup (op =) subst l |> the_default l))
-    fun do_step (step as Assume (l, t)) (proof, subst, assums) =
-        (case AList.lookup (op aconv) assums t of
-           SOME l' => (proof, (l, l') :: subst, assums)
-         | NONE => (step :: proof, subst, (t, l) :: assums))
-      | do_step (Have (qs, l, t, by)) (proof, subst, assums) =
-        (Have (qs, l, t,
-               case by of
-                 ByMetis facts => ByMetis (relabel_facts subst facts)
-               | CaseSplit (proofs, facts) =>
-                 CaseSplit (map do_proof proofs, relabel_facts subst facts)) ::
-         proof, subst, assums)
-      | do_step step (proof, subst, assums) = (step :: proof, subst, assums)
-    and do_proof proof = fold do_step proof ([], [], []) |> #1 |> rev
-  in do_proof end
-
-val then_chain_proof =
-  let
-    fun aux _ [] = []
-      | aux _ ((step as Assume (l, _)) :: proof) = step :: aux l proof
-      | aux l' (Have (qs, l, t, by) :: proof) =
-        (case by of
-           ByMetis (ls, ss) =>
-           Have (if member (op =) ls l' then
-                   (Then :: qs, l, t,
-                    ByMetis (filter_out (curry (op =) l') ls, ss))
-                 else
-                   (qs, l, t, ByMetis (ls, ss)))
-         | CaseSplit (proofs, facts) =>
-           Have (qs, l, t, CaseSplit (map (aux no_label) proofs, facts))) ::
-        aux l proof
-      | aux _ (step :: proof) = step :: aux no_label proof
-  in aux no_label end
-
-fun kill_useless_labels_in_proof proof =
-  let
-    val used_ls = used_labels_of proof
-    fun do_label l = if member (op =) used_ls l then l else no_label
-    fun do_step (Assume (l, t)) = Assume (do_label l, t)
-      | do_step (Have (qs, l, t, by)) =
-        Have (qs, do_label l, t,
-              case by of
-                CaseSplit (proofs, facts) =>
-                CaseSplit (map (map do_step) proofs, facts)
-              | _ => by)
-      | do_step step = step
-  in map do_step proof end
-
-fun prefix_for_depth n = replicate_string (n + 1)
-
-val relabel_proof =
-  let
-    fun aux _ _ _ [] = []
-      | aux subst depth (next_assum, next_fact) (Assume (l, t) :: proof) =
-        if l = no_label then
-          Assume (l, t) :: aux subst depth (next_assum, next_fact) proof
-        else
-          let val l' = (prefix_for_depth depth assum_prefix, next_assum) in
-            Assume (l', t) ::
-            aux ((l, l') :: subst) depth (next_assum + 1, next_fact) proof
-          end
-      | aux subst depth (next_assum, next_fact) (Have (qs, l, t, by) :: proof) =
-        let
-          val (l', subst, next_fact) =
-            if l = no_label then
-              (l, subst, next_fact)
-            else
-              let
-                val l' = (prefix_for_depth depth fact_prefix, next_fact)
-              in (l', (l, l') :: subst, next_fact + 1) end
-          val relabel_facts =
-            apfst (map (fn l =>
-                           case AList.lookup (op =) subst l of
-                             SOME l' => l'
-                           | NONE => raise Fail ("unknown label " ^
-                                                 quote (string_for_label l))))
-          val by =
-            case by of
-              ByMetis facts => ByMetis (relabel_facts facts)
-            | CaseSplit (proofs, facts) =>
-              CaseSplit (map (aux subst (depth + 1) (1, 1)) proofs,
-                         relabel_facts facts)
-        in
-          Have (qs, l', t, by) ::
-          aux subst depth (next_assum, next_fact) proof
-        end
-      | aux subst depth nextp (step :: proof) =
-        step :: aux subst depth nextp proof
-  in aux [] 0 (1, 1) end
-
-fun string_for_proof ctxt full_types i n =
-  let
-    fun fix_print_mode f x =
-      setmp_CRITICAL show_no_free_types true
-          (setmp_CRITICAL show_types true
-               (Print_Mode.setmp (filter (curry (op =) Symbol.xsymbolsN)
-                                         (print_mode_value ())) f)) x
-    fun do_indent ind = replicate_string (ind * indent_size) " "
-    fun do_free (s, T) =
-      maybe_quote s ^ " :: " ^
-      maybe_quote (fix_print_mode (Syntax.string_of_typ ctxt) T)
-    fun do_label l = if l = no_label then "" else string_for_label l ^ ": "
-    fun do_have qs =
-      (if member (op =) qs Moreover then "moreover " else "") ^
-      (if member (op =) qs Ultimately then "ultimately " else "") ^
-      (if member (op =) qs Then then
-         if member (op =) qs Show then "thus" else "hence"
-       else
-         if member (op =) qs Show then "show" else "have")
-    val do_term = maybe_quote o fix_print_mode (Syntax.string_of_term ctxt)
-    fun do_facts (ls, ss) =
-      metis_command full_types 1 1
-                    (ls |> sort_distinct (prod_ord string_ord int_ord),
-                     ss |> sort_distinct string_ord)
-    and do_step ind (Fix xs) =
-        do_indent ind ^ "fix " ^ space_implode " and " (map do_free xs) ^ "\n"
-      | do_step ind (Let (t1, t2)) =
-        do_indent ind ^ "let " ^ do_term t1 ^ " = " ^ do_term t2 ^ "\n"
-      | do_step ind (Assume (l, t)) =
-        do_indent ind ^ "assume " ^ do_label l ^ do_term t ^ "\n"
-      | do_step ind (Have (qs, l, t, ByMetis facts)) =
-        do_indent ind ^ do_have qs ^ " " ^
-        do_label l ^ do_term t ^ " " ^ do_facts facts ^ "\n"
-      | do_step ind (Have (qs, l, t, CaseSplit (proofs, facts))) =
-        space_implode (do_indent ind ^ "moreover\n")
-                      (map (do_block ind) proofs) ^
-        do_indent ind ^ do_have qs ^ " " ^ do_label l ^ do_term t ^ " " ^
-        do_facts facts ^ "\n"
-    and do_steps prefix suffix ind steps =
-      let val s = implode (map (do_step ind) steps) in
-        replicate_string (ind * indent_size - size prefix) " " ^ prefix ^
-        String.extract (s, ind * indent_size,
-                        SOME (size s - ind * indent_size - 1)) ^
-        suffix ^ "\n"
-      end
-    and do_block ind proof = do_steps "{ " " }" (ind + 1) proof
-    (* One-step proofs are pointless; better use the Metis one-liner
-       directly. *)
-    and do_proof [Have (_, _, _, ByMetis _)] = ""
-      | do_proof proof =
-        (if i <> 1 then "prefer " ^ string_of_int i ^ "\n" else "") ^
-        do_indent 0 ^ "proof -\n" ^
-        do_steps "" "" 1 proof ^
-        do_indent 0 ^ (if n <> 1 then "next" else "qed")
-  in do_proof end
-
-fun isar_proof_text (pool, debug, isar_shrink_factor, ctxt, conjecture_shape)
-                    (other_params as (full_types, _, atp_proof, axiom_names,
-                                      goal, i)) =
-  let
-    val (params, hyp_ts, concl_t) = strip_subgoal goal i
-    val frees = fold Term.add_frees (concl_t :: hyp_ts) []
-    val tfrees = fold Term.add_tfrees (concl_t :: hyp_ts) []
-    val n = Logic.count_prems (prop_of goal)
-    val (one_line_proof, lemma_names) = metis_proof_text other_params
-    fun isar_proof_for () =
-      case proof_from_atp_proof pool ctxt full_types tfrees isar_shrink_factor
-                                atp_proof conjecture_shape axiom_names params
-                                frees
-           |> redirect_proof conjecture_shape hyp_ts concl_t
-           |> kill_duplicate_assumptions_in_proof
-           |> then_chain_proof
-           |> kill_useless_labels_in_proof
-           |> relabel_proof
-           |> string_for_proof ctxt full_types i n of
-        "" => "\nNo structured proof available."
-      | proof => "\n\nStructured proof:\n" ^ Markup.markup Markup.sendback proof
-    val isar_proof =
-      if debug then
-        isar_proof_for ()
-      else
-        try isar_proof_for ()
-        |> the_default "\nWarning: The Isar proof construction failed."
-  in (one_line_proof ^ isar_proof, lemma_names) end
-
-fun proof_text isar_proof isar_params other_params =
-  (if isar_proof then isar_proof_text isar_params else metis_proof_text)
-      other_params
-
-end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_reconstruct.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -0,0 +1,1038 @@
+(*  Title:      HOL/Tools/Sledgehammer/sledgehammer_reconstruct.ML
+    Author:     Lawrence C. Paulson, Cambridge University Computer Laboratory
+    Author:     Claire Quigley, Cambridge University Computer Laboratory
+    Author:     Jasmin Blanchette, TU Muenchen
+
+Transfer of proofs from external provers.
+*)
+
+signature SLEDGEHAMMER_RECONSTRUCT =
+sig
+  type locality = Sledgehammer_Filter.locality
+  type minimize_command = string list -> string
+  type metis_params =
+    bool * minimize_command * string * (string * locality) list vector * thm
+    * int
+  type isar_params =
+    string Symtab.table * bool * int * Proof.context * int list list
+  type text_result = string * (string * locality) list
+
+  val metis_proof_text : metis_params -> text_result
+  val isar_proof_text : isar_params -> metis_params -> text_result
+  val proof_text : bool -> isar_params -> metis_params -> text_result
+end;
+
+structure Sledgehammer_Reconstruct : SLEDGEHAMMER_RECONSTRUCT =
+struct
+
+open ATP_Problem
+open Metis_Clauses
+open Sledgehammer_Util
+open Sledgehammer_Filter
+open Sledgehammer_Translate
+
+type minimize_command = string list -> string
+type metis_params =
+  bool * minimize_command * string * (string * locality) list vector * thm * int
+type isar_params =
+  string Symtab.table * bool * int * Proof.context * int list list
+type text_result = string * (string * locality) list
+
+(* Simple simplifications to ensure that sort annotations don't leave a trail of
+   spurious "True"s. *)
+fun s_not @{const False} = @{const True}
+  | s_not @{const True} = @{const False}
+  | s_not (@{const Not} $ t) = t
+  | s_not t = @{const Not} $ t
+fun s_conj (@{const True}, t2) = t2
+  | s_conj (t1, @{const True}) = t1
+  | s_conj p = HOLogic.mk_conj p
+fun s_disj (@{const False}, t2) = t2
+  | s_disj (t1, @{const False}) = t1
+  | s_disj p = HOLogic.mk_disj p
+fun s_imp (@{const True}, t2) = t2
+  | s_imp (t1, @{const False}) = s_not t1
+  | s_imp p = HOLogic.mk_imp p
+fun s_iff (@{const True}, t2) = t2
+  | s_iff (t1, @{const True}) = t1
+  | s_iff (t1, t2) = HOLogic.eq_const HOLogic.boolT $ t1 $ t2
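+(* For instance, by the equations above, "s_conj (@{const True}, t)" is just
+   "t" rather than "True & t", and "s_not (@{const Not} $ t)" is "t". *)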
+
+fun mk_anot (AConn (ANot, [phi])) = phi
+  | mk_anot phi = AConn (ANot, [phi])
+fun mk_aconn c (phi1, phi2) = AConn (c, [phi1, phi2])
+
+fun index_in_shape x = find_index (exists (curry (op =) x))
+fun is_axiom_number axiom_names num =
+  num > 0 andalso num <= Vector.length axiom_names andalso
+  not (null (Vector.sub (axiom_names, num - 1)))
+fun is_conjecture_number conjecture_shape num =
+  index_in_shape num conjecture_shape >= 0
+
+fun negate_term (Const (@{const_name All}, T) $ Abs (s, T', t')) =
+    Const (@{const_name Ex}, T) $ Abs (s, T', negate_term t')
+  | negate_term (Const (@{const_name Ex}, T) $ Abs (s, T', t')) =
+    Const (@{const_name All}, T) $ Abs (s, T', negate_term t')
+  | negate_term (@{const HOL.implies} $ t1 $ t2) =
+    @{const HOL.conj} $ t1 $ negate_term t2
+  | negate_term (@{const HOL.conj} $ t1 $ t2) =
+    @{const HOL.disj} $ negate_term t1 $ negate_term t2
+  | negate_term (@{const HOL.disj} $ t1 $ t2) =
+    @{const HOL.conj} $ negate_term t1 $ negate_term t2
+  | negate_term (@{const Not} $ t) = t
+  | negate_term t = @{const Not} $ t
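+(* For example, applied to "ALL x. p x --> q x" this yields
+   "EX x. p x & ~ q x": the negation is pushed inward through quantifiers and
+   connectives rather than being wrapped in a single "Not". *)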
+
+datatype ('a, 'b, 'c, 'd, 'e) raw_step =
+  Definition of 'a * 'b * 'c |
+  Inference of 'a * 'd * 'e list
+
+fun raw_step_number (Definition (num, _, _)) = num
+  | raw_step_number (Inference (num, _, _)) = num
+
+(**** PARSING OF TSTP FORMAT ****)
+
+(* Strings enclosed in single quotes, e.g. filenames. *)
+val scan_quoted = $$ "'" |-- Scan.repeat (~$$ "'") --| $$ "'" >> implode;
+
+val scan_dollar_name =
+  Scan.repeat ($$ "$") -- Symbol.scan_id >> (fn (ss, s) => implode ss ^ s)
+
+fun repair_name _ "$true" = "c_True"
+  | repair_name _ "$false" = "c_False"
+  | repair_name _ "$$e" = "c_equal" (* seen in Vampire proofs *)
+  | repair_name _ "equal" = "c_equal" (* needed by SPASS? *)
+  | repair_name pool s =
+    case Symtab.lookup pool s of
+      SOME s' => s'
+    | NONE =>
+      if String.isPrefix "sQ" s andalso String.isSuffix "_eqProxy" s then
+        "c_equal" (* seen in Vampire proofs *)
+      else
+        s
+(* Generalized first-order terms, which include file names, numbers, etc. *)
+val parse_potential_integer =
+  (scan_dollar_name || scan_quoted) >> K NONE
+  || scan_integer >> SOME
+fun parse_annotation x =
+  ((parse_potential_integer ::: Scan.repeat ($$ " " |-- parse_potential_integer)
+    >> map_filter I) -- Scan.optional parse_annotation []
+     >> uncurry (union (op =))
+   || $$ "(" |-- parse_annotations --| $$ ")"
+   || $$ "[" |-- parse_annotations --| $$ "]") x
+and parse_annotations x =
+  (Scan.optional (parse_annotation
+                  ::: Scan.repeat ($$ "," |-- parse_annotation)) []
+   >> (fn numss => fold (union (op =)) numss [])) x
+
+(* Vampire proof lines sometimes contain needless information such as "(0:3)",
+   which can be hard to disambiguate from function application in an LL(1)
+   parser. As a workaround, we extend the TPTP term syntax with such detritus
+   and ignore it. *)
+fun parse_vampire_detritus x =
+  (scan_integer |-- $$ ":" --| scan_integer >> K []) x
+
+fun parse_term pool x =
+  ((scan_dollar_name >> repair_name pool)
+    -- Scan.optional ($$ "(" |-- (parse_vampire_detritus || parse_terms pool)
+                      --| $$ ")") []
+    --| Scan.optional ($$ "(" |-- parse_vampire_detritus --| $$ ")") []
+   >> ATerm) x
+and parse_terms pool x =
+  (parse_term pool ::: Scan.repeat ($$ "," |-- parse_term pool)) x
+
+fun parse_atom pool =
+  parse_term pool -- Scan.option (Scan.option ($$ "!") --| $$ "="
+                                  -- parse_term pool)
+  >> (fn (u1, NONE) => AAtom u1
+       | (u1, SOME (NONE, u2)) => AAtom (ATerm ("c_equal", [u1, u2]))
+       | (u1, SOME (SOME _, u2)) =>
+         mk_anot (AAtom (ATerm ("c_equal", [u1, u2]))))
+
+fun fo_term_head (ATerm (s, _)) = s
+
+(* TPTP formulas are fully parenthesized, so we don't need to worry about
+   operator precedence. *)
+fun parse_formula pool x =
+  (($$ "(" |-- parse_formula pool --| $$ ")"
+    || ($$ "!" >> K AForall || $$ "?" >> K AExists)
+       --| $$ "[" -- parse_terms pool --| $$ "]" --| $$ ":"
+       -- parse_formula pool
+       >> (fn ((q, ts), phi) => AQuant (q, map fo_term_head ts, phi))
+    || $$ "~" |-- parse_formula pool >> mk_anot
+    || parse_atom pool)
+   -- Scan.option ((Scan.this_string "=>" >> K AImplies
+                    || Scan.this_string "<=>" >> K AIff
+                    || Scan.this_string "<~>" >> K ANotIff
+                    || Scan.this_string "<=" >> K AIf
+                    || $$ "|" >> K AOr || $$ "&" >> K AAnd)
+                   -- parse_formula pool)
+   >> (fn (phi1, NONE) => phi1
+        | (phi1, SOME (c, phi2)) => mk_aconn c (phi1, phi2))) x
+
+val parse_tstp_extra_arguments =
+  Scan.optional ($$ "," |-- parse_annotation
+                 --| Scan.option ($$ "," |-- parse_annotations)) []
+
+(* Syntax: (fof|cnf)\(<num>, <formula_role>, <formula> <extra_arguments>\).
+   The <num> could be an identifier, but we assume integers. *)
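+(* For illustration (hypothetical data), a line such as
+     fof(42, plain, (p(X) | ~q(X)), inference(resolution, [], [12, 17])).
+   should be parsed as an Inference step numbered 42 whose dependencies
+   include steps 12 and 17. *)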
+fun parse_tstp_line pool =
+  ((Scan.this_string "fof" || Scan.this_string "cnf") -- $$ "(")
+    |-- scan_integer --| $$ "," -- Symbol.scan_id --| $$ ","
+    -- parse_formula pool -- parse_tstp_extra_arguments --| $$ ")" --| $$ "."
+   >> (fn (((num, role), phi), deps) =>
+          case role of
+            "definition" =>
+            (case phi of
+               AConn (AIff, [phi1 as AAtom _, phi2]) =>
+               Definition (num, phi1, phi2)
+             | AAtom (ATerm ("c_equal", _)) =>
+               Inference (num, phi, deps) (* Vampire's equality proxy axiom *)
+             | _ => raise Fail "malformed definition")
+          | _ => Inference (num, phi, deps))
+
+(**** PARSING OF VAMPIRE OUTPUT ****)
+
+(* Syntax: <num>. <formula> <annotation> *)
+fun parse_vampire_line pool =
+  scan_integer --| $$ "." -- parse_formula pool -- parse_annotation
+  >> (fn ((num, phi), deps) => Inference (num, phi, deps))
+
+(**** PARSING OF SPASS OUTPUT ****)
+
+(* SPASS returns clause references of the form "x.y". We ignore "y", whose role
+   is not clear anyway. *)
+val parse_dot_name = scan_integer --| $$ "." --| scan_integer
+
+val parse_spass_annotations =
+  Scan.optional ($$ ":" |-- Scan.repeat (parse_dot_name
+                                         --| Scan.option ($$ ","))) []
+
+(* It is not clear why some literals are followed by sequences of stars and/or
+   pluses. We ignore them. *)
+fun parse_decorated_atom pool =
+  parse_atom pool --| Scan.repeat ($$ "*" || $$ "+" || $$ " ")
+
+fun mk_horn ([], []) = AAtom (ATerm ("c_False", []))
+  | mk_horn ([], pos_lits) = foldr1 (mk_aconn AOr) pos_lits
+  | mk_horn (neg_lits, []) = mk_anot (foldr1 (mk_aconn AAnd) neg_lits)
+  | mk_horn (neg_lits, pos_lits) =
+    mk_aconn AImplies (foldr1 (mk_aconn AAnd) neg_lits,
+                       foldr1 (mk_aconn AOr) pos_lits)
+
+fun parse_horn_clause pool =
+  Scan.repeat (parse_decorated_atom pool) --| $$ "|" --| $$ "|"
+    -- Scan.repeat (parse_decorated_atom pool) --| $$ "-" --| $$ ">"
+    -- Scan.repeat (parse_decorated_atom pool)
+  >> (mk_horn o apfst (op @))
+
+(* Syntax: <num>[0:<inference><annotations>]
+   <atoms> || <atoms> -> <atoms>. *)
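+(* For illustration (hypothetical data), a line such as
+     17[0:Res:12.0,15.0] p(U)* || -> q(U).
+   should yield an Inference step numbered 17 depending on steps 12 and 15. *)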
+fun parse_spass_line pool =
+  scan_integer --| $$ "[" --| $$ "0" --| $$ ":" --| Symbol.scan_id
+    -- parse_spass_annotations --| $$ "]" -- parse_horn_clause pool --| $$ "."
+  >> (fn ((num, deps), u) => Inference (num, u, deps))
+
+fun parse_line pool =
+  parse_tstp_line pool || parse_vampire_line pool || parse_spass_line pool
+fun parse_lines pool = Scan.repeat1 (parse_line pool)
+fun parse_proof pool =
+  fst o Scan.finite Symbol.stopper
+            (Scan.error (!! (fn _ => raise Fail "unrecognized ATP output")
+                            (parse_lines pool)))
+  o explode o strip_spaces_except_between_ident_chars
+
+(**** INTERPRETATION OF TSTP SYNTAX TREES ****)
+
+exception FO_TERM of string fo_term list
+exception FORMULA of (string, string fo_term) formula list
+exception SAME of unit
+
+(* Type variables are given the basic sort "HOL.type". Some will later be
+   constrained by information from type literals, or by type inference. *)
+fun type_from_fo_term tfrees (u as ATerm (a, us)) =
+  let val Ts = map (type_from_fo_term tfrees) us in
+    case strip_prefix_and_unascii type_const_prefix a of
+      SOME b => Type (invert_const b, Ts)
+    | NONE =>
+      if not (null us) then
+        raise FO_TERM [u]  (* only "tconst"s have type arguments *)
+      else case strip_prefix_and_unascii tfree_prefix a of
+        SOME b =>
+        let val s = "'" ^ b in
+          TFree (s, AList.lookup (op =) tfrees s |> the_default HOLogic.typeS)
+        end
+      | NONE =>
+        case strip_prefix_and_unascii tvar_prefix a of
+          SOME b => TVar (("'" ^ b, 0), HOLogic.typeS)
+        | NONE =>
+          (* Variable from the ATP, say "X1" *)
+          Type_Infer.param 0 (a, HOLogic.typeS)
+  end
+
+(* Type class literal applied to a type. Returns triple of polarity, class,
+   type. *)
+fun type_constraint_from_term pos tfrees (u as ATerm (a, us)) =
+  case (strip_prefix_and_unascii class_prefix a,
+        map (type_from_fo_term tfrees) us) of
+    (SOME b, [T]) => (pos, b, T)
+  | _ => raise FO_TERM [u]
+
+(** Accumulate type constraints in a formula: negative type literals **)
+fun add_var (key, z) = Vartab.map_default (key, []) (cons z)
+fun add_type_constraint (false, cl, TFree (a, _)) = add_var ((a, ~1), cl)
+  | add_type_constraint (false, cl, TVar (ix, _)) = add_var (ix, cl)
+  | add_type_constraint _ = I
+
+fun repair_atp_variable_name f s =
+  let
+    fun subscript_name s n = s ^ nat_subscript n
+    val s = String.map f s
+  in
+    case space_explode "_" s of
+      [_] => (case take_suffix Char.isDigit (String.explode s) of
+                (cs1 as _ :: _, cs2 as _ :: _) =>
+                subscript_name (String.implode cs1)
+                               (the (Int.fromString (String.implode cs2)))
+              | (_, _) => s)
+    | [s1, s2] => (case Int.fromString s2 of
+                     SOME n => subscript_name s1 n
+                   | NONE => s)
+    | _ => s
+  end
+
+(* First-order translation. No types are known for variables. "HOLogic.typeT"
+   should allow them to be inferred. *)
+fun raw_term_from_pred thy full_types tfrees =
+  let
+    fun aux opt_T extra_us u =
+      case u of
+        ATerm ("hBOOL", [u1]) => aux (SOME @{typ bool}) [] u1
+      | ATerm ("hAPP", [u1, u2]) => aux opt_T (u2 :: extra_us) u1
+      | ATerm (a, us) =>
+        if a = type_wrapper_name then
+          case us of
+            [typ_u, term_u] =>
+            aux (SOME (type_from_fo_term tfrees typ_u)) extra_us term_u
+          | _ => raise FO_TERM us
+        else case strip_prefix_and_unascii const_prefix a of
+          SOME "equal" =>
+          list_comb (Const (@{const_name HOL.eq}, HOLogic.typeT),
+                     map (aux NONE []) us)
+        | SOME b =>
+          let
+            val c = invert_const b
+            val num_type_args = num_type_args thy c
+            val (type_us, term_us) =
+              chop (if full_types then 0 else num_type_args) us
+            (* Extra args from "hAPP" come after any arguments given directly to
+               the constant. *)
+            val term_ts = map (aux NONE []) term_us
+            val extra_ts = map (aux NONE []) extra_us
+            val t =
+              Const (c, if full_types then
+                          case opt_T of
+                            SOME T => map fastype_of term_ts ---> T
+                          | NONE =>
+                            if num_type_args = 0 then
+                              Sign.const_instance thy (c, [])
+                            else
+                              raise Fail ("no type information for " ^ quote c)
+                        else
+                          Sign.const_instance thy (c,
+                              map (type_from_fo_term tfrees) type_us))
+          in list_comb (t, term_ts @ extra_ts) end
+        | NONE => (* a free or schematic variable *)
+          let
+            val ts = map (aux NONE []) (us @ extra_us)
+            val T = map fastype_of ts ---> HOLogic.typeT
+            val t =
+              case strip_prefix_and_unascii fixed_var_prefix a of
+                SOME b => Free (b, T)
+              | NONE =>
+                case strip_prefix_and_unascii schematic_var_prefix a of
+                  SOME b => Var ((b, 0), T)
+                | NONE =>
+                  if is_tptp_variable a then
+                    Var ((repair_atp_variable_name Char.toLower a, 0), T)
+                  else
+                    (* Skolem constants? *)
+                    Var ((repair_atp_variable_name Char.toUpper a, 0), T)
+          in list_comb (t, ts) end
+  in aux (SOME HOLogic.boolT) [] end
+
+fun term_from_pred thy full_types tfrees pos (u as ATerm (s, _)) =
+  if String.isPrefix class_prefix s then
+    add_type_constraint (type_constraint_from_term pos tfrees u)
+    #> pair @{const True}
+  else
+    pair (raw_term_from_pred thy full_types tfrees u)
+
+val combinator_table =
+  [(@{const_name COMBI}, @{thm COMBI_def_raw}),
+   (@{const_name COMBK}, @{thm COMBK_def_raw}),
+   (@{const_name COMBB}, @{thm COMBB_def_raw}),
+   (@{const_name COMBC}, @{thm COMBC_def_raw}),
+   (@{const_name COMBS}, @{thm COMBS_def_raw})]
+
+fun uncombine_term (t1 $ t2) = betapply (pairself uncombine_term (t1, t2))
+  | uncombine_term (Abs (s, T, t')) = Abs (s, T, uncombine_term t')
+  | uncombine_term (t as Const (x as (s, _))) =
+    (case AList.lookup (op =) combinator_table s of
+       SOME thm => thm |> prop_of |> specialize_type @{theory} x |> Logic.dest_equals |> snd
+     | NONE => t)
+  | uncombine_term t = t
+
+(* Update schematic type variables with detected sort constraints. It's not
+   totally clear when this code is necessary. *)
+fun repair_tvar_sorts (t, tvar_tab) =
+  let
+    fun do_type (Type (a, Ts)) = Type (a, map do_type Ts)
+      | do_type (TVar (xi, s)) =
+        TVar (xi, the_default s (Vartab.lookup tvar_tab xi))
+      | do_type (TFree z) = TFree z
+    fun do_term (Const (a, T)) = Const (a, do_type T)
+      | do_term (Free (a, T)) = Free (a, do_type T)
+      | do_term (Var (xi, T)) = Var (xi, do_type T)
+      | do_term (t as Bound _) = t
+      | do_term (Abs (a, T, t)) = Abs (a, do_type T, do_term t)
+      | do_term (t1 $ t2) = do_term t1 $ do_term t2
+  in t |> not (Vartab.is_empty tvar_tab) ? do_term end
+
+fun quantify_over_free quant_s free_s body_t =
+  case Term.add_frees body_t [] |> filter (curry (op =) free_s o fst) of
+    [] => body_t
+  | frees as (_, free_T) :: _ =>
+    Abs (free_s, free_T, fold (curry abstract_over) (map Free frees) body_t)
+
+(* Interpret an ATP formula as a HOL term, extracting sort constraints as they
+   appear in the formula. *)
+fun prop_from_formula thy full_types tfrees phi =
+  let
+    fun do_formula pos phi =
+      case phi of
+        AQuant (_, [], phi) => do_formula pos phi
+      | AQuant (q, x :: xs, phi') =>
+        do_formula pos (AQuant (q, xs, phi'))
+        #>> quantify_over_free (case q of
+                                  AForall => @{const_name All}
+                                | AExists => @{const_name Ex})
+                               (repair_atp_variable_name Char.toLower x)
+      | AConn (ANot, [phi']) => do_formula (not pos) phi' #>> s_not
+      | AConn (c, [phi1, phi2]) =>
+        do_formula (pos |> c = AImplies ? not) phi1
+        ##>> do_formula pos phi2
+        #>> (case c of
+               AAnd => s_conj
+             | AOr => s_disj
+             | AImplies => s_imp
+             | AIf => s_imp o swap
+             | AIff => s_iff
+             | ANotIff => s_not o s_iff)
+      | AAtom tm => term_from_pred thy full_types tfrees pos tm
+      | _ => raise FORMULA [phi]
+  in repair_tvar_sorts (do_formula true phi Vartab.empty) end
+
+fun check_formula ctxt =
+  Type_Infer.constrain HOLogic.boolT
+  #> Syntax.check_term (ProofContext.set_mode ProofContext.mode_schematic ctxt)
+
+
+(**** TRANSLATION OF TSTP FILES TO ISAR PROOFS ****)
+
+fun unvarify_term (Var ((s, 0), T)) = Free (s, T)
+  | unvarify_term t = raise TERM ("unvarify_term: non-Var", [t])
+
+fun decode_line full_types tfrees (Definition (num, phi1, phi2)) ctxt =
+    let
+      val thy = ProofContext.theory_of ctxt
+      val t1 = prop_from_formula thy full_types tfrees phi1
+      val vars = snd (strip_comb t1)
+      val frees = map unvarify_term vars
+      val unvarify_args = subst_atomic (vars ~~ frees)
+      val t2 = prop_from_formula thy full_types tfrees phi2
+      val (t1, t2) =
+        HOLogic.eq_const HOLogic.typeT $ t1 $ t2
+        |> unvarify_args |> uncombine_term |> check_formula ctxt
+        |> HOLogic.dest_eq
+    in
+      (Definition (num, t1, t2),
+       fold Variable.declare_term (maps OldTerm.term_frees [t1, t2]) ctxt)
+    end
+  | decode_line full_types tfrees (Inference (num, u, deps)) ctxt =
+    let
+      val thy = ProofContext.theory_of ctxt
+      val t = u |> prop_from_formula thy full_types tfrees
+                |> uncombine_term |> check_formula ctxt
+    in
+      (Inference (num, t, deps),
+       fold Variable.declare_term (OldTerm.term_frees t) ctxt)
+    end
+fun decode_lines ctxt full_types tfrees lines =
+  fst (fold_map (decode_line full_types tfrees) lines ctxt)
+
+fun is_same_inference _ (Definition _) = false
+  | is_same_inference t (Inference (_, t', _)) = t aconv t'
+
+(* No "real" literals means only type information (tfree_tcs, clsrel, or
+   clsarity). *)
+val is_only_type_information = curry (op aconv) HOLogic.true_const
+
+fun replace_one_dep (old, new) dep = if dep = old then new else [dep]
+fun replace_deps_in_line _ (line as Definition _) = line
+  | replace_deps_in_line p (Inference (num, t, deps)) =
+    Inference (num, t, fold (union (op =) o replace_one_dep p) deps [])
+
+(* Discard axioms; consolidate adjacent lines that prove the same formula, since
+   they differ only in type information. *)
+fun add_line _ _ (line as Definition _) lines = line :: lines
+  | add_line conjecture_shape axiom_names (Inference (num, t, [])) lines =
+    (* No dependencies: axiom, conjecture, or (for Vampire) internal axioms or
+       definitions. *)
+    if is_axiom_number axiom_names num then
+      (* Axioms are not proof lines. *)
+      if is_only_type_information t then
+        map (replace_deps_in_line (num, [])) lines
+      (* Is there a repetition? If so, replace later line by earlier one. *)
+      else case take_prefix (not o is_same_inference t) lines of
+        (_, []) => lines (*no repetition of proof line*)
+      | (pre, Inference (num', _, _) :: post) =>
+        pre @ map (replace_deps_in_line (num', [num])) post
+    else if is_conjecture_number conjecture_shape num then
+      Inference (num, negate_term t, []) :: lines
+    else
+      map (replace_deps_in_line (num, [])) lines
+  | add_line _ _ (Inference (num, t, deps)) lines =
+    (* Type information will be deleted later; skip repetition test. *)
+    if is_only_type_information t then
+      Inference (num, t, deps) :: lines
+    (* Is there a repetition? If so, replace later line by earlier one. *)
+    else case take_prefix (not o is_same_inference t) lines of
+      (* FIXME: Doesn't this code risk conflating proofs involving different
+         types? *)
+       (_, []) => Inference (num, t, deps) :: lines
+     | (pre, Inference (num', t', _) :: post) =>
+       Inference (num, t', deps) ::
+       pre @ map (replace_deps_in_line (num', [num])) post
+
+(* Recursively delete empty lines (type information) from the proof. *)
+fun add_nontrivial_line (Inference (num, t, [])) lines =
+    if is_only_type_information t then delete_dep num lines
+    else Inference (num, t, []) :: lines
+  | add_nontrivial_line line lines = line :: lines
+and delete_dep num lines =
+  fold_rev add_nontrivial_line (map (replace_deps_in_line (num, [])) lines) []
+
+(* ATPs sometimes reuse free variable names in the strangest ways. Removing
+   offending lines often does the trick. *)
+fun is_bad_free frees (Free x) = not (member (op =) frees x)
+  | is_bad_free _ _ = false
+
+(* Vampire is keen on producing these. *)
+fun is_trivial_formula (@{const Not} $ (Const (@{const_name HOL.eq}, _)
+                                        $ t1 $ t2)) = (t1 aconv t2)
+  | is_trivial_formula _ = false
+
+fun add_desired_line _ _ _ _ (line as Definition (num, _, _)) (j, lines) =
+    (j, line :: map (replace_deps_in_line (num, [])) lines)
+  | add_desired_line isar_shrink_factor conjecture_shape axiom_names frees
+                     (Inference (num, t, deps)) (j, lines) =
+    (j + 1,
+     if is_axiom_number axiom_names num orelse
+        is_conjecture_number conjecture_shape num orelse
+        (not (is_only_type_information t) andalso
+         null (Term.add_tvars t []) andalso
+         not (exists_subterm (is_bad_free frees) t) andalso
+         not (is_trivial_formula t) andalso
+         (null lines orelse (* last line must be kept *)
+          (length deps >= 2 andalso j mod isar_shrink_factor = 0))) then
+       Inference (num, t, deps) :: lines  (* keep line *)
+     else
+       map (replace_deps_in_line (num, deps)) lines)  (* drop line *)
+
+(** EXTRACTING LEMMAS **)
+
+(* Like "split_line", but ignores "\n" that follow a comma (as in SNARK's
+   output). *)
+val split_proof_lines =
+  let
+    fun aux [] [] = []
+      | aux line [] = [implode (rev line)]
+      | aux line ("," :: "\n" :: rest) = aux ("," :: line) rest
+      | aux line ("\n" :: rest) = aux line [] @ aux [] rest
+      | aux line (s :: rest) = aux (s :: line) rest
+  in aux [] o explode end
+
+(* A list consisting of the first number in each line is returned. For TSTP,
+   interesting lines have the form "fof(108, axiom, ...)", where the number
+   (108) is extracted. For SPASS, lines have the form "108[0:Inp] ...", where
+   the first number (108) is extracted. For Vampire, we look for
+   "108. ... [input]". *)
+fun used_facts_in_atp_proof axiom_names atp_proof =
+  let
+    fun axiom_names_at_index num =
+      let val j = Int.fromString num |> the_default ~1 in
+        if is_axiom_number axiom_names j then Vector.sub (axiom_names, j - 1)
+        else []
+      end
+    val tokens_of =
+      String.tokens (fn c => not (Char.isAlphaNum c) andalso c <> #"_")
+    fun do_line (tag :: num :: "axiom" :: (rest as _ :: _)) =
+        if tag = "cnf" orelse tag = "fof" then
+          (case strip_prefix_and_unascii axiom_prefix (List.last rest) of
+             SOME name =>
+             if member (op =) rest "file" then
+               ([(name, name |> find_first_in_list_vector axiom_names |> the)]
+                handle Option.Option =>
+                       error ("No such fact: " ^ quote name ^ "."))
+             else
+               axiom_names_at_index num
+           | NONE => axiom_names_at_index num)
+        else
+          []
+      | do_line (num :: "0" :: "Inp" :: _) = axiom_names_at_index num
+      | do_line (num :: rest) =
+        (case List.last rest of "input" => axiom_names_at_index num | _ => [])
+      | do_line _ = []
+  in atp_proof |> split_proof_lines |> maps (do_line o tokens_of) end
+
+val indent_size = 2
+val no_label = ("", ~1)
+
+val raw_prefix = "X"
+val assum_prefix = "A"
+val fact_prefix = "F"
+
+fun string_for_label (s, num) = s ^ string_of_int num
+
+fun metis_using [] = ""
+  | metis_using ls =
+    "using " ^ space_implode " " (map string_for_label ls) ^ " "
+fun metis_apply _ 1 = "by "
+  | metis_apply 1 _ = "apply "
+  | metis_apply i _ = "prefer " ^ string_of_int i ^ " apply "
+fun metis_name full_types = if full_types then "metisFT" else "metis"
+fun metis_call full_types [] = metis_name full_types
+  | metis_call full_types ss =
+    "(" ^ metis_name full_types ^ " " ^ space_implode " " ss ^ ")"
+fun metis_command full_types i n (ls, ss) =
+  metis_using ls ^ metis_apply i n ^ metis_call full_types ss
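+(* For example (with hypothetical fact names),
+     metis_command false 1 1 ([("F", 1)], ["foo", "bar"])
+   yields "using F1 by (metis foo bar)"; with i = 2 and n = 3, the "by" is
+   replaced by "prefer 2 apply". *)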
+fun metis_line full_types i n ss =
+  "Try this command: " ^
+  Markup.markup Markup.sendback (metis_command full_types i n ([], ss)) ^ "."
+fun minimize_line _ [] = ""
+  | minimize_line minimize_command ss =
+    case minimize_command ss of
+      "" => ""
+    | command =>
+      "\nTo minimize the number of lemmas, try this: " ^
+      Markup.markup Markup.sendback command ^ "."
+
+fun used_facts axiom_names =
+  used_facts_in_atp_proof axiom_names
+  #> List.partition (curry (op =) Chained o snd)
+  #> pairself (sort_distinct (string_ord o pairself fst))
+
+fun metis_proof_text (full_types, minimize_command, atp_proof, axiom_names,
+                      goal, i) =
+  let
+    val (chained_lemmas, other_lemmas) = used_facts axiom_names atp_proof
+    val n = Logic.count_prems (prop_of goal)
+  in
+    (metis_line full_types i n (map fst other_lemmas) ^
+     minimize_line minimize_command (map fst (other_lemmas @ chained_lemmas)),
+     other_lemmas @ chained_lemmas)
+  end
+
+(** Isar proof construction and manipulation **)
+
+fun merge_fact_sets (ls1, ss1) (ls2, ss2) =
+  (union (op =) ls1 ls2, union (op =) ss1 ss2)
+
+type label = string * int
+type facts = label list * string list
+
+datatype qualifier = Show | Then | Moreover | Ultimately
+
+datatype step =
+  Fix of (string * typ) list |
+  Let of term * term |
+  Assume of label * term |
+  Have of qualifier list * label * term * byline
+and byline =
+  ByMetis of facts |
+  CaseSplit of step list list * facts
+
+fun smart_case_split [] facts = ByMetis facts
+  | smart_case_split proofs facts = CaseSplit (proofs, facts)
+
+fun add_fact_from_dep axiom_names num =
+  if is_axiom_number axiom_names num then
+    apsnd (union (op =) (map fst (Vector.sub (axiom_names, num - 1))))
+  else
+    apfst (insert (op =) (raw_prefix, num))
+
+fun forall_of v t = HOLogic.all_const (fastype_of v) $ lambda v t
+fun forall_vars t = fold_rev forall_of (map Var (Term.add_vars t [])) t
+
+fun step_for_line _ _ (Definition (_, t1, t2)) = Let (t1, t2)
+  | step_for_line _ _ (Inference (num, t, [])) = Assume ((raw_prefix, num), t)
+  | step_for_line axiom_names j (Inference (num, t, deps)) =
+    Have (if j = 1 then [Show] else [], (raw_prefix, num),
+          forall_vars t,
+          ByMetis (fold (add_fact_from_dep axiom_names) deps ([], [])))
+
+fun proof_from_atp_proof pool ctxt full_types tfrees isar_shrink_factor
+                         atp_proof conjecture_shape axiom_names params frees =
+  let
+    val lines =
+      atp_proof ^ "$" (* the $ sign acts as a sentinel (FIXME: needed?) *)
+      |> parse_proof pool
+      |> sort (int_ord o pairself raw_step_number)
+      |> decode_lines ctxt full_types tfrees
+      |> rpair [] |-> fold_rev (add_line conjecture_shape axiom_names)
+      |> rpair [] |-> fold_rev add_nontrivial_line
+      |> rpair (0, []) |-> fold_rev (add_desired_line isar_shrink_factor
+                                             conjecture_shape axiom_names frees)
+      |> snd
+  in
+    (if null params then [] else [Fix params]) @
+    map2 (step_for_line axiom_names) (length lines downto 1) lines
+  end
+
+(* When redirecting proofs, we keep information about the labels seen so far in
+   the "backpatches" data structure. The first component indicates which facts
+   should be associated with forthcoming proof steps. The second component is a
+   pair ("assum_ls", "drop_ls"), where "assum_ls" are the labels that should
+   become assumptions and "drop_ls" are the labels that should be dropped in a
+   case split. *)
+type backpatches = (label * facts) list * (label list * label list)
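+(* Schematic example: a value such as
+     ([(l1, ([l2], ["some_fact"]))], ([l3], [l4]))
+   records that the forthcoming step labeled l1 should additionally use label
+   l2 and the (hypothetical) fact "some_fact", that l3 should become an
+   assumption, and that l4 should be dropped in a case split. *)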
+
+fun used_labels_of_step (Have (_, _, _, by)) =
+    (case by of
+       ByMetis (ls, _) => ls
+     | CaseSplit (proofs, (ls, _)) =>
+       fold (union (op =) o used_labels_of) proofs ls)
+  | used_labels_of_step _ = []
+and used_labels_of proof = fold (union (op =) o used_labels_of_step) proof []
+
+fun new_labels_of_step (Fix _) = []
+  | new_labels_of_step (Let _) = []
+  | new_labels_of_step (Assume (l, _)) = [l]
+  | new_labels_of_step (Have (_, l, _, _)) = [l]
+val new_labels_of = maps new_labels_of_step
+
+val join_proofs =
+  let
+    fun aux _ [] = NONE
+      | aux proof_tail (proofs as (proof1 :: _)) =
+        if exists null proofs then
+          NONE
+        else if forall (curry (op =) (hd proof1) o hd) (tl proofs) then
+          aux (hd proof1 :: proof_tail) (map tl proofs)
+        else case hd proof1 of
+          Have ([], l, t, _) => (* FIXME: should we really ignore the "by"? *)
+          if forall (fn Have ([], l', t', _) :: _ => (l, t) = (l', t')
+                      | _ => false) (tl proofs) andalso
+             not (exists (member (op =) (maps new_labels_of proofs))
+                         (used_labels_of proof_tail)) then
+            SOME (l, t, map rev proofs, proof_tail)
+          else
+            NONE
+        | _ => NONE
+  in aux [] o map rev end
+
+fun case_split_qualifiers proofs =
+  case length proofs of
+    0 => []
+  | 1 => [Then]
+  | _ => [Ultimately]
+
+fun redirect_proof conjecture_shape hyp_ts concl_t proof =
+  let
+    (* The first pass outputs those steps that are independent of the negated
+       conjecture. The second pass flips the proof by contradiction to obtain a
+       direct proof, introducing case splits when an inference depends on
+       several facts that depend on the negated conjecture. *)
+    fun find_hyp num =
+      nth hyp_ts (index_in_shape num conjecture_shape)
+      handle Subscript =>
+             raise Fail ("Cannot find hypothesis " ^ Int.toString num)
+     val concl_ls = map (pair raw_prefix) (List.last conjecture_shape)
+     val canonicalize_labels =
+       map (fn l => if member (op =) concl_ls l then hd concl_ls else l)
+       #> distinct (op =)
+     fun first_pass ([], contra) = ([], contra)
+       | first_pass ((step as Fix _) :: proof, contra) =
+         first_pass (proof, contra) |>> cons step
+       | first_pass ((step as Let _) :: proof, contra) =
+         first_pass (proof, contra) |>> cons step
+       | first_pass ((step as Assume (l as (_, num), _)) :: proof, contra) =
+         if member (op =) concl_ls l then
+           first_pass (proof, contra ||> l = hd concl_ls ? cons step)
+         else
+           first_pass (proof, contra) |>> cons (Assume (l, find_hyp num))
+       | first_pass (Have (qs, l, t, ByMetis (ls, ss)) :: proof, contra) =
+         let
+           val ls = canonicalize_labels ls
+           val step = Have (qs, l, t, ByMetis (ls, ss))
+         in
+           if exists (member (op =) (fst contra)) ls then
+             first_pass (proof, contra |>> cons l ||> cons step)
+           else
+             first_pass (proof, contra) |>> cons step
+         end
+       | first_pass _ = raise Fail "malformed proof"
+    val (proof_top, (contra_ls, contra_proof)) =
+      first_pass (proof, (concl_ls, []))
+    val backpatch_label = the_default ([], []) oo AList.lookup (op =) o fst
+    fun backpatch_labels patches ls =
+      fold merge_fact_sets (map (backpatch_label patches) ls) ([], [])
+    fun second_pass end_qs ([], assums, patches) =
+        ([Have (end_qs, no_label, concl_t,
+                ByMetis (backpatch_labels patches (map snd assums)))], patches)
+      | second_pass end_qs (Assume (l, t) :: proof, assums, patches) =
+        second_pass end_qs (proof, (t, l) :: assums, patches)
+      | second_pass end_qs (Have (qs, l, t, ByMetis (ls, ss)) :: proof, assums,
+                            patches) =
+        if member (op =) (snd (snd patches)) l andalso
+           not (member (op =) (fst (snd patches)) l) andalso
+           not (AList.defined (op =) (fst patches) l) then
+          second_pass end_qs (proof, assums, patches ||> apsnd (append ls))
+        else
+          (case List.partition (member (op =) contra_ls) ls of
+             ([contra_l], co_ls) =>
+             if member (op =) qs Show then
+               second_pass end_qs (proof, assums,
+                                   patches |>> cons (contra_l, (co_ls, ss)))
+             else
+               second_pass end_qs
+                           (proof, assums,
+                            patches |>> cons (contra_l, (l :: co_ls, ss)))
+               |>> cons (if member (op =) (fst (snd patches)) l then
+                           Assume (l, negate_term t)
+                         else
+                           Have (qs, l, negate_term t,
+                                 ByMetis (backpatch_label patches l)))
+           | (contra_ls as _ :: _, co_ls) =>
+             let
+               val proofs =
+                 map_filter
+                     (fn l =>
+                         if member (op =) concl_ls l then
+                           NONE
+                         else
+                           let
+                             val drop_ls = filter (curry (op <>) l) contra_ls
+                           in
+                             second_pass []
+                                 (proof, assums,
+                                  patches ||> apfst (insert (op =) l)
+                                          ||> apsnd (union (op =) drop_ls))
+                             |> fst |> SOME
+                           end) contra_ls
+               val (assumes, facts) =
+                 if member (op =) (fst (snd patches)) l then
+                   ([Assume (l, negate_term t)], (l :: co_ls, ss))
+                 else
+                   ([], (co_ls, ss))
+             in
+               (case join_proofs proofs of
+                  SOME (l, t, proofs, proof_tail) =>
+                  Have (case_split_qualifiers proofs @
+                        (if null proof_tail then end_qs else []), l, t,
+                        smart_case_split proofs facts) :: proof_tail
+                | NONE =>
+                  [Have (case_split_qualifiers proofs @ end_qs, no_label,
+                         concl_t, smart_case_split proofs facts)],
+                patches)
+               |>> append assumes
+             end
+           | _ => raise Fail "malformed proof")
+       | second_pass _ _ = raise Fail "malformed proof"
+    val proof_bottom =
+      second_pass [Show] (contra_proof, [], ([], ([], []))) |> fst
+  in proof_top @ proof_bottom end
+
+(* FIXME: Still needed? Probably not. *)
+val kill_duplicate_assumptions_in_proof =
+  let
+    fun relabel_facts subst =
+      apfst (map (fn l => AList.lookup (op =) subst l |> the_default l))
+    fun do_step (step as Assume (l, t)) (proof, subst, assums) =
+        (case AList.lookup (op aconv) assums t of
+           SOME l' => (proof, (l, l') :: subst, assums)
+         | NONE => (step :: proof, subst, (t, l) :: assums))
+      | do_step (Have (qs, l, t, by)) (proof, subst, assums) =
+        (Have (qs, l, t,
+               case by of
+                 ByMetis facts => ByMetis (relabel_facts subst facts)
+               | CaseSplit (proofs, facts) =>
+                 CaseSplit (map do_proof proofs, relabel_facts subst facts)) ::
+         proof, subst, assums)
+      | do_step step (proof, subst, assums) = (step :: proof, subst, assums)
+    and do_proof proof = fold do_step proof ([], [], []) |> #1 |> rev
+  in do_proof end
+
+val then_chain_proof =
+  let
+    fun aux _ [] = []
+      | aux _ ((step as Assume (l, _)) :: proof) = step :: aux l proof
+      | aux l' (Have (qs, l, t, by) :: proof) =
+        (case by of
+           ByMetis (ls, ss) =>
+           Have (if member (op =) ls l' then
+                   (Then :: qs, l, t,
+                    ByMetis (filter_out (curry (op =) l') ls, ss))
+                 else
+                   (qs, l, t, ByMetis (ls, ss)))
+         | CaseSplit (proofs, facts) =>
+           Have (qs, l, t, CaseSplit (map (aux no_label) proofs, facts))) ::
+        aux l proof
+      | aux _ (step :: proof) = step :: aux no_label proof
+  in aux no_label end
+
+fun kill_useless_labels_in_proof proof =
+  let
+    val used_ls = used_labels_of proof
+    fun do_label l = if member (op =) used_ls l then l else no_label
+    fun do_step (Assume (l, t)) = Assume (do_label l, t)
+      | do_step (Have (qs, l, t, by)) =
+        Have (qs, do_label l, t,
+              case by of
+                CaseSplit (proofs, facts) =>
+                CaseSplit (map (map do_step) proofs, facts)
+              | _ => by)
+      | do_step step = step
+  in map do_step proof end
+
+fun prefix_for_depth n = replicate_string (n + 1)
+
+val relabel_proof =
+  let
+    fun aux _ _ _ [] = []
+      | aux subst depth (next_assum, next_fact) (Assume (l, t) :: proof) =
+        if l = no_label then
+          Assume (l, t) :: aux subst depth (next_assum, next_fact) proof
+        else
+          let val l' = (prefix_for_depth depth assum_prefix, next_assum) in
+            Assume (l', t) ::
+            aux ((l, l') :: subst) depth (next_assum + 1, next_fact) proof
+          end
+      | aux subst depth (next_assum, next_fact) (Have (qs, l, t, by) :: proof) =
+        let
+          val (l', subst, next_fact) =
+            if l = no_label then
+              (l, subst, next_fact)
+            else
+              let
+                val l' = (prefix_for_depth depth fact_prefix, next_fact)
+              in (l', (l, l') :: subst, next_fact + 1) end
+          val relabel_facts =
+            apfst (map (fn l =>
+                           case AList.lookup (op =) subst l of
+                             SOME l' => l'
+                           | NONE => raise Fail ("unknown label " ^
+                                                 quote (string_for_label l))))
+          val by =
+            case by of
+              ByMetis facts => ByMetis (relabel_facts facts)
+            | CaseSplit (proofs, facts) =>
+              CaseSplit (map (aux subst (depth + 1) (1, 1)) proofs,
+                         relabel_facts facts)
+        in
+          Have (qs, l', t, by) ::
+          aux subst depth (next_assum, next_fact) proof
+        end
+      | aux subst depth nextp (step :: proof) =
+        step :: aux subst depth nextp proof
+  in aux [] 0 (1, 1) end
+
+fun string_for_proof ctxt full_types i n =
+  let
+    fun fix_print_mode f x =
+      setmp_CRITICAL show_no_free_types true
+          (setmp_CRITICAL show_types true
+               (Print_Mode.setmp (filter (curry (op =) Symbol.xsymbolsN)
+                                         (print_mode_value ())) f)) x
+    fun do_indent ind = replicate_string (ind * indent_size) " "
+    fun do_free (s, T) =
+      maybe_quote s ^ " :: " ^
+      maybe_quote (fix_print_mode (Syntax.string_of_typ ctxt) T)
+    fun do_label l = if l = no_label then "" else string_for_label l ^ ": "
+    fun do_have qs =
+      (if member (op =) qs Moreover then "moreover " else "") ^
+      (if member (op =) qs Ultimately then "ultimately " else "") ^
+      (if member (op =) qs Then then
+         if member (op =) qs Show then "thus" else "hence"
+       else
+         if member (op =) qs Show then "show" else "have")
+    val do_term = maybe_quote o fix_print_mode (Syntax.string_of_term ctxt)
+    fun do_facts (ls, ss) =
+      metis_command full_types 1 1
+                    (ls |> sort_distinct (prod_ord string_ord int_ord),
+                     ss |> sort_distinct string_ord)
+    and do_step ind (Fix xs) =
+        do_indent ind ^ "fix " ^ space_implode " and " (map do_free xs) ^ "\n"
+      | do_step ind (Let (t1, t2)) =
+        do_indent ind ^ "let " ^ do_term t1 ^ " = " ^ do_term t2 ^ "\n"
+      | do_step ind (Assume (l, t)) =
+        do_indent ind ^ "assume " ^ do_label l ^ do_term t ^ "\n"
+      | do_step ind (Have (qs, l, t, ByMetis facts)) =
+        do_indent ind ^ do_have qs ^ " " ^
+        do_label l ^ do_term t ^ " " ^ do_facts facts ^ "\n"
+      | do_step ind (Have (qs, l, t, CaseSplit (proofs, facts))) =
+        space_implode (do_indent ind ^ "moreover\n")
+                      (map (do_block ind) proofs) ^
+        do_indent ind ^ do_have qs ^ " " ^ do_label l ^ do_term t ^ " " ^
+        do_facts facts ^ "\n"
+    and do_steps prefix suffix ind steps =
+      let val s = implode (map (do_step ind) steps) in
+        replicate_string (ind * indent_size - size prefix) " " ^ prefix ^
+        String.extract (s, ind * indent_size,
+                        SOME (size s - ind * indent_size - 1)) ^
+        suffix ^ "\n"
+      end
+    and do_block ind proof = do_steps "{ " " }" (ind + 1) proof
+    (* One-step proofs are pointless; better use the Metis one-liner
+       directly. *)
+    and do_proof [Have (_, _, _, ByMetis _)] = ""
+      | do_proof proof =
+        (if i <> 1 then "prefer " ^ string_of_int i ^ "\n" else "") ^
+        do_indent 0 ^ "proof -\n" ^
+        do_steps "" "" 1 proof ^
+        do_indent 0 ^ (if n <> 1 then "next" else "qed")
+  in do_proof end
+
+fun isar_proof_text (pool, debug, isar_shrink_factor, ctxt, conjecture_shape)
+                    (other_params as (full_types, _, atp_proof, axiom_names,
+                                      goal, i)) =
+  let
+    val (params, hyp_ts, concl_t) = strip_subgoal goal i
+    val frees = fold Term.add_frees (concl_t :: hyp_ts) []
+    val tfrees = fold Term.add_tfrees (concl_t :: hyp_ts) []
+    val n = Logic.count_prems (prop_of goal)
+    val (one_line_proof, lemma_names) = metis_proof_text other_params
+    fun isar_proof_for () =
+      case proof_from_atp_proof pool ctxt full_types tfrees isar_shrink_factor
+                                atp_proof conjecture_shape axiom_names params
+                                frees
+           |> redirect_proof conjecture_shape hyp_ts concl_t
+           |> kill_duplicate_assumptions_in_proof
+           |> then_chain_proof
+           |> kill_useless_labels_in_proof
+           |> relabel_proof
+           |> string_for_proof ctxt full_types i n of
+        "" => "\nNo structured proof available."
+      | proof => "\n\nStructured proof:\n" ^ Markup.markup Markup.sendback proof
+    val isar_proof =
+      if debug then
+        isar_proof_for ()
+      else
+        try isar_proof_for ()
+        |> the_default "\nWarning: The Isar proof construction failed."
+  in (one_line_proof ^ isar_proof, lemma_names) end
+
+fun proof_text isar_proof isar_params other_params =
+  (if isar_proof then isar_proof_text isar_params else metis_proof_text)
+      other_params
+
+end;
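
The label scheme used by relabel_proof above encodes the nesting depth in the label prefix: prefix_for_depth n repeats the prefix n + 1 times, so labels introduced inside a case split cannot clash with those of the enclosing level. A minimal standalone sketch, using the prefixes "a" (assumptions) and "f" (facts) purely for illustration; the actual values come from assum_prefix and fact_prefix defined elsewhere in Sledgehammer:

  (* standalone stand-in for Isabelle's replicate_string *)
  fun replicate_string n s = String.concat (List.tabulate (n, fn _ => s));
  fun prefix_for_depth n = replicate_string (n + 1);

  (* top-level (depth 0) assumption labels: ("a", 1), ("a", 2), ... *)
  val a1 = (prefix_for_depth 0 "a", 1);
  (* fact labels one case split deeper get a doubled prefix: ("ff", 2) *)
  val ff2 = (prefix_for_depth 1 "f", 2);
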
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_translate.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_translate.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -9,6 +9,7 @@
 signature SLEDGEHAMMER_TRANSLATE =
 sig
   type 'a problem = 'a ATP_Problem.problem
+  type fol_formula
 
   val axiom_prefix : string
   val conjecture_prefix : string
@@ -16,9 +17,12 @@
   val class_rel_clause_prefix : string
   val arity_clause_prefix : string
   val tfrees_name : string
+  val prepare_axiom :
+    Proof.context -> (string * 'a) * thm
+    -> term * ((string * 'a) * fol_formula) option
   val prepare_problem :
     Proof.context -> bool -> bool -> bool -> bool -> term list -> term
-    -> ((string * 'a) * thm) list
+    -> (term * ((string * 'a) * fol_formula) option) list
     -> string problem * string Symtab.table * int * (string * 'a) list vector
 end;
 
@@ -246,20 +250,15 @@
     |> map_filter (Option.map snd o make_axiom ctxt false)
   end
 
-fun prepare_formulas ctxt full_types hyp_ts concl_t axiom_pairs =
+fun prepare_axiom ctxt (ax as (_, th)) = (prop_of th, make_axiom ctxt true ax)
+
+fun prepare_formulas ctxt full_types hyp_ts concl_t axioms =
   let
     val thy = ProofContext.theory_of ctxt
-    val axiom_ts = map (prop_of o snd) axiom_pairs
-    val hyp_ts =
-      if null hyp_ts then
-        []
-      else
-        (* Remove existing axioms from the conjecture, as this can dramatically
-           boost an ATP's performance (for some reason). *)
-        let
-          val axiom_table = fold (Termtab.update o rpair ()) axiom_ts
-                                 Termtab.empty
-        in hyp_ts |> filter_out (Termtab.defined axiom_table) end
+    val (axiom_ts, prepared_axioms) = ListPair.unzip axioms
+    (* Remove existing axioms from the conjecture, as this can dramatically
+       boost an ATP's performance (for some reason). *)
+    val hyp_ts = hyp_ts |> filter_out (member (op aconv) axiom_ts)
     val goal_t = Logic.list_implies (hyp_ts, concl_t)
     val is_FO = Meson.is_fol_term thy goal_t
     val subs = tfree_classes_of_terms [goal_t]
@@ -267,8 +266,7 @@
     val tycons = type_consts_of_terms thy (goal_t :: axiom_ts)
     (* TFrees in the conjecture; TVars in the axioms *)
     val conjectures = make_conjecture ctxt (hyp_ts @ [concl_t])
-    val (axiom_names, axioms) =
-      ListPair.unzip (map_filter (make_axiom ctxt true) axiom_pairs)
+    val (axiom_names, axioms) = ListPair.unzip (map_filter I prepared_axioms)
     val helper_facts = get_helper_facts ctxt is_FO full_types conjectures axioms
     val (supers', arity_clauses) = make_arity_clauses thy tycons supers
     val class_rel_clauses = make_class_rel_clauses thy subs supers'
@@ -501,12 +499,12 @@
       (const_table_for_problem explicit_apply problem) problem
 
 fun prepare_problem ctxt readable_names explicit_forall full_types
-                    explicit_apply hyp_ts concl_t axiom_pairs =
+                    explicit_apply hyp_ts concl_t axioms =
   let
     val thy = ProofContext.theory_of ctxt
     val (axiom_names, (conjectures, axioms, helper_facts, class_rel_clauses,
                        arity_clauses)) =
-      prepare_formulas ctxt full_types hyp_ts concl_t axiom_pairs
+      prepare_formulas ctxt full_types hyp_ts concl_t axioms
     val axiom_lines = map (problem_line_for_fact axiom_prefix full_types) axioms
     val helper_lines =
       map (problem_line_for_fact helper_prefix full_types) helper_facts
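
With the interface change above, axiom translation is split from problem assembly: each fact is run through prepare_axiom once, and prepare_problem then consumes the prepared list. A hedged sketch of a caller, assuming an Isabelle/ML session where the structure is named Sledgehammer_Translate and where ctxt, hyp_ts, concl_t and a list named_axioms of ((name, info), thm) pairs are in scope; the boolean arguments follow the order readable_names, explicit_forall, full_types, explicit_apply from the definition above, and the names chosen for the result components are informal guesses from the signature:

  local open Sledgehammer_Translate in
  (* stage 1: translate each fact independently of the rest of the problem *)
  val prepared_axioms = map (prepare_axiom ctxt) named_axioms
  (* stage 2: assemble the ATP problem from the prepared facts *)
  val (problem, pool, offset, axiom_names) =
    prepare_problem ctxt true false false false hyp_ts concl_t prepared_axioms
  end
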
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/async_manager.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -0,0 +1,234 @@
+(*  Title:      HOL/Tools/async_manager.ML
+    Author:     Fabian Immler, TU Muenchen
+    Author:     Makarius
+    Author:     Jasmin Blanchette, TU Muenchen
+
+Central manager for asynchronous diagnosis tool threads.
+*)
+
+signature ASYNC_MANAGER =
+sig
+  val launch :
+    string -> Time.time -> Time.time -> string -> (unit -> string) -> unit
+  val kill_threads : string -> string -> unit
+  val running_threads : string -> string -> unit
+  val thread_messages : string -> string -> int option -> unit
+end;
+
+structure Async_Manager : ASYNC_MANAGER =
+struct
+
+(** preferences **)
+
+val message_store_limit = 20;
+val message_display_limit = 5;
+
+
+(** thread management **)
+
+(* data structures over threads *)
+
+structure Thread_Heap = Heap
+(
+  type elem = Time.time * Thread.thread;
+  fun ord ((a, _), (b, _)) = Time.compare (a, b);
+);
+
+fun lookup_thread xs = AList.lookup Thread.equal xs;
+fun delete_thread xs = AList.delete Thread.equal xs;
+fun update_thread xs = AList.update Thread.equal xs;
+
+
+(* state of thread manager *)
+
+type state =
+  {manager: Thread.thread option,
+   timeout_heap: Thread_Heap.T,
+   active: (Thread.thread * (string * Time.time * Time.time * string)) list,
+   canceling: (Thread.thread * (string * Time.time * string)) list,
+   messages: (string * string) list,
+   store: (string * string) list}
+
+fun make_state manager timeout_heap active canceling messages store : state =
+  {manager = manager, timeout_heap = timeout_heap, active = active,
+   canceling = canceling, messages = messages, store = store}
+
+val global_state = Synchronized.var "async_manager"
+  (make_state NONE Thread_Heap.empty [] [] [] []);
+
+
+(* unregister thread *)
+
+fun unregister message thread =
+  Synchronized.change global_state
+  (fn state as {manager, timeout_heap, active, canceling, messages, store} =>
+    (case lookup_thread active thread of
+      SOME (tool, birth_time, _, desc) =>
+        let
+          val active' = delete_thread thread active;
+          val now = Time.now ()
+          val canceling' = (thread, (tool, now, desc)) :: canceling
+          val message' = desc ^ "\n" ^ message
+          val messages' = (tool, message') :: messages;
+          val store' = (tool, message') ::
+            (if length store <= message_store_limit then store
+             else #1 (chop message_store_limit store));
+        in make_state manager timeout_heap active' canceling' messages' store' end
+    | NONE => state));
+
+
+(* main manager thread -- only one may exist *)
+
+val min_wait_time = Time.fromMilliseconds 300;
+val max_wait_time = Time.fromSeconds 10;
+
+fun replace_all bef aft =
+  let
+    fun aux seen "" = String.implode (rev seen)
+      | aux seen s =
+        if String.isPrefix bef s then
+          aux seen "" ^ aft ^ aux [] (unprefix bef s)
+        else
+          aux (String.sub (s, 0) :: seen) (String.extract (s, 1, NONE))
+  in aux [] end
+
+(* This is a workaround for Proof General's off-by-a-few sendback display bug,
+   whereby "pr" in "proof" is not highlighted. *)
+fun break_into_chunks xs =
+  maps (space_explode "\000" o replace_all "\n\n" "\000" o snd) xs
+
+fun print_new_messages () =
+  case Synchronized.change_result global_state
+         (fn {manager, timeout_heap, active, canceling, messages, store} =>
+             (messages, make_state manager timeout_heap active canceling []
+                                   store)) of
+    [] => ()
+  | msgs as (tool, _) :: _ =>
+    let val ss = break_into_chunks msgs in
+      List.app priority (tool ^ ": " ^ hd ss :: tl ss)
+    end
+
+fun check_thread_manager () = Synchronized.change global_state
+  (fn state as {manager, timeout_heap, active, canceling, messages, store} =>
+    if (case manager of SOME thread => Thread.isActive thread | NONE => false) then state
+    else let val manager = SOME (Toplevel.thread false (fn () =>
+      let
+        fun time_limit timeout_heap =
+          (case try Thread_Heap.min timeout_heap of
+            NONE => Time.+ (Time.now (), max_wait_time)
+          | SOME (time, _) => time);
+
+        (*action: find threads whose timeout is reached, and interrupt canceling threads*)
+        fun action {manager, timeout_heap, active, canceling, messages, store} =
+          let val (timeout_threads, timeout_heap') =
+            Thread_Heap.upto (Time.now (), Thread.self ()) timeout_heap;
+          in
+            if null timeout_threads andalso null canceling then
+              NONE
+            else
+              let
+                val _ = List.app (Simple_Thread.interrupt o #1) canceling
+                val canceling' = filter (Thread.isActive o #1) canceling
+                val state' = make_state manager timeout_heap' active canceling' messages store;
+              in SOME (map #2 timeout_threads, state') end
+          end;
+      in
+        while Synchronized.change_result global_state
+          (fn state as {timeout_heap, active, canceling, messages, store, ...} =>
+            if null active andalso null canceling andalso null messages
+            then (false, make_state NONE timeout_heap active canceling messages store)
+            else (true, state))
+        do
+          (Synchronized.timed_access global_state (SOME o time_limit o #timeout_heap) action
+            |> these
+            |> List.app (unregister "Timed out.\n");
+            print_new_messages ();
+            (*give threads some time to respond to interrupt*)
+            OS.Process.sleep min_wait_time)
+      end))
+    in make_state manager timeout_heap active canceling messages store end)
+
+
+(* register thread *)
+
+fun register tool birth_time death_time desc thread =
+ (Synchronized.change global_state
+    (fn {manager, timeout_heap, active, canceling, messages, store} =>
+      let
+        val timeout_heap' = Thread_Heap.insert (death_time, thread) timeout_heap;
+        val active' = update_thread (thread, (tool, birth_time, death_time, desc)) active;
+        val state' = make_state manager timeout_heap' active' canceling messages store;
+      in state' end);
+  check_thread_manager ())
+
+
+fun launch tool birth_time death_time desc f =
+  (Toplevel.thread true
+       (fn () =>
+           let
+             val self = Thread.self ()
+             val _ = register tool birth_time death_time desc self
+             val message = f ()
+           in unregister message self end);
+   ())
+
+
+(** user commands **)
+
+(* kill threads *)
+
+fun kill_threads tool workers = Synchronized.change global_state
+  (fn {manager, timeout_heap, active, canceling, messages, store} =>
+    let
+      val killing =
+        map_filter (fn (th, (tool', _, _, desc)) =>
+                       if tool' = tool then SOME (th, (tool', Time.now (), desc))
+                       else NONE) active
+      val state' = make_state manager timeout_heap [] (killing @ canceling) messages store;
+      val _ = if null killing then () else priority ("Killed active " ^ workers ^ ".")
+    in state' end);
+
+
+(* running threads *)
+
+fun seconds time = string_of_int (Time.toSeconds time) ^ " s"
+
+fun running_threads tool workers =
+  let
+    val {active, canceling, ...} = Synchronized.value global_state;
+
+    val now = Time.now ();
+    fun running_info (_, (tool', birth_time, death_time, desc)) =
+      if tool' = tool then
+        SOME ("Running: " ^ seconds (Time.- (now, birth_time)) ^ " -- " ^
+              seconds (Time.- (death_time, now)) ^ " to live:\n" ^ desc)
+      else
+        NONE
+    fun canceling_info (_, (tool', death_time, desc)) =
+      if tool' = tool then
+        SOME ("Trying to interrupt thread since " ^
+              seconds (Time.- (now, death_time)) ^ ":\n" ^ desc)
+      else
+        NONE
+    val running =
+      case map_filter running_info active of
+        [] => ["No " ^ workers ^ " running."]
+      | ss => "Running " ^ workers ^ ":" :: ss
+    val interrupting =
+      case map_filter canceling_info canceling of
+        [] => []
+      | ss => "Trying to interrupt the following " ^ workers ^ ":" :: ss
+  in priority (space_implode "\n\n" (running @ interrupting)) end
+
+fun thread_messages tool worker opt_limit =
+  let
+    val limit = the_default message_display_limit opt_limit;
+    val tool_store = Synchronized.value global_state
+                     |> #store |> filter (curry (op =) tool o fst)
+    val header =
+      "Recent " ^ worker ^ " messages" ^
+        (if length tool_store <= limit then ":"
+         else " (" ^ string_of_int limit ^ " displayed):");
+  in List.app priority (header :: break_into_chunks (#1 (chop limit tool_store))) end
+
+end;
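
The ASYNC_MANAGER signature above is the whole public interface: launch registers a worker thread with a birth and death time, and the remaining entries are the user-facing commands. A small usage sketch, assuming a running Isabelle/ML session (launch relies on Toplevel.thread) and with the tool name, timeout and message text invented for the example:

  val birth = Time.now ();
  val death = Time.+ (birth, Time.fromSeconds 60);

  (* run a diagnosis tool in the background; the returned string is the
     message that is printed and stored when the thread finishes *)
  val () =
    Async_Manager.launch "Sledgehammer" birth death "prover run on goal 1"
      (fn () => "Try this command: by (metis foo).");

  (* user commands: list live threads and replay recent messages *)
  val () = Async_Manager.running_threads "Sledgehammer" "provers";
  val () = Async_Manager.thread_messages "Sledgehammer" "prover" NONE;
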
--- a/src/HOL/Tools/cnf_funcs.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/cnf_funcs.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -430,7 +430,7 @@
 				in
 					make_cnf_disj_conj_r OF [thm1, thm2]  (* (x' | (y1 & y2)) = ((x' | y1)' & (x' | y2)') *)
 				end
-			  | make_cnfx_disj_thm (Const (@{const_name Ex}, _) $ x') y' =
+			  | make_cnfx_disj_thm (@{term "Ex::(bool => bool) => bool"} $ x') y' =
 				let
 					val thm1 = inst_thm thy [x', y'] make_cnfx_disj_ex_l   (* ((Ex x') | y') = (Ex (x' | y')) *)
 					val var  = new_free ()
@@ -441,7 +441,7 @@
 				in
 					iff_trans OF [thm1, thm5]  (* ((Ex x') | y') = (Ex v. body') *)
 				end
-			  | make_cnfx_disj_thm x' (Const (@{const_name Ex}, _) $ y') =
+			  | make_cnfx_disj_thm x' (@{term "Ex::(bool => bool) => bool"} $ y') =
 				let
 					val thm1 = inst_thm thy [x', y'] make_cnfx_disj_ex_r   (* (x' | (Ex y')) = (Ex (x' | y')) *)
 					val var  = new_free ()
--- a/src/HOL/Tools/meson.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/HOL/Tools/meson.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -25,7 +25,9 @@
   val depth_prolog_tac: thm list -> tactic
   val gocls: thm list -> thm list
   val skolemize_prems_tac: Proof.context -> thm list -> int -> tactic
-  val MESON: (thm list -> thm list) -> (thm list -> tactic) -> Proof.context -> int -> tactic
+  val MESON:
+    (int -> tactic) -> (thm list -> thm list) -> (thm list -> tactic)
+    -> Proof.context -> int -> tactic
   val best_meson_tac: (thm -> int) -> Proof.context -> int -> tactic
   val safe_best_meson_tac: Proof.context -> int -> tactic
   val depth_meson_tac: Proof.context -> int -> tactic
@@ -315,8 +317,8 @@
   (nf prem) returns a LIST of theorems, we can backtrack to get all combinations.*)
 fun resop nf [prem] = resolve_tac (nf prem) 1;
 
-(*Any need to extend this list with
-  "HOL.type_class","HOL.eq_class","Pure.term"?*)
+(* Any need to extend this list with "HOL.type_class", "HOL.eq_class",
+   and "Pure.term"? *)
 val has_meta_conn = exists_Const (member (op =) ["==", "==>", "=simp=>", "all", "prop"] o #1);
 
 fun apply_skolem_theorem (th, rls) =
@@ -573,7 +575,8 @@
 
 (*Make clauses from a list of theorems, previously Skolemized and put into nnf.
   The resulting clauses are HOL disjunctions.*)
-fun make_clauses_unsorted ths = fold_rev add_clauses ths [];
+fun make_clauses_unsorted ths = fold_rev add_clauses ths []
+handle exn => error (ML_Compiler.exn_message exn) (*###*)
 val make_clauses = sort_clauses o make_clauses_unsorted;
 
 (*Convert a list of clauses (disjunctions) to Horn clauses (contrapositives)*)
@@ -598,20 +601,22 @@
 
 (*Basis of all meson-tactics.  Supplies cltac with clauses: HOL disjunctions.
   Function mkcl converts theorems to clauses.*)
-fun MESON mkcl cltac ctxt i st =
+fun MESON preskolem_tac mkcl cltac ctxt i st =
   SELECT_GOAL
     (EVERY [Object_Logic.atomize_prems_tac 1,
             rtac ccontr 1,
             Subgoal.FOCUS (fn {context = ctxt', prems = negs, ...} =>
-                      EVERY1 [skolemize_prems_tac ctxt negs,
+                      EVERY1 [preskolem_tac,
+                              skolemize_prems_tac ctxt negs,
                               Subgoal.FOCUS (cltac o mkcl o #prems) ctxt']) ctxt 1]) i st
   handle THM _ => no_tac st;    (*probably from make_meta_clause, not first-order*)
 
+
 (** Best-first search versions **)
 
 (*ths is a list of additional clauses (HOL disjunctions) to use.*)
 fun best_meson_tac sizef =
-  MESON make_clauses
+  MESON (K all_tac) make_clauses
     (fn cls =>
          THEN_BEST_FIRST (resolve_tac (gocls cls) 1)
                          (has_fewer_prems 1, sizef)
@@ -625,7 +630,7 @@
 (** Depth-first search version **)
 
 val depth_meson_tac =
-  MESON make_clauses
+  MESON (K all_tac) make_clauses
     (fn cls => EVERY [resolve_tac (gocls cls) 1, depth_prolog_tac (make_horns cls)]);
 
 
@@ -645,7 +650,7 @@
 fun iter_deepen_prolog_tac horns =
     ITER_DEEPEN iter_deepen_limit (has_fewer_prems 1) (prolog_step_tac' horns);
 
-fun iter_deepen_meson_tac ctxt ths = ctxt |> MESON make_clauses
+fun iter_deepen_meson_tac ctxt ths = ctxt |> MESON (K all_tac) make_clauses
   (fn cls =>
     (case (gocls (cls @ ths)) of
       [] => no_tac  (*no goal clauses*)
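
The MESON change above only adds a leading argument: a tactic applied to the focused subgoal before the premises are skolemized, with all existing callers passing (K all_tac), i.e. no preprocessing. A sketch of the two calling styles, assuming the structure is named Meson and that mkcl, cltac and preskolem_tac are placeholders of the types given in the signature above, with ctxt a Proof.context in scope:

  (* behaves exactly like the old MESON mkcl cltac *)
  val tac  = Meson.MESON (K all_tac) mkcl cltac ctxt;
  (* runs preskolem_tac on the focused subgoal before skolemization *)
  val tac' = Meson.MESON preskolem_tac mkcl cltac ctxt;
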
--- a/src/Pure/General/graph.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/General/graph.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -21,7 +21,7 @@
   val maximals: 'a T -> key list
   val subgraph: (key -> bool) -> 'a T -> 'a T
   val get_entry: 'a T -> key -> 'a * (key list * key list)            (*exception UNDEF*)
-  val map_nodes: ('a -> 'b) -> 'a T -> 'b T
+  val map: (key -> 'a -> 'b) -> 'a T -> 'b T
   val get_node: 'a T -> key -> 'a                                     (*exception UNDEF*)
   val map_node: key -> ('a -> 'a) -> 'a T -> 'a T
   val map_node_yield: key -> ('a -> 'b * 'a) -> 'a T -> 'b * 'a T
@@ -114,7 +114,7 @@
 
 (* nodes *)
 
-fun map_nodes f (Graph tab) = Graph (Table.map (fn (i, ps) => (f i, ps)) tab);
+fun map_nodes f (Graph tab) = Graph (Table.map (apfst o f) tab);
 
 fun get_node G = #1 o get_entry G;
 
@@ -161,7 +161,7 @@
 fun del_nodes xs (Graph tab) =
   Graph (tab
     |> fold Table.delete xs
-    |> Table.map (fn (i, (preds, succs)) =>
+    |> Table.map (fn _ => fn (i, (preds, succs)) =>
       (i, (fold remove_key xs preds, fold remove_key xs succs))));
 
 fun del_node x (G as Graph tab) =
@@ -206,13 +206,13 @@
 fun join f (G1 as Graph tab1, G2 as Graph tab2) =
   let fun join_node key ((i1, edges1), (i2, _)) = (f key (i1, i2), edges1) in
     if pointer_eq (G1, G2) then G1
-    else fold add_edge (edges G2) (Graph (Table.join join_node (tab1, Table.map no_edges tab2)))
+    else fold add_edge (edges G2) (Graph (Table.join join_node (tab1, Table.map (K no_edges) tab2)))
   end;
 
 fun gen_merge add eq (G1 as Graph tab1, G2 as Graph tab2) =
   let fun eq_node ((i1, _), (i2, _)) = eq (i1, i2) in
     if pointer_eq (G1, G2) then G1
-    else fold add (edges G2) (Graph (Table.merge eq_node (tab1, Table.map no_edges tab2)))
+    else fold add (edges G2) (Graph (Table.merge eq_node (tab1, Table.map (K no_edges) tab2)))
   end;
 
 fun merge eq GG = gen_merge add_edge eq GG;
@@ -286,6 +286,7 @@
 
 
 (*final declarations of this structure!*)
+val map = map_nodes;
 val fold = fold_graph;
 
 end;
--- a/src/Pure/General/table.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/General/table.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -20,8 +20,7 @@
   exception UNDEF of key
   val empty: 'a table
   val is_empty: 'a table -> bool
-  val map: ('a -> 'b) -> 'a table -> 'b table
-  val map': (key -> 'a -> 'b) -> 'a table -> 'b table
+  val map: (key -> 'a -> 'b) -> 'a table -> 'b table
   val fold: (key * 'b -> 'a -> 'a) -> 'b table -> 'a -> 'a
   val fold_rev: (key * 'b -> 'a -> 'a) -> 'b table -> 'a -> 'a
   val dest: 'a table -> (key * 'a) list
@@ -403,8 +402,7 @@
 
 
 (*final declarations of this structure!*)
-fun map f = map_table (K f);
-val map' = map_table;
+val map = map_table;
 val fold = fold_table;
 val fold_rev = fold_rev_table;
 
--- a/src/Pure/IsaMakefile	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/IsaMakefile	Thu Sep 02 20:44:33 2010 +0200
@@ -230,7 +230,6 @@
   morphism.ML						\
   name.ML						\
   net.ML						\
-  old_goals.ML						\
   old_term.ML						\
   pattern.ML						\
   primitive_defs.ML					\
--- a/src/Pure/Isar/attrib.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/Isar/attrib.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -392,7 +392,8 @@
 (* theory setup *)
 
 val _ = Context.>> (Context.map_theory
- (register_config Unify.trace_bound_value #>
+ (register_config show_question_marks_value #>
+  register_config Unify.trace_bound_value #>
   register_config Unify.search_bound_value #>
   register_config Unify.trace_simp_value #>
   register_config Unify.trace_types_value #>
--- a/src/Pure/Isar/code.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/Isar/code.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -282,7 +282,7 @@
   then NONE
   else thy
     |> (Code_Data.map o apfst)
-        ((map_functions o Symtab.map) (fn ((changed, current), history) =>
+        ((map_functions o Symtab.map) (fn _ => fn ((changed, current), history) =>
           ((false, current),
             if changed then (serial (), current) :: history else history))
         #> map_history_concluded (K true))
--- a/src/Pure/Isar/proof_context.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/Isar/proof_context.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -413,8 +413,7 @@
     SOME (x, i) =>
       (case try Name.dest_skolem x of
         NONE => Pretty.mark Markup.var (Pretty.str s)
-      | SOME x' => Pretty.mark Markup.skolem
-          (Pretty.str (setmp_CRITICAL show_question_marks true Term.string_of_vname (x', i))))
+      | SOME x' => Pretty.mark Markup.skolem (Pretty.str (Term.string_of_vname (x', i))))
   | NONE => Pretty.mark Markup.var (Pretty.str s));
 
 fun plain_markup m _ s = Pretty.mark m (Pretty.str s);
@@ -536,14 +535,16 @@
 fun certify_consts ctxt = Consts.certify (Syntax.pp ctxt) (tsig_of ctxt)
   (not (abbrev_mode ctxt)) (consts_of ctxt);
 
-fun reject_schematic (Var (xi, _)) =
-      error ("Unbound schematic variable: " ^ Term.string_of_vname xi)
-  | reject_schematic (Abs (_, _, t)) = reject_schematic t
-  | reject_schematic (t $ u) = (reject_schematic t; reject_schematic u)
-  | reject_schematic _ = ();
+fun expand_binds ctxt =
+  let
+    val Mode {pattern, schematic, ...} = get_mode ctxt;
 
-fun expand_binds ctxt =
-  let val Mode {pattern, schematic, ...} = get_mode ctxt in
+    fun reject_schematic (t as Var _) =
+          error ("Unbound schematic variable: " ^ Syntax.string_of_term ctxt t)
+      | reject_schematic (Abs (_, _, t)) = reject_schematic t
+      | reject_schematic (t $ u) = (reject_schematic t; reject_schematic u)
+      | reject_schematic _ = ();
+  in
     if pattern then I
     else Variable.expand_binds ctxt #> (if schematic then I else tap reject_schematic)
   end;
@@ -576,9 +577,9 @@
     val _ =
       pattern orelse schematic orelse
         T |> Term.exists_subtype
-          (fn TVar (xi, _) =>
+          (fn T as TVar (xi, _) =>
             not (Type_Infer.is_param xi) andalso
-              error ("Illegal schematic type variable: " ^ Term.string_of_vname xi)
+              error ("Illegal schematic type variable: " ^ Syntax.string_of_typ ctxt T)
           | _ => false)
   in T end;
 
--- a/src/Pure/PIDE/document.scala	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/PIDE/document.scala	Thu Sep 02 20:44:33 2010 +0200
@@ -182,17 +182,15 @@
 
     val init = State().define_version(Version.init, Map()).assign(Version.init.id, Nil)._2
 
-    class Assignment(former_assignment: Map[Command, Exec_ID])
+    case class Assignment(
+      val assignment: Map[Command, Exec_ID],
+      val is_finished: Boolean = false)
     {
-      @volatile private var tmp_assignment = former_assignment
-      private val promise = Future.promise[Map[Command, Exec_ID]]
-      def is_finished: Boolean = promise.is_finished
-      def join: Map[Command, Exec_ID] = promise.join
-      def get_finished: Map[Command, Exec_ID] = promise.get_finished
-      def assign(command_execs: List[(Command, Exec_ID)])
+      def get_finished: Map[Command, Exec_ID] = { require(is_finished); assignment }
+      def assign(command_execs: List[(Command, Exec_ID)]): Assignment =
       {
-        promise.fulfill(tmp_assignment ++ command_execs)
-        tmp_assignment = Map()
+        require(!is_finished)
+        Assignment(assignment ++ command_execs, true)
       }
     }
   }
@@ -212,7 +210,7 @@
       val id = version.id
       if (versions.isDefinedAt(id) || disposed(id)) fail
       copy(versions = versions + (id -> version),
-        assignments = assignments + (version -> new State.Assignment(former_assignment)))
+        assignments = assignments + (version -> State.Assignment(former_assignment)))
     }
 
     def define_command(command: Command): State =
@@ -256,8 +254,10 @@
           new_execs += (exec_id -> (st, occs))
           (st.command, exec_id)
         }
-      the_assignment(version).assign(assigned_execs)  // FIXME explicit value instead of promise (!?)
-      (assigned_execs.map(_._1), copy(execs = new_execs))
+      val new_assignment = the_assignment(version).assign(assigned_execs)
+      val new_state =
+        copy(assignments = assignments + (version -> new_assignment), execs = new_execs)
+      (assigned_execs.map(_._1), new_state)
     }
 
     def is_assigned(version: Version): Boolean =
--- a/src/Pure/ProofGeneral/preferences.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/ProofGeneral/preferences.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -141,7 +141,7 @@
     "prems-limit"
     "Setting for maximum number of premises printed",
   print_depth_pref,
-  bool_pref show_question_marks
+  bool_pref show_question_marks_default
     "show-question-marks"
     "Show leading question mark of variable name"];
 
--- a/src/Pure/ROOT.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/ROOT.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -238,7 +238,6 @@
 use "Thy/term_style.ML";
 use "Thy/thy_output.ML";
 use "Thy/thy_syntax.ML";
-use "old_goals.ML";
 use "Isar/outer_syntax.ML";
 use "PIDE/document.ML";
 use "Thy/thy_info.ML";
--- a/src/Pure/Syntax/printer.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/Syntax/printer.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -12,6 +12,9 @@
   val show_no_free_types: bool Unsynchronized.ref
   val show_all_types: bool Unsynchronized.ref
   val show_structs: bool Unsynchronized.ref
+  val show_question_marks_default: bool Unsynchronized.ref
+  val show_question_marks_value: Config.value Config.T
+  val show_question_marks: bool Config.T
   val pp_show_brackets: Pretty.pp -> Pretty.pp
 end;
 
@@ -52,6 +55,11 @@
 val show_all_types = Unsynchronized.ref false;
 val show_structs = Unsynchronized.ref false;
 
+val show_question_marks_default = Unsynchronized.ref true;
+val show_question_marks_value =
+  Config.declare false "show_question_marks" (fn _ => Config.Bool (! show_question_marks_default));
+val show_question_marks = Config.bool show_question_marks_value;
+
 fun pp_show_brackets pp = Pretty.pp (setmp_CRITICAL show_brackets true (Pretty.term pp),
   Pretty.typ pp, Pretty.sort pp, Pretty.classrel pp, Pretty.arity pp);
 
@@ -72,15 +80,18 @@
 
 (* simple_ast_of *)
 
-fun simple_ast_of (Const (c, _)) = Ast.Constant c
-  | simple_ast_of (Free (x, _)) = Ast.Variable x
-  | simple_ast_of (Var (xi, _)) = Ast.Variable (Term.string_of_vname xi)
-  | simple_ast_of (t as _ $ _) =
-      let val (f, args) = strip_comb t in
-        Ast.mk_appl (simple_ast_of f) (map simple_ast_of args)
-      end
-  | simple_ast_of (Bound i) = Ast.Variable ("B." ^ string_of_int i)
-  | simple_ast_of (Abs _) = raise Fail "simple_ast_of: Abs";
+fun simple_ast_of ctxt =
+  let
+    val tune_var = if Config.get ctxt show_question_marks then I else unprefix "?";
+    fun ast_of (Const (c, _)) = Ast.Constant c
+      | ast_of (Free (x, _)) = Ast.Variable x
+      | ast_of (Var (xi, _)) = Ast.Variable (tune_var (Term.string_of_vname xi))
+      | ast_of (t as _ $ _) =
+          let val (f, args) = strip_comb t
+          in Ast.mk_appl (ast_of f) (map ast_of args) end
+      | ast_of (Bound i) = Ast.Variable ("B." ^ string_of_int i)
+      | ast_of (Abs _) = raise Fail "simple_ast_of: Abs";
+  in ast_of end;
 
 
 
@@ -88,14 +99,14 @@
 
 fun ast_of_termT ctxt trf tm =
   let
-    fun ast_of (t as Const ("_tfree", _) $ Free _) = simple_ast_of t
-      | ast_of (t as Const ("_tvar", _) $ Var _) = simple_ast_of t
+    fun ast_of (t as Const ("_tfree", _) $ Free _) = simple_ast_of ctxt t
+      | ast_of (t as Const ("_tvar", _) $ Var _) = simple_ast_of ctxt t
       | ast_of (Const (a, _)) = trans a []
       | ast_of (t as _ $ _) =
           (case strip_comb t of
             (Const (a, _), args) => trans a args
           | (f, args) => Ast.Appl (map ast_of (f :: args)))
-      | ast_of t = simple_ast_of t
+      | ast_of t = simple_ast_of ctxt t
     and trans a args =
       ast_of (apply_trans ctxt (apply_typed false dummyT (trf a)) args)
         handle Match => Ast.mk_appl (Ast.Constant a) (map ast_of args);
@@ -168,7 +179,7 @@
           if show_all_types
           then Ast.mk_appl (constrain const T) (map ast_of ts)
           else trans c T ts
-      | (t, ts) => Ast.mk_appl (simple_ast_of t) (map ast_of ts))
+      | (t, ts) => Ast.mk_appl (simple_ast_of ctxt t) (map ast_of ts))
 
     and trans a T args =
       ast_of (apply_trans ctxt (apply_typed show_sorts T (trf a)) args)
@@ -176,9 +187,9 @@
 
     and constrain t T =
       if show_types andalso T <> dummyT then
-        Ast.Appl [Ast.Constant Syn_Ext.constrainC, simple_ast_of t,
+        Ast.Appl [Ast.Constant Syn_Ext.constrainC, simple_ast_of ctxt t,
           ast_of_termT ctxt trf (Type_Ext.term_of_typ show_sorts T)]
-      else simple_ast_of t;
+      else simple_ast_of ctxt t;
   in
     tm
     |> Syn_Trans.prop_tr'
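
show_question_marks is declared here as a context-bound configuration option, with show_question_marks_default kept only as the session-level fallback; the attrib.ML hunk above registers it as an Isar attribute and the thy_output.ML hunk below wires it into the document antiquotation option of the same name. A sketch of the access paths from Isabelle/ML, assuming ctxt : Proof.context is in scope and that the names are visible unqualified, as they are used in attrib.ML and thy_output.ML:

  (* read and locally override the option in a context *)
  val b = Config.get ctxt show_question_marks;
  val ctxt' = Config.put show_question_marks false ctxt;

  (* ROOT/session setup only: change the global default *)
  val () = show_question_marks_default := false;

In Isar the registered attribute gives the declarative form, e.g. declare [[show_question_marks = false]].
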
--- a/src/Pure/Thy/thy_info.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/Thy/thy_info.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -346,6 +346,6 @@
 
 (* finish all theories *)
 
-fun finish () = change_thys (Graph.map_nodes (fn (_, entry) => (NONE, entry)));
+fun finish () = change_thys (Graph.map (fn _ => fn (_, entry) => (NONE, entry)));
 
 end;
--- a/src/Pure/Thy/thy_output.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/Thy/thy_output.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -447,7 +447,7 @@
 val _ = add_option "show_types" (add_wrapper o setmp_CRITICAL Syntax.show_types o boolean);
 val _ = add_option "show_sorts" (add_wrapper o setmp_CRITICAL Syntax.show_sorts o boolean);
 val _ = add_option "show_structs" (add_wrapper o setmp_CRITICAL show_structs o boolean);
-val _ = add_option "show_question_marks" (add_wrapper o setmp_CRITICAL show_question_marks o boolean);
+val _ = add_option "show_question_marks" (Config.put show_question_marks o boolean);
 val _ = add_option "long_names" (add_wrapper o setmp_CRITICAL Name_Space.long_names o boolean);
 val _ = add_option "short_names" (add_wrapper o setmp_CRITICAL Name_Space.short_names o boolean);
 val _ = add_option "unique_names" (add_wrapper o setmp_CRITICAL Name_Space.unique_names o boolean);
--- a/src/Pure/context.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/context.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -137,7 +137,7 @@
     val _ = CRITICAL (fn () => Unsynchronized.change kinds (Datatab.update (k, kind)));
   in k end;
 
-val extend_data = Datatab.map' invoke_extend;
+val extend_data = Datatab.map invoke_extend;
 
 fun merge_data pp (data1, data2) =
   Datatab.keys (Datatab.merge (K true) (data1, data2))
@@ -476,7 +476,7 @@
   | NONE => raise Fail "Invalid proof data identifier");
 
 fun init_data thy =
-  Datatab.map' (fn k => fn _ => invoke_init k thy) (! kinds);
+  Datatab.map (fn k => fn _ => invoke_init k thy) (! kinds);
 
 fun init_new_data data thy =
   Datatab.merge (K true) (data, init_data thy);
--- a/src/Pure/old_goals.ML	Thu Sep 02 20:29:12 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,440 +0,0 @@
-(*  Title:      Pure/old_goals.ML
-    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
-    Copyright   1993  University of Cambridge
-
-Old-style goal stack package.  The goal stack initially holds a dummy
-proof, and can never become empty.  Each goal stack consists of a list
-of levels.  The undo list is a list of goal stacks.  Finally, there
-may be a stack of pending proofs.
-*)
-
-signature OLD_GOALS =
-sig
-  type proof
-  val premises: unit -> thm list
-  val reset_goals: unit -> unit
-  val result_error_fn: (thm -> string -> thm) Unsynchronized.ref
-  val print_sign_exn: theory -> exn -> 'a
-  val prove_goalw_cterm: thm list->cterm->(thm list->tactic list)->thm
-  val prove_goalw_cterm_nocheck: thm list->cterm->(thm list->tactic list)->thm
-  val prove_goalw: theory -> thm list -> string -> (thm list -> tactic list) -> thm
-  val prove_goal: theory -> string -> (thm list -> tactic list) -> thm
-  val topthm: unit -> thm
-  val result: unit -> thm
-  val uresult: unit -> thm
-  val getgoal: int -> term
-  val print_exn: exn -> 'a
-  val filter_goal: (term*term->bool) -> thm list -> int -> thm list
-  val prlev: int -> unit
-  val pr: unit -> unit
-  val prlim: int -> unit
-  val goalw_cterm: thm list -> cterm -> thm list
-  val goalw: theory -> thm list -> string -> thm list
-  val goal: theory -> string -> thm list
-  val Goalw: thm list -> string -> thm list
-  val Goal: string -> thm list
-  val simple_prove_goal_cterm: cterm->(thm list->tactic list)->thm
-  val by: tactic -> unit
-  val byev: tactic list -> unit
-  val back: unit -> unit
-  val choplev: int -> unit
-  val chop: unit -> unit
-  val undo: unit -> unit
-  val save_proof: unit -> proof
-  val restore_proof: proof -> thm list
-  val push_proof: unit -> unit
-  val pop_proof: unit -> thm list
-  val rotate_proof: unit -> thm list
-  val qed: string -> unit
-  val qed_goal: string -> theory -> string -> (thm list -> tactic list) -> unit
-  val qed_goalw: string -> theory -> thm list -> string
-    -> (thm list -> tactic list) -> unit
-  val qed_spec_mp: string -> unit
-  val qed_goal_spec_mp: string -> theory -> string -> (thm list -> tactic list) -> unit
-  val qed_goalw_spec_mp: string -> theory -> thm list -> string
-    -> (thm list -> tactic list) -> unit
-end;
-
-structure OldGoals: OLD_GOALS =
-struct
-
-
-(*** Goal package ***)
-
-(*Each level of goal stack includes a proof state and alternative states,
-  the output of the tactic applied to the preceeding level.  *)
-type gstack = (thm * thm Seq.seq) list;
-
-datatype proof = Proof of gstack list * thm list * (bool*thm->thm);
-
-
-(*** References ***)
-
-(*Current assumption list -- set by "goal".*)
-val curr_prems = Unsynchronized.ref([] : thm list);
-
-(*Return assumption list -- useful if you didn't save "goal"'s result. *)
-fun premises() = !curr_prems;
-
-(*Current result maker -- set by "goal", used by "result".  *)
-fun init_mkresult _ = error "No goal has been supplied in subgoal module";
-val curr_mkresult = Unsynchronized.ref (init_mkresult: bool*thm->thm);
-
-(*List of previous goal stacks, for the undo operation.  Set by setstate.
-  A list of lists!*)
-val undo_list = Unsynchronized.ref([[(asm_rl, Seq.empty)]] : gstack list);
-
-(* Stack of proof attempts *)
-val proofstack = Unsynchronized.ref([]: proof list);
-
-(*reset all refs*)
-fun reset_goals () =
-  (curr_prems := []; curr_mkresult := init_mkresult;
-    undo_list := [[(asm_rl, Seq.empty)]]);
-
-
-(*** Setting up goal-directed proof ***)
-
-(*Generates the list of new theories when the proof state's theory changes*)
-fun thy_error (thy,thy') =
-  let val names = subtract (op =) (Context.display_names thy) (Context.display_names thy')
-  in  case names of
-        [name] => "\nNew theory: " ^ name
-      | _       => "\nNew theories: " ^ space_implode ", " names
-  end;
-
-(*Default action is to print an error message; could be suppressed for
-  special applications.*)
-fun result_error_default state msg : thm =
-  Pretty.str "Bad final proof state:" ::
-      Goal_Display.pretty_goals_without_context (!goals_limit) state @
-    [Pretty.str msg, Pretty.str "Proof failed!"] |> Pretty.chunks |> Pretty.string_of |> error;
-
-val result_error_fn = Unsynchronized.ref result_error_default;
-
-
-(*Common treatment of "goal" and "prove_goal":
-  Return assumptions, initial proof state, and function to make result.
-  "atomic" indicates if the goal should be wrapped up in the function
-  "Goal::prop=>prop" to avoid assumptions being returned separately.
-*)
-fun prepare_proof atomic rths chorn =
-  let
-      val thy = Thm.theory_of_cterm chorn;
-      val horn = Thm.term_of chorn;
-      val _ = Term.no_dummy_patterns horn handle TERM (msg, _) => error msg;
-      val (As, B) = Logic.strip_horn horn;
-      val atoms = atomic andalso
-            forall (fn t => not (can Logic.dest_implies t orelse Logic.is_all t)) As;
-      val (As,B) = if atoms then ([],horn) else (As,B);
-      val cAs = map (cterm_of thy) As;
-      val prems = map (rewrite_rule rths o Thm.forall_elim_vars 0 o Thm.assume) cAs;
-      val cB = cterm_of thy B;
-      val st0 = let val st = Goal.init cB |> fold Thm.weaken cAs
-                in  rewrite_goals_rule rths st end
-      (*discharges assumptions from state in the order they appear in goal;
-        checks (if requested) that resulting theorem is equivalent to goal. *)
-      fun mkresult (check,state) =
-        let val state = Seq.hd (Thm.flexflex_rule state)
-                        handle THM _ => state   (*smash flexflex pairs*)
-            val ngoals = nprems_of state
-            val ath = implies_intr_list cAs state
-            val th = Goal.conclude ath
-            val thy' = Thm.theory_of_thm th
-            val {hyps, prop, ...} = Thm.rep_thm th
-            val final_th = Drule.export_without_context th
-        in  if not check then final_th
-            else if not (Theory.eq_thy(thy,thy')) then !result_error_fn state
-                ("Theory of proof state has changed!" ^
-                 thy_error (thy,thy'))
-            else if ngoals>0 then !result_error_fn state
-                (string_of_int ngoals ^ " unsolved goals!")
-            else if not (null hyps) then !result_error_fn state
-                ("Additional hypotheses:\n" ^
-                 cat_lines (map (Syntax.string_of_term_global thy) hyps))
-            else if Pattern.matches thy
-                                    (Envir.beta_norm (term_of chorn), Envir.beta_norm prop)
-                 then final_th
-            else  !result_error_fn state "proved a different theorem"
-        end
-  in
-     if Theory.eq_thy(thy, Thm.theory_of_thm st0)
-     then (prems, st0, mkresult)
-     else error ("Definitions would change the proof state's theory" ^
-                 thy_error (thy, Thm.theory_of_thm st0))
-  end
-  handle THM(s,_,_) => error("prepare_proof: exception THM was raised!\n" ^ s);
-
-(*Prints exceptions readably to users*)
-fun print_sign_exn_unit thy e =
-  case e of
-     THM (msg,i,thms) =>
-         (writeln ("Exception THM " ^ string_of_int i ^ " raised:\n" ^ msg);
-          List.app (writeln o Display.string_of_thm_global thy) thms)
-   | THEORY (msg,thys) =>
-         (writeln ("Exception THEORY raised:\n" ^ msg);
-          List.app (writeln o Context.str_of_thy) thys)
-   | TERM (msg,ts) =>
-         (writeln ("Exception TERM raised:\n" ^ msg);
-          List.app (writeln o Syntax.string_of_term_global thy) ts)
-   | TYPE (msg,Ts,ts) =>
-         (writeln ("Exception TYPE raised:\n" ^ msg);
-          List.app (writeln o Syntax.string_of_typ_global thy) Ts;
-          List.app (writeln o Syntax.string_of_term_global thy) ts)
-   | e => raise e;
-
-(*Prints an exception, then fails*)
-fun print_sign_exn thy e = (print_sign_exn_unit thy e; raise ERROR "");
-
-(** the prove_goal.... commands
-    Prove theorem using the listed tactics; check it has the specified form.
-    Augment theory with all type assignments of goal.
-    Syntax is similar to "goal" command for easy keyboard use. **)
-
-(*Version taking the goal as a cterm*)
-fun prove_goalw_cterm_general check rths chorn tacsf =
-  let val (prems, st0, mkresult) = prepare_proof false rths chorn
-      val tac = EVERY (tacsf prems)
-      fun statef() =
-          (case Seq.pull (tac st0) of
-               SOME(st,_) => st
-             | _ => error ("prove_goal: tactic failed"))
-  in  mkresult (check, cond_timeit (!Output.timing) "" statef)  end;
-
-(*Two variants: one checking the result, one not.
-  Neither prints runtime messages: they are for internal packages.*)
-fun prove_goalw_cterm rths chorn =
-        setmp_CRITICAL Output.timing false (prove_goalw_cterm_general true rths chorn)
-and prove_goalw_cterm_nocheck rths chorn =
-        setmp_CRITICAL Output.timing false (prove_goalw_cterm_general false rths chorn);
-
-
-(*Version taking the goal as a string*)
-fun prove_goalw thy rths agoal tacsf =
-  let val chorn = cterm_of thy (Syntax.read_prop_global thy agoal)
-  in prove_goalw_cterm_general true rths chorn tacsf end
-  handle ERROR msg => cat_error msg (*from read_prop?*)
-                ("The error(s) above occurred for " ^ quote agoal);
-
-(*String version with no meta-rewrite-rules*)
-fun prove_goal thy = prove_goalw thy [];
-
-
-
-(*** Commands etc ***)
-
-(*Return the current goal stack, if any, from undo_list*)
-fun getstate() : gstack = case !undo_list of
-      []   => error"No current state in subgoal module"
-    | x::_ => x;
-
-(*Pops the given goal stack*)
-fun pop [] = error"Cannot go back past the beginning of the proof!"
-  | pop (pair::pairs) = (pair,pairs);
-
-
-(* Print a level of the goal stack *)
-
-fun print_top ((th, _), pairs) =
-  let
-    val n = length pairs;
-    val m = (! goals_limit);
-    val ngoals = nprems_of th;
-  in
-    [Pretty.str ("Level " ^ string_of_int n ^
-      (if ngoals > 0 then " (" ^ string_of_int ngoals ^ " subgoal" ^
-        (if ngoals <> 1 then "s" else "") ^ ")"
-      else ""))] @
-    Goal_Display.pretty_goals_without_context m th
-  end |> Pretty.chunks |> Pretty.writeln;
-
-(*Printing can raise exceptions, so the assignment occurs last.
-  Can do   setstate[(st,Seq.empty)]  to set st as the state.  *)
-fun setstate newgoals =
-  (print_top (pop newgoals);  undo_list := newgoals :: !undo_list);
-
-(*Given a proof state transformation, return a command that updates
-    the goal stack*)
-fun make_command com = setstate (com (pop (getstate())));
-
-(*Apply a function on proof states to the current goal stack*)
-fun apply_fun f = f (pop(getstate()));
-
-(*Return the top theorem, representing the proof state*)
-fun topthm () = apply_fun  (fn ((th,_), _) => th);
-
-(*Return the final result.  *)
-fun result () = !curr_mkresult (true, topthm());
-
-(*Return the result UNCHECKED that it equals the goal -- for synthesis,
-  answer extraction, or other instantiation of Vars *)
-fun uresult () = !curr_mkresult (false, topthm());
-
-(*Get subgoal i from goal stack*)
-fun getgoal i = Logic.get_goal (prop_of (topthm())) i;
-
-(*Prints exceptions nicely at top level;
-  raises the exception in order to have a polymorphic type!*)
-fun print_exn e = (print_sign_exn_unit (Thm.theory_of_thm (topthm())) e;  raise e);
-
-(*Which thms could apply to goal i? (debugs tactics involving filter_thms) *)
-fun filter_goal could ths i = filter_thms could (999, getgoal i, ths);
-
-(*For inspecting earlier levels of the backward proof*)
-fun chop_level n (pair,pairs) =
-  let val level = length pairs
-  in  if n<0 andalso ~n <= level
-      then  List.drop (pair::pairs, ~n)
-      else if 0<=n andalso n<= level
-      then  List.drop (pair::pairs, level - n)
-      else  error ("Level number must lie between 0 and " ^
-                   string_of_int level)
-  end;
-
-(*Print the given level of the proof; prlev ~1 prints previous level*)
-fun prlev n = apply_fun (print_top o pop o (chop_level n));
-fun pr () = apply_fun print_top;
-
-(*Set goals_limit and print again*)
-fun prlim n = (goals_limit:=n; pr());
-
-(** the goal.... commands
-    Read main goal.  Set global variables curr_prems, curr_mkresult.
-    Initial subgoal and premises are rewritten using rths. **)
-
-(*Version taking the goal as a cterm; if you have a term t and theory thy, use
-    goalw_cterm rths (cterm_of thy t);      *)
-fun agoalw_cterm atomic rths chorn =
-  let val (prems, st0, mkresult) = prepare_proof atomic rths chorn
-  in  undo_list := [];
-      setstate [ (st0, Seq.empty) ];
-      curr_prems := prems;
-      curr_mkresult := mkresult;
-      prems
-  end;
-
-val goalw_cterm = agoalw_cterm false;
-
-(*Version taking the goal as a string*)
-fun agoalw atomic thy rths agoal =
-    agoalw_cterm atomic rths (cterm_of thy (Syntax.read_prop_global thy agoal))
-    handle ERROR msg => cat_error msg (*from type_assign, etc via prepare_proof*)
-        ("The error(s) above occurred for " ^ quote agoal);
-
-val goalw = agoalw false;
-fun goal thy = goalw thy [];
-
-(*now the versions that wrap the goal up in `Goal' to make it atomic*)
-fun Goalw thms s = agoalw true (ML_Context.the_global_context ()) thms s;
-val Goal = Goalw [];
-
-(*simple version with minimal amount of checking and postprocessing*)
-fun simple_prove_goal_cterm G f =
-  let
-    val As = Drule.strip_imp_prems G;
-    val B = Drule.strip_imp_concl G;
-    val asms = map Assumption.assume As;
-    fun check NONE = error "prove_goal: tactic failed"
-      | check (SOME (thm, _)) = (case nprems_of thm of
-            0 => thm
-          | i => !result_error_fn thm (string_of_int i ^ " unsolved goals!"))
-  in
-    Drule.export_without_context (implies_intr_list As
-      (check (Seq.pull (EVERY (f asms) (Thm.trivial B)))))
-  end;
-
-
-(*Proof step "by" the given tactic -- apply tactic to the proof state*)
-fun by_com tac ((th,ths), pairs) : gstack =
-  (case  Seq.pull(tac th)  of
-     NONE      => error"by: tactic failed"
-   | SOME(th2,ths2) =>
-       (if Thm.eq_thm(th,th2)
-          then warning "Warning: same as previous level"
-          else if Thm.eq_thm_thy(th,th2) then ()
-          else warning ("Warning: theory of proof state has changed" ^
-                       thy_error (Thm.theory_of_thm th, Thm.theory_of_thm th2));
-       ((th2,ths2)::(th,ths)::pairs)));
-
-fun by tac = cond_timeit (!Output.timing) ""
-    (fn() => make_command (by_com tac));
-
-(* byev[tac1,...,tacn] applies tac1 THEN ... THEN tacn.
-   Good for debugging proofs involving prove_goal.*)
-val byev = by o EVERY;
-
-
-(*Backtracking means find an alternative result from a tactic.
-  If none at this level, try earlier levels*)
-fun backtrack [] = error"back: no alternatives"
-  | backtrack ((th,thstr) :: pairs) =
-     (case Seq.pull thstr of
-          NONE      => (writeln"Going back a level..."; backtrack pairs)
-        | SOME(th2,thstr2) =>
-           (if Thm.eq_thm(th,th2)
-              then warning "Warning: same as previous choice at this level"
-              else if Thm.eq_thm_thy(th,th2) then ()
-              else warning "Warning: theory of proof state has changed";
-            (th2,thstr2)::pairs));
-
-fun back() = setstate (backtrack (getstate()));
-
-(*Chop back to previous level of the proof*)
-fun choplev n = make_command (chop_level n);
-
-(*Chopping back the goal stack*)
-fun chop () = make_command (fn (_,pairs) => pairs);
-
-(*Restore the previous proof state;  discard current state. *)
-fun undo() = case !undo_list of
-      [] => error"No proof state"
-    | [_] => error"Already at initial state"
-    | _::newundo =>  (undo_list := newundo;  pr()) ;
-
-
-(*** Managing the proof stack ***)
-
-fun save_proof() = Proof(!undo_list, !curr_prems, !curr_mkresult);
-
-fun restore_proof(Proof(ul,prems,mk)) =
- (undo_list:= ul;  curr_prems:= prems;  curr_mkresult := mk;  prems);
-
-
-fun top_proof() = case !proofstack of
-        [] => error("Stack of proof attempts is empty!")
-    | p::ps => (p,ps);
-
-(*  push a copy of the current proof state on to the stack *)
-fun push_proof() = (proofstack := (save_proof() :: !proofstack));
-
-(* discard the top proof state of the stack *)
-fun pop_proof() =
-  let val (p,ps) = top_proof()
-      val prems = restore_proof p
-  in proofstack := ps;  pr();  prems end;
-
-(* rotate the stack so that the top element goes to the bottom *)
-fun rotate_proof() =
-  let val (p,ps) = top_proof()
-  in proofstack := ps@[save_proof()];
-     restore_proof p;
-     pr();
-     !curr_prems
-  end;
-
-
-(** theorem bindings **)
-
-fun qed name = ML_Context.ml_store_thm (name, result ());
-fun qed_goal name thy goal tacsf = ML_Context.ml_store_thm (name, prove_goal thy goal tacsf);
-fun qed_goalw name thy rews goal tacsf =
-  ML_Context.ml_store_thm (name, prove_goalw thy rews goal tacsf);
-fun qed_spec_mp name =
-  ML_Context.ml_store_thm (name, Object_Logic.rulify_no_asm (result ()));
-fun qed_goal_spec_mp name thy s p =
-  bind_thm (name, Object_Logic.rulify_no_asm (prove_goal thy s p));
-fun qed_goalw_spec_mp name thy defs s p =
-  bind_thm (name, Object_Logic.rulify_no_asm (prove_goalw thy defs s p));
-
-end;
-
--- a/src/Pure/proofterm.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/proofterm.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -515,7 +515,7 @@
   | remove_types t = t;
 
 fun remove_types_env (Envir.Envir {maxidx, tenv, tyenv}) =
-  Envir.Envir {maxidx = maxidx, tenv = Vartab.map (apsnd remove_types) tenv, tyenv = tyenv};
+  Envir.Envir {maxidx = maxidx, tenv = Vartab.map (K (apsnd remove_types)) tenv, tyenv = tyenv};
 
 fun norm_proof' env prf = norm_proof (remove_types_env env) prf;
 
--- a/src/Pure/pure_thy.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/pure_thy.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -320,6 +320,7 @@
     ("_indexvar",   typ "index",                       Delimfix "'\\<index>"),
     ("_struct",     typ "index => logic",              Mixfix ("\\<struct>_", [1000], 1000)),
     ("_update_name", typ "idt",                        NoSyn),
+    (Syntax.constrainAbsC, typ "'a",                   NoSyn),
     (const "==>",   typ "prop => prop => prop",        Delimfix "op ==>"),
     (const Term.dummy_patternN, typ "aprop",           Delimfix "'_"),
     ("_sort_constraint", typ "type => prop",           Delimfix "(1SORT'_CONSTRAINT/(1'(_')))"),
--- a/src/Pure/sorts.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/sorts.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -321,7 +321,7 @@
         | NONE => NONE)
       else NONE;
     val classes' = classes |> Graph.subgraph P;
-    val arities' = arities |> Symtab.map' (map_filter o restrict_arity);
+    val arities' = arities |> Symtab.map (map_filter o restrict_arity);
   in (restrict_sort, rebuild_arities pp (make_algebra (classes', arities'))) end;
 
 
--- a/src/Pure/term.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Pure/term.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -114,7 +114,6 @@
   val exists_type: (typ -> bool) -> term -> bool
   val exists_subterm: (term -> bool) -> term -> bool
   val exists_Const: (string * typ -> bool) -> term -> bool
-  val show_question_marks: bool Unsynchronized.ref
 end;
 
 signature TERM =
@@ -983,11 +982,8 @@
 
 (* display variables *)
 
-val show_question_marks = Unsynchronized.ref true;
-
 fun string_of_vname (x, i) =
   let
-    val question_mark = if ! show_question_marks then "?" else "";
     val idx = string_of_int i;
     val dot =
       (case rev (Symbol.explode x) of
@@ -996,9 +992,9 @@
       | c :: _ => Symbol.is_digit c
       | _ => true);
   in
-    if dot then question_mark ^ x ^ "." ^ idx
-    else if i <> 0 then question_mark ^ x ^ idx
-    else question_mark ^ x
+    if dot then "?" ^ x ^ "." ^ idx
+    else if i <> 0 then "?" ^ x ^ idx
+    else "?" ^ x
   end;
 
 fun string_of_vname' (x, ~1) = x
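
After this change Term.string_of_vname depends only on the indexname: it always prints the leading "?", and suppressing the question mark is handled during pretty printing instead (simple_ast_of in printer.ML above, driven by the new configuration option). For example, following the code above:

  val s0 = Term.string_of_vname ("x", 0);   (* "?x" *)
  val s1 = Term.string_of_vname ("x", 1);   (* "?x1" *)
  val s2 = Term.string_of_vname ("x1", 2);  (* "?x1.2", dot inserted because
                                               the name ends in a digit *)
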
--- a/src/Tools/Code/code_haskell.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/Code/code_haskell.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -261,13 +261,31 @@
           end;
   in print_stmt end;
 
+fun mk_name_module reserved module_prefix module_alias program =
+  let
+    fun mk_alias name = case module_alias name
+     of SOME name' => name'
+      | NONE => name
+          |> Long_Name.explode
+          |> map (fn name => (the_single o fst) (Name.variants [name] reserved))
+          |> Long_Name.implode;
+    fun mk_prefix name = case module_prefix
+     of SOME module_prefix => Long_Name.append module_prefix name
+      | NONE => name;
+    val tab =
+      Symtab.empty
+      |> Graph.fold ((fn name => Symtab.default (name, (mk_alias #> mk_prefix) name))
+           o fst o Code_Namespace.dest_name o fst)
+             program
+  in the o Symtab.lookup tab end;
+
 fun haskell_program_of_program labelled_name module_prefix reserved module_alias program =
   let
     val reserved = Name.make_context reserved;
     val mk_name_module = mk_name_module reserved module_prefix module_alias program;
     fun add_stmt (name, (stmt, deps)) =
       let
-        val (module_name, base) = dest_name name;
+        val (module_name, base) = Code_Namespace.dest_name name;
         val module_name' = mk_name_module module_name;
         val mk_name_stmt = yield_singleton Name.variants;
         fun add_fun upper (nsp_fun, nsp_typ) =
@@ -309,14 +327,14 @@
       (Graph.get_node program name, Graph.imm_succs program name))
       (Graph.strong_conn program |> flat)) Symtab.empty;
     fun deresolver name = (fst o the o AList.lookup (op =) ((fst o snd o the
-      o Symtab.lookup hs_program) ((mk_name_module o fst o dest_name) name))) name
+      o Symtab.lookup hs_program) ((mk_name_module o fst o Code_Namespace.dest_name) name))) name
       handle Option => error ("Unknown statement name: " ^ labelled_name name);
   in (deresolver, hs_program) end;
 
 fun serialize_haskell module_prefix string_classes { labelled_name,
     reserved_syms, includes, module_alias,
     class_syntax, tyco_syntax, const_syntax, program,
-    names, presentation_names } =
+    names } =
   let
     val reserved = fold (insert (op =) o fst) includes reserved_syms;
     val (deresolver, hs_program) = haskell_program_of_program labelled_name
@@ -347,10 +365,10 @@
         content,
         str "}"
       ]);
-    fun serialize_module1 (module_name', (deps, (stmts, _))) =
+    fun serialize_module (module_name', (deps, (stmts, _))) =
       let
         val stmt_names = map fst stmts;
-        val qualified = null presentation_names;
+        val qualified = true;
         val imports = subtract (op =) stmt_names deps
           |> distinct (op =)
           |> map_filter (try deresolver)
@@ -363,18 +381,10 @@
         val import_ps = map print_import_include includes @ map print_import_module imports
         val content = Pretty.chunks2 ((if null import_ps then [] else [Pretty.chunks import_ps])
             @ map_filter
-              (fn (name, (_, SOME stmt)) => SOME (print_stmt qualified (name, stmt))
+              (fn (name, (_, SOME stmt)) => SOME (markup_stmt name (print_stmt qualified (name, stmt)))
                 | (_, (_, NONE)) => NONE) stmts
           );
       in print_module module_name' content end;
-    fun serialize_module2 (_, (_, (stmts, _))) = Pretty.chunks2 (map_filter
-        (fn (name, (_, SOME stmt)) => if null presentation_names
-              orelse member (op =) presentation_names name
-              then SOME (print_stmt false (name, stmt))
-              else NONE
-          | (_, (_, NONE)) => NONE) stmts);
-    val serialize_module =
-      if null presentation_names then serialize_module1 else pair "" o serialize_module2;
     fun write_module width (SOME destination) (modlname, content) =
           let
             val _ = File.check destination;
@@ -386,13 +396,13 @@
             val _ = File.mkdir_leaf (Path.dir pathname);
           in File.write pathname
             ("{-# OPTIONS_GHC -fglasgow-exts #-}\n\n"
-              ^ string_of_pretty width content)
+              ^ format [] width content)
           end
-      | write_module width NONE (_, content) = writeln_pretty width content;
+      | write_module width NONE (_, content) = writeln (format [] width content);
   in
     Code_Target.serialization
       (fn width => fn destination => K () o map (write_module width destination))
-      (fn width => rpair [] o cat_lines o map (string_of_pretty width o snd))
+      (fn present => fn width => rpair [] o format present width o Pretty.chunks o map snd)
       (map (uncurry print_module) includes
         @ map serialize_module (Symtab.dest hs_program))
   end;
--- a/src/Tools/Code/code_ml.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/Code/code_ml.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -707,73 +707,39 @@
 
 (** SML/OCaml generic part **)
 
-local
-
-datatype ml_node =
-    Dummy of string
-  | Stmt of string * ml_stmt
-  | Module of string * ((Name.context * Name.context) * ml_node Graph.T);
-
-in
-
-fun ml_node_of_program labelled_name reserved module_alias program =
+fun ml_program_of_program labelled_name reserved module_alias program =
   let
-    val reserved = Name.make_context reserved;
-    val empty_module = ((reserved, reserved), Graph.empty);
-    fun map_node [] f = f
-      | map_node (m::ms) f =
-          Graph.default_node (m, Module (m, empty_module))
-          #> Graph.map_node m (fn (Module (module_name, (nsp, nodes))) =>
-               Module (module_name, (nsp, map_node ms f nodes)));
-    fun map_nsp_yield [] f (nsp, nodes) =
-          let
-            val (x, nsp') = f nsp
-          in (x, (nsp', nodes)) end
-      | map_nsp_yield (m::ms) f (nsp, nodes) =
-          let
-            val (x, nodes') =
-              nodes
-              |> Graph.default_node (m, Module (m, empty_module))
-              |> Graph.map_node_yield m (fn Module (d_module_name, nsp_nodes) => 
-                  let
-                    val (x, nsp_nodes') = map_nsp_yield ms f nsp_nodes
-                  in (x, Module (d_module_name, nsp_nodes')) end)
-          in (x, (nsp, nodes')) end;
-    fun map_nsp_fun_yield f (nsp_fun, nsp_typ) =
+    fun namify_const upper base (nsp_const, nsp_type) =
+      let
+        val (base', nsp_const') = yield_singleton Name.variants
+          (if upper then first_upper base else base) nsp_const
+      in (base', (nsp_const', nsp_type)) end;
+    fun namify_type base (nsp_const, nsp_type) =
       let
-        val (x, nsp_fun') = f nsp_fun
-      in (x, (nsp_fun', nsp_typ)) end;
-    fun map_nsp_typ_yield f (nsp_fun, nsp_typ) =
-      let
-        val (x, nsp_typ') = f nsp_typ
-      in (x, (nsp_fun, nsp_typ')) end;
-    val mk_name_module = mk_name_module reserved NONE module_alias program;
-    fun mk_name_stmt upper name nsp =
-      let
-        val (_, base) = dest_name name;
-        val base' = if upper then first_upper base else base;
-        val ([base''], nsp') = Name.variants [base'] nsp;
-      in (base'', nsp') end;
-    fun deps_of names =
-      []
-      |> fold (fold (insert (op =)) o Graph.imm_succs program) names
-      |> subtract (op =) names
-      |> filter_out (Code_Thingol.is_case o Graph.get_node program);
+        val (base', nsp_type') = yield_singleton Name.variants base nsp_type
+      in (base', (nsp_const, nsp_type')) end;
+    fun namify_stmt (Code_Thingol.Fun _) = namify_const false
+      | namify_stmt (Code_Thingol.Datatype _) = namify_type
+      | namify_stmt (Code_Thingol.Datatypecons _) = namify_const true
+      | namify_stmt (Code_Thingol.Class _) = namify_type
+      | namify_stmt (Code_Thingol.Classrel _) = namify_const false
+      | namify_stmt (Code_Thingol.Classparam _) = namify_const false
+      | namify_stmt (Code_Thingol.Classinst _) = namify_const false;
     fun ml_binding_of_stmt (name, Code_Thingol.Fun (_, ((tysm as (vs, ty), raw_eqs), _))) =
           let
             val eqs = filter (snd o snd) raw_eqs;
-            val (eqs', is_value) = if null (filter_out (null o snd) vs) then case eqs
+            val (eqs', some_value_name) = if null (filter_out (null o snd) vs) then case eqs
                of [(([], t), some_thm)] => if (not o null o fst o Code_Thingol.unfold_fun) ty
                   then ([(([IVar (SOME "x")], t `$ IVar (SOME "x")), some_thm)], NONE)
                   else (eqs, SOME (name, member (op =) (Code_Thingol.add_constnames t []) name))
                 | _ => (eqs, NONE)
               else (eqs, NONE)
-          in (ML_Function (name, (tysm, eqs')), is_value) end
+          in (ML_Function (name, (tysm, eqs')), some_value_name) end
       | ml_binding_of_stmt (name, Code_Thingol.Classinst (stmt as ((_, (_, vs)), _))) =
           (ML_Instance (name, stmt), if forall (null o snd) vs then SOME (name, false) else NONE)
       | ml_binding_of_stmt (name, _) =
           error ("Binding block containing illegal statement: " ^ labelled_name name)
-    fun add_fun (name, stmt) =
+    fun modify_fun (name, stmt) =
       let
         val (binding, some_value_name) = ml_binding_of_stmt (name, stmt);
         val ml_stmt = case binding
@@ -784,157 +750,73 @@
              of NONE => ML_Funs ([binding], [])
               | SOME (name, true) => ML_Funs ([binding], [name])
               | SOME (name, false) => ML_Val binding
-      in
-        map_nsp_fun_yield (mk_name_stmt false name)
-        #>> (fn name' => ([name'], ml_stmt))
-      end;
-    fun add_funs stmts =
-      let
-        val ml_stmt = ML_Funs (map_split ml_binding_of_stmt stmts |> (apsnd o map_filter o Option.map) fst);
-      in
-        fold_map (fn (name, _) => map_nsp_fun_yield (mk_name_stmt false name)) stmts
-        #>> rpair ml_stmt
-      end;
-    fun add_datatypes stmts =
-      fold_map
-        (fn (name, Code_Thingol.Datatype (_, stmt)) =>
-              map_nsp_typ_yield (mk_name_stmt false name) #>> rpair (SOME (name, stmt))
-          | (name, Code_Thingol.Datatypecons _) =>
-              map_nsp_fun_yield (mk_name_stmt true name) #>> rpair NONE
-          | (name, _) =>
-              error ("Datatype block containing illegal statement: " ^ labelled_name name)
-        ) stmts
-      #>> (split_list #> apsnd (map_filter I
-        #> (fn [] => error ("Datatype block without data statement: "
-                  ^ (Library.commas o map (labelled_name o fst)) stmts)
-             | stmts => ML_Datas stmts)));
-    fun add_class stmts =
-      fold_map
-        (fn (name, Code_Thingol.Class (_, stmt)) =>
-              map_nsp_typ_yield (mk_name_stmt false name) #>> rpair (SOME (name, stmt))
-          | (name, Code_Thingol.Classrel _) =>
-              map_nsp_fun_yield (mk_name_stmt false name) #>> rpair NONE
-          | (name, Code_Thingol.Classparam _) =>
-              map_nsp_fun_yield (mk_name_stmt false name) #>> rpair NONE
-          | (name, _) =>
-              error ("Class block containing illegal statement: " ^ labelled_name name)
-        ) stmts
-      #>> (split_list #> apsnd (map_filter I
-        #> (fn [] => error ("Class block without class statement: "
-                  ^ (Library.commas o map (labelled_name o fst)) stmts)
-             | [stmt] => ML_Class stmt)));
-    fun add_stmts ([stmt as (name, Code_Thingol.Fun _)]) =
-          add_fun stmt
-      | add_stmts ((stmts as (_, Code_Thingol.Fun _)::_)) =
-          add_funs stmts
-      | add_stmts ((stmts as (_, Code_Thingol.Datatypecons _)::_)) =
-          add_datatypes stmts
-      | add_stmts ((stmts as (_, Code_Thingol.Datatype _)::_)) =
-          add_datatypes stmts
-      | add_stmts ((stmts as (_, Code_Thingol.Class _)::_)) =
-          add_class stmts
-      | add_stmts ((stmts as (_, Code_Thingol.Classrel _)::_)) =
-          add_class stmts
-      | add_stmts ((stmts as (_, Code_Thingol.Classparam _)::_)) =
-          add_class stmts
-      | add_stmts ([stmt as (_, Code_Thingol.Classinst _)]) =
-          add_fun stmt
-      | add_stmts ((stmts as (_, Code_Thingol.Classinst _)::_)) =
-          add_funs stmts
-      | add_stmts stmts = error ("Illegal mutual dependencies: " ^
+      in SOME ml_stmt end;
+    fun modify_funs stmts = single (SOME
+      (ML_Funs (map_split ml_binding_of_stmt stmts |> (apsnd o map_filter o Option.map) fst)))
+    fun modify_datatypes stmts = single (SOME
+      (ML_Datas (map_filter
+        (fn (name, Code_Thingol.Datatype (_, stmt)) => SOME (name, stmt) | _ => NONE) stmts)))
+    fun modify_class stmts = single (SOME
+      (ML_Class (the_single (map_filter
+        (fn (name, Code_Thingol.Class (_, stmt)) => SOME (name, stmt) | _ => NONE) stmts))))
+    fun modify_stmts ([stmt as (name, stmt' as Code_Thingol.Fun _)]) =
+          if Code_Thingol.is_case stmt' then [] else [modify_fun stmt]
+      | modify_stmts ((stmts as (_, Code_Thingol.Fun _)::_)) =
+          modify_funs (filter_out (Code_Thingol.is_case o snd) stmts)
+      | modify_stmts ((stmts as (_, Code_Thingol.Datatypecons _)::_)) =
+          modify_datatypes stmts
+      | modify_stmts ((stmts as (_, Code_Thingol.Datatype _)::_)) =
+          modify_datatypes stmts
+      | modify_stmts ((stmts as (_, Code_Thingol.Class _)::_)) =
+          modify_class stmts
+      | modify_stmts ((stmts as (_, Code_Thingol.Classrel _)::_)) =
+          modify_class stmts
+      | modify_stmts ((stmts as (_, Code_Thingol.Classparam _)::_)) =
+          modify_class stmts
+      | modify_stmts ([stmt as (_, Code_Thingol.Classinst _)]) =
+          [modify_fun stmt]
+      | modify_stmts ((stmts as (_, Code_Thingol.Classinst _)::_)) =
+          modify_funs stmts
+      | modify_stmts stmts = error ("Illegal mutual dependencies: " ^
           (Library.commas o map (labelled_name o fst)) stmts);
-    fun add_stmts' stmts nsp_nodes =
-      let
-        val names as (name :: names') = map fst stmts;
-        val deps = deps_of names;
-        val (module_names, _) = (split_list o map dest_name) names;
-        val module_name = (the_single o distinct (op =) o map mk_name_module) module_names
-          handle Empty =>
-            error ("Different namespace prefixes for mutual dependencies:\n"
-              ^ Library.commas (map labelled_name names)
-              ^ "\n"
-              ^ Library.commas module_names);
-        val module_name_path = Long_Name.explode module_name;
-        fun add_dep name name' =
-          let
-            val module_name' = (mk_name_module o fst o dest_name) name';
-          in if module_name = module_name' then
-            map_node module_name_path (Graph.add_edge (name, name'))
-          else let
-            val (common, (diff1 :: _, diff2 :: _)) = chop_prefix (op =)
-              (module_name_path, Long_Name.explode module_name');
-          in
-            map_node common
-              (fn node => Graph.add_edge_acyclic (diff1, diff2) node
-                handle Graph.CYCLES _ => error ("Dependency "
-                  ^ quote name ^ " -> " ^ quote name'
-                  ^ " would result in module dependency cycle"))
-          end end;
-      in
-        nsp_nodes
-        |> map_nsp_yield module_name_path (add_stmts stmts)
-        |-> (fn (base' :: bases', stmt') =>
-           apsnd (map_node module_name_path (Graph.new_node (name, (Stmt (base', stmt')))
-              #> fold2 (fn name' => fn base' =>
-                   Graph.new_node (name', (Dummy base'))) names' bases')))
-        |> apsnd (fold (fn name => fold (add_dep name) deps) names)
-        |> apsnd (fold_product (curry (map_node module_name_path o Graph.add_edge)) names names)
-      end;
-    val stmts = map (AList.make (Graph.get_node program)) (rev (Graph.strong_conn program))
-      |> filter_out (fn [(_, stmt)] => Code_Thingol.is_case stmt | _ => false);
-    val (_, nodes) = fold add_stmts' stmts empty_module;
-    fun deresolver prefix name = 
-      let
-        val module_name = (fst o dest_name) name;
-        val module_name' = (Long_Name.explode o mk_name_module) module_name;
-        val (_, (_, remainder)) = chop_prefix (op =) (prefix, module_name');
-        val stmt_name =
-          nodes
-          |> fold (fn name => fn node => case Graph.get_node node name
-              of Module (_, (_, node)) => node) module_name'
-          |> (fn node => case Graph.get_node node name of Stmt (stmt_name, _) => stmt_name
-               | Dummy stmt_name => stmt_name);
-      in
-        Long_Name.implode (remainder @ [stmt_name])
-      end handle Graph.UNDEF _ =>
-        error ("Unknown statement name: " ^ labelled_name name);
-  in (deresolver, nodes) end;
+  in
+    Code_Namespace.hierarchical_program labelled_name { module_alias = module_alias, reserved = reserved,
+      empty_nsp = (reserved, reserved), namify_module = pair, namify_stmt = namify_stmt,
+      cyclic_modules = false, empty_data = (), memorize_data = K I, modify_stmts = modify_stmts } program
+  end;
 
 fun serialize_ml target print_module print_stmt with_signatures { labelled_name,
   reserved_syms, includes, module_alias, class_syntax, tyco_syntax,
-  const_syntax, program, names, presentation_names } =
+  const_syntax, program, names } =
   let
     val is_cons = Code_Thingol.is_cons program;
-    val is_presentation = not (null presentation_names);
-    val (deresolver, nodes) = ml_node_of_program labelled_name
-      reserved_syms module_alias program;
-    val reserved = make_vars reserved_syms;
-    fun print_node prefix (Dummy _) =
+    val { deresolver, hierarchical_program = ml_program } =
+      ml_program_of_program labelled_name (Name.make_context reserved_syms) module_alias program;
+    val print_stmt = print_stmt labelled_name tyco_syntax const_syntax
+      (make_vars reserved_syms) is_cons;
+    fun print_node _ (_, Code_Namespace.Dummy) =
           NONE
-      | print_node prefix (Stmt (_, stmt)) = if is_presentation andalso
-          (null o filter (member (op =) presentation_names) o stmt_names_of) stmt
-          then NONE
-          else SOME (print_stmt labelled_name tyco_syntax const_syntax reserved is_cons (deresolver prefix) stmt)
-      | print_node prefix (Module (module_name, (_, nodes))) =
+      | print_node prefix_fragments (name, Code_Namespace.Stmt stmt) =
+          SOME (apsnd (markup_stmt name) (print_stmt (deresolver prefix_fragments) stmt))
+      | print_node prefix_fragments (name_fragment, Code_Namespace.Module (_, nodes)) =
           let
-            val (decls, body) = print_nodes (prefix @ [module_name]) nodes;
-            val p = if is_presentation then Pretty.chunks2 body
-              else print_module module_name (if with_signatures then SOME decls else NONE) body;
+            val (decls, body) = print_nodes (prefix_fragments @ [name_fragment]) nodes;
+            val p = print_module name_fragment
+                (if with_signatures then SOME decls else NONE) body;
           in SOME ([], p) end
-    and print_nodes prefix nodes = (map_filter (print_node prefix o Graph.get_node nodes)
+    and print_nodes prefix_fragments nodes = (map_filter
+        (fn name => print_node prefix_fragments (name, snd (Graph.get_node nodes name)))
         o rev o flat o Graph.strong_conn) nodes
       |> split_list
       |> (fn (decls, body) => (flat decls, body))
     val names' = map (try (deresolver [])) names;
-    val p = Pretty.chunks2 (map snd includes @ snd (print_nodes [] nodes));
-    fun write width NONE = writeln_pretty width
-      | write width (SOME p) = File.write p o string_of_pretty width;
+    val p = Pretty.chunks2 (map snd includes @ snd (print_nodes [] ml_program));
+    fun write width NONE = writeln o format [] width
+      | write width (SOME p) = File.write p o format [] width;
   in
-    Code_Target.serialization write (fn width => (rpair names' o string_of_pretty width)) p
+    Code_Target.serialization write (rpair names' ooo format) p
   end;
 
-end; (*local*)
-
 val serializer_sml : Code_Target.serializer =
   Code_Target.parse_args (Scan.optional (Args.$$$ "no_signatures" >> K false) true
   >> (fn with_signatures => serialize_ml target_SML
--- a/src/Tools/Code/code_namespace.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/Code/code_namespace.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -6,31 +6,46 @@
 
 signature CODE_NAMESPACE =
 sig
-  datatype 'a node =
+  val dest_name: string -> string * string
+  datatype ('a, 'b) node =
       Dummy
-    | Stmt of Code_Thingol.stmt
-    | Module of ('a * (string * 'a node) Graph.T);
+    | Stmt of 'a
+    | Module of ('b * (string * ('a, 'b) node) Graph.T);
   val hierarchical_program: (string -> string) -> { module_alias: string -> string option,
-    reserved: Name.context, empty_nsp: 'b, namify_module: string -> 'b -> string * 'b,
-    namify_stmt: Code_Thingol.stmt -> string -> 'b -> string * 'b,
-    cyclic_modules: bool, empty_data: 'a, memorize_data: string -> 'a -> 'a }
+    reserved: Name.context, empty_nsp: 'c, namify_module: string -> 'c -> string * 'c,
+    namify_stmt: Code_Thingol.stmt -> string -> 'c -> string * 'c,
+    cyclic_modules: bool, empty_data: 'b, memorize_data: string -> 'b -> 'b,
+    modify_stmts: (string * Code_Thingol.stmt) list -> 'a option list }
       -> Code_Thingol.program
       -> { deresolver: string list -> string -> string,
-           hierarchical_program: (string * 'a node) Graph.T }
+           hierarchical_program: (string * ('a, 'b) node) Graph.T }
 end;
 
 structure Code_Namespace : CODE_NAMESPACE =
 struct
 
-(* hierarchical program structure *)
+(** splitting names in module and base part **)
+
+val dest_name =
+  apfst Long_Name.implode o split_last o fst o split_last o Long_Name.explode;
+
+
+(** hierarchical program structure **)
 
-datatype 'a node =
+datatype ('a, 'b) node =
     Dummy
-  | Stmt of Code_Thingol.stmt
-  | Module of ('a * (string * 'a node) Graph.T);
+  | Stmt of 'a
+  | Module of ('b * (string * ('a, 'b) node) Graph.T);
+
+fun map_module_content f (Module content) = Module (f content);
+
+fun map_module [] = I
+  | map_module (name_fragment :: name_fragments) =
+      apsnd o Graph.map_node name_fragment o apsnd o map_module_content
+        o map_module name_fragments;
 
 fun hierarchical_program labelled_name { module_alias, reserved, empty_nsp,
-      namify_module, namify_stmt, cyclic_modules, empty_data, memorize_data } program =
+      namify_module, namify_stmt, cyclic_modules, empty_data, memorize_data, modify_stmts } program =
   let
 
     (* building module name hierarchy *)
@@ -38,18 +53,13 @@
      of SOME name' => Long_Name.explode name'
       | NONE => map (fn name => fst (yield_singleton Name.variants name reserved))
           (Long_Name.explode name);
-    val module_names = Graph.fold (insert (op =) o fst o Code_Printer.dest_name o fst) program [];
+    val module_names = Graph.fold (insert (op =) o fst o dest_name o fst) program [];
     val fragments_tab = fold (fn name => Symtab.update
       (name, alias_fragments name)) module_names Symtab.empty;
-    val dest_name = Code_Printer.dest_name #>> (the o Symtab.lookup fragments_tab);
+    val dest_name = dest_name #>> (the o Symtab.lookup fragments_tab);
 
     (* building empty module hierarchy *)
     val empty_module = (empty_data, Graph.empty);
-    fun map_module f (Module content) = Module (f content);
-    fun change_module [] = I
-      | change_module (name_fragment :: name_fragments) =
-          apsnd o Graph.map_node name_fragment o apsnd o map_module
-            o change_module name_fragments;
     fun ensure_module name_fragment (data, nodes) =
       if can (Graph.get_node nodes) name_fragment then (data, nodes)
       else (data,
@@ -57,7 +67,7 @@
     fun allocate_module [] = I
       | allocate_module (name_fragment :: name_fragments) =
           ensure_module name_fragment
-          #> (apsnd o Graph.map_node name_fragment o apsnd o map_module o allocate_module) name_fragments;
+          #> (apsnd o Graph.map_node name_fragment o apsnd o map_module_content o allocate_module) name_fragments;
     val empty_program = Symtab.fold (fn (_, fragments) => allocate_module fragments)
       fragments_tab empty_module;
 
@@ -66,8 +76,7 @@
       let
         val (name_fragments, base) = dest_name name;
       in
-        change_module name_fragments (fn (data, nodes) =>
-          (memorize_data name data, Graph.new_node (name, (base, Stmt stmt)) nodes))
+        (map_module name_fragments o apsnd) (Graph.new_node (name, (base, Stmt stmt)))
       end;
     fun add_dependency name name' =
       let
@@ -75,42 +84,47 @@
         val (name_fragments', base') = dest_name name';
         val (name_fragments_common, (diff, diff')) =
           chop_prefix (op =) (name_fragments, name_fragments');
-        val (is_module, dep) = if null diff then (false, (name, name'))
-          else (true, (hd diff, hd diff'))
+        val is_module = not (null diff andalso null diff');
+        val dep = pairself hd (diff @ [name], diff' @ [name']);
         val add_edge = if is_module andalso not cyclic_modules
           then (fn node => Graph.add_edge_acyclic dep node
             handle Graph.CYCLES _ => error ("Dependency "
               ^ quote name ^ " -> " ^ quote name'
               ^ " would result in module dependency cycle"))
           else Graph.add_edge dep
-      in (change_module name_fragments_common o apsnd) add_edge end;
+      in (map_module name_fragments_common o apsnd) add_edge end;
     val proto_program = empty_program
       |> Graph.fold (fn (name, (stmt, _)) => add_stmt name stmt) program
       |> Graph.fold (fn (name, (_, (_, names))) => fold (add_dependency name) names) program;
 
-    (* name declarations *)
+    (* name declarations, data and statement modifications *)
     fun make_declarations nsps (data, nodes) =
       let
         val (module_fragments, stmt_names) = List.partition
           (fn name_fragment => case Graph.get_node nodes name_fragment
             of (_, Module _) => true | _ => false) (Graph.keys nodes);
-        fun modify_stmt (Stmt (Code_Thingol.Datatypecons _)) = Dummy
-          | modify_stmt (Stmt (Code_Thingol.Classrel _)) = Dummy
-          | modify_stmt (Stmt (Code_Thingol.Classparam _)) = Dummy
-          | modify_stmt stmt = stmt;
-        fun declare namify modify name (nsps, nodes) =
+        fun declare namify name (nsps, nodes) =
           let
             val (base, node) = Graph.get_node nodes name;
             val (base', nsps') = namify node base nsps;
-            val nodes' = Graph.map_node name (K (base', modify node)) nodes;
+            val nodes' = Graph.map_node name (K (base', node)) nodes;
           in (nsps', nodes') end;
         val (nsps', nodes') = (nsps, nodes)
-          |> fold (declare (K namify_module) I) module_fragments
-          |> fold (declare (namify_stmt o (fn Stmt stmt => stmt)) modify_stmt) stmt_names;
-        val nodes'' = nodes'
-          |> fold (fn name_fragment => (Graph.map_node name_fragment
-              o apsnd o map_module) (make_declarations nsps')) module_fragments;
-      in (data, nodes'') end;
+          |> fold (declare (K namify_module)) module_fragments
+          |> fold (declare (namify_stmt o (fn Stmt stmt => stmt))) stmt_names;
+        fun zip_fillup xs ys = xs ~~ ys @ replicate (length xs - length ys) NONE;
+        fun select_names names = case filter (member (op =) stmt_names) names
+         of [] => NONE
+          | xs => SOME xs;
+        val modify_stmts' = AList.make (snd o Graph.get_node nodes)
+          #> split_list
+          ##> map (fn Stmt stmt => stmt)
+          #> (fn (names, stmts) => zip_fillup names (modify_stmts (names ~~ stmts)));
+        val stmtss' = (maps modify_stmts' o map_filter select_names o Graph.strong_conn) nodes;
+        val nodes'' = Graph.map (fn name => apsnd (fn Module content => Module (make_declarations nsps' content)
+            | _ => case AList.lookup (op =) stmtss' name of SOME (SOME stmt) => Stmt stmt | _ => Dummy)) nodes';
+        val data' = fold memorize_data stmt_names data;
+      in (data', nodes'') end;
     val (_, hierarchical_program) = make_declarations empty_nsp proto_program;
 
     (* deresolving *)
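(Editorial sketch, not part of the changeset: the new modify_stmts field in the record above receives one strongly connected group of (name, statement) pairs and yields, position by position, either SOME of a target-specific statement or NONE for entries that become Dummy nodes.  The real instances live in code_ml.ML and code_scala.ML; a trivial instance that keeps every statement unchanged would look like:

  (* illustrative only: identity-like modify_stmts instance *)
  fun keep_all (stmts : (string * Code_Thingol.stmt) list) =
    map (SOME o snd) stmts;
)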
--- a/src/Tools/Code/code_printer.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/Code/code_printer.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -25,8 +25,8 @@
   val semicolon: Pretty.T list -> Pretty.T
   val doublesemicolon: Pretty.T list -> Pretty.T
   val indent: int -> Pretty.T -> Pretty.T
-  val string_of_pretty: int -> Pretty.T -> string
-  val writeln_pretty: int -> Pretty.T -> unit
+  val markup_stmt: string -> Pretty.T -> Pretty.T
+  val format: string list -> int -> Pretty.T -> string
 
   val first_upper: string -> string
   val first_lower: string -> string
@@ -70,7 +70,6 @@
   val applify: string -> string -> ('a -> Pretty.T) -> fixity -> Pretty.T -> 'a list -> Pretty.T
   val tuplify: (fixity -> 'a -> Pretty.T) -> fixity -> 'a list -> Pretty.T option
 
-
   type tyco_syntax
   type simple_const_syntax
   type complex_const_syntax
@@ -93,10 +92,6 @@
   val gen_print_bind: (thm option -> var_ctxt -> fixity -> iterm -> Pretty.T)
     -> thm option -> fixity
     -> iterm -> var_ctxt -> Pretty.T * var_ctxt
-
-  val mk_name_module: Name.context -> string option -> (string -> string option)
-    -> 'a Graph.T -> string -> string
-  val dest_name: string -> string * string
 end;
 
 structure Code_Printer : CODE_PRINTER =
@@ -104,9 +99,16 @@
 
 open Code_Thingol;
 
+(** generic nonsense *)
+
 fun eqn_error (SOME thm) s = error (s ^ ",\nin equation " ^ Display.string_of_thm_without_context thm)
   | eqn_error NONE s = error s;
 
+val code_presentationN = "code_presentation";
+val stmt_nameN = "stmt_name";
+val _ = Markup.add_mode code_presentationN YXML.output_markup;
+
+
 (** assembling and printing text pieces **)
 
 infixr 5 @@;
@@ -125,8 +127,35 @@
 fun doublesemicolon ps = Pretty.block [concat ps, str ";;"];
 fun indent i = Print_Mode.setmp [] (Pretty.indent i);
 
-fun string_of_pretty width p = Print_Mode.setmp [] (Pretty.string_of_margin width) p ^ "\n";
-fun writeln_pretty width p = writeln (string_of_pretty width p);
+fun markup_stmt name = Print_Mode.setmp [code_presentationN]
+  (Pretty.mark (code_presentationN, [(stmt_nameN, name)]));
+
+fun filter_presentation presentation_names selected (XML.Elem ((name, attrs), xs)) =
+      implode (map (filter_presentation presentation_names
+        (selected orelse (name = code_presentationN
+          andalso member (op =) presentation_names (the (Properties.get attrs stmt_nameN))))) xs)
+  | filter_presentation presentation_names selected (XML.Text s) =
+      if selected then s else "";
+
+fun maps_string s f [] = ""
+  | maps_string s f (x :: xs) =
+      let
+        val s1 = f x;
+        val s2 = maps_string s f xs;
+      in if s1 = "" then s2
+        else if s2 = "" then s1
+        else s1 ^ s ^ s2
+      end;
+
+fun plain_text (XML.Elem (_, xs)) = implode (map plain_text xs)
+  | plain_text (XML.Text s) = s
+
+fun format presentation_names width =
+  Print_Mode.setmp [code_presentationN] (Pretty.string_of_margin width)
+  #> YXML.parse_body
+  #> (if null presentation_names then implode o map plain_text
+      else maps_string "\n\n" (filter_presentation presentation_names false))
+  #> suffix "\n";
 
 
 (** names and variable name contexts **)
@@ -375,28 +404,4 @@
 
 val parse_const_syntax = parse_syntax plain_const_syntax simple_const_syntax fst;
 
-
-(** module name spaces **)
-
-val dest_name =
-  apfst Long_Name.implode o split_last o fst o split_last o Long_Name.explode;
-
-fun mk_name_module reserved module_prefix module_alias program =
-  let
-    fun mk_alias name = case module_alias name
-     of SOME name' => name'
-      | NONE => name
-          |> Long_Name.explode
-          |> map (fn name => (the_single o fst) (Name.variants [name] reserved))
-          |> Long_Name.implode;
-    fun mk_prefix name = case module_prefix
-     of SOME module_prefix => Long_Name.append module_prefix name
-      | NONE => name;
-    val tab =
-      Symtab.empty
-      |> Graph.fold ((fn name => Symtab.default (name, (mk_alias #> mk_prefix) name))
-           o fst o dest_name o fst)
-             program
-  in the o Symtab.lookup tab end;
-
 end; (*struct*)
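(Editorial sketch, not part of the changeset: the former string_of_pretty/writeln_pretty pair is replaced above by markup_stmt and format.  Statements are wrapped in code_presentation markup while printing, and format either strips the markup entirely or keeps only the statements named for presentation.  Assuming some Pretty.T value "p" printed for a hypothetical statement name "Example.foo":

  (* illustrative only: markup-based presentation filtering *)
  val marked    = markup_stmt "Example.foo" p;
  val full_text = format [] 80 marked;               (* markup stripped, everything kept *)
  val foo_only  = format ["Example.foo"] 80 marked;  (* only the marked statement survives *)
)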
--- a/src/Tools/Code/code_scala.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/Code/code_scala.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -193,8 +193,7 @@
                 str "match", str "{"], str "}")
               (map print_clause eqs)
           end;
-    val print_method = str o Library.enclose "`" "`" o space_implode "+"
-      o Long_Name.explode o deresolve_full;
+    val print_method = str o Library.enclose "`" "`" o deresolve_full;
     fun print_stmt (name, Code_Thingol.Fun (_, (((vs, ty), raw_eqs), _))) =
           print_def name (vs, ty) (filter (snd o snd) raw_eqs)
       | print_stmt (name, Code_Thingol.Datatype (_, (vs, cos))) =
@@ -318,21 +317,25 @@
         val implicits = filter (is_classinst o Graph.get_node program)
           (Graph.imm_succs program name);
       in union (op =) implicits end;
+    fun modify_stmt (_, Code_Thingol.Fun (_, (_, SOME _))) = NONE
+      | modify_stmt (_, Code_Thingol.Datatypecons _) = NONE
+      | modify_stmt (_, Code_Thingol.Classrel _) = NONE
+      | modify_stmt (_, Code_Thingol.Classparam _) = NONE
+      | modify_stmt (_, stmt) = SOME stmt;
   in
     Code_Namespace.hierarchical_program labelled_name { module_alias = module_alias, reserved = reserved,
       empty_nsp = ((reserved, reserved), reserved), namify_module = namify_module, namify_stmt = namify_stmt,
-      cyclic_modules = true, empty_data = [], memorize_data = memorize_implicits } program
+      cyclic_modules = true, empty_data = [], memorize_data = memorize_implicits, modify_stmts = map modify_stmt } program
   end;
 
 fun serialize_scala { labelled_name, reserved_syms, includes,
     module_alias, class_syntax, tyco_syntax, const_syntax, program,
-    names, presentation_names } =
+    names } =
   let
 
     (* build program *)
-    val reserved = fold (insert (op =) o fst) includes reserved_syms;
     val { deresolver, hierarchical_program = sca_program } =
-      scala_program_of_program labelled_name (Name.make_context reserved) module_alias program;
+      scala_program_of_program labelled_name (Name.make_context reserved_syms) module_alias program;
 
     (* print statements *)
     fun lookup_constr tyco constr = case Graph.get_node program tyco
@@ -352,7 +355,7 @@
      of Code_Thingol.Datatypecons (_, tyco) => null (lookup_constr tyco c)
       | _ => false;
     val print_stmt = print_scala_stmt labelled_name tyco_syntax const_syntax
-      (make_vars reserved) args_num is_singleton_constr;
+      (make_vars reserved_syms) args_num is_singleton_constr;
 
     (* print nodes *)
     fun print_module base implicit_ps p = Pretty.chunks2
@@ -364,23 +367,17 @@
       let
         val s = deresolver prefix_fragments implicit;
       in if length (Long_Name.explode s) = 1 then NONE else SOME (str s) end;
-    fun print_node _ (_, Dummy) = NONE
+    fun print_node _ (_, Code_Namespace.Dummy) = NONE
       | print_node prefix_fragments (name, Code_Namespace.Stmt stmt) =
-          if null presentation_names
-          orelse member (op =) presentation_names name
-          then SOME (print_stmt (deresolver prefix_fragments, deresolver []) (name, stmt))
-          else NONE
+          SOME (markup_stmt name (print_stmt (deresolver prefix_fragments, deresolver []) (name, stmt)))
       | print_node prefix_fragments (name_fragment, Code_Namespace.Module (implicits, nodes)) =
-          if null presentation_names
-          then
-            let
-              val prefix_fragments' = prefix_fragments @ [name_fragment];
-            in
-              Option.map (print_module name_fragment
-                (map_filter (print_implicit prefix_fragments') implicits))
-                  (print_nodes prefix_fragments' nodes)
-            end
-          else print_nodes [] nodes
+          let
+            val prefix_fragments' = prefix_fragments @ [name_fragment];
+          in
+            Option.map (print_module name_fragment
+              (map_filter (print_implicit prefix_fragments') implicits))
+                (print_nodes prefix_fragments' nodes)
+          end
     and print_nodes prefix_fragments nodes = let
         val ps = map_filter (fn name => print_node prefix_fragments (name,
           snd (Graph.get_node nodes name)))
@@ -388,12 +385,11 @@
       in if null ps then NONE else SOME (Pretty.chunks2 ps) end;
 
     (* serialization *)
-    val p_includes = if null presentation_names then map snd includes else [];
-    val p = Pretty.chunks2 (p_includes @ the_list (print_nodes [] sca_program));
-    fun write width NONE = writeln_pretty width
-      | write width (SOME p) = File.write p o string_of_pretty width;
+    val p = Pretty.chunks2 (map snd includes @ the_list (print_nodes [] sca_program));
+    fun write width NONE = writeln o format [] width
+      | write width (SOME p) = File.write p o format [] width;
   in
-    Code_Target.serialization write (rpair [] oo string_of_pretty) p
+    Code_Target.serialization write (rpair [] ooo format) p
   end;
 
 val serializer : Code_Target.serializer =
--- a/src/Tools/Code/code_target.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/Code/code_target.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -8,6 +8,7 @@
 sig
   val cert_tyco: theory -> string -> string
   val read_tyco: theory -> string -> string
+  val read_const_exprs: theory -> string list -> string list
 
   val export_code_for: theory -> Path.T option -> string -> int option -> string -> Token.T list
     -> Code_Thingol.naming -> Code_Thingol.program -> string list -> unit
@@ -42,7 +43,7 @@
   type serialization
   val parse_args: 'a parser -> Token.T list -> 'a
   val serialization: (int -> Path.T option -> 'a -> unit)
-    -> (int -> 'a -> string * string option list)
+    -> (string list -> int -> 'a -> string * string option list)
     -> 'a -> serialization
   val set_default_code_width: int -> theory -> theory
 
@@ -72,12 +73,9 @@
 datatype destination = Export of Path.T option | Produce | Present of string list;
 type serialization = int -> destination -> (string * string option list) option;
 
-fun stmt_names_of_destination (Present stmt_names) = stmt_names
-  | stmt_names_of_destination _ = [];
-
 fun serialization output _ content width (Export some_path) = (output width some_path content; NONE)
-  | serialization _ string content width Produce = SOME (string width content)
-  | serialization _ string content width (Present _) = SOME (string width content);
+  | serialization _ string content width Produce = SOME (string [] width content)
+  | serialization _ string content width (Present stmt_names) = SOME (string stmt_names width content);
 
 fun export some_path f = (f (Export some_path); ());
 fun produce f = the (f Produce);
@@ -116,8 +114,7 @@
     tyco_syntax: string -> Code_Printer.tyco_syntax option,
     const_syntax: string -> Code_Printer.activated_const_syntax option,
     program: Code_Thingol.program,
-    names: string list,
-    presentation_names: string list }
+    names: string list }
   -> serialization;
 
 datatype description = Fundamental of { serializer: serializer,
@@ -308,7 +305,7 @@
 
 fun invoke_serializer thy abortable serializer literals reserved abs_includes 
     module_alias proto_class_syntax proto_instance_syntax proto_tyco_syntax proto_const_syntax
-    module_name args naming proto_program (names, presentation_names) =
+    module_name args naming proto_program names =
   let
     val (names_hidden, (class_syntax, tyco_syntax, const_syntax)) =
       activate_symbol_syntax thy literals naming
@@ -325,19 +322,15 @@
       tyco_syntax = Symtab.lookup tyco_syntax,
       const_syntax = Symtab.lookup const_syntax,
       program = program,
-      names = names,
-      presentation_names = presentation_names }
+      names = names }
   end;
 
-fun mount_serializer thy target some_width raw_module_name args naming proto_program names destination =
+fun mount_serializer thy target some_width module_name args naming proto_program names =
   let
     val (default_width, abortable, data, program) =
       activate_target_for thy target naming proto_program;
     val serializer = case the_description data
      of Fundamental seri => #serializer seri;
-    val presentation_names = stmt_names_of_destination destination;
-    val module_name = if null presentation_names
-      then raw_module_name else "Code";
     val reserved = the_reserved data;
     fun select_include names_all (name, (content, cs)) =
       if null cs then SOME (name, content)
@@ -354,7 +347,7 @@
   in
     invoke_serializer thy abortable serializer literals reserved
       includes module_alias class instance tyco const module_name args
-        naming program (names, presentation_names) width destination
+        naming program names width
   end;
 
 fun assert_module_name "" = error ("Empty module name not allowed.")
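(Editorial sketch, not part of the changeset: after the interface change above, a target's string-producing continuation has type string list -> int -> 'a -> string * string option list, and the Present destination passes its statement names through as that first list instead of going via a separate presentation_names field.  Mirroring the code_ml.ML and code_scala.ML hunks, a serializer is now wired up roughly as:

  (* illustrative only: shape of the new serialization call *)
  fun write width NONE = writeln o format [] width
    | write width (SOME path) = File.write path o format [] width;
  val serialize = Code_Target.serialization write
    (fn present => fn width => rpair [] o format present width);
)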
--- a/src/Tools/WWW_Find/find_theorems.ML	Thu Sep 02 20:29:12 2010 +0200
+++ b/src/Tools/WWW_Find/find_theorems.ML	Thu Sep 02 20:44:33 2010 +0200
@@ -143,9 +143,9 @@
 
 fun html_thm ctxt (n, (thmref, thm)) =
   let
-    val string_of_thm =
+    val output_thm =
       Output.output o Pretty.string_of_margin 100 o
-        setmp_CRITICAL show_question_marks false (Display.pretty_thm ctxt);
+        Display.pretty_thm (Config.put show_question_marks false ctxt);
   in
     tag' "tr" (class ("row" ^ Int.toString (n mod 2)))
       [
@@ -153,7 +153,7 @@
           [span' (sorry_class thm)
              [raw_text (Facts.string_of_ref thmref)]
           ],
-        tag' "td" (class "thm") [pre noid (string_of_thm thm)]
+        tag' "td" (class "thm") [pre noid (output_thm thm)]
       ]
   end;
 
@@ -236,7 +236,7 @@
   end;
 in
 
-val () = show_question_marks := false;
+val () = show_question_marks_default := false;
 val () = ScgiServer.register (find_theorems_url, SOME Mime.html, find_theorems);
 
 end;