merged -- with manual conflict resolution for src/HOL/SMT_Examples/SMT_Examples.certs2, src/HOL/SMT_Examples/SMT_Word_Examples.certs2, src/Doc/Prog_Prove/document/intro-isabelle.tex;
author      wenzelm
date        Sun, 10 Aug 2014 14:34:43 +0200
changeset   57882 38bf4de248a6
parent      57881 37920df63ab9 (diff)
parent      57880 47c092fd4b45 (current diff)
child       57883 d50aeb916a4b
CONTRIBUTORS
NEWS
src/Doc/Prog_Prove/document/intro-isabelle.tex
src/HOL/Tools/BNF/bnf_fp_def_sugar.ML
src/HOL/Tools/BNF/bnf_fp_def_sugar_tactics.ML
src/HOL/Tools/Ctr_Sugar/ctr_sugar.ML
src/HOL/Tools/Sledgehammer/sledgehammer_util.ML
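
The commit message records a Mercurial merge that was resolved by hand for the three listed files. For reference, such a merge typically proceeds roughly as follows; this is an illustrative session only, not part of the changeset, and the exact commands and file states may differ:

    hg pull
    hg merge
    # hg reports merge conflicts, e.g. in src/HOL/SMT_Examples/SMT_Examples.certs2;
    # edit each conflicting file by hand, then mark it as resolved:
    hg resolve --mark src/HOL/SMT_Examples/SMT_Examples.certs2
    hg resolve --mark src/HOL/SMT_Examples/SMT_Word_Examples.certs2
    hg resolve --mark src/Doc/Prog_Prove/document/intro-isabelle.tex
    hg commit -m 'merged -- with manual conflict resolution for ...'
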
--- a/CONTRIBUTORS	Sun Aug 10 14:08:36 2014 +0200
+++ b/CONTRIBUTORS	Sun Aug 10 14:34:43 2014 +0200
@@ -3,6 +3,10 @@
 who is listed as an author in one of the source files of this Isabelle
 distribution.
 
+Contributions to this Isabelle version
+--------------------------------------
+
+
 Contributions to Isabelle2014
 -----------------------------
 
--- a/NEWS	Sun Aug 10 14:08:36 2014 +0200
+++ b/NEWS	Sun Aug 10 14:34:43 2014 +0200
@@ -1,6 +1,18 @@
 Isabelle NEWS -- history of user-relevant changes
 =================================================
 
+New in this Isabelle version
+----------------------------
+
+*** HOL ***
+
+* Sledgehammer:
+  - Minimization is now always enabled by default.
+    Removed subcommand:
+      min
+
+
+
 New in Isabelle2014 (August 2014)
 ---------------------------------
 
--- a/src/Doc/Datatypes/Datatypes.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Datatypes/Datatypes.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -956,14 +956,13 @@
 \item[@{text "t."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n, induct t]"}\rm:] ~ \\
 @{thm list.induct[no_vars]}
 
-\item[@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
-Given $m > 1$ mutually recursive datatypes, this induction rule can be used to
-prove $m$ properties simultaneously.
-
 \item[@{text "t."}\hthm{rel_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n, induct pred]"}\rm:] ~ \\
 @{thm list.rel_induct[no_vars]}
 
-\item[@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{rel_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
+\item[\begin{tabular}{@ {}l@ {}}
+  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm: \\
+  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{rel_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm: \\
+\end{tabular}] ~ \\
 Given $m > 1$ mutually recursive datatypes, this induction rule can be used to
 prove $m$ properties simultaneously.
 
@@ -1775,6 +1774,13 @@
 Given $m > 1$ mutually corecursive codatatypes, these coinduction rules can be
 used to prove $m$ properties simultaneously.
 
+\item[\begin{tabular}{@ {}l@ {}}
+  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{set_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n,"} \\
+  \phantom{@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{set_induct} @{text "["}}@{text "induct set: set\<^sub>j_t\<^sub>1, \<dots>, induct set: set\<^sub>j_t\<^sub>m]"}\rm: \\
+\end{tabular}] ~ \\
+@{thm llist.set_induct[no_vars]} \\
+If $m = 1$, the attribute @{text "[consumes 1]"} is generated as well.
+
 \item[@{text "t."}\hthm{corec}\rm:] ~ \\
 @{thm llist.corec(1)[no_vars]} \\
 @{thm llist.corec(2)[no_vars]}
--- a/src/Doc/Main/document/root.tex	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Main/document/root.tex	Sun Aug 10 14:34:43 2014 +0200
@@ -24,6 +24,8 @@
 
 \parindent 0pt\parskip 0.5ex
 
+\renewcommand{\isacharunderscore}{\_}
+
 \usepackage{supertabular}
 
 \begin{document}
--- a/src/Doc/Prog_Prove/Basics.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Prog_Prove/Basics.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -73,10 +73,9 @@
 called \concept{type inference}.  Despite type inference, it is sometimes
 necessary to attach an explicit \concept{type constraint} (or \concept{type
 annotation}) to a variable or term.  The syntax is @{text "t :: \<tau>"} as in
-\mbox{\noquotes{@{prop[source] "m < (n::nat)"}}}. Type constraints may be
+\mbox{\noquotes{@{term[source] "m + (n::nat)"}}}. Type constraints may be
 needed to
-disambiguate terms involving overloaded functions such as @{text "+"}, @{text
-"*"} and @{text"\<le>"}.
+disambiguate terms involving overloaded functions such as @{text "+"}.
 
 Finally there are the universal quantifier @{text"\<And>"}\index{$4@\isasymAnd} and the implication
 @{text"\<Longrightarrow>"}\index{$3@\isasymLongrightarrow}. They are part of the Isabelle framework, not the logic
@@ -100,7 +99,7 @@
 
 Roughly speaking, a \concept{theory} is a named collection of types,
 functions, and theorems, much like a module in a programming language.
-All the Isabelle text that you ever type needs to go into a theory.
+All Isabelle text needs to go into a theory.
 The general format of a theory @{text T} is
 \begin{quote}
 \indexed{\isacom{theory}}{theory} @{text T}\\
--- a/src/Doc/Prog_Prove/Bool_nat_list.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Prog_Prove/Bool_nat_list.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -213,6 +213,7 @@
 \input{MyList.thy}\end{alltt}
 \caption{A Theory of Lists}
 \label{fig:MyList}
+\index{comment}
 \end{figure}
 
 \subsubsection{Structural Induction for Lists}
--- a/src/Doc/Prog_Prove/MyList.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Prog_Prove/MyList.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -14,4 +14,6 @@
 
 value "rev(Cons True (Cons False Nil))"
 
+(* a comment *)
+
 end
--- a/src/Doc/Prog_Prove/Types_and_funs.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Prog_Prove/Types_and_funs.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -93,8 +93,9 @@
 text{*
 Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
 Pairs can be taken apart either by pattern matching (as above) or with the
-projection functions @{const fst} and @{const snd}: @{thm fst_conv} and @{thm snd_conv}. Tuples are simulated by pairs nested to the right: @{term"(a,b,c)"}
-abbreviates @{text"(a, (b, c))"} and @{text "\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3"} abbreviates
+projection functions @{const fst} and @{const snd}: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
+Tuples are simulated by pairs nested to the right: @{term"(a,b,c)"}
+is short for @{text"(a, (b, c))"} and @{text "\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3"} is short for
 @{text "\<tau>\<^sub>1 \<times> (\<tau>\<^sub>2 \<times> \<tau>\<^sub>3)"}.
 
 \subsection{Definitions}
@@ -388,7 +389,7 @@
 \begin{array}{r@ {}c@ {}l@ {\quad}l}
 @{text"(0 + Suc 0"} & \leq & @{text"Suc 0 + x)"}  & \stackrel{(1)}{=} \\
 @{text"(Suc 0"}     & \leq & @{text"Suc 0 + x)"}  & \stackrel{(2)}{=} \\
-@{text"(Suc 0"}     & \leq & @{text"Suc (0 + x)"} & \stackrel{(3)}{=} \\
+@{text"(Suc 0"}     & \leq & @{text"Suc (0 + x))"} & \stackrel{(3)}{=} \\
 @{text"(0"}         & \leq & @{text"0 + x)"}      & \stackrel{(4)}{=} \\[1ex]
  & @{const True}
 \end{array}
--- a/src/Doc/Prog_Prove/document/intro-isabelle.tex	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Prog_Prove/document/intro-isabelle.tex	Sun Aug 10 14:34:43 2014 +0200
@@ -82,13 +82,13 @@
 \emph{Programming and Proving in Isabelle/HOL} constitutes part~I of
 \href{http://www.concrete-semantics.org}{Concrete Semantics}.  The web
 pages for \href{http://www.concrete-semantics.org}{Concrete Semantics}
-also provide a set of \LaTeX-based slides for teaching \emph{Programming and
-Proving in Isabelle/HOL}.
+also provide a set of \LaTeX-based slides and Isabelle demo files
+for teaching \emph{Programming and Proving in Isabelle/HOL}.
 \fi
 
 \ifsem\else
 \paragraph{Acknowledgements}
 I wish to thank the following people for their comments on this document:
-Florian Haftmann, Ren\'{e} Thiemann, Sean Seefried, Christian Sternagel
-and Carl Witty.
+Florian Haftmann, Peter Johnson, Ren\'{e} Thiemann, Sean Seefried,
+Christian Sternagel and Carl Witty.
 \fi
\ No newline at end of file
--- a/src/Doc/Sledgehammer/document/root.tex	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/Doc/Sledgehammer/document/root.tex	Sun Aug 10 14:34:43 2014 +0200
@@ -447,36 +447,15 @@
 \point{Why does Metis fail to reconstruct the proof?}
 
 There are many reasons. If Metis runs seemingly forever, that is a sign that the
-proof is too difficult for it. Metis's search is complete, so it should
-eventually find it, but that's little consolation. There are several possible
-solutions:
-
-\begin{enum}
-\item[\labelitemi] Try the \textit{isar\_proofs} option (\S\ref{output-format}) to
-obtain a step-by-step Isar proof. Since the steps are fairly small, \textit{metis}
-and the other Isabelle proof methods are more likely to be able to replay them.
-
-\item[\labelitemi] Try the \textit{smt2} proof method instead of \textit{metis}.
-It is usually stronger, but you need to either have Z3 available to replay the
-proofs, trust the SMT solver, or use certificates. See the documentation in the
-\textit{SMT2} theory (\texttt{\$ISABELLE\_HOME/src/HOL/SMT2.thy}) for details.
-
-\item[\labelitemi] Try the \textit{blast} or \textit{auto} proof methods, passing
-the necessary facts via \textbf{unfolding}, \textbf{using}, \textit{intro}{:},
-\textit{elim}{:}, \textit{dest}{:}, or \textit{simp}{:}, as appropriate.
-\end{enum}
+proof is too difficult for it. Metis's search is complete for first-order logic
+with equality, so if the proof was found by an ATP such as E, SPASS, or Vampire,
+Metis should eventually find it, but that's little consolation.
 
 In some rare cases, \textit{metis} fails fairly quickly, and you get the error
-message
-
-\prew
-\slshape
-One-line proof reconstruction failed.
-\postw
-
-This message indicates that Sledgehammer determined that the goal is provable,
-but the proof is, for technical reasons, beyond \textit{metis}'s power. You can
-then try again with the \textit{strict} option (\S\ref{problem-encoding}).
+message ``One-line proof reconstruction failed.'' This indicates that
+Sledgehammer determined that the goal is provable, but the proof is, for
+technical reasons, beyond \textit{metis}'s power. You can then try again with
+the \textit{strict} option (\S\ref{problem-encoding}).
 
 If the goal is actually unprovable and you did not specify an unsound encoding
 using \textit{type\_enc} (\S\ref{problem-encoding}), this is a bug, and you are
@@ -519,7 +498,7 @@
 generated by Sledgehammer instead of \textit{metis} if the proof obviously
 requires type information or if \textit{metis} failed when Sledgehammer
 preplayed the proof. (By default, Sledgehammer tries to run \textit{metis} with
-various sets of option for up to 2~seconds each time to ensure that the generated
+various sets of option for up to 1~second each time to ensure that the generated
 one-line proofs actually work and to display timing information. This can be
 configured using the \textit{preplay\_timeout} and \textit{dont\_preplay}
 options (\S\ref{timeouts}).)
@@ -554,26 +533,15 @@
 \point{Are generated proofs minimal?}
 
 Automatic provers frequently use many more facts than are necessary.
-Sledgehammer inclues a minimization tool that takes a set of facts returned by a
-given prover and repeatedly calls the same prover, \textit{metis}, or
-\textit{smt2} with subsets of those axioms in order to find a minimal set.
-Reducing the number of axioms typically improves Metis's speed and success rate,
-while also removing superfluous clutter from the proof scripts.
+Sledgehammer includes a minimization tool that takes a set of facts returned by
+a given prover and repeatedly calls a prover or proof method with subsets of
+those facts to find a minimal set. Reducing the number of facts typically helps
+reconstruction, while also removing superfluous clutter from the proof scripts.
 
 In earlier versions of Sledgehammer, generated proofs were systematically
 accompanied by a suggestion to invoke the minimization tool. This step is now
-performed implicitly if it can be done in a reasonable amount of time (something
-that can be guessed from the number of facts in the original proof and the time
-it took to find or preplay it).
-
-In addition, some provers do not provide proofs or sometimes produce incomplete
-proofs. The minimizer is then invoked to find out which facts are actually needed
-from the (large) set of facts that was initially given to the prover. Finally,
-if a prover returns a proof with lots of facts, the minimizer is invoked
-automatically since Metis would be unlikely to re-find the proof.
-%
-Automatic minimization can be forced or disabled using the \textit{minimize}
-option (\S\ref{mode-of-operation}).
+performed by default but can be disabled using the \textit{minimize} option
+(\S\ref{mode-of-operation}).
 
 \point{A strange error occurred---what should I do?}
 
@@ -623,10 +591,6 @@
 \item[\labelitemi] \textbf{\textit{run} (the default):} Runs Sledgehammer on
 subgoal number \qty{num} (1 by default), with the given options and facts.
 
-\item[\labelitemi] \textbf{\textit{min}:} Attempts to minimize the facts
-specified in the \qty{facts\_override} argument to obtain a simpler proof
-involving fewer facts. The options and goal number are as for \textit{run}.
-
 \item[\labelitemi] \textbf{\textit{messages}:} Redisplays recent messages issued
 by Sledgehammer. This allows you to examine results that might have been lost
 due to Sledgehammer's asynchronous nature. The \qty{num} argument specifies a
@@ -973,16 +937,6 @@
 SPASS, and Vampire for 5~seconds yields a similar success rate to running the
 most effective of these for 120~seconds \cite{boehme-nipkow-2010}.
 
-In addition to the local and remote provers, the Isabelle proof methods
-\textit{metis} and \textit{smt2} can be specified as \textbf{\textit{metis}}
-and \textbf{\textit{smt}}, respectively. They are generally not recommended
-for proof search but occasionally arise in Sledgehammer-generated
-minimization commands (e.g.,
-``\textbf{sledgehammer} \textit{min} [\textit{prover} = \textit{metis}]'').
-
-For the \textit{min} subcommand, the default prover is \textit{metis}. If
-several provers are set, the first one is used.
-
 \opnodefault{prover}{string}
 Alias for \textit{provers}.
 
@@ -1008,12 +962,9 @@
 \nopagebreak
 {\small See also \textit{verbose} (\S\ref{output-format}).}
 
-\opsmart{minimize}{dont\_minimize}
+\optrue{minimize}{dont\_minimize}
 Specifies whether the minimization tool should be invoked automatically after
-proof search. By default, automatic minimization takes place only if
-it can be done in a reasonable amount of time (as determined by
-the number of facts in the original proof and the time it took to find or
-preplay it) or the proof involves an unreasonably large number of facts.
+proof search.
 
 \nopagebreak
 {\small See also \textit{preplay\_timeout} (\S\ref{timeouts})
@@ -1321,13 +1272,16 @@
 one-line proofs. If the option is set to \textit{smart} (the default), Isar
 proofs are only generated when no working one-line proof is available.
 
-\opdefault{compress}{int}{\upshape 10}
+\opdefault{compress}{int}{smart}
 Specifies the granularity of the generated Isar proofs if \textit{isar\_proofs}
 is explicitly enabled. A value of $n$ indicates that each Isar proof step should
-correspond to a group of up to $n$ consecutive proof steps in the ATP proof.
+correspond to a group of up to $n$ consecutive proof steps in the ATP proof. If
+the option is set to \textit{smart} (the default), the compression factor is 10
+if the \textit{isar\_proofs} option is explicitly enabled; otherwise, it is
+$\infty$.
 
 \optrueonly{dont\_compress}
-Alias for ``\textit{compress} = 0''.
+Alias for ``\textit{compress} = 1''.
 
 \optrue{try0}{dont\_try0}
 Specifies whether standard proof methods such as \textit{auto} and
@@ -1335,8 +1289,8 @@
 The collection of methods is roughly the same as for the \textbf{try0} command.
 
 \opsmart{smt\_proofs}{no\_smt\_proofs}
-Specifies whether the \textit{smt2} proof method should be tried as an
-alternative to \textit{metis}.  If the option is set to \textit{smart} (the
+Specifies whether the \textit{smt2} proof method should be tried in addition to
+Isabelle's other proof methods. If the option is set to \textit{smart} (the
 default), the \textit{smt2} method is used for one-line proofs but not in Isar
 proofs.
 \end{enum}
@@ -1373,7 +1327,7 @@
 Specifies the maximum number of seconds that the automatic provers should spend
 searching for a proof. This excludes problem preparation and is a soft limit.
 
-\opdefault{preplay\_timeout}{float}{\upshape 2}
+\opdefault{preplay\_timeout}{float}{\upshape 1}
 Specifies the maximum number of seconds that \textit{metis} or other proof
 methods should spend trying to ``preplay'' the found proof. If this option
 is set to 0, no preplaying takes place, and no timing information is displayed
--- a/src/HOL/ATP.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/ATP.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -15,6 +15,8 @@
 ML_file "Tools/ATP/atp_problem.ML"
 ML_file "Tools/ATP/atp_proof.ML"
 ML_file "Tools/ATP/atp_proof_redirect.ML"
+ML_file "Tools/ATP/atp_satallax.ML"
+
 
 subsection {* Higher-order reasoning helpers *}
 
--- a/src/HOL/BNF_Comp.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/BNF_Comp.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -1,6 +1,7 @@
 (*  Title:      HOL/BNF_Comp.thy
     Author:     Dmitriy Traytel, TU Muenchen
-    Copyright   2012
+    Author:     Jasmin Blanchette, TU Muenchen
+    Copyright   2012, 2013, 2014
 
 Composition of bounded natural functors.
 *)
--- a/src/HOL/BNF_Def.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/BNF_Def.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -1,7 +1,7 @@
 (*  Title:      HOL/BNF_Def.thy
     Author:     Dmitriy Traytel, TU Muenchen
     Author:     Jasmin Blanchette, TU Muenchen
-    Copyright   2012
+    Copyright   2012, 2013, 2014
 
 Definition of bounded natural functors.
 *)
@@ -159,6 +159,11 @@
 "case_sum f g \<circ> Inr = g"
 by auto
 
+lemma map_sum_o_inj:
+"map_sum f g o Inl = Inl o f"
+"map_sum f g o Inr = Inr o g"
+by auto
+
 lemma card_order_csum_cone_cexp_def:
   "card_order r \<Longrightarrow> ( |A1| +c cone) ^c r = |Func UNIV (Inl ` A1 \<union> {Inr ()})|"
   unfolding cexp_def cone_def Field_csum Field_card_of by (auto dest: Field_card_order)
--- a/src/HOL/BNF_Examples/Misc_Datatype.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/BNF_Examples/Misc_Datatype.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -184,4 +184,45 @@
 datatype_new d5'' = is_D: D nat | E int
 datatype_new d5''' = is_D: D nat | is_E: E int
 
+datatype_compat simple
+datatype_compat simple'
+datatype_compat simple''
+datatype_compat mylist
+datatype_compat some_passive
+datatype_compat I1 I2
+datatype_compat tree forest
+datatype_compat tree' branch
+datatype_compat bin_rose_tree
+datatype_compat exp trm factor
+datatype_compat ftree
+datatype_compat nofail1
+datatype_compat kk1 kk2 kk3
+datatype_compat t1 t2 t3
+datatype_compat t1' t2' t3'
+datatype_compat k1 k2 k3 k4
+datatype_compat tt1 tt2 tt3 tt4
+datatype_compat deadbar
+datatype_compat deadbar_option
+datatype_compat bar
+datatype_compat foo
+datatype_compat deadfoo
+datatype_compat dead_foo
+datatype_compat use_dead_foo
+datatype_compat d1
+datatype_compat d1'
+datatype_compat d2
+datatype_compat d2'
+datatype_compat d3
+datatype_compat d3'
+datatype_compat d3''
+datatype_compat d3'''
+datatype_compat d4
+datatype_compat d4'
+datatype_compat d4''
+datatype_compat d4'''
+datatype_compat d5
+datatype_compat d5'
+datatype_compat d5''
+datatype_compat d5'''
+
 end
--- a/src/HOL/BNF_FP_Base.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/BNF_FP_Base.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -2,7 +2,8 @@
     Author:     Lorenz Panny, TU Muenchen
     Author:     Dmitriy Traytel, TU Muenchen
     Author:     Jasmin Blanchette, TU Muenchen
-    Copyright   2012, 2013
+    Author:     Martin Desharnais, TU Muenchen
+    Copyright   2012, 2013, 2014
 
 Shared fixed point operations on bounded natural functors.
 *)
--- a/src/HOL/BNF_GFP.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/BNF_GFP.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -2,7 +2,7 @@
     Author:     Dmitriy Traytel, TU Muenchen
     Author:     Lorenz Panny, TU Muenchen
     Author:     Jasmin Blanchette, TU Muenchen
-    Copyright   2012, 2013
+    Copyright   2012, 2013, 2014
 
 Greatest fixed point operation on bounded natural functors.
 *)
--- a/src/HOL/BNF_LFP.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/BNF_LFP.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -1,9 +1,8 @@
-
 (*  Title:      HOL/BNF_LFP.thy
     Author:     Dmitriy Traytel, TU Muenchen
     Author:     Lorenz Panny, TU Muenchen
     Author:     Jasmin Blanchette, TU Muenchen
-    Copyright   2012, 2013
+    Copyright   2012, 2013, 2014
 
 Least fixed point operation on bounded natural functors.
 *)
--- a/src/HOL/Decision_Procs/Cooper.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Decision_Procs/Cooper.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -2052,7 +2052,7 @@
     let ?v = "Neg e"
     have vb: "?v \<in> set (\<beta> (Gt (CN 0 c e)))"
       by simp
-    from 7(5)[simplified simp_thms Inum.simps \<beta>.simps set_simps bex_simps numbound0_I[OF bn,where b="a" and b'="x" and bs="bs"]]
+    from 7(5)[simplified simp_thms Inum.simps \<beta>.simps list.set bex_simps numbound0_I[OF bn,where b="a" and b'="x" and bs="bs"]]
     have nob: "\<not> (\<exists>j\<in> {1 ..d}. x = - ?e + j)"
       by auto
     from H p have "x + ?e > 0 \<and> x + ?e \<le> d"
@@ -2085,7 +2085,7 @@
     let ?v = "Sub (C -1) e"
     have vb: "?v \<in> set (\<beta> (Ge (CN 0 c e)))"
       by simp
-    from 8(5)[simplified simp_thms Inum.simps \<beta>.simps set_simps bex_simps numbound0_I[OF bn,where b="a" and b'="x" and bs="bs"]]
+    from 8(5)[simplified simp_thms Inum.simps \<beta>.simps list.set bex_simps numbound0_I[OF bn,where b="a" and b'="x" and bs="bs"]]
     have nob: "\<not> (\<exists>j\<in> {1 ..d}. x =  - ?e - 1 + j)"
       by auto
     from H p have "x + ?e \<ge> 0 \<and> x + ?e < d"
--- a/src/HOL/Decision_Procs/MIR.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Decision_Procs/MIR.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -2612,7 +2612,7 @@
     {assume H: "\<not> real (x-d) + ?e > 0" 
       let ?v="Neg e"
       have vb: "?v \<in> set (\<beta> (Gt (CN 0 c e)))" by simp
-      from 7(5)[simplified simp_thms Inum.simps \<beta>.simps set_simps bex_simps numbound0_I[OF bn,where b="a" and b'="real x" and bs="bs"]] 
+      from 7(5)[simplified simp_thms Inum.simps \<beta>.simps list.set bex_simps numbound0_I[OF bn,where b="a" and b'="real x" and bs="bs"]] 
       have nob: "\<not> (\<exists> j\<in> {1 ..d}. real x =  - ?e + real j)" by auto 
       from H p have "real x + ?e > 0 \<and> real x + ?e \<le> real d" by (simp add: c1)
       hence "real (x + floor ?e) > real (0::int) \<and> real (x + floor ?e) \<le> real d"
@@ -2638,7 +2638,7 @@
     {assume H: "\<not> real (x-d) + ?e \<ge> 0" 
       let ?v="Sub (C -1) e"
       have vb: "?v \<in> set (\<beta> (Ge (CN 0 c e)))" by simp
-      from 8(5)[simplified simp_thms Inum.simps \<beta>.simps set_simps bex_simps numbound0_I[OF bn,where b="a" and b'="real x" and bs="bs"]] 
+      from 8(5)[simplified simp_thms Inum.simps \<beta>.simps list.set bex_simps numbound0_I[OF bn,where b="a" and b'="real x" and bs="bs"]] 
       have nob: "\<not> (\<exists> j\<in> {1 ..d}. real x =  - ?e - 1 + real j)" by auto 
       from H p have "real x + ?e \<ge> 0 \<and> real x + ?e < real d" by (simp add: c1)
       hence "real (x + floor ?e) \<ge> real (0::int) \<and> real (x + floor ?e) < real d"
@@ -3394,7 +3394,7 @@
     ((UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n=0} (\<lambda> (p,n,s). {(p,0,Floor s)})) Un 
     (UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n>0} (\<lambda> (p,n,s). (?f(p,n,s)) ` {0 .. n})) Un 
     (UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n<0} (\<lambda> (p,n,s). (?f(p,n,s)) ` {n .. 0})))"
-    by (simp only: set_map set_upto set_simps)
+    by (simp only: set_map set_upto list.set)
   also have "\<dots> =   
     ((UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n=0} (\<lambda> (p,n,s). {(p,0,Floor s)})) Un 
     (UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n>0} (\<lambda> (p,n,s). {?f(p,n,s) j| j. j\<in> {0 .. n}})) Un 
@@ -3548,7 +3548,7 @@
     ((UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n=0} (\<lambda> (p,n,s). {(p,0,Floor s)})) Un 
     (UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n>0} (\<lambda> (p,n,s). (?f(p,n,s)) ` {0 .. n})) Un 
     (UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n<0} (\<lambda> (p,n,s). (?f(p,n,s)) ` {n .. 0})))"
-    by (simp only: set_map set_upto set_simps)
+    by (simp only: set_map set_upto list.set)
   also have "\<dots> =   
     ((UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n=0} (\<lambda> (p,n,s). {(p,0,Floor s)})) Un 
     (UNION {(p,n,s). (p,n,s) \<in> ?SS a \<and> n>0} (\<lambda> (p,n,s). {?f(p,n,s) j| j. j\<in> {0 .. n}})) Un 
--- a/src/HOL/Enum.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Enum.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -537,6 +537,24 @@
 
 end
 
+instantiation finite_1 :: complete_lattice
+begin
+
+definition [simp]: "Inf = (\<lambda>_. a\<^sub>1)"
+definition [simp]: "Sup = (\<lambda>_. a\<^sub>1)"
+definition [simp]: "bot = a\<^sub>1"
+definition [simp]: "top = a\<^sub>1"
+definition [simp]: "inf = (\<lambda>_ _. a\<^sub>1)"
+definition [simp]: "sup = (\<lambda>_ _. a\<^sub>1)"
+
+instance by intro_classes(simp_all add: less_eq_finite_1_def)
+end
+
+instance finite_1 :: complete_distrib_lattice
+by intro_classes(simp_all add: INF_def SUP_def)
+
+instance finite_1 :: complete_linorder ..
+
 hide_const (open) a\<^sub>1
 
 datatype finite_2 = a\<^sub>1 | a\<^sub>2
@@ -584,6 +602,42 @@
 
 end
 
+instantiation finite_2 :: complete_lattice
+begin
+
+definition "\<Sqinter>A = (if a\<^sub>1 \<in> A then a\<^sub>1 else a\<^sub>2)"
+definition "\<Squnion>A = (if a\<^sub>2 \<in> A then a\<^sub>2 else a\<^sub>1)"
+definition [simp]: "bot = a\<^sub>1"
+definition [simp]: "top = a\<^sub>2"
+definition "x \<sqinter> y = (if x = a\<^sub>1 \<or> y = a\<^sub>1 then a\<^sub>1 else a\<^sub>2)"
+definition "x \<squnion> y = (if x = a\<^sub>2 \<or> y = a\<^sub>2 then a\<^sub>2 else a\<^sub>1)"
+
+lemma neq_finite_2_a\<^sub>1_iff [simp]: "x \<noteq> a\<^sub>1 \<longleftrightarrow> x = a\<^sub>2"
+by(cases x) simp_all
+
+lemma neq_finite_2_a\<^sub>1_iff' [simp]: "a\<^sub>1 \<noteq> x \<longleftrightarrow> x = a\<^sub>2"
+by(cases x) simp_all
+
+lemma neq_finite_2_a\<^sub>2_iff [simp]: "x \<noteq> a\<^sub>2 \<longleftrightarrow> x = a\<^sub>1"
+by(cases x) simp_all
+
+lemma neq_finite_2_a\<^sub>2_iff' [simp]: "a\<^sub>2 \<noteq> x \<longleftrightarrow> x = a\<^sub>1"
+by(cases x) simp_all
+
+instance
+proof
+  fix x :: finite_2 and A
+  assume "x \<in> A"
+  then show "\<Sqinter>A \<le> x" "x \<le> \<Squnion>A"
+    by(case_tac [!] x)(auto simp add: less_eq_finite_2_def less_finite_2_def Inf_finite_2_def Sup_finite_2_def)
+qed(auto simp add: less_eq_finite_2_def less_finite_2_def inf_finite_2_def sup_finite_2_def Inf_finite_2_def Sup_finite_2_def)
+end
+
+instance finite_2 :: complete_distrib_lattice
+by(intro_classes)(auto simp add: INF_def SUP_def sup_finite_2_def inf_finite_2_def Inf_finite_2_def Sup_finite_2_def)
+
+instance finite_2 :: complete_linorder ..
+
 hide_const (open) a\<^sub>1 a\<^sub>2
 
 datatype finite_3 = a\<^sub>1 | a\<^sub>2 | a\<^sub>3
@@ -629,6 +683,53 @@
 
 end
 
+instantiation finite_3 :: complete_lattice
+begin
+
+definition "\<Sqinter>A = (if a\<^sub>1 \<in> A then a\<^sub>1 else if a\<^sub>2 \<in> A then a\<^sub>2 else a\<^sub>3)"
+definition "\<Squnion>A = (if a\<^sub>3 \<in> A then a\<^sub>3 else if a\<^sub>2 \<in> A then a\<^sub>2 else a\<^sub>1)"
+definition [simp]: "bot = a\<^sub>1"
+definition [simp]: "top = a\<^sub>3"
+definition [simp]: "inf = (min :: finite_3 \<Rightarrow> _)"
+definition [simp]: "sup = (max :: finite_3 \<Rightarrow> _)"
+
+instance
+proof
+  fix x :: finite_3 and A
+  assume "x \<in> A"
+  then show "\<Sqinter>A \<le> x" "x \<le> \<Squnion>A"
+    by(case_tac [!] x)(auto simp add: Inf_finite_3_def Sup_finite_3_def less_eq_finite_3_def less_finite_3_def)
+next
+  fix A and z :: finite_3
+  assume "\<And>x. x \<in> A \<Longrightarrow> z \<le> x"
+  then show "z \<le> \<Sqinter>A"
+    by(cases z)(auto simp add: Inf_finite_3_def less_eq_finite_3_def less_finite_3_def)
+next
+  fix A and z :: finite_3
+  assume *: "\<And>x. x \<in> A \<Longrightarrow> x \<le> z"
+  show "\<Squnion>A \<le> z"
+    by(auto simp add: Sup_finite_3_def less_eq_finite_3_def less_finite_3_def dest: *)
+qed(auto simp add: Inf_finite_3_def Sup_finite_3_def)
+end
+
+instance finite_3 :: complete_distrib_lattice
+proof
+  fix a :: finite_3 and B
+  show "a \<squnion> \<Sqinter>B = (\<Sqinter>b\<in>B. a \<squnion> b)"
+  proof(cases a "\<Sqinter>B" rule: finite_3.exhaust[case_product finite_3.exhaust])
+    case a\<^sub>2_a\<^sub>3
+    then have "\<And>x. x \<in> B \<Longrightarrow> x = a\<^sub>3"
+      by(case_tac x)(auto simp add: Inf_finite_3_def split: split_if_asm)
+    then show ?thesis using a\<^sub>2_a\<^sub>3
+      by(auto simp add: INF_def Inf_finite_3_def max_def less_eq_finite_3_def less_finite_3_def split: split_if_asm)
+  qed(auto simp add: INF_def Inf_finite_3_def max_def less_finite_3_def less_eq_finite_3_def split: split_if_asm)
+  show "a \<sqinter> \<Squnion>B = (\<Squnion>b\<in>B. a \<sqinter> b)"
+    by(cases a "\<Squnion>B" rule: finite_3.exhaust[case_product finite_3.exhaust])
+      (auto simp add: SUP_def Sup_finite_3_def min_def less_finite_3_def less_eq_finite_3_def split: split_if_asm)
+qed
+
+instance finite_3 :: complete_linorder ..
+
 hide_const (open) a\<^sub>1 a\<^sub>2 a\<^sub>3
 
 datatype finite_4 = a\<^sub>1 | a\<^sub>2 | a\<^sub>3 | a\<^sub>4
@@ -659,6 +760,69 @@
 
 end
 
+instantiation finite_4 :: complete_lattice begin
+
+text {* @{term a\<^sub>1} $<$ @{term a\<^sub>2},@{term a\<^sub>3} $<$ @{term a\<^sub>4},
+  but @{term a\<^sub>2} and @{term a\<^sub>3} are incomparable. *}
+
+definition
+  "x < y \<longleftrightarrow> (case (x, y) of
+     (a\<^sub>1, a\<^sub>1) \<Rightarrow> False | (a\<^sub>1, _) \<Rightarrow> True
+   |  (a\<^sub>2, a\<^sub>4) \<Rightarrow> True
+   |  (a\<^sub>3, a\<^sub>4) \<Rightarrow> True  | _ \<Rightarrow> False)"
+
+definition 
+  "x \<le> y \<longleftrightarrow> (case (x, y) of
+     (a\<^sub>1, _) \<Rightarrow> True
+   | (a\<^sub>2, a\<^sub>2) \<Rightarrow> True | (a\<^sub>2, a\<^sub>4) \<Rightarrow> True
+   | (a\<^sub>3, a\<^sub>3) \<Rightarrow> True | (a\<^sub>3, a\<^sub>4) \<Rightarrow> True
+   | (a\<^sub>4, a\<^sub>4) \<Rightarrow> True | _ \<Rightarrow> False)"
+
+definition
+  "\<Sqinter>A = (if a\<^sub>1 \<in> A \<or> a\<^sub>2 \<in> A \<and> a\<^sub>3 \<in> A then a\<^sub>1 else if a\<^sub>2 \<in> A then a\<^sub>2 else if a\<^sub>3 \<in> A then a\<^sub>3 else a\<^sub>4)"
+definition
+  "\<Squnion>A = (if a\<^sub>4 \<in> A \<or> a\<^sub>2 \<in> A \<and> a\<^sub>3 \<in> A then a\<^sub>4 else if a\<^sub>2 \<in> A then a\<^sub>2 else if a\<^sub>3 \<in> A then a\<^sub>3 else a\<^sub>1)"
+definition [simp]: "bot = a\<^sub>1"
+definition [simp]: "top = a\<^sub>4"
+definition
+  "x \<sqinter> y = (case (x, y) of
+     (a\<^sub>1, _) \<Rightarrow> a\<^sub>1 | (_, a\<^sub>1) \<Rightarrow> a\<^sub>1 | (a\<^sub>2, a\<^sub>3) \<Rightarrow> a\<^sub>1 | (a\<^sub>3, a\<^sub>2) \<Rightarrow> a\<^sub>1
+   | (a\<^sub>2, _) \<Rightarrow> a\<^sub>2 | (_, a\<^sub>2) \<Rightarrow> a\<^sub>2
+   | (a\<^sub>3, _) \<Rightarrow> a\<^sub>3 | (_, a\<^sub>3) \<Rightarrow> a\<^sub>3
+   | _ \<Rightarrow> a\<^sub>4)"
+definition
+  "x \<squnion> y = (case (x, y) of
+     (a\<^sub>4, _) \<Rightarrow> a\<^sub>4 | (_, a\<^sub>4) \<Rightarrow> a\<^sub>4 | (a\<^sub>2, a\<^sub>3) \<Rightarrow> a\<^sub>4 | (a\<^sub>3, a\<^sub>2) \<Rightarrow> a\<^sub>4
+  | (a\<^sub>2, _) \<Rightarrow> a\<^sub>2 | (_, a\<^sub>2) \<Rightarrow> a\<^sub>2
+  | (a\<^sub>3, _) \<Rightarrow> a\<^sub>3 | (_, a\<^sub>3) \<Rightarrow> a\<^sub>3
+  | _ \<Rightarrow> a\<^sub>1)"
+
+instance
+proof
+  fix A and z :: finite_4
+  assume *: "\<And>x. x \<in> A \<Longrightarrow> x \<le> z"
+  show "\<Squnion>A \<le> z"
+    by(auto simp add: Sup_finite_4_def less_eq_finite_4_def dest!: * split: finite_4.splits)
+next
+  fix A and z :: finite_4
+  assume *: "\<And>x. x \<in> A \<Longrightarrow> z \<le> x"
+  show "z \<le> \<Sqinter>A"
+    by(auto simp add: Inf_finite_4_def less_eq_finite_4_def dest!: * split: finite_4.splits)
+qed(auto simp add: less_finite_4_def less_eq_finite_4_def Inf_finite_4_def Sup_finite_4_def inf_finite_4_def sup_finite_4_def split: finite_4.splits)
+
+end
+
+instance finite_4 :: complete_distrib_lattice
+proof
+  fix a :: finite_4 and B
+  show "a \<squnion> \<Sqinter>B = (\<Sqinter>b\<in>B. a \<squnion> b)"
+    by(cases a "\<Sqinter>B" rule: finite_4.exhaust[case_product finite_4.exhaust])
+      (auto simp add: sup_finite_4_def Inf_finite_4_def INF_def split: finite_4.splits split_if_asm)
+  show "a \<sqinter> \<Squnion>B = (\<Squnion>b\<in>B. a \<sqinter> b)"
+    by(cases a "\<Squnion>B" rule: finite_4.exhaust[case_product finite_4.exhaust])
+      (auto simp add: inf_finite_4_def Sup_finite_4_def SUP_def split: finite_4.splits split_if_asm)
+qed
+
 hide_const (open) a\<^sub>1 a\<^sub>2 a\<^sub>3 a\<^sub>4
 
 
@@ -691,6 +855,72 @@
 
 end
 
+instantiation finite_5 :: complete_lattice
+begin
+
+text {* The non-distributive pentagon lattice $N_5$ *}
+
+definition
+  "x < y \<longleftrightarrow> (case (x, y) of
+     (a\<^sub>1, a\<^sub>1) \<Rightarrow> False | (a\<^sub>1, _) \<Rightarrow> True
+   | (a\<^sub>2, a\<^sub>3) \<Rightarrow> True  | (a\<^sub>2, a\<^sub>5) \<Rightarrow> True
+   | (a\<^sub>3, a\<^sub>5) \<Rightarrow> True
+   | (a\<^sub>4, a\<^sub>5) \<Rightarrow> True  | _ \<Rightarrow> False)"
+
+definition
+  "x \<le> y \<longleftrightarrow> (case (x, y) of
+     (a\<^sub>1, _) \<Rightarrow> True
+   | (a\<^sub>2, a\<^sub>2) \<Rightarrow> True | (a\<^sub>2, a\<^sub>3) \<Rightarrow> True | (a\<^sub>2, a\<^sub>5) \<Rightarrow> True
+   | (a\<^sub>3, a\<^sub>3) \<Rightarrow> True | (a\<^sub>3, a\<^sub>5) \<Rightarrow> True
+   | (a\<^sub>4, a\<^sub>4) \<Rightarrow> True | (a\<^sub>4, a\<^sub>5) \<Rightarrow> True
+   | (a\<^sub>5, a\<^sub>5) \<Rightarrow> True | _ \<Rightarrow> False)"
+
+definition
+  "\<Sqinter>A = 
+  (if a\<^sub>1 \<in> A \<or> a\<^sub>4 \<in> A \<and> (a\<^sub>2 \<in> A \<or> a\<^sub>3 \<in> A) then a\<^sub>1
+   else if a\<^sub>2 \<in> A then a\<^sub>2
+   else if a\<^sub>3 \<in> A then a\<^sub>3
+   else if a\<^sub>4 \<in> A then a\<^sub>4
+   else a\<^sub>5)"
+definition
+  "\<Squnion>A = 
+  (if a\<^sub>5 \<in> A \<or> a\<^sub>4 \<in> A \<and> (a\<^sub>2 \<in> A \<or> a\<^sub>3 \<in> A) then a\<^sub>5
+   else if a\<^sub>3 \<in> A then a\<^sub>3
+   else if a\<^sub>2 \<in> A then a\<^sub>2
+   else if a\<^sub>4 \<in> A then a\<^sub>4
+   else a\<^sub>1)"
+definition [simp]: "bot = a\<^sub>1"
+definition [simp]: "top = a\<^sub>5"
+definition
+  "x \<sqinter> y = (case (x, y) of
+     (a\<^sub>1, _) \<Rightarrow> a\<^sub>1 | (_, a\<^sub>1) \<Rightarrow> a\<^sub>1 | (a\<^sub>2, a\<^sub>4) \<Rightarrow> a\<^sub>1 | (a\<^sub>4, a\<^sub>2) \<Rightarrow> a\<^sub>1 | (a\<^sub>3, a\<^sub>4) \<Rightarrow> a\<^sub>1 | (a\<^sub>4, a\<^sub>3) \<Rightarrow> a\<^sub>1
+   | (a\<^sub>2, _) \<Rightarrow> a\<^sub>2 | (_, a\<^sub>2) \<Rightarrow> a\<^sub>2
+   | (a\<^sub>3, _) \<Rightarrow> a\<^sub>3 | (_, a\<^sub>3) \<Rightarrow> a\<^sub>3
+   | (a\<^sub>4, _) \<Rightarrow> a\<^sub>4 | (_, a\<^sub>4) \<Rightarrow> a\<^sub>4
+   | _ \<Rightarrow> a\<^sub>5)"
+definition
+  "x \<squnion> y = (case (x, y) of
+     (a\<^sub>5, _) \<Rightarrow> a\<^sub>5 | (_, a\<^sub>5) \<Rightarrow> a\<^sub>5 | (a\<^sub>2, a\<^sub>4) \<Rightarrow> a\<^sub>5 | (a\<^sub>4, a\<^sub>2) \<Rightarrow> a\<^sub>5 | (a\<^sub>3, a\<^sub>4) \<Rightarrow> a\<^sub>5 | (a\<^sub>4, a\<^sub>3) \<Rightarrow> a\<^sub>5
+   | (a\<^sub>3, _) \<Rightarrow> a\<^sub>3 | (_, a\<^sub>3) \<Rightarrow> a\<^sub>3
+   | (a\<^sub>2, _) \<Rightarrow> a\<^sub>2 | (_, a\<^sub>2) \<Rightarrow> a\<^sub>2
+   | (a\<^sub>4, _) \<Rightarrow> a\<^sub>4 | (_, a\<^sub>4) \<Rightarrow> a\<^sub>4
+   | _ \<Rightarrow> a\<^sub>1)"
+
+instance 
+proof intro_classes
+  fix A and z :: finite_5
+  assume *: "\<And>x. x \<in> A \<Longrightarrow> z \<le> x"
+  show "z \<le> \<Sqinter>A"
+    by(auto simp add: less_eq_finite_5_def Inf_finite_5_def split: finite_5.splits split_if_asm dest!: *)
+next
+  fix A and z :: finite_5
+  assume *: "\<And>x. x \<in> A \<Longrightarrow> x \<le> z"
+  show "\<Squnion>A \<le> z"
+    by(auto simp add: less_eq_finite_5_def Sup_finite_5_def split: finite_5.splits split_if_asm dest!: *)
+qed(auto simp add: less_eq_finite_5_def less_finite_5_def inf_finite_5_def sup_finite_5_def Inf_finite_5_def Sup_finite_5_def split: finite_5.splits split_if_asm)
+
+end
+
 hide_const (open) a\<^sub>1 a\<^sub>2 a\<^sub>3 a\<^sub>4 a\<^sub>5
 
 
--- a/src/HOL/Imperative_HOL/ex/Linked_Lists.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Imperative_HOL/ex/Linked_Lists.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -642,7 +642,7 @@
   with init all_ref_present have q_is_new: "q \<notin> set (p#refs)"
     by (auto elim!: effect_refE intro!: Ref.noteq_I)
   from refs_of_p refs_of_q q_is_new have a3: "\<forall>qrs prs. refs_of' h2 q qrs \<and> refs_of' h2 p prs \<longrightarrow> set prs \<inter> set qrs = {}"
-    by (fastforce simp only: set_simps dest: refs_of'_is_fun)
+    by (fastforce simp only: list.set dest: refs_of'_is_fun)
   from rev'_invariant [OF effect_rev' a1 a2 a3] have "list_of h3 (Ref.get h3 v) (List.rev xs)" 
     unfolding list_of'_def by auto
   with lookup show ?thesis
--- a/src/HOL/Lattices_Big.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Lattices_Big.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -633,6 +633,16 @@
 
 end
 
+lemma Max_eq_if:
+  assumes "finite A"  "finite B"  "\<forall>a\<in>A. \<exists>b\<in>B. a \<le> b"  "\<forall>b\<in>B. \<exists>a\<in>A. b \<le> a"
+  shows "Max A = Max B"
+proof cases
+  assume "A = {}" thus ?thesis using assms by simp
+next
+  assume "A \<noteq> {}" thus ?thesis using assms
+    by(blast intro: antisym Max_in Max_ge_iff[THEN iffD2])
+qed
+
 lemma Min_antimono:
   assumes "M \<subseteq> N" and "M \<noteq> {}" and "finite N"
   shows "Min N \<le> Min M"
--- a/src/HOL/Library/Permutation.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Library/Permutation.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -162,7 +162,7 @@
   apply (case_tac "remdups xs")
    apply simp_all
   apply (subgoal_tac "a \<in> set (remdups ys)")
-   prefer 2 apply (metis set_simps(2) insert_iff set_remdups)
+   prefer 2 apply (metis list.set(2) insert_iff set_remdups)
   apply (drule split_list) apply (elim exE conjE)
   apply (drule_tac x = list in spec) apply (erule impE) prefer 2
    apply (drule_tac x = "ysa @ zs" in spec) apply (erule impE) prefer 2
--- a/src/HOL/Library/RBT_Set.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Library/RBT_Set.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -522,7 +522,7 @@
 
 code_datatype Set Coset
 
-declare set_simps[code]
+declare list.set[code] (* needed? *)
 
 lemma empty_Set [code]:
   "Set.empty = Set RBT.empty"
--- a/src/HOL/Library/refute.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Library/refute.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -2909,7 +2909,7 @@
           Node xs => xs
         | _       => raise REFUTE ("set_printer",
           "interpretation for set type is a leaf"))
-      val elements = List.mapPartial (fn (arg, result) =>
+      val elements = map_filter (fn (arg, result) =>
         case result of
           Leaf [fmTrue, (* fmFalse *) _] =>
           if Prop_Logic.eval assignment fmTrue then
--- a/src/HOL/List.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/List.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -39,6 +39,8 @@
 
 setup {* Sign.parent_path *}
 
+lemmas set_simps = list.set (* legacy *)
+
 syntax
   -- {* list Enumeration *}
   "_list" :: "args => 'a list"    ("[(_)]")
@@ -54,16 +56,9 @@
 "last (x # xs) = (if xs = [] then x else last xs)"
 
 primrec butlast :: "'a list \<Rightarrow> 'a list" where
-"butlast []= []" |
+"butlast [] = []" |
 "butlast (x # xs) = (if xs = [] then [] else x # butlast xs)"
 
-declare list.set[simp del, code del]
-
-lemma set_simps[simp, code, code_post]:
-  "set [] = {}"
-  "set (x # xs) = insert x (set xs)"
-by (simp_all add: list.set)
-
 lemma set_rec: "set xs = rec_list {} (\<lambda>x _. insert x) xs"
   by (induct xs) auto
 
@@ -575,7 +570,7 @@
 
 fun simproc ctxt redex =
   let
-    val set_Nil_I = @{thm trans} OF [@{thm set_simps(1)}, @{thm empty_def}]
+    val set_Nil_I = @{thm trans} OF [@{thm list.set(1)}, @{thm empty_def}]
     val set_singleton = @{lemma "set [a] = {x. x = a}" by simp}
     val inst_Collect_mem_eq = @{lemma "set A = {x. x : set A}" by simp}
     val del_refl_eq = @{lemma "(t = t & P) == P" by simp}
@@ -1255,6 +1250,8 @@
 
 subsubsection {* @{const set} *}
 
+declare list.set[code_post]  --"pretty output"
+
 lemma finite_set [iff]: "finite (set xs)"
 by (induct xs) auto
 
@@ -1404,7 +1401,7 @@
 
 
 lemma finite_list: "finite A ==> EX xs. set xs = A"
-  by (erule finite_induct) (auto simp add: set_simps(2) [symmetric] simp del: set_simps(2))
+  by (erule finite_induct) (auto simp add: list.set(2)[symmetric] simp del: list.set(2))
 
 lemma card_length: "card (set xs) \<le> length xs"
 by (induct xs) (auto simp add: card_insert_if)
--- a/src/HOL/MicroJava/Comp/CorrCompTp.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/MicroJava/Comp/CorrCompTp.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -1392,7 +1392,7 @@
 
   apply (simp (no_asm_simp) add: max_ssize_def del: max_of_list_append)
     apply (rule max_of_list_sublist)
-    apply (simp (no_asm_simp) only: set_append set_simps list.map) apply blast
+    apply (simp (no_asm_simp) only: set_append list.set list.map) apply blast
   apply (simp (no_asm_simp))
   apply simp                    (* subgoal bc3 = [] *)
   apply (simp add: comb_nil_def) (* subgoal mt3 = [] \<and> sttp2 = sttp3 *)
@@ -1419,7 +1419,7 @@
      (* (some) preconditions of  wt_instr_offset *)
   apply (simp (no_asm_simp) add: max_ssize_def del: max_of_list_append)
   apply (rule max_of_list_sublist)
-    apply (simp (no_asm_simp) only: set_append set_simps list.map) apply blast
+    apply (simp (no_asm_simp) only: set_append list.set list.map) apply blast
   apply (simp (no_asm_simp))
 
 apply (drule_tac x=sttp2 in spec, simp) (* subgoal \<exists>mt3_rest. \<dots> *)
--- a/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -15,19 +15,15 @@
 val proverK = "prover" (*=NAME: name of the external prover to call*)
 val prover_timeoutK = "prover_timeout" (*=TIME: timeout for invoked ATP (seconds of process time)*)
 val keepK = "keep" (*=PATH: path where to keep temporary files created by sledgehammer*)
-val minimizeK = "minimize" (*: enable minimization of theorem set found by sledgehammer*)
-                           (*refers to minimization attempted by Mirabelle*)
-val minimize_timeoutK = "minimize_timeout" (*=TIME: timeout for each minimization step (seconds of*)
 
 val proof_methodK = "proof_method" (*=NAME: how to reconstruct proofs (ie. using metis/smt)*)
-val metis_ftK = "metis_ft" (*: apply metis with fully-typed encoding to the theorems found by sledgehammer*)
 
 val max_factsK = "max_facts" (*=NUM: max. relevant clauses to use*)
 val max_relevantK = "max_relevant" (*=NUM: max. relevant clauses to use*)
 val max_callsK = "max_calls" (*=NUM: max. no. of calls to sledgehammer*)
 val preplay_timeoutK = "preplay_timeout" (*=TIME: timeout for finding reconstructed proof*)
 val isar_proofsK = "isar_proofs" (*: enable Isar proof generation*)
-val sh_minimizeK = "sh_minimize" (*: instruct sledgehammer to run its minimizer*)
+val minimizeK = "minimize" (*: instruct sledgehammer to run its minimizer*)
 
 val check_trivialK = "check_trivial" (*: check if goals are "trivial" (false by default)*)
 val fact_filterK = "fact_filter" (*=STRING: fact filter*)
@@ -43,14 +39,13 @@
 val max_mono_itersK = "max_mono_iters" (*=NUM: max. iterations of monomorphiser*)
 
 fun sh_tag id = "#" ^ string_of_int id ^ " sledgehammer: "
-fun minimize_tag id = "#" ^ string_of_int id ^ " minimize (sledgehammer): "
 fun proof_method_tag meth id = "#" ^ string_of_int id ^ " " ^ (!meth) ^ " (sledgehammer): "
 
 val separator = "-----"
 
 (*FIXME sensible to have Mirabelle-level Sledgehammer defaults?*)
 (*defaults used in this Mirabelle action*)
-val preplay_timeout_default = "3"
+val preplay_timeout_default = "1"
 val lam_trans_default = "smart"
 val uncurried_aliases_default = "smart"
 val fact_filter_default = "smart"
@@ -60,7 +55,6 @@
 val slice_default = "true"
 val max_calls_default = "10000000"
 val trivial_default = "false"
-val minimize_timeout_default = 5
 
 (*If a key is present in args then augment a list with its pair*)
 (*This is used to avoid fixing default values at the Mirabelle level, and
@@ -93,11 +87,6 @@
   posns: (Position.T * bool) list
   }
 
-datatype min_data = MinData of {
-  succs: int,
-  ab_ratios: int
-  }
-
 fun make_sh_data
       (calls,success,nontriv_calls,nontriv_success,lemmas,max_lems,time_isa,
        time_prover,time_prover_fail) =
@@ -106,9 +95,6 @@
          time_isa=time_isa, time_prover=time_prover,
          time_prover_fail=time_prover_fail}
 
-fun make_min_data (succs, ab_ratios) =
-  MinData{succs=succs, ab_ratios=ab_ratios}
-
 fun make_re_data (calls,success,nontriv_calls,nontriv_success,proofs,time,
                   timeout,lemmas,posns) =
   ReData{calls=calls, success=success, nontriv_calls=nontriv_calls,
@@ -116,7 +102,6 @@
          timeout=timeout, lemmas=lemmas, posns=posns}
 
 val empty_sh_data = make_sh_data (0, 0, 0, 0, 0, 0, 0, 0, 0)
-val empty_min_data = make_min_data (0, 0)
 val empty_re_data = make_re_data (0, 0, 0, 0, 0, 0, 0, (0,0,0), [])
 
 fun tuple_of_sh_data (ShData {calls, success, nontriv_calls, nontriv_success,
@@ -124,55 +109,28 @@
   time_prover, time_prover_fail}) = (calls, success, nontriv_calls,
   nontriv_success, lemmas, max_lems, time_isa, time_prover, time_prover_fail)
 
-fun tuple_of_min_data (MinData {succs, ab_ratios}) = (succs, ab_ratios)
-
 fun tuple_of_re_data (ReData {calls, success, nontriv_calls, nontriv_success,
   proofs, time, timeout, lemmas, posns}) = (calls, success, nontriv_calls,
   nontriv_success, proofs, time, timeout, lemmas, posns)
 
-datatype proof_method_mode =
-  Unminimized | Minimized | UnminimizedFT | MinimizedFT
-
 datatype data = Data of {
   sh: sh_data,
-  min: min_data,
-  re_u: re_data, (* proof method with unminimized set of lemmas *)
-  re_m: re_data, (* proof method with minimized set of lemmas *)
-  re_uft: re_data, (* proof method with unminimized set of lemmas and fully-typed *)
-  re_mft: re_data, (* proof method with minimized set of lemmas and fully-typed *)
-  mini: bool   (* with minimization *)
+  re_u: re_data (* proof method with unminimized set of lemmas *)
   }
 
-fun make_data (sh, min, re_u, re_m, re_uft, re_mft, mini) =
-  Data {sh=sh, min=min, re_u=re_u, re_m=re_m, re_uft=re_uft, re_mft=re_mft,
-    mini=mini}
-
-val empty_data = make_data (empty_sh_data, empty_min_data,
-  empty_re_data, empty_re_data, empty_re_data, empty_re_data, false)
+fun make_data (sh, re_u) = Data {sh=sh, re_u=re_u}
 
-fun map_sh_data f (Data {sh, min, re_u, re_m, re_uft, re_mft, mini}) =
-  let val sh' = make_sh_data (f (tuple_of_sh_data sh))
-  in make_data (sh', min, re_u, re_m, re_uft, re_mft, mini) end
-
-fun map_min_data f (Data {sh, min, re_u, re_m, re_uft, re_mft, mini}) =
-  let val min' = make_min_data (f (tuple_of_min_data min))
-  in make_data (sh, min', re_u, re_m, re_uft, re_mft, mini) end
+val empty_data = make_data (empty_sh_data, empty_re_data)
 
-fun map_re_data f m (Data {sh, min, re_u, re_m, re_uft, re_mft, mini}) =
+fun map_sh_data f (Data {sh, re_u}) =
+  let val sh' = make_sh_data (f (tuple_of_sh_data sh))
+  in make_data (sh', re_u) end
+
+fun map_re_data f (Data {sh, re_u}) =
   let
-    fun map_me g Unminimized   (u, m, uft, mft) = (g u, m, uft, mft)
-      | map_me g Minimized     (u, m, uft, mft) = (u, g m, uft, mft)
-      | map_me g UnminimizedFT (u, m, uft, mft) = (u, m, g uft, mft)
-      | map_me g MinimizedFT   (u, m, uft, mft) = (u, m, uft, g mft)
-
     val f' = make_re_data o f o tuple_of_re_data
-
-    val (re_u', re_m', re_uft', re_mft') =
-      map_me f' m (re_u, re_m, re_uft, re_mft)
-  in make_data (sh, min, re_u', re_m', re_uft', re_mft', mini) end
-
-fun set_mini mini (Data {sh, min, re_u, re_m, re_uft, re_mft, ...}) =
-  make_data (sh, min, re_u, re_m, re_uft, re_mft, mini)
+    val re_u' = f' re_u
+  in make_data (sh, re_u') end
 
 fun inc_max (n:int) (s,sos,m) = (s+n, sos + n*n, Int.max(m,n));
 
@@ -212,12 +170,6 @@
   (fn (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail)
     => (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail + t))
 
-val inc_min_succs = map_min_data
-  (fn (succs,ab_ratios) => (succs+1, ab_ratios))
-
-fun inc_min_ab_ratios r = map_min_data
-  (fn (succs, ab_ratios) => (succs, ab_ratios+r))
-
 val inc_proof_method_calls = map_re_data
   (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns)
     => (calls + 1, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas,posns))
@@ -238,21 +190,21 @@
   (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns)
     => (calls, success, nontriv_calls, nontriv_success, proofs + 1, time, timeout, lemmas,posns))
 
-fun inc_proof_method_time m t = map_re_data
+fun inc_proof_method_time t = map_re_data
  (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns)
-  => (calls, success, nontriv_calls, nontriv_success, proofs, time + t, timeout, lemmas,posns)) m
+  => (calls, success, nontriv_calls, nontriv_success, proofs, time + t, timeout, lemmas,posns))
 
 val inc_proof_method_timeout = map_re_data
   (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns)
     => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout + 1, lemmas,posns))
 
-fun inc_proof_method_lemmas m n = map_re_data
+fun inc_proof_method_lemmas n = map_re_data
   (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns)
-    => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, inc_max n lemmas, posns)) m
+    => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, inc_max n lemmas, posns))
 
-fun inc_proof_method_posns m pos = map_re_data
+fun inc_proof_method_posns pos = map_re_data
   (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns)
-    => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas, pos::posns)) m
+    => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas, pos::posns))
 
 val str0 = string_of_int o the_default 0
 
@@ -311,37 +263,23 @@
   else ()
  )
 
-fun log_min_data log (succs, ab_ratios) =
-  (log ("Number of successful minimizations: " ^ string_of_int succs);
-   log ("After/before ratios: " ^ string_of_int ab_ratios)
-  )
-
 in
 
-fun log_data id log (Data {sh, min, re_u, re_m, re_uft, re_mft, mini}) =
+fun log_data id log (Data {sh, re_u}) =
   let
     val ShData {calls=sh_calls, ...} = sh
 
     fun app_if (ReData {calls, ...}) f = if calls > 0 then f () else ()
     fun log_re tag m =
       log_re_data log tag sh_calls (tuple_of_re_data m)
-    fun log_proof_method (tag1, m1) (tag2, m2) = app_if m1 (fn () =>
-      (log_re tag1 m1; log ""; app_if m2 (fn () => log_re tag2 m2)))
+    fun log_proof_method (tag1, m1) = app_if m1 (fn () => (log_re tag1 m1; log ""))
   in
     if sh_calls > 0
     then
      (log ("\n\n\nReport #" ^ string_of_int id ^ ":\n");
       log_sh_data log (tuple_of_sh_data sh);
       log "";
-      if not mini
-      then log_proof_method ("", re_u) ("fully-typed ", re_uft)
-      else
-        app_if re_u (fn () =>
-         (log_proof_method ("unminimized ", re_u) ("unminimized fully-typed ", re_uft);
-          log "";
-          app_if re_m (fn () =>
-            (log_min_data log (tuple_of_min_data min); log "";
-             log_proof_method ("", re_m) ("fully-typed ", re_mft))))))
+      log_proof_method ("", re_u))
     else ()
   end
 
@@ -411,7 +349,7 @@
 
 fun run_sh prover_name fact_filter type_enc strict max_facts slice
       lam_trans uncurried_aliases e_selection_heuristic term_order force_sos
-      hard_timeout timeout preplay_timeout isar_proofsLST sh_minimizeLST
+      hard_timeout timeout preplay_timeout isar_proofsLST minimizeLST
       max_new_mono_instancesLST max_mono_itersLST dir pos st =
   let
     val thy = Proof.theory_of st
@@ -421,7 +359,7 @@
         Config.put Sledgehammer_Prover_ATP.atp_dest_dir dir
         #> Config.put Sledgehammer_Prover_ATP.atp_problem_prefix
           ("prob_" ^ str0 (Position.line_of pos) ^ "__")
-        #> Config.put SMT_Config.debug_files
+        #> Config.put SMT2_Config.debug_files
           (dir ^ "/" ^ Name.desymbolize (SOME false) (ATP_Util.timestamp ()) ^ "_"
           ^ serial_string ())
       | set_file_name NONE = I
@@ -435,7 +373,7 @@
                   term_order |> the_default I)
             #> (Option.map (Config.put ATP_Systems.force_sos)
                   force_sos |> the_default I))
-    val params as {max_facts, ...} =
+    val params as {max_facts, minimize, preplay_timeout, ...} =
       Sledgehammer_Commands.default_params thy
          ([("verbose", "true"),
            ("fact_filter", fact_filter),
@@ -448,7 +386,7 @@
            ("timeout", string_of_int timeout),
            ("preplay_timeout", preplay_timeout)]
           |> isar_proofsLST
-          |> sh_minimizeLST (*don't confuse the two minimization flags*)
+          |> minimizeLST (*don't confuse the two minimization flags*)
           |> max_new_mono_instancesLST
           |> max_mono_itersLST)
     val default_max_facts =
@@ -460,11 +398,9 @@
       | SOME secs => TimeLimit.timeLimit (Time.fromSeconds secs))
     fun failed failure =
       ({outcome = SOME failure, used_facts = [], used_from = [],
-        run_time = Time.zeroTime,
-        preplay = Lazy.value (Sledgehammer_Proof_Methods.Metis_Method (NONE, NONE),
-          Sledgehammer_Proof_Methods.Play_Failed),
-        message = K "", message_tail = ""}, ~1)
-    val ({outcome, used_facts, run_time, preplay, message, message_tail, ...}
+        preferred_methss = (Sledgehammer_Proof_Methods.Auto_Method, []), run_time = Time.zeroTime,
+        message = K ""}, ~1)
+    val ({outcome, used_facts, preferred_methss, run_time, message, ...}
          : Sledgehammer_Prover.prover_result,
          time_isa) = time_limit (Mirabelle.cpu_time (fn () =>
       let
@@ -488,11 +424,12 @@
         val problem =
           {comment = "", state = st', goal = goal, subgoal = i,
            subgoal_count = Sledgehammer_Util.subgoal_count st, factss = factss}
-      in prover params (K (K (K ""))) problem end)) ()
+      in prover params problem end)) ()
       handle TimeLimit.TimeOut => failed ATP_Proof.TimedOut
            | Fail "inappropriate" => failed ATP_Proof.Inappropriate
     val time_prover = run_time |> Time.toMilliseconds
-    val msg = message (Lazy.force preplay) ^ message_tail
+    val msg = message (fn () => Sledgehammer.play_one_line_proof minimize preplay_timeout used_facts
+      st' i preferred_methss)
   in
     (case outcome of
       NONE => (msg, SH_OK (time_isa, time_prover, used_facts))
@@ -534,7 +471,7 @@
     val preplay_timeout = AList.lookup (op =) args preplay_timeoutK
       |> the_default preplay_timeout_default
     val isar_proofsLST = available_parameter args isar_proofsK "isar_proofs"
-    val sh_minimizeLST = available_parameter args sh_minimizeK "minimize"
+    val minimizeLST = available_parameter args minimizeK "minimize"
     val max_new_mono_instancesLST =
       available_parameter args max_new_mono_instancesK max_new_mono_instancesK
     val max_mono_itersLST = available_parameter args max_mono_itersK max_mono_itersK
@@ -542,7 +479,7 @@
     val (msg, result) =
       run_sh prover_name fact_filter type_enc strict max_facts slice lam_trans
         uncurried_aliases e_selection_heuristic term_order force_sos
-        hard_timeout timeout preplay_timeout isar_proofsLST sh_minimizeLST
+        hard_timeout timeout preplay_timeout isar_proofsLST minimizeLST
         max_new_mono_instancesLST max_mono_itersLST dir pos st
   in
     (case result of
@@ -574,57 +511,6 @@
 
 end
 
-fun run_minimize args meth named_thms id ({pre = st, log, ...} : Mirabelle.run_args) =
-  let
-    val thy = Proof.theory_of st
-    val {goal, ...} = Proof.goal st
-    val n0 = length (these (!named_thms))
-    val prover_name = get_prover_name thy args
-    val type_enc = AList.lookup (op =) args type_encK |> the_default type_enc_default
-    val strict = AList.lookup (op =) args strictK |> the_default strict_default
-    val timeout =
-      AList.lookup (op =) args minimize_timeoutK
-      |> Option.map (fst o read_int o raw_explode)  (* FIXME Symbol.explode (?) *)
-      |> the_default minimize_timeout_default
-    val preplay_timeout = AList.lookup (op =) args preplay_timeoutK
-      |> the_default preplay_timeout_default
-    val isar_proofsLST = available_parameter args isar_proofsK "isar_proofs"
-    val sh_minimizeLST = available_parameter args sh_minimizeK "minimize"
-    val max_new_mono_instancesLST =
-      available_parameter args max_new_mono_instancesK max_new_mono_instancesK
-    val max_mono_itersLST = available_parameter args max_mono_itersK max_mono_itersK
-    val params = Sledgehammer_Commands.default_params thy
-     ([("provers", prover_name),
-       ("verbose", "true"),
-       ("type_enc", type_enc),
-       ("strict", strict),
-       ("timeout", string_of_int timeout),
-       ("preplay_timeout", preplay_timeout)]
-      |> isar_proofsLST
-      |> sh_minimizeLST (*don't confuse the two minimization flags*)
-      |> max_new_mono_instancesLST
-      |> max_mono_itersLST)
-    val minimize =
-      Sledgehammer_Prover_Minimize.minimize_facts (K ()) prover_name params true 1
-        (Sledgehammer_Util.subgoal_count st)
-    val _ = log separator
-    val (used_facts, (preplay, message, message_tail)) =
-      minimize st goal NONE (these (!named_thms))
-    val msg = message (Lazy.force preplay) ^ message_tail
-  in
-    (case used_facts of
-      SOME named_thms' =>
-        (change_data id inc_min_succs;
-         change_data id (inc_min_ab_ratios ((100 * length named_thms') div n0));
-         if length named_thms' = n0
-         then log (minimize_tag id ^ "already minimal")
-         else (meth := proof_method_from_msg args msg;
-               named_thms := SOME named_thms';
-               log (minimize_tag id ^ "succeeded:\n" ^ msg))
-        )
-    | NONE => log (minimize_tag id ^ "failed: " ^ msg))
-  end
-
 fun override_params prover type_enc timeout =
   [("provers", prover),
    ("max_facts", "0"),
@@ -633,7 +519,7 @@
    ("slice", "false"),
    ("timeout", timeout |> Time.toSeconds |> string_of_int)]
 
-fun run_proof_method trivial full m name meth named_thms id
+fun run_proof_method trivial full name meth named_thms id
     ({pre=st, timeout, log, pos, ...}: Mirabelle.run_args) =
   let
     fun do_method named_thms ctxt =
@@ -648,16 +534,16 @@
           timeout |> Time.toReal |> curry Real.* time_slice |> Time.fromReal
         fun sledge_tac time_slice prover type_enc =
           Sledgehammer_Tactics.sledgehammer_as_oracle_tac ctxt
-            (override_params prover type_enc (my_timeout time_slice)) fact_override
+            (override_params prover type_enc (my_timeout time_slice)) fact_override []
       in
         if !meth = "sledgehammer_tac" then
           sledge_tac 0.2 ATP_Proof.vampireN "mono_native"
           ORELSE' sledge_tac 0.2 ATP_Proof.eN "poly_guards??"
           ORELSE' sledge_tac 0.2 ATP_Proof.spassN "mono_native"
           ORELSE' sledge_tac 0.2 ATP_Proof.z3_tptpN "poly_tags??"
-          ORELSE' SMT_Solver.smt_tac ctxt thms
+          ORELSE' SMT2_Solver.smt2_tac ctxt thms
         else if !meth = "smt" then
-          SMT_Solver.smt_tac ctxt thms
+          SMT2_Solver.smt2_tac ctxt thms
         else if full then
           Metis_Tactic.metis_tac [ATP_Proof_Reconstruct.full_typesN]
             ATP_Proof_Reconstruct.default_metis_lam_trans ctxt thms
@@ -680,22 +566,22 @@
       Mirabelle.can_apply timeout (do_method named_thms) st
 
     fun with_time (false, t) = "failed (" ^ string_of_int t ^ ")"
-      | with_time (true, t) = (change_data id (inc_proof_method_success m);
+      | with_time (true, t) = (change_data id inc_proof_method_success;
           if trivial then ()
-          else change_data id (inc_proof_method_nontriv_success m);
-          change_data id (inc_proof_method_lemmas m (length named_thms));
-          change_data id (inc_proof_method_time m t);
-          change_data id (inc_proof_method_posns m (pos, trivial));
-          if name = "proof" then change_data id (inc_proof_method_proofs m) else ();
+          else change_data id inc_proof_method_nontriv_success;
+          change_data id (inc_proof_method_lemmas (length named_thms));
+          change_data id (inc_proof_method_time t);
+          change_data id (inc_proof_method_posns (pos, trivial));
+          if name = "proof" then change_data id inc_proof_method_proofs else ();
           "succeeded (" ^ string_of_int t ^ ")")
     fun timed_method named_thms =
       (with_time (Mirabelle.cpu_time apply_method named_thms), true)
-      handle TimeLimit.TimeOut => (change_data id (inc_proof_method_timeout m); ("timeout", false))
+      handle TimeLimit.TimeOut => (change_data id inc_proof_method_timeout; ("timeout", false))
            | ERROR msg => ("error: " ^ msg, false)
 
     val _ = log separator
-    val _ = change_data id (inc_proof_method_calls m)
-    val _ = if trivial then () else change_data id (inc_proof_method_nontriv_calls m)
+    val _ = change_data id inc_proof_method_calls
+    val _ = if trivial then () else change_data id inc_proof_method_nontriv_calls
   in
     named_thms
     |> timed_method
@@ -724,38 +610,18 @@
           val meth = Unsynchronized.ref ""
           val named_thms =
             Unsynchronized.ref (NONE : ((string * stature) * thm list) list option)
-          val minimize = AList.defined (op =) args minimizeK
-          val metis_ft = AList.defined (op =) args metis_ftK
           val trivial =
             if AList.lookup (op =) args check_trivialK |> the_default trivial_default
                             |> Markup.parse_bool then
               Try0.try0 (SOME try_timeout) ([], [], [], []) pre
               handle TimeLimit.TimeOut => false
             else false
-          fun apply_method m1 m2 =
-            if metis_ft
-            then
-              if not (Mirabelle.catch_result (proof_method_tag meth) false
-                  (run_proof_method trivial false m1 name meth (these (!named_thms))) id st)
-              then
-                (Mirabelle.catch_result (proof_method_tag meth) false
-                  (run_proof_method trivial true m2 name meth (these (!named_thms))) id st; ())
-              else ()
-            else
-              (Mirabelle.catch_result (proof_method_tag meth) false
-                (run_proof_method trivial false m1 name meth (these (!named_thms))) id st; ())
+          fun apply_method () =
+            (Mirabelle.catch_result (proof_method_tag meth) false
+              (run_proof_method trivial false name meth (these (!named_thms))) id st; ())
         in
-          change_data id (set_mini minimize);
           Mirabelle.catch sh_tag (run_sledgehammer trivial args meth named_thms) id st;
-          if is_some (!named_thms)
-          then
-           (apply_method Unminimized UnminimizedFT;
-            if minimize andalso not (null (these (!named_thms)))
-            then
-             (Mirabelle.catch minimize_tag (run_minimize args meth named_thms) id st;
-              apply_method Minimized MinimizedFT)
-            else ())
-          else ()
+          if is_some (!named_thms) then apply_method () else ()
         end
     end
   end
--- a/src/HOL/Quotient_Examples/FSet.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Quotient_Examples/FSet.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -985,7 +985,7 @@
   have b: "\<And>x' ys'. \<lbrakk>\<not> List.member ys' x'; a # xs \<approx> x' # ys'\<rbrakk> \<Longrightarrow> thesis" by fact
   have c: "xs = [] \<Longrightarrow> thesis" using b 
     apply(simp)
-    by (metis List.set_simps(1) emptyE empty_subsetI)
+    by (metis list.set(1) emptyE empty_subsetI)
   have "\<And>x ys. \<lbrakk>\<not> List.member ys x; xs \<approx> x # ys\<rbrakk> \<Longrightarrow> thesis"
   proof -
     fix x :: 'a
--- a/src/HOL/Quotient_Examples/Lift_FSet.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Quotient_Examples/Lift_FSet.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -151,7 +151,7 @@
   using filter_filter [Transfer.transferred] .
 
 lemma "fset (fcons x xs) = insert x (fset xs)"
-  using set_simps(2) [Transfer.transferred] .
+  using list.set(2) [Transfer.transferred] .
 
 lemma "fset (fappend xs ys) = fset xs \<union> fset ys"
   using set_append [Transfer.transferred] .
--- a/src/HOL/SMT2.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/SMT2.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -108,6 +108,7 @@
 ML_file "Tools/SMT2/smtlib2.ML"
 ML_file "Tools/SMT2/smtlib2_interface.ML"
 ML_file "Tools/SMT2/smtlib2_proof.ML"
+ML_file "Tools/SMT2/smtlib2_isar.ML"
 ML_file "Tools/SMT2/z3_new_proof.ML"
 ML_file "Tools/SMT2/z3_new_isar.ML"
 ML_file "Tools/SMT2/smt2_solver.ML"
@@ -117,6 +118,9 @@
 ML_file "Tools/SMT2/z3_new_replay_rules.ML"
 ML_file "Tools/SMT2/z3_new_replay_methods.ML"
 ML_file "Tools/SMT2/z3_new_replay.ML"
+ML_file "Tools/SMT2/verit_proof.ML"
+ML_file "Tools/SMT2/verit_isar.ML"
+ML_file "Tools/SMT2/verit_proof_parse.ML"
 ML_file "Tools/SMT2/smt2_systems.ML"
 
 method_setup smt2 = {*
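
The new ML files above (smtlib2_isar.ML plus the three verit_*.ML files) plug veriT proof parsing and the shared SMT-LIB 2 Isar reconstruction layer into the smt2 stack before smt2_systems.ML is loaded. As a purely illustrative sanity check -- not part of this changeset, and assuming a working Z3 setup or matching certificates -- the resulting smt2 method can be exercised from any theory importing SMT2:

lemma "(x::int) + 0 = x"
  by smt2
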
--- a/src/HOL/SMT_Examples/SMT_Examples.certs2	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/SMT_Examples/SMT_Examples.certs2	Sun Aug 10 14:34:43 2014 +0200
@@ -1,10 +1,3 @@
-3aa17d1c77bc1a92bca05df291d11d81c645a931 6 0
-unsat
-((set-logic AUFLIA)
-(proof
-(let ((@x30 (rewrite (= (not true) false))))
-(mp (asserted (not true)) @x30 false))))
-
 572677daa32981bf8212986300f1362edf42a0c1 7 0
 unsat
 ((set-logic AUFLIA)
@@ -13,6 +6,13 @@
 (let ((@x40 (trans @x36 (rewrite (= (not true) false)) (= (not (or p$ (not p$))) false))))
 (mp (asserted (not (or p$ (not p$)))) @x40 false)))))
 
+3aa17d1c77bc1a92bca05df291d11d81c645a931 6 0
+unsat
+((set-logic AUFLIA)
+(proof
+(let ((@x30 (rewrite (= (not true) false))))
+(mp (asserted (not true)) @x30 false))))
+
 dfd95b23f80baacb2acdc442487bd8121f072035 9 0
 unsat
 ((set-logic AUFLIA)
@@ -1033,7 +1033,7 @@
 (let ((@x59 (trans @x55 (rewrite (= (not true) false)) (= (not (< 5 (ite (<= 3 8) 8 3))) false))))
 (mp (asserted (not (< 5 (ite (<= 3 8) 8 3)))) @x59 false)))))))))
 
-2d63144daf211d89368e355b9b23a3b5118b7ba9 88 0
+6b0b089fbe179e8a27509c818f9a5e6847ac6bf2 88 0
 unsat
 ((set-logic AUFLIRA)
 (proof
@@ -1207,7 +1207,7 @@
 (let ((@x67 (mp (asserted (not (< a$ 0.0))) @x66 $x58)))
 ((_ th-lemma arith farkas 7 3/2 1) @x67 @x52 @x40 false)))))))))))))))))
 
-b9f61649fae66ac195bf2593181f9d76c958bfe2 22 0
+3a6df2b095b936aac9a1d533e306f2d31b4fb44e 22 0
 unsat
 ((set-logic AUFLIA)
 (proof
@@ -1390,7 +1390,7 @@
 (let ((@x433 (mp (not-or-elim @x205 (not $x57)) @x432 $x422)))
 (unit-resolution @x433 @x488 (mp @x478 @x480 $x44) false)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
 
-fbc59441c65d9a844da44405d06d138b55c5d187 933 0
+32286f9c5e71eb2b15c18f86f04c80931e2e307b 933 0
 unsat
 ((set-logic AUFLIA)
 (proof
@@ -2345,7 +2345,7 @@
 (let ((@x62 (monotonicity @x59 (= $x36 (not $x43)))))
 (mp (asserted $x36) (trans @x62 @x71 (= $x36 false)) false))))))))))))))))))
 
-d2037888c28cf52f7a45f66288246169de6f14ad 113 0
+faae12ee7efe4347f92e42776a0e0e57a624319c 113 0
 unsat
 ((set-logic <null>)
 (proof
@@ -2459,7 +2459,7 @@
 (let ((@x74 (mp (asserted $x36) @x73 $x67)))
 ((_ th-lemma arith farkas -1 1 1) @x74 (unit-resolution ((_ th-lemma arith) (or false $x305)) (true-axiom true) $x305) @x337 false))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
 
-29e336c1b1dbb5e85401e6a2954560661ff3cadc 112 0
+57f344c9e787868c98d1e622f158c445c1899c96 112 0
 unsat
 ((set-logic <null>)
 (proof
@@ -2572,7 +2572,7 @@
 (let ((@x70 (mp (asserted (not (< (+ x$ (+ ?x29 ?x29)) (+ x$ 3)))) @x69 $x63)))
 ((_ th-lemma arith farkas -1 1 1) @x70 @x336 (unit-resolution ((_ th-lemma arith) (or false $x319)) (true-axiom true) $x319) false)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
 
-5bcedd8db3cccf5f1df2ef7ad1ca5e39c817a6f4 32 0
+3938db798ebafb55191dcdaf83a8615d1d59c0c3 32 0
 unsat
 ((set-logic <null>)
 (proof
@@ -2605,7 +2605,7 @@
 (let ((@x117 (unit-resolution ((_ th-lemma arith assign-bounds 1) (or $x102 (not $x100))) (unit-resolution ((_ th-lemma arith triangle-eq) (or (not $x95) $x100)) @x98 $x100) $x102)))
 (unit-resolution ((_ th-lemma arith triangle-eq) (or $x28 (not $x101) (not $x102))) @x117 @x110 @x30 false))))))))))))))))))))))))))))))
 
-97186805a3315ef9a1cc4847a2e19a6d09c77915 236 0
+353c8b65ed1b98772a89ffdae52f1cfae628dd6a 236 0
 unsat
 ((set-logic <null>)
 (proof
@@ -3224,7 +3224,7 @@
 (let ((@x66 (mp~ (mp (asserted $x33) @x60 $x56) (nnf-pos (refl (~ $x53 $x53)) (~ $x56 $x56)) $x56)))
 (unit-resolution @x66 @x464 false)))))))))))))))))))))))))
 
-a8cb4a130675f119ab8ba11cbe3a15041f18d2c6 62 0
+a02ae6c9688537bbe4c3be0d3dcebbc703417864 62 0
 unsat
 ((set-logic AUFLIA)
 (declare-fun ?v0!1 () Int)
@@ -3287,7 +3287,7 @@
 (let ((@x515 (unit-resolution (def-axiom (or z3name!0 $x220)) (unit-resolution @x131 @x238 $x88) $x220)))
 (unit-resolution ((_ th-lemma arith triangle-eq) (or (not $x220) (>= ?x96 3))) @x515 @x245 false))))))))))))))))))))))))))))))))))))))))))))))))))))))
 
-9e0e67e5bd5078ab683d440f1a73c518a403be1b 39 0
+9853592ad3514c1450e40271884a9f21f57ff85b 39 0
 unsat
 ((set-logic AUFLIA)
 (proof
@@ -3380,7 +3380,7 @@
 (let ((@x117 (and-elim (not-or-elim @x112 (and $x100 $x102)) $x102)))
 ((_ th-lemma arith farkas 1 1 1) @x117 @x116 @x118 false)))))))))))))))))))))))))))))))))))
 
-0d380fa4e68ab250e8351813b95695943794f02d 46 0
+9201a8009730b821ad6a3a2b64598e50ab5748ca 46 0
 unsat
 ((set-logic AUFLIRA)
 (declare-fun ?v1!1 () Int)
@@ -3600,6 +3600,33 @@
 (let ((@x73 (not-or-elim @x70 $x62)))
 (unit-resolution (unit-resolution ((_ th-lemma arith farkas 1 1) (or $x65 (not $x62))) @x73 $x65) @x74 false))))))))))))))))))
 
+d98ad8f668dead6f610669a52351ea0176a811b0 26 0
+unsat
+((set-logic <null>)
+(proof
+(let (($x58 (<= b$ 0)))
+(let (($x62 (or (not (and (not (<= a$ 0)) (not (<= (* a$ b$) 0)))) (not $x58))))
+(let (($x65 (not $x62)))
+(let (($x35 (not (=> (and (< 0 a$) (< 0 (* a$ b$))) (< 0 b$)))))
+(let (($x33 (< 0 b$)))
+(let (($x38 (or (not (and (< 0 a$) (< 0 (* a$ b$)))) $x33)))
+(let (($x56 (= (not (and (< 0 a$) (< 0 (* a$ b$)))) (not (and (not (<= a$ 0)) (not (<= (* a$ b$) 0)))))))
+(let ((?x30 (* a$ b$)))
+(let (($x48 (<= ?x30 0)))
+(let (($x49 (not $x48)))
+(let (($x44 (<= a$ 0)))
+(let (($x45 (not $x44)))
+(let (($x52 (and $x45 $x49)))
+(let (($x32 (and (< 0 a$) (< 0 ?x30))))
+(let ((@x54 (monotonicity (rewrite (= (< 0 a$) $x45)) (rewrite (= (< 0 ?x30) $x49)) (= $x32 $x52))))
+(let ((@x64 (monotonicity (monotonicity @x54 $x56) (rewrite (= $x33 (not $x58))) (= $x38 $x62))))
+(let ((@x43 (monotonicity (rewrite (= (=> $x32 $x33) $x38)) (= $x35 (not $x38)))))
+(let ((@x69 (trans @x43 (monotonicity @x64 (= (not $x38) $x65)) (= $x35 $x65))))
+(let ((@x74 (not-or-elim (mp (asserted $x35) @x69 $x65) $x58)))
+(let ((@x72 (and-elim (not-or-elim (mp (asserted $x35) @x69 $x65) $x52) $x45)))
+(let ((@x73 (and-elim (not-or-elim (mp (asserted $x35) @x69 $x65) $x52) $x49)))
+((_ th-lemma arith farkas 1 1 1) @x73 @x72 @x74 false))))))))))))))))))))))))
+
 271390ea915947de195c2202e30f90bb84689d60 26 0
 unsat
 ((set-logic <null>)
@@ -3627,33 +3654,6 @@
 (let ((@x92 (trans @x88 (rewrite (= (not true) false)) (= $x39 false))))
 (mp (asserted $x39) @x92 false))))))))))))))))))))))))
 
-d98ad8f668dead6f610669a52351ea0176a811b0 26 0
-unsat
-((set-logic <null>)
-(proof
-(let (($x58 (<= b$ 0)))
-(let (($x62 (or (not (and (not (<= a$ 0)) (not (<= (* a$ b$) 0)))) (not $x58))))
-(let (($x65 (not $x62)))
-(let (($x35 (not (=> (and (< 0 a$) (< 0 (* a$ b$))) (< 0 b$)))))
-(let (($x33 (< 0 b$)))
-(let (($x38 (or (not (and (< 0 a$) (< 0 (* a$ b$)))) $x33)))
-(let (($x56 (= (not (and (< 0 a$) (< 0 (* a$ b$)))) (not (and (not (<= a$ 0)) (not (<= (* a$ b$) 0)))))))
-(let ((?x30 (* a$ b$)))
-(let (($x48 (<= ?x30 0)))
-(let (($x49 (not $x48)))
-(let (($x44 (<= a$ 0)))
-(let (($x45 (not $x44)))
-(let (($x52 (and $x45 $x49)))
-(let (($x32 (and (< 0 a$) (< 0 ?x30))))
-(let ((@x54 (monotonicity (rewrite (= (< 0 a$) $x45)) (rewrite (= (< 0 ?x30) $x49)) (= $x32 $x52))))
-(let ((@x64 (monotonicity (monotonicity @x54 $x56) (rewrite (= $x33 (not $x58))) (= $x38 $x62))))
-(let ((@x43 (monotonicity (rewrite (= (=> $x32 $x33) $x38)) (= $x35 (not $x38)))))
-(let ((@x69 (trans @x43 (monotonicity @x64 (= (not $x38) $x65)) (= $x35 $x65))))
-(let ((@x74 (not-or-elim (mp (asserted $x35) @x69 $x65) $x58)))
-(let ((@x72 (and-elim (not-or-elim (mp (asserted $x35) @x69 $x65) $x52) $x45)))
-(let ((@x73 (and-elim (not-or-elim (mp (asserted $x35) @x69 $x65) $x52) $x49)))
-((_ th-lemma arith farkas 1 1 1) @x73 @x72 @x74 false))))))))))))))))))))))))
-
 b216c79478e44396acca3654b76845499fc18a04 23 0
 unsat
 ((set-logic <null>)
@@ -3944,6 +3944,21 @@
 (let ((@x53 (trans (monotonicity @x46 (= $x33 (not true))) (rewrite (= (not true) false)) (= $x33 false))))
 (mp (asserted $x33) @x53 false)))))))))))
 
+8b09776b03122aeacc9dd9526e1f0e5d41a07f14 14 0
+unsat
+((set-logic AUFLIA)
+(proof
+(let (($x29 (forall ((?v0 A$) )(g$ ?v0))
+))
+(let (($x30 (ite $x29 true false)))
+(let (($x31 (f$ $x30)))
+(let (($x32 (=> $x31 true)))
+(let (($x33 (not $x32)))
+(let ((@x42 (monotonicity (monotonicity (rewrite (= $x30 $x29)) (= $x31 (f$ $x29))) (= $x32 (=> (f$ $x29) true)))))
+(let ((@x46 (trans @x42 (rewrite (= (=> (f$ $x29) true) true)) (= $x32 true))))
+(let ((@x53 (trans (monotonicity @x46 (= $x33 (not true))) (rewrite (= (not true) false)) (= $x33 false))))
+(mp (asserted $x33) @x53 false)))))))))))
+
 b221de9d8dbe279344ac85e2ada07f5722636ce5 46 0
 unsat
 ((set-logic AUFLIA)
@@ -3991,21 +4006,6 @@
 (let ((@x478 (mp ((_ quant-inst 3 42) (or (not $x52) $x171)) (trans (monotonicity @x131 $x137) (rewrite (= $x134 $x134)) $x137) $x134)))
 (unit-resolution (unit-resolution @x478 @x78 $x168) (mp @x77 @x472 (not $x168)) false)))))))))))))))))))))))))))))))))))
 
-8b09776b03122aeacc9dd9526e1f0e5d41a07f14 14 0
-unsat
-((set-logic AUFLIA)
-(proof
-(let (($x29 (forall ((?v0 A$) )(g$ ?v0))
-))
-(let (($x30 (ite $x29 true false)))
-(let (($x31 (f$ $x30)))
-(let (($x32 (=> $x31 true)))
-(let (($x33 (not $x32)))
-(let ((@x42 (monotonicity (monotonicity (rewrite (= $x30 $x29)) (= $x31 (f$ $x29))) (= $x32 (=> (f$ $x29) true)))))
-(let ((@x46 (trans @x42 (rewrite (= (=> (f$ $x29) true) true)) (= $x32 true))))
-(let ((@x53 (trans (monotonicity @x46 (= $x33 (not true))) (rewrite (= (not true) false)) (= $x33 false))))
-(mp (asserted $x33) @x53 false)))))))))))
-
 5d3ccbcf168a634cad3952ad8f6d2798329d6a77 75 0
 unsat
 ((set-logic AUFLIA)
@@ -4204,7 +4204,7 @@
 (let ((@x81 (asserted $x80)))
 (unit-resolution @x81 (trans @x397 ((_ th-lemma arith eq-propagate 1 1 -4 -4) @x410 @x422 @x428 @x438 (= ?x490 6)) $x79) false)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
 
-fa62bf7228a50eb8c663092f87f9af7c25feaffe 336 0
+640bb6103260f74026864b86c2301c00ea737ff0 336 0
 unsat
 ((set-logic <null>)
 (proof
--- a/src/HOL/SMT_Examples/SMT_Word_Examples.certs2	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/SMT_Examples/SMT_Word_Examples.certs2	Sun Aug 10 14:34:43 2014 +0200
@@ -43,14 +43,6 @@
 (let ((@x49 (trans @x45 (rewrite (= (not true) false)) (= (not (= (bvmul (_ bv7 8) (_ bv3 8)) (_ bv21 8))) false))))
 (mp (asserted (not (= (bvmul (_ bv7 8) (_ bv3 8)) (_ bv21 8)))) @x49 false)))))))
 
-6dd848d7b26e0521039e21a5e2bafebc1fee8c9b 7 0
-unsat
-((set-logic <null>)
-(proof
-(let ((@x35 (monotonicity (rewrite (= (= (_ bv11 5) (_ bv11 5)) true)) (= (not (= (_ bv11 5) (_ bv11 5))) (not true)))))
-(let ((@x39 (trans @x35 (rewrite (= (not true) false)) (= (not (= (_ bv11 5) (_ bv11 5))) false))))
-(mp (asserted (not (= (_ bv11 5) (_ bv11 5)))) @x39 false)))))
-
 45bf9e0a746f7dde46f8242e5851928c2671c7f2 9 0
 unsat
 ((set-logic <null>)
@@ -61,6 +53,14 @@
 (let ((@x54 (trans @x50 (rewrite (= (not true) false)) (= (not (= (bvsub (_ bv11 8) (_ bv27 8)) (bvneg (_ bv16 8)))) false))))
 (mp (asserted (not (= (bvsub (_ bv11 8) (_ bv27 8)) (bvneg (_ bv16 8))))) @x54 false)))))))
 
+6dd848d7b26e0521039e21a5e2bafebc1fee8c9b 7 0
+unsat
+((set-logic <null>)
+(proof
+(let ((@x35 (monotonicity (rewrite (= (= (_ bv11 5) (_ bv11 5)) true)) (= (not (= (_ bv11 5) (_ bv11 5))) (not true)))))
+(let ((@x39 (trans @x35 (rewrite (= (not true) false)) (= (not (= (_ bv11 5) (_ bv11 5))) false))))
+(mp (asserted (not (= (_ bv11 5) (_ bv11 5)))) @x39 false)))))
+
 00a7ff287d9c23d984163ea8e0cac8ac61008afd 11 0
 unsat
 ((set-logic <null>)
@@ -142,6 +142,15 @@
 (let ((@x48 (trans @x44 (rewrite (= (not true) false)) (= (not (= (bvxor (_ bv240 8) (_ bv255 8)) (_ bv15 8))) false))))
 (mp (asserted (not (= (bvxor (_ bv240 8) (_ bv255 8)) (_ bv15 8)))) @x48 false)))))))
 
+3838eb33edcd292c3a0ecbf1662b54af3940189f 8 0
+unsat
+((set-logic <null>)
+(proof
+(let ((@x36 (monotonicity (rewrite (= (bvnot (_ bv240 16)) (_ bv65295 16))) (= (= (bvnot (_ bv240 16)) (_ bv65295 16)) (= (_ bv65295 16) (_ bv65295 16))))))
+(let ((@x40 (trans @x36 (rewrite (= (= (_ bv65295 16) (_ bv65295 16)) true)) (= (= (bvnot (_ bv240 16)) (_ bv65295 16)) true))))
+(let ((@x47 (trans (monotonicity @x40 (= (not (= (bvnot (_ bv240 16)) (_ bv65295 16))) (not true))) (rewrite (= (not true) false)) (= (not (= (bvnot (_ bv240 16)) (_ bv65295 16))) false))))
+(mp (asserted (not (= (bvnot (_ bv240 16)) (_ bv65295 16)))) @x47 false))))))
+
 14431ccb33137348161eb6ca6cfb2e57942c3f9d 9 0
 unsat
 ((set-logic <null>)
@@ -152,14 +161,15 @@
 (let ((@x48 (trans @x44 (rewrite (= (not true) false)) (= (not (= (concat (_ bv11 4) (_ bv27 8)) (_ bv2843 12))) false))))
 (mp (asserted (not (= (concat (_ bv11 4) (_ bv27 8)) (_ bv2843 12)))) @x48 false)))))))
 
-3838eb33edcd292c3a0ecbf1662b54af3940189f 8 0
+880bee16a8f6469b14122277b92e87533ef6a071 9 0
 unsat
 ((set-logic <null>)
 (proof
-(let ((@x36 (monotonicity (rewrite (= (bvnot (_ bv240 16)) (_ bv65295 16))) (= (= (bvnot (_ bv240 16)) (_ bv65295 16)) (= (_ bv65295 16) (_ bv65295 16))))))
-(let ((@x40 (trans @x36 (rewrite (= (= (_ bv65295 16) (_ bv65295 16)) true)) (= (= (bvnot (_ bv240 16)) (_ bv65295 16)) true))))
-(let ((@x47 (trans (monotonicity @x40 (= (not (= (bvnot (_ bv240 16)) (_ bv65295 16))) (not true))) (rewrite (= (not true) false)) (= (not (= (bvnot (_ bv240 16)) (_ bv65295 16))) false))))
-(mp (asserted (not (= (bvnot (_ bv240 16)) (_ bv65295 16)))) @x47 false))))))
+(let ((@x37 (monotonicity (rewrite (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10))) (= (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10)) (= (_ bv207 10) (_ bv207 10))))))
+(let ((@x41 (trans @x37 (rewrite (= (= (_ bv207 10) (_ bv207 10)) true)) (= (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10)) true))))
+(let ((@x44 (monotonicity @x41 (= (not (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10))) (not true)))))
+(let ((@x48 (trans @x44 (rewrite (= (not true) false)) (= (not (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10))) false))))
+(mp (asserted (not (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10)))) @x48 false)))))))
 
 8d29c9b5ef712a3d9d2db87383c9c25c0b553e01 8 0
 unsat
@@ -170,16 +180,6 @@
 (let ((@x47 (trans (monotonicity @x40 (= (not (= ((_ extract 2 1) (_ bv6 4)) (_ bv3 2))) (not true))) (rewrite (= (not true) false)) (= (not (= ((_ extract 2 1) (_ bv6 4)) (_ bv3 2))) false))))
 (mp (asserted (not (= ((_ extract 2 1) (_ bv6 4)) (_ bv3 2)))) @x47 false))))))
 
-880bee16a8f6469b14122277b92e87533ef6a071 9 0
-unsat
-((set-logic <null>)
-(proof
-(let ((@x37 (monotonicity (rewrite (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10))) (= (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10)) (= (_ bv207 10) (_ bv207 10))))))
-(let ((@x41 (trans @x37 (rewrite (= (= (_ bv207 10) (_ bv207 10)) true)) (= (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10)) true))))
-(let ((@x44 (monotonicity @x41 (= (not (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10))) (not true)))))
-(let ((@x48 (trans @x44 (rewrite (= (not true) false)) (= (not (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10))) false))))
-(mp (asserted (not (= (concat (_ bv3 4) (_ bv15 6)) (_ bv207 10)))) @x48 false)))))))
-
 446b3cb9d63aa1f488dc092ed3fb111d2ad50b4e 9 0
 unsat
 ((set-logic <null>)
@@ -240,6 +240,16 @@
 (let ((@x47 (trans @x43 (rewrite (= (not true) false)) (= (not (= ((_ rotate_right 2) (_ bv6 4)) (_ bv9 4))) false))))
 (mp (asserted (not (= ((_ rotate_right 2) (_ bv6 4)) (_ bv9 4)))) @x47 false)))))))
 
+33a52e620069e1ecebbc00aa6b0099170558c111 9 0
+unsat
+((set-logic <null>)
+(proof
+(let ((@x36 (monotonicity (rewrite (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4))) (= (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4)) (= (_ bv13 4) (_ bv13 4))))))
+(let ((@x40 (trans @x36 (rewrite (= (= (_ bv13 4) (_ bv13 4)) true)) (= (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4)) true))))
+(let ((@x43 (monotonicity @x40 (= (not (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4))) (not true)))))
+(let ((@x47 (trans @x43 (rewrite (= (not true) false)) (= (not (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4))) false))))
+(mp (asserted (not (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4)))) @x47 false)))))))
+
 5c4e275bed2d91897e820ccd6744b0a775a6e26e 17 0
 unsat
 ((set-logic <null>)
@@ -258,46 +268,6 @@
 (let ((@x55 (unit-resolution ((_ th-lemma bv) $x53) @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 $x52)))
 (unit-resolution @x55 @x63 false)))))))))))))))
 
-33a52e620069e1ecebbc00aa6b0099170558c111 9 0
-unsat
-((set-logic <null>)
-(proof
-(let ((@x36 (monotonicity (rewrite (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4))) (= (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4)) (= (_ bv13 4) (_ bv13 4))))))
-(let ((@x40 (trans @x36 (rewrite (= (= (_ bv13 4) (_ bv13 4)) true)) (= (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4)) true))))
-(let ((@x43 (monotonicity @x40 (= (not (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4))) (not true)))))
-(let ((@x47 (trans @x43 (rewrite (= (not true) false)) (= (not (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4))) false))))
-(mp (asserted (not (= ((_ rotate_left 1) (_ bv14 4)) (_ bv13 4)))) @x47 false)))))))
-
-115ab22c9945d493b971e69a38d9e608c5b40a71 29 0
-unsat
-((set-logic <null>)
-(proof
-(let ((?x28 (bv2int$ (_ bv0 2))))
-(let (($x183 (<= ?x28 0)))
-(let (($x184 (not $x183)))
-(let (($x175 (forall ((?v0 (_ BitVec 2)) )(!(let ((?x47 (bv2int$ ?v0)))
-(let (($x53 (<= ?x47 0)))
-(not $x53))) :pattern ( (bv2int$ ?v0) )))
-))
-(let (($x57 (forall ((?v0 (_ BitVec 2)) )(let ((?x47 (bv2int$ ?v0)))
-(let (($x53 (<= ?x47 0)))
-(not $x53))))
-))
-(let ((@x177 (refl (= (not (<= (bv2int$ ?0) 0)) (not (<= (bv2int$ ?0) 0))))))
-(let ((@x112 (refl (~ (not (<= (bv2int$ ?0) 0)) (not (<= (bv2int$ ?0) 0))))))
-(let (($x49 (forall ((?v0 (_ BitVec 2)) )(let ((?x47 (bv2int$ ?v0)))
-(< 0 ?x47)))
-))
-(let ((@x56 (rewrite (= (< 0 (bv2int$ ?0)) (not (<= (bv2int$ ?0) 0))))))
-(let ((@x115 (mp~ (mp (asserted $x49) (quant-intro @x56 (= $x49 $x57)) $x57) (nnf-pos @x112 (~ $x57 $x57)) $x57)))
-(let ((@x180 (mp @x115 (quant-intro @x177 (= $x57 $x175)) $x175)))
-(let (($x187 (not $x175)))
-(let (($x188 (or $x187 $x184)))
-(let ((@x189 ((_ quant-inst (_ bv0 2)) $x188)))
-(let (($x29 (= ?x28 0)))
-(let ((@x30 (asserted $x29)))
-(unit-resolution ((_ th-lemma arith triangle-eq) (or (not $x29) $x183)) @x30 (unit-resolution @x189 @x180 $x184) false)))))))))))))))))))
-
 1d4a0e2a4449a8adbcf5a134bf7f2b0ee940d954 51 0
 unsat
 ((set-logic <null>)
@@ -350,6 +320,36 @@
 (let ((@x314 (unit-resolution ((_ th-lemma bv) $x312) @x26 @x26 @x26 @x26 @x26 @x26 @x26 @x26 (unit-resolution (def-axiom (or $x95 (not $x74))) @x303 (not $x74)) (unit-resolution (def-axiom (or $x95 (not $x75))) @x303 (not $x75)) (unit-resolution (def-axiom (or $x95 (not $x76))) @x303 (not $x76)) (unit-resolution (def-axiom (or $x95 (not $x77))) @x303 (not $x77)) (unit-resolution (def-axiom (or $x95 (not $x78))) @x303 (not $x78)) (unit-resolution (def-axiom (or $x95 (not $x79))) @x303 (not $x79)) (unit-resolution (def-axiom (or $x95 (not $x80))) @x303 (not $x80)) (unit-resolution (def-axiom (or $x95 $x264)) @x303 $x264) $x300)))
 (unit-resolution @x314 @x322 false)))))))))))))))))))))))))))))))))))))))))))))))))
 
+115ab22c9945d493b971e69a38d9e608c5b40a71 29 0
+unsat
+((set-logic <null>)
+(proof
+(let ((?x28 (bv2int$ (_ bv0 2))))
+(let (($x183 (<= ?x28 0)))
+(let (($x184 (not $x183)))
+(let (($x175 (forall ((?v0 (_ BitVec 2)) )(!(let ((?x47 (bv2int$ ?v0)))
+(let (($x53 (<= ?x47 0)))
+(not $x53))) :pattern ( (bv2int$ ?v0) )))
+))
+(let (($x57 (forall ((?v0 (_ BitVec 2)) )(let ((?x47 (bv2int$ ?v0)))
+(let (($x53 (<= ?x47 0)))
+(not $x53))))
+))
+(let ((@x177 (refl (= (not (<= (bv2int$ ?0) 0)) (not (<= (bv2int$ ?0) 0))))))
+(let ((@x112 (refl (~ (not (<= (bv2int$ ?0) 0)) (not (<= (bv2int$ ?0) 0))))))
+(let (($x49 (forall ((?v0 (_ BitVec 2)) )(let ((?x47 (bv2int$ ?v0)))
+(< 0 ?x47)))
+))
+(let ((@x56 (rewrite (= (< 0 (bv2int$ ?0)) (not (<= (bv2int$ ?0) 0))))))
+(let ((@x115 (mp~ (mp (asserted $x49) (quant-intro @x56 (= $x49 $x57)) $x57) (nnf-pos @x112 (~ $x57 $x57)) $x57)))
+(let ((@x180 (mp @x115 (quant-intro @x177 (= $x57 $x175)) $x175)))
+(let (($x187 (not $x175)))
+(let (($x188 (or $x187 $x184)))
+(let ((@x189 ((_ quant-inst (_ bv0 2)) $x188)))
+(let (($x29 (= ?x28 0)))
+(let ((@x30 (asserted $x29)))
+(unit-resolution ((_ th-lemma arith triangle-eq) (or (not $x29) $x183)) @x30 (unit-resolution @x189 @x180 $x184) false)))))))))))))))))))
+
 d14e7b8f0d1858316e700b4eb09e7d03e57cf9c3 16 0
 unsat
 ((set-logic <null>)
--- a/src/HOL/TPTP/ATP_Problem_Import.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/ATP_Problem_Import.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -5,10 +5,7 @@
 header {* ATP Problem Importer *}
 
 theory ATP_Problem_Import
-imports
-  Complex_Main
-  TPTP_Interpret
-  "~~/src/HOL/Library/Refute"
+imports Complex_Main TPTP_Interpret "~~/src/HOL/Library/Refute"
 begin
 
 ML_file "sledgehammer_tactics.ML"
@@ -24,8 +21,7 @@
 ML_file "atp_problem_import.ML"
 
 (*
-ML {* ATP_Problem_Import.isabelle_tptp_file @{theory} 50
-          "$TPTP/Problems/PUZ/PUZ107^5.p" *}
+ML {* ATP_Problem_Import.isabelle_tptp_file @{theory} 50 "$TPTP/Problems/PUZ/PUZ107^5.p" *}
 *)
 
 end
--- a/src/HOL/TPTP/TPTP_Interpret.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Interpret.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -6,12 +6,12 @@
 *)
 
 theory TPTP_Interpret
-imports Main TPTP_Parser
+imports Complex_Main TPTP_Parser
 keywords "import_tptp" :: thy_decl
 begin
 
-typedecl "ind"
+typedecl ind
 
-ML_file  "TPTP_Parser/tptp_interpret.ML"
+ML_file "TPTP_Parser/tptp_interpret.ML"
 
 end
\ No newline at end of file
--- a/src/HOL/TPTP/TPTP_Parser/README	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Parser/README	Sun Aug 10 14:34:43 2014 +0200
@@ -29,4 +29,4 @@
 ML-Yacc's library.
 
 Nik Sultana
-9th March 2012
\ No newline at end of file
+9th March 2012
--- a/src/HOL/TPTP/TPTP_Parser/tptp.lex	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Parser/tptp.lex	Sun Aug 10 14:34:43 2014 +0200
@@ -86,6 +86,7 @@
 upper_word                = {upper_alpha}{alpha_numeric}*;
 dollar_dollar_word        = {ddollar}{lower_word};
 dollar_word               = {dollar}{lower_word};
+dollar_underscore         = {dollar}_;
 
 distinct_object           = {double_quote}{do_char}+{double_quote};
 
@@ -177,6 +178,7 @@
 
 {lower_word}   => (col:=yypos-(!eolpos); T.LOWER_WORD(yytext,!linep,!col));
 {dollar_word}  => (col:=yypos-(!eolpos); T.DOLLAR_WORD(yytext,!linep,!col));
+{dollar_underscore}  => (col:=yypos-(!eolpos); T.DOLLAR_WORD(yytext,!linep,!col));
 {dollar_dollar_word}  => (col:=yypos-(!eolpos); T.DOLLAR_DOLLAR_WORD(yytext,!linep,!col));
 
 "+"           => (col:=yypos-(!eolpos); T.PLUS(!linep,!col));
--- a/src/HOL/TPTP/TPTP_Parser/tptp.yacc	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Parser/tptp.yacc	Sun Aug 10 14:34:43 2014 +0200
@@ -491,7 +491,7 @@
 tff_unitary_type : tff_atomic_type               (( tff_atomic_type ))
                  | LPAREN tff_xprod_type RPAREN  (( tff_xprod_type ))
 
-tff_atomic_type : atomic_word   (( Atom_type atomic_word ))
+tff_atomic_type : atomic_word   (( Atom_type (atomic_word, []) ))
                 | defined_type  (( Defined_type defined_type ))
                 | atomic_word LPAREN tff_type_arguments RPAREN (( Fmla_type (Fmla (Uninterpreted atomic_word, (map Type_fmla tff_type_arguments))) ))
                 | variable_ (( Fmla_type (Pred (Interpreted_ExtraLogic Apply, [Term_Var variable_])) ))
@@ -634,6 +634,7 @@
   | "$real" => Type_Real
   | "$rat" => Type_Rat
   | "$int" => Type_Int
+  | "$_" => Type_Dummy
   | thing => raise UNRECOGNISED_SYMBOL ("defined_type", thing)
 ))
 
@@ -747,6 +748,7 @@
   | "$real" => TypeSymbol Type_Real
   | "$rat" => TypeSymbol Type_Rat
   | "$tType" => TypeSymbol Type_Type
+  | "$_" => TypeSymbol Type_Dummy
 
   | "$true" => Interpreted_Logic True
   | "$false" => Interpreted_Logic False
--- a/src/HOL/TPTP/TPTP_Parser/tptp_interpret.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Parser/tptp_interpret.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -8,11 +8,11 @@
 sig
   (*Signature extension: typing information for variables and constants*)
   type var_types = (string * typ option) list
-  type const_map = (string * term) list
+  type const_map = (string * (term * int list)) list
 
   (*Mapping from TPTP types to Isabelle/HOL types. This map works over all
   base types. The map must be total wrt TPTP types.*)
-  type type_map = (TPTP_Syntax.tptp_type * typ) list
+  type type_map = (string * (string * int)) list
 
   (*A parsed annotated formula is represented as a 5-tuple consisting of
   the formula's label, its role, the TPTP formula, its Isabelle/HOL meaning, and
@@ -35,7 +35,7 @@
      problem_name : TPTP_Problem_Name.problem_name option}
 
   (*Generates a fresh Isabelle/HOL type for interpreting a TPTP type in a theory.*)
-  val declare_type : config -> (TPTP_Syntax.tptp_type * string) -> type_map ->
+  val declare_type : config -> string * (string * int) -> type_map ->
     theory -> type_map * theory
 
   (*Map TPTP types to Isabelle/HOL types*)
@@ -132,9 +132,9 @@
 
 (** Signatures and other type abbrevations **)
 
-type const_map = (string * term) list
+type const_map = (string * (term * int list)) list
 type var_types = (string * typ option) list
-type type_map = (TPTP_Syntax.tptp_type * typ) list
+type type_map = (string * (string * int)) list
 type tptp_formula_meaning =
   string * TPTP_Syntax.role * term * TPTP_Proof.source_info option
 type tptp_general_meaning =
@@ -189,17 +189,19 @@
 (*Returns updated theory and the name of the final type's name -- i.e. if the
 original name is already taken then the function looks for an available
 alternative. It also returns an updated type_map if one has been passed to it.*)
-fun declare_type (config : config) (thf_type, type_name) ty_map thy =
+fun declare_type (config : config) (thf_type, (type_name, arity)) ty_map thy =
   if type_exists config thy type_name then
-    raise MISINTERPRET_TYPE ("Type already exists", Atom_type type_name)
+    raise MISINTERPRET_TYPE ("Type already exists", Atom_type (type_name, []))
   else
     let
       val binding = mk_binding config type_name
       val final_fqn = Sign.full_name thy binding
+      val tyargs =
+        List.tabulate (arity, rpair @{sort type} o prefix "'" o string_of_int)
       val thy' =
-        Typedecl.typedecl_global (mk_binding config type_name, [], NoSyn) thy
+        Typedecl.typedecl_global (mk_binding config type_name, tyargs, NoSyn) thy
         |> snd
-      val typ_map_entry = (thf_type, (Type (final_fqn, [])))
+      val typ_map_entry = (thf_type, (final_fqn, arity))
       val ty_map' = typ_map_entry :: ty_map
     in (ty_map', thy') end
 
@@ -217,42 +219,56 @@
     raise MISINTERPRET_TERM
      ("Const already declared", Term_Func (Uninterpreted const_name, []))
   else
-    Theory.specify_const
-     ((mk_binding config const_name, ty), NoSyn) thy
-
-fun declare_const_ifnot config const_name ty thy =
-  if const_exists config thy const_name then
-    (Sign.mk_const thy ((Sign.full_name thy (mk_binding config const_name)), []), thy)
-  else declare_constant config const_name ty thy
+    Theory.specify_const ((mk_binding config const_name, ty), NoSyn) thy
 
 
 (** Interpretation functions **)
 
-(*Types in THF are encoded as formulas. This function translates them to type form.*)
+(*Types in TFF/THF are encoded as formulas. These functions translate them to type form.*)
+
+fun termtype_to_type (Term_Func (TypeSymbol typ, [])) =
+      Defined_type typ
+  | termtype_to_type (Term_Func (Uninterpreted str, tms)) =
+      Atom_type (str, map termtype_to_type tms)
+  | termtype_to_type (Term_Var str) = Var_type str
+
 (*FIXME possibly incomplete hack*)
-fun fmlatype_to_type (Atom (THF_Atom_term (Term_Func (TypeSymbol typ, [])))) =
-      Defined_type typ
-  | fmlatype_to_type (Atom (THF_Atom_term (Term_Func (Uninterpreted str, [])))) =
-      Atom_type str
+fun fmlatype_to_type (Atom (THF_Atom_term tm)) = termtype_to_type tm
   | fmlatype_to_type (Type_fmla (Fn_type (ty1, ty2))) =
       let
         val ty1' = case ty1 of
             Fn_type _ => fmlatype_to_type (Type_fmla ty1)
           | Fmla_type ty1 => fmlatype_to_type ty1
+          | _ => ty1
         val ty2' = case ty2 of
             Fn_type _ => fmlatype_to_type (Type_fmla ty2)
           | Fmla_type ty2 => fmlatype_to_type ty2
+          | _ => ty2
       in Fn_type (ty1', ty2') end
+  | fmlatype_to_type (Type_fmla ty) = ty
+  | fmlatype_to_type (Fmla (Uninterpreted str, fmla)) =
+      Atom_type (str, map fmlatype_to_type fmla)
+  | fmlatype_to_type (Quant (Dep_Prod, _, fmla)) = fmlatype_to_type fmla
+  | fmlatype_to_type (Pred (Interpreted_ExtraLogic Apply, [tm])) =
+      termtype_to_type tm
+
+fun tfree_name_of_var_type str = "'" ^ Name.desymbolize (SOME false) str
+fun tfree_of_var_type str = TFree (tfree_name_of_var_type str, @{sort type})
 
 fun interpret_type config thy type_map thf_ty =
   let
-    fun lookup_type ty_map thf_ty =
-      (case AList.lookup (op =) ty_map thf_ty of
+    fun lookup_type ty_map ary str =
+      (case AList.lookup (op =) ty_map str of
           NONE =>
-            raise MISINTERPRET_TYPE
+            raise MISINTERPRET_SYMBOL
               ("Could not find the interpretation of this TPTP type in the \
-               \mapping to Isabelle/HOL", thf_ty)
-        | SOME ty => ty)
+               \mapping to Isabelle/HOL", Uninterpreted str)
+        | SOME (str', ary') =>
+            if ary' = ary then
+              str'
+            else
+              raise MISINTERPRET_SYMBOL ("TPTP type used with wrong arity",
+                Uninterpreted str))
   in
     case thf_ty of
        Prod_type (thf_ty1, thf_ty2) =>
@@ -263,22 +279,19 @@
          Type (@{type_name fun},
           [interpret_type config thy type_map thf_ty1,
            interpret_type config thy type_map thf_ty2])
-     | Atom_type _ =>
-         lookup_type type_map thf_ty
+     | Atom_type (str, thf_tys) =>
+         Type (lookup_type type_map (length thf_tys) str,
+           map (interpret_type config thy type_map) thf_tys)
+     | Var_type str => tfree_of_var_type str
      | Defined_type tptp_base_type =>
          (case tptp_base_type of
             Type_Ind => @{typ ind}
           | Type_Bool => HOLogic.boolT
           | Type_Type => raise MISINTERPRET_TYPE ("No type interpretation", thf_ty)
-         (*FIXME these types are currently unsupported, so they're treated as
-         individuals*)
-          | Type_Int =>
-              interpret_type config thy type_map (Defined_type Type_Ind)
-          | Type_Rat =>
-              interpret_type config thy type_map (Defined_type Type_Ind)
-          | Type_Real =>
-              interpret_type config thy type_map (Defined_type Type_Ind)
-          )
+          | Type_Int => @{typ int}
+          | Type_Rat => @{typ rat}
+          | Type_Real => @{typ real}
+          | Type_Dummy => dummyT)
      | Sum_type _ =>
          raise MISINTERPRET_TYPE (*FIXME*)
           ("No type interpretation (sum type)", thf_ty)
@@ -290,19 +303,15 @@
           ("No type interpretation (subtype)", thf_ty)
   end
 
-fun the_const config thy language const_map_payload str =
-  if language = THF then
-    (case AList.lookup (op =) const_map_payload str of
-        NONE => raise MISINTERPRET_TERM
-          ("Could not find the interpretation of this constant in the \
-            \mapping to Isabelle/HOL", Term_Func (Uninterpreted str, []))
-      | SOME t => t)
-  else
-    if const_exists config thy str then
-       Sign.mk_const thy ((Sign.full_name thy (mk_binding config str)), [])
-    else raise MISINTERPRET_TERM
-          ("Could not find the interpretation of this constant in the \
-            \mapping to Isabelle/HOL", Term_Func (Uninterpreted str, []))
+fun permute_type_args perm Ts = map (nth Ts) perm
+
+fun the_const thy const_map str tyargs =
+  (case AList.lookup (op =) const_map str of
+      SOME (Const (s, _), tyarg_perm) =>
+      Sign.mk_const thy (s, permute_type_args tyarg_perm tyargs)
+    | _ => raise MISINTERPRET_TERM
+        ("Could not find the interpretation of this constant in the \
+          \mapping to Isabelle/HOL", Term_Func (Uninterpreted str, [])))
 
 (*Eta-expands n-ary function.
  "str" is the name of an Isabelle/HOL constant*)
@@ -346,18 +355,24 @@
   | Is_Int => 1
   | Is_Rat => 1
   | Distinct => 1
-  | Apply=> 2
+  | Apply => 2
 
-fun interpret_symbol config language const_map symb thy =
+fun type_arity_of_symbol thy config (Uninterpreted n) =
+    let val s = full_name thy config n in
+      length (Sign.const_typargs thy (s, Sign.the_const_type thy s))
+    end
+  | type_arity_of_symbol _ _ _ = 0
+
+fun interpret_symbol config const_map symb tyargs thy =
   case symb of
      Uninterpreted n =>
        (*Constant would have been added to the map at the time of
        declaration*)
-       the_const config thy language const_map n
+       the_const thy const_map n tyargs
    | Interpreted_ExtraLogic interpreted_symbol =>
        (*FIXME not interpreting*)
        Sign.mk_const thy ((Sign.full_name thy (mk_binding config
-          (string_of_interpreted_symbol interpreted_symbol))), [])
+          (string_of_interpreted_symbol interpreted_symbol))), tyargs)
    | Interpreted_Logic logic_symbol =>
        (case logic_symbol of
           Equals => HOLogic.eq_const dummyT
@@ -427,16 +442,14 @@
         in
           case symb of
              Uninterpreted const_name =>
-               declare_const_ifnot config const_name
-                (mk_fun_type (replicate (length tptp_ts) ind_type) value_type I) thy'
-               |> snd
+               perhaps (try (snd o declare_constant config const_name
+                (mk_fun_type (replicate (length tptp_ts) ind_type) value_type I))) thy'
            | _ => thy'
         end
     | Atom_App _ => thy
     | Atom_Arity (const_name, n_args) =>
-        declare_const_ifnot config const_name
-         (mk_fun_type (replicate n_args ind_type) value_type I) thy
-        |> snd
+        perhaps (try (snd o declare_constant config const_name
+         (mk_fun_type (replicate n_args ind_type) value_type I))) thy
   end
 
 (*FIXME only used until interpretation is implemented*)
@@ -499,24 +512,32 @@
                (interpret_term formula_level config language const_map
                 var_types type_map (hd tptp_ts)))
            | _ =>
-              (*apply symb to tptp_ts*)
-              if is_prod_typed thy' config symb then
-                let
-                  val (t, thy'') =
-                    mtimes'
-                      (fold_map (interpret_term false config language const_map
-                       var_types type_map) (tl tptp_ts) thy')
-                      (interpret_term false config language const_map
-                       var_types type_map (hd tptp_ts))
-                in (interpret_symbol config language const_map symb thy' $ t, thy'')
-                end
-              else
-                (
-                mapply'
-                  (fold_map (interpret_term false config language const_map
-                   var_types type_map) tptp_ts thy')
-                  (`(interpret_symbol config language const_map symb))
-                )
+              let
+                val typ_arity = type_arity_of_symbol thy config symb
+                val (tptp_type_args, tptp_term_args) = chop typ_arity tptp_ts
+                val tyargs =
+                  map (interpret_type config thy type_map o termtype_to_type)
+                    tptp_type_args
+              in
+                (*apply symb to tptp_ts*)
+                if is_prod_typed thy' config symb then
+                  let
+                    val (t, thy'') =
+                      mtimes'
+                        (fold_map (interpret_term false config language const_map
+                         var_types type_map) (tl tptp_term_args) thy')
+                        (interpret_term false config language const_map
+                         var_types type_map (hd tptp_term_args))
+                  in (interpret_symbol config const_map symb tyargs thy' $ t, thy'')
+                  end
+                else
+                  (
+                  mapply'
+                    (fold_map (interpret_term false config language const_map
+                     var_types type_map) tptp_term_args thy')
+                    (`(interpret_symbol config const_map symb tyargs))
+                  )
+              end
        end
   | Term_Var n =>
      (if language = THF orelse language = TFF then
@@ -543,14 +564,12 @@
   | Term_Num (number_kind, num) =>
       let
         (*FIXME hack*)
-        val ind_type = interpret_type config thy type_map (Defined_type Type_Ind)
-        val prefix = case number_kind of
-            Int_num => "intn_"
-          | Real_num => "realn_"
-          | Rat_num => "ratn_"
-      (*FIXME hack -- for Int_num should be
-       HOLogic.mk_number @{typ "int"} (the (Int.fromString num))*)
-      in declare_const_ifnot config (prefix ^ num) ind_type thy end
+        val tptp_type = case number_kind of
+            Int_num => Type_Int
+          | Real_num => Type_Real
+          | Rat_num => Type_Rat
+        val T = interpret_type config thy type_map (Defined_type tptp_type)
+      in (HOLogic.mk_number T (the (Int.fromString num)), thy) end
   | Term_Distinct_Object str =>
       declare_constant config ("do_" ^ str)
         (interpret_type config thy type_map (Defined_type Type_Ind)) thy
@@ -577,11 +596,9 @@
                  (Atom_Arity (const_name, length tptp_formulas)) thy'
             in
               (if is_prod_typed thy' config symbol then
-                 mtimes thy' args (interpret_symbol config language
-                  const_map symbol thy')
+                 mtimes thy' args (interpret_symbol config const_map symbol [] thy')
                else
-                mapply args
-                 (interpret_symbol config language const_map symbol thy'),
+                mapply args (interpret_symbol config const_map symbol [] thy'),
               thy')
             end
         | _ =>
@@ -593,11 +610,9 @@
                  tptp_formulas thy
             in
               (if is_prod_typed thy' config symbol then
-                 mtimes thy' args (interpret_symbol config language
-                  const_map symbol thy')
+                 mtimes thy' args (interpret_symbol config const_map symbol [] thy')
                else
-                 mapply args
-                  (interpret_symbol config language const_map symbol thy'),
+                 mapply args (interpret_symbol config const_map symbol [] thy'),
                thy')
             end)
     | Sequent _ =>
@@ -669,12 +684,12 @@
         (case tptp_atom of
           TFF_Typed_Atom (symbol, tptp_type_opt) =>
             (*FIXME ignoring type info*)
-            (interpret_symbol config language const_map symbol thy, thy)
+            (interpret_symbol config const_map symbol [] thy, thy)
         | THF_Atom_term tptp_term =>
             interpret_term true config language const_map var_types
              type_map tptp_term thy
         | THF_Atom_conn_term symbol =>
-            (interpret_symbol config language const_map symbol thy, thy))
+            (interpret_symbol config const_map symbol [] thy, thy))
     | Type_fmla _ =>
          raise MISINTERPRET_FORMULA
           ("Cannot interpret types as formulas", tptp_fmla)
@@ -684,20 +699,31 @@
 
 (*Extract the type from a typing*)
 local
+  fun type_vars_of_fmlatype (Quant (Dep_Prod, varlist, fmla)) =
+      map fst varlist @ type_vars_of_fmlatype fmla
+    | type_vars_of_fmlatype _ = []
+
   fun extract_type tptp_type =
     case tptp_type of
-        Fmla_type typ => fmlatype_to_type typ
-      | _ => tptp_type
+        Fmla_type fmla => (type_vars_of_fmlatype fmla, fmlatype_to_type fmla)
+      | _ => ([], tptp_type)
 in
   fun typeof_typing (THF_typing (_, tptp_type)) = extract_type tptp_type
   fun typeof_tff_typing (Atom (TFF_Typed_Atom (_, SOME tptp_type))) =
     extract_type tptp_type
 end
+
 fun nameof_typing
   (THF_typing
      ((Atom (THF_Atom_term (Term_Func (Uninterpreted str, [])))), _)) = str
 fun nameof_tff_typing (Atom (TFF_Typed_Atom (Uninterpreted str, _))) = str
 
+fun strip_prod_type (Prod_type (ty1, ty2)) = ty1 :: strip_prod_type ty2
+  | strip_prod_type ty = [ty]
+
+fun dest_fn_type (Fn_type (ty1, ty2)) = (strip_prod_type ty1, ty2)
+  | dest_fn_type ty = ([], ty)
+
 fun resolve_include_path path_prefixes path_suffix =
   case find_first (fn prefix => File.exists (Path.append prefix path_suffix))
          path_prefixes of
@@ -705,6 +731,15 @@
   | NONE =>
     error ("Cannot find include file " ^ quote (Path.implode path_suffix))
 
+(* Ideally, to be in sync with TFF1, we should perform full type skolemization here.
+   But the problems originating from HOL systems are restricted to top-level
+   universal quantification for types. *)
+fun remove_leading_type_quantifiers (Quant (Forall, varlist, tptp_fmla)) =
+    (case filter_out (curry (op =) (SOME (Defined_type Type_Type)) o snd) varlist of
+      [] => remove_leading_type_quantifiers tptp_fmla
+    | varlist' => Quant (Forall, varlist', tptp_fmla))
+  | remove_leading_type_quantifiers tptp_fmla = tptp_fmla
+
 fun interpret_line config inc_list type_map const_map path_prefixes line thy =
   case line of
      Include (_, quoted_path, inc_list) =>
@@ -725,7 +760,7 @@
          case role of
            Role_Type =>
              let
-               val (tptp_ty, name) =
+               val ((tptp_type_vars, tptp_ty), name) =
                  if lang = THF then
                    (typeof_typing tptp_formula (*assuming tptp_formula is a typing*),
                     nameof_typing tptp_formula (*and that the LHS is a (atom) name*))
@@ -733,22 +768,15 @@
                    (typeof_tff_typing tptp_formula,
                     nameof_tff_typing tptp_formula)
              in
-               case tptp_ty of
-                 Defined_type Type_Type => (*add new type*)
+               case dest_fn_type tptp_ty of
+                 (tptp_binders, Defined_type Type_Type) => (*add new type*)
                    (*generate an Isabelle/HOL type to interpret this TPTP type,
                    and add it to both the Isabelle/HOL theory and to the type_map*)
                     let
                       val (type_map', thy') =
-                        declare_type
-                         config
-                         (Atom_type name, name)
-                         type_map
-                         thy
-                    in ((type_map',
-                         const_map,
-                         []),
-                        thy')
-                    end
+                        declare_type config
+                          (name, (name, length tptp_binders)) type_map thy
+                    in ((type_map', const_map, []), thy') end
 
                | _ => (*declaration of constant*)
                   (*Here we populate the map from constants to the Isabelle/HOL
@@ -758,7 +786,7 @@
                     (*make sure that the theory contains all the types appearing
                     in this constant's signature. Exception is thrown if encounter
                     an undeclared types.*)
-                    val (t, thy') =
+                    val (t as Const (name', _), thy') =
                       let
                         fun analyse_type thy thf_ty =
                            if #cautious config then
@@ -766,13 +794,13 @@
                                Fn_type (thf_ty1, thf_ty2) =>
                                  (analyse_type thy thf_ty1;
                                  analyse_type thy thf_ty2)
-                             | Atom_type ty_name =>
+                             | Atom_type (ty_name, _) =>
                                  if type_exists config thy ty_name then ()
                                  else
                                   raise MISINTERPRET_TYPE
                                    ("Type (in signature of " ^
                                       name ^ ") has not been declared",
-                                     Atom_type ty_name)
+                                     Atom_type (ty_name, []))
                              | _ => ()
                            else () (*skip test if we're not being cautious.*)
                       in
@@ -784,7 +812,19 @@
                     use "::". Note that here we use a constant's name rather
                     than the possibly- new one, since all references in the
                     theory will be to this name.*)
-                    val const_map' = ((name, t) :: const_map)
+
+                    val tf_tyargs = map tfree_of_var_type tptp_type_vars
+                    val isa_tyargs = Sign.const_typargs thy' (name', ty)
+                    val _ =
+                      if length isa_tyargs < length tf_tyargs then
+                        raise MISINTERPRET_SYMBOL
+                          ("Cannot handle phantom types for constant",
+                           Uninterpreted name)
+                      else
+                        ()
+                    val tyarg_perm =
+                      map (fn T => find_index (curry (op =) T) tf_tyargs) isa_tyargs
+                    val const_map' = ((name, (t, tyarg_perm)) :: const_map)
                   in ((type_map,(*type_map unchanged, since a constant's
                                   declaration shouldn't include any new types.*)
                        const_map',(*typing table of constant was extended*)
@@ -798,7 +838,7 @@
                (*gather interpreted term, and the map of types occurring in that term*)
                val (t, thy') =
                  interpret_formula config lang
-                  const_map [] type_map tptp_formula thy
+                  const_map [] type_map (remove_leading_type_quantifiers tptp_formula) thy
                  |>> HOLogic.mk_Trueprop
                (*type_maps grow monotonically, so return the newest value (type_mapped);
                there's no need to unify it with the type_map parameter.*)
--- a/src/HOL/TPTP/TPTP_Parser/tptp_lexyacc.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Parser/tptp_lexyacc.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -174,9 +174,9 @@
 \\000"
 ),
  (1, 
-"\000\000\000\000\000\000\000\000\000\142\144\000\000\143\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\142\138\133\000\101\089\088\083\082\081\080\078\077\072\070\057\
+"\000\000\000\000\000\000\000\000\000\143\145\000\000\144\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\143\139\134\000\101\089\088\083\082\081\080\078\077\072\070\057\
 \\048\048\048\048\048\048\048\048\048\048\045\000\039\037\036\033\
 \\030\029\029\029\029\029\029\029\029\029\029\029\029\029\029\029\
 \\029\029\029\029\029\029\029\029\029\029\029\028\000\027\026\000\
@@ -847,10 +847,10 @@
  (101, 
 "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\131\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\132\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\131\
 \\000\102\102\128\102\102\124\102\102\118\102\102\108\102\102\102\
 \\102\102\102\102\103\102\102\102\102\102\102\000\000\000\000\000\
 \\000"
@@ -1053,76 +1053,76 @@
 \\102\102\102\102\102\102\102\102\102\102\102\000\000\000\000\000\
 \\000"
 ),
- (131, 
-"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\132\132\132\132\132\132\132\132\132\132\132\132\132\132\132\
-\\132\132\132\132\132\132\132\132\132\132\132\000\000\000\000\000\
-\\000"
-),
  (132, 
 "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\132\132\132\132\132\132\132\132\132\132\000\000\000\000\000\000\
-\\000\132\132\132\132\132\132\132\132\132\132\132\132\132\132\132\
-\\132\132\132\132\132\132\132\132\132\132\132\000\000\000\000\132\
-\\000\132\132\132\132\132\132\132\132\132\132\132\132\132\132\132\
-\\132\132\132\132\132\132\132\132\132\132\132\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\133\133\133\133\133\133\133\133\133\133\133\133\133\133\133\
+\\133\133\133\133\133\133\133\133\133\133\133\000\000\000\000\000\
 \\000"
 ),
  (133, 
-"\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\000\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\137\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134"
+"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\133\133\133\133\133\133\133\133\133\133\000\000\000\000\000\000\
+\\000\133\133\133\133\133\133\133\133\133\133\133\133\133\133\133\
+\\133\133\133\133\133\133\133\133\133\133\133\000\000\000\000\133\
+\\000\133\133\133\133\133\133\133\133\133\133\133\133\133\133\133\
+\\133\133\133\133\133\133\133\133\133\133\133\000\000\000\000\000\
+\\000"
 ),
  (134, 
-"\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\136\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\135\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\134\
-\\134"
+"\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\000\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\138\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135"
 ),
  (135, 
+"\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\137\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\136\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\135\
+\\135"
+),
+ (136, 
 "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\134\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\134\000\000\000\
+\\000\000\135\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\135\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000"
 ),
- (138, 
+ (139, 
 "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\141\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\140\139\000\
+\\000\142\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\141\140\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000"
 ),
- (142, 
-"\000\000\000\000\000\000\000\000\000\142\000\000\000\000\000\000\
-\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
-\\142\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+ (143, 
+"\000\000\000\000\000\000\000\000\000\143\000\000\000\000\000\000\
+\\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
+\\143\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
@@ -1130,8 +1130,8 @@
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000"
 ),
- (143, 
-"\000\000\000\000\000\000\000\000\000\000\144\000\000\000\000\000\
+ (144, 
+"\000\000\000\000\000\000\000\000\000\000\145\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
 \\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
@@ -1143,12 +1143,12 @@
 ),
 (0, "")]
 fun f x = x 
-val s = map f (rev (tl (rev s))) 
+val s = List.map f (List.rev (tl (List.rev s))) 
 exception LexHackingError 
 fun look ((j,x)::r, i: int) = if i = j then x else look(r, i) 
   | look ([], i) = raise LexHackingError
 fun g {fin=x, trans=i} = {fin=x, trans=look(s,i)} 
-in Vector.fromList(map g 
+in Vector.fromList(List.map g 
 [{fin = [], trans = 0},
 {fin = [(N 2)], trans = 1},
 {fin = [(N 2)], trans = 1},
@@ -1184,7 +1184,7 @@
 {fin = [(N 12)], trans = 0},
 {fin = [(N 78)], trans = 33},
 {fin = [(N 21)], trans = 0},
-{fin = [(N 303)], trans = 0},
+{fin = [(N 306)], trans = 0},
 {fin = [(N 38)], trans = 0},
 {fin = [(N 31)], trans = 37},
 {fin = [(N 48)], trans = 0},
@@ -1193,10 +1193,10 @@
 {fin = [(N 68)], trans = 0},
 {fin = [(N 41)], trans = 42},
 {fin = [(N 45)], trans = 0},
-{fin = [(N 297)], trans = 0},
+{fin = [(N 300)], trans = 0},
 {fin = [(N 27)], trans = 45},
 {fin = [(N 36)], trans = 0},
-{fin = [(N 306)], trans = 0},
+{fin = [(N 309)], trans = 0},
 {fin = [(N 126)], trans = 48},
 {fin = [], trans = 49},
 {fin = [(N 104)], trans = 49},
@@ -1225,11 +1225,11 @@
 {fin = [(N 55)], trans = 0},
 {fin = [(N 123)], trans = 74},
 {fin = [(N 58)], trans = 75},
-{fin = [(N 294)], trans = 0},
+{fin = [(N 297)], trans = 0},
 {fin = [(N 29)], trans = 0},
-{fin = [(N 288)], trans = 78},
+{fin = [(N 291)], trans = 78},
 {fin = [(N 76)], trans = 0},
-{fin = [(N 290)], trans = 0},
+{fin = [(N 293)], trans = 0},
 {fin = [(N 82)], trans = 0},
 {fin = [(N 52)], trans = 0},
 {fin = [], trans = 83},
@@ -1280,19 +1280,20 @@
 {fin = [(N 278)], trans = 128},
 {fin = [(N 278)], trans = 129},
 {fin = [(N 209),(N 278)], trans = 102},
-{fin = [], trans = 131},
-{fin = [(N 286)], trans = 132},
-{fin = [], trans = 133},
+{fin = [(N 281)], trans = 0},
+{fin = [], trans = 132},
+{fin = [(N 289)], trans = 133},
 {fin = [], trans = 134},
 {fin = [], trans = 135},
+{fin = [], trans = 136},
 {fin = [(N 95)], trans = 0},
-{fin = [], trans = 135},
-{fin = [(N 33)], trans = 138},
-{fin = [(N 300)], trans = 0},
+{fin = [], trans = 136},
+{fin = [(N 33)], trans = 139},
+{fin = [(N 303)], trans = 0},
 {fin = [(N 64)], trans = 0},
 {fin = [(N 18)], trans = 0},
-{fin = [(N 2)], trans = 142},
-{fin = [(N 7)], trans = 143},
+{fin = [(N 2)], trans = 143},
+{fin = [(N 7)], trans = 144},
 {fin = [(N 7)], trans = 0}])
 end
 structure StartStates =
@@ -1328,7 +1329,7 @@
 	| action (i,(node::acts)::l) =
 		case node of
 		    Internal.N yyk => 
-			(let fun yymktext() = substring(!yyb,i0,i-i0)
+			(let fun yymktext() = String.substring(!yyb,i0,i-i0)
 			     val yypos = i0+ !yygone
 			open UserDeclarations Internal.StartStates
  in (yybufpos := i; case yyk of 
@@ -1369,15 +1370,16 @@
 | 27 => (col:=yypos-(!eolpos); T.COLON(!linep,!col))
 | 271 => let val yytext=yymktext() in col:=yypos-(!eolpos); T.LOWER_WORD(yytext,!linep,!col) end
 | 278 => let val yytext=yymktext() in col:=yypos-(!eolpos); T.DOLLAR_WORD(yytext,!linep,!col) end
-| 286 => let val yytext=yymktext() in col:=yypos-(!eolpos); T.DOLLAR_DOLLAR_WORD(yytext,!linep,!col) end
-| 288 => (col:=yypos-(!eolpos); T.PLUS(!linep,!col))
+| 281 => let val yytext=yymktext() in col:=yypos-(!eolpos); T.DOLLAR_WORD(yytext,!linep,!col) end
+| 289 => let val yytext=yymktext() in col:=yypos-(!eolpos); T.DOLLAR_DOLLAR_WORD(yytext,!linep,!col) end
 | 29 => (col:=yypos-(!eolpos); T.COMMA(!linep,!col))
-| 290 => (col:=yypos-(!eolpos); T.TIMES(!linep,!col))
-| 294 => (col:=yypos-(!eolpos); T.GENTZEN_ARROW(!linep,!col))
-| 297 => (col:=yypos-(!eolpos); T.SUBTYPE(!linep,!col))
-| 300 => (col:=yypos-(!eolpos); T.DEP_PROD(!linep,!col))
-| 303 => (col:=yypos-(!eolpos); T.DEP_SUM(!linep,!col))
-| 306 => (col:=yypos-(!eolpos); T.LET_TERM(!linep,!col))
+| 291 => (col:=yypos-(!eolpos); T.PLUS(!linep,!col))
+| 293 => (col:=yypos-(!eolpos); T.TIMES(!linep,!col))
+| 297 => (col:=yypos-(!eolpos); T.GENTZEN_ARROW(!linep,!col))
+| 300 => (col:=yypos-(!eolpos); T.SUBTYPE(!linep,!col))
+| 303 => (col:=yypos-(!eolpos); T.DEP_PROD(!linep,!col))
+| 306 => (col:=yypos-(!eolpos); T.DEP_SUM(!linep,!col))
+| 309 => (col:=yypos-(!eolpos); T.LET_TERM(!linep,!col))
 | 31 => (col:=yypos-(!eolpos); T.EQUALS(!linep,!col))
 | 33 => (col:=yypos-(!eolpos); T.EXCLAMATION(!linep,!col))
 | 36 => (col:=yypos-(!eolpos); T.LET(!linep,!col))
@@ -1414,14 +1416,14 @@
 	     if trans = #trans(Vector.sub(Internal.tab,0))
 	       then action(l,NewAcceptingLeaves
 ) else	    let val newchars= if !yydone then "" else yyinput 1024
-	    in if (size newchars)=0
+	    in if (String.size newchars)=0
 		  then (yydone := true;
 		        if (l=i0) then UserDeclarations.eof yyarg
 		                  else action(l,NewAcceptingLeaves))
 		  else (if i0=l then yyb := newchars
-		     else yyb := substring(!yyb,i0,l-i0)^newchars;
+		     else yyb := String.substring(!yyb,i0,l-i0)^newchars;
 		     yygone := !yygone+i0;
-		     yybl := size (!yyb);
+		     yybl := String.size (!yyb);
 		     scan (s,AcceptingLeaves,l-i0,0))
 	    end
 	  else let val NewChar = Char.ord(String.sub(!yyb,l))
@@ -1432,7 +1434,7 @@
 	end
 	end
 (*
-	val start= if substring(!yyb,!yybufpos-1,1)="\n"
+	val start= if String.substring(!yyb,!yybufpos-1,1)="\n"
 then !yybegin+1 else !yybegin
 *)
 	in scan(!yybegin (* start *),nil,!yybufpos,!yybufpos)
@@ -3604,7 +3606,7 @@
        fun f i =
             if i=numstates then g i
             else (Array.update(memo,i,SHIFT (STATE i)); f (i+1))
-          in f 0 handle Subscript => ()
+          in f 0 handle General.Subscript => ()
           end
 in
 val entry_to_action = fn 0 => ACCEPT | 1 => ERROR | j => Array.sub(memo,(j-2))
@@ -3614,7 +3616,7 @@
 val actionRowNumbers = string_to_list actionRowNumbers
 val actionT = let val actionRowLookUp=
 let val a=Array.fromList(actionRows) in fn i=>Array.sub(a,i) end
-in Array.fromList(map actionRowLookUp actionRowNumbers)
+in Array.fromList(List.map actionRowLookUp actionRowNumbers)
 end
 in LrTable.mkLrTable {actions=actionT,gotos=gotoT,numRules=numrules,
 numStates=numstates,initialState=STATE 0}
@@ -4851,7 +4853,7 @@
 end
 |  ( 135, ( ( _, ( MlyValue.atomic_word atomic_word, atomic_word1left,
  atomic_word1right)) :: rest671)) => let val  result = 
-MlyValue.tff_atomic_type (( Atom_type atomic_word ))
+MlyValue.tff_atomic_type (( Atom_type (atomic_word, []) ))
  in ( LrTable.NT 79, ( result, atomic_word1left, atomic_word1right), 
 rest671)
 end
@@ -5316,6 +5318,7 @@
   | "$real" => Type_Real
   | "$rat" => Type_Rat
   | "$int" => Type_Int
+  | "$_" => Type_Dummy
   | thing => raise UNRECOGNISED_SYMBOL ("defined_type", thing)
 )
 )
@@ -5590,6 +5593,7 @@
   | "$real" => TypeSymbol Type_Real
   | "$rat" => TypeSymbol Type_Rat
   | "$tType" => TypeSymbol Type_Type
+  | "$_" => TypeSymbol Type_Dummy
 
   | "$true" => Interpreted_Logic True
   | "$false" => Interpreted_Logic False
--- a/src/HOL/TPTP/TPTP_Parser/tptp_syntax.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Parser/tptp_syntax.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -66,7 +66,7 @@
     Forall | Exists | Epsilon | Iota | Lambda | Dep_Prod | Dep_Sum
 
   and tptp_base_type =
-    Type_Ind | Type_Bool | Type_Type | Type_Int | Type_Rat | Type_Real
+    Type_Ind | Type_Bool | Type_Type | Type_Int | Type_Rat | Type_Real | Type_Dummy
 
   and symbol =
       Uninterpreted of string
@@ -111,7 +111,8 @@
   and tptp_type =
       Prod_type of tptp_type * tptp_type
     | Fn_type of tptp_type * tptp_type
-    | Atom_type of string
+    | Atom_type of string * tptp_type list
+    | Var_type of string
     | Defined_type of tptp_base_type
     | Sum_type of tptp_type * tptp_type (*only THF*)
     | Fmla_type of tptp_formula
@@ -141,8 +142,6 @@
 
   val status_to_string : status_value -> string
 
-  val nameof_tff_atom_type : tptp_type -> string
-
   val pos_of_line : tptp_line -> position
 
   (*Returns the list of all files included in a directory and its
@@ -216,7 +215,7 @@
     Forall | Exists | Epsilon | Iota | Lambda | Dep_Prod | Dep_Sum
 
   and tptp_base_type =
-    Type_Ind | Type_Bool | Type_Type | Type_Int | Type_Rat | Type_Real
+    Type_Ind | Type_Bool | Type_Type | Type_Int | Type_Rat | Type_Real | Type_Dummy
 
   and symbol =
       Uninterpreted of string
@@ -261,7 +260,8 @@
   and tptp_type =
       Prod_type of tptp_type * tptp_type
     | Fn_type of tptp_type * tptp_type
-    | Atom_type of string
+    | Atom_type of string * tptp_type list
+    | Var_type of string
     | Defined_type of tptp_base_type
     | Sum_type of tptp_type * tptp_type
     | Fmla_type of tptp_formula
@@ -287,9 +287,6 @@
 
 fun debug f x = if Options.default_bool @{system_option ML_exception_trace} then (f x; ()) else ()
 
-fun nameof_tff_atom_type (Atom_type str) = str
-  | nameof_tff_atom_type _ = raise TPTP_SYNTAX "nameof_tff_atom_type called on non-atom type"
-
 fun pos_of_line tptp_line =
   case tptp_line of
       Annotated_Formula (position, _, _, _, _, _) => position
@@ -428,6 +425,7 @@
   | string_of_tptp_base_type Type_Int = "$int"
   | string_of_tptp_base_type Type_Rat = "$rat"
   | string_of_tptp_base_type Type_Real = "$real"
+  | string_of_tptp_base_type Type_Dummy = "$_"
 
 and string_of_interpreted_symbol x =
   case x of
@@ -517,7 +515,10 @@
       string_of_tptp_type tptp_type1 ^ " * " ^ string_of_tptp_type tptp_type2
   | string_of_tptp_type (Fn_type (tptp_type1, tptp_type2)) =
       string_of_tptp_type tptp_type1 ^ " > " ^ string_of_tptp_type tptp_type2
-  | string_of_tptp_type (Atom_type str) = str
+  | string_of_tptp_type (Atom_type (str, [])) = str
+  | string_of_tptp_type (Atom_type (str, tptp_types)) =
+      str ^ "(" ^ commas (map string_of_tptp_type tptp_types) ^ ")"
+  | string_of_tptp_type (Var_type str) = str
   | string_of_tptp_type (Defined_type tptp_base_type) =
       string_of_tptp_base_type tptp_base_type
   | string_of_tptp_type (Sum_type (tptp_type1, tptp_type2)) = ""
@@ -565,6 +566,7 @@
   | latex_of_tptp_base_type Type_Int = "\\\\mathsf{int} "
   | latex_of_tptp_base_type Type_Rat = "\\\\mathsf{rat} "
   | latex_of_tptp_base_type Type_Real = "\\\\mathsf{real} "
+  | latex_of_tptp_base_type Type_Dummy = "\\\\mathsf{\\\\_} "
 
 and latex_of_interpreted_symbol x =
   case x of
@@ -687,7 +689,10 @@
       latex_of_tptp_type tptp_type1 ^ " \\\\times " ^ latex_of_tptp_type tptp_type2
   | latex_of_tptp_type (Fn_type (tptp_type1, tptp_type2)) =
       latex_of_tptp_type tptp_type1 ^ " \\\\to " ^ latex_of_tptp_type tptp_type2
-  | latex_of_tptp_type (Atom_type str) = "\\\\mathrm{" ^ str ^ "}"
+  | latex_of_tptp_type (Atom_type (str, [])) = "\\\\mathrm{" ^ str ^ "}"
+  | latex_of_tptp_type (Atom_type (str, tptp_types)) =
+    "\\\\mathrm{" ^ str ^ "}(" ^ commas (map latex_of_tptp_type tptp_types) ^ ")"
+  | latex_of_tptp_type (Var_type str) = "\\\\mathrm{" ^ str ^ "}"
   | latex_of_tptp_type (Defined_type tptp_base_type) =
       latex_of_tptp_base_type tptp_base_type
   | latex_of_tptp_type (Sum_type (tptp_type1, tptp_type2)) = ""
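
A minimal sketch of the extended type representation: a parameterised TPTP atom type such as map(A, $int) is now built from Atom_type together with the new Var_type constructor (the type name "map" is purely illustrative).

val example_atom_type =
  Atom_type ("map", [Var_type "A", Defined_type Type_Int])
(* string_of_tptp_type example_atom_type = "map(A, $int)" *)
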
--- a/src/HOL/TPTP/TPTP_Proof_Reconstruction.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Proof_Reconstruction.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -2116,7 +2116,10 @@
   in
     case inference_name of
       "fo_atp_e" =>
+        HEADGOAL (Metis_Tactic.metis_tac [] ATP_Problem_Generate.combs_or_liftingN ctxt [])
+        (*NOTE: To treat E as an oracle, use the following line:
         HEADGOAL (etac (oracle_based_reconstruction_tac ctxt prob_name node))
+        *)
     | "copy" =>
          HEADGOAL
           (atac
@@ -2284,4 +2287,4 @@
   end
 *}
 
-end
\ No newline at end of file
+end
--- a/src/HOL/TPTP/TPTP_Proof_Reconstruction_Test.thy	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/TPTP_Proof_Reconstruction_Test.thy	Sun Aug 10 14:34:43 2014 +0200
@@ -249,7 +249,7 @@
 (*given a list of tactics to be applied in sequence (i.e., they
   follow a skeleton), we build a single tactic, interleaving
   some tracing info to help with debugging.*)
-fun step_by_step_tacs verbose (thm_tacs : (thm * tactic) list) : tactic =
+fun step_by_step_tacs ctxt verbose (thm_tacs : (thm * tactic) list) : tactic =
     let
       fun interleave_tacs [] [] = all_tac
         | interleave_tacs (tac1 :: tacs1) (tac2 :: tacs2) =
@@ -258,7 +258,7 @@
       val thms_to_traceprint =
         map (fn thm => fn st =>
               (*FIXME uses makestring*)
-              print_tac (PolyML.makestring thm) st)
+              print_tac ctxt (PolyML.makestring thm) st)
 
     in
       if verbose then
@@ -272,9 +272,9 @@
 
 ML {*
 (*apply step_by_step_tacs to all problems under test*)
-val narrated_tactics =
+fun narrated_tactics ctxt =
  map (map (#3 #> the)
-      #> step_by_step_tacs false)
+      #> step_by_step_tacs ctxt false)
    the_tactics;
 
 (*produce thm*)
@@ -284,7 +284,7 @@
   map (fn (prob_name, tac) =>
          TPTP_Reconstruct.reconstruct @{context}
            (fn _ => tac) prob_name)
-    (ListPair.zip (prob_names, narrated_tactics))
+    (ListPair.zip (prob_names, narrated_tactics @{context}))
 *}
 
 
--- a/src/HOL/TPTP/atp_problem_import.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/atp_problem_import.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -13,7 +13,8 @@
   val refute_tptp_file : theory -> int -> string -> unit
   val can_tac : Proof.context -> tactic -> term -> bool
   val SOLVE_TIMEOUT :  int -> string -> tactic -> tactic
-  val atp_tac : Proof.context -> int -> (string * string) list -> int -> string -> int -> tactic
+  val atp_tac : Proof.context -> int -> (string * string) list -> int -> term list -> string ->
+    int -> tactic
   val smt_solver_tac : string -> Proof.context -> int -> tactic
   val freeze_problem_consts : theory -> term -> term
   val make_conj : term list * term list -> term list -> term
@@ -45,20 +46,18 @@
 fun read_tptp_file thy postproc file_name =
   let
     fun has_role role (_, role', _, _) = (role' = role)
-    fun get_prop (name, role, P, info) =
-      let val P' = P |> Logic.varify_global |> close_form in
-        (name, P') |> postproc
-      end
+    fun get_prop f (name, _, P, _) = P |> f |> close_form |> pair name |> postproc
+
     val path = exploded_absolute_path file_name
     val ((_, _, problem), thy) =
-      TPTP_Interpret.interpret_file true [Path.dir path, Path.explode "$TPTP"]
-                                    path [] [] thy
-    val (conjs, defs_and_nondefs) =
-      problem |> List.partition (has_role TPTP_Syntax.Role_Conjecture)
-              ||> List.partition (has_role TPTP_Syntax.Role_Definition)
+      TPTP_Interpret.interpret_file true [Path.dir path, Path.explode "$TPTP"] path [] [] thy
+    val (conjs, defs_and_nondefs) = problem
+      |> List.partition (has_role TPTP_Syntax.Role_Conjecture)
+      ||> List.partition (has_role TPTP_Syntax.Role_Definition)
   in
-    (map get_prop conjs, pairself (map get_prop) defs_and_nondefs,
-     thy |> Proof_Context.init_global)
+    (map (get_prop I) conjs,
+     pairself (map (get_prop Logic.varify_global)) defs_and_nondefs,
+     Named_Target.theory_init thy)
   end
 
 
@@ -76,12 +75,10 @@
   let
     val (conjs, (defs, nondefs), ctxt) = read_tptp_file thy snd file_name
     val thy = Proof_Context.theory_of ctxt
-    val (defs, pseudo_defs) =
-      defs |> map (ATP_Util.abs_extensionalize_term ctxt
-                   #> aptrueprop (hol_open_form I))
-           |> List.partition (is_legitimate_tptp_def
-                              o perhaps (try HOLogic.dest_Trueprop)
-                              o ATP_Util.unextensionalize_def)
+    val (defs, pseudo_defs) = defs
+      |> map (ATP_Util.abs_extensionalize_term ctxt #> aptrueprop (hol_open_form I))
+      |> List.partition (is_legitimate_tptp_def o perhaps (try HOLogic.dest_Trueprop)
+        o ATP_Util.unextensionalize_def)
     val nondefs = pseudo_defs @ nondefs
     val state = Proof.init ctxt
     val params =
@@ -104,8 +101,8 @@
     val step = 0
     val subst = []
   in
-    Nitpick.pick_nits_in_term state params Nitpick.TPTP i n step subst
-        defs nondefs (case conjs of conj :: _ => conj | [] => @{prop True});
+    Nitpick.pick_nits_in_term state params Nitpick.TPTP i n step subst defs nondefs
+      (case conjs of conj :: _ => conj | [] => @{prop True});
     ()
   end
 
@@ -127,7 +124,7 @@
        ("maxvars", "100000")]
   in
     Refute.refute_term ctxt params (defs @ nondefs)
-        (case conjs of conj :: _ => conj | [] => @{prop True})
+      (case conjs of conj :: _ => conj | [] => @{prop True})
     |> print_szs_of_outcome (not (null conjs))
   end
 
@@ -138,17 +135,16 @@
 
 fun SOLVE_TIMEOUT seconds name tac st =
   let
-    val _ = Output.urgent_message ("running " ^ name ^ " for " ^
-                                   string_of_int seconds ^ " s")
+    val _ = Output.urgent_message ("running " ^ name ^ " for " ^ string_of_int seconds ^ " s")
     val result =
-      TimeLimit.timeLimit (Time.fromSeconds seconds)
-        (fn () => SINGLE (SOLVE tac) st) ()
-      handle TimeLimit.TimeOut => NONE
-        | ERROR _ => NONE
+      TimeLimit.timeLimit (Time.fromSeconds seconds) (fn () => SINGLE (SOLVE tac) st) ()
+      handle
+        TimeLimit.TimeOut => NONE
+      | ERROR _ => NONE
   in
-    case result of
+    (case result of
       NONE => (Output.urgent_message ("FAILURE: " ^ name); Seq.empty)
-    | SOME st' => (Output.urgent_message ("SUCCESS: " ^ name); Seq.single st')
+    | SOME st' => (Output.urgent_message ("SUCCESS: " ^ name); Seq.single st'))
   end
 
 fun nitpick_finite_oracle_tac ctxt timeout i th =
@@ -157,6 +153,7 @@
       | is_safe @{typ prop} = true
       | is_safe @{typ bool} = true
       | is_safe _ = false
+
     val conj = Thm.term_of (Thm.cprem_of th i)
   in
     if exists_type (not o is_safe) conj then
@@ -179,40 +176,43 @@
         val step = 0
         val subst = []
         val (outcome, _) =
-          Nitpick.pick_nits_in_term state params Nitpick.Normal i n step subst
-                                    [] [] conj
-      in if outcome = "none" then ALLGOALS Skip_Proof.cheat_tac th else Seq.empty end
+          Nitpick.pick_nits_in_term state params Nitpick.Normal i n step subst [] [] conj
+      in
+        if outcome = "none" then ALLGOALS Skip_Proof.cheat_tac th else Seq.empty
+      end
   end
 
-fun atp_tac ctxt completeness override_params timeout prover =
+fun atp_tac ctxt completeness override_params timeout assms prover =
   let
-    val ctxt =
-      ctxt |> Config.put Sledgehammer_Prover_ATP.atp_completish (completeness > 0)
+    val thy = Proof_Context.theory_of ctxt
+    val assm_ths0 = map (Skip_Proof.make_thm thy) assms
+    val ((assm_name, _), ctxt) = ctxt
+      |> Config.put Sledgehammer_Prover_ATP.atp_completish (completeness > 0)
+      |> Local_Theory.note ((@{binding thms}, []), assm_ths0)
+
     fun ref_of th = (Facts.named (Thm.derivation_name th), [])
+    val ref_of_assms = (Facts.named assm_name, [])
   in
     Sledgehammer_Tactics.sledgehammer_as_oracle_tac ctxt
-        ([("debug", if debug then "true" else "false"),
-          ("overlord", if overlord then "true" else "false"),
-          ("provers", prover),
-          ("timeout", string_of_int timeout)] @
-         (if completeness > 0 then
-            [("type_enc",
-              if completeness = 1 then "mono_native" else "poly_tags")]
-          else
-            []) @
-         override_params)
-        {add = map ref_of [ext, @{thm someI}], del = [], only = true}
+      ([("debug", if debug then "true" else "false"),
+        ("overlord", if overlord then "true" else "false"),
+        ("provers", prover),
+        ("timeout", string_of_int timeout)] @
+       (if completeness > 0 then
+          [("type_enc", if completeness = 1 then "mono_native" else "poly_tags")]
+        else
+          []) @
+       override_params)
+      {add = ref_of_assms :: map ref_of [ext, @{thm someI}], del = [], only = true} []
   end
 
-fun sledgehammer_tac demo ctxt timeout i =
+fun sledgehammer_tac demo ctxt timeout assms i =
   let
     val frac = if demo then 16 else 12
     fun slice mult completeness prover =
       SOLVE_TIMEOUT (mult * timeout div frac)
-          (prover ^
-           (if completeness > 0 then "(" ^ string_of_int completeness ^ ")"
-            else ""))
-          (atp_tac ctxt completeness [] (mult * timeout div frac) prover i)
+        (prover ^ (if completeness > 0 then "(" ^ string_of_int completeness ^ ")" else ""))
+        (atp_tac ctxt completeness [] (mult * timeout div frac) assms prover i)
   in
     slice 2 0 ATP_Proof.spassN
     ORELSE slice 2 0 ATP_Proof.vampireN
@@ -235,15 +235,12 @@
     val ctxt = ctxt |> Context.proof_map (SMT_Config.select_solver solver)
   in SMT_Solver.smt_tac ctxt [] end
 
-fun auto_etc_tac ctxt timeout i =
-  SOLVE_TIMEOUT (timeout div 20) "nitpick"
-      (nitpick_finite_oracle_tac ctxt (timeout div 20) i)
-  ORELSE SOLVE_TIMEOUT (timeout div 10) "simp"
-      (asm_full_simp_tac ctxt i)
+fun auto_etc_tac ctxt timeout assms i =
+  SOLVE_TIMEOUT (timeout div 20) "nitpick" (nitpick_finite_oracle_tac ctxt (timeout div 20) i)
+  ORELSE SOLVE_TIMEOUT (timeout div 10) "simp" (asm_full_simp_tac ctxt i)
   ORELSE SOLVE_TIMEOUT (timeout div 10) "blast" (blast_tac ctxt i)
   ORELSE SOLVE_TIMEOUT (timeout div 5) "auto+spass"
-      (auto_tac ctxt
-       THEN ALLGOALS (atp_tac ctxt 0 [] (timeout div 5) ATP_Proof.spassN))
+    (auto_tac ctxt THEN ALLGOALS (atp_tac ctxt 0 [] (timeout div 5) assms ATP_Proof.spassN))
   ORELSE SOLVE_TIMEOUT (timeout div 10) "fast" (fast_tac ctxt i)
   ORELSE SOLVE_TIMEOUT (timeout div 20) "z3" (smt_solver_tac "z3" ctxt i)
   ORELSE SOLVE_TIMEOUT (timeout div 20) "cvc3" (smt_solver_tac "cvc3" ctxt i)
@@ -258,45 +255,44 @@
    unfold "definitions" of free variables than of constants (cf. PUZ107^5). *)
 fun freeze_problem_consts thy =
   let val is_problem_const = String.isPrefix (problem_const_prefix thy) in
-    map_aterms (fn t as Const (s, T) =>
-                   if is_problem_const s then Free (Long_Name.base_name s, T)
-                   else t
-                 | t => t)
+    map_aterms
+      (fn t as Const (s, T) => if is_problem_const s then Free (Long_Name.base_name s, T) else t
+        | t => t)
   end
 
 fun make_conj (defs, nondefs) conjs =
-  Logic.list_implies (rev defs @ rev nondefs,
-                      case conjs of conj :: _ => conj | [] => @{prop False})
+  Logic.list_implies (rev defs @ rev nondefs, case conjs of conj :: _ => conj | [] => @{prop False})
 
 fun print_szs_of_success conjs success =
   Output.urgent_message ("% SZS status " ^
-                         (if success then
-                            if null conjs then "Unsatisfiable" else "Theorem"
-                          else
-                            "Unknown"))
+    (if success then
+       if null conjs then "Unsatisfiable" else "Theorem"
+     else
+       "Unknown"))
 
 fun sledgehammer_tptp_file thy timeout file_name =
   let
-    val (conjs, assms, ctxt) =
-      read_tptp_file thy (freeze_problem_consts thy o snd) file_name
-    val conj = make_conj assms conjs
+    val (conjs, assms, ctxt) = read_tptp_file thy (freeze_problem_consts thy o snd) file_name
+    val conj = make_conj ([], []) conjs
+    val assms = op @ assms
   in
-    can_tac ctxt (sledgehammer_tac true ctxt timeout 1) conj
+    can_tac ctxt (sledgehammer_tac true ctxt timeout assms 1) conj
     |> print_szs_of_success conjs
   end
 
 fun generic_isabelle_tptp_file demo thy timeout file_name =
   let
-    val (conjs, assms, ctxt) =
-      read_tptp_file thy (freeze_problem_consts thy o snd) file_name
-    val conj = make_conj assms conjs
+    val (conjs, assms, ctxt) = read_tptp_file thy (freeze_problem_consts thy o snd) file_name
+    val conj = make_conj ([], []) conjs
+    val full_conj = make_conj assms conjs
+    val assms = op @ assms
     val (last_hope_atp, last_hope_completeness) =
       if demo then (ATP_Proof.satallaxN, 0) else (ATP_Proof.vampireN, 2)
   in
-    (can_tac ctxt (auto_etc_tac ctxt (timeout div 2) 1) conj orelse
-     can_tac ctxt (sledgehammer_tac demo ctxt (timeout div 2) 1) conj orelse
+    (can_tac ctxt (auto_etc_tac ctxt (timeout div 2) assms 1) full_conj orelse
+     can_tac ctxt (sledgehammer_tac demo ctxt (timeout div 2) assms 1) conj orelse
      can_tac ctxt (SOLVE_TIMEOUT timeout (last_hope_atp ^ "(*)")
-         (atp_tac ctxt last_hope_completeness [] timeout last_hope_atp 1)) conj)
+       (atp_tac ctxt last_hope_completeness [] timeout assms last_hope_atp 1)) full_conj)
     |> print_szs_of_success conjs
   end
 
@@ -322,7 +318,8 @@
     val uncurried_aliases = false
     val readable_names = true
     val presimp = true
-    val facts = map (apfst (rpair (Global, Non_Rec_Def))) defs @
+    val facts =
+      map (apfst (rpair (Global, Non_Rec_Def))) defs @
       map (apfst (rpair (Global, General))) nondefs
     val (atp_problem, _, _, _) =
       generate_atp_problem ctxt format Hypothesis (type_enc_of_string Strict type_enc) Translator
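
A minimal sketch of the revised atp_tac interface, which now takes the problem's assumptions as a term list before the prover name; the wrapper name spass_with_assms_tac is hypothetical and mirrors the calls in auto_etc_tac above.

(* hypothetical wrapper: completeness 0, no parameter overrides *)
fun spass_with_assms_tac ctxt timeout assms =
  atp_tac ctxt 0 [] timeout assms ATP_Proof.spassN
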
--- a/src/HOL/TPTP/sledgehammer_tactics.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/TPTP/sledgehammer_tactics.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -9,10 +9,10 @@
 sig
   type fact_override = Sledgehammer_Fact.fact_override
 
-  val sledgehammer_with_metis_tac :
-    Proof.context -> (string * string) list -> fact_override -> int -> tactic
-  val sledgehammer_as_oracle_tac :
-    Proof.context -> (string * string) list -> fact_override -> int -> tactic
+  val sledgehammer_with_metis_tac : Proof.context -> (string * string) list -> fact_override ->
+    thm list -> int -> tactic
+  val sledgehammer_as_oracle_tac : Proof.context -> (string * string) list -> fact_override ->
+    thm list -> int -> tactic
 end;
 
 structure Sledgehammer_Tactics : SLEDGEHAMMER_TACTICS =
@@ -26,7 +26,7 @@
 open Sledgehammer_MaSh
 open Sledgehammer_Commands
 
-fun run_prover override_params fact_override i n ctxt goal =
+fun run_prover override_params fact_override chained i n ctxt goal =
   let
     val thy = Proof_Context.theory_of ctxt
     val mode = Normal
@@ -39,37 +39,38 @@
     val reserved = reserved_isar_keyword_table ()
     val css_table = clasimpset_rule_table_of ctxt
     val facts =
-      nearly_all_facts ctxt ho_atp fact_override reserved css_table [] hyp_ts concl_t
-      |> relevant_facts ctxt params name
-             (the_default default_max_facts max_facts) fact_override hyp_ts
-             concl_t
+      nearly_all_facts ctxt ho_atp fact_override reserved css_table chained hyp_ts concl_t
+      |> relevant_facts ctxt params name (the_default default_max_facts max_facts) fact_override
+        hyp_ts concl_t
       |> hd |> snd
     val problem =
       {comment = "", state = Proof.init ctxt, goal = goal, subgoal = i, subgoal_count = n,
        factss = [("", facts)]}
   in
-    (case prover params (K (K (K ""))) problem of
+    (case prover params problem of
       {outcome = NONE, used_facts, ...} => used_facts |> map fst |> SOME
     | _ => NONE)
     handle ERROR message => (warning ("Error: " ^ message ^ "\n"); NONE)
   end
 
-fun sledgehammer_with_metis_tac ctxt override_params fact_override i th =
+fun sledgehammer_with_metis_tac ctxt override_params fact_override chained i th =
   let val override_params = override_params @ [("preplay_timeout", "0")] in
-    case run_prover override_params fact_override i i ctxt th of
+    (case run_prover override_params fact_override chained i i ctxt th of
       SOME facts =>
       Metis_Tactic.metis_tac [] ATP_Problem_Generate.combs_or_liftingN ctxt
-          (maps (thms_of_name ctxt) facts) i th
-    | NONE => Seq.empty
+        (maps (thms_of_name ctxt) facts) i th
+    | NONE => Seq.empty)
   end
 
-fun sledgehammer_as_oracle_tac ctxt override_params fact_override i th =
+fun sledgehammer_as_oracle_tac ctxt override_params fact_override chained i th =
   let
     val override_params =
       override_params @
       [("preplay_timeout", "0"),
        ("minimize", "false")]
-    val xs = run_prover override_params fact_override i i ctxt th
-  in if is_some xs then ALLGOALS Skip_Proof.cheat_tac th else Seq.empty end
+    val xs = run_prover override_params fact_override chained i i ctxt th
+  in
+    if is_some xs then ALLGOALS Skip_Proof.cheat_tac th else Seq.empty
+  end
 
 end;
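
A minimal sketch of a call with the new chained-facts argument; passing an empty list recovers the previous behaviour, and the empty fact_override record follows the {add, del, only} shape used earlier in this changeset (the name plain_sh_metis_tac is hypothetical).

(* hypothetical invocation: no overrides, no fact override, no chained facts *)
fun plain_sh_metis_tac ctxt =
  sledgehammer_with_metis_tac ctxt [] {add = [], del = [], only = false} [] 1
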
--- a/src/HOL/Tools/ATP/atp_problem.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_problem.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -76,6 +76,8 @@
   val tptp_ho_exists : string
   val tptp_choice : string
   val tptp_ho_choice : string
+  val tptp_hilbert_choice : string
+  val tptp_hilbert_the : string
   val tptp_not : string
   val tptp_and : string
   val tptp_not_and : string
@@ -239,6 +241,8 @@
 val tptp_iff = "<=>"
 val tptp_not_iff = "<~>"
 val tptp_app = "@"
+val tptp_hilbert_choice = "@+"
+val tptp_hilbert_the = "@-"
 val tptp_not_infix = "!"
 val tptp_equal = "="
 val tptp_not_equal = "!="
@@ -533,11 +537,12 @@
     " : " ^ string_of_type format ty ^ ").\n"
   | tptp_string_of_line format (Formula ((ident, alt), kind, phi, source, _)) =
     tptp_string_of_format format ^ "(" ^ ident ^ ", " ^
-    tptp_string_of_role kind ^ "," ^ maybe_alt alt ^
-    "\n    (" ^ tptp_string_of_formula format phi ^ ")" ^
+    tptp_string_of_role kind ^ "," ^ "\n    (" ^
+    tptp_string_of_formula format phi ^ ")" ^
     (case source of
-       SOME tm => ", " ^ tptp_string_of_term format tm
-     | NONE => "") ^ ").\n"
+      SOME tm => ", " ^ tptp_string_of_term format tm
+    | NONE => "") ^
+    ")." ^ maybe_alt alt ^ "\n"
 
 fun tptp_lines format =
   maps (fn (_, []) => []
@@ -638,8 +643,7 @@
     fun formula pred (Formula ((ident, alt), kind, phi, _, info)) =
         if pred kind then
           let val rank = extract_isabelle_rank info in
-            "formula(" ^ dfg_string_of_formula poly gen_simp info phi ^
-            ", " ^ ident ^
+            "formula(" ^ dfg_string_of_formula poly gen_simp info phi ^ ", " ^ ident ^
             (if rank = default_rank then "" else ", " ^ string_of_int rank) ^
             ")." ^ maybe_alt alt
             |> SOME
--- a/src/HOL/Tools/ATP/atp_proof.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_proof.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -2,6 +2,7 @@
     Author:     Lawrence C. Paulson, Cambridge University Computer Laboratory
     Author:     Claire Quigley, Cambridge University Computer Laboratory
     Author:     Jasmin Blanchette, TU Muenchen
+    Author:     Mathias Fleury, ENS Rennes
 
 Abstract representation of ATP proofs and TSTP/SPASS syntax.
 *)
@@ -67,7 +68,6 @@
   val remote_prefix : string
 
   val agsyhol_core_rule : string
-  val satallax_core_rule : string
   val spass_input_rule : string
   val spass_pre_skolemize_rule : string
   val spass_skolemize_rule : string
@@ -84,10 +84,25 @@
   val scan_general_id : string list -> string * string list
   val parse_formula : string list ->
     (string, string atp_type, (string, string atp_type) atp_term, string) atp_formula * string list
-  val atp_proof_of_tstplike_proof : string -> string atp_problem -> string -> string atp_proof
   val clean_up_atp_proof_dependencies : string atp_proof -> string atp_proof
   val map_term_names_in_atp_proof : (string -> string) -> string atp_proof -> string atp_proof
   val nasty_atp_proof : string Symtab.table -> string atp_proof -> string atp_proof
+
+  val skip_term: string list -> string * string list
+  val parse_thf0_formula :string list ->
+    ('a, 'b, (string, string ATP_Problem.atp_type) ATP_Problem.atp_term, 'c) ATP_Problem.atp_formula *
+    string list
+  val dummy_atype : string ATP_Problem.atp_type
+  val role_of_tptp_string: string -> ATP_Problem.atp_formula_role
+  val parse_line: string -> ('a * string ATP_Problem.atp_problem_line list) list ->
+    string list -> ((string * string list) * ATP_Problem.atp_formula_role *
+    (string, 'b, (string, string ATP_Problem.atp_type) ATP_Problem.atp_term,
+      'c) ATP_Problem.atp_formula
+    * string * (string * 'd list) list) list * string list
+  val core_inference : 'a -> 'b -> ('b * 'b list) * ATP_Problem.atp_formula_role *
+    ('c, 'd, (string, 'e) ATP_Problem.atp_term, 'f) ATP_Problem.atp_formula * 'a * 'g list
+  val vampire_step_name_ord : (string * 'a) * (string * 'a) -> order
+  val core_of_agsyhol_proof :  string -> string list option
 end;
 
 structure ATP_Proof : ATP_PROOF =
@@ -122,7 +137,6 @@
 val remote_prefix = "remote_"
 
 val agsyhol_core_rule = "__agsyhol_core" (* arbitrary *)
-val satallax_core_rule = "__satallax_core" (* arbitrary *)
 val spass_input_rule = "Inp"
 val spass_pre_skolemize_rule = "__Sko0" (* arbitrary *)
 val spass_skolemize_rule = "__Sko" (* arbitrary *)
@@ -277,7 +291,7 @@
   let
     fun skip _ accum [] = (accum, [])
       | skip n accum (ss as s :: ss') =
-        if s = "," andalso n = 0 then
+        if (s = "," orelse s = ".") andalso n = 0 then
           (accum, ss)
         else if member (op =) [")", "]"] s then
           if n = 0 then (accum, ss) else skip (n - 1) (s :: accum) ss'
@@ -291,11 +305,12 @@
 
 datatype source =
   File_Source of string * string option |
-  Inference_Source of string * string list
+  Inference_Source of string * string list |
+  Introduced_Source of string
 
 val dummy_phi = AAtom (ATerm (("", []), []))
 val dummy_inference = Inference_Source ("", [])
-val dummy_atype = AType(("", []), [])
+val dummy_atype = AType (("", []), [])
 
 (* "skip_term" is there to cope with Waldmeister nonsense such as "theory(equality)". *)
 fun parse_dependency x =
@@ -312,13 +327,13 @@
   (Scan.this_string "inference" |-- $$ "(" |-- scan_general_id
    --| skip_term --| $$ "," --| skip_term --| $$ "," --| $$ "["
    -- parse_dependencies --| $$ "]" --| $$ ")") x
-and skip_introduced x =
-  (Scan.this_string "introduced" |-- $$ "(" |-- skip_term
-   -- Scan.repeat ($$ "," |-- skip_term) --| $$ ")") x
+and parse_introduced_source x =
+  (Scan.this_string "introduced" |-- $$ "(" |-- scan_general_id
+   --| Scan.option ($$ "," |-- skip_term) --| $$ ")") x
 and parse_source x =
   (parse_file_source >> File_Source
    || parse_inference_source >> Inference_Source
-   || skip_introduced >> K dummy_inference (* for Vampire *)
+   || parse_introduced_source >> Introduced_Source
    || scan_general_id >> (fn s => Inference_Source ("", [s])) (* for E *)
    || skip_term >> K dummy_inference) x
 
@@ -459,22 +474,34 @@
   | role_of_tptp_string "type" = Type_Role
   | role_of_tptp_string _ = Unknown
 
-val tptp_binary_op_list = [tptp_and, tptp_not_and, tptp_or, tptp_not_or, tptp_implies, tptp_if,
-                             tptp_iff, tptp_not_iff, tptp_equal, tptp_app]
+val tptp_binary_ops =
+  [tptp_and, tptp_not_and, tptp_or, tptp_not_or, tptp_implies, tptp_if, tptp_iff, tptp_not_iff,
+   tptp_equal, tptp_app]
+
+fun parse_one_in_list xs =
+  foldl1 (op ||) (map Scan.this_string xs)
 
 fun parse_binary_op x =
-  (foldl1 (op ||) (map Scan.this_string tptp_binary_op_list)
-    >> (fn c => if c = tptp_equal then "equal" else c)) x
+  (parse_one_in_list tptp_binary_ops
+   >> (fn c => if c = tptp_equal then "equal" else c)) x
+
+val parse_fo_quantifier =
+   parse_one_in_list [tptp_forall, tptp_exists, tptp_lambda, tptp_hilbert_choice, tptp_hilbert_the]
+
+val parse_ho_quantifier =
+   parse_one_in_list [tptp_ho_forall, tptp_ho_exists, tptp_hilbert_choice, tptp_hilbert_the]
 
 fun parse_thf0_type x =
   (($$ "(" |-- parse_thf0_type --| $$ ")" || scan_general_id >> (fn t => AType ((t, []), [])))
-     -- Scan.option ($$ tptp_fun_type |-- parse_thf0_type)
+   -- Scan.option ($$ tptp_fun_type |-- parse_thf0_type)
   >> (fn (a, NONE) => a
        | (a, SOME b) => AFun (a, b))) x
 
 fun mk_ho_of_fo_quant q =
   if q = tptp_forall then tptp_ho_forall
   else if q = tptp_exists then tptp_ho_exists
+  else if q = tptp_hilbert_choice then tptp_hilbert_choice
+  else if q = tptp_hilbert_the then tptp_hilbert_the
   else raise Fail ("unrecognized quantification: " ^ q)
 
 fun remove_thf_app (ATerm ((x, ty), arg)) =
@@ -492,8 +519,7 @@
    || $$ "(" |-- parse_thf0_typed_var --| $$ ")") x
 
 fun parse_simple_thf0_term x =
-  ((Scan.this_string tptp_forall || Scan.this_string tptp_exists || Scan.this_string tptp_lambda)
-        -- ($$ "[" |-- parse_thf0_typed_var --| $$ "]" --| $$ ":") -- parse_thf0_term
+  (parse_fo_quantifier -- ($$ "[" |-- parse_thf0_typed_var --| $$ "]" --| $$ ":") -- parse_thf0_term
       >> (fn ((q, ys), t) =>
           fold_rev
             (fn (var, ty) => fn r =>
@@ -505,7 +531,7 @@
             ys t)
   || Scan.this_string tptp_not |-- parse_thf0_term >> mk_app (mk_simple_aterm tptp_not)
   || scan_general_id --| $$ ":" -- parse_thf0_type >> (fn (var, typ) => ATerm ((var, [typ]), []))
-  || (Scan.this_string tptp_ho_forall || Scan.this_string tptp_ho_exists) >> mk_simple_aterm
+  || parse_ho_quantifier >> mk_simple_aterm
   || $$ "(" |-- parse_thf0_term --| $$ ")"
   || scan_general_id >> mk_simple_aterm) x
 and parse_thf0_term x =
@@ -546,11 +572,12 @@
     |-- scan_general_id --| $$ "," -- Symbol.scan_ascii_id --| $$ ","
     -- (parse_formula || skip_term >> K dummy_phi) -- parse_tstp_extra_arguments
     --| $$ ")" --| $$ "."
-   >> (fn (((num, role), phi), deps) =>
+   >> (fn (((num, role0), phi), src) =>
           let
-            val ((name, phi), rule, deps) =
+            val role = role_of_tptp_string role0
+            val ((name, phi), role', rule, deps) =
               (* Waldmeister isn't exactly helping. *)
-              (case deps of
+              (case src of
                 File_Source (_, SOME s) =>
                 (if s = waldmeister_conjecture_name then
                    (case find_formula_in_problem (mk_anot phi) problem of
@@ -563,13 +590,15 @@
                  else
                    ((num, [s |> perhaps (try (unprefix tofof_fact_prefix))]),
                     phi),
-                 "", [])
+                 role, "", [])
               | File_Source _ =>
-                (((num, map fst (find_formula_in_problem phi problem)), phi), "", [])
-              | Inference_Source (rule, deps) => (((num, []), phi), rule, deps))
-            fun mk_step () = (name, role_of_tptp_string role, phi, rule, map (rpair []) deps)
+                (((num, map fst (find_formula_in_problem phi problem)), phi), role, "", [])
+              | Inference_Source (rule, deps) => (((num, []), phi), role, rule, deps)
+              | Introduced_Source rule => (((num, []), phi), Lemma, rule, []))
+
+            fun mk_step () = (name, role', phi, rule, map (rpair []) deps)
           in
-            [(case role_of_tptp_string role of
+            [(case role' of
                Definition =>
                (case phi of
                  AAtom (ATerm (("equal", _), _)) =>
@@ -581,15 +610,13 @@
 
 (**** PARSING OF SPASS OUTPUT ****)
 
-(* SPASS returns clause references of the form "x.y". We ignore "y", whose role
-   is not clear anyway. *)
+(* SPASS returns clause references of the form "x.y". We ignore "y". *)
 val parse_dot_name = scan_general_id --| $$ "." --| scan_general_id
 
 val parse_spass_annotations =
   Scan.optional ($$ ":" |-- Scan.repeat (parse_dot_name --| Scan.option ($$ ","))) []
 
-(* It is not clear why some literals are followed by sequences of stars and/or
-   pluses. We ignore them. *)
+(* We ignore the stars and the pluses that follow literals. *)
 fun parse_decorated_atom x =
   (parse_atom --| Scan.repeat ($$ "*" || $$ "+" || $$ " ")) x
 
@@ -598,7 +625,7 @@
   | mk_horn (neg_lits, []) = mk_anot (foldr1 (uncurry (mk_aconn AAnd)) neg_lits)
   | mk_horn (neg_lits, pos_lits) =
     mk_aconn AImplies (foldr1 (uncurry (mk_aconn AAnd)) neg_lits)
-                      (foldr1 (uncurry (mk_aconn AOr)) pos_lits)
+      (foldr1 (uncurry (mk_aconn AOr)) pos_lits)
 
 fun parse_horn_clause x =
   (Scan.repeat parse_decorated_atom --| $$ "|" --| $$ "|"
@@ -647,10 +674,6 @@
 
 fun core_inference inf fact = ((fact, [fact]), Unknown, dummy_phi, inf, [])
 
-(* Syntax: <name> *)
-fun parse_satallax_core_line x =
-  (scan_general_id --| Scan.option ($$ " ") >> (single o core_inference satallax_core_rule)) x
-
 (* Syntax: SZS core <name> ... <name> *)
 fun parse_z3_tptp_core_line x =
   (Scan.this_string "SZS core" |-- Scan.repeat ($$ " " |-- scan_general_id)
@@ -658,35 +681,17 @@
 
 fun parse_line local_name problem =
   if local_name = leo2N then parse_tstp_thf0_line problem
-  else if local_name = satallaxN then parse_satallax_core_line
   else if local_name = spassN then parse_spass_line
   else if local_name = spass_pirateN then parse_spass_pirate_line
   else if local_name = z3_tptpN then parse_z3_tptp_core_line
   else parse_tstp_line problem
 
-fun parse_proof local_name problem =
-  strip_spaces_except_between_idents
-  #> raw_explode
-  #> Scan.error (!! (fn _ => raise UNRECOGNIZED_ATP_PROOF ())
-       (Scan.finite Symbol.stopper (Scan.repeat1 (parse_line local_name problem) >> flat)))
-  #> fst
-
 fun core_of_agsyhol_proof s =
   (case split_lines s of
     "The transformed problem consists of the following conjectures:" :: conj ::
     _ :: proof_term :: _ => SOME (unprefix " " conj :: find_enclosed "<<" ">>" proof_term)
   | _ => NONE)
 
-fun atp_proof_of_tstplike_proof _ _ "" = []
-  | atp_proof_of_tstplike_proof local_prover problem tstp =
-    (case core_of_agsyhol_proof tstp of
-      SOME facts => facts |> map (core_inference agsyhol_core_rule)
-    | NONE =>
-      tstp ^ "$" (* the $ sign acts as a sentinel (FIXME: needed?) *)
-      |> parse_proof local_prover problem
-      |> local_prover = vampireN ? perhaps (try (sort (vampire_step_name_ord o pairself #1)))
-      |> local_prover = zipperpositionN ? rev)
-
 fun clean_up_dependencies _ [] = []
   | clean_up_dependencies seen ((name, role, u, rule, deps) :: steps) =
     (name, role, u, rule, map_filter (fn dep => find_first (is_same_atp_step dep) seen) deps) ::
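
A minimal sketch of the new parse_one_in_list combinator: it turns a list of literal strings into a scanner accepting any one of them, as used for the Hilbert-choice operators above (the value name parse_hilbert_op is hypothetical).

(* accepts exactly "@+" (tptp_hilbert_choice) or "@-" (tptp_hilbert_the) *)
val parse_hilbert_op =
  parse_one_in_list [tptp_hilbert_choice, tptp_hilbert_the]
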
--- a/src/HOL/Tools/ATP/atp_proof_reconstruct.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_proof_reconstruct.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -30,6 +30,9 @@
 
   val forall_of : term -> term -> term
   val exists_of : term -> term -> term
+  val simplify_bool : term -> term
+  val var_name_of_typ : typ -> string
+  val rename_bound_vars : term -> term
   val type_enc_aliases : (string * string list) list
   val unalias_type_enc : string -> string list
   val term_of_atp : Proof.context -> ATP_Problem.atp_format -> ATP_Problem_Generate.type_enc ->
@@ -50,8 +53,8 @@
     ATP_Problem_Generate.type_enc -> string Symtab.table -> (string * term) list ->
     int Symtab.table -> string atp_proof -> (term, string) atp_step list
   val repair_waldmeister_endgame : (term, 'a) atp_step list -> (term, 'a) atp_step list
-  val infer_formula_types : Proof.context -> term list -> term list
-  val introduce_spass_skolem : (term, string) atp_step list -> (term, string) atp_step list
+  val infer_formulas_types : Proof.context -> term list -> term list
+  val introduce_spass_skolems : (term, string) atp_step list -> (term, string) atp_step list
   val factify_atp_proof : (string * 'a) list -> term list -> term -> (term, string) atp_step list ->
     (term, string) atp_step list
 end;
@@ -101,6 +104,46 @@
     TFree (ww, the_default @{sort type} (Variable.def_sort ctxt (ww, ~1)))
   end
 
+fun simplify_bool ((all as Const (@{const_name All}, _)) $ Abs (s, T, t)) =
+    let val t' = simplify_bool t in
+      if loose_bvar1 (t', 0) then all $ Abs (s, T, t') else t'
+    end
+  | simplify_bool (Const (@{const_name Not}, _) $ t) = s_not (simplify_bool t)
+  | simplify_bool (Const (@{const_name conj}, _) $ t $ u) =
+    s_conj (simplify_bool t, simplify_bool u)
+  | simplify_bool (Const (@{const_name disj}, _) $ t $ u) =
+    s_disj (simplify_bool t, simplify_bool u)
+  | simplify_bool (Const (@{const_name implies}, _) $ t $ u) =
+    s_imp (simplify_bool t, simplify_bool u)
+  | simplify_bool ((t as Const (@{const_name HOL.eq}, _)) $ u $ v) =
+    (case (u, v) of
+      (Const (@{const_name True}, _), _) => v
+    | (u, Const (@{const_name True}, _)) => u
+    | (Const (@{const_name False}, _), v) => s_not v
+    | (u, Const (@{const_name False}, _)) => s_not u
+    | _ => if u aconv v then @{const True} else t $ simplify_bool u $ simplify_bool v)
+  | simplify_bool (t $ u) = simplify_bool t $ simplify_bool u
+  | simplify_bool (Abs (s, T, t)) = Abs (s, T, simplify_bool t)
+  | simplify_bool t = t
+
+fun single_letter upper s =
+  let val s' = if String.isPrefix "o" s orelse String.isPrefix "O" s then "z" else s in
+    String.extract (Name.desymbolize (SOME upper) (Long_Name.base_name s'), 0, SOME 1)
+  end
+
+fun var_name_of_typ (Type (@{type_name fun}, [_, T])) =
+    if body_type T = HOLogic.boolT then "p" else "f"
+  | var_name_of_typ (Type (@{type_name set}, [T])) = single_letter true (var_name_of_typ T)
+  | var_name_of_typ (Type (s, Ts)) =
+    if String.isSuffix "list" s then var_name_of_typ (the_single Ts) ^ "s"
+    else single_letter false (Long_Name.base_name s)
+  | var_name_of_typ (TFree (s, _)) = single_letter false (perhaps (try (unprefix "'")) s)
+  | var_name_of_typ (TVar ((s, _), T)) = var_name_of_typ (TFree (s, T))
+
+fun rename_bound_vars (t $ u) = rename_bound_vars t $ rename_bound_vars u
+  | rename_bound_vars (Abs (_, T, t)) = Abs (var_name_of_typ T, T, rename_bound_vars t)
+  | rename_bound_vars t = t
+
 exception ATP_CLASS of string list
 exception ATP_TYPE of string atp_type list
 exception ATP_TERM of (string, string atp_type) atp_term list
@@ -116,25 +159,33 @@
 (* Type variables are given the basic sort "HOL.type". Some will later be constrained by information
    from type literals, or by type inference. *)
 fun typ_of_atp_type ctxt (ty as AType ((a, clss), tys)) =
-  let val Ts = map (typ_of_atp_type ctxt) tys in
-    (case unprefix_and_unascii type_const_prefix a of
-      SOME b => Type (invert_const b, Ts)
-    | NONE =>
-      if not (null tys) then
-        raise ATP_TYPE [ty] (* only "tconst"s have type arguments *)
-      else
-        (case unprefix_and_unascii tfree_prefix a of
-          SOME b => make_tfree ctxt b
-        | NONE =>
-          (* The term could be an Isabelle variable or a variable from the ATP, say "X1" or "_5018".
-             Sometimes variables from the ATP are indistinguishable from Isabelle variables, which
-             forces us to use a type parameter in all cases. *)
-          Type_Infer.param 0 ("'" ^ perhaps (unprefix_and_unascii tvar_prefix) a,
-            (if null clss then @{sort type} else map class_of_atp_class clss))))
+    let val Ts = map (typ_of_atp_type ctxt) tys in
+      (case unprefix_and_unascii type_const_prefix a of
+        SOME b => Type (invert_const b, Ts)
+      | NONE =>
+        if not (null tys) then
+          raise ATP_TYPE [ty] (* only "tconst"s have type arguments *)
+        else
+          (case unprefix_and_unascii tfree_prefix a of
+            SOME b => make_tfree ctxt b
+          | NONE =>
+            (* The term could be an Isabelle variable or a variable from the ATP, say "X1" or "_5018".
+               Sometimes variables from the ATP are indistinguishable from Isabelle variables, which
+               forces us to use a type parameter in all cases. *)
+            Type_Infer.param 0 ("'" ^ perhaps (unprefix_and_unascii tvar_prefix) a,
+              (if null clss then @{sort type} else map class_of_atp_class clss))))
+    end
+  | typ_of_atp_type ctxt (AFun (ty1, ty2)) = typ_of_atp_type ctxt ty1 --> typ_of_atp_type ctxt ty2
+
+fun atp_type_of_atp_term (ATerm ((s, _), us)) =
+  let val tys = map atp_type_of_atp_term us in
+    if s = tptp_fun_type then
+      (case tys of
+        [ty1, ty2] => AFun (ty1, ty2)
+      | _ => raise ATP_TYPE tys)
+    else
+      AType ((s, []), tys)
   end
-  | typ_of_atp_type ctxt (AFun (a, tys)) = typ_of_atp_type ctxt a --> typ_of_atp_type ctxt tys
-
-fun atp_type_of_atp_term (ATerm ((s, _), us)) = AType ((s, []), map atp_type_of_atp_term us)
 
 fun typ_of_atp_term ctxt = typ_of_atp_type ctxt o atp_type_of_atp_term
 
@@ -150,25 +201,10 @@
   | add_type_constraint false (cl, TVar (ix, _)) = add_var (ix, cl)
   | add_type_constraint _ _ = I
 
-fun repair_var_name_raw s =
-  let
-    fun subscript_name s n = s ^ nat_subscript n
-    val s = s |> String.map Char.toLower
-  in
-    (case space_explode "_" s of
-      [_] =>
-      (case take_suffix Char.isDigit (String.explode s) of
-        (cs1 as _ :: _, cs2 as _ :: _) =>
-        subscript_name (String.implode cs1) (the (Int.fromString (String.implode cs2)))
-      | (_, _) => s)
-    | [s1, s2] => (case Int.fromString s2 of SOME n => subscript_name s1 n | NONE => s)
-    | _ => s)
-  end
-
-fun repair_var_name textual s =
+fun repair_var_name s =
   (case unprefix_and_unascii schematic_var_prefix s of
-    SOME s => s
-  | NONE => s |> textual ? repair_var_name_raw)
+    SOME s' => s'
+  | NONE => s)
 
 (* The number of type arguments of a constant, zero if it's monomorphic. For (instances of) Skolem
    pseudoconstants, this information is encoded in the constant name. *)
@@ -182,58 +218,52 @@
 
 fun slack_fastype_of t = fastype_of t handle TERM _ => Type_Infer.anyT @{sort type}
 
-(* Cope with "tt(X) = X" atoms, where "X" is existentially quantified. *)
-fun loose_aconv (Free (s, _), Free (s', _)) = s = s'
-  | loose_aconv (t, t') = t aconv t'
-
 val spass_skolem_prefix = "sk" (* "skc" or "skf" *)
 val vampire_skolem_prefix = "sK"
 
-
 fun var_index_of_textual textual = if textual then 0 else 1
 
 fun quantify_over_var textual quant_of var_s var_T t =
   let
-    val vars = ((var_s, var_index_of_textual textual), var_T) ::
-                 filter (fn ((s, _), _) => s = var_s) (Term.add_vars t [])
+    val vars =
+      ((var_s, var_index_of_textual textual), var_T) ::
+      filter (fn ((s, _), _) => s = var_s) (Term.add_vars t [])
     val normTs = vars |> AList.group (op =) |> map (apsnd hd)
     fun norm_var_types (Var (x, T)) =
         Var (x, the_default T (AList.lookup (op =) normTs x))
       | norm_var_types t = t
   in t |> map_aterms norm_var_types |> fold_rev quant_of (map Var normTs) end
 
+(* This assumes that distinct names are mapped to distinct names by "Variable.variant_frees". This
+   does not hold in general but should hold for ATP-generated Skolem function names, since these end
+   with a digit and "variant_frees" appends letters. *)
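+(* For instance, assuming "sk1" is already fixed in the context, fresh_up would return a variant
+   such as "sk1a" for it -- the exact scheme is up to "Variable.variant_frees". *)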
+fun fresh_up ctxt s = fst (hd (Variable.variant_frees ctxt [] [(s, ())]))
 
 (* Higher-order translation. Variables are typed (although we don't use that information). Lambdas
-   are typed.
-
-  The code is similar to term_of_atp_fo. *)
-fun term_of_atp_ho ctxt textual sym_tab =
+   are typed. The code is similar to "term_of_atp_fo". *)
+fun term_of_atp_ho ctxt sym_tab =
   let
     val thy = Proof_Context.theory_of ctxt
-    val var_index = var_index_of_textual textual
+    val var_index = var_index_of_textual true
 
     fun do_term opt_T u =
       (case u of
-        AAbs(((var, ty), term), []) =>
+        AAbs (((var, ty), term), []) =>
         let
           val typ = typ_of_atp_type ctxt ty
-          val var_name = repair_var_name textual var
+          val var_name = repair_var_name var
           val tm = do_term NONE term
-        in quantify_over_var textual lambda' var_name typ tm end
+        in quantify_over_var true lambda' var_name typ tm end
       | ATerm ((s, tys), us) =>
         if s = ""
         then error "Isar proof reconstruction failed because the ATP proof \
                      \contains unparsable material."
         else if s = tptp_equal then
-          let val ts = map (do_term NONE) us in
-            if textual andalso length ts = 2 andalso loose_aconv (hd ts, List.last ts) then
-              @{const True}
-            else
-              list_comb (Const (@{const_name HOL.eq}, Type_Infer.anyT @{sort type}), ts)
-          end
+          list_comb (Const (@{const_name HOL.eq}, Type_Infer.anyT @{sort type}),
+            map (do_term NONE) us)
         else if not (null us) then
           let
-            val args = List.map (do_term NONE) us
+            val args = map (do_term NONE) us
             val opt_T' = SOME (map slack_fastype_of args ---> the_default dummyT opt_T)
             val func = do_term opt_T' (ATerm ((s, tys), []))
           in foldl1 (op $) (func :: args) end
@@ -241,13 +271,15 @@
         else if s = tptp_and then HOLogic.conj
         else if s = tptp_implies then HOLogic.imp
         else if s = tptp_iff orelse s = tptp_equal then HOLogic.eq_const dummyT
-        else if s = tptp_not_iff orelse s = tptp_not_equal then @{term "% P Q. Q ~= P"}
-        else if s = tptp_if then @{term "% P Q. Q --> P"}
-        else if s = tptp_not_and then @{term "% P Q. ~ (P & Q)"}
-        else if s = tptp_not_or then @{term "% P Q. ~ (P | Q)"}
+        else if s = tptp_not_iff orelse s = tptp_not_equal then @{term "%P Q. Q ~= P"}
+        else if s = tptp_if then @{term "%P Q. Q --> P"}
+        else if s = tptp_not_and then @{term "%P Q. ~ (P & Q)"}
+        else if s = tptp_not_or then @{term "%P Q. ~ (P | Q)"}
         else if s = tptp_not then HOLogic.Not
         else if s = tptp_ho_forall then HOLogic.all_const dummyT
         else if s = tptp_ho_exists then HOLogic.exists_const dummyT
+        else if s = tptp_hilbert_choice then HOLogic.choice_const dummyT
+        else if s = tptp_hilbert_the then @{term "The"}
         else
           (case unprefix_and_unascii const_prefix s of
             SOME s' =>
@@ -259,7 +291,7 @@
               val Ts = map (typ_of_atp_type ctxt) tys @ map (typ_of_atp_term ctxt) type_us
               val T =
                 (if not (null Ts) andalso robust_const_num_type_args thy s' = length Ts then
-                   if textual then try (Sign.const_instance thy) (s', Ts) else NONE
+                   try (Sign.const_instance thy) (s', Ts)
                  else
                    NONE)
                 |> (fn SOME T => T
@@ -270,8 +302,6 @@
             in list_comb (t, term_ts) end
           | NONE => (* a free or schematic variable *)
             let
-              fun fresh_up s =
-                [(s, ())] |> Variable.variant_frees ctxt [] |> hd |> fst
               val ts = map (do_term NONE) us
               val T =
                 (case opt_T of
@@ -285,10 +315,8 @@
                 (case unprefix_and_unascii fixed_var_prefix s of
                   SOME s => Free (s, T)
                 | NONE =>
-                  if textual andalso not (is_tptp_variable s) then
-                    Free (s |> textual ? (repair_var_name_raw #> fresh_up), T)
-                  else
-                    Var ((repair_var_name textual s, var_index), T))
+                  if not (is_tptp_variable s) then Free (fresh_up ctxt s, T)
+                  else Var ((repair_var_name s, var_index), T))
             in list_comb (t, ts) end))
   in do_term end
 
@@ -311,12 +339,8 @@
         else if String.isPrefix native_type_prefix s then
           @{const True} (* ignore TPTP type information *)
         else if s = tptp_equal then
-          let val ts = map (do_term [] NONE) us in
-            if textual andalso length ts = 2 andalso loose_aconv (hd ts, List.last ts) then
-              @{const True}
-            else
-              list_comb (Const (@{const_name HOL.eq}, Type_Infer.anyT @{sort type}), ts)
-          end
+          list_comb (Const (@{const_name HOL.eq}, Type_Infer.anyT @{sort type}),
+            map (do_term [] NONE) us)
         else
           (case unprefix_and_unascii const_prefix s of
             SOME s' =>
@@ -363,16 +387,10 @@
             end
           | NONE => (* a free or schematic variable *)
             let
-              (* This assumes that distinct names are mapped to distinct names by
-                 "Variable.variant_frees". This does not hold in general but should hold for
-                 ATP-generated Skolem function names, since these end with a digit and
-                 "variant_frees" appends letters. *)
-              fun fresh_up s = [(s, ())] |> Variable.variant_frees ctxt [] |> hd |> fst
-
               val term_ts =
                 map (do_term [] NONE) us
                 (* SPASS (3.8ds) and Vampire (2.6) pass arguments to Skolem functions in reverse
-                   order, which is incompatible with the new Metis skolemizer. *)
+                   order, which is incompatible with "metis"'s new skolemizer. *)
                 |> exists (fn pre => String.isPrefix pre s)
                   [spass_skolem_prefix, vampire_skolem_prefix] ? rev
               val ts = term_ts @ extra_ts
@@ -387,19 +405,17 @@
                   SOME s => Free (s, T)
                 | NONE =>
                   if textual andalso not (is_tptp_variable s) then
-                    Free (s |> textual ? (repair_var_name_raw #> fresh_up), T)
+                    Free (s |> textual ? fresh_up ctxt, T)
                   else
-                    Var ((repair_var_name textual s, var_index), T))
+                    Var ((repair_var_name s, var_index), T))
             in list_comb (t, ts) end))
   in do_term [] end
 
 fun term_of_atp ctxt (ATP_Problem.THF _) type_enc =
-    if ATP_Problem_Generate.is_type_enc_higher_order type_enc
-    then term_of_atp_ho ctxt
+    if ATP_Problem_Generate.is_type_enc_higher_order type_enc then K (term_of_atp_ho ctxt)
     else error "Unsupported Isar reconstruction."
   | term_of_atp ctxt _ type_enc =
-    if not (ATP_Problem_Generate.is_type_enc_higher_order type_enc)
-    then term_of_atp_fo ctxt
+    if not (ATP_Problem_Generate.is_type_enc_higher_order type_enc) then term_of_atp_fo ctxt
     else error "Unsupported Isar reconstruction."
 
 fun term_of_atom ctxt format type_enc textual sym_tab pos (u as ATerm ((s, _), _)) =
@@ -436,7 +452,7 @@
         do_formula pos (AQuant (q, xs, phi'))
         (* FIXME: TFF *)
         #>> quantify_over_var textual (case q of AForall => forall_of | AExists => exists_of)
-          (repair_var_name textual s) dummyT
+          (repair_var_name s) dummyT
       | AConn (ANot, [phi']) => do_formula (not pos) phi' #>> s_not
       | AConn (c, [phi1, phi2]) =>
         do_formula (pos |> c = AImplies ? not) phi1
@@ -477,7 +493,7 @@
 val leo2_extcnf_equal_neg_rule = "extcnf_equal_neg"
 
 fun add_fact ctxt facts ((_, ss), _, _, rule, deps) =
-  (if member (op =) [agsyhol_core_rule, leo2_extcnf_equal_neg_rule, satallax_core_rule] rule then
+  (if member (op =) [agsyhol_core_rule, leo2_extcnf_equal_neg_rule] rule then
      insert (op =) (short_thm_name ctxt ext, (Global, General))
    else
      I)
@@ -530,9 +546,9 @@
     else
       s
 
-fun set_var_index j = map_aterms (fn Var ((s, _), T) => Var ((s, j), T) | t => t)
+fun set_var_index j = map_aterms (fn Var ((s, 0), T) => Var ((s, j), T) | t => t)
 
-fun infer_formula_types ctxt =
+fun infer_formulas_types ctxt =
   map_index (uncurry (fn j => set_var_index j #> Type.constraint HOLogic.boolT))
   #> Syntax.check_terms (Proof_Context.set_mode Proof_Context.mode_schematic ctxt)
   #> map (set_var_index 0)
@@ -546,25 +562,26 @@
 
 fun uncombine_term thy =
   let
-    fun aux (t1 $ t2) = betapply (pairself aux (t1, t2))
-      | aux (Abs (s, T, t')) = Abs (s, T, aux t')
-      | aux (t as Const (x as (s, _))) =
+    fun uncomb (t1 $ t2) = betapply (uncomb t1, uncomb t2)
+      | uncomb (Abs (s, T, t)) = Abs (s, T, uncomb t)
+      | uncomb (t as Const (x as (s, _))) =
         (case AList.lookup (op =) combinator_table s of
           SOME thm => thm |> prop_of |> specialize_type thy x |> Logic.dest_equals |> snd
         | NONE => t)
-      | aux t = t
-  in aux end
+      | uncomb t = t
+  in uncomb end
 
-fun unlift_term lifted =
-  map_aterms (fn t as Const (s, _) =>
-                 if String.isPrefix lam_lifted_prefix s then
-                   (* FIXME: do something about the types *)
-                   (case AList.lookup (op =) lifted s of
-                     SOME t => unlift_term lifted t
-                   | NONE => t)
-                 else
-                   t
-               | t => t)
+fun unlift_aterm lifted (t as Const (s, _)) =
+    if String.isPrefix lam_lifted_prefix s then
+      (* FIXME: do something about the types *)
+      (case AList.lookup (op =) lifted s of
+        SOME t' => unlift_term lifted t'
+      | NONE => t)
+    else
+      t
+  | unlift_aterm _ t = t
+and unlift_term lifted =
+  map_aterms (unlift_aterm lifted)
 
 fun termify_line _ _ _ _ _ (_, Type_Role, _, _, _) = NONE
   | termify_line ctxt format type_enc lifted sym_tab (name, role, u, rule, deps) =
@@ -572,8 +589,9 @@
       val thy = Proof_Context.theory_of ctxt
       val t = u
         |> prop_of_atp ctxt format type_enc true sym_tab
+        |> unlift_term lifted
         |> uncombine_term thy
-        |> unlift_term lifted
+        |> simplify_bool
     in
       SOME (name, role, t, rule, deps)
     end
@@ -599,7 +617,7 @@
   nasty_atp_proof pool
   #> map_term_names_in_atp_proof repair_name
   #> map_filter (termify_line ctxt format type_enc lifted sym_tab)
-  #> map_proof_terms (infer_formula_types ctxt #> map HOLogic.mk_Trueprop)
+  #> map_proof_terms (infer_formulas_types ctxt #> map HOLogic.mk_Trueprop)
   #> local_prover = waldmeisterN ? repair_waldmeister_endgame
 
 fun take_distinct_vars seen ((t as Var _) :: ts) =
@@ -608,12 +626,12 @@
 
 fun unskolemize_term skos t =
   let
-    val is_sko = member (op =) skos
+    val is_skolem = member (op =) skos
 
     fun find_args args (t $ u) = find_args (u :: args) t #> find_args [] u
       | find_args _ (Abs (_, _, t)) = find_args [] t
       | find_args args (Free (s, _)) =
-        if is_sko s then
+        if is_skolem s then
           let val new = take_distinct_vars [] args in
             (fn [] => new | old => if length new < length old then new else old)
           end
@@ -626,7 +644,7 @@
 
     fun fix_skos args (t $ u) = fix_skos (fix_skos [] u :: args) t
       | fix_skos args (t as Free (s, T)) =
-        if is_sko s then list_comb (Free (s, funpow num_alls body_type T), drop num_alls args)
+        if is_skolem s then list_comb (Free (s, funpow num_alls body_type T), drop num_alls args)
         else list_comb (t, args)
       | fix_skos [] (Abs (s, T, t)) = Abs (s, T, fix_skos [] t)
       | fix_skos [] t = t
@@ -634,10 +652,10 @@
 
     val t' = fix_skos [] t
 
-    fun add_sko (t as Free (s, _)) = is_sko s ? insert (op aconv) t
-      | add_sko _ = I
+    fun add_skolem (t as Free (s, _)) = is_skolem s ? insert (op aconv) t
+      | add_skolem _ = I
 
-    val exs = Term.fold_aterms add_sko t' []
+    val exs = Term.fold_aterms add_skolem t' []
   in
     t'
     |> HOLogic.dest_Trueprop
@@ -646,62 +664,80 @@
     |> HOLogic.mk_Trueprop
   end
 
-fun introduce_spass_skolem [] = []
-  | introduce_spass_skolem (proof as (_, _, _, rule1, _) :: _) =
-    if rule1 = spass_input_rule then
-      let
-        fun add_sko (Free (s, _)) = String.isPrefix spass_skolem_prefix s ? insert (op =) s
-          | add_sko _ = I
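+(* Rename the leading variable arguments of SPASS Skolem applications to canonical names derived
+   from the Skolem constant (e.g. a hypothetical "skc0" applied to variables would yield "skc0_0",
+   "skc0_1", ...), so that the same Skolem term is spelled uniformly across the steps conjoined
+   below. *)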
+fun rename_skolem_args t =
+  let
+    fun add_skolem_args (Abs (_, _, t)) = add_skolem_args t
+      | add_skolem_args t =
+        (case strip_comb t of
+          (Free (s, _), args as _ :: _) =>
+          if String.isPrefix spass_skolem_prefix s then
+            insert (op =) (s, fst (take_prefix is_Var args))
+          else
+            fold add_skolem_args args
+        | (u, args as _ :: _) => fold add_skolem_args (u :: args)
+        | _ => I)
+
+    fun subst_of_skolem (sk, args) =
+      map_index (fn (j, Var (z, T)) => (z, Var ((sk ^ "_" ^ string_of_int j, 0), T))) args
 
-        (* union-find would be faster *)
-        fun add_cycle [] = I
-          | add_cycle ss =
-            fold (fn s => Graph.default_node (s, ())) ss
-            #> fold Graph.add_edge (ss ~~ tl ss @ [hd ss])
-
-        val (input_steps, other_steps) = List.partition (null o #5) proof
+    val subst = maps subst_of_skolem (add_skolem_args t [])
+  in
+    subst_vars ([], subst) t
+  end
 
-        val skoss = map (fn (_, _, t, _, _) => Term.fold_aterms add_sko t []) input_steps
-        val skoss_input_steps = filter_out (null o fst) (skoss ~~ input_steps)
-        val groups = Graph.strong_conn (fold add_cycle skoss Graph.empty)
+fun introduce_spass_skolems proof =
+  let
+    fun add_skolem (Free (s, _)) = String.isPrefix spass_skolem_prefix s ? insert (op =) s
+      | add_skolem _ = I
 
-        fun step_name_of_group skos = (implode skos, [])
-        fun in_group group = member (op =) group o hd
-        fun group_of sko = the (find_first (fn group => in_group group sko) groups)
+    (* union-find would be faster *)
+    fun add_cycle [] = I
+      | add_cycle ss =
+        fold (fn s => Graph.default_node (s, ())) ss
+        #> fold Graph.add_edge (ss ~~ tl ss @ [hd ss])
+
+    val (input_steps, other_steps) = List.partition (null o #5) proof
 
-        fun new_steps (skoss_steps : (string list * (term, 'a) atp_step) list) group =
-          let
-            val name = step_name_of_group group
-            val name0 = name |>> prefix "0"
-            val t =
-              skoss_steps
-              |> map (snd #> #3 #> HOLogic.dest_Trueprop)
-              |> Library.foldr1 s_conj
-              |> HOLogic.mk_Trueprop
-            val skos = fold (union (op =) o fst) skoss_steps []
-            val deps = map (snd #> #1) skoss_steps
-          in
-            [(name0, Unknown, unskolemize_term skos t, spass_pre_skolemize_rule, deps),
-             (name, Unknown, t, spass_skolemize_rule, [name0])]
-          end
+    val skoss = map (fn (_, _, t, _, _) => Term.fold_aterms add_skolem t []) input_steps
+    val skoss_input_steps = filter_out (null o fst) (skoss ~~ input_steps)
+    val groups = Graph.strong_conn (fold add_cycle skoss Graph.empty)
+
+    fun step_name_of_group skos = (implode skos, [])
+    fun in_group group = member (op =) group o hd
+    fun group_of sko = the (find_first (fn group => in_group group sko) groups)
 
-        val sko_steps =
-          maps (fn group => new_steps (filter (in_group group o fst) skoss_input_steps) group)
-            groups
+    fun new_steps (skoss_steps : (string list * (term, 'a) atp_step) list) group =
+      let
+        val name = step_name_of_group group
+        val name0 = name |>> prefix "0"
+        val t =
+          (case map (snd #> #3) skoss_steps of
+            [t] => t
+          | ts => ts
+            |> map (HOLogic.dest_Trueprop #> rename_skolem_args)
+            |> Library.foldr1 s_conj
+            |> HOLogic.mk_Trueprop)
+        val skos = fold (union (op =) o fst) skoss_steps []
+        val deps = map (snd #> #1) skoss_steps
+      in
+        [(name0, Unknown, unskolemize_term skos t, spass_pre_skolemize_rule, deps),
+         (name, Unknown, t, spass_skolemize_rule, [name0])]
+      end
 
-        val old_news =
-          map (fn (skos, (name, _, _, _, _)) => (name, [step_name_of_group (group_of skos)]))
-            skoss_input_steps
-        val repair_deps = fold replace_dependencies_in_line old_news
-      in
-        input_steps @ sko_steps @ map repair_deps other_steps
-      end
-  else
-    proof
+    val sko_steps =
+      maps (fn group => new_steps (filter (in_group group o fst) skoss_input_steps) group) groups
+
+    val old_news =
+      map (fn (skos, (name, _, _, _, _)) => (name, [step_name_of_group (group_of skos)]))
+        skoss_input_steps
+    val repair_deps = fold replace_dependencies_in_line old_news
+  in
+    input_steps @ sko_steps @ map repair_deps other_steps
+  end
 
 fun factify_atp_proof facts hyp_ts concl_t atp_proof =
   let
-    fun factify_step ((num, ss), _, t, rule, deps) =
+    fun factify_step ((num, ss), role, t, rule, deps) =
       let
         val (ss', role', t') =
           (case resolve_conjectures ss of
@@ -709,7 +745,7 @@
             if j = length hyp_ts then ([], Conjecture, concl_t) else ([], Hypothesis, nth hyp_ts j)
           | _ =>
             (case resolve_facts facts ss of
-              [] => (ss, Plain, t)
+              [] => (ss, if role = Lemma then Lemma else Plain, t)
             | facts => (map fst facts, Axiom, t)))
       in
         ((num, ss'), role', t', rule, deps)
--- a/src/HOL/Tools/ATP/atp_proof_redirect.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_proof_redirect.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -167,8 +167,9 @@
           val subseqss = map (subsequents seqs) zones
           val seqs = fold (subtract direct_sequent_eq) subseqss seqs
           val cases = map2 (fn l => fn subseqs => ([l], redirect [l] proved subseqs)) c subseqss
+          val cases' = filter_out (null o snd) cases
         in
-          s_cases cases @ redirect (succedent_of_cases cases) proved seqs
+          s_cases cases' @ redirect (succedent_of_cases cases) proved seqs
         end
   in
     redirect [] axioms seqs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/ATP/atp_satallax.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -0,0 +1,412 @@
+(*  Title:      HOL/Tools/ATP/atp_satallax.ML
+    Author:     Mathias Fleury, ENS Rennes
+    Author:     Jasmin Blanchette, TU Muenchen
+
+Satallax proof parser and transformation for Sledgehammer.
+*)
+
+signature ATP_SATALLAX =
+sig
+  val atp_proof_of_tstplike_proof : string -> string ATP_Proof.atp_problem -> string ->
+    string ATP_Proof.atp_proof
+end;
+
+structure ATP_Satallax : ATP_SATALLAX =
+struct
+
+open ATP_Proof
+open ATP_Util
+open ATP_Problem
+
+(*Undocumented format:
+thf (number, plain | Axiom | ..., inference(rule, [status(thm), assumptions ([hypotheses list]),
+detailed_rule(discharge, used_hypotheses_list) *], used_hypotheses_list, premises))
+(seen with tab_mat)
+
+Also seen -- but we can ignore it:
+"tab_inh (a) __11." meaning that the type a is inhabited, useful for the eigenvariable eigen__11.
+*)
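+(* A purely hypothetical line in the shape described above (not verbatim Satallax output):
+     thf(3, plain, (~ p), inference(tab_negall, [status(thm), assumptions([h1]),
+       tab_negall(discharge, [h2])], [h1], [h2])). *)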
+fun parse_satallax_tstp_information x =
+  ((Symbol.scan_ascii_id || ($$ "$" |-- Symbol.scan_ascii_id))
+  -- Scan.option ( $$ "(" |-- Scan.option (Symbol.scan_ascii_id --| $$ ",")
+  -- ((Scan.option (($$ "[" |-- (Scan.option ((scan_general_id
+         -- Scan.repeat ($$ "," |-- scan_general_id)) >> op ::) --| $$ "]"))
+         || (skip_term >> K "") >> (fn x => SOME [x]))
+       >> (fn (SOME x) => x | NONE => NONE)) --| $$ ")"))) x
+
+fun parse_prem x =
+  ((Symbol.scan_ascii_id || scan_general_id) --| Scan.option ($$ ":" -- skip_term)) x
+
+fun parse_prems x =
+  (($$ "[" |-- parse_prem -- Scan.repeat ($$ "," |-- parse_prem) --| $$ "]")
+     >> op ::) x
+
+fun parse_tstp_satallax_extra_arguments x =
+  ($$ "," |-- scan_general_id -- (($$ "(" |-- Symbol.scan_ascii_id --| $$ "," )
+  -- ($$ "[" |-- Scan.option ((parse_satallax_tstp_information
+  -- Scan.repeat ($$ "," |-- parse_satallax_tstp_information)) >> op ::)
+  --| $$ "]") --
+  (Scan.optional ($$ "," |-- parse_prems) [] -- Scan.optional ($$ "," |-- parse_prems) []
+    >> (fn (x, []) => x | (_, x) => x))
+   --| $$ ")")) x
+
+val dummy_satallax_step = ((("~1", "tab_inh"), AAtom (ATerm(("False", []), []))), NONE)
+fun parse_tstp_thf0_satallax_line x =
+  (((Scan.this_string tptp_thf
+  -- $$ "(") |-- scan_general_id --| $$ "," -- Symbol.scan_ascii_id --| $$ ","
+  -- parse_thf0_formula -- Scan.option parse_tstp_satallax_extra_arguments --| $$ ")" --| $$ ".")
+  || (Scan.this_string "tab_inh" |-- skip_term --| $$ ".")
+     >> K dummy_satallax_step) x
+
+datatype satallax_step = Satallax_Step of {
+  id: string,
+  role: string,
+  theorem: (string, string, (string, string ATP_Problem.atp_type) ATP_Problem.atp_term, string)
+    ATP_Problem.atp_formula,
+  assumptions: string list,
+  rule: string,
+  used_assumptions: string list,
+  detailed_eigen: string,
+  generated_goal_assumptions: (string * string list) list}
+
+fun mk_satallax_step id role theorem assumptions rule used_assumptions
+    generated_goal_assumptions detailed_eigen =
+  Satallax_Step {id = id, role = role, theorem = theorem, assumptions = assumptions, rule = rule,
+    used_assumptions = used_assumptions, generated_goal_assumptions = generated_goal_assumptions,
+    detailed_eigen = detailed_eigen}
+
+fun get_assumptions (("assumptions", SOME (_ , assumptions)) :: _) =
+    the_default [] assumptions
+  | get_assumptions (_ :: l) = get_assumptions l
+  | get_assumptions [] = []
+
+fun get_detailled_eigen ((_, SOME (SOME "eigenvar" , var)) :: _) =
+    hd (the_default [""] var)
+  | get_detailled_eigen (_ :: l) = get_detailled_eigen l
+  | get_detailled_eigen [] = ""
+
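+(* Split a Satallax dependency list into (used_assumptions, new_goals, generated_assumptions).
+   The list is expected in this order: hypothesis names (starting with "h" or otherwise
+   non-numeric), then numeric goal ids, then the hypotheses generated for those goals; the last
+   argument of "sep_dep" records which of the three phases is being read. *)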
+fun seperate_dependices dependencies =
+  let
+    fun sep_dep [] used_assumptions new_goals generated_assumptions _ =
+        (used_assumptions, new_goals, generated_assumptions)
+      | sep_dep (x :: l) used_assumptions new_goals generated_assumptions state =
+        if hd (raw_explode x) = "h" orelse Int.fromString x = NONE  then
+          if state = 0 then
+            sep_dep l (x :: used_assumptions) new_goals generated_assumptions state
+          else
+            sep_dep l used_assumptions new_goals (x :: generated_assumptions) 3
+        else
+          if state = 1 orelse state = 0 then
+            sep_dep l used_assumptions (x :: new_goals) generated_assumptions 1
+          else
+            raise Fail ("incorrect Satallax proof" ^ PolyML.makestring l)
+  in
+    sep_dep dependencies [] [] [] 0
+  end
+
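+(* Pair each newly generated goal with the assumptions generated for it. Satallax rules generate
+   either one assumption per goal or (e.g. when splitting a conjunction) two assumptions per goal;
+   any other ratio is rejected. *)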
+fun create_grouped_goal_assumption rule new_goals generated_assumptions =
+  let
+    val number_of_new_goals = length new_goals
+    val number_of_new_assms = length generated_assumptions
+  in
+    if number_of_new_goals = number_of_new_assms then
+      new_goals ~~ (map single generated_assumptions)
+    else if 2 * number_of_new_goals = number_of_new_assms then
+      let
+        fun group_by_pair (new_goal :: new_goals) (assumpt1 :: assumpt2 :: generated_assumptions) =
+            (new_goal, [assumpt1, assumpt2]) :: group_by_pair new_goals generated_assumptions
+          | group_by_pair [] [] = []
+      in
+        group_by_pair new_goals generated_assumptions
+      end
+    else
+      raise Fail ("the rule " ^ rule ^" is not supported in the reconstruction.")
+  end
+
+fun to_satallax_step (((id, role), formula), (SOME (_,((rule, l), used_rules)))) =
+    let
+      val (used_assumptions, new_goals, generated_assumptions) = seperate_dependices used_rules
+    in
+      mk_satallax_step id role formula (get_assumptions (the_default [] l)) rule used_assumptions
+        (create_grouped_goal_assumption rule new_goals generated_assumptions)
+        (get_detailled_eigen (the_default [] l))
+    end
+  | to_satallax_step (((id, role), formula), NONE) =
+      mk_satallax_step id role formula [] "assumption" [] [] ""
+
+fun is_assumption (Satallax_Step {role, ...}) = role = "assumption" orelse role = "axiom" orelse
+  role = "negated_conjecture" orelse role = "conjecture"
+
+fun seperate_assumptions_and_steps l =
+  let
+    fun seperate_assumption [] l l' = (l, l')
+      | seperate_assumption (step :: steps) l l' =
+        if is_assumption step then
+          seperate_assumption steps (step :: l) l'
+        else
+          seperate_assumption steps l (step :: l')
+  in
+    seperate_assumption l [] []
+  end
+
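+(* Skeleton of the Satallax proof: a "Linear_Part" node points to the goals generated from it
+   (following the proof top-down), whereas a "Tree_Part" node points to the steps it depends on --
+   the orientation used once the proof branches. *)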
+datatype satallax_proof_graph =
+  Linear_Part of {node: satallax_step, succs: satallax_proof_graph list} |
+  Tree_Part of {node: satallax_step, deps: satallax_proof_graph list}
+
+fun find_proof_step ((x as Satallax_Step {id, ...}) :: l) h =
+    if h = id then x else find_proof_step l h
+  | find_proof_step [] h =
+    raise Fail ("not found: " ^ PolyML.makestring h ^ " (probably a parsing error)")
+
+fun remove_not_not (x as ATerm ((op1, _), [ATerm ((op2, _), [th])])) =
+    if op1 = op2 andalso op1 = tptp_not then th else x
+  | remove_not_not th = th
+
+fun tptp_term_equal (ATerm((op1, _), l1), ATerm((op2, _), l2)) = op1 = op2 andalso
+    fold2 (fn t1 => fn t2 => fn c => c andalso t1 = t2) l1 l2 true
+  | tptp_term_equal (_, _) = false
+
+val dummy_true_aterm = ATerm (("$true", [dummy_atype]), [])
+
+fun find_assumptions_to_inline ths (assm :: assms) to_inline no_inline =
+    (case List.find (curry (op =) assm o fst) no_inline of
+      SOME _ => find_assumptions_to_inline ths assms to_inline no_inline
+    | NONE =>
+      (case List.find (curry (op =) assm o fst) to_inline of
+        NONE => find_assumptions_to_inline ths assms to_inline no_inline
+      | SOME (_, th) =>
+        let
+          val simplified_ths_with_inlined_assumption =
+            (case List.partition (curry tptp_term_equal th o remove_not_not) ths of
+              ([], ths) => ATerm ((tptp_not, [dummy_atype]), [th]) :: ths
+            | (_, _) => [])
+        in
+          find_assumptions_to_inline simplified_ths_with_inlined_assumption assms to_inline no_inline
+        end))
+  | find_assumptions_to_inline ths [] _ _ = ths
+
+fun inline_if_needed_and_simplify th assms to_inline no_inline =
+  (case find_assumptions_to_inline [th] assms to_inline no_inline of
+    [] => dummy_true_aterm
+  | l =>
+    foldl1 (fn (a, b) =>
+      (case b of
+        ATerm (("$false", _), _) => a
+      | ATerm (("~", _), [ATerm (("$true", _), _)]) => a
+      | _ => ATerm ((tptp_or, [dummy_atype]), [a, b]))) l)
+
+fun get_conclusion (Satallax_Step {theorem = AAtom theorem, ...}) = theorem
+
+fun add_assumption new_used_assumptions ((Satallax_Step {id, role, theorem, assumptions,
+    rule, generated_goal_assumptions, used_assumptions, detailed_eigen})) =
+  mk_satallax_step id role theorem assumptions rule (new_used_assumptions @ used_assumptions)
+    generated_goal_assumptions detailed_eigen
+
+fun set_rule new_rule (Satallax_Step {id, role, theorem, assumptions,
+    generated_goal_assumptions, used_assumptions, detailed_eigen, ...}) =
+  mk_satallax_step id role theorem assumptions new_rule used_assumptions
+    generated_goal_assumptions detailed_eigen
+
+fun transform_inline_assumption hypotheses_step proof_sketch =
+  let
+    fun inline_in_step (Linear_Part {node as Satallax_Step {generated_goal_assumptions,
+          used_assumptions, rule, ...}, succs}) =
+        if generated_goal_assumptions = [] then
+          Linear_Part {node = node, succs = []}
+        else
+          let
+            (*A single goal is created, but up to two hypotheses may be created -- e.g. for the
+              "and" rule, (A /\ B) creates the two hypotheses A and B.*)
+            fun set_hypotheses_as_goal [hypothesis] succs =
+                Linear_Part {node = set_rule rule (add_assumption used_assumptions 
+                    (find_proof_step hypotheses_step hypothesis)),
+                  succs = map inline_in_step succs}
+              | set_hypotheses_as_goal (hypothesis :: new_goal_hypotheses) succs =
+                Linear_Part {node = set_rule rule (add_assumption used_assumptions
+                    (find_proof_step hypotheses_step hypothesis)),
+                  succs = [set_hypotheses_as_goal new_goal_hypotheses succs]}
+          in
+            set_hypotheses_as_goal (snd (hd generated_goal_assumptions)) succs
+          end
+      | inline_in_step (Tree_Part {node = node, deps}) =
+        Tree_Part {node = node, deps = map inline_in_step deps}
+
+    fun inline_contradictory_assumptions (Linear_Part {node as Satallax_Step{id, theorem, ...},
+       succs}) (to_inline, no_inline) =
+      let
+        val (succs, inliner) = fold_map inline_contradictory_assumptions succs 
+          (to_inline, (id, theorem) :: no_inline)
+      in
+        (Linear_Part {node = node, succs = succs}, inliner)
+      end
+    | inline_contradictory_assumptions (Tree_Part {node = Satallax_Step {id, role,
+        theorem = AAtom theorem, assumptions, rule, generated_goal_assumptions,
+        used_assumptions, detailed_eigen}, deps}) (to_inline, no_inline) =
+      let
+        val (dep', (to_inline', no_inline')) = fold_map inline_contradictory_assumptions deps 
+          (to_inline, no_inline)
+        val to_inline'' =
+          map (fn s => (s, get_conclusion (find_proof_step hypotheses_step s)))
+            (List.filter (fn s => nth_string s 0 = "h")
+              (used_assumptions @ (map snd generated_goal_assumptions |> flat)))
+          @ to_inline'
+        val node' = Satallax_Step {id = id, role = role, theorem =
+            AAtom (inline_if_needed_and_simplify theorem assumptions to_inline'' no_inline'),
+          assumptions = assumptions, rule = rule,
+          generated_goal_assumptions = generated_goal_assumptions, detailed_eigen = detailed_eigen,
+          used_assumptions =
+            List.filter (fn s => List.find (curry (op =) s o fst) to_inline = NONE)
+            used_assumptions}
+      in
+        (Tree_Part {node = node', deps = dep'}, (to_inline'', no_inline'))
+      end
+  in
+    fst (inline_contradictory_assumptions (inline_in_step proof_sketch) ([], []))
+  end
+
+fun correct_dependencies (Linear_Part {node, succs}) =
+    Linear_Part {node = node, succs = map correct_dependencies succs}
+  | correct_dependencies (Tree_Part {node, deps}) =
+    let
+      val new_used_assumptions =
+        map (fn Linear_Part {node = (Satallax_Step{id, ...}), ...} => id
+              | Tree_Part {node = (Satallax_Step{id, ...}), ...} => id) deps
+    in
+      Tree_Part {node = add_assumption new_used_assumptions node,
+        deps = map correct_dependencies deps}
+    end
+
+fun create_satallax_proof_graph (hypotheses_step, proof_sketch) =
+  let
+    fun create_step (step as Satallax_Step {generated_goal_assumptions, ...}) =
+      Linear_Part {node = step,
+        succs = map (create_step o (find_proof_step (hypotheses_step @ proof_sketch)))
+          (map fst generated_goal_assumptions)}
+    fun reverted_discharged_steps is_branched (Linear_Part {node as
+          Satallax_Step {generated_goal_assumptions, ...}, succs}) =
+        if is_branched orelse length generated_goal_assumptions > 1 then
+          Tree_Part {node = node, deps = map (reverted_discharged_steps true) succs}
+        else
+          Linear_Part {node = node, succs = map (reverted_discharged_steps is_branched) succs}
+    val proof_with_correct_sense =
+        correct_dependencies (reverted_discharged_steps false
+          (create_step (find_proof_step proof_sketch "0" )))
+  in
+    transform_inline_assumption hypotheses_step proof_with_correct_sense
+  end
+
+val satallax_known_theorems = ["eq_ind", "eq_trans2", "eq_trans3", "eq_neg_neg_id", "eq_true",
+  "eq_and_nor", "eq_or_nand", "eq_or_imp", "eq_and_imp", "eq_imp_or", "eq_iff", "eq_sym_eq",
+  "eq_forall_nexists", "eq_exists_nforall", "eq_leib1", "eq_leib2", "eq_leib3", "eq_leib4",
+  "eq_eta", "SinhE", "eq_forall_Seps", "eq_SPi_Seps", "eq_exists_Seps"]
+val is_special_satallax_rule = member (op =) satallax_known_theorems
+
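+(* Replace every occurrence of the variable name "var" in a term by its all-uppercase version --
+   presumably so that Satallax eigenvariables are later treated as (uppercase) TPTP variables. *)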
+fun terms_to_upper_case var (AAbs (((var', ty), b), ts)) =
+    let
+      val bdy = terms_to_upper_case var b
+      val ts' = map (terms_to_upper_case var) ts
+    in
+      AAbs (((((var = var' ? String.implode o (map Char.toUpper) o String.explode) var'), ty),
+        bdy), ts')
+    end
+  | terms_to_upper_case var (ATerm ((var', ty), ts)) =
+    ATerm ((((var = var' ? String.implode o (map Char.toUpper) o String.explode) var'),
+      ty), map (terms_to_upper_case var) ts)
+
+fun add_quantifier_in_tree_part vars_to_add assumption_to_add (Linear_Part {node, succs}) =
+    Linear_Part {node = node, succs = map (add_quantifier_in_tree_part vars_to_add assumption_to_add) succs}
+  | add_quantifier_in_tree_part vars_to_add assumption_to_add (Tree_Part {node = Satallax_Step {rule, detailed_eigen,
+      id, role, theorem = AAtom theorem, assumptions, used_assumptions, generated_goal_assumptions},
+      deps}) =
+    let
+      val theorem' = fold terms_to_upper_case vars_to_add theorem
+      val node' = mk_satallax_step id role (AAtom theorem') assumptions rule
+        (used_assumptions @ (filter (curry (op <=) (the (Int.fromString id)) o size)
+        assumption_to_add)) generated_goal_assumptions detailed_eigen
+    in
+      if detailed_eigen <> "" then
+        Tree_Part {node = node',
+          deps = map (add_quantifier_in_tree_part (detailed_eigen :: vars_to_add)
+          (used_assumptions @ assumption_to_add)) deps}
+      else
+        Tree_Part {node = node',
+                   deps = map (add_quantifier_in_tree_part vars_to_add assumption_to_add) deps}
+    end
+
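+(* Flatten the proof graph into standard ATP proof steps. Fact names among the used assumptions
+   (recognized heuristically by their length) additionally get an "Axiom" step the first time they
+   occur. *)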
+fun transform_to_standard_atp_step already_transformed proof =
+  let
+    fun create_fact_step id =
+      ((id, [id]), Axiom, AAtom (ATerm((id, []), [])), "", [])
+    fun transform_one_step already_transformed (Satallax_Step {id, theorem, used_assumptions, role,
+        rule, ...}) =
+      let
+        val role' = role_of_tptp_string role
+        val new_transformed = List.filter
+          (fn s => size s >=4 andalso not (is_special_satallax_rule s) andalso not
+          (member (op =) already_transformed s)) used_assumptions
+      in
+        (map create_fact_step new_transformed
+        @ [((id, []), if role' = Unknown then Plain else role', theorem, rule,
+           map (fn s => (s, [])) (filter_out is_special_satallax_rule used_assumptions))],
+        new_transformed @ already_transformed)
+      end
+    fun transform_steps (Linear_Part {node, succs}) (already_transformed:string list) =
+        transform_one_step already_transformed node
+        ||> (fold_map transform_steps succs)
+        ||> apfst flat
+        |> (fn (a, (b, transformed)) => (a @ b, transformed))
+      | transform_steps (Tree_Part {node, deps}) (already_transformed: string list) =
+        fold_map transform_steps deps already_transformed
+        |>> flat
+        ||> (fn transformed => transform_one_step transformed node)
+        |> (fn (a, (b, transformed)) => (a @ b, transformed))
+  in
+    fst (transform_steps proof already_transformed)
+  end
+
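+(* Drop dependencies that do not refer to any step of the proof. *)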
+fun remove_unused_dependency steps =
+  let
+    fun find_all_ids [] = []
+      | find_all_ids (((id, _), _, _, _, _) :: steps) = id :: find_all_ids steps
+    fun keep_only_used used_ids steps =
+      let
+        fun remove_unused (((id, ids), role, theorem, rule, deps) :: steps) =
+            (((id, ids), role, theorem, rule, filter (member (op =) used_ids o fst) deps) ::
+              remove_unused steps)
+          | remove_unused [] = []
+      in
+        remove_unused steps
+      end
+  in
+    keep_only_used (find_all_ids steps) steps
+  end
+
+fun parse_proof local_name problem =
+  strip_spaces_except_between_idents
+  #> raw_explode
+  #>
+    (if local_name <> satallaxN then
+      (Scan.error (!! (fn _ => raise UNRECOGNIZED_ATP_PROOF ())
+        (Scan.finite Symbol.stopper (Scan.repeat1 (parse_line local_name problem) >> flat)))
+         #> fst)
+    else
+      (Scan.error (!! (fn _ => raise UNRECOGNIZED_ATP_PROOF ())
+        (Scan.finite Symbol.stopper (Scan.repeat1 (parse_tstp_thf0_satallax_line))))
+        #> fst
+        #> rev
+        #> map to_satallax_step
+        #> seperate_assumptions_and_steps
+        #> create_satallax_proof_graph
+        #> add_quantifier_in_tree_part [] []
+        #> transform_to_standard_atp_step []
+        #> remove_unused_dependency))
+
+fun atp_proof_of_tstplike_proof _ _ "" = []
+  | atp_proof_of_tstplike_proof local_prover problem tstp =
+    (case core_of_agsyhol_proof tstp of
+      SOME facts => facts |> map (core_inference agsyhol_core_rule)
+    | NONE =>
+      tstp ^ "$" (* the $ sign acts as a sentinel (FIXME: needed?) *)
+      |> parse_proof local_prover problem
+      |> local_prover = vampireN ? perhaps (try (sort (vampire_step_name_ord o pairself #1)))
+      |> local_prover = zipperpositionN ? rev)
+
+end;
--- a/src/HOL/Tools/ATP/atp_systems.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_systems.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -397,17 +397,16 @@
 
 (* LEO-II *)
 
-(* LEO-II supports definitions, but it performs significantly better on our
-   benchmarks when they are not used. *)
 val leo2_thf0 = THF (Monomorphic, THF_Without_Choice)
 
 val leo2_config : atp_config =
   {exec = K (["LEO2_HOME"], ["leo.opt", "leo"]),
-   arguments = fn _ => fn _ => fn _ => fn timeout => fn file_name => fn _ =>
-       "--foatp e --atp e=\"$E_HOME\"/eprover \
-       \--atp epclextract=\"$E_HOME\"/epclextract \
-       \--proofoutput 1 --timeout " ^ string_of_int (to_secs 1 timeout) ^ " " ^
-       file_name,
+   arguments = fn _ => fn full_proofs => fn _ => fn timeout => fn file_name => fn _ =>
+     "--foatp e --atp e=\"$E_HOME\"/eprover \
+     \--atp epclextract=\"$E_HOME\"/epclextract \
+     \--proofoutput 1 --timeout " ^ string_of_int (to_secs 1 timeout) ^ " " ^
+     (if full_proofs then "--notReplLeibnizEQ --notReplAndrewsEQ --notUseExtCnfCmbd " else "") ^
+     file_name,
    proof_delims = tstp_proof_delims,
    known_failures =
      [(TimedOut, "CPU time limit exceeded, terminating"),
@@ -431,9 +430,9 @@
 val satallax_config : atp_config =
   {exec = K (["SATALLAX_HOME"], ["satallax.opt", "satallax"]),
    arguments = fn _ => fn _ => fn _ => fn timeout => fn file_name => fn _ =>
-       "-p hocore -t " ^ string_of_int (to_secs 1 timeout) ^ " " ^ file_name,
+       "-p tstp -t " ^ string_of_int (to_secs 1 timeout) ^ " " ^ file_name,
    proof_delims =
-     [("% Higher-Order Unsat Core BEGIN", "% Higher-Order Unsat Core END")],
+     [("% SZS output start Proof", "% SZS output end Proof")],
    known_failures = known_szs_status_failures,
    prem_role = Hypothesis,
    best_slices =
--- a/src/HOL/Tools/ATP/atp_util.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_util.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -77,31 +77,29 @@
   | stringN_of_int k n =
     stringN_of_int (k - 1) (n div 10) ^ string_of_int (n mod 10)
 
+fun is_spaceish c = Char.isSpace c orelse c = #"\127" (* DEL -- no idea where these come from *)
+
 fun strip_spaces skip_comments is_evil =
   let
     fun strip_c_style_comment [] accum = accum
-      | strip_c_style_comment (#"*" :: #"/" :: cs) accum =
-        strip_spaces_in_list true cs accum
+      | strip_c_style_comment (#"*" :: #"/" :: cs) accum = strip_spaces_in_list true cs accum
       | strip_c_style_comment (_ :: cs) accum = strip_c_style_comment cs accum
     and strip_spaces_in_list _ [] accum = accum
       | strip_spaces_in_list true (#"%" :: cs) accum =
-        strip_spaces_in_list true (cs |> take_prefix (not_equal #"\n") |> snd)
-                             accum
-      | strip_spaces_in_list true (#"/" :: #"*" :: cs) accum =
-        strip_c_style_comment cs accum
-      | strip_spaces_in_list _ [c1] accum =
-        accum |> not (Char.isSpace c1) ? cons c1
+        strip_spaces_in_list true (cs |> take_prefix (not_equal #"\n") |> snd) accum
+      | strip_spaces_in_list true (#"/" :: #"*" :: cs) accum = strip_c_style_comment cs accum
+      | strip_spaces_in_list _ [c1] accum = accum |> not (is_spaceish c1) ? cons c1
       | strip_spaces_in_list skip_comments (cs as [_, _]) accum =
         accum |> fold (strip_spaces_in_list skip_comments o single) cs
       | strip_spaces_in_list skip_comments (c1 :: c2 :: c3 :: cs) accum =
-        if Char.isSpace c1 then
+        if is_spaceish c1 then
           strip_spaces_in_list skip_comments (c2 :: c3 :: cs) accum
-        else if Char.isSpace c2 then
-          if Char.isSpace c3 then
+        else if is_spaceish c2 then
+          if is_spaceish c3 then
             strip_spaces_in_list skip_comments (c1 :: c3 :: cs) accum
           else
             strip_spaces_in_list skip_comments (c3 :: cs)
-                (c1 :: accum |> forall is_evil [c1, c3] ? cons #" ")
+              (c1 :: accum |> forall is_evil [c1, c3] ? cons #" ")
         else
           strip_spaces_in_list skip_comments (c2 :: c3 :: cs) (cons c1 accum)
   in
--- a/src/HOL/Tools/ATP/atp_waldmeister.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/ATP/atp_waldmeister.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -181,7 +181,7 @@
     val thy = Proof_Context.theory_of ctxt
     val t = u
       |> atp_to_trm
-      |> singleton (infer_formula_types ctxt)
+      |> singleton (infer_formulas_types ctxt)
       |> HOLogic.mk_Trueprop
   in
     (name, role, t, rule, deps)
--- a/src/HOL/Tools/BNF/bnf_fp_def_sugar.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_fp_def_sugar.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -103,6 +103,7 @@
 val sel_mapN = "sel_map";
 val sel_setN = "sel_set";
 val set_emptyN = "set_empty";
+val set_inductN = "set_induct";
 
 structure Data = Generic_Data
 (
@@ -112,10 +113,16 @@
   fun merge data : T = Symtab.merge (K true) data;
 );
 
-fun choose_relator Rs AB = find_first (fastype_of #> binder_types #> (fn [A, B] => AB = (A, B))) Rs;
-fun build_the_rel ctxt Rs Ts A B = build_rel ctxt Ts (the o choose_relator Rs) (A, B);
+fun choose_binary_fun fs AB =
+  find_first (fastype_of #> binder_types #> (fn [A, B] => AB = (A, B))) fs;
+fun build_binary_fun_app fs a b =
+  Option.map (rapp b o rapp a) (choose_binary_fun fs (fastype_of a, fastype_of b));
+
+fun build_the_rel ctxt Rs Ts A B = build_rel ctxt Ts (the o choose_binary_fun Rs) (A, B);
 fun build_rel_app ctxt Rs Ts a b = build_the_rel ctxt Rs Ts (fastype_of a) (fastype_of b) $ a $ b;
 
+val name_of_set = name_of_const "set";
+
 fun fp_sugar_of ctxt =
   Symtab.lookup (Data.get (Context.Proof ctxt))
   #> Option.map (transfer_fp_sugar ctxt);
@@ -759,6 +766,98 @@
      mk_coinduct_attributes fpA_Ts (map #ctrs ctr_sugars) (map #discs ctr_sugars) mss)
   end;
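+(* Derive one set-induction rule per set function of the (co)datatypes: roughly, a property "P" of
+   atoms and (co)datatype values holds for every atom in "set_j" of a value if it is propagated by
+   each constructor, including through the set functions of nested type constructors. *)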
 
+fun derive_set_induct_thms_for_types lthy nn fpTs ctrss setss dtor_set_inducts exhausts
+    set_pre_defs ctor_defs dtor_ctors Abs_pre_inverses =
+  let
+    fun mk_prems A Ps ctr_args t ctxt =
+      (case fastype_of t of
+        Type (type_name, xs) =>
+        (case bnf_of ctxt type_name of
+          NONE => ([], ctxt)
+        | SOME bnf =>
+          let
+            fun seq_assm a set ctxt =
+              let
+                val X = HOLogic.dest_setT (range_type (fastype_of set));
+                val (x, ctxt') = yield_singleton (mk_Frees "x") X ctxt;
+                val assm = mk_Trueprop_mem (x, set $ a);
+              in
+                (case build_binary_fun_app Ps x a of
+                  NONE =>
+                  mk_prems A Ps ctr_args x ctxt'
+                  |>> map (Logic.all x o Logic.mk_implies o pair assm)
+                | SOME f =>
+                  ([Logic.all x
+                      (Logic.mk_implies (assm,
+                         Logic.mk_implies (HOLogic.mk_Trueprop f,
+                           HOLogic.mk_Trueprop (the (build_binary_fun_app Ps x ctr_args)))))],
+                   ctxt'))
+              end;
+          in
+            fold_map (seq_assm t o mk_set xs) (sets_of_bnf bnf) ctxt
+            |>> flat
+          end)
+      | T =>
+        if T = A then ([HOLogic.mk_Trueprop (the (build_binary_fun_app Ps t ctr_args))], ctxt)
+        else ([], ctxt));
+
+    fun mk_prems_for_ctr A Ps ctr ctxt =
+      let
+        val (args, ctxt') = mk_Frees "arg" (binder_types (fastype_of ctr)) ctxt;
+      in
+        fold_map (mk_prems A Ps (list_comb (ctr, args))) args ctxt'
+        |>> map (fold_rev Logic.all args) o flat
+        |>> (fn prems => (prems, mk_names (length prems) (name_of_ctr ctr)))
+      end;
+
+    fun mk_prems_and_concl_for_type A Ps ((fpT, ctrs), set) ctxt =
+      let
+        val ((x, fp), ctxt') = ctxt
+          |> yield_singleton (mk_Frees "x") A
+          ||>> yield_singleton (mk_Frees "a") fpT;
+        val concl = mk_Ball (set $ fp) (Term.absfree (dest_Free x)
+          (the (build_binary_fun_app Ps x fp)));
+      in
+        fold_map (mk_prems_for_ctr A Ps) ctrs ctxt'
+        |>> split_list
+        |>> map_prod flat flat
+        |>> apfst (rpair concl)
+      end;
+
+    fun mk_thm ctxt fpTs ctrss sets =
+      let
+        val A = HOLogic.dest_setT (range_type (fastype_of (hd sets)));
+        val (Ps, ctxt') = mk_Frees "P" (map (fn fpT => A --> fpT --> HOLogic.boolT) fpTs) ctxt;
+        val (((premises, conclusion), case_names), ctxt'') =
+          (fold_map (mk_prems_and_concl_for_type A Ps) (fpTs ~~ ctrss ~~ sets) ctxt')
+          |>> apfst split_list o split_list
+          |>> apfst (apfst flat)
+          |>> apfst (apsnd (Library.foldr1 HOLogic.mk_conj))
+          |>> apsnd flat;
+
+        val thm = Goal.prove_sorry lthy [] premises (HOLogic.mk_Trueprop conclusion)
+          (fn {context = ctxt, prems = prems} =>
+             mk_set_induct0_tac ctxt (map (certify ctxt'') Ps) prems dtor_set_inducts exhausts
+              set_pre_defs ctor_defs dtor_ctors Abs_pre_inverses)
+          |> singleton (Proof_Context.export ctxt'' ctxt)
+          |> Thm.close_derivation;
+
+        val case_names_attr =
+          Attrib.internal (K (Rule_Cases.case_names (quasi_unambiguous_case_names case_names)));
+        val induct_set_attrs = map (Attrib.internal o K o Induct.induct_pred o name_of_set) sets;
+      in
+        (thm, case_names_attr :: induct_set_attrs)
+      end
+    val consumes_attr = Attrib.internal (K (Rule_Cases.consumes 1));
+  in
+    map (fn Asets =>
+      let
+        fun massage_thm thm = rotate_prems (~1) (thm RS bspec);
+      in
+        mk_thm lthy fpTs ctrss Asets |> nn = 1 ? map_prod massage_thm (cons consumes_attr)
+      end) (transpose setss)
+  end;
+
 fun derive_coinduct_corecs_thms_for_types pre_bnfs (x, cs, cpss, ((pgss, cqgsss), _))
     dtor_coinduct dtor_injects dtor_ctors dtor_corec_thms live_nesting_bnfs fpTs Cs Xs ctrXs_Tsss
     kss mss ns fp_abs_inverses abs_inverses mk_vimage2p ctr_defss (ctr_sugars : ctr_sugar list)
@@ -1072,7 +1171,7 @@
     val ((pre_bnfs, absT_infos), (fp_res as {bnfs = fp_bnfs as any_fp_bnf :: _, ctors = ctors0,
              dtors = dtors0, xtor_co_recs = xtor_co_recs0, xtor_co_induct, dtor_ctors,
              ctor_dtors, ctor_injects, dtor_injects, xtor_map_thms, xtor_set_thmss, xtor_rel_thms,
-             xtor_co_rec_thms, rel_xtor_co_induct_thm, ...},
+             xtor_co_rec_thms, rel_xtor_co_induct_thm, dtor_set_induct_thms, ...},
            lthy)) =
       fp_bnf (construct_fp mixfixes map_bs rel_bs set_bss) fp_bs (map dest_TFree unsorted_As)
         (map dest_TFree killed_As) fp_eqs no_defs_lthy0
@@ -1143,8 +1242,8 @@
 
     fun massage_simple_notes base =
       filter_out (null o #2)
-      #> map (fn (thmN, thms, attrs) =>
-        ((Binding.qualify true base (Binding.name thmN), attrs), [(thms, [])]));
+      #> map (fn (thmN, thms, f_attrs) =>
+        ((Binding.qualify true base (Binding.name thmN), []),
+         map_index (fn (i, thm) => ([thm], f_attrs i)) thms));
 
     val massage_multi_notes =
       maps (fn (thmN, thmss, attrs) =>
@@ -1290,19 +1389,23 @@
                          else fp_map_thm RS ctor_cong RS (ctor_dtor RS sym RS trans))))
                 |> singleton (Proof_Context.export names_lthy no_defs_lthy);
 
-              fun mk_set_thm fp_set_thm ctr_def' cxIn =
+              fun mk_set0_thm fp_set_thm ctr_def' cxIn =
                 fold_thms lthy [ctr_def']
                   (unfold_thms lthy (pre_set_defs @ fp_nesting_set_maps @ live_nesting_set_maps @
                        (if fp = Least_FP then [] else [dtor_ctor]) @ sumprod_thms_set @
-                       abs_inverses)
+                       @{thms UN_Un sup_assoc[THEN sym]} @ abs_inverses)
                      (cterm_instantiate_pos [SOME cxIn] fp_set_thm))
                 |> singleton (Proof_Context.export names_lthy no_defs_lthy);
 
-              fun mk_set_thms fp_set_thm = map2 (mk_set_thm fp_set_thm) ctr_defs' cxIns;
+              fun mk_set0_thms fp_set_thm = map2 (mk_set0_thm fp_set_thm) ctr_defs' cxIns;
 
               val map_thms = map2 mk_map_thm ctr_defs' cxIns;
-              val set_thmss = map mk_set_thms fp_set_thms;
-              val set_thms = flat set_thmss;
+              val set0_thmss = map mk_set0_thms fp_set_thms;
+              val set0_thms = flat set0_thmss;
+              val set_thms = set0_thms
+                |> map (unfold_thms lthy @{thms insert_is_Un[THEN sym] Un_empty_left
+                  Un_insert_left});
 
               val sets = map (mk_set (snd (Term.dest_Type fpT))) (sets_of_bnf fp_bnf);
 
@@ -1334,7 +1437,7 @@
                   else
                     Goal.prove_sorry lthy [] [] (Logic.mk_conjunction_balanced goals)
                       (fn {context = ctxt, prems = _} =>
-                        mk_set_empty_tac ctxt (certify ctxt u) exhaust set_thms (flat disc_thmss))
+                        mk_set_empty_tac ctxt (certify ctxt u) exhaust set0_thms (flat disc_thmss))
                       |> Conjunction.elim_balanced (length goals)
                       |> Proof_Context.export names_lthy lthy
                       |> map Thm.close_derivation
@@ -1604,7 +1707,7 @@
                         Goal.prove_sorry lthy [] [] (Logic.mk_conjunction_balanced goals)
                           (fn {context = ctxt, prems = _} =>
                             mk_sel_set_tac ctxt (certify ctxt ta) exhaust (flat disc_thmss)
-                              (flat sel_thmss) set_thms)
+                              (flat sel_thmss) set0_thms)
                           |> Conjunction.elim_balanced (length goals)
                           |> Proof_Context.export names_lthy lthy
                     end;
@@ -1619,17 +1722,17 @@
                 |> map (fn (thms, attrs) => ((Binding.empty, attrs), [(thms, [])]));
 
               val notes =
-                [(disc_map_iffN, disc_map_iff_thms, simp_attrs),
-                 (mapN, map_thms, code_nitpicksimp_attrs @ simp_attrs),
-                 (rel_casesN, [rel_cases_thm], rel_cases_attrs),
-                 (rel_distinctN, rel_distinct_thms, simp_attrs),
-                 (rel_injectN, rel_inject_thms, simp_attrs),
-                 (rel_introsN, rel_intro_thms, []),
-                 (rel_selN, rel_sel_thms, []),
-                 (setN, set_thms, code_nitpicksimp_attrs @ simp_attrs),
-                 (sel_mapN, sel_map_thms, []),
-                 (sel_setN, sel_set_thms, []),
-                 (set_emptyN, set_empty_thms, [])]
+                [(disc_map_iffN, disc_map_iff_thms, K simp_attrs),
+                 (mapN, map_thms, K (code_nitpicksimp_attrs @ simp_attrs)),
+                 (rel_casesN, [rel_cases_thm], K rel_cases_attrs),
+                 (rel_distinctN, rel_distinct_thms, K simp_attrs),
+                 (rel_injectN, rel_inject_thms, K simp_attrs),
+                 (rel_introsN, rel_intro_thms, K []),
+                 (rel_selN, rel_sel_thms, K []),
+                 (setN, set_thms, K (code_nitpicksimp_attrs @ simp_attrs)),
+                 (sel_mapN, sel_map_thms, K []),
+                 (sel_setN, sel_set_thms, K []),
+                 (set_emptyN, set_empty_thms, K [])]
                 |> massage_simple_notes fp_b_name;
 
               val (noted, lthy') =
@@ -1637,13 +1740,13 @@
                 |> Spec_Rules.add Spec_Rules.Equational (`(single o lhs_head_of o hd) map_thms)
                 |> fp = Least_FP
                   ? Spec_Rules.add Spec_Rules.Equational (`(single o lhs_head_of o hd) rel_eq_thms)
-                |> Spec_Rules.add Spec_Rules.Equational (`(single o lhs_head_of o hd) set_thms)
+                |> Spec_Rules.add Spec_Rules.Equational (`(single o lhs_head_of o hd) set0_thms)
                 |> Local_Theory.notes (anonymous_notes @ notes);
 
               val subst = Morphism.thm (substitute_noted_thm noted);
             in
               (((map subst map_thms, map subst rel_inject_thms, map subst rel_distinct_thms,
-                 map (map subst) set_thmss), ctr_sugar), lthy')
+                 map (map subst) set0_thmss), ctr_sugar), lthy')
             end;
 
         fun mk_binding pre = Binding.qualify false fp_b_name (Binding.prefix_name (pre ^ "_") fp_b);
@@ -1700,8 +1803,8 @@
 
         val common_notes =
           (if nn > 1 then
-             [(inductN, [induct_thm], induct_attrs),
-              (rel_inductN, common_rel_induct_thms, common_rel_induct_attrs)]
+             [(inductN, [induct_thm], K induct_attrs),
+              (rel_inductN, common_rel_induct_thms, K common_rel_induct_attrs)]
            else
              [])
           |> massage_simple_notes fp_common_name;
@@ -1773,17 +1876,25 @@
                (rel_coinduct_attrs, common_rel_coinduct_attrs))
             end;
 
+        val (set_induct_thms, set_induct_attrss) =
+          derive_set_induct_thms_for_types lthy nn fpTs (map #ctrs ctr_sugars)
+            (map (map (mk_set As)) (map sets_of_bnf fp_bnfs)) dtor_set_induct_thms
+            (map #exhaust ctr_sugars) (flat pre_set_defss) (flat ctr_defss)
+            dtor_ctors abs_inverses
+          |> split_list;
+
         val simp_thmss =
           map6 mk_simp_thms ctr_sugars
             (map3 flat_corec_thms disc_corec_thmss disc_corec_iff_thmss sel_corec_thmss)
             mapss rel_injectss rel_distinctss setss;
 
         val common_notes =
+          (set_inductN, set_induct_thms, nth set_induct_attrss) ::
           (if nn > 1 then
-            [(coinductN, [coinduct_thm], common_coinduct_attrs),
-             (rel_coinductN, common_rel_coinduct_thms, common_rel_coinduct_attrs),
-             (strong_coinductN, [strong_coinduct_thm], common_coinduct_attrs)]
-          else [])
+            [(coinductN, [coinduct_thm], K common_coinduct_attrs),
+             (rel_coinductN, common_rel_coinduct_thms, K common_rel_coinduct_attrs),
+             (strong_coinductN, [strong_coinduct_thm], K common_coinduct_attrs)]
+           else [])
           |> massage_simple_notes fp_common_name;
 
         val notes =
--- a/src/HOL/Tools/BNF/bnf_fp_def_sugar_tactics.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_fp_def_sugar_tactics.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -39,6 +39,8 @@
   val mk_sel_map_tac: Proof.context -> cterm -> thm -> thm list -> thm list -> thm list -> tactic
   val mk_sel_set_tac: Proof.context -> cterm -> thm -> thm list -> thm list -> thm list -> tactic
   val mk_set_empty_tac: Proof.context -> cterm -> thm -> thm list -> thm list -> tactic
+  val mk_set_induct0_tac: Proof.context -> cterm list -> thm list -> thm list -> thm list ->
+    thm list -> thm list -> thm list -> thm list -> tactic
 end;
 
 structure BNF_FP_Def_Sugar_Tactics : BNF_FP_DEF_SUGAR_TACTICS =
@@ -55,9 +57,8 @@
 
 val sumprod_thms_map = @{thms id_apply map_prod_simp prod.case sum.case map_sum.simps};
 val sumprod_thms_set =
-  @{thms UN_empty UN_insert Un_empty_left Un_empty_right Un_iff UN_simps(10) UN_iff
-      Union_Un_distrib image_iff o_apply map_prod_simp
-      mem_Collect_eq prod_set_simps map_sum.simps sum_set_simps};
+  @{thms UN_empty UN_insert UN_simps(10) UN_iff Un_empty_left Un_empty_right Un_iff Union_Un_distrib
+      image_iff o_apply map_prod_simp mem_Collect_eq prod_set_simps map_sum.simps sum_set_simps};
 val sumprod_thms_rel = @{thms rel_sum_simps rel_prod_apply prod.inject id_apply conj_assoc};
 
 fun hhf_concl_conv cv ctxt ct =
@@ -257,7 +258,7 @@
         HEADGOAL (rtac exhaust THEN_ALL_NEW (rtac exhaust THEN_ALL_NEW
           (rtac (cterm_instantiate_pos (replicate 4 NONE @ [SOME cterm]) @{thm arg_cong2} RS iffD2)
             THEN' atac THEN' atac THEN' TRY o resolve_tac assms))) THEN
-        unfold_tac ctxt (ctor_inject :: rel_pre_list_def :: ctor_defs @ nesting_rel_eqs @
+        unfold_thms_tac ctxt (ctor_inject :: rel_pre_list_def :: ctor_defs @ nesting_rel_eqs @
           @{thms BNF_Comp.id_bnf_comp_def vimage2p_def}) THEN
         TRYALL (hyp_subst_tac ctxt) THEN
         unfold_thms_tac ctxt (Abs_inverse :: @{thms rel_sum_simps rel_prod_apply Inl_Inr_False
@@ -306,4 +307,28 @@
     SOME (thm RS eqFalseI) handle THM _ => NONE) discs) THEN
   ALLGOALS (rtac refl ORELSE' etac FalseE);
 
+fun mk_set_induct0_tac ctxt cts assms dtor_set_inducts exhausts set_pre_defs ctor_defs dtor_ctors
+    Abs_pre_inverses =
+  let
+    val assms_ctor_defs =
+      map (unfold_thms ctxt (@{thm BNF_Comp.id_bnf_comp_def} :: ctor_defs)) assms;
+    val exhausts' = map (fn thm => thm RS @{thm asm_rl[of "P x y" for P x y]}) exhausts
+      |> map2 (fn ct => cterm_instantiate_pos [NONE, SOME ct]) cts;
+  in
+    ALLGOALS (resolve_tac dtor_set_inducts) THEN
+    TRYALL (resolve_tac exhausts' THEN_ALL_NEW
+      (resolve_tac (map (fn ct => refl RS
+         cterm_instantiate_pos (replicate 4 NONE @ [SOME ct]) @{thm arg_cong2} RS iffD2) cts)
+        THEN' atac THEN' hyp_subst_tac ctxt)) THEN
+    unfold_thms_tac ctxt (Abs_pre_inverses @ dtor_ctors @ set_pre_defs @ ctor_defs @
+      @{thms BNF_Comp.id_bnf_comp_def o_apply sum_set_simps prod_set_simps UN_empty UN_insert
+        Un_empty_left Un_empty_right empty_iff singleton_iff}) THEN
+    REPEAT_DETERM (HEADGOAL
+      (TRY o etac UnE THEN' TRY o etac @{thm singletonE} THEN' TRY o hyp_subst_tac ctxt THEN'
+       REPEAT_DETERM o eresolve_tac @{thms UN_E UnE singletonE} THEN'
+       fold (curry (op ORELSE')) (map (fn thm =>
+         funpow (length (prems_of thm)) (fn tac => tac THEN' atac) (rtac thm)) assms_ctor_defs)
+         (etac FalseE)))
+  end;
+
 end;
--- a/src/HOL/Tools/BNF/bnf_fp_n2m.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_fp_n2m.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -240,29 +240,26 @@
     val co_recs = of_fp_res #xtor_co_recs;
     val ns = map (length o #Ts o #fp_res) fp_sugars;
 
-    fun substT rho (Type (@{type_name fun}, [T, U])) = substT rho T --> substT rho U
-      | substT rho (Type (s, Ts)) = Type (s, map (typ_subst_nonatomic rho) Ts)
-      | substT _ T = T;
-
     val typ_subst_nonatomic_sorted = fold_rev (typ_subst_nonatomic o single);
 
     fun foldT_of_recT recT =
       let
-        val ((FTXs, Xs), TX) = strip_fun_type recT |>> map_split dest_co_algT;
+        val ((FTXs, Ys), TX) = strip_fun_type recT |>> map_split dest_co_algT;
+        val Zs = union op = Xs Ys;
         fun subst (Type (C, Ts as [_, X])) =
-            if C = co_productC andalso member op = Xs X then X else Type (C, map subst Ts)
+            if C = co_productC andalso member op = Zs X then X else Type (C, map subst Ts)
           | subst (Type (C, Ts)) = Type (C, map subst Ts)
           | subst T = T;
       in
-        map2 (mk_co_algT o subst) FTXs Xs ---> TX
+        map2 (mk_co_algT o subst) FTXs Ys ---> TX
       end;
 
-    fun force_rec i TU TU_rec raw_rec =
+    fun force_rec i TU raw_rec =
       let
         val thy = Proof_Context.theory_of lthy;
 
         val approx_rec = raw_rec
-          |> force_typ names_lthy (replicate (nth ns i) dummyT ---> TU_rec);
+          |> force_typ names_lthy (replicate (nth ns i) dummyT ---> TU);
         val subst = Term.typ_subst_atomic fold_thetaAs;
 
         fun mk_fp_absT_repT fp_repT fp_absT = mk_absT thy fp_repT fp_absT ooo mk_repT;
@@ -299,9 +296,7 @@
         val i = find_index (fn T => x = T) Xs;
         val TUrec =
           (case find_first (fn f => body_fun_type (fastype_of f) = TU) recs of
-            NONE =>
-              force_rec i TU
-                (TU |> is_none b_opt ? substT (map2 mk_co_productT fpTs Xs ~~ Xs)) (nth co_recs i)
+            NONE => force_rec i TU (nth co_recs i)
           | SOME f => f);
 
         val TUs = binder_fun_types (fastype_of TUrec);
@@ -340,14 +335,21 @@
                     let
                       val (TY, (U, X)) = TU |> dest_co_algT ||> dest_co_productT;
                       val T = mk_co_algT TY U;
+                      fun mk_co_proj TU = build_map lthy [] (fn TU =>
+                        let
+                          val ((T1, T2), U) = TU |> co_swap |>> dest_co_productT
+                        in
+                          if T1 = U then co_proj1_const TU
+                          else mk_co_comp (mk_co_proj (co_swap (T1, U)),
+                            co_proj1_const (co_swap (mk_co_productT T1 T2, T1)))
+                        end) TU;
                     in
-                      (case try (force_typ names_lthy T o build_map lthy [] co_proj1_const o dest_funT) T of
-                        SOME f => mk_co_product f
-                          (fst (fst (mk_rec NONE recs lthy (mk_co_algT TY X))))
-                      | NONE => mk_map_co_product
-                          (build_map lthy [] co_proj1_const
-                            (dest_funT (mk_co_algT (dest_co_productT TY |> fst) U)))
-                          (HOLogic.id_const X))
+                      if not (can dest_co_productT TY)
+                      then mk_co_product (mk_co_proj (dest_funT T))
+                        (fst (fst (mk_rec NONE recs lthy (mk_co_algT TY X))))
+                      else mk_map_co_product
+                        (mk_co_proj (co_swap (dest_co_productT TY |> fst, U)))
+                        (HOLogic.id_const X)
                     end)
                 val smap_args = map mk_smap_arg smap_argTs;
               in
@@ -413,8 +415,8 @@
           map (fn thm => thm RS rewrite_comp_comp) @{thms map_prod.comp map_sum.comp} @
           @{thms id_apply comp_id id_comp map_prod.comp map_prod.id map_sum.comp map_sum.id};
         val rec_thms = fold_thms @ case_fp fp
-          @{thms fst_convol map_prod_o_convol convol_o}
-          @{thms case_sum_o_inj(1) case_sum_o_map_sum o_case_sum};
+          @{thms fst_convol map_prod_o_convol convol_o fst_comp_map_prod}
+          @{thms case_sum_o_inj(1) case_sum_o_map_sum o_case_sum map_sum_o_inj(1)};
 
         val eq_thm_prop_untyped = Term.aconv_untyped o pairself Thm.full_prop_of;
 
@@ -429,10 +431,11 @@
               rewrite_comp_comp)
           type_definitions bnfs);
 
-        fun mk_Rep_o_Abs thm = thm RS @{thm type_copy_Rep_o_Abs} RS rewrite_comp_comp;
+        fun mk_Rep_o_Abs thm = (thm RS @{thm type_copy_Rep_o_Abs})
+          |> (fn thm => [thm, thm RS rewrite_comp_comp]);
 
-        val fp_Rep_o_Abss = map mk_Rep_o_Abs fp_type_definitions;
-        val Rep_o_Abss = map mk_Rep_o_Abs type_definitions;
+        val fp_Rep_o_Abss = maps mk_Rep_o_Abs fp_type_definitions;
+        val Rep_o_Abss = maps mk_Rep_o_Abs type_definitions;
 
         fun tac {context = ctxt, prems = _} =
           unfold_thms_tac ctxt (flat [rec_thms, raw_co_rec_defs, pre_map_defs, fp_pre_map_defs,
@@ -463,7 +466,8 @@
         xtor_rel_thms = of_fp_res #xtor_rel_thms (*too general types and terms*),
         xtor_co_rec_thms = xtor_co_rec_thms,
         xtor_co_rec_o_map_thms = fp_rec_o_maps (*theorems about old constants*),
-        rel_xtor_co_induct_thm = rel_xtor_co_induct_thm}
+        rel_xtor_co_induct_thm = rel_xtor_co_induct_thm,
+        dtor_set_induct_thms = []}
        |> morph_fp_result (Morphism.term_morphism "BNF" (singleton (Variable.polymorphic lthy))));
   in
     (fp_res, lthy)
--- a/src/HOL/Tools/BNF/bnf_fp_util.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_fp_util.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -25,7 +25,8 @@
      xtor_rel_thms: thm list,
      xtor_co_rec_thms: thm list,
      xtor_co_rec_o_map_thms: thm list,
-     rel_xtor_co_induct_thm: thm}
+     rel_xtor_co_induct_thm: thm,
+     dtor_set_induct_thms: thm list}
 
   val morph_fp_result: morphism -> fp_result -> fp_result
 
@@ -237,11 +238,12 @@
    xtor_rel_thms: thm list,
    xtor_co_rec_thms: thm list,
    xtor_co_rec_o_map_thms: thm list,
-   rel_xtor_co_induct_thm: thm};
+   rel_xtor_co_induct_thm: thm,
+   dtor_set_induct_thms: thm list};
 
 fun morph_fp_result phi {Ts, bnfs, ctors, dtors, xtor_co_recs, xtor_co_induct,
     dtor_ctors, ctor_dtors, ctor_injects, dtor_injects, xtor_map_thms, xtor_set_thmss,
-    xtor_rel_thms, xtor_co_rec_thms, xtor_co_rec_o_map_thms, rel_xtor_co_induct_thm} =
+    xtor_rel_thms, xtor_co_rec_thms, xtor_co_rec_o_map_thms, rel_xtor_co_induct_thm,
+    dtor_set_induct_thms} =
   {Ts = map (Morphism.typ phi) Ts,
    bnfs = map (morph_bnf phi) bnfs,
    ctors = map (Morphism.term phi) ctors,
@@ -257,7 +259,8 @@
    xtor_rel_thms = map (Morphism.thm phi) xtor_rel_thms,
    xtor_co_rec_thms = map (Morphism.thm phi) xtor_co_rec_thms,
    xtor_co_rec_o_map_thms = map (Morphism.thm phi) xtor_co_rec_o_map_thms,
-   rel_xtor_co_induct_thm = Morphism.thm phi rel_xtor_co_induct_thm};
+   rel_xtor_co_induct_thm = Morphism.thm phi rel_xtor_co_induct_thm,
+   dtor_set_induct_thms = map (Morphism.thm phi) dtor_set_induct_thms}; (* FIXME: unclear what this is for *)
 
 type fp_sugar =
   {T: typ,
--- a/src/HOL/Tools/BNF/bnf_gfp.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_gfp.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -223,7 +223,7 @@
     val rel_congs = map rel_cong_of_bnf bnfs;
     val rel_converseps = map rel_conversep_of_bnf bnfs;
     val rel_Grps = map rel_Grp_of_bnf bnfs;
-    val rel_OOs = map rel_OO_of_bnf bnfs;
+    val le_rel_OOs = map le_rel_OO_of_bnf bnfs;
     val in_rels = map in_rel_of_bnf bnfs;
 
     val timer = time (timer "Extracted terms & thms");
@@ -589,7 +589,7 @@
           HOLogic.mk_Trueprop (mk_bis Bs ss B''s s''s (map2 (curry mk_rel_comp) Rs R's));
       in
         Goal.prove_sorry lthy [] [] (Logic.list_implies (prems, concl))
-          (K (mk_bis_O_tac lthy m bis_rel_thm rel_congs rel_OOs))
+          (K (mk_bis_O_tac lthy m bis_rel_thm rel_congs le_rel_OOs))
         |> Thm.close_derivation
         |> singleton (Proof_Context.export names_lthy lthy)
       end;
@@ -1659,12 +1659,12 @@
 
     (*register new codatatypes as BNFs*)
     val (timer, Jbnfs, (dtor_Jmap_o_thms, dtor_Jmap_thms), dtor_Jset_thmss',
-      dtor_Jrel_thms, Jrel_coinduct_thm, Jbnf_notes, lthy) =
+      dtor_Jrel_thms, Jrel_coinduct_thm, Jbnf_notes, dtor_Jset_induct_thms, lthy) =
       if m = 0 then
         (timer, replicate n DEADID_bnf,
         map_split (`(mk_pointfree lthy)) (map2 mk_dtor_map_DEADID_thm dtor_inject_thms map_ids),
         replicate n [], map2 mk_dtor_Jrel_DEADID_thm dtor_inject_thms bnfs,
-        mk_Jrel_DEADID_coinduct_thm (), [], lthy)
+        mk_Jrel_DEADID_coinduct_thm (), [], [], lthy)
       else let
         val fTs = map2 (curry op -->) passiveAs passiveBs;
         val gTs = map2 (curry op -->) passiveBs passiveCs;
@@ -2260,7 +2260,7 @@
             val goal = HOLogic.mk_Trueprop (Library.foldr1 HOLogic.mk_conj goals);
           in
             Goal.prove_sorry lthy [] [] goal
-              (K (mk_le_rel_OO_tac Jrel_coinduct_thm dtor_Jrel_thms rel_OOs))
+              (K (mk_le_rel_OO_tac Jrel_coinduct_thm dtor_Jrel_thms le_rel_OOs))
             |> Thm.close_derivation
             |> singleton (Proof_Context.export names_lthy lthy)
           end;
@@ -2464,7 +2464,8 @@
             bs thmss)
       in
         (timer, Jbnfs, (Jmap_thms, dtor_Jmap_thms), dtor_Jset_thmss',
-          dtor_Jrel_thms, Jrel_coinduct_thm, Jbnf_common_notes @ Jbnf_notes, lthy)
+          dtor_Jrel_thms, Jrel_coinduct_thm, Jbnf_common_notes @ Jbnf_notes, dtor_Jset_induct_thms,
+          lthy)
       end;
 
     val dtor_unfold_o_Jmap_thms = mk_xtor_un_fold_o_map_thms Greatest_FP false m
@@ -2526,7 +2527,8 @@
        ctor_injects = ctor_inject_thms, dtor_injects = dtor_inject_thms,
        xtor_map_thms = dtor_Jmap_thms, xtor_set_thmss = dtor_Jset_thmss',
        xtor_rel_thms = dtor_Jrel_thms, xtor_co_rec_thms = dtor_corec_thms,
-       xtor_co_rec_o_map_thms = dtor_corec_o_Jmap_thms, rel_xtor_co_induct_thm = Jrel_coinduct_thm}
+       xtor_co_rec_o_map_thms = dtor_corec_o_Jmap_thms, rel_xtor_co_induct_thm = Jrel_coinduct_thm,
+       dtor_set_induct_thms = dtor_Jset_induct_thms}
       |> morph_fp_result (substitute_noted_thm noted);
   in
     timer; (fp_res, lthy')
--- a/src/HOL/Tools/BNF/bnf_gfp_tactics.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_gfp_tactics.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -277,21 +277,21 @@
         REPEAT_DETERM o etac allE,
         rtac @{thm conversepI}, etac mp, etac @{thm converseD}]) (rel_congs ~~ rel_converseps)] 1;
 
-fun mk_bis_O_tac ctxt m bis_rel rel_congs rel_OOs =
+fun mk_bis_O_tac ctxt m bis_rel rel_congs le_rel_OOs =
   EVERY' [rtac (bis_rel RS iffD2), REPEAT_DETERM o dtac (bis_rel RS iffD1),
     REPEAT_DETERM o etac conjE, rtac conjI,
     CONJ_WRAP' (K (EVERY' [etac @{thm relcomp_subset_Sigma}, atac])) rel_congs,
-    CONJ_WRAP' (fn (rel_cong, rel_OO) =>
+    CONJ_WRAP' (fn (rel_cong, le_rel_OO) =>
       EVERY' [rtac allI, rtac allI, rtac impI,
         rtac (rel_cong RS @{thm eq_refl} RS @{thm predicate2D}),
         REPEAT_DETERM_N m o rtac @{thm eq_OO},
         REPEAT_DETERM_N (length rel_congs) o rtac @{thm relcompp_in_rel},
-        rtac (rel_OO RS sym RS @{thm eq_refl} RS @{thm predicate2D}),
+        rtac (le_rel_OO RS @{thm predicate2D}),
         etac @{thm relcompE},
         REPEAT_DETERM o dtac prod_injectD,
         etac conjE, hyp_subst_tac ctxt,
         REPEAT_DETERM o etac allE, rtac @{thm relcomppI},
-        etac mp, atac, etac mp, atac]) (rel_congs ~~ rel_OOs)] 1;
+        etac mp, atac, etac mp, atac]) (rel_congs ~~ le_rel_OOs)] 1;
 
 fun mk_bis_Gr_tac ctxt bis_rel rel_Grps mor_images morEs coalg_ins =
   unfold_thms_tac ctxt (bis_rel :: @{thm eq_alt} :: @{thm in_rel_Gr} :: rel_Grps) THEN
@@ -898,14 +898,14 @@
   EVERY' (map rtac [@{thm UNION_Cinfinite_bound}, ordIso_ordLeq_trans, @{thm card_of_nat},
     @{thm natLeq_ordLeq_cinfinite}, sbd_Cinfinite, ballI, col_bd, sbd_Cinfinite]) 1;
 
-fun mk_le_rel_OO_tac coinduct rel_Jrels rel_OOs =
-  EVERY' (rtac coinduct :: map2 (fn rel_Jrel => fn rel_OO =>
+fun mk_le_rel_OO_tac coinduct rel_Jrels le_rel_OOs =
+  EVERY' (rtac coinduct :: map2 (fn rel_Jrel => fn le_rel_OO =>
     let val Jrel_imp_rel = rel_Jrel RS iffD1;
     in
-      EVERY' [rtac (rel_OO RS sym RS @{thm eq_refl} RS @{thm predicate2D}), etac @{thm relcomppE},
+      EVERY' [rtac (le_rel_OO RS @{thm predicate2D}), etac @{thm relcomppE},
       rtac @{thm relcomppI}, etac Jrel_imp_rel, etac Jrel_imp_rel]
     end)
-  rel_Jrels rel_OOs) 1;
+  rel_Jrels le_rel_OOs) 1;
 
 fun mk_wit_tac ctxt n dtor_ctors dtor_set wit coind_wits =
   ALLGOALS (TRY o (eresolve_tac coind_wits THEN' rtac refl)) THEN
@@ -973,8 +973,6 @@
                 hyp_subst_tac ctxt,
                 dtac (in_Jrel RS iffD1),
                 dtac @{thm someI_ex}, REPEAT_DETERM o etac conjE,
-                TRY o
-                  EVERY' [dtac (Thm.permute_prems 0 1 @{thm ssubst_mem}), atac, hyp_subst_tac ctxt],
                 REPEAT_DETERM o eresolve_tac [CollectE, conjE], atac])
             (rev (active_set_map0s ~~ in_Jrels))])
         (dtor_sets ~~ passive_set_map0s),
--- a/src/HOL/Tools/BNF/bnf_lfp.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_lfp.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -168,7 +168,7 @@
     val map_ids = map map_id_of_bnf bnfs;
     val set_mapss = map set_map_of_bnf bnfs;
     val rel_mono_strongs = map rel_mono_strong_of_bnf bnfs;
-    val rel_OOs = map rel_OO_of_bnf bnfs;
+    val le_rel_OOs = map le_rel_OO_of_bnf bnfs;
 
     val timer = time (timer "Extracted terms & thms");
 
@@ -1691,7 +1691,7 @@
           in
             Goal.prove_sorry lthy [] [] goal
               (fn {context = ctxt, prems = _} => mk_le_rel_OO_tac ctxt m induct ctor_nchotomy_thms
-                ctor_Irel_thms rel_mono_strongs rel_OOs)
+                ctor_Irel_thms rel_mono_strongs le_rel_OOs)
             |> Thm.close_derivation
             |> singleton (Proof_Context.export names_lthy lthy)
           end;
@@ -1814,7 +1814,8 @@
        ctor_injects = ctor_inject_thms, dtor_injects = dtor_inject_thms,
        xtor_map_thms = ctor_Imap_thms, xtor_set_thmss = ctor_Iset_thmss',
        xtor_rel_thms = ctor_Irel_thms, xtor_co_rec_thms = ctor_rec_thms,
-       xtor_co_rec_o_map_thms = ctor_rec_o_Imap_thms, rel_xtor_co_induct_thm = Irel_induct_thm}
+       xtor_co_rec_o_map_thms = ctor_rec_o_Imap_thms, rel_xtor_co_induct_thm = Irel_induct_thm,
+       dtor_set_induct_thms = []}
       |> morph_fp_result (substitute_noted_thm noted);
   in
     timer; (fp_res, lthy')
--- a/src/HOL/Tools/BNF/bnf_lfp_compat.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_lfp_compat.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -80,10 +80,27 @@
     val (orig_descr' :: nested_descrs, _) =
       Datatype_Aux.unfold_datatypes lthy orig_descr all_infos orig_descr nn_fp;
 
+    fun cliquify_descr [] = []
+      | cliquify_descr [entry] = [[entry]]
+      | cliquify_descr (full_descr as (_, (T_name1, _, _)) :: _) =
+        let
+          val nn =
+            if member (op =) fpT_names T_name1 then
+              nn_fp
+            else
+              (case Symtab.lookup all_infos T_name1 of
+                SOME {descr, ...} =>
+                length (filter_out (exists Datatype_Aux.is_rec_type o #2 o snd) descr)
+              | NONE => raise Fail "unknown old-style datatype");
+        in
+          chop nn full_descr ||> cliquify_descr |> op ::
+        end;
+
     (* put nested types before the types that nest them, as needed for N2M *)
     val descrs = burrow reindex_desc (orig_descr' :: rev nested_descrs);
     val (cliques, descr) =
-      split_list (flat (map_index (fn (i, descr) => map (pair i) descr) descrs));
+      split_list (flat (map_index (fn (i, descr) => map (pair i) descr)
+        (maps cliquify_descr descrs)));
 
     val dest_dtyp = Datatype_Aux.typ_of_dtyp descr;
 
@@ -124,10 +141,10 @@
         distincts, case_thms, case_cong, weak_case_cong, split, split_asm, ...}, ...} : fp_sugar) =
       (T_name0,
        {index = kk, descr = descr, inject = injects, distinct = distincts, induct = induct,
-       inducts = inducts, exhaust = exhaust, nchotomy = nchotomy, rec_names = recs,
-       rec_rewrites = rec_thms, case_name = fst (dest_Const casex), case_rewrites = case_thms,
-       case_cong = case_cong, weak_case_cong = weak_case_cong, split = split,
-       split_asm = split_asm});
+        inducts = inducts, exhaust = exhaust, nchotomy = nchotomy, rec_names = recs,
+        rec_rewrites = rec_thms, case_name = fst (dest_Const casex), case_rewrites = case_thms,
+        case_cong = case_cong, weak_case_cong = weak_case_cong, split = split,
+        split_asm = split_asm});
 
     val infos = map_index mk_info (take nn_fp fp_sugars);
 
--- a/src/HOL/Tools/BNF/bnf_lfp_tactics.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/BNF/bnf_lfp_tactics.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -582,17 +582,17 @@
     (induct_tac THEN' EVERY' (map3 mk_map_cong0 ctor_maps map_cong0s set_setsss)) 1
   end;
 
-fun mk_le_rel_OO_tac ctxt m induct ctor_nchotomys ctor_Irels rel_mono_strongs rel_OOs =
+fun mk_le_rel_OO_tac ctxt m induct ctor_nchotomys ctor_Irels rel_mono_strongs le_rel_OOs =
   EVERY' (rtac induct ::
-  map4 (fn nchotomy => fn Irel => fn rel_mono => fn rel_OO =>
+  map4 (fn nchotomy => fn Irel => fn rel_mono => fn le_rel_OO =>
     EVERY' [rtac impI, etac (nchotomy RS @{thm nchotomy_relcomppE}),
       REPEAT_DETERM_N 2 o dtac (Irel RS iffD1), rtac (Irel RS iffD2),
-      rtac rel_mono, rtac (rel_OO RS @{thm predicate2_eqD} RS iffD2),
+      rtac rel_mono, rtac (le_rel_OO RS @{thm predicate2D}),
       rtac @{thm relcomppI}, atac, atac,
       REPEAT_DETERM_N m o EVERY' [rtac ballI, rtac ballI, rtac impI, atac],
-      REPEAT_DETERM_N (length rel_OOs) o
+      REPEAT_DETERM_N (length le_rel_OOs) o
         EVERY' [rtac ballI, rtac ballI, Goal.assume_rule_tac ctxt]])
-  ctor_nchotomys ctor_Irels rel_mono_strongs rel_OOs) 1;
+  ctor_nchotomys ctor_Irels rel_mono_strongs le_rel_OOs) 1;
 
 (* BNF tactics *)
 
@@ -682,8 +682,6 @@
                   @{thms case_prodE iffD1[OF prod.inject, elim_format]}),
                 hyp_subst_tac ctxt,
                 dtac (in_Irel RS iffD1), dtac @{thm someI_ex}, REPEAT_DETERM o etac conjE,
-                TRY o
-                  EVERY' [dtac (Thm.permute_prems 0 1 @{thm ssubst_mem}), atac, hyp_subst_tac ctxt],
                 REPEAT_DETERM o eresolve_tac [CollectE, conjE], atac])
             (rev (active_set_map0s ~~ in_Irels))])
         (ctor_sets ~~ passive_set_map0s),
--- a/src/HOL/Tools/Ctr_Sugar/ctr_sugar.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Ctr_Sugar/ctr_sugar.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -256,12 +256,6 @@
 fun mk_disc_or_sel Ts t =
   subst_nonatomic_types (snd (Term.dest_Type (domain_type (fastype_of t))) ~~ Ts) t;
 
-fun name_of_const what t =
-  (case head_of t of
-    Const (s, _) => s
-  | Free (s, _) => s
-  | _ => error ("Cannot extract name of " ^ what));
-
 val name_of_ctr = name_of_const "constructor";
 
 fun name_of_disc t =
--- a/src/HOL/Tools/Ctr_Sugar/ctr_sugar_util.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Ctr_Sugar/ctr_sugar_util.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -39,6 +39,8 @@
     (string * sort) list * Proof.context
   val variant_tfrees: string list -> Proof.context -> typ list * Proof.context
 
+  val name_of_const: string -> term -> string
+
   val typ_subst_nonatomic: (typ * typ) list -> typ -> typ
   val subst_nonatomic_types: (typ * typ) list -> term -> term
 
@@ -177,6 +179,12 @@
   apfst (map TFree) o
     variant_types (map (ensure_prefix "'") ss) (replicate (length ss) @{sort type});
 
+fun name_of_const what t =
+  (case head_of t of
+    Const (s, _) => s
+  | Free (s, _) => s
+  | _ => error ("Cannot extract name of " ^ what));
+
 (*Replace each Ti by Ui (starting from the leaves); inst = [(T1, U1), ..., (Tn, Un)].*)
 fun typ_subst_nonatomic [] = I
   | typ_subst_nonatomic inst =
--- a/src/HOL/Tools/SMT2/smt2_real.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/smt2_real.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -36,7 +36,7 @@
   fold (SMT2_Builtin.add_builtin_fun' SMTLIB2_Interface.smtlib2C) [
     (@{const less (real)}, "<"),
     (@{const less_eq (real)}, "<="),
-    (@{const uminus (real)}, "~"),
+    (@{const uminus (real)}, "-"),
     (@{const plus (real)}, "+"),
     (@{const minus (real)}, "-") ] #>
   SMT2_Builtin.add_builtin_fun SMTLIB2_Interface.smtlib2C
--- a/src/HOL/Tools/SMT2/smt2_systems.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/smt2_systems.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -37,6 +37,8 @@
     val (l, ls) = split_first (snd (take_prefix (curry (op =) "") lines))
   in (test_outcome solver_name l, ls) end
 
+fun on_first_non_unsupported_line test_outcome solver_name lines =
+  on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines)
 
 (* CVC3 *)
 
@@ -85,6 +87,27 @@
 
 end
 
+(* veriT *)
+
+val veriT: SMT2_Solver.solver_config = {
+  name = "veriT",
+  class = K SMTLIB2_Interface.smtlib2C,
+  avail = make_avail "VERIT",
+  command = make_command "VERIT",
+  options = (fn ctxt => [
+    "--proof-version=1",
+    "--proof=-",
+    "--proof-prune",
+    "--proof-merge",
+    "--disable-print-success",
+    "--disable-banner",
+    "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT2_Config.timeout))]),
+  smt_options = [],
+  default_max_relevant = 120 (* FUDGE *),
+  outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat"
+    "warning : proof_done: status is still open"),
+  parse_proof = SOME VeriT_Proof_Parse.parse_proof,
+  replay = NONE }
 
 (* Z3 *)
 
@@ -160,6 +183,7 @@
 val _ = Theory.setup (
   SMT2_Solver.add_solver cvc3 #>
   SMT2_Solver.add_solver cvc4 #>
+  SMT2_Solver.add_solver veriT #>
   SMT2_Solver.add_solver z3)
 
 end;
--- a/src/HOL/Tools/SMT2/smtlib2.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/smtlib2.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -118,7 +118,7 @@
       read l cs None ((S (rev ts1) :: ts2) :: tss)
   | read l ("#" :: "x" :: cs) None (ts :: tss) =
       token read_hex l cs ts tss
-  | read l ("#" :: cs) None (ts :: tss) =
+  | read l ("#" :: "b" :: cs) None (ts :: tss) =
       token read_bin l cs ts tss
   | read l (":" :: cs) None (ts :: tss) =
       token (read_sym Key) l cs ts tss
--- a/src/HOL/Tools/SMT2/smtlib2_interface.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/smtlib2_interface.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -11,6 +11,7 @@
   val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic
   val translate_config: Proof.context -> SMT2_Translate.config
   val assert_index_of_name: string -> int
+  val assert_prefix: string
 end;
 
 structure SMTLIB2_Interface: SMTLIB2_INTERFACE =
@@ -48,7 +49,7 @@
     (@{const If ('a)}, "ite"),
     (@{const less (int)}, "<"),
     (@{const less_eq (int)}, "<="),
-    (@{const uminus (int)}, "~"),
+    (@{const uminus (int)}, "-"),
     (@{const plus (int)}, "+"),
     (@{const minus (int)}, "-")] #>
   SMT2_Builtin.add_builtin_fun smtlib2C
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT2/smtlib2_isar.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -0,0 +1,71 @@
+(*  Title:      HOL/Tools/SMT2/smtlib2_isar.ML
+    Author:     Jasmin Blanchette, TU Muenchen
+    Author:     Mathias Fleury, ENS Rennes
+
+General tools for Isar proof reconstruction.
+*)
+
+signature SMTLIB2_ISAR =
+sig
+  val unlift_term: term list -> term -> term
+  val postprocess_step_conclusion: theory -> thm list -> term list -> term -> term
+  val normalizing_prems: Proof.context -> term -> (string * string list) list
+  val distinguish_conjecture_and_hypothesis: ''a list -> ''b -> ''b -> ''b list ->
+    (''a * 'c) list -> 'c list -> 'c -> (ATP_Problem.atp_formula_role * 'c) option
+  val unskolemize_names: term -> term
+end;
+
+structure SMTLIB2_Isar: SMTLIB2_ISAR =
+struct
+
+open ATP_Util
+open ATP_Problem
+open ATP_Proof_Reconstruct
+
+fun unlift_term ll_defs =
+  let
+    val lifted = map (ATP_Util.extract_lambda_def dest_Free o ATP_Util.hol_open_form I) ll_defs
+
+    fun un_free (t as Free (s, _)) =
+       (case AList.lookup (op =) lifted s of
+         SOME t => un_term t
+       | NONE => t)
+     | un_free t = t
+    and un_term t = map_aterms un_free t
+  in un_term end
+
+(* It is not entirely clear why this should be necessary, especially for abstraction variables. *)
+val unskolemize_names =
+  Term.map_abs_vars (perhaps (try Name.dest_skolem))
+  #> Term.map_aterms (perhaps (try (fn Free (s, T) => Free (Name.dest_skolem s, T))))
+
+fun postprocess_step_conclusion thy rewrite_rules ll_defs =
+  Raw_Simplifier.rewrite_term thy rewrite_rules []
+  #> Object_Logic.atomize_term thy
+  #> not (null ll_defs) ? unlift_term ll_defs
+  #> simplify_bool
+  #> unskolemize_names
+  #> HOLogic.mk_Trueprop
+
+fun normalizing_prems ctxt concl0 =
+  SMT2_Normalize.case_bool_entry :: SMT2_Normalize.special_quant_table @
+  SMT2_Normalize.abs_min_max_table
+  |> map_filter (fn (c, th) =>
+    if exists_Const (curry (op =) c o fst) concl0 then
+      let val s = short_thm_name ctxt th in SOME (s, [s]) end
+    else
+      NONE)
+
+fun distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts hyp_ts
+    concl_t =
+  (case ss of
+    [s] => SOME (Axiom, the (AList.lookup (op =) fact_helper_ts s))
+  | _ =>
+    if id = conjecture_id then
+      SOME (Conjecture, concl_t)
+    else
+     (case find_index (curry (op =) id) prem_ids of
+       ~1 => NONE (* lambda-lifting definition *)
+     | i => SOME (Hypothesis, nth hyp_ts i)))
+
+end;
--- a/src/HOL/Tools/SMT2/smtlib2_proof.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/smtlib2_proof.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -21,8 +21,7 @@
     ('a, 'b) context -> 'c * ('d, 'b) context
   val next_id: ('a, 'b) context -> int * ('a, 'b) context
   val with_fresh_names: (('a list, 'b) context ->
-    term * ((string * (string * typ)) list, 'b) context) -> ('c, 'b) context ->
-    (term * string list) * ('c, 'b) context
+    term * ((string * (string * typ)) list, 'b) context) -> ('c, 'b) context -> (term * string list)
 
   (*type and term parsers*)
   type type_parser = SMTLIB2.tree * typ list -> typ option
@@ -56,7 +55,7 @@
   extra: 'a}
 
 fun mk_context ctxt id syms typs funs extra: ('a, 'b) context =
-  {ctxt=ctxt, id=id, syms=syms, typs=typs, funs=funs, extra=extra}
+  {ctxt = ctxt, id = id, syms = syms, typs = typs, funs = funs, extra = extra}
 
 fun empty_context ctxt typs funs = mk_context ctxt 1 Symtab.empty typs funs []
 
@@ -82,7 +81,7 @@
 fun next_id ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) =
   (id, mk_context ctxt (id + 1) syms typs funs extra)
 
-fun with_fresh_names f ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) =
+fun with_fresh_names f ({ctxt, id, syms, typs, funs, ...}: ('a, 'b) context) =
   let
     fun bind (_, v as (_, T)) t = Logic.all_const T $ Term.absfree v t
 
@@ -93,11 +92,11 @@
       singleton (Proof_Context.standard_term_check_finish ctxt)
     fun infer ctxt t = if needs_infer t then infer_types ctxt t else t
 
-    val (t, {ctxt=ctxt', extra=names, ...}: ((string * (string * typ)) list, 'b) context) =
+    val (t, {ctxt = ctxt', extra = names, ...}: ((string * (string * typ)) list, 'b) context) =
       f (mk_context ctxt id syms typs funs [])
     val t' = infer ctxt' (fold_rev bind names (HOLogic.mk_Trueprop t))
   in
-    ((t', map fst names), mk_context ctxt id syms typs funs extra)
+    (t', map fst names)
   end
 
 fun lookup_typ ({typs, ...}: ('a, 'b) context) = Symtab.lookup typs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT2/verit_isar.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -0,0 +1,59 @@
+(*  Title:      HOL/Tools/SMT2/verit_isar.ML
+    Author:     Mathias Fleury, TU Muenchen
+    Author:     Jasmin Blanchette, TU Muenchen
+
+VeriT proofs as generic ATP proofs for Isar proof reconstruction.
+*)
+
+signature VERIT_ISAR =
+sig
+  type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step
+  val atp_proof_of_veriT_proof: Proof.context -> term list -> thm list -> term list -> term ->
+    (string * term) list -> int list -> int -> (int * string) list -> VeriT_Proof.veriT_step list ->
+    (term, string) ATP_Proof.atp_step list
+end;
+
+structure VeriT_Isar: VERIT_ISAR =
+struct
+
+open ATP_Util
+open ATP_Problem
+open ATP_Proof
+open ATP_Proof_Reconstruct
+open SMTLIB2_Isar
+open VeriT_Proof
+
+fun atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids
+    conjecture_id fact_helper_ids proof =
+  let
+    val thy = Proof_Context.theory_of ctxt
+    fun steps_of (VeriT_Proof.VeriT_Step {id, rule, prems, concl, ...}) =
+      let
+        val concl' = postprocess_step_conclusion thy rewrite_rules ll_defs concl
+        fun standard_step role = ((id, []), role, concl', rule, map (fn id => (id, [])) prems)
+      in
+        if rule = veriT_input_rule then
+          let val ss = the_list (AList.lookup (op =) fact_helper_ids (the (Int.fromString id))) in
+            (case distinguish_conjecture_and_hypothesis ss (the (Int.fromString id))
+                conjecture_id prem_ids fact_helper_ts hyp_ts concl_t of
+              NONE => []
+            | SOME (role0, concl00) =>
+              let
+                val name0 = (id ^ "a", ss)
+                val concl0 = unskolemize_names concl00
+              in
+                [(name0, role0, concl0, rule, []),
+                 ((id, []), Plain, concl', veriT_rewrite_rule,
+                  name0 :: normalizing_prems ctxt concl0)]
+              end)
+          end
+        else if rule = veriT_tmp_ite_elim_rule then
+          [standard_step Lemma]
+        else
+          [standard_step Plain]
+      end
+  in
+    maps steps_of proof
+  end
+
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT2/verit_proof.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -0,0 +1,334 @@
+(*  Title:      HOL/Tools/SMT2/verit_proof.ML
+    Author:     Mathias Fleury, ENS Rennes
+    Author:     Sascha Boehme, TU Muenchen
+
+VeriT proofs: parsing and abstract syntax tree.
+*)
+
+signature VERIT_PROOF =
+sig
+  (*proofs*)
+  datatype veriT_step = VeriT_Step of {
+    id: string,
+    rule: string,
+    prems: string list,
+    concl: term,
+    fixes: string list}
+
+  (*proof parser*)
+  val parse: typ Symtab.table -> term Symtab.table -> string list ->
+    Proof.context -> veriT_step list * Proof.context
+
+  val veriT_step_prefix: string
+  val veriT_input_rule: string
+  val veriT_la_generic_rule: string
+  val veriT_rewrite_rule: string
+  val veriT_simp_arith_rule: string
+  val veriT_tmp_ite_elim_rule: string
+  val veriT_tmp_skolemize_rule: string
+end;
+
+structure VeriT_Proof: VERIT_PROOF =
+struct
+
+open SMTLIB2_Proof
+
+datatype veriT_node = VeriT_Node of {
+  id: string,
+  rule: string,
+  prems: string list,
+  concl: term,
+  bounds: string list}
+
+fun mk_node id rule prems concl bounds =
+  VeriT_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds}
+
+datatype veriT_step = VeriT_Step of {
+  id: string,
+  rule: string,
+  prems: string list,
+  concl: term,
+  fixes: string list}
+
+fun mk_step id rule prems concl fixes =
+  VeriT_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes}
+
+val veriT_step_prefix = ".c"
+val veriT_alpha_conv_rule = "tmp_alphaconv"
+val veriT_input_rule = "input"
+val veriT_la_generic_rule = "la_generic"
+val veriT_rewrite_rule = "__rewrite" (* arbitrary *)
+val veriT_simp_arith_rule = "simp_arith"
+val veriT_tmp_ite_elim_rule = "tmp_ite_elim"
+val veriT_tmp_skolemize_rule = "tmp_skolemize"
+
+(* proof parser *)
+
+fun node_of p cx =
+  ([], cx)
+  ||>> `(with_fresh_names (term_of p))
+  |>> snd
+
+(*in order to get Z3-style quantification*)
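+(*for illustration: a tree of the shape (forall b1 ... bn body) is rewritten to
+  (forall (b1 ... bn) body), i.e. the binders are grouped into a single list; likewise for exists*)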
+fun repair_quantification (SMTLIB2.S (SMTLIB2.Sym "forall" :: l)) =
+    let val (quantified_vars, t) = split_last (map repair_quantification l)
+    in
+      SMTLIB2.S (SMTLIB2.Sym "forall" :: SMTLIB2.S quantified_vars :: t :: [])
+    end
+  | repair_quantification (SMTLIB2.S (SMTLIB2.Sym "exists" :: l)) =
+    let val (quantified_vars, t) = split_last (map repair_quantification l)
+    in
+      SMTLIB2.S (SMTLIB2.Sym "exists" :: SMTLIB2.S quantified_vars :: t :: [])
+    end
+  | repair_quantification (SMTLIB2.S l) = SMTLIB2.S (map repair_quantification l)
+  | repair_quantification x = x
+
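+(*Drop quantifiers binding a variable whose name has one of the given free-variable names as a
+  prefix and replace the bound variable by a free variable of that name; used below for the
+  tmp_skolemize and tmp_ite_elim steps.*)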
+fun replace_bound_var_by_free_var (q $ Abs (var, ty, u)) free_var =
+    (case List.find (fn v => String.isPrefix v var) free_var of
+      NONE => q $ Abs (var, ty, replace_bound_var_by_free_var u free_var)
+    | SOME _ => replace_bound_var_by_free_var (Term.subst_bound (Free (var, ty), u)) free_var)
+  | replace_bound_var_by_free_var (u $ v) free_vars = replace_bound_var_by_free_var u free_vars $
+     replace_bound_var_by_free_var v free_vars
+  | replace_bound_var_by_free_var u _ = u
+
+fun find_type_in_formula (Abs (v, ty, u)) var_name =
+    if String.isPrefix var_name v then SOME ty else find_type_in_formula u var_name
+  | find_type_in_formula (u $ v) var_name =
+    (case find_type_in_formula u var_name of
+      NONE => find_type_in_formula v var_name
+    | a => a)
+  | find_type_in_formula _ _ = NONE
+
+fun add_bound_variables_to_ctxt cx bounds concl =
+    fold (fn a => fn b => update_binding a b)
+      (map (fn s => ((s, Term (Free (s, the_default dummyT (find_type_in_formula concl s))))))
+       bounds) cx
+
+fun update_step_and_cx (st as VeriT_Node {id, rule, prems, concl, bounds}) cx =
+  if rule = veriT_tmp_ite_elim_rule then
+    (mk_node id rule prems concl bounds, add_bound_variables_to_ctxt cx bounds concl)
+  else if rule = veriT_tmp_skolemize_rule then
+    let
+      val concl' = replace_bound_var_by_free_var concl bounds
+    in
+      (mk_node id rule prems concl' [], add_bound_variables_to_ctxt cx bounds concl)
+    end
+  else
+    (st, cx)
+
+(*FIXME: using a reference would be better to know the numbers of the steps to add*)
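+(*Summary of the code below, for orientation: steps of a subproof get their id prefixed with the
+  id of the enclosing ("father") step; the subproof's "input" steps are dropped and their
+  conclusions are inlined as Pure implications into the steps that depended on them; the last
+  remaining subproof step becomes an additional premise of the enclosing step.*)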
+fun fix_subproof_steps ((((id_of_father_step, rule), prems), subproof), ((step_concl, bounds),
+    cx)) =
+  let
+    fun mk_prop_of_term concl = (fastype_of concl = @{typ "bool"} ?
+      curry (op $) @{term "Trueprop"}) concl
+    fun inline_assumption assumption assumption_id (st as VeriT_Node {id, rule, prems, concl,
+        bounds}) =
+      if List.find (curry (op =) assumption_id) prems <> NONE then
+        let
+          val prems' = filter_out (curry (op =) assumption_id) prems
+        in
+          mk_node id rule prems'
+            (Const (@{const_name "Pure.imp"}, @{typ "prop"} --> @{typ "prop"} --> @{typ "prop"})
+            $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds
+        end
+      else
+        st
+    fun find_input_steps_and_inline [] last_step = ([], last_step)
+      | find_input_steps_and_inline (VeriT_Node {id = id', rule, prems, concl, bounds} :: steps)
+          last_step =
+        if rule = veriT_input_rule then
+          find_input_steps_and_inline (map (inline_assumption concl id') steps) last_step
+        else
+          apfst (cons (mk_node (id_of_father_step ^ id') rule prems concl bounds))
+            (find_input_steps_and_inline steps (id_of_father_step ^ id'))
+    val (subproof', last_step_id) = find_input_steps_and_inline subproof ""
+    val prems' =
+      if last_step_id = "" then prems
+      else
+        (case prems of
+          NONE => SOME [last_step_id]
+        | SOME l => SOME (last_step_id :: l))
+  in
+    (subproof', (((((id_of_father_step, rule), prems'), step_concl), bounds), cx))
+  end
+
+(*
+Each veriT proof step has one of the following two forms:
+  (set id rule :clauses(...) :args(..) :conclusion (...)).
+or
+  (set id subproof (set ...) :conclusion (...)).
+*)
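+(*For illustration only (the conclusion term "p" is made up): a minimal instance of the first
+  form, with the optional :clauses and :args parts omitted, would look like
+  (set .c1 input :conclusion (p))*)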
+
+fun parse_proof_step cx =
+  let
+    fun rotate_pair (a, (b, c)) = ((a, b), c)
+    fun get_id (SMTLIB2.S [SMTLIB2.Sym "set", SMTLIB2.Sym id, SMTLIB2.S l]) = (id, l)
+      | get_id t = raise Fail ("unrecognized veriT proof step: " ^ PolyML.makestring t)
+    fun parse_rule (SMTLIB2.Sym rule :: l) = (rule, l)
+    fun parse_source (SMTLIB2.Key "clauses" :: SMTLIB2.S source :: l) =
+        (SOME (map (fn (SMTLIB2.Sym id) => id) source), l)
+      | parse_source l = (NONE, l)
+    fun parse_subproof cx id_of_father_step ((subproof_step as SMTLIB2.S (SMTLIB2.Sym "set" :: _)) :: l) =
+        let val (subproof_steps, cx') = parse_proof_step cx subproof_step in
+          apfst (apfst (curry (op @) subproof_steps)) (parse_subproof cx' id_of_father_step l)
+        end
+      | parse_subproof cx _ l = (([], cx), l)
+    fun skip_args (SMTLIB2.Key "args" :: SMTLIB2.S _ :: l) = l
+      | skip_args l = l
+    fun parse_conclusion (SMTLIB2.Key "conclusion" :: SMTLIB2.S concl :: []) = concl
+    fun make_or_from_clausification l =
+      foldl1 (fn ((concl1, bounds1), (concl2, bounds2)) =>
+        (HOLogic.mk_disj (perhaps (try HOLogic.dest_Trueprop) concl1,
+        perhaps (try HOLogic.dest_Trueprop) concl2), bounds1 @ bounds2)) l
+    fun to_node (((((id, rule), prems), concl), bounds), cx) = (mk_node id rule
+      (the_default [] prems) concl bounds, cx)
+  in
+    get_id
+    ##> parse_rule
+    #> rotate_pair
+    ##> parse_source
+    #> rotate_pair
+    ##> skip_args
+    #> (fn (((id, rule), prems), sub) => (((id, rule), prems), parse_subproof cx id sub))
+    #> rotate_pair
+    ##> parse_conclusion
+    ##> map repair_quantification
+    #> (fn ((((id, rule), prems), (subproof, cx)), terms) =>
+         (((((id, rule), prems), subproof), fold_map (fn t => fn cx => node_of t cx) terms cx)))
+    ##> apfst (fn [] => (@{const False}, []) | concls => make_or_from_clausification concls)
+    #> fix_subproof_steps
+    ##> to_node
+    #> (fn (subproof, (step, cx)) => (subproof @ [step], cx))
+    #-> fold_map update_step_and_cx
+  end
+
+(*subproofs are written on multiple lines: the SMTLIB2 parser cannot parse them, because
+parentheses are unbalanced on each line*)
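+(*hence consecutive lines are concatenated, tracking the parenthesis balance, until the count
+returns to zero, so that each resulting chunk holds exactly one complete step*)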
+fun seperate_into_steps lines =
+  let
+    fun count ("(" :: l) n = count l (n+1)
+      | count (")" :: l) n = count l (n-1)
+      | count (_ :: l) n = count l n
+      | count [] n = n
+    fun seperate (line :: l) actual_lines m =
+        let val n = count (raw_explode line) 0 in
+          if m + n = 0 then
+            [actual_lines ^ line] :: seperate l "" 0
+          else seperate l (actual_lines ^ line) (m + n)
+        end
+      | seperate [] _ 0 = []
+  in
+    seperate lines "" 0
+  end
+
+(* VeriT adds @ before every variable. *)
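+(* For illustration: remove_all_at rewrites SMTLIB2.Sym "@x" to SMTLIB2.Sym "x", recursing into
+   nested S-expressions; keywords and other tokens are kept unchanged. *)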
+fun remove_all_at (SMTLIB2.Sym v :: l) = SMTLIB2.Sym (perhaps (try (unprefix "@")) v) :: remove_all_at l
+  | remove_all_at (SMTLIB2.S l :: l') = SMTLIB2.S (remove_all_at l) :: remove_all_at l'
+  | remove_all_at (SMTLIB2.Key v :: l) = SMTLIB2.Key v :: remove_all_at l
+  | remove_all_at (v :: l) = v :: remove_all_at l
+  | remove_all_at [] = []
+
+fun find_in_which_step_defined var (VeriT_Node {id, bounds, ...} :: l) =
+    (case List.find (fn v => String.isPrefix v var) bounds of
+      NONE => find_in_which_step_defined var l
+    | SOME _ => id)
+  | find_in_which_step_defined var _ = raise Fail ("undefined " ^ var)
+
+(*All cases are possible: the introduced variable is not on a fixed side of the equality sign.*)
+fun find_ite_var_in_term (Const ("HOL.If", _) $ _ $
+      (Const (@{const_name "HOL.eq"}, _) $ Free (var1, _) $ Free (var2, _) ) $
+      (Const (@{const_name "HOL.eq"}, _) $ Free (var3, _) $ Free (var4, _) )) =
+    let
+      fun get_number_of_ite_transformed_var var =
+        perhaps (try (unprefix "ite")) var
+        |> Int.fromString
+      fun is_equal_and_has_correct_substring var var' var'' =
+        if var = var' andalso String.isPrefix "ite" var then SOME var'
+        else if var = var'' andalso String.isPrefix "ite" var then SOME var'' else NONE
+      val var1_introduced_var = is_equal_and_has_correct_substring var1 var3 var4
+      val var2_introduced_var = is_equal_and_has_correct_substring var3 var1 var2
+    in
+      (case (var1_introduced_var, var2_introduced_var) of
+        (SOME a, SOME b) =>
+          (*ill-generated case; might arise when applying the rule to "max a a", and only if the
+          variables have been introduced before. Probably an impossible edge case*)
+          (case (get_number_of_ite_transformed_var a, get_number_of_ite_transformed_var b) of
+            (SOME a, SOME b) => if a < b then var2_introduced_var else var1_introduced_var
+            (*Otherwise, there is a name clash between a parameter name and the introduced
+             variable, or the naming convention has changed.*)
+          | (NONE, SOME _) => var2_introduced_var
+          | (SOME _, NONE) => var2_introduced_var)
+      | (_, SOME _) => var2_introduced_var
+      | (SOME _, _) => var1_introduced_var)
+    end
+  | find_ite_var_in_term (Const (@{const_name "If"}, _) $ _ $
+      (Const (@{const_name "HOL.eq"}, _) $ Free (var, _) $ _ ) $
+      (Const (@{const_name "HOL.eq"}, _) $ Free (var', _) $ _ )) =
+    if var = var' then SOME var else NONE
+  | find_ite_var_in_term (Const (@{const_name "If"}, _) $ _ $
+      (Const (@{const_name "HOL.eq"}, _) $ _ $ Free (var, _)) $
+      (Const (@{const_name "HOL.eq"}, _) $ _ $ Free (var', _))) =
+    if var = var' then SOME var else NONE
+  | find_ite_var_in_term (p $ q) =
+    (case find_ite_var_in_term p of
+      NONE => find_ite_var_in_term q
+    | x => x)
+  | find_ite_var_in_term (Abs (_, _, body)) = find_ite_var_in_term body
+  | find_ite_var_in_term _ = NONE
+
+fun correct_veriT_step steps (st as VeriT_Node {id, rule, prems, concl, bounds}) =
+  if rule = veriT_tmp_ite_elim_rule then
+    if bounds = [] then
+      (*if the introduced var has already been defined, add its defining step as a dependency*)
+      let
+        val new_prems =
+          (case find_ite_var_in_term concl of
+            NONE => prems
+          | SOME var => find_in_which_step_defined var steps :: prems)
+      in
+        VeriT_Node {id = id, rule = rule, prems = new_prems, concl = concl, bounds = bounds}
+      end
+    else
+      (*some new variables are created*)
+      let
+        val concl' = replace_bound_var_by_free_var concl bounds
+      in
+        mk_node id rule prems concl' []
+      end
+  else
+    st
+
+fun remove_alpha_conversion _ [] = []
+  | remove_alpha_conversion replace_table (VeriT_Node {id, rule, prems, concl, bounds} :: steps) =
+    let
+      fun correct_dependency prems =
+        map (fn x => perhaps (Symtab.lookup replace_table) x) prems
+      fun find_predecessor prem = perhaps (Symtab.lookup replace_table) prem
+    in
+      if rule = veriT_alpha_conv_rule then
+        remove_alpha_conversion (Symtab.update (id, find_predecessor (hd prems))
+          replace_table) steps
+      else
+        VeriT_Node {id = id, rule = rule, prems = correct_dependency prems,
+          concl = concl, bounds = bounds} :: remove_alpha_conversion replace_table steps
+    end
+
+fun correct_veriT_steps steps =
+  steps
+  |> map (correct_veriT_step steps)
+  |> remove_alpha_conversion Symtab.empty
+
+fun parse typs funs lines ctxt =
+  let
+    val smtlib2_lines_without_at =
+      remove_all_at (map SMTLIB2.parse (seperate_into_steps lines))
+    val (u, env) = apfst flat (fold_map (fn l => fn cx => parse_proof_step cx l)
+      smtlib2_lines_without_at (empty_context ctxt typs funs))
+    val t = correct_veriT_steps u
+    fun node_to_step (VeriT_Node {id, rule, prems, concl, bounds, ...}) =
+      mk_step id rule prems concl bounds
+   in
+    (map node_to_step t, ctxt_of env)
+  end
+
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT2/verit_proof_parse.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -0,0 +1,104 @@
+(*  Title:      HOL/Tools/SMT2/verit_proof_parse.ML
+    Author:     Mathias Fleury, TU Muenchen
+    Author:     Jasmin Blanchette, TU Muenchen
+
+VeriT proof parsing.
+*)
+
+signature VERIT_PROOF_PARSE =
+sig
+  type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step
+  val parse_proof: Proof.context -> SMT2_Translate.replay_data ->
+    ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list ->
+    SMT2_Solver.parsed_proof
+end;
+
+structure VeriT_Proof_Parse: VERIT_PROOF_PARSE =
+struct
+
+open ATP_Util
+open ATP_Problem
+open ATP_Proof
+open ATP_Proof_Reconstruct
+open VeriT_Isar
+open VeriT_Proof
+
+fun find_and_add_missing_dependances steps assms ll_offset =
+  let
+    fun prems_to_theorem_number [] id repl = (([], []), (id, repl))
+      | prems_to_theorem_number (x :: ths) id replaced =
+        (case Int.fromString (perhaps (try (unprefix SMTLIB2_Interface.assert_prefix)) x) of
+          NONE =>
+          let
+            val ((prems, iidths), (id', replaced')) = prems_to_theorem_number ths id replaced
+          in
+            ((x :: prems, iidths), (id', replaced'))
+          end
+        | SOME th =>
+          (case Option.map snd (List.find (fst #> curry (op =) x) replaced) of
+            NONE =>
+            let
+              val id' = if th = ll_offset then 0 else id - ll_offset (* 0: for the conjecture*)
+              val ((prems, iidths), (id'', replaced')) =
+                prems_to_theorem_number ths (if th <> ll_offset then id + 1 else id)
+                  ((x, string_of_int id') :: replaced)
+            in
+              ((string_of_int id' :: prems, (th, (id', th - ll_offset)) :: iidths),
+               (id'', replaced'))
+            end
+          | SOME x =>
+            let
+              val ((prems, iidths), (id', replaced')) = prems_to_theorem_number ths id replaced
+            in ((x :: prems, iidths), (id', replaced')) end))
+    fun update_step (VeriT_Proof.VeriT_Step {prems, id = id0, rule = rule0,
+        concl = concl0, fixes = fixes0}) (id, replaced) =
+      let val ((prems', iidths), (id', replaced)) = prems_to_theorem_number prems id replaced
+      in
+        ((VeriT_Proof.VeriT_Step {id = id0, rule = rule0, prems = prems', concl = concl0,
+           fixes = fixes0}, iidths), (id', replaced))
+      end
+  in
+    fold_map update_step steps (1, [])
+    |> fst
+    |> `(map snd)
+    ||> (map fst)
+    |>> flat
+    |>> map (fn (_, (id, tm_id)) => let val (i, tm) = nth assms tm_id in (i, (id, tm)) end)
+  end
+
+fun add_missing_steps iidths =
+  let
+    fun add_single_step (_, (id, th)) = VeriT_Proof.VeriT_Step {id = string_of_int id,
+      rule = veriT_input_rule, prems = [], concl = prop_of th, fixes = []}
+  in map add_single_step iidths end
+
+fun parse_proof _
+    ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT2_Translate.replay_data)
+    xfacts prems concl output =
+  let
+    val (steps, _) = VeriT_Proof.parse typs terms output ctxt
+    val (iidths, steps'') = find_and_add_missing_dependances steps assms (length ll_defs)
+    val steps' = add_missing_steps iidths @ steps''
+    fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i))
+
+    val prems_i = 1
+    val facts_i = prems_i + length prems
+    val conjecture_i = 0
+    val ll_offset = id_of_index conjecture_i
+    val prem_ids = map id_of_index (prems_i upto facts_i - 1)
+    val helper_ids = map_filter (try (fn (~1, idth) => idth)) iidths
+
+    val fact_ids = map_filter (fn (i, (id, _)) =>
+      (try (apsnd (nth xfacts)) (id, i - facts_i))) iidths
+    val fact_helper_ts =
+      map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, prop_of th)) helper_ids @
+      map (fn (_, ((s, _), th)) => (s, prop_of th)) fact_ids
+    val fact_helper_ids =
+      map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids @ map (apsnd (fst o fst)) fact_ids
+  in
+    {outcome = NONE, fact_ids = fact_ids,
+     atp_proof = fn () => atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules prems concl
+       fact_helper_ts prem_ids ll_offset fact_helper_ids steps'}
+  end
+
+end;
--- a/src/HOL/Tools/SMT2/z3_new_isar.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/z3_new_isar.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -18,6 +18,7 @@
 open ATP_Problem
 open ATP_Proof
 open ATP_Proof_Reconstruct
+open SMTLIB2_Isar
 
 val z3_apply_def_rule = Z3_New_Proof.string_of_rule Z3_New_Proof.Apply_Def
 val z3_hypothesis_rule = Z3_New_Proof.string_of_rule Z3_New_Proof.Hypothesis
@@ -62,104 +63,49 @@
           end
       end
 
-fun simplify_bool ((all as Const (@{const_name All}, _)) $ Abs (s, T, t)) =
-    let val t' = simplify_bool t in
-      if loose_bvar1 (t', 0) then all $ Abs (s, T, t') else t'
+fun dest_alls (Const (@{const_name Pure.all}, _) $ Abs (abs as (_, T, _))) =
+    let val (s', t') = Term.dest_abs abs in
+      dest_alls t' |>> cons (s', T)
     end
-  | simplify_bool (@{const Not} $ t) = s_not (simplify_bool t)
-  | simplify_bool (@{const conj} $ t $ u) = s_conj (simplify_bool t, simplify_bool u)
-  | simplify_bool (@{const disj} $ t $ u) = s_disj (simplify_bool t, simplify_bool u)
-  | simplify_bool (@{const implies} $ t $ u) = s_imp (simplify_bool t, simplify_bool u)
-  | simplify_bool (@{const HOL.eq (bool)} $ t $ u) = s_iff (simplify_bool t, simplify_bool u)
-  | simplify_bool (t as Const (@{const_name HOL.eq}, _) $ u $ v) =
-    if u aconv v then @{const True} else t
-  | simplify_bool (t $ u) = simplify_bool t $ simplify_bool u
-  | simplify_bool (Abs (s, T, t)) = Abs (s, T, simplify_bool t)
-  | simplify_bool t = t
-
-(* It is not entirely clear why this should be necessary, especially for abstractions variables. *)
-val unskolemize_names =
-  Term.map_abs_vars (perhaps (try Name.dest_skolem))
-  #> Term.map_aterms (perhaps (try (fn Free (s, T) => Free (Name.dest_skolem s, T))))
+  | dest_alls t = ([], t)
 
-fun strip_alls (Const (@{const_name All}, _) $ Abs (s, T, body)) = strip_alls body |>> cons (s, T)
-  | strip_alls t = ([], t)
-
-fun push_skolem_all_under_iff t =
-  (case strip_alls t of
-    (qs as _ :: _,
-     (t0 as Const (@{const_name HOL.eq}, _)) $ (t1 as Const (@{const_name Ex}, _) $ _) $ t2) =>
-    t0 $ HOLogic.list_all (qs, t1) $ HOLogic.list_all (qs, t2)
-  | _ => t)
-
-fun unlift_term ll_defs =
-  let
-    val lifted = map (ATP_Util.extract_lambda_def dest_Free o ATP_Util.hol_open_form I) ll_defs
-
-    fun un_free (t as Free (s, _)) =
-       (case AList.lookup (op =) lifted s of
-         SOME t => un_term t
-       | NONE => t)
-     | un_free t = t
-    and un_term t = map_aterms un_free t
-  in un_term end
+val reorder_foralls =
+  dest_alls
+  #>> sort_wrt fst
+  #-> fold_rev (Logic.all o Free);
 
 fun atp_proof_of_z3_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids
     conjecture_id fact_helper_ids proof =
   let
     val thy = Proof_Context.theory_of ctxt
 
-    val unlift_term = unlift_term ll_defs
-
     fun steps_of (Z3_New_Proof.Z3_Step {id, rule, prems, concl, ...}) =
       let
         val sid = string_of_int id
 
-        val concl' =
-          concl
-          |> Raw_Simplifier.rewrite_term thy rewrite_rules []
-          |> Object_Logic.atomize_term thy
-          |> simplify_bool
-          |> not (null ll_defs) ? unlift_term
-          |> unskolemize_names
-          |> push_skolem_all_under_iff
-          |> HOLogic.mk_Trueprop
-
+        val concl' = concl
+          |> reorder_foralls (* crucial for skolemization steps *)
+          |> postprocess_step_conclusion thy rewrite_rules ll_defs
         fun standard_step role =
           ((sid, []), role, concl', Z3_New_Proof.string_of_rule rule,
            map (fn id => (string_of_int id, [])) prems)
       in
         (case rule of
           Z3_New_Proof.Asserted =>
-          let
-            val ss = the_list (AList.lookup (op =) fact_helper_ids id)
-            val name0 = (sid ^ "a", ss)
-
-            val (role0, concl0) =
-              (case ss of
-                [s] => (Axiom, the (AList.lookup (op =) fact_helper_ts s))
-              | _ =>
-                if id = conjecture_id then
-                  (Conjecture, concl_t)
-                else
-                  (Hypothesis,
-                   (case find_index (curry (op =) id) prem_ids of
-                     ~1 => concl
-                   | i => nth hyp_ts i)))
-
-            val normalize_prems =
-              SMT2_Normalize.case_bool_entry :: SMT2_Normalize.special_quant_table @
-              SMT2_Normalize.abs_min_max_table
-              |> map_filter (fn (c, th) =>
-                if exists_Const (curry (op =) c o fst) concl0 then
-                  let val s = short_thm_name ctxt th in SOME (s, [s]) end
-                else
-                  NONE)
-          in
-            (if role0 = Axiom then []
-             else [(name0, role0, concl0, Z3_New_Proof.string_of_rule rule, [])]) @
-            [((sid, []), Plain, concl', Z3_New_Proof.string_of_rule Z3_New_Proof.Rewrite,
-              name0 :: normalize_prems)]
+          let val ss = the_list (AList.lookup (op =) fact_helper_ids id) in
+            (case distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts
+                hyp_ts concl_t of
+              NONE => []
+            | SOME (role0, concl00) =>
+              let
+                val name0 = (sid ^ "a", ss)
+                val concl0 = unskolemize_names concl00
+              in
+                (if role0 = Axiom then []
+                 else [(name0, role0, concl0, Z3_New_Proof.string_of_rule rule, [])]) @
+                [((sid, []), Plain, concl', Z3_New_Proof.string_of_rule Z3_New_Proof.Rewrite,
+                  name0 :: normalizing_prems ctxt concl0)]
+              end)
           end
         | Z3_New_Proof.Rewrite => [standard_step Lemma]
         | Z3_New_Proof.Rewrite_Star => [standard_step Lemma]
--- a/src/HOL/Tools/SMT2/z3_new_proof.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/SMT2/z3_new_proof.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -7,14 +7,14 @@
 signature Z3_NEW_PROOF =
 sig
   (*proof rules*)
-  datatype z3_rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity |
-    Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro |
-    Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star |
-    Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars |
-    Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution |
-    Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def |
-    Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize |
-    Modus_Ponens_Oeq | Th_Lemma of string
+  datatype z3_rule =
+    True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity |
+    Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim |
+    Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars |
+    Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False |
+    Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star |
+    Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string
+
   val is_assumption: z3_rule -> bool
   val string_of_rule: z3_rule -> string
 
@@ -40,16 +40,14 @@
 
 (* proof rules *)
 
-datatype z3_rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity |
-  Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro |
-  Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star |
-  Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res |
-  Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False |
-  Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos |
-  Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq |
-  Th_Lemma of string
-  (* TODO: some proof rules come with further information
-     that is currently dropped by the parser *)
+datatype z3_rule =
+  True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity |
+  Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim |
+  Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars |
+  Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False |
+  Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star |
+  Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string
+  (* some proof rules include further information that is currently dropped by the parser *)
 
 val rule_names = Symtab.make [
   ("true-axiom", True_Axiom),
@@ -102,7 +100,7 @@
     SOME rule => rule
   | NONE => error ("unknown Z3 proof rule " ^ quote name))
 
-fun string_of_rule (Th_Lemma kind) = "th-lemma " ^ kind
+fun string_of_rule (Th_Lemma kind) = "th-lemma" ^ (if kind = "" then "" else " " ^ kind)
   | string_of_rule r =
       let fun eq_rule (s, r') = if r = r' then SOME s else NONE
       in the (Symtab.get_first eq_rule rule_names) end
@@ -118,7 +116,7 @@
   bounds: string list}
 
 fun mk_node id rule prems concl bounds =
-  Z3_Node {id=id, rule=rule, prems=prems, concl=concl, bounds=bounds}
+  Z3_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds}
 
 datatype z3_step = Z3_Step of {
   id: int,
@@ -129,8 +127,8 @@
   is_fix_step: bool}
 
 fun mk_step id rule prems concl fixes is_fix_step =
-  Z3_Step {id=id, rule=rule, prems=prems, concl=concl, fixes=fixes,
-    is_fix_step=is_fix_step}
+  Z3_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes,
+    is_fix_step = is_fix_step}
 
 
 (* proof parser *)
@@ -161,7 +159,7 @@
       in
         cx
         |> fold_map node_of ps
-        ||>> with_fresh_names (term_of p)
+        ||>> `(with_fresh_names (term_of p))
         ||>> next_id
         |>> (fn ((prems, (t, ns)), id) => mk_node id r prems t ns)
       end
@@ -188,10 +186,8 @@
     val ts = dest_seq (SMTLIB2.parse lines)
     val (node, cx) = parse' ts (empty_context ctxt typs funs)
   in (node, ctxt_of cx) end
-  handle SMTLIB2.PARSE (l, msg) =>
-           error ("parsing error at line " ^ string_of_int l ^ ": " ^ msg)
-       | SMTLIB2_PARSE (msg, t) =>
-           error (msg ^ ": " ^ SMTLIB2.str_of t)
+  handle SMTLIB2.PARSE (l, msg) => error ("parsing error at line " ^ string_of_int l ^ ": " ^ msg)
+       | SMTLIB2_PARSE (msg, t) => error (msg ^ ": " ^ SMTLIB2.str_of t)
 
 
 (* handling of bound variables *)
@@ -253,7 +249,7 @@
 
 fun dest_alls t = dest_all (Term.maxidx_of_term t + 1) t
 
-fun match_rule ctxt env (Z3_Node {bounds=bs', concl=t', ...}) bs t =
+fun match_rule ctxt env (Z3_Node {bounds = bs', concl = t', ...}) bs t =
   let
     val t'' = singleton (Variable.polymorphic ctxt) t'
     val (i, obj) = dest_alls (subst_types ctxt env bs t)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -10,7 +10,8 @@
 sig
   type fact = Sledgehammer_Fact.fact
   type fact_override = Sledgehammer_Fact.fact_override
-  type minimize_command = Sledgehammer_Proof_Methods.minimize_command
+  type proof_method = Sledgehammer_Proof_Methods.proof_method
+  type play_outcome = Sledgehammer_Proof_Methods.play_outcome
   type mode = Sledgehammer_Prover.mode
   type params = Sledgehammer_Prover.params
 
@@ -19,10 +20,11 @@
   val timeoutN : string
   val unknownN : string
 
+  val play_one_line_proof : bool -> Time.time -> (string * 'a) list -> Proof.state -> int ->
+    proof_method * proof_method list list -> (string * 'a) list * (proof_method * play_outcome)
   val string_of_factss : (string * fact list) list -> string
   val run_sledgehammer : params -> mode -> (string -> unit) option -> int -> fact_override ->
-    ((string * string list) list -> string -> minimize_command) -> Proof.state ->
-    bool * (string * Proof.state)
+    Proof.state -> bool * (string * Proof.state)
 end;
 
 structure Sledgehammer : SLEDGEHAMMER =
@@ -34,6 +36,9 @@
 open Sledgehammer_Util
 open Sledgehammer_Fact
 open Sledgehammer_Proof_Methods
+open Sledgehammer_Isar_Proof
+open Sledgehammer_Isar_Preplay
+open Sledgehammer_Isar_Minimize
 open Sledgehammer_Prover
 open Sledgehammer_Prover_ATP
 open Sledgehammer_Prover_Minimize
@@ -49,17 +54,61 @@
 fun max_outcome_code codes =
   NONE
   |> fold (fn candidate =>
-              fn accum as SOME _ => accum
-               | NONE => if member (op =) codes candidate then SOME candidate else NONE)
-       ordered_outcome_codes
+      fn accum as SOME _ => accum
+       | NONE => if member (op =) codes candidate then SOME candidate else NONE)
+    ordered_outcome_codes
   |> the_default unknownN
 
 fun prover_description verbose name num_facts =
   (quote name,
    if verbose then " with " ^ string_of_int num_facts ^ " fact" ^ plural_s num_facts else "")
 
-fun launch_prover (params as {debug, verbose, spy, blocking, max_facts, timeout, expect, ...}) mode
-    output_result minimize_command only learn
+fun is_metis_method (Metis_Method _) = true
+  | is_metis_method _ = false
+
+fun play_one_line_proof minimize timeout used_facts state i (preferred_meth, methss) =
+  if timeout = Time.zeroTime then
+    (used_facts, (preferred_meth, Play_Timed_Out Time.zeroTime))
+  else
+    let
+      val fact_names = map fst used_facts
+
+      val {context = ctxt, facts = chained, goal} = Proof.goal state
+      val (_, hyp_ts, concl_t) = strip_subgoal goal i ctxt
+      val goal_t = Logic.list_implies (map prop_of chained @ hyp_ts, concl_t)
+
+      fun try_methss ress [] =
+          (used_facts,
+           (case AList.lookup (op =) ress preferred_meth of
+             SOME play => (preferred_meth, play)
+           | NONE => hd (sort (play_outcome_ord o pairself snd) (rev ress))))
+        | try_methss ress (meths :: methss) =
+          let
+            fun mk_step fact_names meths =
+              Prove ([], [], ("", 0), goal_t, [], ([], fact_names), meths, "")
+          in
+            (case preplay_isar_step ctxt timeout [] (mk_step fact_names meths) of
+              (res as (meth, Played time)) :: _ =>
+              (* if a fact is needed by an ATP, it will be needed by "metis" *)
+              if not minimize orelse is_metis_method meth then
+                (used_facts, res)
+              else
+                let
+                  val (time', used_names') =
+                    minimized_isar_step ctxt time (mk_step fact_names [meth])
+                    ||> (facts_of_isar_step #> snd)
+                  val used_facts' = filter (member (op =) used_names' o fst) used_facts
+                in
+                  (used_facts', (meth, Played time'))
+                end
+            | ress' => try_methss (ress' @ ress) methss)
+          end
+    in
+      try_methss [] methss
+    end
+
+fun launch_prover (params as {debug, verbose, spy, blocking, max_facts, minimize, timeout,
+      preplay_timeout, expect, ...}) mode output_result only learn
     {comment, state, goal, subgoal, subgoal_count, factss as (_, facts) :: _} name =
   let
     val ctxt = Proof.context_of state
@@ -75,10 +124,9 @@
       {comment = comment, state = state, goal = goal, subgoal = subgoal,
        subgoal_count = subgoal_count,
        factss = factss
-         |> map (apsnd ((not (is_ho_atp ctxt name)
-                         ? filter_out (fn ((_, (_, Induction)), _) => true
-                                        | _ => false))
-                        #> take num_facts))}
+       |> map (apsnd ((not (is_ho_atp ctxt name)
+           ? filter_out (fn ((_, (_, Induction)), _) => true | _ => false))
+         #> take num_facts))}
 
     fun print_used_facts used_facts used_from =
       tag_list 1 used_from
@@ -115,8 +163,8 @@
             |> AList.group (op =)
             |> map (fn (indices, fact_filters) => commas fact_filters ^ ": " ^ indices)
         in
-          "Success: Found proof with " ^ string_of_int num_used_facts ^
-          " of " ^ string_of_int num_facts ^ " fact" ^ plural_s num_facts ^
+          "Success: Found proof with " ^ string_of_int num_used_facts ^ " of " ^
+          string_of_int num_facts ^ " fact" ^ plural_s num_facts ^
           (if num_used_facts = 0 then "" else ": " ^ commas filter_infos)
         end
       | spying_str_of_res {outcome = SOME failure, ...} =
@@ -124,16 +172,17 @@
 
     fun really_go () =
       problem
-      |> get_minimizing_prover ctxt mode learn name params minimize_command
-      |> verbose
-         ? tap (fn {outcome = NONE, used_facts as _ :: _, used_from, ...} =>
-                   print_used_facts used_facts used_from
-                 | _ => ())
+      |> get_minimizing_prover ctxt mode learn name params
+      |> verbose ? tap (fn {outcome = NONE, used_facts as _ :: _, used_from, ...} =>
+          print_used_facts used_facts used_from
+        | _ => ())
       |> spy ? tap (fn res => spying spy (fn () => (state, subgoal, name, spying_str_of_res res)))
-      |> (fn {outcome, preplay, message, message_tail, ...} =>
-             (if outcome = SOME ATP_Proof.TimedOut then timeoutN
-              else if is_some outcome then noneN
-              else someN, fn () => message (Lazy.force preplay) ^ message_tail))
+      |> (fn {outcome, used_facts, preferred_methss, message, ...} =>
+        (if outcome = SOME ATP_Proof.TimedOut then timeoutN
+         else if is_some outcome then noneN
+         else someN,
+         fn () => message (fn () => play_one_line_proof minimize preplay_timeout used_facts state
+           subgoal preferred_methss)))
 
     fun go () =
       let
@@ -203,7 +252,7 @@
       (if filter = "" then "" else quote filter ^ ": ") ^ string_of_facts facts) factss)
 
 fun run_sledgehammer (params as {verbose, spy, blocking, provers, max_facts, ...}) mode
-    output_result i (fact_override as {only, ...}) minimize_command state =
+    output_result i (fact_override as {only, ...}) state =
   if null provers then
     error "No prover is set."
   else
@@ -260,7 +309,7 @@
               {comment = "", state = state, goal = goal, subgoal = i, subgoal_count = n,
                factss = factss}
             val learn = mash_learn_proof ctxt params (prop_of goal) all_facts
-            val launch = launch_prover params mode output_result minimize_command only learn
+            val launch = launch_prover params mode output_result only learn
           in
             if mode = Auto_Try then
               (unknownN, state)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_commands.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_commands.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -31,7 +31,6 @@
 val z3N = "z3"
 
 val runN = "run"
-val minN = "min"
 val messagesN = "messages"
 val supported_proversN = "supported_provers"
 val kill_allN = "kill_all"
@@ -92,17 +91,17 @@
    ("max_mono_iters", "smart"),
    ("max_new_mono_instances", "smart"),
    ("isar_proofs", "smart"),
-   ("compress", "10"),
+   ("compress", "smart"),
    ("try0", "true"),
    ("smt_proofs", "smart"),
    ("slice", "true"),
-   ("minimize", "smart"),
-   ("preplay_timeout", "2")]
+   ("minimize", "true"),
+   ("preplay_timeout", "1")]
 
 val alias_params =
   [("prover", ("provers", [])), (* undocumented *)
    ("dont_preplay", ("preplay_timeout", ["0"])),
-   ("dont_compress", ("compress", ["0"])),
+   ("dont_compress", ("compress", ["1"])),
    ("isar_proof", ("isar_proofs", [])) (* legacy *)]
 val negated_alias_params =
   [("no_debug", "debug"),
@@ -119,9 +118,6 @@
    ("dont_try0", "try0"),
    ("no_smt_proofs", "smt_proofs")]
 
-val params_not_for_minimize =
-  ["provers", "blocking", "fact_filter", "max_facts", "fact_thresholds", "slice", "minimize"];
-
 val property_dependent_params = ["provers", "timeout"]
 
 fun is_known_raw_param s =
@@ -295,11 +291,11 @@
     val max_new_mono_instances =
       lookup_option lookup_int "max_new_mono_instances"
     val isar_proofs = lookup_option lookup_bool "isar_proofs"
-    val compress = Real.max (1.0, lookup_real "compress")
+    val compress = Option.map (curry Real.max 1.0) (lookup_option lookup_real "compress")
     val try0 = lookup_bool "try0"
     val smt_proofs = lookup_option lookup_bool "smt_proofs"
     val slice = mode <> Auto_Try andalso lookup_bool "slice"
-    val minimize = if mode = Auto_Try then NONE else lookup_option lookup_bool "minimize"
+    val minimize = mode <> Auto_Try andalso lookup_bool "minimize"
     val timeout = lookup_time "timeout"
     val preplay_timeout = if mode = Auto_Try then Time.zeroTime else lookup_time "preplay_timeout"
     val expect = lookup_string "expect"
@@ -318,33 +314,6 @@
 
 (* Sledgehammer the given subgoal *)
 
-val default_minimize_prover = metisN
-
-fun is_raw_param_relevant_for_minimize (name, _) = not (member (op =) params_not_for_minimize name)
-
-fun string_of_raw_param (key, values) =
-  key ^ (case implode_param values of "" => "" | value => " = " ^ value)
-
-fun nice_string_of_raw_param (p as (key, ["false"])) =
-    (case AList.find (op =) negated_alias_params key of
-       [neg] => neg
-     | _ => string_of_raw_param p)
-  | nice_string_of_raw_param p = string_of_raw_param p
-
-fun minimize_command override_params i more_override_params prover_name fact_names =
-  let
-    val params =
-      (override_params |> filter_out (AList.defined (op =) more_override_params o fst)) @
-      more_override_params
-      |> filter is_raw_param_relevant_for_minimize
-      |> map nice_string_of_raw_param
-      |> (if prover_name = default_minimize_prover then I else cons prover_name)
-      |> space_implode ", "
-  in
-    sledgehammerN ^ " " ^ minN ^ (if params = "" then "" else enclose " [" "]" params) ^
-    " (" ^ space_implode " " fact_names ^ ")" ^ (if i = 1 then "" else " " ^ string_of_int i)
-  end
-
 val default_learn_prover_timeout = 2.0
 
 fun hammer_away override_params subcommand opt_i fact_override state =
@@ -357,20 +326,9 @@
   in
     if subcommand = runN then
       let val i = the_default 1 opt_i in
-        run_sledgehammer (get_params Normal thy override_params) Normal NONE i fact_override
-          (minimize_command override_params i) state
-        |> K ()
+        ignore (run_sledgehammer (get_params Normal thy override_params) Normal NONE i fact_override
+          state)
       end
-    else if subcommand = minN then
-      let
-        val ctxt = ctxt |> Config.put instantiate_inducts false
-        val i = the_default 1 opt_i
-        val params =
-          get_params Minimize thy (("provers", [default_minimize_prover]) :: override_params)
-        val goal = prop_of (#goal (Proof.goal state))
-        val facts = nearly_all_facts ctxt false fact_override Symtab.empty Termtab.empty [] [] goal
-        val learn = mash_learn_proof ctxt params goal facts
-      in run_minimize params learn i (#add fact_override) state end
     else if subcommand = messagesN then
       messages opt_i
     else if subcommand = supported_proversN then
@@ -391,8 +349,7 @@
                 ([("timeout", [string_of_real default_learn_prover_timeout]),
                   ("slice", ["false"])] @
                  override_params @
-                 [("minimize", ["true"]),
-                  ("preplay_timeout", ["0"])]))
+                 [("preplay_timeout", ["0"])]))
            fact_override (#facts (Proof.goal state))
            (subcommand = learn_proverN orelse subcommand = relearn_proverN))
     else if subcommand = running_learnersN then
@@ -407,7 +364,8 @@
   Toplevel.unknown_proof o
   Toplevel.keep (hammer_away params subcommand opt_i fact_override o Toplevel.proof_of)
 
-fun string_of_raw_param (name, value) = name ^ " = " ^ implode_param value
+fun string_of_raw_param (key, values) =
+  key ^ (case implode_param values of "" => "" | value => " = " ^ value)
 
 fun sledgehammer_params_trans params =
   Toplevel.theory (fold set_default_raw_param params #> tap (fn thy =>
@@ -446,8 +404,7 @@
     val mode = if auto then Auto_Try else Try
     val i = 1
   in
-    run_sledgehammer (get_params mode thy []) mode NONE i no_fact_override (minimize_command [] i)
-      state
+    run_sledgehammer (get_params mode thy []) mode NONE i no_fact_override state
   end
 
 val _ = Try.tool_setup (sledgehammerN, (40, @{system_option auto_sledgehammer}, try_sledgehammer))
@@ -456,24 +413,23 @@
   Query_Operation.register sledgehammerN (fn {state = st, args, output_result} =>
     (case try Toplevel.proof_of st of
       SOME state =>
-        let
-          val thy = Proof.theory_of state
-          val ctxt = Proof.context_of state
-          val [provers_arg, isar_proofs_arg] = args
+      let
+        val thy = Proof.theory_of state
+        val ctxt = Proof.context_of state
+        val [provers_arg, isar_proofs_arg] = args
 
-          val override_params =
-            ((if provers_arg = "" then [] else [("provers", space_explode " " provers_arg)]) @
-              [("isar_proofs", [isar_proofs_arg]),
-               ("blocking", ["true"]),
-               ("minimize", ["true"]),
-               ("debug", ["false"]),
-               ("verbose", ["false"]),
-               ("overlord", ["false"])])
-            |> map (normalize_raw_param ctxt)
-
-          val _ = run_sledgehammer (get_params Normal thy override_params) Normal
-            (SOME output_result) 1 no_fact_override (minimize_command override_params 1) state
-        in () end
+        val override_params =
+          ((if provers_arg = "" then [] else [("provers", space_explode " " provers_arg)]) @
+            [("isar_proofs", [isar_proofs_arg]),
+             ("blocking", ["true"]),
+             ("debug", ["false"]),
+             ("verbose", ["false"]),
+             ("overlord", ["false"])])
+          |> map (normalize_raw_param ctxt)
+      in
+        ignore (run_sledgehammer (get_params Normal thy override_params) Normal
+          (SOME output_result) 1 no_fact_override state)
+      end
     | NONE => error "Unknown proof context"))
 
 end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -77,8 +77,7 @@
   | explode_interval max (Facts.From i) = i upto i + max - 1
   | explode_interval _ (Facts.Single i) = [i]
 
-val backquote =
-  raw_explode #> map (fn "`" => "\\`" | s => s) #> implode #> enclose "`" "`"
+val backquote = raw_explode #> map (fn "`" => "\\`" | s => s) #> implode #> enclose "`" "`"
 
 (* unfolding these can yield really huge terms *)
 val risky_defs = @{thms Bit0_def Bit1_def}
@@ -101,9 +100,8 @@
   else Local
 
 val may_be_induction =
-  exists_subterm (fn Var (_, Type (@{type_name fun}, [_, T])) =>
-                     body_type T = @{typ bool}
-                   | _ => false)
+  exists_subterm (fn Var (_, Type (@{type_name fun}, [_, T])) => body_type T = @{typ bool}
+    | _ => false)
 
 (* TODO: get rid of *)
 fun normalize_vars t =
@@ -118,14 +116,12 @@
 
     fun norm (t $ u) = norm t ##>> norm u #>> op $
       | norm (Const (s, T)) = normT T #>> curry Const s
-      | norm (Var (z as (_, T))) =
-        normT T
+      | norm (Var (z as (_, T))) = normT T
         #> (fn (T, (accumT, (known, n))) =>
-               (case find_index (equal z) known of
-                 ~1 => (Var ((Name.uu, n), T), (accumT, (z :: known, n + 1)))
-               | j => (Var ((Name.uu, n - j - 1), T), (accumT, (known, n)))))
-      | norm (Abs (_, T, t)) =
-        norm t ##>> normT T #>> (fn (t, T) => Abs (Name.uu, T, t))
+          (case find_index (equal z) known of
+            ~1 => (Var ((Name.uu, n), T), (accumT, (z :: known, n + 1)))
+          | j => (Var ((Name.uu, n - j - 1), T), (accumT, (known, n)))))
+      | norm (Abs (_, T, t)) = norm t ##>> normT T #>> (fn (t, T) => Abs (Name.uu, T, t))
       | norm (Bound j) = pair (Bound j)
       | norm (Free (s, T)) = normT T #>> curry Free s
   in fst (norm t (([], 0), ([], 0))) end
@@ -163,50 +159,43 @@
 fun fact_of_ref ctxt reserved chained css (xthm as (xref, args)) =
   let
     val ths = Attrib.eval_thms ctxt [xthm]
-    val bracket =
-      map (enclose "[" "]" o Pretty.str_of o Args.pretty_src ctxt) args
-      |> implode
+    val bracket = implode (map (enclose "[" "]" o Pretty.str_of o Args.pretty_src ctxt) args)
 
     fun nth_name j =
       (case xref of
-        Facts.Fact s => backquote s ^ bracket
+        Facts.Fact s => backquote (simplify_spaces (unyxml s)) ^ bracket
       | Facts.Named (("", _), _) => "[" ^ bracket ^ "]"
-      | Facts.Named ((name, _), NONE) =>
-        make_name reserved (length ths > 1) (j + 1) name ^ bracket
+      | Facts.Named ((name, _), NONE) => make_name reserved (length ths > 1) (j + 1) name ^ bracket
       | Facts.Named ((name, _), SOME intervals) =>
         make_name reserved true
           (nth (maps (explode_interval (length ths)) intervals) j) name ^ bracket)
 
     fun add_nth th (j, rest) =
       let val name = nth_name j in
-        (j + 1, ((name, stature_of_thm false [] chained css name th), th)
-                :: rest)
+        (j + 1, ((name, stature_of_thm false [] chained css name th), th) :: rest)
       end
   in
     (0, []) |> fold add_nth ths |> snd
   end
 
-(* Reject theorems with names like "List.filter.filter_list_def" or
-  "Accessible_Part.acc.defs", as these are definitions arising from packages. *)
+(* Reject theorems with names like "List.filter.filter_list_def" or "Accessible_Part.acc.defs", as
+   these are definitions arising from packages. *)
 fun is_package_def s =
   let val ss = Long_Name.explode s in
     length ss > 2 andalso not (hd ss = "local") andalso
     exists (fn suf => String.isSuffix suf s)
-           ["_case_def", "_rec_def", "_size_def", "_size_overloaded_def"]
+      ["_case_def", "_rec_def", "_size_def", "_size_overloaded_def"]
   end
 
 (* FIXME: put other record thms here, or declare as "no_atp" *)
 fun multi_base_blacklist ctxt ho_atp =
-  ["defs", "select_defs", "update_defs", "split", "splits", "split_asm",
-   "ext_cases", "eq.simps", "eq.refl", "nchotomy", "case_cong",
-   "weak_case_cong", "nat_of_char_simps", "nibble.simps",
+  ["defs", "select_defs", "update_defs", "split", "splits", "split_asm", "ext_cases", "eq.simps",
+   "eq.refl", "nchotomy", "case_cong", "weak_case_cong", "nat_of_char_simps", "nibble.simps",
    "nibble.distinct"]
-  |> not (ho_atp orelse Config.get ctxt instantiate_inducts) ?
-        append ["induct", "inducts"]
+  |> not (ho_atp orelse Config.get ctxt instantiate_inducts) ? append ["induct", "inducts"]
   |> map (prefix Long_Name.separator)
 
-(* The maximum apply depth of any "metis" call in "Metis_Examples" (on
-   2007-10-31) was 11. *)
+(* The maximum apply depth of any "metis" call in "Metis_Examples" (back in 2007) was 11. *)
 val max_apply_depth = 18
 
 fun apply_depth (f $ t) = Int.max (apply_depth f, apply_depth t + 1)
@@ -217,23 +206,20 @@
 
 (* FIXME: Ad hoc list *)
 val technical_prefixes =
-  ["ATP", "Code_Evaluation", "Datatype", "Enum", "Lazy_Sequence",
-   "Limited_Sequence", "Meson", "Metis", "Nitpick",
-   "Quickcheck_Random", "Quickcheck_Exhaustive", "Quickcheck_Narrowing",
+  ["ATP", "Code_Evaluation", "Datatype", "Enum", "Lazy_Sequence", "Limited_Sequence", "Meson",
+   "Metis", "Nitpick", "Quickcheck_Random", "Quickcheck_Exhaustive", "Quickcheck_Narrowing",
    "Random_Sequence", "Sledgehammer", "SMT"]
   |> map (suffix Long_Name.separator)
 
-fun is_technical_const (s, _) =
-  exists (fn pref => String.isPrefix pref s) technical_prefixes
+fun is_technical_const s = exists (fn pref => String.isPrefix pref s) technical_prefixes
 
 (* FIXME: make more reliable *)
 val sep_class_sep = Long_Name.separator ^ "class" ^ Long_Name.separator
 
-fun is_low_level_class_const (s, _) =
+fun is_low_level_class_const s =
   s = @{const_name equal_class.equal} orelse String.isSubstring sep_class_sep s
 
 val sep_that = Long_Name.separator ^ Obtain.thatN
-
 val skolem_thesis = Name.skolem Auto_Bind.thesisN
 
 fun is_that_fact th =
@@ -259,11 +245,11 @@
       | is_interesting_subterm _ = false
 
     fun interest_of_bool t =
-      if exists_Const (is_technical_const orf is_low_level_class_const orf
-                       type_has_top_sort o snd) t then
+      if exists_Const ((is_technical_const o fst) orf (is_low_level_class_const o fst) orf
+          type_has_top_sort o snd) t then
         Deal_Breaker
       else if exists_type (exists_subtype (curry (op =) @{typ prop})) t orelse
-              not (exists_subterm is_interesting_subterm t) then
+          not (exists_subterm is_interesting_subterm t) then
         Boring
       else
         Interesting
@@ -281,8 +267,7 @@
 
     val t = prop_of th
   in
-    (interest_of_prop [] t <> Interesting andalso
-     not (Thm.eq_thm_prop (@{thm ext}, th))) orelse
+    (interest_of_prop [] t <> Interesting andalso not (Thm.eq_thm_prop (@{thm ext}, th))) orelse
     is_that_fact th
   end
 
@@ -297,19 +282,18 @@
    prefixes of all free variables. In the worst-case scenario, where the fact
    won't be resolved correctly, the user can fix it manually, e.g., by giving a
    name to the offending fact. *)
-fun all_prefixes_of s =
-  map (fn i => String.extract (s, 0, SOME i)) (1 upto size s - 1)
+fun all_prefixes_of s = map (fn i => String.extract (s, 0, SOME i)) (1 upto size s - 1)
 
 fun close_form t =
   (t, [] |> Term.add_free_names t |> maps all_prefixes_of)
   |> fold (fn ((s, i), T) => fn (t', taken) =>
-              let val s' = singleton (Name.variant_list taken) s in
-                ((if fastype_of t' = HOLogic.boolT then HOLogic.all_const
-                  else Logic.all_const) T
-                 $ Abs (s', T, abstract_over (Var ((s, i), T), t')),
-                 s' :: taken)
-              end)
-          (Term.add_vars t [] |> sort_wrt (fst o fst))
+      let val s' = singleton (Name.variant_list taken) s in
+        ((if fastype_of t' = HOLogic.boolT then HOLogic.all_const
+          else Logic.all_const) T
+         $ Abs (s', T, abstract_over (Var ((s, i), T), t')),
+         s' :: taken)
+      end)
+    (Term.add_vars t [] |> sort_wrt (fst o fst))
   |> fst
 
 fun backquote_term ctxt = close_form #> hackish_string_of_term ctxt #> backquote
@@ -322,26 +306,23 @@
       Termtab.empty
     else
       let
-        fun add stature th =
-          Termtab.update (normalize_vars (prop_of th), stature)
+        fun add stature th = Termtab.update (normalize_vars (prop_of th), stature)
 
-        val {safeIs, (* safeEs, *) hazIs, (* hazEs, *) ...} =
-          ctxt |> claset_of |> Classical.rep_cs
+        val {safeIs, (* safeEs, *) hazIs, (* hazEs, *) ...} = ctxt |> claset_of |> Classical.rep_cs
         val intros = Item_Net.content safeIs @ Item_Net.content hazIs
 (* Add once it is used:
-        val elims =
-          Item_Net.content safeEs @ Item_Net.content hazEs
+        val elims = Item_Net.content safeEs @ Item_Net.content hazEs
           |> map Classical.classical_rule
 *)
-        val specs = ctxt |> Spec_Rules.get
-        val (rec_defs, nonrec_defs) =
-          specs |> filter (curry (op =) Spec_Rules.Equational o fst)
-                |> maps (snd o snd)
-                |> filter_out (member Thm.eq_thm_prop risky_defs)
-                |> List.partition (is_rec_def o prop_of)
-        val spec_intros =
-          specs |> filter (member (op =) [Spec_Rules.Inductive, Spec_Rules.Co_Inductive] o fst)
-                |> maps (snd o snd)
+        val specs = Spec_Rules.get ctxt
+        val (rec_defs, nonrec_defs) = specs
+          |> filter (curry (op =) Spec_Rules.Equational o fst)
+          |> maps (snd o snd)
+          |> filter_out (member Thm.eq_thm_prop risky_defs)
+          |> List.partition (is_rec_def o prop_of)
+        val spec_intros = specs
+          |> filter (member (op =) [Spec_Rules.Inductive, Spec_Rules.Co_Inductive] o fst)
+          |> maps (snd o snd)
       in
         Termtab.empty
         |> fold (add Simp o snd) simps
@@ -358,7 +339,7 @@
 fun normalize_eq (@{const Trueprop} $ (t as (t0 as Const (@{const_name HOL.eq}, _)) $ t1 $ t2)) =
     if Term_Ord.fast_term_ord (t1, t2) <> GREATER then t else t0 $ t2 $ t1
   | normalize_eq (@{const Trueprop} $ (t as @{const Not}
-        $ ((t0 as Const (@{const_name HOL.eq}, _)) $ t1 $ t2))) =
+      $ ((t0 as Const (@{const_name HOL.eq}, _)) $ t1 $ t2))) =
     if Term_Ord.fast_term_ord (t1, t2) <> GREATER then t else HOLogic.mk_not (t0 $ t2 $ t1)
   | normalize_eq (Const (@{const_name Pure.eq}, Type (_, [T, _])) $ t1 $ t2) =
     (if Term_Ord.fast_term_ord (t1, t2) <> GREATER then (t1, t2) else (t2, t1))
@@ -398,8 +379,7 @@
 
 fun struct_induct_rule_on th =
   (case Logic.strip_horn (prop_of th) of
-    (prems, @{const Trueprop}
-            $ ((p as Var ((p_name, 0), _)) $ (a as Var (_, ind_T)))) =>
+    (prems, @{const Trueprop} $ ((p as Var ((p_name, 0), _)) $ (a as Var (_, ind_T)))) =>
     if not (is_TVar ind_T) andalso length prems > 1 andalso
        exists (exists_subterm (curry (op aconv) p)) prems andalso
        not (exists (exists_subterm (curry (op aconv) a)) prems) then
@@ -416,13 +396,14 @@
         if (s, T) = ind_x orelse can dest_funT T then t else Var ((s, 0), T)
       | varify_noninducts t = t
 
-    val p_inst =
-      concl_prop |> map_aterms varify_noninducts |> close_form
-                 |> lambda (Free ind_x)
-                 |> hackish_string_of_term ctxt
+    val p_inst = concl_prop
+      |> map_aterms varify_noninducts
+      |> close_form
+      |> lambda (Free ind_x)
+      |> hackish_string_of_term ctxt
   in
-    ((fn () => name () ^ "[where " ^ p_name ^ " = " ^ quote p_inst ^ "]",
-      stature), th |> Rule_Insts.read_instantiate ctxt [((p_name, 0), p_inst)] [])
+    ((fn () => name () ^ "[where " ^ p_name ^ " = " ^ quote p_inst ^ "]", stature),
+     th |> Rule_Insts.read_instantiate ctxt [((p_name, 0), p_inst)] [])
   end
 
 fun type_match thy (T1, T2) =
@@ -436,7 +417,7 @@
       stmt_xs
       |> filter (fn (_, T) => type_match thy (T, ind_T))
       |> map_filter (try (TimeLimit.timeLimit instantiate_induct_timeout
-           (instantiate_induct_rule ctxt stmt p_name ax)))
+        (instantiate_induct_rule ctxt stmt p_name ax)))
     end
   | NONE => [ax])
 
@@ -451,7 +432,9 @@
         (hyp_ts |> filter_out (null o external_frees), concl_t)
         |> Logic.list_implies |> Object_Logic.atomize_term thy
       val ind_stmt_xs = external_frees ind_stmt
-    in maps (instantiate_if_induct_rule ctxt ind_stmt ind_stmt_xs) end
+    in
+      maps (instantiate_if_induct_rule ctxt ind_stmt ind_stmt_xs)
+    end
   else
     I
 
@@ -464,10 +447,8 @@
     val thy = Proof_Context.theory_of ctxt
     val global_facts = Global_Theory.facts_of thy
     val is_too_complex =
-      if generous orelse fact_count global_facts >= max_facts_for_complex_check then
-        K false
-      else
-        is_too_complex
+      if generous orelse fact_count global_facts >= max_facts_for_complex_check then K false
+      else is_too_complex
     val local_facts = Proof_Context.facts_of ctxt
     val named_locals = local_facts |> Facts.dest_static true [global_facts]
     val assms = Assumption.all_assms_of ctxt
@@ -479,8 +460,7 @@
     val unnamed_locals =
       union Thm.eq_thm_prop (Facts.props local_facts) chained
       |> filter is_good_unnamed_local |> map (pair "" o single)
-    val full_space =
-      Name_Space.merge (Facts.space_of global_facts, Facts.space_of local_facts)
+    val full_space = Name_Space.merge (Facts.space_of global_facts, Facts.space_of local_facts)
     val is_blacklisted_or_something = is_blacklisted_or_something ctxt ho_atp
 
     fun add_facts global foldx facts =
@@ -532,12 +512,12 @@
                  end)) ths (n, accum))
           end)
   in
-    (* The single-theorem names go before the multiple-theorem ones (e.g.,
-       "xxx" vs. "xxx(3)"), so that single names are preferred when both are
-       available. *)
-    `I [] |> add_facts false fold local_facts (unnamed_locals @ named_locals)
-          |> add_facts true Facts.fold_static global_facts global_facts
-          |> op @
+    (* The single-theorem names go before the multiple-theorem ones (e.g., "xxx" vs. "xxx(3)"), so
+       that single names are preferred when both are available. *)
+    `I []
+    |> add_facts false fold local_facts (unnamed_locals @ named_locals)
+    |> add_facts true Facts.fold_static global_facts global_facts
+    |> op @
   end
 
 fun nearly_all_facts ctxt ho_atp {add, del, only} reserved css chained hyp_ts concl_t =
@@ -545,13 +525,11 @@
     []
   else
     let
-      val chained =
-        chained
-        |> maps (fn th => insert Thm.eq_thm_prop (zero_var_indexes th) [th])
+      val chained = chained |> maps (fn th => insert Thm.eq_thm_prop (zero_var_indexes th) [th])
     in
       (if only then
          maps (map (fn ((name, stature), th) => ((K name, stature), th))
-               o fact_of_ref ctxt reserved chained css) add
+           o fact_of_ref ctxt reserved chained css) add
        else
          let
            val (add, del) = pairself (Attrib.eval_thms ctxt) (add, del)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -16,7 +16,7 @@
   val trace : bool Config.T
 
   type isar_params =
-    bool * (string option * string option) * Time.time * real * bool * bool
+    bool * (string option * string option) * Time.time * real option * bool * bool
     * (term, string) atp_step list * thm
 
   val proof_text : Proof.context -> bool -> bool option -> bool option -> (unit -> isar_params) ->
@@ -46,18 +46,29 @@
 
 val trace = Attrib.setup_config_bool @{binding sledgehammer_isar_trace} (K false)
 
+val e_definition_rule = "definition"
 val e_skolemize_rule = "skolemize"
+val leo2_extcnf_forall_neg_rule = "extcnf_forall_neg"
+val satallax_skolemize_rule = "tab_ex"
 val spass_pirate_datatype_rule = "DT"
 val vampire_skolemisation_rule = "skolemisation"
-(* TODO: Use "Z3_Proof.string_of_rule" once it is moved to Isabelle *)
-val z3_skolemize_rule = "sk"
-val z3_th_lemma_rule = "th-lemma"
+val veriT_la_generic_rule = "la_generic"
+val veriT_simp_arith_rule = "simp_arith"
+val veriT_tmp_ite_elim_rule = "tmp_ite_elim"
+val veriT_tmp_skolemize_rule = "tmp_skolemize"
+val z3_skolemize_rule = Z3_New_Proof.string_of_rule Z3_New_Proof.Skolemize
+val z3_th_lemma_rule_prefix = Z3_New_Proof.string_of_rule (Z3_New_Proof.Th_Lemma "")
+val zipperposition_cnf_rule = "cnf"
 
 val skolemize_rules =
-  [e_skolemize_rule, spass_skolemize_rule, vampire_skolemisation_rule, z3_skolemize_rule]
+  [e_definition_rule, e_skolemize_rule, leo2_extcnf_forall_neg_rule, satallax_skolemize_rule,
+   spass_skolemize_rule, vampire_skolemisation_rule, veriT_tmp_ite_elim_rule,
+   veriT_tmp_skolemize_rule, z3_skolemize_rule, zipperposition_cnf_rule]
 
 val is_skolemize_rule = member (op =) skolemize_rules
-val is_arith_rule = String.isPrefix z3_th_lemma_rule
+fun is_arith_rule rule =
+  String.isPrefix z3_th_lemma_rule_prefix rule orelse rule = veriT_simp_arith_rule orelse
+  rule = veriT_la_generic_rule
 val is_datatype_rule = String.isPrefix spass_pirate_datatype_rule
 
 fun raw_label_of_num num = (num, 0)
@@ -71,59 +82,74 @@
 fun add_line_pass1 (line as (name, role, t, rule, [])) lines =
     (* No dependencies: lemma (for Z3), fact, conjecture, or (for Vampire) internal facts or
        definitions. *)
-    if role = Conjecture orelse role = Negated_Conjecture then line :: lines
-    else if t aconv @{prop True} then map (replace_dependencies_in_line (name, [])) lines
-    else if role = Lemma orelse role = Hypothesis orelse is_arith_rule rule then line :: lines
-    else if role = Axiom then lines (* axioms (facts) need no proof lines *)
-    else map (replace_dependencies_in_line (name, [])) lines
+    if role = Conjecture orelse role = Negated_Conjecture then
+      line :: lines
+    else if t aconv @{prop True} then
+      map (replace_dependencies_in_line (name, [])) lines
+    else if role = Lemma orelse role = Hypothesis orelse is_arith_rule rule then
+      line :: lines
+    else if role = Axiom then
+      lines (* axioms (facts) need no proof lines *)
+    else
+      map (replace_dependencies_in_line (name, [])) lines
   | add_line_pass1 line lines = line :: lines
 
-fun add_lines_pass2 res _ [] = rev res
-  | add_lines_pass2 res prev_t ((line as (name, role, t, rule, deps)) :: lines) =
+fun add_lines_pass2 res [] = rev res
+  | add_lines_pass2 res ((line as (name, role, t, rule, deps)) :: lines) =
     let
-      fun looks_boring () =
-        t aconv @{prop True} orelse t aconv @{prop False} orelse t aconv prev_t orelse
-        length deps < 2
+      fun normalize role =
+        role = Conjecture ? (HOLogic.dest_Trueprop #> s_not #> HOLogic.mk_Trueprop)
+
+      val norm_t = normalize role t
+      val is_duplicate =
+        exists (fn (prev_name, prev_role, prev_t, _, _) =>
+            member (op =) deps prev_name andalso
+            Term.aconv_untyped (normalize prev_role prev_t, norm_t))
+          res
+
+      fun looks_boring () = t aconv @{prop False} orelse length deps < 2
 
       fun is_skolemizing_line (_, _, _, rule', deps') =
         is_skolemize_rule rule' andalso member (op =) deps' name
+
       fun is_before_skolemize_rule () = exists is_skolemizing_line lines
     in
-      if role <> Plain orelse is_skolemize_rule rule orelse is_arith_rule rule orelse
-         is_datatype_rule rule orelse null lines orelse not (looks_boring ()) orelse
-         is_before_skolemize_rule () then
-        add_lines_pass2 (line :: res) t lines
+      if is_duplicate orelse
+          (role = Plain andalso not (is_skolemize_rule rule) andalso
+           not (is_arith_rule rule) andalso not (is_datatype_rule rule) andalso
+           not (null lines) andalso looks_boring () andalso not (is_before_skolemize_rule ())) then
+        add_lines_pass2 res (map (replace_dependencies_in_line (name, deps)) lines)
       else
-        add_lines_pass2 res t (map (replace_dependencies_in_line (name, deps)) lines)
+        add_lines_pass2 (line :: res) lines
     end
 
 type isar_params =
-  bool * (string option * string option) * Time.time * real * bool * bool
+  bool * (string option * string option) * Time.time * real option * bool * bool
   * (term, string) atp_step list * thm
 
 val basic_systematic_methods = [Metis_Method (NONE, NONE), Meson_Method, Blast_Method, SATx_Method]
-val simp_based_methods = [Auto_Method, Simp_Method, Fastforce_Method, Force_Method]
+val basic_simp_based_methods = [Auto_Method, Simp_Method, Force_Method]
 val basic_arith_methods = [Linarith_Method, Presburger_Method, Algebra_Method]
 
-val arith_methods = basic_arith_methods @ simp_based_methods @ basic_systematic_methods
+val arith_methods = basic_arith_methods @ basic_simp_based_methods @ basic_systematic_methods
 val datatype_methods = [Simp_Method, Simp_Size_Method]
-val systematic_methods0 = basic_systematic_methods @ basic_arith_methods @ simp_based_methods @
-  [Metis_Method (SOME no_typesN, NONE)]
-val rewrite_methods = simp_based_methods @ basic_systematic_methods @ basic_arith_methods
-val skolem_methods = basic_systematic_methods
+val systematic_methods =
+  basic_systematic_methods @ basic_arith_methods @ basic_simp_based_methods @
+  [Metis_Method (SOME full_typesN, NONE), Metis_Method (SOME no_typesN, NONE)]
+val rewrite_methods = basic_simp_based_methods @ basic_systematic_methods @ basic_arith_methods
+val skolem_methods = basic_systematic_methods @ [Auto_Choice_Method]
 
 fun isar_proof_text ctxt debug isar_proofs smt_proofs isar_params
-    (one_line_params as ((_, one_line_play), banner, used_facts, minimize_command, subgoal,
-       subgoal_count)) =
+    (one_line_params as ((used_facts, (_, one_line_play)), banner, subgoal, subgoal_count)) =
   let
     val _ = if debug then Output.urgent_message "Constructing Isar proof..." else ()
 
     fun generate_proof_text () =
       let
-        val (verbose, alt_metis_args, preplay_timeout, compress, try0, minimize, atp_proof, goal) =
+        val (verbose, alt_metis_args, preplay_timeout, compress, try0, minimize, atp_proof0, goal) =
           isar_params ()
 
-        val systematic_methods = insert (op =) (Metis_Method alt_metis_args) systematic_methods0
+        val systematic_methods' = insert (op =) (Metis_Method alt_metis_args) systematic_methods
 
         fun massage_methods (meths as meth :: _) =
           if not try0 then [meth]
@@ -135,7 +161,10 @@
         val ctxt = ctxt |> Variable.set_body false |> Proof_Context.add_fixes fixes |> snd
 
         val do_preplay = preplay_timeout <> Time.zeroTime
-        val compress = if isar_proofs = NONE andalso do_preplay then 1000.0 else compress
+        val compress =
+          (case compress of
+            NONE => if isar_proofs = NONE andalso do_preplay then 1000.0 else 10.0
+          | SOME n => n)
 
         fun is_fixed ctxt = Variable.is_declared ctxt orf Name.is_skolem
         fun skolems_of ctxt t = Term.add_frees t [] |> filter_out (is_fixed ctxt o fst) |> rev
@@ -144,9 +173,8 @@
           if keep_role role then SOME ((raw_label_of_num num, t), rule) else NONE
 
         val atp_proof =
-          atp_proof
-          |> rpair [] |-> fold_rev add_line_pass1
-          |> add_lines_pass2 [] Term.dummy
+          fold_rev add_line_pass1 atp_proof0 []
+          |> add_lines_pass2 []
 
         val conjs =
           map_filter (fn (name, role, _, _, _) =>
@@ -169,7 +197,7 @@
         val (lems, _) =
           fold_map add_lemma (map_filter (get_role (curry (op =) Lemma)) atp_proof) ctxt
 
-        val bot = atp_proof |> List.last |> #1
+        val bot = #1 (List.last atp_proof)
 
         val refute_graph =
           atp_proof
@@ -196,18 +224,20 @@
 
         val rule_of_clause_id = fst o the o Symtab.lookup steps o fst
 
-        fun prop_of_clause [(num, _)] = Symtab.lookup steps num |> the |> snd |> close_form
+        val finish_off = close_form #> rename_bound_vars
+
+        fun prop_of_clause [(num, _)] = Symtab.lookup steps num |> the |> snd |> finish_off
           | prop_of_clause names =
             let
-              val lits = map (HOLogic.dest_Trueprop o snd)
-                (map_filter (Symtab.lookup steps o fst) names)
+              val lits =
+                map (HOLogic.dest_Trueprop o snd) (map_filter (Symtab.lookup steps o fst) names)
             in
               (case List.partition (can HOLogic.dest_not) lits of
                 (negs as _ :: _, pos as _ :: _) =>
                 s_imp (Library.foldr1 s_conj (map HOLogic.dest_not negs), Library.foldr1 s_disj pos)
               | _ => fold (curry s_disj) lits @{term False})
             end
-            |> HOLogic.mk_Trueprop |> close_form
+            |> HOLogic.mk_Trueprop |> finish_off
 
         fun maybe_show outer c = (outer andalso eq_set (op =) (c, conjs)) ? cons Show
 
@@ -215,7 +245,7 @@
             accum
             |> (if tainted = [] then
                   cons (Prove (if outer then [Show] else [], [], no_label, concl_t, [],
-                    (the_list predecessor, []), massage_methods systematic_methods, ""))
+                    sort_facts (the_list predecessor, []), massage_methods systematic_methods', ""))
                 else
                   I)
             |> rev
@@ -226,12 +256,14 @@
               val rule = rule_of_clause_id id
               val skolem = is_skolemize_rule rule
 
-              val deps = fold add_fact_of_dependencies gamma ([], [])
+              val deps = ([], [])
+                |> fold add_fact_of_dependencies gamma
+                |> sort_facts
               val meths =
                 (if skolem then skolem_methods
                  else if is_arith_rule rule then arith_methods
                  else if is_datatype_rule rule then datatype_methods
-                 else systematic_methods)
+                 else systematic_methods')
                 |> massage_methods
 
               fun prove sub facts = Prove (maybe_show outer c [], [], l, t, sub, facts, meths, "")
@@ -241,16 +273,23 @@
                 (case gamma of
                   [g] =>
                   if skolem andalso is_clause_tainted g then
-                    let val subproof = Proof (skolems_of ctxt (prop_of_clause g), [], rev accum) in
-                      isar_steps outer (SOME l) [prove [subproof] ([], [])] infs
-                    end
+                    (case skolems_of ctxt (prop_of_clause g) of
+                      [] => steps_of_rest (prove [] deps)
+                    | skos =>
+                      let val subproof = Proof (skos, [], rev accum) in
+                        isar_steps outer (SOME l) [prove [subproof] ([], [])] infs
+                      end)
                   else
                     steps_of_rest (prove [] deps)
                 | _ => steps_of_rest (prove [] deps))
               else
                 steps_of_rest
-                  (if skolem then Prove ([], skolems_of ctxt t, l, t, [], deps, meths, "")
-                   else prove [] deps)
+                  (if skolem then
+                     (case skolems_of ctxt t of
+                       [] => prove [] deps
+                     | skos => Prove ([], skos, l, t, [], deps, meths, ""))
+                   else
+                     prove [] deps)
             end
           | isar_steps outer predecessor accum (Cases cases :: infs) =
             let
@@ -262,7 +301,7 @@
               val step =
                 Prove (maybe_show outer c [], [], l, t,
                   map isar_case (filter_out (null o snd) cases),
-                  (the_list predecessor, []), massage_methods systematic_methods, "")
+                  sort_facts (the_list predecessor, []), massage_methods systematic_methods', "")
             in
               isar_steps outer (SOME l) (step :: accum) infs
             end
@@ -318,18 +357,12 @@
                (keep_fastest_method_of_isar_step (!preplay_data)
                 #> minimize ? minimize_isar_step_dependencies ctxt preplay_data)
           |> tap (trace_isar_proof "Minimized")
-          (* It's not clear whether this is worth the trouble (and if so, "compress" has an
-             unnatural semantics): *)
-(*
-          |> minimize
-               ? (compress_isar_proof ctxt compress preplay_timeout preplay_data
-                  #> tap (trace_isar_proof "Compressed again"))
-*)
           |> `(preplay_outcome_of_isar_proof (!preplay_data))
           ||> (comment_isar_proof comment_of
                #> chain_isar_proof
                #> kill_useless_labels_in_isar_proof
-               #> relabel_isar_proof_nicely)
+               #> relabel_isar_proof_nicely
+               #> rationalize_obtains_in_isar_proofs ctxt)
       in
         (case add_isar_steps (steps_of_isar_proof isar_proof) 0 of
           1 =>
@@ -338,12 +371,11 @@
                (case isar_proof of
                  Proof (_, _, [Prove (_, _, _, _, _, (_, gfs), meth :: _, _)]) =>
                  let val used_facts' = filter (member (op =) gfs o fst) used_facts in
-                   ((meth, play_outcome), banner, used_facts', minimize_command, subgoal,
-                    subgoal_count)
+                   ((used_facts', (meth, play_outcome)), banner, subgoal, subgoal_count)
                  end)
              else
                one_line_params) ^
-          (if isar_proofs = SOME true then "\n(No structured proof available -- proof too simple.)"
+          (if isar_proofs = SOME true then "\n(No Isar proof available.)"
            else "")
         | num_steps =>
           let
@@ -352,7 +384,7 @@
               (if do_preplay then [string_of_play_outcome play_outcome] else [])
           in
             one_line_proof_text ctxt 0 one_line_params ^
-            "\n\nStructured proof" ^ (commas msg |> not (null msg) ? enclose " (" ")") ^ ":\n" ^
+            "\n\nIsar proof" ^ (commas msg |> not (null msg) ? enclose " (" ")") ^ ":\n" ^
             Active.sendback_markup [Markup.padding_command]
               (string_of_isar_proof ctxt subgoal subgoal_count isar_proof)
           end)
@@ -375,7 +407,7 @@
   | Play_Failed => true)
 
 fun proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained
-    (one_line_params as (preplay, _, _, _, _, _)) =
+    (one_line_params as ((_, preplay), _, _, _)) =
   (if isar_proofs = SOME true orelse
       (isar_proofs = NONE andalso isar_proof_would_be_a_good_idea smt_proofs preplay) then
      isar_proof_text ctxt debug isar_proofs smt_proofs isar_params
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar_compress.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar_compress.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -53,10 +53,10 @@
            updates as Prove (qs', xs', l', t', subproofs', facts', meths', comment') :: updates') =
         (if l = l' then
            update_subproofs subproofs' updates'
-           |>> (fn subproofs' => Prove (qs', xs', l', t', subproofs', facts', meths', comment'))
+           |>> (fn subproofs'' => Prove (qs', xs', l', t', subproofs'', facts', meths', comment'))
          else
            update_subproofs subproofs updates
-           |>> (fn subproofs => Prove (qs, xs, l, t, subproofs, facts, meths, comment)))
+           |>> (fn subproofs' => Prove (qs, xs, l, t, subproofs', facts, meths, comment)))
         |>> (fn step => step :: steps)
       | update_step step (steps, updates) = (step :: steps, updates)
     and update_subproofs [] updates = ([], updates)
@@ -64,9 +64,9 @@
       | update_subproofs (proof :: subproofs) updates =
         update_proof proof (update_subproofs subproofs updates)
     and update_proof proof (proofs, []) = (proof :: proofs, [])
-      | update_proof (Proof (fix, assms, steps)) (proofs, updates) =
-        let val (steps, updates) = update_steps steps updates in
-          (Proof (fix, assms, steps) :: proofs, updates)
+      | update_proof (Proof (xs, assms, steps)) (proofs, updates) =
+        let val (steps', updates') = update_steps steps updates in
+          (Proof (xs, assms, steps') :: proofs, updates')
         end
   in
     fst (update_steps steps (rev updates))
@@ -87,18 +87,19 @@
     (hopeful @ hopeless, hopeless)
   end
 
-fun merge_steps preplay_data (Prove ([], fix1, l1, _, subproofs1, (lfs1, gfs1), meths1, comment1))
-      (Prove (qs2, fix2, l2, t, subproofs2, (lfs2, gfs2), meths2, comment2)) =
+fun merge_steps preplay_data (Prove ([], xs1, l1, _, subproofs1, (lfs1, gfs1), meths1, comment1))
+      (Prove (qs2, xs2, l2, t, subproofs2, (lfs2, gfs2), meths2, comment2)) =
   let
     val (meths, hopeless) = merge_methods preplay_data (l1, meths1) (l2, meths2)
     val lfs = union (op =) lfs1 (remove (op =) l1 lfs2)
     val gfs = union (op =) gfs1 gfs2
   in
-    (Prove (qs2, if member (op =) qs2 Show then [] else union (op =) fix1 fix2, l2, t,
-       subproofs1 @ subproofs2, (lfs, gfs), meths, comment1 ^ comment2), hopeless)
+    (Prove (qs2, inter (op =) (Term.add_frees t []) (xs1 @ xs2), l2, t,
+       subproofs1 @ subproofs2, sort_facts (lfs, gfs), meths, comment1 ^ comment2),
+     hopeless)
   end
 
-val merge_slack_time = seconds 0.005
+val merge_slack_time = seconds 0.01
 val merge_slack_factor = 1.5
 
 fun adjust_merge_timeout max time =
@@ -117,7 +118,7 @@
       val (compress_further, decrement_step_count) =
         let
           val number_of_steps = add_isar_steps (steps_of_isar_proof proof) 0
-          val target_number_of_steps = Real.round (Real.fromInt number_of_steps / compress)
+          val target_number_of_steps = Real.ceil (Real.fromInt number_of_steps / compress)
           val delta = Unsynchronized.ref (number_of_steps - target_number_of_steps)
         in
           (fn () => !delta > 0, fn () => delta := !delta - 1)
@@ -153,10 +154,10 @@
         | _ => preplay_timeout)
 
       (* elimination of trivial, one-step subproofs *)
-      fun elim_one_subproof time (step as Prove (qs, fix, l, t, _, (lfs, gfs), meths, comment)) subs
+      fun elim_one_subproof time (step as Prove (qs, xs, l, t, _, (lfs, gfs), meths, comment)) subs
           nontriv_subs =
         if null subs orelse not (compress_further ()) then
-          Prove (qs, fix, l, t, List.revAppend (nontriv_subs, subs), (lfs, gfs), meths, comment)
+          Prove (qs, xs, l, t, List.revAppend (nontriv_subs, subs), (lfs, gfs), meths, comment)
         else
           (case subs of
             (sub as Proof (_, assms, [Prove (_, _, l', _, [], (lfs', gfs'), meths', _)])) :: subs =>
@@ -167,14 +168,14 @@
               val gfs'' = union (op =) gfs' gfs
               val (meths'' as _ :: _, hopeless) =
                 merge_methods (!preplay_data) (l', meths') (l, meths)
-              val step'' = Prove (qs, fix, l, t, subs'', (lfs'', gfs''), meths'', comment)
+              val step'' = Prove (qs, xs, l, t, subs'', (lfs'', gfs''), meths'', comment)
 
               (* check if the modified step can be preplayed fast enough *)
               val timeout = adjust_merge_timeout preplay_timeout (Time.+ (time, reference_time l'))
             in
               (case preplay_isar_step ctxt timeout hopeless step'' of
                 meths_outcomes as (_, Played time'') :: _ =>
-                (* l' successfully eliminated *)
+                (* "l'" successfully eliminated *)
                 (decrement_step_count ();
                  set_preplay_outcomes_of_isar_step ctxt time'' preplay_data step'' meths_outcomes;
                  map (replace_successor l' [l]) lfs';
@@ -192,8 +193,9 @@
 
       fun compress_top_level steps =
         let
-          fun cand_key (l, t_size) = (length (get_successors l), t_size)
-          val cand_ord = prod_ord int_ord (int_ord o swap) o pairself cand_key
+          val cand_key = apfst (length o get_successors)
+          val cand_ord =
+            prod_ord int_ord (prod_ord (int_ord o swap) (int_ord o swap)) o pairself cand_key
 
           fun pop_next_candidate [] = (NONE, [])
             | pop_next_candidate (cands as (cand :: cands')) =
@@ -215,13 +217,12 @@
                   val time_slice = time_mult (1.0 / Real.fromInt (length labels)) (reference_time l)
                   val timeouts =
                     map (adjust_merge_timeout preplay_timeout o curry Time.+ time_slice) times0
-                  val meths_outcomess =
-                    map3 (preplay_isar_step ctxt) timeouts hopelesses succs'
+                  val meths_outcomess = map3 (preplay_isar_step ctxt) timeouts hopelesses succs'
                 in
                   (case try (map (fn (_, Played time) :: _ => time)) meths_outcomess of
                     NONE => steps
                   | SOME times =>
-                    (* candidate successfully eliminated *)
+                    (* "l" successfully eliminated *)
                     (decrement_step_count ();
                      map3 (fn time => set_preplay_outcomes_of_isar_step ctxt time preplay_data)
                        times succs' meths_outcomess;
@@ -236,12 +237,17 @@
             else
               (case pop_next_candidate candidates of
                 (NONE, _) => steps (* no more candidates for elimination *)
-              | (SOME (l, _), candidates') =>
+              | (SOME (l, (num_xs, _)), candidates') =>
                 (case find_index (curry (op =) (SOME l) o label_of_isar_step) steps of
                   ~1 => steps
                 | i =>
-                  let val successors = get_successors l in
-                    if length successors > compress_degree then
+                  let
+                    val successors = get_successors l
+                    val num_successors = length successors
+                  in
+                    (* Be careful with "obtain", so that we don't "obtain" the same variable twice
+                       after a merge. *)
+                    if num_successors > (if num_xs > 0 then 1 else compress_degree) then
                       steps
                     else
                       steps
@@ -249,7 +255,7 @@
                       |> compression_loop candidates'
                   end))
 
-          fun add_cand (Prove (_, _, l, t, _, _, _, _)) = cons (l, size_of_term t)
+          fun add_cand (Prove (_, xs, l, t, _, _, _, _)) = cons (l, (length xs, size_of_term t))
             | add_cand _ = I
 
           (* the very last step is not a candidate *)
@@ -264,12 +270,13 @@
          can be eliminated. In the best case, this once again leads to a proof whose proof steps do
          not have subproofs. Applying this approach recursively will result in a flat proof in the
          best case. *)
-      fun compress_proof (proof as (Proof (fix, assms, steps))) =
-        if compress_further () then Proof (fix, assms, compress_steps steps) else proof
+      fun compress_proof (proof as (Proof (xs, assms, steps))) =
+        if compress_further () then Proof (xs, assms, compress_steps steps) else proof
       and compress_steps steps =
         (* bottom-up: compress innermost proofs first *)
-        steps |> map (fn step => step |> compress_further () ? compress_sub_levels)
-              |> compress_further () ? compress_top_level
+        steps
+        |> map (fn step => step |> compress_further () ? compress_sub_levels)
+        |> compress_further () ? compress_top_level
       and compress_sub_levels (Prove (qs, xs, l, t, subproofs, facts, meths, comment)) =
           (* compress subproofs *)
           Prove (qs, xs, l, t, map compress_proof subproofs, facts, meths, comment)
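
The compression changes above adjust three things: merged steps now run their fact lists through sort_facts, the target step count is rounded up rather than to the nearest integer, and a step that fixes variables (an "obtain") is only eliminated when it has at most one successor. Below is a minimal plain-SML sketch of the last two points, outside the Isabelle context; "may_eliminate" and its record fields are illustrative stand-ins, not the actual interface.

    (* Plain SML sketch; "compress_degree" and the record fields are hypothetical stand-ins. *)
    val compress_degree = 2

    (* Rounding up keeps at least ceil (n / compress) steps, as in the patched code. *)
    fun target_number_of_steps compress n = Real.ceil (Real.fromInt n / compress)

    (* A step that binds variables (num_xs > 0) may only be merged into a single successor,
       so that the same variable is never "obtain"ed twice after a merge. *)
    fun may_eliminate {num_xs, num_successors} =
      num_successors <= (if num_xs > 0 then 1 else compress_degree)

    val _ = print (Int.toString (target_number_of_steps 10.0 47) ^ "\n")   (* prints 5 *)
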
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar_minimize.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar_minimize.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -12,6 +12,7 @@
   type isar_preplay_data = Sledgehammer_Isar_Preplay.isar_preplay_data
 
   val keep_fastest_method_of_isar_step : isar_preplay_data -> isar_step -> isar_step
+  val minimized_isar_step : Proof.context -> Time.time -> isar_step -> Time.time * isar_step
   val minimize_isar_step_dependencies : Proof.context -> isar_preplay_data Unsynchronized.ref ->
     isar_step -> isar_step
   val postprocess_isar_proof_remove_unreferenced_steps : (isar_step -> isar_step) -> isar_proof ->
@@ -35,28 +36,33 @@
 
 val slack = seconds 0.025
 
+fun minimized_isar_step ctxt time
+    (Prove (qs, xs, l, t, subproofs, (lfs0, gfs00), meths as meth :: _, comment)) =
+  let
+    val gfs0 = filter (thms_influence_proof_method ctxt meth o thms_of_name ctxt) gfs00
+
+    fun mk_step_lfs_gfs lfs gfs =
+      Prove (qs, xs, l, t, subproofs, sort_facts (lfs, gfs), meths, comment)
+
+    fun minimize_half _ min_facts [] time = (min_facts, time)
+      | minimize_half mk_step min_facts (fact :: facts) time =
+        (case preplay_isar_step_for_method ctxt (Time.+ (time, slack)) meth
+            (mk_step (min_facts @ facts)) of
+          Played time' => minimize_half mk_step min_facts facts time'
+        | _ => minimize_half mk_step (fact :: min_facts) facts time)
+
+    val (min_lfs, time') = minimize_half (fn lfs => mk_step_lfs_gfs lfs gfs0) [] lfs0 time
+    val (min_gfs, time'') = minimize_half (mk_step_lfs_gfs min_lfs) [] gfs0 time'
+  in
+    (time'', mk_step_lfs_gfs min_lfs min_gfs)
+  end
+
 fun minimize_isar_step_dependencies ctxt preplay_data
-      (step as Prove (qs, xs, l, t, subproofs, (lfs0, gfs00), meths as meth :: _, comment)) =
+      (step as Prove (_, _, l, _, _, _, meth :: _, _)) =
     (case Lazy.force (preplay_outcome_of_isar_step_for_method (!preplay_data) l meth) of
       Played time =>
-      let
-        val gfs0 = filter (influence_proof_method ctxt meth o thms_of_name ctxt) gfs00
-
-        fun mk_step_lfs_gfs lfs gfs = Prove (qs, xs, l, t, subproofs, (lfs, gfs), meths, comment)
-
-        fun minimize_facts _ min_facts [] time = (min_facts, time)
-          | minimize_facts mk_step min_facts (fact :: facts) time =
-            (case preplay_isar_step_for_method ctxt (Time.+ (time, slack)) meth
-                (mk_step (min_facts @ facts)) of
-              Played time' => minimize_facts mk_step min_facts facts time'
-            | _ => minimize_facts mk_step (fact :: min_facts) facts time)
-
-        val (min_lfs, time') = minimize_facts (fn lfs => mk_step_lfs_gfs lfs gfs0) [] lfs0 time
-        val (min_gfs, time'') = minimize_facts (mk_step_lfs_gfs min_lfs) [] gfs0 time'
-
-        val step' = mk_step_lfs_gfs min_lfs min_gfs
-      in
-        set_preplay_outcomes_of_isar_step ctxt time'' preplay_data step' [(meth, Played time'')];
+      let val (time', step') = minimized_isar_step ctxt time step in
+        set_preplay_outcomes_of_isar_step ctxt time' preplay_data step' [(meth, Played time')];
         step'
       end
     | _ => step (* don't touch steps that time out or fail *))
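
The newly exported minimized_isar_step performs a linear pass over the step's local and global facts: each fact is tentatively dropped, the step is preplayed again within the last successful time plus some slack, and the fact is kept only if the step no longer plays without it. The following self-contained SML sketch shows that loop, with a hypothetical boolean oracle "plays" standing in for preplay_isar_step_for_method and the time bookkeeping omitted.

    (* Plain SML sketch of linear fact minimization. *)
    fun minimize_facts plays facts =
      let
        fun min kept [] = rev kept
          | min kept (f :: rest) =
            if plays (rev kept @ rest)    (* does the step still play without f? *)
            then min kept rest            (* yes: f was not needed *)
            else min (f :: kept) rest     (* no: f is essential, keep it *)
      in
        min [] facts
      end

    (* Hypothetical example: only "assoc" and "comm" are really needed. *)
    val needed = ["assoc", "comm"]
    fun plays fs = List.all (fn n => List.exists (fn f => f = n) fs) needed
    val _ = minimize_facts plays ["refl", "assoc", "sym", "comm"]   (* ["assoc", "comm"] *)
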
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar_preplay.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar_preplay.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -41,6 +41,27 @@
 
 val trace = Attrib.setup_config_bool @{binding sledgehammer_preplay_trace} (K false)
 
+fun par_list_map_filter_with_timeout _ _ _ _ [] = []
+  | par_list_map_filter_with_timeout get_time min_timeout timeout0 f xs =
+    let
+      val next_timeout = Unsynchronized.ref timeout0
+
+      fun apply_f x =
+        let val timeout = !next_timeout in
+          if Time.<= (timeout, min_timeout) then
+            NONE
+          else
+            let val y = f timeout x in
+              (case get_time y of
+                SOME time => next_timeout := time
+              | _ => ());
+              SOME y
+            end
+        end
+    in
+      map_filter I (Par_List.map apply_f xs)
+    end
+
 fun enrich_context_with_local_facts proof ctxt =
   let
     val thy = Proof_Context.theory_of ctxt
@@ -140,16 +161,18 @@
 fun preplay_isar_step_for_method ctxt timeout meth =
   try (raw_preplay_step_for_method ctxt timeout meth) #> the_default Play_Failed
 
-fun preplay_isar_step ctxt timeout hopeless step =
+val min_preplay_timeout = seconds 0.002
+
+fun preplay_isar_step ctxt timeout0 hopeless step =
   let
-    fun try_method meth =
-      (case preplay_isar_step_for_method ctxt timeout meth step of
-        outcome as Played _ => SOME (meth, outcome)
-      | _ => NONE)
+    fun preplay timeout meth = (meth, preplay_isar_step_for_method ctxt timeout meth step)
+    fun get_time (_, Played time) = SOME time
+      | get_time _ = NONE
 
     val meths = proof_methods_of_isar_step step |> subtract (op =) hopeless
   in
-    the_list (Par_List.get_some try_method meths)
+    par_list_map_filter_with_timeout get_time min_preplay_timeout timeout0 preplay meths
+    |> sort (play_outcome_ord o pairself snd)
   end
 
 type isar_preplay_data = (proof_method * play_outcome Lazy.lazy) list Canonical_Label_Tab.table
@@ -181,7 +204,7 @@
   if Lazy.is_finished outcome then Lazy.force outcome else Play_Timed_Out Time.zeroTime
 
 fun get_best_method_outcome force =
-  tap (List.app (K () o Lazy.future Future.default_params o snd)) (* optional parallelism *)
+  tap (List.app (K () o Lazy.future Future.default_params o snd)) (* could be parallelized *)
   #> map (apsnd force)
   #> sort (play_outcome_ord o pairself snd)
   #> hd
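
par_list_map_filter_with_timeout preplays one step per proof method while sharing a shrinking time budget: every successful run lowers the timeout used for the remaining methods, and methods are skipped entirely once the budget falls below min_preplay_timeout. The sketch below is a sequential plain-SML approximation; the real code distributes the applications with Isabelle's Par_List and works with Time.time, whereas the integer milliseconds and the example "preplay" function here are simplifications.

    (* Sequential SML sketch; times are integer milliseconds. *)
    fun map_filter_with_timeout get_time min_timeout timeout0 f xs =
      let
        val next_timeout = ref timeout0
        fun apply x =
          let val timeout = !next_timeout in
            if timeout <= min_timeout then NONE
            else
              let val y = f timeout x in
                (case get_time y of
                   SOME t => next_timeout := t    (* a success tightens the budget *)
                 | NONE => ());
                SOME y
              end
          end
      in
        List.mapPartial apply xs
      end

    (* Hypothetical "methods" identified by their runtime; a run succeeds if it fits the budget. *)
    fun preplay timeout meth = (meth, if meth <= timeout then SOME meth else NONE)
    val outcomes =
      map_filter_with_timeout (fn (_, t) => t) 2 100 preplay [50, 80, 10, 1, 7]
    (* [(50, SOME 50), (80, NONE), (10, SOME 10), (1, SOME 1)]; the last method (7) is skipped
       because the budget has shrunk to 1, which is below the minimum of 2. *)
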
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar_proof.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar_proof.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -26,9 +26,9 @@
 
   val label_ord : label * label -> order
   val string_of_label : label -> string
+  val sort_facts : facts -> facts
 
   val steps_of_isar_proof : isar_proof -> isar_step list
-
   val label_of_isar_step : isar_step -> label option
   val facts_of_isar_step : isar_step -> facts
   val proof_methods_of_isar_step : isar_step -> proof_method list
@@ -46,6 +46,7 @@
   val kill_useless_labels_in_isar_proof : isar_proof -> isar_proof
   val relabel_isar_proof_canonically : isar_proof -> isar_proof
   val relabel_isar_proof_nicely : isar_proof -> isar_proof
+  val rationalize_obtains_in_isar_proofs : Proof.context -> isar_proof -> isar_proof
 
   val string_of_isar_proof : Proof.context -> int -> int -> isar_proof -> string
 end;
@@ -75,10 +76,24 @@
 
 val no_label = ("", ~1)
 
-val label_ord = pairself swap #> prod_ord int_ord fast_string_ord
+(* cf. "label_ord" below *)
+val assume_prefix = "a"
+val have_prefix = "f"
+
+fun label_ord ((s1, i1), (s2, i2)) =
+  (case int_ord (pairself String.size (s1, s2)) of
+    EQUAL =>
+    (case string_ord (s1, s2) of
+      EQUAL => int_ord (i1, i2)
+    | ord => ord (* "assume" before "have" *))
+  | ord => ord)
 
 fun string_of_label (s, num) = s ^ string_of_int num
 
+(* Put the nearest local label first, since it's the most likely to be replaced by a "hence".
+   (Some preplaying proof methods, e.g. "blast", react poorly to fact reorderings.) *)
+fun sort_facts (lfs, gfs) = (sort (label_ord o swap) lfs, sort string_ord gfs)
+
 fun steps_of_isar_proof (Proof (_, _, steps)) = steps
 
 fun label_of_isar_step (Prove (_, _, l, _, _, _, _, _)) = SOME l
@@ -174,9 +189,6 @@
     fst (relabel_proof proof (0, []))
   end
 
-val assume_prefix = "a"
-val have_prefix = "f"
-
 val relabel_isar_proof_nicely =
   let
     fun next_label depth prefix l (accum as (next, subst)) =
@@ -206,6 +218,35 @@
     relabel_proof [] 0
   end
 
+fun stutter_single_letter s = String.extract (s, 0, SOME 1) ^ s
+
+fun rationalize_obtains_in_isar_proofs ctxt =
+  let
+    fun rename_obtains xs (subst, ctxt) =
+      let
+        val Ts = map snd xs
+        val new_names0 = map (stutter_single_letter o var_name_of_typ o body_type) Ts
+        val (new_names, ctxt') = Variable.variant_fixes new_names0 ctxt
+        val ys = map2 pair new_names Ts
+      in
+        (ys, ((map Free xs ~~ map Free ys) @ subst, ctxt'))
+      end
+
+    fun rationalize_step (Prove (qs, xs, l, t, subs, facts, meths, comment)) subst_ctxt =
+        let
+          val (xs', subst_ctxt' as (subst', _)) = rename_obtains xs subst_ctxt
+          val t' = subst_atomic subst' t
+          val subs' = map (rationalize_proof subst_ctxt') subs
+        in
+          (Prove (qs, xs', l, t', subs', facts, meths, comment), subst_ctxt')
+        end
+    and rationalize_proof (subst_ctxt as (subst, _)) (Proof (fix, assms, steps)) =
+      Proof (fix, map (apsnd (subst_atomic subst)) assms,
+        fst (fold_map rationalize_step steps subst_ctxt))
+  in
+    rationalize_proof ([], ctxt)
+  end
+
 val indent_size = 2
 
 fun string_of_isar_proof ctxt0 i n proof =
@@ -290,18 +331,17 @@
     and add_step_pre ind qs subs (s, ctxt) =
       (s ^ of_subproofs ind ctxt qs subs ^ of_indent ind, ctxt)
     and add_step ind (Let (t1, t2)) =
-        add_str (of_indent ind ^ "let ")
-        #> add_term t1 #> add_str " = " #> add_term t2 #> add_str "\n"
+        add_str (of_indent ind ^ "let ") #> add_term t1 #> add_str " = " #> add_term t2
+        #> add_str "\n"
       | add_step ind (Prove (qs, xs, l, t, subs, (ls, ss), meth :: _, comment)) =
         add_step_pre ind qs subs
         #> (case xs of
-             [] => add_str (of_have qs (length subs) ^ " ")
-           | _ => add_str (of_obtain qs (length subs) ^ " ") #> add_frees xs #> add_str " where ")
+            [] => add_str (of_have qs (length subs) ^ " ")
+          | _ => add_str (of_obtain qs (length subs) ^ " ") #> add_frees xs #> add_str " where ")
         #> add_str (of_label l)
         #> add_term t
-        #> add_str (" " ^
-             of_method (sort_distinct label_ord ls) (sort_distinct string_ord ss) meth ^
-             (if comment = "" then "" else " (* " ^ comment ^ " *)") ^ "\n")
+        #> add_str (" " ^ of_method ls ss meth ^
+          (if comment = "" then "" else " (* " ^ comment ^ " *)") ^ "\n")
     and add_steps ind = fold (add_step ind)
     and of_proof ind ctxt (Proof (xs, assms, steps)) =
       ("", ctxt)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -814,7 +814,7 @@
       {comment = "Goal: " ^ goal_name, state = Proof.init ctxt, goal = goal, subgoal = 1,
        subgoal_count = 1, factss = [("", facts)]}
   in
-    get_minimizing_prover ctxt MaSh (K ()) prover params (K (K (K ""))) problem
+    get_minimizing_prover ctxt MaSh (K ()) prover params problem
   end
 
 val bad_types = [@{type_name prop}, @{type_name bool}, @{type_name fun}]
@@ -974,8 +974,8 @@
   thms_in_proof max_dependencies (SOME name_tabs) th
   |> Option.map (fn deps =>
     if deps = [typedef_dep] orelse deps = [class_some_dep] orelse
-       exists (member (op =) fundef_ths) deps orelse exists (member (op =) typedef_ths) deps orelse
-       is_size_def deps th then
+        exists (member (op =) fundef_ths) deps orelse exists (member (op =) typedef_ths) deps orelse
+        is_size_def deps th then
       []
     else
       deps)
@@ -1378,7 +1378,7 @@
             let
               val name = nickname_of_thm th
               val feats = features_of ctxt (theory_of_thm th) stature [prop_of th]
-              val deps = deps_of status th |> these
+              val deps = these (deps_of status th)
               val num_nontrivial = num_nontrivial |> not (null deps) ? Integer.add 1
               val learns = (name, parents, feats, deps) :: learns
               val (learns, next_commit) =
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_proof_methods.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_proof_methods.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -18,7 +18,7 @@
     Simp_Method |
     Simp_Size_Method |
     Auto_Method |
-    Fastforce_Method |
+    Auto_Choice_Method |
     Force_Method |
     Linarith_Method |
     Presburger_Method |
@@ -29,14 +29,13 @@
     Play_Timed_Out of Time.time |
     Play_Failed
 
-  type minimize_command = string list -> string
   type one_line_params =
-    (proof_method * play_outcome) * string * (string * stature) list * minimize_command * int * int
+    ((string * stature) list * (proof_method * play_outcome)) * string * int * int
 
   val is_proof_method_direct : proof_method -> bool
   val string_of_proof_method : Proof.context -> string list -> proof_method -> string
   val tac_of_proof_method : Proof.context -> thm list * thm list -> proof_method -> int -> tactic
-  val influence_proof_method : Proof.context -> proof_method -> thm list -> bool
+  val thms_influence_proof_method : Proof.context -> proof_method -> thm list -> bool
   val string_of_play_outcome : play_outcome -> string
   val play_outcome_ord : play_outcome * play_outcome -> order
   val one_line_proof_text : Proof.context -> int -> one_line_params -> string
@@ -58,7 +57,7 @@
   Simp_Method |
   Simp_Size_Method |
   Auto_Method |
-  Fastforce_Method |
+  Auto_Choice_Method |
   Force_Method |
   Linarith_Method |
   Presburger_Method |
@@ -69,9 +68,8 @@
   Play_Timed_Out of Time.time |
   Play_Failed
 
-type minimize_command = string list -> string
 type one_line_params =
-  (proof_method * play_outcome) * string * (string * stature) list * minimize_command * int * int
+  ((string * stature) list * (proof_method * play_outcome)) * string * int * int
 
 fun is_proof_method_direct (Metis_Method _) = true
   | is_proof_method_direct Meson_Method = true
@@ -96,7 +94,7 @@
       | Simp_Method => if null ss then "simp" else "simp add:"
       | Simp_Size_Method => "simp add: " ^ short_thm_name ctxt @{thm size_ne_size_imp_ne}
       | Auto_Method => "auto"
-      | Fastforce_Method => "fastforce"
+      | Auto_Choice_Method => "atomize_elim, auto intro!: " ^ short_thm_name ctxt @{thm choice}
       | Force_Method => "force"
       | Linarith_Method => "linarith"
       | Presburger_Method => "presburger"
@@ -129,18 +127,20 @@
     (case meth of
       SATx_Method => SAT.satx_tac ctxt
     | Blast_Method => blast_tac ctxt
-    | Auto_Method => K (Clasimp.auto_tac (silence_methods ctxt))
-    | Fastforce_Method => Clasimp.fast_force_tac (silence_methods ctxt)
-    | Force_Method => Clasimp.force_tac (silence_methods ctxt)
+    | Auto_Method => SELECT_GOAL (Clasimp.auto_tac (silence_methods ctxt))
+    | Auto_Choice_Method =>
+      AtomizeElim.atomize_elim_tac ctxt THEN'
+      SELECT_GOAL (Clasimp.auto_tac (silence_methods ctxt addSIs @{thms choice}))
+    | Force_Method => SELECT_GOAL (Clasimp.force_tac (silence_methods ctxt) 1)

     | Linarith_Method =>
       let val ctxt = Config.put Lin_Arith.verbose false ctxt in Lin_Arith.tac ctxt end
     | Presburger_Method => Cooper.tac true [] [] ctxt
     | Algebra_Method => Groebner.algebra_tac [] [] ctxt))
 
 val simp_based_methods =
-  [Simp_Method, Simp_Size_Method, Auto_Method, Fastforce_Method, Force_Method]
+  [Simp_Method, Simp_Size_Method, Auto_Method, Auto_Choice_Method, Force_Method]
 
-fun influence_proof_method ctxt meth ths =
+fun thms_influence_proof_method ctxt meth ths =
   not (member (op =) simp_based_methods meth) orelse
   let val ctxt' = silence_methods ctxt in
     (* unfortunate pointer comparison -- but it's always safe to consider a theorem useful *)
@@ -179,29 +179,13 @@
     (s |> s <> "" ? enclose " (" ")") ^ "."
   end
 
-fun minimize_line _ [] = ""
-  | minimize_line minimize_command ss =
-    (case minimize_command ss of
-      "" => ""
-    | command => "\nTo minimize: " ^ Active.sendback_markup [Markup.padding_command] command ^ ".")
-
-fun split_used_facts facts =
-  facts
-  |> List.partition (fn (_, (sc, _)) => sc = Chained)
-  |> pairself (sort_distinct (string_ord o pairself fst))
-
 fun one_line_proof_text ctxt num_chained
-    ((meth, play), banner, used_facts, minimize_command, subgoal, subgoal_count) =
-  let
-    val (chained, extra) = split_used_facts used_facts
-
-    val try_line =
-      map fst extra
-      |> proof_method_command ctxt meth subgoal subgoal_count (map fst chained) num_chained
-      |> (if play = Play_Failed then enclose "One-line proof reconstruction failed: " "."
-          else try_command_line banner play)
-  in
-    try_line ^ minimize_line minimize_command (map fst (extra @ chained))
+    ((used_facts, (meth, play)), banner, subgoal, subgoal_count) =
+  let val (chained, extra) = List.partition (fn (_, (sc, _)) => sc = Chained) used_facts in
+    map fst extra
+    |> proof_method_command ctxt meth subgoal subgoal_count (map fst chained) num_chained
+    |> (if play = Play_Failed then enclose "One-line proof reconstruction failed: " "."
+        else try_command_line banner play)
   end
 
 end;
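
one_line_proof_text now receives the used facts together with the preplay outcome and splits them by stature: chained facts (those already available in the goal's context) are kept apart from the extra facts that have to be named in the generated proof command. The small SML sketch below shows that split with a stature type reduced to two cases; the real stature carries more information and the fact names are made up for illustration.

    (* Simplified sketch: a used fact is (name, stature). *)
    datatype stature = Chained | General

    fun split_used_facts used_facts =
      List.partition (fn (_, sc) => sc = Chained) used_facts

    val (chained, extra) =
      split_used_facts [("assms(1)", Chained), ("add_assoc", General), ("add_comm", General)]
    (* chained = [("assms(1)", Chained)], extra = [("add_assoc", General), ("add_comm", General)];
       the extra names are the ones spelled out in the suggested one-line proof. *)
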
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -14,7 +14,6 @@
   type fact = Sledgehammer_Fact.fact
   type proof_method = Sledgehammer_Proof_Methods.proof_method
   type play_outcome = Sledgehammer_Proof_Methods.play_outcome
-  type minimize_command = Sledgehammer_Proof_Methods.minimize_command
 
   datatype mode = Auto_Try | Try | Normal | MaSh | Auto_Minimize | Minimize
 
@@ -36,11 +35,11 @@
      max_mono_iters : int option,
      max_new_mono_instances : int option,
      isar_proofs : bool option,
-     compress : real,
+     compress : real option,
      try0 : bool,
      smt_proofs : bool option,
      slice : bool,
-     minimize : bool option,
+     minimize : bool,
      timeout : Time.time,
      preplay_timeout : Time.time,
      expect : string}
@@ -57,32 +56,21 @@
     {outcome : atp_failure option,
      used_facts : (string * stature) list,
      used_from : fact list,
+     preferred_methss : proof_method * proof_method list list,
      run_time : Time.time,
-     preplay : (proof_method * play_outcome) Lazy.lazy,
-     message : proof_method * play_outcome -> string,
-     message_tail : string}
+     message : (unit -> (string * stature) list * (proof_method * play_outcome)) -> string}
 
-  type prover =
-    params -> ((string * string list) list -> string -> minimize_command) -> prover_problem ->
-    prover_result
+  type prover = params -> prover_problem -> prover_result
 
   val SledgehammerN : string
   val str_of_mode : mode -> string
-  val smtN : string
   val overlord_file_location_of_prover : string -> string * string
   val proof_banner : mode -> string -> string
-  val extract_proof_method : params -> proof_method -> string * (string * string list) list
-  val is_proof_method : string -> bool
   val is_atp : theory -> string -> bool
-  val bunch_of_proof_methods : bool -> bool -> string -> proof_method list
+  val bunches_of_proof_methods : bool -> bool -> bool -> string -> proof_method list list
   val is_fact_chained : (('a * stature) * 'b) -> bool
   val filter_used_facts : bool -> (''a * stature) list -> ((''a * stature) * 'b) list ->
     ((''a * stature) * 'b) list
-  val play_one_line_proof : mode -> bool -> Time.time -> ((string * 'a) * thm) list ->
-    Proof.state -> int -> proof_method -> proof_method list -> proof_method * play_outcome
-  val isar_supported_prover_of : theory -> string -> string
-  val choose_minimize_command : theory -> params -> ((string * string list) list -> string -> 'a) ->
-    string -> proof_method * play_outcome -> 'a
   val repair_monomorph_context : int option -> int -> int option -> int -> Proof.context ->
     Proof.context
 
@@ -118,11 +106,6 @@
   | str_of_mode Auto_Minimize = "Auto_Minimize"
   | str_of_mode Minimize = "Minimize"
 
-val smtN = "smt"
-
-val proof_method_names = [metisN, smtN]
-val is_proof_method = member (op =) proof_method_names
-
 val is_atp = member (op =) o supported_atps
 
 type params =
@@ -143,11 +126,11 @@
    max_mono_iters : int option,
    max_new_mono_instances : int option,
    isar_proofs : bool option,
-   compress : real,
+   compress : real option,
    try0 : bool,
    smt_proofs : bool option,
    slice : bool,
-   minimize : bool option,
+   minimize : bool,
    timeout : Time.time,
    preplay_timeout : Time.time,
    expect : string}
@@ -164,14 +147,11 @@
   {outcome : atp_failure option,
    used_facts : (string * stature) list,
    used_from : fact list,
+   preferred_methss : proof_method * proof_method list list,
    run_time : Time.time,
-   preplay : (proof_method * play_outcome) Lazy.lazy,
-   message : proof_method * play_outcome -> string,
-   message_tail : string}
+   message : (unit -> (string * stature) list * (proof_method * play_outcome)) -> string}
 
-type prover =
-  params -> ((string * string list) list -> string -> minimize_command)
-  -> prover_problem -> prover_result
+type prover = params -> prover_problem -> prover_result
 
 fun overlord_file_location_of_prover prover = (getenv "ISABELLE_HOME_USER", "prob_" ^ prover)
 
@@ -181,101 +161,29 @@
   | Try => "Sledgehammer (" ^ quote name ^ ") found a proof"
   | _ => "Try this")
 
-fun bunch_of_proof_methods smt_proofs needs_full_types desperate_lam_trans =
-  (if needs_full_types then
-     [Metis_Method (SOME full_typesN, NONE),
-      Metis_Method (SOME really_full_type_enc, NONE),
-      Metis_Method (SOME full_typesN, SOME desperate_lam_trans),
-      Metis_Method (SOME really_full_type_enc, SOME desperate_lam_trans)]
+fun bunches_of_proof_methods try0 smt_proofs needs_full_types desperate_lam_trans =
+  (if try0 then
+    [[Simp_Method, Auto_Method, Blast_Method, Linarith_Method],
+     [Meson_Method, Force_Method, Presburger_Method]]
    else
-     [Metis_Method (NONE, NONE),
-      Metis_Method (SOME full_typesN, NONE),
-      Metis_Method (SOME no_typesN, SOME desperate_lam_trans),
-      Metis_Method (SOME really_full_type_enc, SOME desperate_lam_trans)]) @
-  (if smt_proofs then [SMT2_Method] else [])
-
-fun extract_proof_method ({type_enc, lam_trans, ...} : params)
-      (Metis_Method (type_enc', lam_trans')) =
-    let
-      val override_params =
-        (if is_none type_enc' andalso is_none type_enc then []
-         else [("type_enc", [hd (unalias_type_enc (type_enc' |> the_default partial_typesN))])]) @
-        (if is_none lam_trans' andalso is_none lam_trans then []
-         else [("lam_trans", [lam_trans' |> the_default default_metis_lam_trans])])
-    in (metisN, override_params) end
-  | extract_proof_method _ SMT2_Method = (smtN, [])
-
-(* based on "Mirabelle.can_apply" and generalized *)
-fun timed_apply timeout tac state i =
-  let
-    val {context = ctxt, facts, goal} = Proof.goal state
-    val full_tac = Method.insert_tac facts i THEN tac ctxt i
-  in
-    TimeLimit.timeLimit timeout (try (Seq.pull o full_tac)) goal
-  end
-
-fun timed_proof_method timeout ths meth =
-  timed_apply timeout (fn ctxt => tac_of_proof_method ctxt ([], ths) meth)
+     []) @
+  [[Metis_Method (if needs_full_types then SOME full_typesN else NONE, NONE)],
+   (if needs_full_types then
+      [Metis_Method (NONE, NONE),
+       Metis_Method (SOME really_full_type_enc, NONE),
+       Metis_Method (SOME full_typesN, SOME desperate_lam_trans),
+       Metis_Method (SOME really_full_type_enc, SOME desperate_lam_trans)]
+    else
+      [Metis_Method (SOME full_typesN, NONE),
+       Metis_Method (SOME no_typesN, SOME desperate_lam_trans),
+       Metis_Method (SOME really_full_type_enc, SOME desperate_lam_trans)])] @
+  (if smt_proofs then [[SMT2_Method]] else [])
 
 fun is_fact_chained ((_, (sc, _)), _) = sc = Chained
 
 fun filter_used_facts keep_chained used =
   filter ((member (op =) used o fst) orf (if keep_chained then is_fact_chained else K false))
 
-fun play_one_line_proof mode verbose timeout pairs state i preferred (meths as meth :: _) =
-  let
-    val ctxt = Proof.context_of state
-
-    fun get_preferred meths = if member (op =) meths preferred then preferred else meth
-  in
-    if timeout = Time.zeroTime then
-      (get_preferred meths, Play_Timed_Out Time.zeroTime)
-    else
-      let
-        val _ = if mode = Minimize then Output.urgent_message "Preplaying proof..." else ()
-        val ths = pairs |> sort_wrt (fst o fst) |> map snd
-        fun play [] [] = (get_preferred meths, Play_Failed)
-          | play timed_outs [] = (get_preferred timed_outs, Play_Timed_Out timeout)
-          | play timed_out (meth :: meths) =
-            let
-              val _ =
-                if verbose then
-                  Output.urgent_message ("Trying \"" ^ string_of_proof_method ctxt [] meth ^
-                    "\" for " ^ string_of_time timeout ^ "...")
-                else
-                  ()
-              val timer = Timer.startRealTimer ()
-            in
-              (case timed_proof_method timeout ths meth state i of
-                SOME (SOME _) => (meth, Played (Timer.checkRealTimer timer))
-              | _ => play timed_out meths)
-            end
-            handle TimeLimit.TimeOut => play (meth :: timed_out) meths
-      in
-        play [] meths
-      end
-  end
-
-val canonical_isar_supported_prover = eN
-val z3N = "z3"
-
-fun isar_supported_prover_of thy name =
-  if is_atp thy name orelse name = z3N then name
-  else if is_atp_installed thy canonical_isar_supported_prover then canonical_isar_supported_prover
-  else name
-
-(* FIXME: See the analogous logic in the function "maybe_minimize" in
-   "sledgehammer_prover_minimize.ML". *)
-fun choose_minimize_command thy (params as {isar_proofs, ...}) minimize_command name preplay =
-  let
-    val maybe_isar_name = name |> isar_proofs = SOME true ? isar_supported_prover_of thy
-    val (min_name, override_params) =
-      (case preplay of
-        (meth as Metis_Method _, Played _) =>
-        if isar_proofs = SOME true then (maybe_isar_name, []) else extract_proof_method params meth
-      | _ => (maybe_isar_name, []))
-  in minimize_command override_params min_name end
-
 val max_fact_instances = 10 (* FUDGE *)
 
 fun repair_monomorph_context max_iters best_max_iters max_new_instances best_max_new_instances =
@@ -288,7 +196,6 @@
   let
     val thy = Proof_Context.theory_of ctxt
     val (remote_provers, local_provers) =
-      proof_method_names @
       sort_strings (supported_atps thy) @
       sort_strings (SMT2_Config.available_solvers_of ctxt)
       |> List.partition (String.isPrefix remote_prefix)
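
bunches_of_proof_methods replaces the old flat method list with groups of methods that are tried bunch by bunch: cheap try0-style methods first (when try0 is enabled), then the preferred metis encoding, then heavier metis fallbacks, and finally the SMT method (when smt_proofs is enabled). The reduced SML sketch below shows how such bunches can be assembled from the flags; the method names are plain strings rather than the real proof_method datatype, and the fallback bunch is heavily abridged.

    (* Reduced sketch: proof methods are strings, and the metis fallbacks are abridged. *)
    fun bunches_of_methods try0 smt_proofs needs_full_types =
      (if try0 then
         [["simp", "auto", "blast", "linarith"], ["meson", "force", "presburger"]]
       else
         []) @
      [[if needs_full_types then "metis (full_types)" else "metis"],
       ["metis (full_types)", "metis (no_types)"]] @
      (if smt_proofs then [["smt2"]] else [])

    val _ = bunches_of_methods true true false
    (* [["simp", "auto", "blast", "linarith"], ["meson", "force", "presburger"],
        ["metis"], ["metis (full_types)", "metis (no_types)"], ["smt2"]] *)
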
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -30,6 +30,7 @@
 open ATP_Problem_Generate
 open ATP_Proof_Reconstruct
 open ATP_Waldmeister
+open ATP_Satallax
 open ATP_Systems
 open Sledgehammer_Util
 open Sledgehammer_Proof_Methods
@@ -126,15 +127,13 @@
    the only ATP that does not honor its time limit. *)
 val atp_timeout_slack = seconds 1.0
 
-(* Important messages are important but not so important that users want to see
-   them each time. *)
+(* Important messages are important but not so important that users want to see them each time. *)
 val atp_important_message_keep_quotient = 25
 
 fun run_atp mode name
-    (params as {debug, verbose, overlord, type_enc, strict, lam_trans, uncurried_aliases,
-       fact_filter, max_facts, max_mono_iters, max_new_mono_instances, isar_proofs, compress, try0,
-       smt_proofs, slice, minimize, timeout, preplay_timeout, ...})
-    minimize_command
+    ({debug, verbose, overlord, type_enc, strict, lam_trans, uncurried_aliases, fact_filter,
+     max_facts, max_mono_iters, max_new_mono_instances, isar_proofs, compress, try0, smt_proofs,
+     slice, minimize, timeout, preplay_timeout, ...} : params)
     ({comment, state, goal, subgoal, subgoal_count, factss, ...} : prover_problem) =
   let
     val thy = Proof.theory_of state
@@ -295,19 +294,21 @@
               |>> (if overlord then prefix ("% " ^ command ^ "\n% " ^ timestamp () ^ "\n") else I)
               |> fst |> split_time
               |> (fn accum as (output, _) =>
-                     (accum,
-                      extract_tstplike_proof_and_outcome verbose proof_delims known_failures output
-                      |>> atp_proof_of_tstplike_proof (perhaps (try (unprefix remote_prefix)) name) atp_problem
-                      handle UNRECOGNIZED_ATP_PROOF () => ([], SOME ProofUnparsable)))
+                (accum,
+                 extract_tstplike_proof_and_outcome verbose proof_delims known_failures output
+                 |>> atp_proof_of_tstplike_proof (perhaps (try (unprefix remote_prefix)) name)
+                   atp_problem
+                 handle UNRECOGNIZED_ATP_PROOF () => ([], SOME ProofUnparsable)))
               handle TimeLimit.TimeOut => (("", slice_timeout), ([], SOME TimedOut))
 
             val outcome =
               (case outcome of
                 NONE =>
-                (case used_facts_in_unsound_atp_proof ctxt (map fst facts) atp_proof
-                      |> Option.map (sort string_ord) of
+                (case used_facts_in_unsound_atp_proof ctxt (map fst facts) atp_proof of
                   SOME facts =>
-                  let val failure = UnsoundProof (is_type_enc_sound type_enc, facts) in
+                  let
+                    val failure = UnsoundProof (is_type_enc_sound type_enc, sort string_ord facts)
+                  in
                     if debug then (warning (string_of_atp_failure failure); NONE) else SOME failure
                   end
                 | NONE => NONE)
@@ -354,22 +355,18 @@
       else
         ""
 
-    val (used_facts, preplay, message, message_tail) =
+    val (used_facts, preferred_methss, message) =
       (case outcome of
         NONE =>
         let
-          val used_facts = used_facts_in_atp_proof ctxt (map fst used_from) atp_proof
+          val used_facts = sort_wrt fst (used_facts_in_atp_proof ctxt (map fst used_from) atp_proof)
           val needs_full_types = is_typed_helper_used_in_atp_proof atp_proof
-          val meths =
-            bunch_of_proof_methods (smt_proofs <> SOME false) needs_full_types
-              (if atp_proof_prefers_lifting atp_proof then liftingN else hide_lamsN)
+          val preferred_methss =
+            (Metis_Method (NONE, NONE),
+             bunches_of_proof_methods try0 (smt_proofs <> SOME false) needs_full_types
+               (if atp_proof_prefers_lifting atp_proof then liftingN else hide_lamsN))
         in
-          (used_facts,
-           Lazy.lazy (fn () =>
-             let val used_pairs = used_from |> filter_used_facts false used_facts in
-               play_one_line_proof mode verbose preplay_timeout used_pairs state subgoal (hd meths)
-                 meths
-             end),
+          (used_facts, preferred_methss,
            fn preplay =>
               let
                 val _ = if verbose then Output.urgent_message "Generating proof text..." else ()
@@ -383,33 +380,29 @@
                     val atp_proof =
                       atp_proof
                       |> termify_atp_proof ctxt name format type_enc pool lifted sym_tab
-                      |> spass ? introduce_spass_skolem
+                      |> spass ? introduce_spass_skolems
                       |> factify_atp_proof (map fst used_from) hyp_ts concl_t
                   in
                     (verbose, (metis_type_enc, metis_lam_trans), preplay_timeout, compress, try0,
-                     minimize <> SOME false, atp_proof, goal)
+                     minimize, atp_proof, goal)
                   end
 
-                val one_line_params =
-                  (preplay, proof_banner mode name, used_facts,
-                   choose_minimize_command thy params minimize_command name preplay, subgoal,
-                   subgoal_count)
+                val one_line_params = (preplay (), proof_banner mode name, subgoal, subgoal_count)
                 val num_chained = length (#facts (Proof.goal state))
               in
-                proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained one_line_params
-              end,
-           (if verbose then "\nATP real CPU time: " ^ string_of_time run_time ^ "." else "") ^
-           (if important_message <> "" then
-              "\n\nImportant message from Dr. Geoff Sutcliffe:\n" ^ important_message
-            else
-              ""))
+                proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained
+                  one_line_params ^
+                (if important_message <> "" then
+                   "\n\nImportant message from Dr. Geoff Sutcliffe:\n" ^ important_message
+                 else
+                   "")
+              end)
         end
       | SOME failure =>
-        ([], Lazy.value (Metis_Method (NONE, NONE), Play_Failed),
-         fn _ => string_of_atp_failure failure, ""))
+        ([], (Auto_Method (* dummy *), []), fn _ => string_of_atp_failure failure))
   in
-    {outcome = outcome, used_facts = used_facts, used_from = used_from, run_time = run_time,
-     preplay = preplay, message = message, message_tail = message_tail}
+    {outcome = outcome, used_facts = used_facts, used_from = used_from,
+     preferred_methss = preferred_methss, run_time = run_time, message = message}
   end
 
 end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_minimize.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_minimize.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -20,18 +20,11 @@
   val get_prover : Proof.context -> mode -> string -> prover
 
   val binary_min_facts : int Config.T
-  val auto_minimize_min_facts : int Config.T
-  val auto_minimize_max_time : real Config.T
   val minimize_facts : (thm list -> unit) -> string -> params -> bool -> int -> int ->
-    Proof.state -> thm -> (proof_method * play_outcome) Lazy.lazy option ->
-    ((string * stature) * thm list) list ->
+    Proof.state -> thm -> ((string * stature) * thm list) list ->
     ((string * stature) * thm list) list option
-      * ((proof_method * play_outcome) Lazy.lazy * ((proof_method * play_outcome) -> string)
-         * string)
+    * ((unit -> (string * stature) list * (proof_method * play_outcome)) -> string)
   val get_minimizing_prover : Proof.context -> mode -> (thm list -> unit) -> string -> prover
-
-  val run_minimize : params -> (thm list -> unit) -> int -> (Facts.ref * Attrib.src list) list ->
-    Proof.state -> unit
 end;
 
 structure Sledgehammer_Prover_Minimize : SLEDGEHAMMER_PROVER_MINIMIZE =
@@ -50,59 +43,17 @@
 open Sledgehammer_Prover_ATP
 open Sledgehammer_Prover_SMT2
 
-fun run_proof_method mode name (params as {verbose, timeout, type_enc, lam_trans, ...})
-    minimize_command
-    ({state, subgoal, subgoal_count, factss = (_, facts) :: _, ...} : prover_problem) =
-  let
-    val meth =
-      if name = metisN then Metis_Method (type_enc, lam_trans)
-      else if name = smtN then SMT2_Method
-      else raise Fail ("unknown proof_method: " ^ quote name)
-    val used_facts = facts |> map fst
-  in
-    (case play_one_line_proof (if mode = Minimize then Normal else mode) verbose timeout facts state
-        subgoal meth [meth] of
-      play as (_, Played time) =>
-      {outcome = NONE, used_facts = used_facts, used_from = facts, run_time = time,
-       preplay = Lazy.value play,
-       message = fn play =>
-          let
-            val ctxt = Proof.context_of state
-            val (_, override_params) = extract_proof_method params meth
-            val one_line_params =
-              (play, proof_banner mode name, used_facts, minimize_command override_params name,
-               subgoal, subgoal_count)
-            val num_chained = length (#facts (Proof.goal state))
-          in
-            one_line_proof_text ctxt num_chained one_line_params
-          end,
-       message_tail = ""}
-    | play =>
-      let
-        val failure = (case play of (_, Play_Failed) => GaveUp | _ => TimedOut)
-      in
-        {outcome = SOME failure, used_facts = [], used_from = [],
-         run_time = Time.zeroTime, preplay = Lazy.value play,
-         message = fn _ => string_of_atp_failure failure, message_tail = ""}
-      end)
-  end
-
 fun is_prover_supported ctxt =
   let val thy = Proof_Context.theory_of ctxt in
-    is_proof_method orf is_atp thy orf is_smt2_prover ctxt
+    is_atp thy orf is_smt2_prover ctxt
   end
 
 fun is_prover_installed ctxt =
-  is_proof_method orf is_smt2_prover ctxt orf
-  is_atp_installed (Proof_Context.theory_of ctxt)
-
-val proof_method_default_max_facts = 20
+  is_smt2_prover ctxt orf is_atp_installed (Proof_Context.theory_of ctxt)
 
 fun default_max_facts_of_prover ctxt name =
   let val thy = Proof_Context.theory_of ctxt in
-    if is_proof_method name then
-      proof_method_default_max_facts
-    else if is_atp thy name then
+    if is_atp thy name then
       fold (Integer.max o fst o #1 o fst o snd) (#best_slices (get_atp thy name ()) ctxt) 0
     else if is_smt2_prover ctxt name then
       SMT2_Solver.default_max_relevant ctxt name
@@ -112,8 +63,7 @@
 
 fun get_prover ctxt mode name =
   let val thy = Proof_Context.theory_of ctxt in
-    if is_proof_method name then run_proof_method mode name
-    else if is_atp thy name then run_atp mode name
+    if is_atp thy name then run_atp mode name
     else if is_smt2_prover ctxt name then run_smt2_solver mode name
     else error ("No such prover: " ^ name ^ ".")
   end
@@ -149,8 +99,7 @@
     val problem =
       {comment = "", state = state, goal = goal, subgoal = i, subgoal_count = n,
        factss = [("", facts)]}
-    val result as {outcome, used_facts, run_time, ...} =
-      prover params (K (K (K ""))) problem
+    val result as {outcome, used_facts, run_time, ...} = prover params problem
   in
     print silent (fn () =>
       (case outcome of
@@ -180,11 +129,6 @@
    actually needed, we heuristically set the threshold to 10 facts. *)
 val binary_min_facts =
   Attrib.setup_config_int @{binding sledgehammer_minimize_binary_min_facts} (K 20)
-val auto_minimize_min_facts =
-  Attrib.setup_config_int @{binding sledgehammer_auto_minimize_min_facts}
-      (fn generic => Config.get_generic generic binary_min_facts)
-val auto_minimize_max_time =
-  Attrib.setup_config_real @{binding sledgehammer_auto_minimize_max_time} (K 5.0)
 
 fun linear_minimize test timeout result xs =
   let
@@ -216,7 +160,7 @@
           (case test timeout (sup @ l0) of
             result as {outcome = NONE, used_facts, ...} =>
             min depth result (filter_used_facts true used_facts sup)
-                      (filter_used_facts true used_facts l0)
+              (filter_used_facts true used_facts l0)
           | _ =>
             (case test timeout (sup @ r0) of
               result as {outcome = NONE, used_facts, ...} =>
@@ -244,7 +188,7 @@
   end
 
 fun minimize_facts do_learn prover_name (params as {learn, timeout, ...}) silent i n state goal
-    preplay0 facts =
+    facts =
   let
     val ctxt = Proof.context_of state
     val prover = get_prover ctxt (if silent then Auto_Minimize else Minimize) prover_name
@@ -262,135 +206,50 @@
          val min =
            if length facts >= Config.get ctxt binary_min_facts then binary_minimize
            else linear_minimize
-         val (min_facts, {preplay, message, message_tail, ...}) =
-           min test (new_timeout timeout run_time) result facts
+         val (min_facts, {message, ...}) = min test (new_timeout timeout run_time) result facts
        in
          print silent (fn () => cat_lines
-             ["Minimized to " ^ n_facts (map fst min_facts)] ^
-              (case min_facts |> filter is_fact_chained |> length of
-                 0 => ""
-               | n => "\n(including " ^ string_of_int n ^ " chained)") ^ ".");
+           ["Minimized to " ^ n_facts (map fst min_facts)] ^
+            (case min_facts |> filter is_fact_chained |> length of
+              0 => ""
+            | n => "\n(including " ^ string_of_int n ^ " chained)") ^ ".");
          (if learn then do_learn (maps snd min_facts) else ());
-         (SOME min_facts,
-          (if is_some preplay0 andalso length min_facts = length facts then the preplay0
-           else preplay,
-           message, message_tail))
+         (SOME min_facts, message)
        end
-     | {outcome = SOME TimedOut, preplay, ...} =>
-       (NONE, (preplay, fn _ =>
+     | {outcome = SOME TimedOut, ...} =>
+       (NONE, fn _ =>
           "Timeout: You can increase the time limit using the \"timeout\" option (e.g., \
-          \timeout = " ^ string_of_int (10 + Time.toMilliseconds timeout div 1000) ^ "\").", ""))
-     | {preplay, message, ...} => (NONE, (preplay, prefix "Prover error: " o message, ""))))
-    handle ERROR msg =>
-      (NONE, (Lazy.value (Metis_Method (NONE, NONE), Play_Failed), fn _ => "Error: " ^ msg, ""))
+          \timeout = " ^ string_of_int (10 + Time.toMilliseconds timeout div 1000) ^ "\").")
+     | {message, ...} => (NONE, (prefix "Prover error: " o message))))
+    handle ERROR msg => (NONE, fn _ => "Error: " ^ msg)
   end
 
-fun adjust_proof_method_params override_params
-    ({debug, verbose, overlord, spy, blocking, provers, type_enc, strict, lam_trans,
-      uncurried_aliases, learn, fact_filter, max_facts, fact_thresholds, max_mono_iters,
-      max_new_mono_instances, isar_proofs, compress, try0, smt_proofs, slice, minimize, timeout,
-      preplay_timeout, expect} : params) =
-  let
-    fun lookup_override name default_value =
-      (case AList.lookup (op =) override_params name of
-        SOME [s] => SOME s
-      | _ => default_value)
-    (* Only those options that proof_methods are interested in are considered here. *)
-    val type_enc = lookup_override "type_enc" type_enc
-    val lam_trans = lookup_override "lam_trans" lam_trans
-  in
-    {debug = debug, verbose = verbose, overlord = overlord, spy = spy, blocking = blocking,
-     provers = provers, type_enc = type_enc, strict = strict, lam_trans = lam_trans,
-     uncurried_aliases = uncurried_aliases, learn = learn, fact_filter = fact_filter,
-     max_facts = max_facts, fact_thresholds = fact_thresholds, max_mono_iters = max_mono_iters,
-     max_new_mono_instances = max_new_mono_instances, isar_proofs = isar_proofs,
-     compress = compress, try0 = try0, smt_proofs = smt_proofs, slice = slice, minimize = minimize,
-     timeout = timeout, preplay_timeout = preplay_timeout, expect = expect}
-  end
-
-fun maybe_minimize ctxt mode do_learn name (params as {verbose, isar_proofs, minimize, ...})
+fun maybe_minimize mode do_learn name (params as {verbose, minimize, ...})
     ({state, goal, subgoal, subgoal_count, ...} : prover_problem)
-    (result as {outcome, used_facts, used_from, run_time, preplay, message, message_tail} :
-     prover_result) =
+    (result as {outcome, used_facts, used_from, preferred_methss, run_time, message}
+     : prover_result) =
   if is_some outcome orelse null used_facts then
     result
   else
     let
-      val thy = Proof_Context.theory_of ctxt
-      val num_facts = length used_facts
-
-      val ((perhaps_minimize, (minimize_name, params)), preplay) =
-        if mode = Normal then
-          if num_facts >= Config.get ctxt auto_minimize_min_facts then
-            ((true, (name, params)), preplay)
-          else
-            let
-              fun can_min_fast_enough time =
-                0.001
-                * Real.fromInt ((num_facts + 1) * Time.toMilliseconds time)
-                <= Config.get ctxt auto_minimize_max_time
-              fun prover_fast_enough () = can_min_fast_enough run_time
-            in
-              (case Lazy.force preplay of
-                 (meth as Metis_Method _, Played timeout) =>
-                 if isar_proofs = SOME true then
-                   (* Cheat: Assume the selected ATP is as fast as "metis" for the goal it proved
-                      itself. *)
-                   (can_min_fast_enough timeout, (isar_supported_prover_of thy name, params))
-                 else if can_min_fast_enough timeout then
-                   (true, extract_proof_method params meth
-                          ||> (fn override_params =>
-                                  adjust_proof_method_params override_params params))
-                 else
-                   (prover_fast_enough (), (name, params))
-               | (SMT2_Method, Played timeout) =>
-                 (* Cheat: Assume the original prover is as fast as "smt" for the goal it proved
-                    itself. *)
-                 (can_min_fast_enough timeout, (name, params))
-               | _ => (prover_fast_enough (), (name, params)),
-               preplay)
-            end
-        else
-          ((false, (name, params)), preplay)
-      val minimize = minimize |> the_default perhaps_minimize
-      val (used_facts, (preplay, message, _)) =
+      val (used_facts, message) =
         if minimize then
-          minimize_facts do_learn minimize_name params
+          minimize_facts do_learn name params
             (not verbose orelse (mode <> Normal andalso mode <> MaSh)) subgoal subgoal_count state
-            goal (SOME preplay) (filter_used_facts true used_facts (map (apsnd single) used_from))
+            goal (filter_used_facts true used_facts (map (apsnd single) used_from))
           |>> Option.map (map fst)
         else
-          (SOME used_facts, (preplay, message, ""))
+          (SOME used_facts, message)
     in
       (case used_facts of
         SOME used_facts =>
-        {outcome = NONE, used_facts = used_facts, used_from = used_from, run_time = run_time,
-         preplay = preplay, message = message, message_tail = message_tail}
+        {outcome = NONE, used_facts = sort_wrt fst used_facts, used_from = used_from,
+         preferred_methss = preferred_methss, run_time = run_time, message = message}
       | NONE => result)
     end
 
-fun get_minimizing_prover ctxt mode do_learn name params minimize_command problem =
-  get_prover ctxt mode name params minimize_command problem
-  |> maybe_minimize ctxt mode do_learn name params problem
-
-fun run_minimize (params as {provers, ...}) do_learn i refs state =
-  let
-    val ctxt = Proof.context_of state
-    val {goal, facts = chained_ths, ...} = Proof.goal state
-    val reserved = reserved_isar_keyword_table ()
-    val css = clasimpset_rule_table_of ctxt
-    val facts = refs |> maps (map (apsnd single) o fact_of_ref ctxt reserved chained_ths css)
-  in
-    (case subgoal_count state of
-      0 => Output.urgent_message "No subgoal!"
-    | n =>
-      (case provers of
-        [] => error "No prover is set."
-      | prover :: _ =>
-        (kill_provers ();
-         minimize_facts do_learn prover params false i n state goal NONE facts
-         |> (fn (_, (preplay, message, message_tail)) =>
-                Output.urgent_message (message (Lazy.force preplay) ^ message_tail)))))
-  end
+fun get_minimizing_prover ctxt mode do_learn name params problem =
+  get_prover ctxt mode name params problem
+  |> maybe_minimize mode do_learn name params problem
 
 end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt2.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt2.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -179,7 +179,7 @@
 
 fun run_smt2_solver mode name (params as {debug, verbose, isar_proofs, compress, try0, smt_proofs,
       minimize, preplay_timeout, ...})
-    minimize_command ({state, goal, subgoal, subgoal_count, factss, ...} : prover_problem) =
+    ({state, goal, subgoal, subgoal_count, factss, ...} : prover_problem) =
   let
     val thy = Proof.theory_of state
     val ctxt = Proof.context_of state
@@ -189,36 +189,36 @@
     val {outcome, filter_result = {fact_ids, atp_proof, ...}, used_from, run_time} =
       smt2_filter_loop name params state goal subgoal factss
     val used_named_facts = map snd fact_ids
-    val used_facts = map fst used_named_facts
+    val used_facts = sort_wrt fst (map fst used_named_facts)
     val outcome = Option.map failure_of_smt2_failure outcome
 
-    val (preplay, message, message_tail) =
+    val (preferred_methss, message) =
       (case outcome of
         NONE =>
-        (Lazy.lazy (fn () =>
-           play_one_line_proof mode verbose preplay_timeout used_named_facts state subgoal
-             SMT2_Method (bunch_of_proof_methods (smt_proofs <> SOME false) false liftingN)),
-         fn preplay =>
-            let
-              fun isar_params () =
-                (verbose, (NONE, NONE), preplay_timeout, compress, try0, minimize <> SOME false,
-                 atp_proof (), goal)
+        let
+          val preferred_methss =
+            (SMT2_Method, bunches_of_proof_methods try0 (smt_proofs <> SOME false) false liftingN)
+        in
+          (preferred_methss,
+           fn preplay =>
+             let
+               val _ = if verbose then Output.urgent_message "Generating proof text..." else ()
 
-              val one_line_params =
-                (preplay, proof_banner mode name, used_facts,
-                 choose_minimize_command thy params minimize_command name preplay, subgoal,
-                 subgoal_count)
-              val num_chained = length (#facts (Proof.goal state))
-            in
-              proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained one_line_params
-            end,
-         if verbose then "\nSMT solver real CPU time: " ^ string_of_time run_time ^ "." else "")
-      | SOME failure =>
-        (Lazy.value (Metis_Method (NONE, NONE), Play_Failed),
-         fn _ => string_of_atp_failure failure, ""))
+               fun isar_params () =
+                 (verbose, (NONE, NONE), preplay_timeout, compress, try0, minimize, atp_proof (),
+                  goal)
+
+               val one_line_params = (preplay (), proof_banner mode name, subgoal, subgoal_count)
+               val num_chained = length (#facts (Proof.goal state))
+             in
+               proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained
+                 one_line_params
+             end)
+        end
+      | SOME failure => ((Auto_Method (* dummy *), []), fn _ => string_of_atp_failure failure))
   in
-    {outcome = outcome, used_facts = used_facts, used_from = used_from, run_time = run_time,
-     preplay = preplay, message = message, message_tail = message_tail}
+    {outcome = outcome, used_facts = used_facts, used_from = used_from,
+     preferred_methss = preferred_methss, run_time = run_time, message = message}
   end
 
 end;
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_util.ML	Sun Aug 10 14:08:36 2014 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_util.ML	Sun Aug 10 14:34:43 2014 +0200
@@ -101,7 +101,10 @@
           else (false, false)
       in
         if anonymous then
-          app_body (if enter_class then map_inclass_name else map_name) (Future.join body) accum
+          (case Future.peek body of
+            SOME (Exn.Res the_body) =>
+            app_body (if enter_class then map_inclass_name else map_name) the_body accum
+          | NONE => accum)
         else
           (case map_name name of
             SOME name' =>
@@ -144,9 +147,9 @@
   Print_Mode.setmp (filter (curry (op =) Symbol.xsymbolsN) (print_mode_value ())) f x
 
 fun hackish_string_of_term ctxt =
-  with_vanilla_print_mode (Syntax.string_of_term ctxt) #> simplify_spaces
+  with_vanilla_print_mode (Syntax.string_of_term ctxt) #> unyxml #> simplify_spaces
 
-val spying_version = "c"
+val spying_version = "d"
 
 fun spying false _ = ()
   | spying true f =