merged
author haftmann
Mon, 02 Mar 2009 08:15:54 +0100
changeset 30192 51e3e0e821c5
parent 30189 3633f560f4c3 (diff)
parent 30191 e3e3d28fe5bc (current diff)
child 30193 391e10b42889
merged
--- a/Admin/CHECKLIST	Mon Mar 02 08:15:32 2009 +0100
+++ b/Admin/CHECKLIST	Mon Mar 02 08:15:54 2009 +0100
@@ -1,7 +1,7 @@
 Checklist for official releases
 ===============================
 
-- test alice, mosml, polyml-5.0, polyml-4.1.3, polyml-4.1.4, polyml-4.2.0, x86-solaris, x86-cygwin;
+- test mosml, polyml-5.0, polyml-4.1.3, polyml-4.1.4, polyml-4.2.0, x86-solaris, x86-cygwin;
 
 - test ProofGeneral;
 
--- a/CONTRIBUTORS	Mon Mar 02 08:15:32 2009 +0100
+++ b/CONTRIBUTORS	Mon Mar 02 08:15:54 2009 +0100
@@ -7,10 +7,16 @@
 Contributions to this Isabelle version
 --------------------------------------
 
-* February 2008: Timothy Bourke, NICTA
+* February 2009: Filip Maric, Univ. of Belgrade
+  A Serbian theory.
+
+* February 2009: Jasmin Christian Blanchette, TUM
+  Misc cleanup of HOL/refute.
+
+* February 2009: Timothy Bourke, NICTA
   New find_consts command.
 
-* February 2008: Timothy Bourke, NICTA
+* February 2009: Timothy Bourke, NICTA
   "solves" criterion for find_theorems and auto_solve option
 
 * December 2008: Clemens Ballarin, TUM
@@ -31,6 +37,9 @@
   processes.  Additional ATP wrappers, including remote SystemOnTPTP
   services.
 
+* September 2008: Stefan Berghofer, TUM and Marc Bezem, Univ. Bergen
+  Prover for coherent logic.
+
 * August 2008: Fabian Immler, TUM
   Vampire wrapper script for remote SystemOnTPTP service.
 
@@ -46,7 +55,7 @@
   HOLCF library improvements.
 
 * 2007/2008: Stefan Berghofer, TUM
-  HOL-Nominal package improvements.  
+  HOL-Nominal package improvements.
 
 * March 2008: Markus Reiter, TUM
   HOL/Library/RBT: red-black trees.
--- a/NEWS	Mon Mar 02 08:15:32 2009 +0100
+++ b/NEWS	Mon Mar 02 08:15:54 2009 +0100
@@ -64,6 +64,8 @@
 * There is a new syntactic category "float_const" for signed decimal
 fractions (e.g. 123.45 or -123.45).
 
+* New prover for coherent logic (see src/Tools/coherent.ML).
+
 
 *** Pure ***
 
@@ -244,7 +246,7 @@
     src/HOL/Library/Order_Relation.thy ~> src/HOL/
     src/HOL/Library/Parity.thy ~> src/HOL/
     src/HOL/Library/Univ_Poly.thy ~> src/HOL/
-    src/HOL/Real/ContNotDenum.thy ~> src/HOL/
+    src/HOL/Real/ContNotDenum.thy ~> src/HOL/Library/
     src/HOL/Real/Lubs.thy ~> src/HOL/
     src/HOL/Real/PReal.thy ~> src/HOL/
     src/HOL/Real/Rational.thy ~> src/HOL/
@@ -254,8 +256,8 @@
     src/HOL/Real/Real.thy ~> src/HOL/
     src/HOL/Complex/Complex_Main.thy ~> src/HOL/
     src/HOL/Complex/Complex.thy ~> src/HOL/
-    src/HOL/Complex/FrechetDeriv.thy ~> src/HOL/
-    src/HOL/Complex/Fundamental_Theorem_Algebra.thy ~> src/HOL/
+    src/HOL/Complex/FrechetDeriv.thy ~> src/HOL/Library/
+    src/HOL/Complex/Fundamental_Theorem_Algebra.thy ~> src/HOL/Library/
     src/HOL/Hyperreal/Deriv.thy ~> src/HOL/
     src/HOL/Hyperreal/Fact.thy ~> src/HOL/
     src/HOL/Hyperreal/Integration.thy ~> src/HOL/
@@ -383,6 +385,7 @@
 nat_mod_mod_trivial    -> mod_mod_trivial
 zdiv_zadd_self1        -> div_add_self1
 zdiv_zadd_self2        -> div_add_self2
+zdiv_zmult_self1       -> div_mult_self2_is_id
 zdiv_zmult_self2       -> div_mult_self1_is_id
 zdvd_triv_left         -> dvd_triv_left
 zdvd_triv_right        -> dvd_triv_right
@@ -418,7 +421,7 @@
 * HOL/Library/GCD: Curried operations gcd, lcm (for nat) and zgcd,
 zlcm (for int); carried together from various gcd/lcm developments in
 the HOL Distribution.  zgcd and zlcm replace former igcd and ilcm;
-corresponding theorems renamed accordingly.  INCOMPATIBILY.  To
+corresponding theorems renamed accordingly.  INCOMPATIBILITY.  To
 recover tupled syntax, use syntax declarations like:
 
     hide (open) const gcd
@@ -432,7 +435,7 @@
 * HOL/Real/Rational: 'Fract k 0' now equals '0'.  INCOMPATIBILITY.
 
 * The real numbers offer decimal input syntax: 12.34 is translated into
-  1234/10^4. This translation is not reversed upon output.
+  1234/10^2. This translation is not reversed upon output.
 
 * New ML antiquotation @{code}: takes constant as argument, generates
 corresponding code in background and inserts name of the corresponding
@@ -532,6 +535,23 @@
 * Proof of Zorn's Lemma for partial orders.
 
 
+*** HOLCF ***
+
+* Reimplemented the simplification procedure for proving continuity
+subgoals.  The new simproc is extensible; users can declare additional
+continuity introduction rules with the attribute [cont2cont].
+
+* The continuity simproc now uses a different introduction rule for
+solving continuity subgoals on terms with lambda abstractions.  In
+some rare cases the new simproc may fail to solve subgoals that the
+old one could solve, and "simp add: cont2cont_LAM" may be necessary.
+Potential INCOMPATIBILITY.
+
+* The syntax of the fixrec package has changed.  The specification
+syntax now conforms in style to definition, primrec, function, etc.
+See HOLCF/ex/Fixrec_ex.thy for examples.  INCOMPATIBILITY.
+
+
 *** ML ***
 
 * High-level support for concurrent ML programming, see
--- a/README_REPOSITORY	Mon Mar 02 08:15:32 2009 +0100
+++ b/README_REPOSITORY	Mon Mar 02 08:15:54 2009 +0100
@@ -32,9 +32,9 @@
 Initial configuration
 ---------------------
 
-Always use Mercurial version 1.0 or later, such as 1.0.1 or 1.0.2.
+Always use Mercurial versions from the 1.0 or 1.1 branch, or later.
 The old 0.9.x versions do not work in a multi-user environment with
-shared file spaces.
+shared file spaces!
 
 
 The official Isabelle repository can be cloned like this:
@@ -62,7 +62,8 @@
 
 In principle, user names can be chosen freely, but for longterm
 committers of the Isabelle repository the obvious choice is to keep
-with the old CVS naming scheme.
+with the old CVS naming scheme.  Others should use their regular "full
+name"; including an email address is optional.
 
 
 There are other useful configurations to go into $HOME/.hgrc,
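
For illustration, such a user name entry goes into the [ui] section of
$HOME/.hgrc; this is standard Mercurial configuration, and the name and
address below are placeholders only, not taken from the changeset:

  [ui]
  username = Ada Lovelace <ada@example.org>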
@@ -135,6 +136,29 @@
   hg clone ssh://wenzelm@atbroy100//home/isabelle-repository/repos/isabelle
 
 
+Simplified merges
+-----------------
+
+The main idea of Mercurial is to let individual users produce
+independent branches of development first, but merge with others
+frequently.  The basic hg merge operation is more general than
+required for the mode of operation with a shared pull/push area.  The
+hg fetch extension accommodates this case nicely, automating trivial
+merges and requiring manual intervention for actual conflicts only.
+
+The fetch extension can be configured via the user's ~/.hgrc like
+this:
+
+  [extensions]
+  hgext.fetch =
+
+  [defaults]
+  fetch = -m "merged"
+
+Note that the potential for merge conflicts can be greatly reduced by
+doing "hg fetch" before any starting local changes!
+
+
 Content discipline
 ------------------
 
@@ -172,7 +196,9 @@
     Mercurial provides nice web presentation of incoming changes with
     a digest of log entries; this also includes RSS/Atom news feeds.
     Users should be aware that others will actually read what is
-    written into log messages.
+    written into log messages.  There are also add-on browsers,
+    notably hgtk, which is part of the TortoiseHg distribution and works
+    for generic Python/GTk platforms.
 
     The usual changelog presentation style for the Isabelle repository
     admits log entries that consist of several lines, but without the
@@ -194,6 +220,3 @@
 
 Needless to say, the results from the build process must not be added
 to the repository!
-
-
-Makarius 30-Nov-2008
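
A minimal sketch of the fetch-based workflow added above, assuming the
[extensions] and [defaults] entries from the new "Simplified merges"
section are in place and the clone originates from the repository URL
quoted in that file:

  hg commit -m "my change"   # record local changes
  hg fetch                   # pull incoming changes and merge them ("merged")
  hg push                    # publish the combined history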
--- a/doc-src/IsarRef/Makefile	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Makefile	Mon Mar 02 08:15:54 2009 +0100
@@ -1,7 +1,3 @@
-#
-# $Id$
-#
-
 ## targets
 
 default: dvi
--- a/doc-src/IsarRef/Thy/Document_Preparation.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Document_Preparation.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory Document_Preparation
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/Generic.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Generic.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory Generic
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/HOLCF_Specific.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/HOLCF_Specific.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory HOLCF_Specific
 imports HOLCF
 begin
--- a/doc-src/IsarRef/Thy/HOL_Specific.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/HOL_Specific.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -771,6 +771,55 @@
 *}
 
 
+section {* Intuitionistic proof search *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) iprover} & : & @{text method} \\
+  \end{matharray}
+
+  \begin{rail}
+    'iprover' ('!' ?) (rulemod *)
+    ;
+  \end{rail}
+
+  The @{method (HOL) iprover} method performs intuitionistic proof
+  search, depending on specifically declared rules from the context,
+  or given as explicit arguments.  Chained facts are inserted into the
+  goal before commencing proof search; ``@{method (HOL) iprover}@{text
+  "!"}''  means to include the current @{fact prems} as well.
+  
+  Rules need to be classified as @{attribute (Pure) intro},
+  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
+  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
+  applied aggressively (without considering back-tracking later).
+  Rules declared with ``@{text "?"}'' are ignored in proof search (the
+  single-step @{method rule} method still observes these).  An
+  explicit weight annotation may be given as well; otherwise the
+  number of rule premises will be taken into account here.
+*}
+
+
+section {* Coherent Logic *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "coherent"} & : & @{text method} \\
+  \end{matharray}
+
+  \begin{rail}
+    'coherent' thmrefs?
+    ;
+  \end{rail}
+
+  The @{method (HOL) coherent} method solves problems of
+  \emph{Coherent Logic} \cite{Bezem-Coquand:2005}, which covers
+  applications in confluence theory, lattice theory and projective
+  geometry.  See @{"file" "~~/src/HOL/ex/Coherent.thy"} for some
+  examples.
+*}
+
+
 section {* Invoking automated reasoning tools -- The Sledgehammer *}
 
 text {*
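
A minimal usage sketch of the iprover method documented above; the lemma is
artificial, and impI, conjI and conjE are ordinary HOL facts passed
explicitly here for illustration:

  lemma "A & B --> B & A"
    by (iprover intro: impI conjI elim: conjE)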
--- a/doc-src/IsarRef/Thy/ML_Tactic.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/ML_Tactic.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory ML_Tactic
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/Misc.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Misc.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory Misc
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/Outer_Syntax.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Outer_Syntax.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory Outer_Syntax
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/Proof.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Proof.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -683,7 +683,6 @@
     @{method_def "assumption"} & : & @{text method} \\
     @{method_def "this"} & : & @{text method} \\
     @{method_def "rule"} & : & @{text method} \\
-    @{method_def "iprover"} & : & @{text method} \\[0.5ex]
     @{attribute_def (Pure) "intro"} & : & @{text attribute} \\
     @{attribute_def (Pure) "elim"} & : & @{text attribute} \\
     @{attribute_def (Pure) "dest"} & : & @{text attribute} \\
@@ -698,8 +697,6 @@
     ;
     'rule' thmrefs?
     ;
-    'iprover' ('!' ?) (rulemod *)
-    ;
     rulemod: ('intro' | 'elim' | 'dest') ((('!' | () | '?') nat?) | 'del') ':' thmrefs
     ;
     ('intro' | 'elim' | 'dest') ('!' | () | '?') nat?
@@ -758,27 +755,11 @@
   default behavior of @{command "proof"} and ``@{command ".."}'' 
   (double-dot) steps (see \secref{sec:proof-steps}).
   
-  \item @{method iprover} performs intuitionistic proof search,
-  depending on specifically declared rules from the context, or given
-  as explicit arguments.  Chained facts are inserted into the goal
-  before commencing proof search; ``@{method iprover}@{text "!"}''
-  means to include the current @{fact prems} as well.
-  
-  Rules need to be classified as @{attribute (Pure) intro},
-  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
-  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
-  applied aggressively (without considering back-tracking later).
-  Rules declared with ``@{text "?"}'' are ignored in proof search (the
-  single-step @{method rule} method still observes these).  An
-  explicit weight annotation may be given as well; otherwise the
-  number of rule premises will be taken into account here.
-  
   \item @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
   @{attribute (Pure) dest} declare introduction, elimination, and
-  destruct rules, to be used with the @{method rule} and @{method
-  iprover} methods.  Note that the latter will ignore rules declared
-  with ``@{text "?"}'', while ``@{text "!"}''  are used most
-  aggressively.
+  destruct rules, to be used with method @{method rule}, and similar
+  tools.  Note that the latter will ignore rules declared with
+  ``@{text "?"}'', while ``@{text "!"}''  are used most aggressively.
   
   The classical reasoner (see \secref{sec:classical}) introduces its
   own variants of these attributes; use qualified names to access the
--- a/doc-src/IsarRef/Thy/Quick_Reference.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Quick_Reference.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory Quick_Reference
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/ROOT-HOLCF.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT-HOLCF.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,6 +1,3 @@
-
-(* $Id$ *)
-
 set ThyOutput.source;
 use "../../antiquote_setup.ML";
 
--- a/doc-src/IsarRef/Thy/ROOT-ZF.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT-ZF.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,6 +1,3 @@
-
-(* $Id$ *)
-
 set ThyOutput.source;
 use "../../antiquote_setup.ML";
 
--- a/doc-src/IsarRef/Thy/ROOT.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -2,17 +2,19 @@
 set ThyOutput.source;
 use "../../antiquote_setup.ML";
 
-use_thy "Introduction";
-use_thy "Framework";
-use_thy "First_Order_Logic";
-use_thy "Outer_Syntax";
-use_thy "Document_Preparation";
-use_thy "Spec";
-use_thy "Proof";
-use_thy "Inner_Syntax";
-use_thy "Misc";
-use_thy "Generic";
-use_thy "HOL_Specific";
-use_thy "Quick_Reference";
-use_thy "Symbols";
-use_thy "ML_Tactic";
+use_thys [
+  "Introduction",
+  "Framework",
+  "First_Order_Logic",
+  "Outer_Syntax",
+  "Document_Preparation",
+  "Spec",
+  "Proof",
+  "Inner_Syntax",
+  "Misc",
+  "Generic",
+  "HOL_Specific",
+  "Quick_Reference",
+  "Symbols",
+  "ML_Tactic"
+];
--- a/doc-src/IsarRef/Thy/Symbols.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/Symbols.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory Symbols
 imports Pure
 begin
--- a/doc-src/IsarRef/Thy/ZF_Specific.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/ZF_Specific.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
 theory ZF_Specific
 imports Main
 begin
--- a/doc-src/IsarRef/Thy/document/Document_Preparation.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Document_Preparation.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{Document{\isacharunderscore}Preparation}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/Generic.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Generic.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{Generic}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/HOLCF_Specific.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/HOLCF_Specific.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{HOLCF{\isacharunderscore}Specific}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/HOL_Specific.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/HOL_Specific.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -779,6 +779,58 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
+\isamarkupsection{Intuitionistic proof search%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+\begin{matharray}{rcl}
+    \indexdef{HOL}{method}{iprover}\hypertarget{method.HOL.iprover}{\hyperlink{method.HOL.iprover}{\mbox{\isa{iprover}}}} & : & \isa{method} \\
+  \end{matharray}
+
+  \begin{rail}
+    'iprover' ('!' ?) (rulemod *)
+    ;
+  \end{rail}
+
+  The \hyperlink{method.HOL.iprover}{\mbox{\isa{iprover}}} method performs intuitionistic proof
+  search, depending on specifically declared rules from the context,
+  or given as explicit arguments.  Chained facts are inserted into the
+  goal before commencing proof search; ``\hyperlink{method.HOL.iprover}{\mbox{\isa{iprover}}}\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}''  means to include the current \hyperlink{fact.prems}{\mbox{\isa{prems}}} as well.
+  
+  Rules need to be classified as \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}},
+  \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, or \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}; here the
+  ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' indicator refers to ``safe'' rules, which may be
+  applied aggressively (without considering back-tracking later).
+  Rules declared with ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'' are ignored in proof search (the
+  single-step \hyperlink{method.rule}{\mbox{\isa{rule}}} method still observes these).  An
+  explicit weight annotation may be given as well; otherwise the
+  number of rule premises will be taken into account here.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Coherent Logic%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+\begin{matharray}{rcl}
+    \indexdef{HOL}{method}{coherent}\hypertarget{method.HOL.coherent}{\hyperlink{method.HOL.coherent}{\mbox{\isa{coherent}}}} & : & \isa{method} \\
+  \end{matharray}
+
+  \begin{rail}
+    'coherent' thmrefs?
+    ;
+  \end{rail}
+
+  The \hyperlink{method.HOL.coherent}{\mbox{\isa{coherent}}} method solves problems of
+  \emph{Coherent Logic} \cite{Bezem-Coquand:2005}, which covers
+  applications in confluence theory, lattice theory and projective
+  geometry.  See \hyperlink{file.~~/src/HOL/ex/Coherent.thy}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}HOL{\isacharslash}ex{\isacharslash}Coherent{\isachardot}thy}}}} for some
+  examples.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
 \isamarkupsection{Invoking automated reasoning tools -- The Sledgehammer%
 }
 \isamarkuptrue%
--- a/doc-src/IsarRef/Thy/document/ML_Tactic.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/ML_Tactic.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{ML{\isacharunderscore}Tactic}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/Misc.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Misc.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{Misc}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/Outer_Syntax.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Outer_Syntax.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{Outer{\isacharunderscore}Syntax}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/Proof.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Proof.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -693,7 +693,6 @@
     \indexdef{}{method}{assumption}\hypertarget{method.assumption}{\hyperlink{method.assumption}{\mbox{\isa{assumption}}}} & : & \isa{method} \\
     \indexdef{}{method}{this}\hypertarget{method.this}{\hyperlink{method.this}{\mbox{\isa{this}}}} & : & \isa{method} \\
     \indexdef{}{method}{rule}\hypertarget{method.rule}{\hyperlink{method.rule}{\mbox{\isa{rule}}}} & : & \isa{method} \\
-    \indexdef{}{method}{iprover}\hypertarget{method.iprover}{\hyperlink{method.iprover}{\mbox{\isa{iprover}}}} & : & \isa{method} \\[0.5ex]
     \indexdef{Pure}{attribute}{intro}\hypertarget{attribute.Pure.intro}{\hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}}} & : & \isa{attribute} \\
     \indexdef{Pure}{attribute}{elim}\hypertarget{attribute.Pure.elim}{\hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}} & : & \isa{attribute} \\
     \indexdef{Pure}{attribute}{dest}\hypertarget{attribute.Pure.dest}{\hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}} & : & \isa{attribute} \\
@@ -708,8 +707,6 @@
     ;
     'rule' thmrefs?
     ;
-    'iprover' ('!' ?) (rulemod *)
-    ;
     rulemod: ('intro' | 'elim' | 'dest') ((('!' | () | '?') nat?) | 'del') ':' thmrefs
     ;
     ('intro' | 'elim' | 'dest') ('!' | () | '?') nat?
@@ -764,26 +761,11 @@
   default behavior of \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}} and ``\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}'' 
   (double-dot) steps (see \secref{sec:proof-steps}).
   
-  \item \hyperlink{method.iprover}{\mbox{\isa{iprover}}} performs intuitionistic proof search,
-  depending on specifically declared rules from the context, or given
-  as explicit arguments.  Chained facts are inserted into the goal
-  before commencing proof search; ``\hyperlink{method.iprover}{\mbox{\isa{iprover}}}\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}''
-  means to include the current \hyperlink{fact.prems}{\mbox{\isa{prems}}} as well.
-  
-  Rules need to be classified as \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}},
-  \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, or \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}; here the
-  ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' indicator refers to ``safe'' rules, which may be
-  applied aggressively (without considering back-tracking later).
-  Rules declared with ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'' are ignored in proof search (the
-  single-step \hyperlink{method.rule}{\mbox{\isa{rule}}} method still observes these).  An
-  explicit weight annotation may be given as well; otherwise the
-  number of rule premises will be taken into account here.
-  
   \item \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}}, \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, and
   \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}} declare introduction, elimination, and
-  destruct rules, to be used with the \hyperlink{method.rule}{\mbox{\isa{rule}}} and \hyperlink{method.iprover}{\mbox{\isa{iprover}}} methods.  Note that the latter will ignore rules declared
-  with ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'', while ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}''  are used most
-  aggressively.
+  destruct rules, to be used with method \hyperlink{method.rule}{\mbox{\isa{rule}}}, and similar
+  tools.  Note that the latter will ignore rules declared with
+  ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'', while ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}''  are used most aggressively.
   
   The classical reasoner (see \secref{sec:classical}) introduces its
   own variants of these attributes; use qualified names to access the
--- a/doc-src/IsarRef/Thy/document/Quick_Reference.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Quick_Reference.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{Quick{\isacharunderscore}Reference}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/Symbols.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Symbols.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{Symbols}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/Thy/document/ZF_Specific.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/ZF_Specific.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -3,8 +3,6 @@
 \def\isabellecontext{ZF{\isacharunderscore}Specific}%
 %
 \isadelimtheory
-\isanewline
-\isanewline
 %
 \endisadelimtheory
 %
--- a/doc-src/IsarRef/isar-ref.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/IsarRef/isar-ref.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -24,12 +24,13 @@
   With Contributions by
   Clemens Ballarin,
   Stefan Berghofer, \\
+  Timothy Bourke,
   Lucas Dixon,
-  Florian Haftmann,
-  Gerwin Klein, \\
+  Florian Haftmann, \\
+  Gerwin Klein,
   Alexander Krauss,
-  Tobias Nipkow,
-  David von Oheimb, \\
+  Tobias Nipkow, \\
+  David von Oheimb,
   Larry Paulson,
   and Sebastian Skalberg
 }
--- a/doc-src/Ref/Makefile	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/Makefile	Mon Mar 02 08:15:54 2009 +0100
@@ -1,6 +1,3 @@
-#
-# $Id$
-#
 
 ## targets
 
@@ -12,16 +9,15 @@
 include ../Makefile.in
 
 NAME = ref
-FILES = ref.tex introduction.tex goals.tex tactic.tex tctical.tex \
-	thm.tex theories.tex defining.tex syntax.tex substitution.tex \
-	simplifier.tex classical.tex theory-syntax.tex \
-	../rail.sty ../proof.sty ../iman.sty ../extra.sty ../ttbox.sty ../manual.bib
+FILES = ref.tex introduction.tex tactic.tex tctical.tex thm.tex	\
+	theories.tex defining.tex syntax.tex substitution.tex	\
+	simplifier.tex classical.tex ../proof.sty ../iman.sty	\
+	../extra.sty ../ttbox.sty ../manual.bib
 
 dvi: $(NAME).dvi
 
 $(NAME).dvi: $(FILES) isabelle.eps
 	$(LATEX) $(NAME)
-	$(RAIL) $(NAME)
 	$(BIBTEX) $(NAME)
 	$(LATEX) $(NAME)
 	$(LATEX) $(NAME)
@@ -32,7 +28,6 @@
 
 $(NAME).pdf: $(FILES) isabelle.pdf
 	$(PDFLATEX) $(NAME)
-	$(RAIL) $(NAME)
 	$(BIBTEX) $(NAME)
 	$(PDFLATEX) $(NAME)
 	$(PDFLATEX) $(NAME)
--- a/doc-src/Ref/classical.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/classical.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
 \chapter{The Classical Reasoner}\label{chap:classical}
 \index{classical reasoner|(}
 \newcommand\ainfer[2]{\begin{array}{r@{\,}l}#2\\ \hline#1\end{array}}
@@ -28,29 +28,6 @@
 be traced, and their components can be called directly; in this manner,
 any proof can be viewed interactively.
 
-The simplest way to apply the classical reasoner (to subgoal~$i$) is to type
-\begin{ttbox}
-by (Blast_tac \(i\));
-\end{ttbox}
-This command quickly proves most simple formulas of the predicate calculus or
-set theory.  To attempt to prove subgoals using a combination of
-rewriting and classical reasoning, try
-\begin{ttbox}
-auto();                         \emph{\textrm{applies to all subgoals}}
-force i;                        \emph{\textrm{applies to one subgoal}}
-\end{ttbox}
-To do all obvious logical steps, even if they do not prove the
-subgoal, type one of the following:
-\begin{ttbox}
-by Safe_tac;                   \emph{\textrm{applies to all subgoals}}
-by (Clarify_tac \(i\));            \emph{\textrm{applies to one subgoal}}
-\end{ttbox}
-
-
-You need to know how the classical reasoner works in order to use it
-effectively.  There are many tactics to choose from, including 
-{\tt Fast_tac} and \texttt{Best_tac}.
-
 We shall first discuss the underlying principles, then present the classical
 reasoner.  Finally, we shall see how to instantiate it for new logics.  The
 logics FOL, ZF, HOL and HOLCF have it already installed.
--- a/doc-src/Ref/defining.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/defining.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,376 +1,5 @@
-%% $Id$
+
 \chapter{Defining Logics} \label{Defining-Logics}
-This chapter explains how to define new formal systems --- in particular,
-their concrete syntax.  While Isabelle can be regarded as a theorem prover
-for set theory, higher-order logic or the sequent calculus, its
-distinguishing feature is support for the definition of new logics.
-
-Isabelle logics are hierarchies of theories, which are described and
-illustrated in
-\iflabelundefined{sec:defining-theories}{{\em Introduction to Isabelle}}%
-{\S\ref{sec:defining-theories}}.  That material, together with the theory
-files provided in the examples directories, should suffice for all simple
-applications.  The easiest way to define a new theory is by modifying a
-copy of an existing theory.
-
-This chapter documents the meta-logic syntax, mixfix declarations and
-pretty printing.  The extended examples in \S\ref{sec:min_logics}
-demonstrate the logical aspects of the definition of theories.
-
-
-\section{Priority grammars} \label{sec:priority_grammars}
-\index{priority grammars|(}
-
-A context-free grammar contains a set of {\bf nonterminal symbols}, a set of
-{\bf terminal symbols} and a set of {\bf productions}\index{productions}.
-Productions have the form ${A=\gamma}$, where $A$ is a nonterminal and
-$\gamma$ is a string of terminals and nonterminals.  One designated
-nonterminal is called the {\bf start symbol}.  The language defined by the
-grammar consists of all strings of terminals that can be derived from the
-start symbol by applying productions as rewrite rules.
-
-The syntax of an Isabelle logic is specified by a {\bf priority
-  grammar}.\index{priorities} Each nonterminal is decorated by an integer
-priority, as in~$A^{(p)}$.  A nonterminal $A^{(p)}$ in a derivation may be
-rewritten using a production $A^{(q)} = \gamma$ only if~$p \leq q$.  Any
-priority grammar can be translated into a normal context free grammar by
-introducing new nonterminals and productions.
-
-Formally, a set of context free productions $G$ induces a derivation
-relation $\longrightarrow@G$.  Let $\alpha$ and $\beta$ denote strings of
-terminal or nonterminal symbols.  Then
-\[ \alpha\, A^{(p)}\, \beta ~\longrightarrow@G~ \alpha\,\gamma\,\beta \]
-if and only if $G$ contains some production $A^{(q)}=\gamma$ for~$p \leq q$.
-
-The following simple grammar for arithmetic expressions demonstrates how
-binding power and associativity of operators can be enforced by priorities.
-\begin{center}
-\begin{tabular}{rclr}
-  $A^{(9)}$ & = & {\tt0} \\
-  $A^{(9)}$ & = & {\tt(} $A^{(0)}$ {\tt)} \\
-  $A^{(0)}$ & = & $A^{(0)}$ {\tt+} $A^{(1)}$ \\
-  $A^{(2)}$ & = & $A^{(3)}$ {\tt*} $A^{(2)}$ \\
-  $A^{(3)}$ & = & {\tt-} $A^{(3)}$
-\end{tabular}
-\end{center}
-The choice of priorities determines that {\tt -} binds tighter than {\tt *},
-which binds tighter than {\tt +}.  Furthermore {\tt +} associates to the
-left and {\tt *} to the right.
-
-For clarity, grammars obey these conventions:
-\begin{itemize}
-\item All priorities must lie between~0 and \ttindex{max_pri}, which is a
-  some fixed integer.  Sometimes {\tt max_pri} is written as $\infty$.
-\item Priority 0 on the right-hand side and priority \ttindex{max_pri} on
-  the left-hand side may be omitted.
-\item The production $A^{(p)} = \alpha$ is written as $A = \alpha~(p)$; the
-  priority of the left-hand side actually appears in a column on the far
-  right.
-\item Alternatives are separated by~$|$.
-\item Repetition is indicated by dots~(\dots) in an informal but obvious
-  way.
-\end{itemize}
-
-Using these conventions and assuming $\infty=9$, the grammar
-takes the form
-\begin{center}
-\begin{tabular}{rclc}
-$A$ & = & {\tt0} & \hspace*{4em} \\
- & $|$ & {\tt(} $A$ {\tt)} \\
- & $|$ & $A$ {\tt+} $A^{(1)}$ & (0) \\
- & $|$ & $A^{(3)}$ {\tt*} $A^{(2)}$ & (2) \\
- & $|$ & {\tt-} $A^{(3)}$ & (3)
-\end{tabular}
-\end{center}
-\index{priority grammars|)}
-
-
-\begin{figure}\small
-\begin{center}
-\begin{tabular}{rclc}
-$any$ &=& $prop$ ~~$|$~~ $logic$ \\\\
-$prop$ &=& {\tt(} $prop$ {\tt)} \\
-     &$|$& $prop^{(4)}$ {\tt::} $type$ & (3) \\
-     &$|$& {\tt PROP} $aprop$ \\
-     &$|$& $any^{(3)}$ {\tt ==} $any^{(2)}$ & (2) \\
-     &$|$& $any^{(3)}$ {\tt =?=} $any^{(2)}$ & (2) \\
-     &$|$& $prop^{(2)}$ {\tt ==>} $prop^{(1)}$ & (1) \\
-     &$|$& {\tt[|} $prop$ {\tt;} \dots {\tt;} $prop$ {\tt|]} {\tt==>} $prop^{(1)}$ & (1) \\
-     &$|$& {\tt!!} $idts$ {\tt.} $prop$ & (0) \\
-     &$|$& {\tt OFCLASS} {\tt(} $type$ {\tt,} $logic$ {\tt)} \\\\
-$aprop$ &=& $id$ ~~$|$~~ $longid$ ~~$|$~~ $var$
-    ~~$|$~~ $logic^{(\infty)}$ {\tt(} $any$ {\tt,} \dots {\tt,} $any$ {\tt)} \\\\
-$logic$ &=& {\tt(} $logic$ {\tt)} \\
-      &$|$& $logic^{(4)}$ {\tt::} $type$ & (3) \\
-      &$|$& $id$ ~~$|$~~ $longid$ ~~$|$~~ $var$
-    ~~$|$~~ $logic^{(\infty)}$ {\tt(} $any$ {\tt,} \dots {\tt,} $any$ {\tt)} \\
-      &$|$& {\tt \%} $pttrns$ {\tt.} $any^{(3)}$ & (3) \\
-      &$|$& {\tt TYPE} {\tt(} $type$ {\tt)} \\\\
-$idts$ &=& $idt$ ~~$|$~~ $idt^{(1)}$ $idts$ \\\\
-$idt$ &=& $id$ ~~$|$~~ {\tt(} $idt$ {\tt)} \\
-    &$|$& $id$ {\tt ::} $type$ & (0) \\\\
-$pttrns$ &=& $pttrn$ ~~$|$~~ $pttrn^{(1)}$ $pttrns$ \\\\
-$pttrn$ &=& $idt$ \\\\
-$type$ &=& {\tt(} $type$ {\tt)} \\
-     &$|$& $tid$ ~~$|$~~ $tvar$ ~~$|$~~ $tid$ {\tt::} $sort$
-       ~~$|$~~ $tvar$ {\tt::} $sort$ \\
-     &$|$& $id$ ~~$|$~~ $type^{(\infty)}$ $id$
-                ~~$|$~~ {\tt(} $type$ {\tt,} \dots {\tt,} $type$ {\tt)} $id$ \\
-     &$|$& $longid$ ~~$|$~~ $type^{(\infty)}$ $longid$
-                ~~$|$~~ {\tt(} $type$ {\tt,} \dots {\tt,} $type$ {\tt)} $longid$ \\
-     &$|$& $type^{(1)}$ {\tt =>} $type$ & (0) \\
-     &$|$& {\tt[}  $type$ {\tt,} \dots {\tt,} $type$ {\tt]} {\tt=>} $type$&(0) \\\\
-$sort$ &=& $id$ ~~$|$~~ $longid$ ~~$|$~~ {\tt\ttlbrace\ttrbrace} ~~$|$~~
-  {\tt\ttlbrace} $id$ ~$|$~ $longid${\tt,}\dots{\tt,} $id$ ~$|$~$longid$ {\tt\ttrbrace}
-\end{tabular}
-\index{*PROP symbol}
-\index{*== symbol}\index{*=?= symbol}\index{*==> symbol}
-\index{*:: symbol}\index{*=> symbol}
-\index{sort constraints}
-%the index command: a percent is permitted, but braces must match!
-\index{%@{\tt\%} symbol}
-\index{{}@{\tt\ttlbrace} symbol}\index{{}@{\tt\ttrbrace} symbol}
-\index{*[ symbol}\index{*] symbol}
-\index{*"!"! symbol}
-\index{*"["| symbol}
-\index{*"|"] symbol}
-\end{center}
-\caption{Meta-logic syntax}\label{fig:pure_gram}
-\end{figure}
-
-
-\section{The Pure syntax} \label{sec:basic_syntax}
-\index{syntax!Pure|(}
-
-At the root of all object-logics lies the theory \thydx{Pure}.  It
-contains, among many other things, the Pure syntax.  An informal account of
-this basic syntax (types, terms and formulae) appears in
-\iflabelundefined{sec:forward}{{\em Introduction to Isabelle}}%
-{\S\ref{sec:forward}}.  A more precise description using a priority grammar
-appears in Fig.\ts\ref{fig:pure_gram}.  It defines the following
-nonterminals:
-\begin{ttdescription}
-  \item[\ndxbold{any}] denotes any term.
-
-  \item[\ndxbold{prop}] denotes terms of type {\tt prop}.  These are formulae
-    of the meta-logic.  Note that user constants of result type {\tt prop}
-    (i.e.\ $c :: \ldots \To prop$) should always provide concrete syntax.
-    Otherwise atomic propositions with head $c$ may be printed incorrectly.
-
-  \item[\ndxbold{aprop}] denotes atomic propositions.
-
-%% FIXME huh!?
-%  These typically
-%  include the judgement forms of the object-logic; its definition
-%  introduces a meta-level predicate for each judgement form.
-
-  \item[\ndxbold{logic}] denotes terms whose type belongs to class
-    \cldx{logic}, excluding type \tydx{prop}.
-
-  \item[\ndxbold{idts}] denotes a list of identifiers, possibly constrained
-    by types.
-    
-  \item[\ndxbold{pttrn}, \ndxbold{pttrns}] denote patterns for
-    abstraction, cases etc.  Initially the same as $idt$ and $idts$,
-    these are intended to be augmented by user extensions.
-
-  \item[\ndxbold{type}] denotes types of the meta-logic.
-
-  \item[\ndxbold{sort}] denotes meta-level sorts.
-\end{ttdescription}
-
-\begin{warn}
-  In {\tt idts}, note that \verb|x::nat y| is parsed as \verb|x::(nat y)|,
-  treating {\tt y} like a type constructor applied to {\tt nat}.  The
-  likely result is an error message.  To avoid this interpretation, use
-  parentheses and write \verb|(x::nat) y|.
-  \index{type constraints}\index{*:: symbol}
-
-  Similarly, \verb|x::nat y::nat| is parsed as \verb|x::(nat y::nat)| and
-  yields an error.  The correct form is \verb|(x::nat) (y::nat)|.
-\end{warn}
-
-\begin{warn}
-  Type constraints bind very weakly.  For example, \verb!x<y::nat! is normally
-  parsed as \verb!(x<y)::nat!, unless \verb$<$ has priority of 3 or less, in
-  which case the string is likely to be ambiguous.  The correct form is
-  \verb!x<(y::nat)!.
-\end{warn}
-
-\subsection{Logical types and default syntax}\label{logical-types}
-\index{lambda calc@$\lambda$-calculus}
-
-Isabelle's representation of mathematical languages is based on the
-simply typed $\lambda$-calculus.  All logical types, namely those of
-class \cldx{logic}, are automatically equipped with a basic syntax of
-types, identifiers, variables, parentheses, $\lambda$-abstraction and
-application.
-\begin{warn}
-  Isabelle combines the syntaxes for all types of class \cldx{logic} by
-  mapping all those types to the single nonterminal $logic$.  Thus all
-  productions of $logic$, in particular $id$, $var$ etc, become available.
-\end{warn}
-
-
-\subsection{Lexical matters}
-The parser does not process input strings directly.  It operates on token
-lists provided by Isabelle's \bfindex{lexer}.  There are two kinds of
-tokens: \bfindex{delimiters} and \bfindex{name tokens}.
-
-\index{reserved words}
-Delimiters can be regarded as reserved words of the syntax.  You can
-add new ones when extending theories.  In Fig.\ts\ref{fig:pure_gram} they
-appear in typewriter font, for example {\tt ==}, {\tt =?=} and
-{\tt PROP}\@.
-
-Name tokens have a predefined syntax.  The lexer distinguishes six disjoint
-classes of names: \rmindex{identifiers}, \rmindex{unknowns}, type
-identifiers\index{type identifiers}, type unknowns\index{type unknowns},
-\rmindex{numerals}, \rmindex{strings}.  They are denoted by \ndxbold{id},
-\ndxbold{var}, \ndxbold{tid}, \ndxbold{tvar}, \ndxbold{num}, \ndxbold{xnum},
-\ndxbold{xstr}, respectively.  Typical examples are {\tt x}, {\tt ?x7}, {\tt
-  'a}, {\tt ?'a3}, {\tt \#42}, {\tt ''foo bar''}.  Here is the precise syntax:
-\begin{eqnarray*}
-id        & =   & letter\,quasiletter^* \\
-longid    & =   & id (\mbox{\tt .}id)^+ \\
-var       & =   & \mbox{\tt ?}id ~~|~~ \mbox{\tt ?}id\mbox{\tt .}nat \\
-tid       & =   & \mbox{\tt '}id \\
-tvar      & =   & \mbox{\tt ?}tid ~~|~~
-                  \mbox{\tt ?}tid\mbox{\tt .}nat \\
-num       & =   & nat ~~|~~ \mbox{\tt-}nat ~~|~~ \verb,0x,\,hex^+ ~~|~~ \verb,0b,\,bin^+ \\
-xnum      & =   & \mbox{\tt \#}num \\
-xstr      & =   & \mbox{\tt ''~\dots~\tt ''} \\[1ex]
-letter & = & latin ~|~ \verb,\<,latin\verb,>, ~|~ \verb,\<,latin\,latin\verb,>, ~|~ greek ~| \\
-      &   & \verb,\<^isub>, ~|~ \verb,\<^isup>, \\
-quasiletter & = & letter ~|~ digit ~|~ \verb,_, ~|~ \verb,', \\
-latin & = & \verb,a, ~|~ \dots ~|~ \verb,z, ~|~ \verb,A, ~|~ \dots ~|~ \verb,Z, \\
-digit & = & \verb,0, ~|~ \dots ~|~ \verb,9, \\
-nat & = & digit^+ \\
-bin & = & \verb,0, ~|~ \verb,1, \\
-hex & = & digit  ~|~  \verb,a, ~|~ \dots ~|~ \verb,f, ~|~ \verb,A, ~|~ \dots ~|~ \verb,F, \\
-greek & = & \verb,\<alpha>, ~|~ \verb,\<beta>, ~|~ \verb,\<gamma>, ~|~ \verb,\<delta>, ~| \\
-      &   & \verb,\<epsilon>, ~|~ \verb,\<zeta>, ~|~ \verb,\<eta>, ~|~ \verb,\<theta>, ~| \\
-      &   & \verb,\<iota>, ~|~ \verb,\<kappa>, ~|~ \verb,\<mu>, ~|~ \verb,\<nu>, ~| \\
-      &   & \verb,\<xi>, ~|~ \verb,\<pi>, ~|~ \verb,\<rho>, ~|~ \verb,\<sigma>, ~| \\
-      &   & \verb,\<tau>, ~|~ \verb,\<upsilon>, ~|~ \verb,\<phi>, ~|~ \verb,\<psi>, ~| \\
-      &   & \verb,\<omega>, ~|~ \verb,\<Gamma>, ~|~ \verb,\<Delta>, ~|~ \verb,\<Theta>, ~| \\
-      &   & \verb,\<Lambda>, ~|~ \verb,\<Xi>, ~|~ \verb,\<Pi>, ~|~ \verb,\<Sigma>, ~| \\
-      &   & \verb,\<Upsilon>, ~|~ \verb,\<Phi>, ~|~ \verb,\<Psi>, ~|~ \verb,\<Omega>, \\
-\end{eqnarray*}
-The lexer repeatedly takes the longest prefix of the input string that
-forms a valid token.  A maximal prefix that is both a delimiter and a
-name is treated as a delimiter.  Spaces, tabs, newlines and formfeeds
-are separators; they never occur within tokens, except those of class
-$xstr$.
-
-\medskip
-Delimiters need not be separated by white space.  For example, if {\tt -}
-is a delimiter but {\tt --} is not, then the string {\tt --} is treated as
-two consecutive occurrences of the token~{\tt -}.  In contrast, \ML\
-treats {\tt --} as a single symbolic name.  The consequence of Isabelle's
-more liberal scheme is that the same string may be parsed in different ways
-after extending the syntax: after adding {\tt --} as a delimiter, the input
-{\tt --} is treated as a single token.
-
-A \ndxbold{var} or \ndxbold{tvar} describes an unknown, which is internally
-a pair of base name and index (\ML\ type \mltydx{indexname}).  These
-components are either separated by a dot as in {\tt ?x.1} or {\tt ?x7.3} or
-run together as in {\tt ?x1}.  The latter form is possible if the base name
-does not end with digits.  If the index is 0, it may be dropped altogether:
-{\tt ?x} abbreviates both {\tt ?x0} and {\tt ?x.0}.
-
-Tokens of class $num$, $xnum$ or $xstr$ are not used by the meta-logic.
-Object-logics may provide numerals and string constants by adding appropriate
-productions and translation functions.
-
-\medskip
-Although name tokens are returned from the lexer rather than the parser, it
-is more logical to regard them as nonterminals.  Delimiters, however, are
-terminals; they are just syntactic sugar and contribute nothing to the
-abstract syntax tree.
-
-
-\subsection{*Inspecting the syntax} \label{pg:print_syn}
-\begin{ttbox}
-syn_of              : theory -> Syntax.syntax
-print_syntax        : theory -> unit
-Syntax.print_syntax : Syntax.syntax -> unit
-Syntax.print_gram   : Syntax.syntax -> unit
-Syntax.print_trans  : Syntax.syntax -> unit
-\end{ttbox}
-The abstract type \mltydx{Syntax.syntax} allows manipulation of syntaxes
-in \ML.  You can display values of this type by calling the following
-functions:
-\begin{ttdescription}
-\item[\ttindexbold{syn_of} {\it thy}] returns the syntax of the Isabelle
-  theory~{\it thy} as an \ML\ value.
-
-\item[\ttindexbold{print_syntax} $thy$] uses {\tt Syntax.print_syntax}
- to display the syntax part of theory $thy$.
-
-\item[\ttindexbold{Syntax.print_syntax} {\it syn}] shows virtually all
-  information contained in the syntax {\it syn}.  The displayed output can
-  be large.  The following two functions are more selective.
-
-\item[\ttindexbold{Syntax.print_gram} {\it syn}] shows the grammar part
-  of~{\it syn}, namely the lexicon, logical types and productions.  These are
-  discussed below.
-
-\item[\ttindexbold{Syntax.print_trans} {\it syn}] shows the translation
-  part of~{\it syn}, namely the constants, parse/print macros and
-  parse/print translations.
-\end{ttdescription}
-
-The output of the above print functions is divided into labelled sections.
-The grammar is represented by {\tt lexicon}, {\tt logtypes} and {\tt prods}.
-The rest refers to syntactic translations and macro expansion.  Here is an
-explanation of the various sections.
-\begin{description}
-  \item[{\tt lexicon}] lists the delimiters used for lexical
-    analysis.\index{delimiters}
-
-  \item[{\tt logtypes}] lists the types that are regarded the same as {\tt
-    logic} syntactically.  Thus types of object-logics (e.g.\ {\tt nat}, say)
-    will be automatically equipped with the standard syntax of
-    $\lambda$-calculus.
-
-  \item[{\tt prods}] lists the \rmindex{productions} of the priority grammar.
-    The nonterminal $A^{(n)}$ is rendered in {\sc ascii} as {\tt $A$[$n$]}.
-    Each delimiter is quoted.  Some productions are shown with {\tt =>} and
-    an attached string.  These strings later become the heads of parse
-    trees; they also play a vital role when terms are printed (see
-    \S\ref{sec:asts}).
-
-    Productions with no strings attached are called {\bf copy
-      productions}\indexbold{productions!copy}.  Their right-hand side must
-    have exactly one nonterminal symbol (or name token).  The parser does
-    not create a new parse tree node for copy productions, but simply
-    returns the parse tree of the right-hand symbol.
-
-    If the right-hand side consists of a single nonterminal with no
-    delimiters, then the copy production is called a {\bf chain
-      production}.  Chain productions act as abbreviations:
-    conceptually, they are removed from the grammar by adding new
-    productions.  Priority information attached to chain productions is
-    ignored; only the dummy value $-1$ is displayed.
-    
-  \item[\ttindex{print_modes}] lists the alternative print modes
-    provided by this syntax (see \S\ref{sec:prmodes}).
-
-  \item[{\tt consts}, {\tt parse_rules}, {\tt print_rules}]
-    relate to macros (see \S\ref{sec:macros}).
-
-  \item[{\tt parse_ast_translation}, {\tt print_ast_translation}]
-    list sets of constants that invoke translation functions for abstract
-    syntax trees.  Section \S\ref{sec:asts} below discusses this obscure
-    matter.\index{constants!for translations}
-
-  \item[{\tt parse_translation}, {\tt print_translation}] list the sets
-    of constants that invoke translation functions for terms (see
-    \S\ref{sec:tr_funs}).
-\end{description}
-\index{syntax!Pure|)}
-
 
 \section{Mixfix declarations} \label{sec:mixfix}
 \index{mixfix declarations|(}
@@ -515,49 +144,6 @@
   syntax}.  Try this as an exercise and study the changes in the
 grammar.
 
-\subsection{The mixfix template}
-Let us now take a closer look at the string $template$ appearing in mixfix
-annotations.  This string specifies a list of parsing and printing
-directives: delimiters\index{delimiters}, arguments, spaces, blocks of
-indentation and line breaks.  These are encoded by the following character
-sequences:
-\index{pretty printing|(}
-\begin{description}
-\item[~$d$~] is a delimiter, namely a non-empty sequence of characters
-  other than the special characters {\tt _}, {\tt(}, {\tt)} and~{\tt/}.
-  Even these characters may appear if escaped; this means preceding it with
-  a~{\tt '} (single quote).  Thus you have to write {\tt ''} if you really
-  want a single quote.  Furthermore, a~{\tt '} followed by a space separates
-  delimiters without extra white space being added for printing.
-
-\item[~{\tt_}~] is an argument position, which stands for a nonterminal symbol
-  or name token.
-
-\item[~$s$~] is a non-empty sequence of spaces for printing.  This and the
-  following specifications do not affect parsing at all.
-
-\item[~{\tt(}$n$~] opens a pretty printing block.  The optional number $n$
-  specifies how much indentation to add when a line break occurs within the
-  block.  If {\tt(} is not followed by digits, the indentation defaults
-  to~0.
-
-\item[~{\tt)}~] closes a pretty printing block.
-
-\item[~{\tt//}~] forces a line break.
-
-\item[~{\tt/}$s$~] allows a line break.  Here $s$ stands for the string of
-  spaces (zero or more) right after the {\tt /} character.  These spaces
-  are printed if the break is not taken.
-\end{description}
-For example, the template {\tt"(_ +/ _)"} specifies an infix operator.
-There are two argument positions; the delimiter~{\tt+} is preceded by a
-space and followed by a space or line break; the entire phrase is a pretty
-printing block.  Other examples appear in Fig.\ts\ref{fig:set_trans} below.
-Isabelle's pretty printer resembles the one described in
-Paulson~\cite{paulson-ml2}.
-
-\index{pretty printing|)}
-
 
 \subsection{Infixes}
 \indexbold{infixes}
@@ -723,141 +309,6 @@
 ambiguity should be eliminated by changing the grammar or the rule.
 
 
-\section{Example: some minimal logics} \label{sec:min_logics}
-\index{examples!of logic definitions}
-
-This section presents some examples that have a simple syntax.  They
-demonstrate how to define new object-logics from scratch.
-
-First we must define how an object-logic syntax is embedded into the
-meta-logic.  Since all theorems must conform to the syntax for~\ndx{prop}
-(see Fig.\ts\ref{fig:pure_gram}), that syntax has to be extended with the
-object-level syntax.  Assume that the syntax of your object-logic defines a
-meta-type~\tydx{o} of formulae which refers to the nonterminal {\tt logic}.
-These formulae can now appear in axioms and theorems wherever \ndx{prop} does
-if you add the production
-\[ prop ~=~ logic. \]
-This is not supposed to be a copy production but an implicit coercion from
-formulae to propositions:
-\begin{ttbox}
-Base = Pure +
-types
-  o
-arities
-  o :: logic
-consts
-  Trueprop :: o => prop   ("_" 5)
-end
-\end{ttbox}
-The constant \cdx{Trueprop} (the name is arbitrary) acts as an invisible
-coercion function.  Assuming this definition resides in a file {\tt Base.thy},
-you have to load it with the command {\tt use_thy "Base"}.
-
-One of the simplest nontrivial logics is {\bf minimal logic} of
-implication.  Its definition in Isabelle needs no advanced features but
-illustrates the overall mechanism nicely:
-\begin{ttbox}
-Hilbert = Base +
-consts
-  "-->" :: [o, o] => o   (infixr 10)
-rules
-  K     "P --> Q --> P"
-  S     "(P --> Q --> R) --> (P --> Q) --> P --> R"
-  MP    "[| P --> Q; P |] ==> Q"
-end
-\end{ttbox}
-After loading this definition from the file {\tt Hilbert.thy}, you can
-start to prove theorems in the logic:
-\begin{ttbox}
-Goal "P --> P";
-{\out Level 0}
-{\out P --> P}
-{\out  1.  P --> P}
-\ttbreak
-by (resolve_tac [Hilbert.MP] 1);
-{\out Level 1}
-{\out P --> P}
-{\out  1.  ?P --> P --> P}
-{\out  2.  ?P}
-\ttbreak
-by (resolve_tac [Hilbert.MP] 1);
-{\out Level 2}
-{\out P --> P}
-{\out  1.  ?P1 --> ?P --> P --> P}
-{\out  2.  ?P1}
-{\out  3.  ?P}
-\ttbreak
-by (resolve_tac [Hilbert.S] 1);
-{\out Level 3}
-{\out P --> P}
-{\out  1.  P --> ?Q2 --> P}
-{\out  2.  P --> ?Q2}
-\ttbreak
-by (resolve_tac [Hilbert.K] 1);
-{\out Level 4}
-{\out P --> P}
-{\out  1.  P --> ?Q2}
-\ttbreak
-by (resolve_tac [Hilbert.K] 1);
-{\out Level 5}
-{\out P --> P}
-{\out No subgoals!}
-\end{ttbox}
-As we can see, this Hilbert-style formulation of minimal logic is easy to
-define but difficult to use.  The following natural deduction formulation is
-better:
-\begin{ttbox}
-MinI = Base +
-consts
-  "-->" :: [o, o] => o   (infixr 10)
-rules
-  impI  "(P ==> Q) ==> P --> Q"
-  impE  "[| P --> Q; P |] ==> Q"
-end
-\end{ttbox}
-Note, however, that although the two systems are equivalent, this fact
-cannot be proved within Isabelle.  Axioms {\tt S} and {\tt K} can be
-derived in {\tt MinI} (exercise!), but {\tt impI} cannot be derived in {\tt
-  Hilbert}.  The reason is that {\tt impI} is only an {\bf admissible} rule
-in {\tt Hilbert}, something that can only be shown by induction over all
-possible proofs in {\tt Hilbert}.
-
-We may easily extend minimal logic with falsity:
-\begin{ttbox}
-MinIF = MinI +
-consts
-  False :: o
-rules
-  FalseE "False ==> P"
-end
-\end{ttbox}
-On the other hand, we may wish to introduce conjunction only:
-\begin{ttbox}
-MinC = Base +
-consts
-  "&" :: [o, o] => o   (infixr 30)
-\ttbreak
-rules
-  conjI  "[| P; Q |] ==> P & Q"
-  conjE1 "P & Q ==> P"
-  conjE2 "P & Q ==> Q"
-end
-\end{ttbox}
-And if we want to have all three connectives together, we create and load a
-theory file consisting of a single line:
-\begin{ttbox}
-MinIFC = MinIF + MinC
-\end{ttbox}
-Now we can prove mixed theorems like
-\begin{ttbox}
-Goal "P & False --> Q";
-by (resolve_tac [MinI.impI] 1);
-by (dresolve_tac [MinC.conjE2] 1);
-by (eresolve_tac [MinIF.FalseE] 1);
-\end{ttbox}
-Try this as an exercise!
-
-
 %%% Local Variables: 
 %%% mode: latex
 %%% TeX-master: "ref"
--- a/doc-src/Ref/goals.tex	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,722 +0,0 @@
-%% $Id$
-\chapter{Proof Management: The Subgoal Module}
-\index{proofs|(}
-\index{subgoal module|(}
-\index{reading!goals|see{proofs, starting}}
-
-The subgoal module stores the current proof state\index{proof state} and
-many previous states; commands can produce new states or return to previous
-ones.  The {\em state list\/} at level $n$ is a list of pairs
-\[ [(\psi@n,\Psi@n),\; (\psi@{n-1},\Psi@{n-1}),\; \ldots,\; (\psi@0,[])] \]
-where $\psi@n$ is the current proof state, $\psi@{n-1}$ is the previous
-one, \ldots, and $\psi@0$ is the initial proof state.  The $\Psi@i$ are
-sequences (lazy lists) of proof states, storing branch points where a
-tactic returned a list longer than one.  The state lists permit various
-forms of backtracking.
-
-Chopping elements from the state list reverts to previous proof states.
-Besides this, the \ttindex{undo} command keeps a list of state lists.  The
-module actually maintains a stack of state lists, to support several
-proofs at the same time.
-
-The subgoal module always contains some proof state.  At the start of the
-Isabelle session, this state consists of a dummy formula.
-
-
-\section{Basic commands}
-Most proofs begin with \texttt{Goal} or \texttt{Goalw} and require no other
-commands than \texttt{by}, \texttt{chop} and \texttt{undo}.  They typically end
-with a call to \texttt{qed}.
-\subsection{Starting a backward proof}
-\index{proofs!starting}
-\begin{ttbox}
-Goal        :                       string -> thm list
-Goalw       :           thm list -> string -> thm list
-goal        : theory ->             string -> thm list 
-goalw       : theory -> thm list -> string -> thm list 
-goalw_cterm :           thm list -> cterm  -> thm list 
-premises    : unit -> thm list
-\end{ttbox}
-
-The goal commands start a new proof by setting the goal.  They replace
-the current state list by a new one consisting of the initial proof
-state.  They also empty the \ttindex{undo} list; this command cannot
-be undone!
-
-They all return a list of meta-hypotheses taken from the main goal.  If
-this list is non-empty, bind its value to an \ML{} identifier by typing
-something like
-\begin{ttbox} 
-val prems = goal{\it theory\/ formula};
-\end{ttbox}\index{assumptions!of main goal}
-These assumptions typically serve as the premises when you are
-deriving a rule.  They are also stored internally and can be retrieved
-later by the function \texttt{premises}.  When the proof is finished,
-\texttt{qed} compares the stored assumptions with the actual
-assumptions in the proof state.
-
-The capital versions of \texttt{Goal} are the basic user level
-commands and should be preferred over the more primitive lowercase
-\texttt{goal} commands.  Apart from taking the current theory context
-as implicit argument, the former ones try to be smart in handling
-meta-hypotheses when deriving rules.  Thus \texttt{prems} have to be
-seldom bound explicitly, the effect is as if \texttt{cut_facts_tac}
-had been called automatically.
-
-\index{definitions!unfolding}
-Some of the commands unfold definitions using meta-rewrite rules.  This
-expansion affects both the initial subgoal and the premises, which would
-otherwise require use of \texttt{rewrite_goals_tac} and
-\texttt{rewrite_rule}.
-
-\begin{ttdescription}
-\item[\ttindexbold{Goal} {\it formula};] begins a new proof, where
-  {\it formula\/} is written as an \ML\ string.
-  
-\item[\ttindexbold{Goalw} {\it defs} {\it formula};] is like
-  \texttt{Goal} but also applies the list of {\it defs\/} as
-  meta-rewrite rules to the first subgoal and the premises.
-  \index{meta-rewriting}
-
-\item[\ttindexbold{goal} {\it theory} {\it formula};] 
-begins a new proof, where {\it theory} is usually an \ML\ identifier
-and the {\it formula\/} is written as an \ML\ string.
-
-\item[\ttindexbold{goalw} {\it theory} {\it defs} {\it formula};] 
-is like \texttt{goal} but also applies the list of {\it defs\/} as
-meta-rewrite rules to the first subgoal and the premises.
-\index{meta-rewriting}
-
-\item[\ttindexbold{goalw_cterm} {\it defs} {\it ct};] is
-  a version of \texttt{goalw} intended for programming.  The main
-  goal is supplied as a cterm, not as a string.  See also
-  \texttt{prove_goalw_cterm}, \S\ref{sec:executing-batch}. 
-
-\item[\ttindexbold{premises}()] 
-returns the list of meta-hypotheses associated with the current proof (in
-case you forgot to bind them to an \ML{} identifier).
-\end{ttdescription}
-
-
-\subsection{Applying a tactic}
-\index{tactics!commands for applying}
-\begin{ttbox} 
-by   : tactic -> unit
-byev : tactic list -> unit
-\end{ttbox}
-These commands extend the state list.  They apply a tactic to the current
-proof state.  If the tactic succeeds, it returns a non-empty sequence of
-next states.  The head of the sequence becomes the next state, while the
-tail is retained for backtracking (see~\texttt{back}).
-\begin{ttdescription} \item[\ttindexbold{by} {\it tactic};] 
-applies the {\it tactic\/} to the proof state.
-
-\item[\ttindexbold{byev} {\it tactics};] 
-applies the list of {\it tactics}, one at a time.  It is useful for testing
-calls to \texttt{prove_goal}, and abbreviates \texttt{by (EVERY {\it
-tactics})}.
-\end{ttdescription}
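-
-For example, assuming the goal \hbox{\tt P --> P} has been set up and the
-object-logic provides the usual rule \texttt{impI}, the whole proof can be
-run in a single step (a sketch only):
-\begin{ttbox}
-byev [resolve_tac [impI] 1, assume_tac 1];
-\end{ttbox}
-This has the same effect as issuing the two commands
-\texttt{by (resolve_tac [impI] 1);} and \texttt{by (assume_tac 1);} in
-sequence.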
-
-\noindent{\it Error indications:}\nobreak
-\begin{itemize}
-\item {\footnotesize\tt "by:\ tactic failed"} means that the tactic
-  returned an empty sequence when applied to the current proof state.
-\item {\footnotesize\tt "Warning:\ same as previous level"} means that the
-  new proof state is identical to the previous state.
-\item{\footnotesize\tt "Warning:\ signature of proof state has changed"}
-  means that some rule was applied whose theory is outside the theory of
-  the initial proof state.  This could signify a mistake such as expressing
-  the goal in intuitionistic logic and proving it using classical logic.
-\end{itemize}
-
-\subsection{Extracting and storing the proved theorem}
-\label{ExtractingAndStoringTheProvedTheorem}
-\index{theorems!extracting proved}
-\begin{ttbox} 
-qed        : string -> unit
-no_qed     : unit -> unit
-result     : unit -> thm
-uresult    : unit -> thm
-bind_thm   : string * thm -> unit
-bind_thms  : string * thm list -> unit
-store_thm  : string * thm -> thm
-store_thms : string * thm list -> thm list
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{qed} $name$;] is the usual way of ending a proof.
-  It combines \texttt{result} and \texttt{bind_thm}: it gets the theorem
-  using \texttt{result()} and stores it in the theorem database associated
-  with its theory.  See below for details.
-  
-\item[\ttindexbold{no_qed}();] indicates that the proof is not concluded by a
-  proper \texttt{qed} command.  This is a do-nothing operation; it merely
-  helps user interfaces such as Proof~General \cite{proofgeneral} to figure
-  out the structure of the {\ML} text.
-
-\item[\ttindexbold{result}()]\index{assumptions!of main goal}
-  returns the final theorem, after converting the free variables to
-  schematics.  It discharges the assumptions supplied to the matching 
-  \texttt{goal} command.  
-
-  It raises an exception unless the proof state passes certain checks.  There
-  must be no assumptions other than those supplied to \texttt{goal}.  There
-  must be no subgoals.  The theorem proved must be a (first-order) instance
-  of the original goal, as stated in the \texttt{goal} command.  This allows
-  {\bf answer extraction} --- instantiation of variables --- but no other
-  changes to the main goal.  The theorem proved must have the same signature
-  as the initial proof state.
-
-  These checks are needed because an Isabelle tactic can return any proof
-  state at all.
-
-\item[\ttindexbold{uresult}()] is like \texttt{result()} but omits the checks.
-  It is needed when the initial goal contains function unknowns, when
-  definitions are unfolded in the main goal (by calling
-  \ttindex{rewrite_tac}),\index{definitions!unfolding} or when
-  \ttindex{assume_ax} has been used.
-  
-\item[\ttindexbold{bind_thm} ($name$, $thm$);]\index{theorems!storing}
-  stores \texttt{standard $thm$} (see \S\ref{MiscellaneousForwardRules})
-  in the theorem database associated with its theory and in the {\ML}
-  variable $name$.  The theorem can be retrieved from the database
-  using \texttt{get_thm} (see \S\ref{BasicOperationsOnTheories}).
-  
-\item[\ttindexbold{store_thm} ($name$, $thm$)]\index{theorems!storing}
-  stores $thm$ in the theorem database associated with its theory and
-  returns that theorem.
-  
-\item[\ttindexbold{bind_thms} \textrm{and} \ttindexbold{store_thms}] are similar to
-  \texttt{bind_thm} and \texttt{store_thm}, but store lists of theorems.
-
-\end{ttdescription}
-
-The name argument of \texttt{qed}, \texttt{bind_thm} etc.\ may be the empty
-string as well; in that case the result is \emph{not} stored, but proper
-checks and presentation of the result still apply.
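-
-A hypothetical end of a proof might thus read as follows (the theorem name
-\texttt{my_eq} is arbitrary, and \texttt{sym} is assumed to be a symmetry
-rule of the object-logic):
-\begin{ttbox}
-qed "my_eq";
-bind_thm ("my_eq_sym", my_eq RS sym);
-\end{ttbox}
-Since \texttt{qed} also binds the \ML{} variable \texttt{my_eq}, the second
-line can store a derived variant alongside the original theorem.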
-
-
-\subsection{Extracting axioms and stored theorems}
-\index{theories!axioms of}\index{axioms!extracting}
-\index{theories!theorems of}\index{theorems!extracting}
-\begin{ttbox}
-thm       : xstring -> thm
-thms      : xstring -> thm list
-get_axiom : theory -> xstring -> thm
-get_thm   : theory -> xstring -> thm
-get_thms  : theory -> xstring -> thm list
-axioms_of : theory -> (string * thm) list
-thms_of   : theory -> (string * thm) list
-assume_ax : theory -> string -> thm
-\end{ttbox}
-\begin{ttdescription}
-  
-\item[\ttindexbold{thm} $name$] is the basic user function for
-  extracting stored theorems from the current theory context.
-  
-\item[\ttindexbold{thms} $name$] is like \texttt{thm}, but returns a
-  whole list associated with $name$ rather than a single theorem.
-  Typically these are collections stored by packages, e.g.\ 
-  \verb|list.simps|.
-
-\item[\ttindexbold{get_axiom} $thy$ $name$] returns an axiom with the
-  given $name$ from $thy$ or any of its ancestors, raising exception
-  \xdx{THEORY} if none exists.  Merging theories can cause several
-  axioms to have the same name; {\tt get_axiom} returns an arbitrary
-  one.  Usually, axioms are also stored as theorems and may be
-  retrieved via \texttt{get_thm} as well.
-  
-\item[\ttindexbold{get_thm} $thy$ $name$] is analogous to {\tt
-    get_axiom}, but looks for a theorem stored in the theory's
-  database.  Like {\tt get_axiom} it searches all parents of a theory
-  if the theorem is not found directly in $thy$.
-  
-\item[\ttindexbold{get_thms} $thy$ $name$] is like \texttt{get_thm}
-  for retrieving theorem lists stored within the theory.  It returns a
-  singleton list if $name$ actually refers to a theorem rather than a
-  theorem list.
-  
-\item[\ttindexbold{axioms_of} $thy$] returns the axioms of this theory
-  node, not including the ones of its ancestors.
-  
-\item[\ttindexbold{thms_of} $thy$] returns all theorems stored within
-  the database of this theory node, not including the ones of its
-  ancestors.
-  
-\item[\ttindexbold{assume_ax} $thy$ $formula$] reads the {\it formula}
-  using the syntax of $thy$, following the same conventions as axioms
-  in a theory definition.  You can thus pretend that {\it formula} is
-  an axiom and use the resulting theorem like an axiom.  Actually {\tt
-    assume_ax} returns an assumption; \ttindex{qed} and
-  \ttindex{result} complain about additional assumptions, but
-  \ttindex{uresult} does not.
-
-For example, if {\it formula} is
-\hbox{\tt a=b ==> b=a} then the resulting theorem has the form
-\hbox{\verb'?a=?b ==> ?b=?a  [!!a b. a=b ==> b=a]'}
-\end{ttdescription}
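-
-For illustration, the following lines retrieve a stored theorem and turn a
-formula into a temporary assumption; the theorem name \texttt{"refl"} and
-the \ML{} binding {\it thy\/} for a theory are assumptions of this sketch:
-\begin{ttbox}
-val r  = thm "refl";
-val eq = assume_ax {\it thy} "a = b ==> b = a";
-\end{ttbox}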
-
-
-\subsection{Retrieving theorems}
-\index{theorems!retrieving}
-
-The following functions retrieve theorems (together with their names)
-from the theorem database that is associated with the current proof
-state's theory.  They can only find theorems that have explicitly been
-stored in the database using \ttindex{qed}, \ttindex{bind_thm} or
-related functions.
-\begin{ttbox} 
-findI           :          int -> (string * thm) list
-findE           :   int -> int -> (string * thm) list
-findEs          :          int -> (string * thm) list
-thms_containing : xstring list -> (string * thm) list
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{findI} $i$]\index{assumptions!of main goal}
-  returns all ``introduction rules'' applicable to subgoal $i$ --- all
-  theorems whose conclusion matches (rather than unifies with) subgoal
-  $i$.  Useful in connection with \texttt{resolve_tac}.
-
-\item[\ttindexbold{findE} $n$ $i$] returns all ``elimination rules''
-  applicable to premise $n$ of subgoal $i$ --- all those theorems whose
-  first premise matches premise $n$ of subgoal $i$.  Useful in connection with
-  \texttt{eresolve_tac} and \texttt{dresolve_tac}.
-
-\item[\ttindexbold{findEs} $i$] returns all ``elimination rules'' applicable
-  to subgoal $i$ --- all those theorems whose first premise matches some
-  premise of subgoal $i$.  Useful in connection with \texttt{eresolve_tac} and
-  \texttt{dresolve_tac}.
-  
-\item[\ttindexbold{thms_containing} $consts$] returns all theorems that
-  contain \emph{all} of the given constants.
-\end{ttdescription}
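-
-For instance, while a proof is in progress one might type the following
-(constant names are logic-specific; \texttt{"Suc"} is merely an example):
-\begin{ttbox}
-findI 1;
-thms_containing ["Suc"];
-\end{ttbox}
-The first call lists candidate introduction rules for subgoal~1, the second
-all stored theorems mentioning the given constant.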
-
-
-\subsection{Undoing and backtracking}
-\begin{ttbox} 
-chop    : unit -> unit
-choplev : int -> unit
-back    : unit -> unit
-undo    : unit -> unit
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{chop}();] 
-deletes the top level of the state list, cancelling the last \ttindex{by}
-command.  It provides a limited undo facility, and the \texttt{undo} command
-can cancel it.
-
-\item[\ttindexbold{choplev} {\it n};] 
-truncates the state list to level~{\it n}, if $n\geq0$.  A negative value
-of~$n$ refers to the $n$th previous level: 
-\hbox{\verb|choplev ~1|} has the same effect as \texttt{chop}.
-
-\item[\ttindexbold{back}();]
-searches the state list for a non-empty branch point, starting from the top
-level.  The first one found becomes the current proof state --- the most
-recent alternative branch is taken.  This is a form of interactive
-backtracking.
-
-\item[\ttindexbold{undo}();] 
-cancels the most recent change to the proof state by the commands \ttindex{by},
-\texttt{chop}, \texttt{choplev}, and~\texttt{back}.  It {\bf cannot}
-cancel \texttt{goal} or \texttt{undo} itself.  It can be repeated to
-cancel a series of commands.
-\end{ttdescription}
-
-\goodbreak
-\noindent{\it Error indications for {\tt back}:}\par\nobreak
-\begin{itemize}
-\item{\footnotesize\tt"Warning:\ same as previous choice at this level"}
-  means \texttt{back} found a non-empty branch point, but that it contained
-  the same proof state as the current one.
-\item{\footnotesize\tt "Warning:\ signature of proof state has changed"}
-  means the signature of the alternative proof state differs from that of
-  the current state.
-\item {\footnotesize\tt "back:\ no alternatives"} means \texttt{back} could
-  find no alternative proof state.
-\end{itemize}
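-
-A typical interactive recovery sequence is sketched below:
-\begin{ttbox}
-undo();        (*cancel the last command*)
-choplev 0;     (*or: return to the initial proof state*)
-back();        (*or: take the next alternative at the last branch point*)
-\end{ttbox}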
-
-\subsection{Printing the proof state}\label{sec:goals-printing}
-\index{proof state!printing of}
-\begin{ttbox} 
-pr    : unit -> unit
-prlev : int -> unit
-prlim : int -> unit
-goals_limit: int ref \hfill{\bf initially 10}
-\end{ttbox}
-See also the printing control options described 
-in~\S\ref{sec:printing-control}. 
-\begin{ttdescription}
-\item[\ttindexbold{pr}();] 
-prints the current proof state.
-
-\item[\ttindexbold{prlev} {\it n};] 
-prints the proof state at level {\it n}, if $n\geq0$.  A negative value
-of~$n$ refers to the $n$th previous level.  This command allows
-you to review earlier stages of the proof.
-
-\item[\ttindexbold{prlim} {\it k};] 
-prints the current proof state, limiting the number of subgoals to~$k$.  It
-updates \texttt{goals_limit} (see below) and is helpful when there are many
-subgoals. 
-
-\item[\ttindexbold{goals_limit} := {\it k};] 
-specifies~$k$ as the maximum number of subgoals to print.
-\end{ttdescription}
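-
-For example, the following commands review the original goal and then
-restrict printing to three subgoals (a sketch):
-\begin{ttbox}
-prlev 0;
-prlim 3;
-goals_limit := 10;    (*restore the default*)
-\end{ttbox}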
-
-
-\subsection{Timing}
-\index{timing statistics}\index{proofs!timing}
-\begin{ttbox} 
-timing: bool ref \hfill{\bf initially false}
-\end{ttbox}
-
-\begin{ttdescription}
-\item[set \ttindexbold{timing};] enables global timing in Isabelle.  In
-  particular, this makes the \ttindex{by} and \ttindex{prove_goal} commands
-  display how much processor time was spent.  This information is
-  compiler-dependent.
-\end{ttdescription}
-
-
-\section{Shortcuts for applying tactics}
-\index{shortcuts!for \texttt{by} commands}
-These commands call \ttindex{by} with common tactics.  Their chief purpose
-is to minimise typing, although the scanning shortcuts are useful in their
-own right.  Chapter~\ref{tactics} explains the tactics themselves.
-
-\subsection{Refining a given subgoal}
-\begin{ttbox} 
-ba  :             int -> unit
-br  : thm      -> int -> unit
-be  : thm      -> int -> unit
-bd  : thm      -> int -> unit
-brs : thm list -> int -> unit
-bes : thm list -> int -> unit
-bds : thm list -> int -> unit
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{ba} {\it i};] 
-performs \texttt{by (assume_tac {\it i});}
-
-\item[\ttindexbold{br} {\it thm} {\it i};] 
-performs \texttt{by (resolve_tac [{\it thm}] {\it i});}
-
-\item[\ttindexbold{be} {\it thm} {\it i};] 
-performs \texttt{by (eresolve_tac [{\it thm}] {\it i});}
-
-\item[\ttindexbold{bd} {\it thm} {\it i};] 
-performs \texttt{by (dresolve_tac [{\it thm}] {\it i});}
-
-\item[\ttindexbold{brs} {\it thms} {\it i};] 
-performs \texttt{by (resolve_tac {\it thms} {\it i});}
-
-\item[\ttindexbold{bes} {\it thms} {\it i};] 
-performs \texttt{by (eresolve_tac {\it thms} {\it i});}
-
-\item[\ttindexbold{bds} {\it thms} {\it i};] 
-performs \texttt{by (dresolve_tac {\it thms} {\it i});}
-\end{ttdescription}
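-
-For instance, assuming the object-logic provides the conjunction
-introduction rule \texttt{conjI}, the following two commands have the same
-effect (a sketch):
-\begin{ttbox}
-br conjI 1;
-by (resolve_tac [conjI] 1);
-\end{ttbox}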
-
-
-\subsection{Scanning shortcuts}
-These shortcuts scan for a suitable subgoal (starting from subgoal~1).
-They refine the first subgoal for which the tactic succeeds.  Thus, they
-require less typing than \texttt{br}, etc.  They display the selected
-subgoal's number; please watch this, for it may not be what you expect!
-
-\begin{ttbox} 
-fa  : unit     -> unit
-fr  : thm      -> unit
-fe  : thm      -> unit
-fd  : thm      -> unit
-frs : thm list -> unit
-fes : thm list -> unit
-fds : thm list -> unit
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{fa}();] 
-solves some subgoal by assumption.
-
-\item[\ttindexbold{fr} {\it thm};] 
-refines some subgoal using \texttt{resolve_tac [{\it thm}]}
-
-\item[\ttindexbold{fe} {\it thm};] 
-refines some subgoal using \texttt{eresolve_tac [{\it thm}]}
-
-\item[\ttindexbold{fd} {\it thm};] 
-refines some subgoal using \texttt{dresolve_tac [{\it thm}]}
-
-\item[\ttindexbold{frs} {\it thms};] 
-refines some subgoal using \texttt{resolve_tac {\it thms}}
-
-\item[\ttindexbold{fes} {\it thms};] 
-refines some subgoal using \texttt{eresolve_tac {\it thms}} 
-
-\item[\ttindexbold{fds} {\it thms};] 
-refines some subgoal using \texttt{dresolve_tac {\it thms}} 
-\end{ttdescription}
-
-\subsection{Other shortcuts}
-\begin{ttbox} 
-bw  : thm -> unit
-bws : thm list -> unit
-ren : string -> int -> unit
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{bw} {\it def};] performs \texttt{by
-    (rewrite_goals_tac [{\it def}]);} It unfolds definitions in the
-  subgoals (but not the main goal), by meta-rewriting with the given
-  definition (see also \S\ref{sec:rewrite_goals}).
-  \index{meta-rewriting}
-
-\item[\ttindexbold{bws}] 
-is like \texttt{bw} but takes a list of definitions.
-
-\item[\ttindexbold{ren} {\it names} {\it i};] 
-performs \texttt{by (rename_tac {\it names} {\it i});}  It renames
-parameters in subgoal~$i$.  (Ignore the message {\footnotesize\tt Warning:\
-  same as previous level}.)
-\index{parameters!renaming}
-\end{ttdescription}
-
-
-\section{Executing batch proofs}\label{sec:executing-batch}
-\index{batch execution}%
-To save space below, let type \texttt{tacfn} abbreviate \texttt{thm list ->
-  tactic list}, which is the type of a tactical proof.
-\begin{ttbox}
-prove_goal :           theory ->             string -> tacfn -> thm
-qed_goal   : string -> theory ->             string -> tacfn -> unit
-prove_goalw:           theory -> thm list -> string -> tacfn -> thm
-qed_goalw  : string -> theory -> thm list -> string -> tacfn -> unit
-prove_goalw_cterm:               thm list -> cterm  -> tacfn -> thm
-\end{ttbox}
-These batch functions create an initial proof state, then apply a tactic to
-it, yielding a sequence of final proof states.  The head of the sequence is
-returned, provided it is an instance of the theorem originally proposed.
-The forms \texttt{prove_goal}, \texttt{prove_goalw} and
-\texttt{prove_goalw_cterm} are analogous to \texttt{goal}, \texttt{goalw} and
-\texttt{goalw_cterm}.  
-
-The tactic is specified by a function from theorem lists to tactic lists.
-The function is applied to the list of meta-assumptions taken from
-the main goal.  The resulting tactics are applied in sequence (using {\tt
-  EVERY}).  For example, a proof consisting of the commands
-\begin{ttbox} 
-val prems = goal {\it theory} {\it formula};
-by \(tac@1\);  \ldots  by \(tac@n\);
-qed "my_thm";
-\end{ttbox}
-can be transformed to an expression as follows:
-\begin{ttbox} 
-qed_goal "my_thm" {\it theory} {\it formula}
- (fn prems=> [ \(tac@1\), \ldots, \(tac@n\) ]);
-\end{ttbox}
-Both methods perform identical processing of the initial {\it formula} and
-the final proof state.  But \texttt{prove_goal} executes the tactic as an
-atomic operation, bypassing the subgoal module; the current interactive
-proof is unaffected.
-%
-\begin{ttdescription}
-\item[\ttindexbold{prove_goal} {\it theory} {\it formula} {\it tacsf};] 
-executes a proof of the {\it formula\/} in the given {\it theory}, using
-the given tactic function.
-
-\item[\ttindexbold{qed_goal} $name$ $theory$ $formula$ $tacsf$;] acts
-  like \texttt{prove_goal} but it also stores the resulting theorem in the
-  theorem database associated with its theory and in the {\ML}
-  variable $name$ (see \S\ref{ExtractingAndStoringTheProvedTheorem}).
-
-\item[\ttindexbold{prove_goalw} {\it theory} {\it defs} {\it formula} 
-      {\it tacsf};]\index{meta-rewriting}
-is like \texttt{prove_goal} but also applies the list of {\it defs\/} as
-meta-rewrite rules to the first subgoal and the premises.
-
-\item[\ttindexbold{qed_goalw} $name$ $theory$ $defs$ $formula$ $tacsf$;]
-is analogous to \texttt{qed_goal}.
-
-\item[\ttindexbold{prove_goalw_cterm} {\it defs} {\it ct}
-      {\it tacsf};] 
-is a version of \texttt{prove_goalw} intended for programming.  The main
-goal is supplied as a cterm, not as a string.  A cterm carries a theory with
-      it, and can be created from a term~$t$ by
-\begin{ttbox}
-cterm_of (sign_of thy) \(t\)        
-\end{ttbox}
-  \index{*cterm_of}\index{*sign_of}
-\end{ttdescription}
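-
-As a concrete sketch (assuming an \ML{} binding {\it thy\/} for a theory
-whose logic provides the reflexivity rule \texttt{refl}), a one-line batch
-proof might read:
-\begin{ttbox}
-qed_goal "a_refl" {\it thy} "a = a"
-  (fn _ => [resolve_tac [refl] 1]);
-\end{ttbox}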
-
-
-\section{Internal proofs}
-
-\begin{ttbox}
-Tactic.prove: Sign.sg -> string list -> term list -> term ->
-  (thm list -> tactic) -> thm
-Tactic.prove_standard: Sign.sg -> string list -> term list -> term ->
-  (thm list -> tactic) -> thm
-\end{ttbox}
-
-These functions provide a clean internal interface for programmed proofs.  The
-special overhead of top-level statements (cf.\ \verb,prove_goalw_cterm,) is
-omitted.  Statements may be established within a local context of fixed
-variables and assumptions, which are the only hypotheses to be discharged in
-the result.
-
-\begin{ttdescription}
-\item[\ttindexbold{Tactic.prove}~$sg~xs~As~C~tacf$] establishes the result
-  $\Forall xs. As \Imp C$ via the given tactic (which is a function taking the
-  assumptions as local premises).
-  
-\item[\ttindexbold{Tactic.prove_standard}] is similar to \verb,Tactic.prove,,
-  but performs the \verb,standard, operation on the result, essentially
-  turning it into a top-level statement.  Never do this for local proofs
-  within other proof tools!
-
-\end{ttdescription}
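-
-A hypothetical application might read as follows, assuming a signature
-{\it sg\/} and terms {\it A\/} and {\it C\/} such that $C$ follows from $A$
-by assumption:
-\begin{ttbox}
-val th = Tactic.prove {\it sg} ["x"] [{\it A}] {\it C}
-  (fn prems => cut_facts_tac prems 1 THEN assume_tac 1);
-\end{ttbox}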
-
-
-\section{Managing multiple proofs}
-\index{proofs!managing multiple}
-You may save the current state of the subgoal module and resume work on it
-later.  This serves two purposes.  
-\begin{enumerate}
-\item At some point, you may be uncertain of the next step, and
-wish to experiment.
-
-\item During a proof, you may see that a lemma should be proved first.
-\end{enumerate}
-Each saved proof state consists of a list of levels; \ttindex{chop} behaves
-independently for each of the saved proofs.  In addition, each saved state
-carries a separate \ttindex{undo} list.
-
-\subsection{The stack of proof states}
-\index{proofs!stacking}
-\begin{ttbox} 
-push_proof   : unit -> unit
-pop_proof    : unit -> thm list
-rotate_proof : unit -> thm list
-\end{ttbox}
-The subgoal module maintains a stack of proof states.  Most subgoal
-commands affect only the top of the stack.  The \ttindex{Goal} command {\em
-replaces\/} the top of the stack; the only command that pushes a proof on the
-stack is \texttt{push_proof}.
-
-To save some point of the proof, call \texttt{push_proof}.  You may now
-state a lemma using \texttt{goal}, or simply continue to apply tactics.
-Later, you can return to the saved point by calling \texttt{pop_proof} or 
-\texttt{rotate_proof}. 
-
-To view the entire stack, call \texttt{rotate_proof} repeatedly; as it rotates
-the stack, it prints the new top element.
-
-\begin{ttdescription}
-\item[\ttindexbold{push_proof}();]  
-duplicates the top element of the stack, pushing a copy of the current
-proof state onto the stack.
-
-\item[\ttindexbold{pop_proof}();]  
-discards the top element of the stack.  It returns the list of
-assumptions associated with the new proof;  you should bind these to an
-\ML\ identifier.  They can also be obtained by calling \ttindex{premises}.
-
-\item[\ttindexbold{rotate_proof}();]
-\index{assumptions!of main goal}
-rotates the stack, moving the top element to the bottom.  It returns the
-list of assumptions associated with the new proof.
-\end{ttdescription}
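-
-A typical use of the stack is to interrupt the main proof for a lemma
-(a sketch; the lemma and its proof steps are placeholders):
-\begin{ttbox}
-push_proof();                   (*save the current proof state*)
-Goal "P | Q --> Q | P";         (*state the lemma*)
-by \(\ldots\);  qed "lemma1";
-pop_proof();                    (*resume the original proof*)
-\end{ttbox}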
-
-
-\subsection{Saving and restoring proof states}
-\index{proofs!saving and restoring}
-\begin{ttbox} 
-save_proof    : unit -> proof
-restore_proof : proof -> thm list
-\end{ttbox}
-States of the subgoal module may be saved as \ML\ values of
-type~\mltydx{proof}, and later restored.
-
-\begin{ttdescription}
-\item[\ttindexbold{save_proof}();]  
-returns the current state, which is on top of the stack.
-
-\item[\ttindexbold{restore_proof} {\it prf};]\index{assumptions!of main goal}
-  replaces the top of the stack by~{\it prf}.  It returns the list of
-  assumptions associated with the new proof.
-\end{ttdescription}
-
-
-\section{*Debugging and inspecting}
-\index{tactics!debugging}
-These functions can be useful when you are debugging a tactic.  They refer
-to the current proof state stored in the subgoal module.  A tactic
-should never call them; it should operate on the proof state supplied as its
-argument.
-
-\subsection{Reading and printing terms}
-\index{terms!reading of}\index{terms!printing of}\index{types!printing of}
-\begin{ttbox} 
-read    : string -> term
-prin    : term -> unit
-printyp : typ -> unit
-\end{ttbox}
-These read and print terms (or types) using the syntax associated with the
-proof state.
-
-\begin{ttdescription}
-\item[\ttindexbold{read} {\it string}]  
-reads the {\it string} as a term, without type-checking.
-
-\item[\ttindexbold{prin} {\it t};]  
-prints the term~$t$ at the terminal.
-
-\item[\ttindexbold{printyp} {\it T};]  
-prints the type~$T$ at the terminal.
-\end{ttdescription}
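-
-For example (a sketch; \texttt{f} and \texttt{x} must be legal syntax in
-the current proof state's theory):
-\begin{ttbox}
-val t = read "f(x)";    (*parse, without type-checking*)
-prin t;                 (*print it back*)
-\end{ttbox}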
-
-\subsection{Inspecting the proof state}
-\index{proofs!inspecting the state}
-\begin{ttbox} 
-topthm  : unit -> thm
-getgoal : int -> term
-gethyps : int -> thm list
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{topthm}()]  
-returns the proof state as an Isabelle theorem.  This is what \ttindex{by}
-would supply to a tactic at this point.  It omits the post-processing of
-\ttindex{result} and \ttindex{uresult}.
-
-\item[\ttindexbold{getgoal} {\it i}]  
-returns subgoal~$i$ of the proof state, as a term.  You may print
-this using \texttt{prin}, though you may have to examine the internal
-data structure in order to locate the problem!
-
-\item[\ttindexbold{gethyps} {\it i}]
-  returns the hypotheses of subgoal~$i$ as meta-level assumptions.  In
-  these theorems, the subgoal's parameters become free variables.  This
-  command is supplied for debugging uses of \ttindex{METAHYPS}.
-\end{ttdescription}
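-
-A short debugging session, assuming a proof is in progress, might look
-like this:
-\begin{ttbox}
-topthm();               (*the full proof state as a theorem*)
-prin (getgoal 1);       (*just the first subgoal*)
-val hs = gethyps 1;     (*its hypotheses, as meta-level assumptions*)
-\end{ttbox}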
-
-
-\subsection{Filtering lists of rules}
-\begin{ttbox} 
-filter_goal: (term*term->bool) -> thm list -> int -> thm list
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{filter_goal} {\it could} {\it ths} {\it i}] 
-applies \texttt{filter_thms {\it could}} to subgoal~$i$ of the proof
-state and returns the list of theorems that survive the filtering. 
-\end{ttdescription}
-
-\index{subgoal module|)}
-\index{proofs|)}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End: 
--- a/doc-src/Ref/introduction.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/introduction.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,23 +1,5 @@
-
-%% $Id$
 
 \chapter{Basic Use of Isabelle}\index{sessions|(} 
-The Reference Manual is a comprehensive description of Isabelle
-proper, including all \ML{} commands, functions and packages.  It
-really is intended for reference, perhaps for browsing, but not for
-reading through.  It is not a tutorial, but assumes familiarity with
-the basic logical concepts of Isabelle.
-
-When you are looking for a way of performing some task, scan the Table of
-Contents for a relevant heading.  Functions are organized by their purpose,
-by their operands (subgoals, tactics, theorems), and by their usefulness.
-In each section, basic functions appear first, then advanced functions, and
-finally esoteric functions.  Use the Index when you are looking for the
-definition of a particular Isabelle function.
-
-A few examples are presented.  Many example files are distributed with
-Isabelle, however; please experiment interactively.
-
 
 \section{Basic interaction with Isabelle}
 \index{starting up|bold}\nobreak
@@ -217,109 +199,6 @@
 value is returned.
 
 
-\section{Printing of terms and theorems}\label{sec:printing-control}
-\index{printing control|(}
-Isabelle's pretty printer is controlled by a number of parameters.
-
-\subsection{Printing limits}
-\begin{ttbox} 
-Pretty.setdepth  : int -> unit
-Pretty.setmargin : int -> unit
-print_depth      : int -> unit
-\end{ttbox}
-These set limits for terminal output.  See also {\tt goals_limit},
-which limits the number of subgoals printed
-(\S\ref{sec:goals-printing}).
-
-\begin{ttdescription}
-\item[\ttindexbold{Pretty.setdepth} \(d\);] tells Isabelle's pretty printer to
-  limit the printing depth to~$d$.  This affects the display of theorems and
-  terms.  The default value is~0, which permits printing to an arbitrary
-  depth.  Useful values for $d$ are~10 and~20.
-
-\item[\ttindexbold{Pretty.setmargin} \(m\);]  
-  tells Isabelle's pretty printer to assume a right margin (page width)
-  of~$m$.  The initial margin is~76.
-
-\item[\ttindexbold{print_depth} \(n\);]  
-  limits the printing depth of complex \ML{} values, such as theorems and
-  terms.  This command affects the \ML{} top level and its effect is
-  compiler-dependent.  Typically $n$ should be less than~10.
-\end{ttdescription}
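-
-For example, the following settings are plausible for a wide terminal
-window (a sketch):
-\begin{ttbox}
-Pretty.setdepth 20;     (*do not print deeper than 20 levels*)
-Pretty.setmargin 100;   (*assume a 100-column window*)
-print_depth 8;          (*limit printing of ML values*)
-\end{ttbox}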
-
-
-\subsection{Printing of hypotheses, brackets, types etc.}
-\index{meta-assumptions!printing of}
-\index{types!printing of}\index{sorts!printing of}
-\begin{ttbox} 
-show_hyps     : bool ref \hfill{\bf initially false}
-show_tags     : bool ref \hfill{\bf initially false}
-show_brackets : bool ref \hfill{\bf initially false}
-show_types    : bool ref \hfill{\bf initially false}
-show_sorts    : bool ref \hfill{\bf initially false}
-show_consts   : bool ref \hfill{\bf initially false}
-long_names    : bool ref \hfill{\bf initially false}
-\end{ttbox}
-These flags allow you to control how much information is displayed for
-types, terms and theorems.  The hypotheses of theorems \emph{are}
-normally shown.  Superfluous parentheses of types and terms are not.
-Types and sorts of variables are normally hidden.
-
-Note that displaying types and sorts may explain why a polymorphic
-inference rule fails to resolve with some goal, or why a rewrite rule
-does not apply as expected.
-
-\begin{ttdescription}
-
-\item[reset \ttindexbold{show_hyps};] makes Isabelle show each
-  meta-level hypothesis as a dot.
-  
-\item[set \ttindexbold{show_tags};] makes Isabelle show tags of theorems
-  (which are basically just comments that may be attached by some tools).
-  
-\item[set \ttindexbold{show_brackets};] makes Isabelle show full
-  bracketing.  In particular, this reveals the grouping of infix
-  operators.
-  
-\item[set \ttindexbold{show_types};] makes Isabelle show types when
-  printing a term or theorem.
-  
-\item[set \ttindexbold{show_sorts};] makes Isabelle show both types
-  and the sorts of type variables, independently of the value of
-  \texttt{show_types}.
-  
-\item[set \ttindexbold{show_consts};] makes Isabelle show types of constants
-  when printing proof states.  Note that the output can be enormous as
-  polymorphic constants often occur at several different type instances.
-
-\item[set \ttindexbold{long_names};] forces names of all objects
-  (types, constants, theorems, etc.) to be printed in their fully
-  qualified internal form.
-
-\end{ttdescription}
-
-
-\subsection{Eta-contraction before printing}
-\begin{ttbox} 
-eta_contract: bool ref
-\end{ttbox}
-The {\bf $\eta$-contraction law} asserts $(\lambda x.f(x))\equiv f$,
-provided $x$ is not free in~$f$.  It asserts {\bf extensionality} of
-functions: $f\equiv g$ if $f(x)\equiv g(x)$ for all~$x$.  Higher-order
-unification frequently puts terms into a fully $\eta$-expanded form.  For
-example, if $F$ has type $(\tau\To\tau)\To\tau$ then its expanded form is
-$\lambda h.F(\lambda x.h(x))$.  By default, the user sees this expanded
-form.
-
-\begin{ttdescription}
-\item[set \ttindexbold{eta_contract};]
-makes Isabelle perform $\eta$-contractions before printing, so that
-$\lambda h.F(\lambda x.h(x))$ appears simply as~$F$.  The
-distinction between a term and its $\eta$-expanded form occasionally
-matters.
-\end{ttdescription}
-\index{printing control|)}
-
 \section{Diagnostic messages}
 \index{error messages}
 \index{warnings}
@@ -351,40 +230,16 @@
 \ttindex{warning} resume normal program execution.
 
 
-\section{Displaying exceptions as error messages}
-\index{exceptions!printing of}
+\section{Timing}
+\index{timing statistics}\index{proofs!timing}
 \begin{ttbox} 
-print_exn: exn -> 'a
+timing: bool ref \hfill{\bf initially false}
 \end{ttbox}
-Certain Isabelle primitives, such as the forward proof functions {\tt RS}
-and {\tt RSN}, are called both interactively and from programs.  They
-indicate errors not by printing messages, but by raising exceptions.  For
-interactive use, \ML's reporting of an uncaught exception may be
-uninformative.  The Poly/ML function {\tt exception_trace} can generate a
-backtrace.\index{Poly/{\ML} compiler}
 
 \begin{ttdescription}
-\item[\ttindexbold{print_exn} $e$] 
-displays the exception~$e$ in a readable manner, and then re-raises~$e$.
-Typical usage is~\hbox{\tt $EXP$ handle e => print_exn e;}, where
-$EXP$ is an expression that may raise an exception.
-
-{\tt print_exn} can display the following common exceptions, which concern
-types, terms, theorems and theories, respectively.  Each carries a message
-and related information.
-\begin{ttbox} 
-exception TYPE   of string * typ list * term list
-exception TERM   of string * term list
-exception THM    of string * int * thm list
-exception THEORY of string * theory list
-\end{ttbox}
+\item[set \ttindexbold{timing};] enables global timing in Isabelle.
+  This information is compiler-dependent.
 \end{ttdescription}
-\begin{warn}
-  {\tt print_exn} prints terms by calling \ttindex{prin}, which obtains
-  pretty printing information from the proof state last stored in the
-  subgoal module.  The appearance of the output thus depends upon the
-  theory used in the last interactive proof.
-\end{warn}
 
 \index{sessions|)}
 
--- a/doc-src/Ref/ref.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/ref.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,7 +1,6 @@
 \documentclass[12pt,a4paper]{report}
-\usepackage{graphicx,../iman,../extra,../ttbox,../proof,../rail,../pdfsetup}
+\usepackage{graphicx,../iman,../extra,../ttbox,../proof,../pdfsetup}
 
-%% $Id$
 %%\includeonly{}
 %%% to index ids: \[\\tt \([a-zA-Z0-9][a-zA-Z0-9_'.]*\)    [\\ttindexbold{\1}
 %%% to delete old ones:  \\indexbold{\*[^}]*}
@@ -22,10 +21,6 @@
 \sloppy
 \binperiod     %%%treat . like a binary operator
 
-\railalias{lbrace}{\ttlbrace}
-\railalias{rbrace}{\ttrbrace}
-\railterm{lbrace,rbrace}
-
 \begin{document}
 \underscoreoff
 
@@ -34,17 +29,10 @@
 \index{meta-rules|see{meta-rules}}
 
 \maketitle 
-\emph{Note}: this document is part of the earlier Isabelle documentation, 
-which is somewhat superseded by the Isabelle/HOL
-\emph{Tutorial}~\cite{isa-tutorial}. Much of it is concerned with 
-the old-style theory syntax and the primitives for conducting proofs 
-using the ML top level. This style of interaction is largely obsolete:
-most Isabelle proofs are now written using the Isar 
-language and the Proof General interface. However, this is the only
-comprehensive Isabelle reference manual.  
-
-See also the \emph{Introduction to Isabelle}, which has tutorial examples
-on conducting proofs using the ML top-level.
+\emph{Note}: this document is part of the earlier Isabelle
+documentation and is mostly outdated.  Fully obsolete parts of the
+original text have already been removed.  The remaining material
+covers some aspects that did not make it into the newer manuals yet.
 
 \subsubsection*{Acknowledgements} 
 Tobias Nipkow, of T. U. Munich, wrote most of
@@ -62,7 +50,6 @@
 \pagenumbering{roman} \tableofcontents \clearfirst
 
 \include{introduction}
-\include{goals}
 \include{tactic}
 \include{tctical}
 \include{thm}
--- a/doc-src/Ref/simplifier.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/simplifier.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
 \chapter{Simplification}
 \label{chap:simplification}
 \index{simplification|(}
@@ -810,173 +810,6 @@
 \end{warn}
 
 
-\section{Examples of using the Simplifier}
-\index{examples!of simplification} Assume we are working within {\tt
-  FOL} (see the file \texttt{FOL/ex/Nat}) and that
-\begin{ttdescription}
-\item[Nat.thy] 
-  is a theory including the constants $0$, $Suc$ and $+$,
-\item[add_0]
-  is the rewrite rule $0+\Var{n} = \Var{n}$,
-\item[add_Suc]
-  is the rewrite rule $Suc(\Var{m})+\Var{n} = Suc(\Var{m}+\Var{n})$,
-\item[induct]
-  is the induction rule $\List{\Var{P}(0);\; \Forall x. \Var{P}(x)\Imp
-    \Var{P}(Suc(x))} \Imp \Var{P}(\Var{n})$.
-\end{ttdescription}
-We augment the implicit simpset inherited from \texttt{Nat} with the
-basic rewrite rules for addition of natural numbers:
-\begin{ttbox}
-Addsimps [add_0, add_Suc];
-\end{ttbox}
-
-\subsection{A trivial example}
-Proofs by induction typically involve simplification.  Here is a proof
-that~0 is a right identity:
-\begin{ttbox}
-Goal "m+0 = m";
-{\out Level 0}
-{\out m + 0 = m}
-{\out  1. m + 0 = m}
-\end{ttbox}
-The first step is to perform induction on the variable~$m$.  This returns a
-base case and inductive step as two subgoals:
-\begin{ttbox}
-by (res_inst_tac [("n","m")] induct 1);
-{\out Level 1}
-{\out m + 0 = m}
-{\out  1. 0 + 0 = 0}
-{\out  2. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)}
-\end{ttbox}
-Simplification solves the first subgoal trivially:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 2}
-{\out m + 0 = m}
-{\out  1. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)}
-\end{ttbox}
-The remaining subgoal requires \ttindex{Asm_simp_tac} in order to use the
-induction hypothesis as a rewrite rule:
-\begin{ttbox}
-by (Asm_simp_tac 1);
-{\out Level 3}
-{\out m + 0 = m}
-{\out No subgoals!}
-\end{ttbox}
-
-\subsection{An example of tracing}
-\index{tracing!of simplification|(}\index{*trace_simp}
-
-Let us prove a similar result involving more complex terms.  We prove
-that addition is commutative.
-\begin{ttbox}
-Goal "m+Suc(n) = Suc(m+n)";
-{\out Level 0}
-{\out m + Suc(n) = Suc(m + n)}
-{\out  1. m + Suc(n) = Suc(m + n)}
-\end{ttbox}
-Performing induction on~$m$ yields two subgoals:
-\begin{ttbox}
-by (res_inst_tac [("n","m")] induct 1);
-{\out Level 1}
-{\out m + Suc(n) = Suc(m + n)}
-{\out  1. 0 + Suc(n) = Suc(0 + n)}
-{\out  2. !!x. x + Suc(n) = Suc(x + n) ==>}
-{\out          Suc(x) + Suc(n) = Suc(Suc(x) + n)}
-\end{ttbox}
-Simplification solves the first subgoal, this time rewriting two
-occurrences of~0:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 2}
-{\out m + Suc(n) = Suc(m + n)}
-{\out  1. !!x. x + Suc(n) = Suc(x + n) ==>}
-{\out          Suc(x) + Suc(n) = Suc(Suc(x) + n)}
-\end{ttbox}
-Switching tracing on illustrates how the simplifier solves the remaining
-subgoal: 
-\begin{ttbox}
-set trace_simp;
-by (Asm_simp_tac 1);
-\ttbreak
-{\out Adding rewrite rule:}
-{\out .x + Suc n == Suc (.x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out ?m + Suc ?n == Suc (?m + ?n)}
-{\out Rewriting:}
-{\out Suc .x + Suc n == Suc (Suc .x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out Suc ?m + ?n == Suc (?m + ?n)}
-{\out Rewriting:}
-{\out Suc .x + n == Suc (.x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out Suc ?m + ?n == Suc (?m + ?n)}
-{\out Rewriting:}
-{\out Suc .x + n == Suc (.x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out ?x = ?x == True}
-{\out Rewriting:}
-{\out Suc (Suc (.x + n)) = Suc (Suc (.x + n)) == True}
-\ttbreak
-{\out Level 3}
-{\out m + Suc(n) = Suc(m + n)}
-{\out No subgoals!}
-\end{ttbox}
-Many variations are possible.  At Level~1 (in either example) we could have
-solved both subgoals at once using the tactical \ttindex{ALLGOALS}:
-\begin{ttbox}
-by (ALLGOALS Asm_simp_tac);
-{\out Level 2}
-{\out m + Suc(n) = Suc(m + n)}
-{\out No subgoals!}
-\end{ttbox}
-\index{tracing!of simplification|)}
-
-
-\subsection{Free variables and simplification}
-
-Here is a conjecture to be proved for an arbitrary function~$f$
-satisfying the law $f(Suc(\Var{n})) = Suc(f(\Var{n}))$:
-\begin{ttbox}
-val [prem] = Goal
-               "(!!n. f(Suc(n)) = Suc(f(n))) ==> f(i+j) = i+f(j)";
-{\out Level 0}
-{\out f(i + j) = i + f(j)}
-{\out  1. f(i + j) = i + f(j)}
-\ttbreak
-{\out val prem = "f(Suc(?n)) = Suc(f(?n))}
-{\out             [!!n. f(Suc(n)) = Suc(f(n))]" : thm}
-\end{ttbox}
-In the theorem~\texttt{prem}, note that $f$ is a free variable while
-$\Var{n}$ is a schematic variable.
-\begin{ttbox}
-by (res_inst_tac [("n","i")] induct 1);
-{\out Level 1}
-{\out f(i + j) = i + f(j)}
-{\out  1. f(0 + j) = 0 + f(j)}
-{\out  2. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)}
-\end{ttbox}
-We simplify each subgoal in turn.  The first one is trivial:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 2}
-{\out f(i + j) = i + f(j)}
-{\out  1. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)}
-\end{ttbox}
-The remaining subgoal requires rewriting by the premise, so we add it
-to the current simpset:
-\begin{ttbox}
-by (asm_simp_tac (simpset() addsimps [prem]) 1);
-{\out Level 3}
-{\out f(i + j) = i + f(j)}
-{\out No subgoals!}
-\end{ttbox}
-
-
 \section{Permutative rewrite rules}
 \index{rewrite rules!permutative|(}
 
--- a/doc-src/Ref/substitution.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/substitution.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
 \chapter{Substitution Tactics} \label{substitution}
 \index{tactics!substitution|(}\index{equality|(}
 
--- a/doc-src/Ref/syntax.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/syntax.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
 \chapter{Syntax Transformations} \label{chap:syntax}
 \newcommand\ttapp{\mathrel{\hbox{\tt\$}}}
 \newcommand\mtt[1]{\mbox{\tt #1}}
--- a/doc-src/Ref/tactic.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/tactic.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,235 +1,8 @@
-%% $Id$
+
 \chapter{Tactics} \label{tactics}
-\index{tactics|(} Tactics have type \mltydx{tactic}.  This is just an
-abbreviation for functions from theorems to theorem sequences, where
-the theorems represent states of a backward proof.  Tactics seldom
-need to be coded from scratch, as functions; instead they are
-expressed using basic tactics and tacticals.
-
-This chapter only presents the primitive tactics.  Substantial proofs
-require the power of automatic tools like simplification
-(Chapter~\ref{chap:simplification}) and classical tableau reasoning
-(Chapter~\ref{chap:classical}).
-
-\section{Resolution and assumption tactics}
-{\bf Resolution} is Isabelle's basic mechanism for refining a subgoal using
-a rule.  {\bf Elim-resolution} is particularly suited for elimination
-rules, while {\bf destruct-resolution} is particularly suited for
-destruction rules.  The {\tt r}, {\tt e}, {\tt d} naming convention is
-maintained for several different kinds of resolution tactics, as well as
-the shortcuts in the subgoal module.
-
-All the tactics in this section act on a subgoal designated by a positive
-integer~$i$.  They fail (by returning the empty sequence) if~$i$ is out of
-range.
-
-\subsection{Resolution tactics}
-\index{resolution!tactics}
-\index{tactics!resolution|bold}
-\begin{ttbox} 
-resolve_tac  : thm list -> int -> tactic
-eresolve_tac : thm list -> int -> tactic
-dresolve_tac : thm list -> int -> tactic
-forward_tac  : thm list -> int -> tactic 
-\end{ttbox}
-These perform resolution on a list of theorems, $thms$, representing a list
-of object-rules.  When generating next states, they take each of the rules
-in the order given.  Each rule may yield several next states, or none:
-higher-order resolution may yield multiple resolvents.
-\begin{ttdescription}
-\item[\ttindexbold{resolve_tac} {\it thms} {\it i}] 
-  refines the proof state using the rules, which should normally be
-  introduction rules.  It resolves a rule's conclusion with
-  subgoal~$i$ of the proof state.
-
-\item[\ttindexbold{eresolve_tac} {\it thms} {\it i}] 
-  \index{elim-resolution}
-  performs elim-resolution with the rules, which should normally be
-  elimination rules.  It resolves with a rule, proves its first premise by
-  assumption, and finally \emph{deletes} that assumption from any new
-  subgoals.  (To rotate a rule's premises,
-  see \texttt{rotate_prems} in~{\S}\ref{MiscellaneousForwardRules}.)
-
-\item[\ttindexbold{dresolve_tac} {\it thms} {\it i}] 
-  \index{forward proof}\index{destruct-resolution}
-  performs destruct-resolution with the rules, which normally should
-  be destruction rules.  This replaces an assumption by the result of
-  applying one of the rules.
-
-\item[\ttindexbold{forward_tac}]\index{forward proof}
-  is like {\tt dresolve_tac} except that the selected assumption is not
-  deleted.  It applies a rule to an assumption, adding the result as a new
-  assumption.
-\end{ttdescription}
-
-\subsection{Assumption tactics}
-\index{tactics!assumption|bold}\index{assumptions!tactics for}
-\begin{ttbox} 
-assume_tac    : int -> tactic
-eq_assume_tac : int -> tactic
-\end{ttbox} 
-\begin{ttdescription}
-\item[\ttindexbold{assume_tac} {\it i}] 
-attempts to solve subgoal~$i$ by assumption.
-
-\item[\ttindexbold{eq_assume_tac}] 
-is like {\tt assume_tac} but does not use unification.  It succeeds (with a
-\emph{unique} next state) if one of the assumptions is identical to the
-subgoal's conclusion.  Since it does not instantiate variables, it cannot
-make other subgoals unprovable.  It is intended to be called from proof
-strategies, not interactively.
-\end{ttdescription}
-
-\subsection{Matching tactics} \label{match_tac}
-\index{tactics!matching}
-\begin{ttbox} 
-match_tac  : thm list -> int -> tactic
-ematch_tac : thm list -> int -> tactic
-dmatch_tac : thm list -> int -> tactic
-\end{ttbox}
-These are just like the resolution tactics except that they never
-instantiate unknowns in the proof state.  Flexible subgoals are not updated
-willy-nilly, but are left alone.  Matching --- strictly speaking --- means
-treating the unknowns in the proof state as constants; these tactics merely
-discard unifiers that would update the proof state.
-\begin{ttdescription}
-\item[\ttindexbold{match_tac} {\it thms} {\it i}] 
-refines the proof state using the rules, matching a rule's
-conclusion with subgoal~$i$ of the proof state.
-
-\item[\ttindexbold{ematch_tac}] 
-is like {\tt match_tac}, but performs elim-resolution.
-
-\item[\ttindexbold{dmatch_tac}] 
-is like {\tt match_tac}, but performs destruct-resolution.
-\end{ttdescription}
-
-
-\subsection{Explicit instantiation} \label{res_inst_tac}
-\index{tactics!instantiation}\index{instantiation}
-\begin{ttbox} 
-res_inst_tac    : (string*string)list -> thm -> int -> tactic
-eres_inst_tac   : (string*string)list -> thm -> int -> tactic
-dres_inst_tac   : (string*string)list -> thm -> int -> tactic
-forw_inst_tac   : (string*string)list -> thm -> int -> tactic
-instantiate_tac : (string*string)list -> tactic
-\end{ttbox}
-The first four of these tactics are designed for applying, by resolution,
-rules such as substitution and induction, which cause difficulties for
-higher-order unification.  The tactics accept explicit instantiations for
-unknowns in the rule --- typically, in the rule's conclusion.  The last one,
-{\tt instantiate_tac}, may be used to instantiate unknowns in the proof state,
-independently of rule application. 
-
-Each instantiation is a pair {\tt($v$,$e$)}, 
-where $v$ is an unknown \emph{without} its leading question mark!
-\begin{itemize}
-\item If $v$ is the type unknown {\tt'a}, then
-the rule must contain a type unknown \verb$?'a$ of some
-sort~$s$, and $e$ should be a type of sort $s$.
-
-\item If $v$ is the unknown {\tt P}, then
-the rule must contain an unknown \verb$?P$ of some type~$\tau$,
-and $e$ should be a term of some type~$\sigma$ such that $\tau$ and
-$\sigma$ are unifiable.  If the unification of $\tau$ and $\sigma$
-instantiates any type unknowns in $\tau$, these instantiations
-are recorded for application to the rule.
-\end{itemize}
-Types are instantiated before terms are.  Because type instantiations are
-inferred from term instantiations, explicit type instantiations are seldom
-necessary --- if \verb$?t$ has type \verb$?'a$, then the instantiation list
-\texttt{[("'a","bool"), ("t","True")]} may be simplified to
-\texttt{[("t","True")]}.  Type unknowns in the proof state may cause
-failure because the tactics cannot instantiate them.
-
-The first four instantiation tactics act on a given subgoal.  Terms in the
-instantiations are type-checked in the context of that subgoal --- in
-particular, they may refer to that subgoal's parameters.  Any unknowns in
-the terms receive subscripts and are lifted over the parameters; thus, you
-may not refer to unknowns in the subgoal.
-
-\begin{ttdescription}
-\item[\ttindexbold{res_inst_tac} {\it insts} {\it thm} {\it i}]
-instantiates the rule {\it thm} with the instantiations {\it insts}, as
-described above, and then performs resolution on subgoal~$i$.  Resolution
-typically causes further instantiations; you need not give explicit
-instantiations for every unknown in the rule.
-
-\item[\ttindexbold{eres_inst_tac}] 
-is like {\tt res_inst_tac}, but performs elim-resolution.
-
-\item[\ttindexbold{dres_inst_tac}] 
-is like {\tt res_inst_tac}, but performs destruct-resolution.
-
-\item[\ttindexbold{forw_inst_tac}] 
-is like {\tt dres_inst_tac} except that the selected assumption is not
-deleted.  It applies the instantiated rule to an assumption, adding the
-result as a new assumption.
-
-\item[\ttindexbold{instantiate_tac} {\it insts}] 
-instantiates unknowns in the proof state. This affects the main goal as 
-well as all subgoals.
-\end{ttdescription}
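-
-For example, assuming the object-logic provides the existential
-introduction rule \texttt{exI} with conclusion \texttt{EX x. P(x)}, the
-witness can be supplied explicitly (a sketch):
-\begin{ttbox}
-by (res_inst_tac [("x","a")] exI 1);
-\end{ttbox}
-Here \texttt{a} must be a term that is legal in the context of subgoal~1.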
-
+\index{tactics|(}
 
 \section{Other basic tactics}
-\subsection{Tactic shortcuts}
-\index{shortcuts!for tactics}
-\index{tactics!resolution}\index{tactics!assumption}
-\index{tactics!meta-rewriting}
-\begin{ttbox} 
-rtac     :      thm ->        int -> tactic
-etac     :      thm ->        int -> tactic
-dtac     :      thm ->        int -> tactic
-ftac     :      thm ->        int -> tactic
-atac     :                    int -> tactic
-eatac    :      thm -> int -> int -> tactic
-datac    :      thm -> int -> int -> tactic
-fatac    :      thm -> int -> int -> tactic
-ares_tac :      thm list   -> int -> tactic
-rewtac   :      thm ->               tactic
-\end{ttbox}
-These abbreviate common uses of tactics.
-\begin{ttdescription}
-\item[\ttindexbold{rtac} {\it thm} {\it i}] 
-abbreviates \hbox{\tt resolve_tac [{\it thm}] {\it i}}, doing resolution.
-
-\item[\ttindexbold{etac} {\it thm} {\it i}] 
-abbreviates \hbox{\tt eresolve_tac [{\it thm}] {\it i}}, doing elim-resolution.
-
-\item[\ttindexbold{dtac} {\it thm} {\it i}] 
-abbreviates \hbox{\tt dresolve_tac [{\it thm}] {\it i}}, doing
-destruct-resolution.
-
-\item[\ttindexbold{ftac} {\it thm} {\it i}] 
-abbreviates \hbox{\tt forward_tac [{\it thm}] {\it i}}, doing
-destruct-resolution without deleting the assumption.
-
-\item[\ttindexbold{atac} {\it i}] 
-abbreviates \hbox{\tt assume_tac {\it i}}, doing proof by assumption.
-
-\item[\ttindexbold{eatac} {\it thm} {\it j} {\it i}] 
-performs \hbox{\tt etac {\it thm}} and then {\it j} times \texttt{atac}, 
-solving additionally {\it j}~premises of the rule {\it thm} by assumption.
-
-\item[\ttindexbold{datac} {\it thm} {\it j} {\it i}] 
-performs \hbox{\tt dtac {\it thm}} and then {\it j} times \texttt{atac}, 
-solving additionally {\it j}~premises of the rule {\it thm} by assumption.
-
-\item[\ttindexbold{fatac} {\it thm} {\it j} {\it i}] 
-performs \hbox{\tt ftac {\it thm}} and then {\it j} times \texttt{atac}, 
-solving additionally {\it j}~premises of the rule {\it thm} by assumption.
-
-\item[\ttindexbold{ares_tac} {\it thms} {\it i}] 
-tries proof by assumption and resolution; it abbreviates
-\begin{ttbox}
-assume_tac {\it i} ORELSE resolve_tac {\it thms} {\it i}
-\end{ttbox}
-
-\item[\ttindexbold{rewtac} {\it def}] 
-abbreviates \hbox{\tt rewrite_goals_tac [{\it def}]}, unfolding a definition.
-\end{ttdescription}
-
 
 \subsection{Inserting premises and facts}\label{cut_facts_tac}
 \index{tactics!for inserting facts}\index{assumptions!inserting}
@@ -351,52 +124,6 @@
 
 \section{Obscure tactics}
 
-\subsection{Renaming parameters in a goal} \index{parameters!renaming}
-\begin{ttbox} 
-rename_tac        : string -> int -> tactic
-rename_last_tac   : string -> string list -> int -> tactic
-Logic.set_rename_prefix : string -> unit
-Logic.auto_rename       : bool ref      \hfill{\bf initially false}
-\end{ttbox}
-When creating a parameter, Isabelle chooses its name by matching variable
-names via the object-rule.  Given the rule $(\forall I)$ formalized as
-$\left(\Forall x. P(x)\right) \Imp \forall x.P(x)$, Isabelle will note that
-the $\Forall$-bound variable in the premise has the same name as the
-$\forall$-bound variable in the conclusion.  
-
-Sometimes there is insufficient information and Isabelle chooses an
-arbitrary name.  The renaming tactics let you override Isabelle's choice.
-Because renaming parameters has no logical effect on the proof state, the
-{\tt by} command prints the message {\tt Warning:\ same as previous
-level}.
-
-Alternatively, you can suppress the naming mechanism described above and
-have Isabelle generate uniform names for parameters.  These names have the
-form $p${\tt a}, $p${\tt b}, $p${\tt c},~\ldots, where $p$ is any desired
-prefix.  They are ugly but predictable.
-
-\begin{ttdescription}
-\item[\ttindexbold{rename_tac} {\it str} {\it i}] 
-interprets the string {\it str} as a series of blank-separated variable
-names, and uses them to rename the parameters of subgoal~$i$.  The names
-must be distinct.  If there are fewer names than parameters, then the
-tactic renames the innermost parameters and may modify the remaining ones
-to ensure that all the parameters are distinct.
-
-\item[\ttindexbold{rename_last_tac} {\it prefix} {\it suffixes} {\it i}] 
-generates a list of names by attaching each of the {\it suffixes\/} to the 
-{\it prefix}.  It is intended for coding structural induction tactics,
-where several of the new parameters should have related names.
-
-\item[\ttindexbold{Logic.set_rename_prefix} {\it prefix};] 
-sets the prefix for uniform renaming to~{\it prefix}.  The default prefix
-is {\tt"k"}.
-
-\item[set \ttindexbold{Logic.auto_rename};] 
-makes Isabelle generate uniform names for parameters. 
-\end{ttdescription}
-
-
 \subsection{Manipulating assumptions}
 \index{assumptions!rotating}
 \begin{ttbox} 
@@ -594,142 +321,6 @@
 is no longer than {\it limit}.
 \end{ttdescription}
 
-
-\section{Programming tools for proof strategies}
-Do not consider using the primitives discussed in this section unless you
-really need to code tactics from scratch.
-
-\subsection{Operations on tactics}
-\index{tactics!primitives for coding} A tactic maps theorems to sequences of
-theorems.  The type constructor for sequences (lazy lists) is called
-\mltydx{Seq.seq}.  To simplify the types of tactics and tacticals,
-Isabelle defines a type abbreviation:
-\begin{ttbox} 
-type tactic = thm -> thm Seq.seq
-\end{ttbox} 
-The following operations provide means for coding tactics in a clean style.
-\begin{ttbox} 
-PRIMITIVE :                  (thm -> thm) -> tactic  
-SUBGOAL   : ((term*int) -> tactic) -> int -> tactic
-\end{ttbox} 
-\begin{ttdescription}
-\item[\ttindexbold{PRIMITIVE} $f$] packages the meta-rule~$f$ as a tactic that
-  applies $f$ to the proof state and returns the result as a one-element
-  sequence.  If $f$ raises an exception, then the tactic's result is the empty
-  sequence.
-
-\item[\ttindexbold{SUBGOAL} $f$ $i$] 
-extracts subgoal~$i$ from the proof state as a term~$t$, and computes a
-tactic by calling~$f(t,i)$.  It applies the resulting tactic to the same
-state.  The tactic body is expressed using tactics and tacticals, but may
-peek at a particular subgoal:
-\begin{ttbox} 
-SUBGOAL (fn (t,i) => {\it tactic-valued expression})
-\end{ttbox} 
-\end{ttdescription}
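-
-Two tiny sketches follow; the tactics below are hypothetical and do nothing
-useful by themselves:
-\begin{ttbox}
-val id_tac   = PRIMITIVE (fn st => st);   (*returns the state unchanged*)
-val peek_tac = SUBGOAL (fn (t, i) =>      (*t may be inspected here*)
-                          assume_tac i);
-\end{ttbox}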
-
-
-\subsection{Tracing}
-\index{tactics!tracing}
-\index{tracing!of tactics}
-\begin{ttbox} 
-pause_tac: tactic
-print_tac: string -> tactic
-\end{ttbox}
-These tactics print tracing information when they are applied to a proof
-state.  Their output may be difficult to interpret.  Note that certain of
-the searching tacticals, such as {\tt REPEAT}, have built-in tracing
-options.
-\begin{ttdescription}
-\item[\ttindexbold{pause_tac}] 
-prints {\footnotesize\tt** Press RETURN to continue:} and then reads a line
-from the terminal.  If this line is blank then it returns the proof state
-unchanged; otherwise it fails (which may terminate a repetition).
-
-\item[\ttindexbold{print_tac}~$msg$] 
-returns the proof state unchanged, with the side effect of printing it at
-the terminal.
-\end{ttdescription}
-
-
-\section{*Sequences}
-\index{sequences (lazy lists)|bold}
-The module {\tt Seq} declares a type of lazy lists.  It uses
-Isabelle's type \mltydx{option} to represent the possible presence
-(\ttindexbold{Some}) or absence (\ttindexbold{None}) of
-a value:
-\begin{ttbox}
-datatype 'a option = None  |  Some of 'a;
-\end{ttbox}
-The {\tt Seq} structure is supposed to be accessed via fully qualified
-names and should not be \texttt{open}ed.
-
-\subsection{Basic operations on sequences}
-\begin{ttbox} 
-Seq.empty   : 'a seq
-Seq.make    : (unit -> ('a * 'a seq) option) -> 'a seq
-Seq.single  : 'a -> 'a seq
-Seq.pull    : 'a seq -> ('a * 'a seq) option
-\end{ttbox}
-\begin{ttdescription}
-\item[Seq.empty] is the empty sequence.
-
-\item[\tt Seq.make (fn () => Some ($x$, $xq$))] constructs the
-  sequence with head~$x$ and tail~$xq$, neither of which is evaluated.
-
-\item[Seq.single $x$] 
-constructs the sequence containing the single element~$x$.
-
-\item[Seq.pull $xq$] returns {\tt None} if the sequence is empty and
-  {\tt Some ($x$, $xq'$)} if the sequence has head~$x$ and tail~$xq'$.
-  Warning: calling \hbox{Seq.pull $xq$} again will {\it recompute\/}
-  the value of~$x$; it is not stored!
-\end{ttdescription}
-
-
-\subsection{Converting between sequences and lists}
-\begin{ttbox} 
-Seq.chop    : int * 'a seq -> 'a list * 'a seq
-Seq.list_of : 'a seq -> 'a list
-Seq.of_list : 'a list -> 'a seq
-\end{ttbox}
-\begin{ttdescription}
-\item[Seq.chop ($n$, $xq$)] returns the first~$n$ elements of~$xq$ as a
-  list, paired with the remaining elements of~$xq$.  If $xq$ has fewer
-  than~$n$ elements, then so will the list.
-  
-\item[Seq.list_of $xq$] returns the elements of~$xq$, which must be
-  finite, as a list.
-  
-\item[Seq.of_list $xs$] creates a sequence containing the elements
-  of~$xs$.
-\end{ttdescription}
-
-
-\subsection{Combining sequences}
-\begin{ttbox} 
-Seq.append      : 'a seq * 'a seq -> 'a seq
-Seq.interleave  : 'a seq * 'a seq -> 'a seq
-Seq.flat        : 'a seq seq -> 'a seq
-Seq.map         : ('a -> 'b) -> 'a seq -> 'b seq
-Seq.filter      : ('a -> bool) -> 'a seq -> 'a seq
-\end{ttbox} 
-\begin{ttdescription}
-\item[Seq.append ($xq$, $yq$)] concatenates $xq$ with $yq$: the result
-  contains the elements of~$xq$ followed by those of~$yq$.
-  
-\item[Seq.interleave ($xq$, $yq$)] joins $xq$ with $yq$ by
-  interleaving their elements.  The result contains all the elements
-  of the sequences, even if both are infinite.
-  
-\item[Seq.flat $xqq$] concatenates a sequence of sequences.
-  
-\item[Seq.map $f$ $xq$] applies $f$ to every element
-  of~$xq=x@1,x@2,\ldots$, yielding the sequence $f(x@1),f(x@2),\ldots$.
-  
-\item[Seq.filter $p$ $xq$] returns the sequence consisting of all
-  elements~$x$ of~$xq$ such that $p(x)$ is {\tt true}.
-\end{ttdescription}
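-
-For instance (a sketch), filtering and mapping combine as expected on a
-finite sequence:
-\begin{ttbox}
-Seq.list_of (Seq.map (fn x => x * x)
-  (Seq.filter (fn x => x mod 2 = 0) (Seq.of_list [1, 2, 3, 4])));
-{\out [4, 16] : int list}
-\end{ttbox}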
-
 \index{tactics|)}
 
 
--- a/doc-src/Ref/tctical.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/tctical.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
 \chapter{Tacticals}
 \index{tacticals|(}
 Tacticals are operations on tactics.  Their implementation makes use of
--- a/doc-src/Ref/theories.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/theories.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,216 +1,6 @@
-
-%% $Id$
 
 \chapter{Theories, Terms and Types} \label{theories}
-\index{theories|(}\index{signatures|bold}
-\index{reading!axioms|see{\texttt{assume_ax}}} Theories organize the syntax,
-declarations and axioms of a mathematical development.  They are built,
-starting from the Pure or CPure theory, by extending and merging existing
-theories.  They have the \ML\ type \mltydx{theory}.  Theory operations signal
-errors by raising exception \xdx{THEORY}, returning a message and a list of
-theories.
-
-Signatures, which contain information about sorts, types, constants and
-syntax, have the \ML\ type~\mltydx{Sign.sg}.  For identification, each
-signature carries a unique list of \bfindex{stamps}, which are \ML\
-references to strings.  The strings serve as human-readable names; the
-references serve as unique identifiers.  Each primitive signature has a
-single stamp.  When two signatures are merged, their lists of stamps are
-also merged.  Every theory carries a unique signature.
-
-Terms and types are the underlying representation of logical syntax.  Their
-\ML\ definitions are irrelevant to naive Isabelle users.  Programmers who
-wish to extend Isabelle may need to know such details, say to code a tactic
-that looks for subgoals of a particular form.  Terms and types may be
-`certified' to be well-formed with respect to a given signature.
-
-
-\section{Defining theories}\label{sec:ref-defining-theories}
-
-Theories are defined via theory files $name$\texttt{.thy} (there are also
-\ML-level interfaces which are only intended for people building advanced
-theory definition packages).  Appendix~\ref{app:TheorySyntax} presents the
-concrete syntax for theory files; here follows an explanation of the
-constituent parts.
-\begin{description}
-\item[{\it theoryDef}] is the full definition.  The new theory is called $id$.
-  It is the union of the named \textbf{parent
-    theories}\indexbold{theories!parent}, possibly extended with new
-  components.  \thydx{Pure} and \thydx{CPure} are the basic theories, which
-  contain only the meta-logic.  They differ just in their concrete syntax for
-  function applications.
-  
-  The new theory begins as a merge of its parents.  This merge fails if
-  the parents involve different versions of the same theory:
-  \begin{ttbox}
-    Attempt to merge different versions of theories: "\(T@1\)", \(\ldots\), "\(T@n\)"
-  \end{ttbox}
-  This error may especially occur when a theory is redeclared --- say to
-  change an inappropriate definition --- and bindings to old versions persist.
-  Isabelle ensures that old and new theories of the same name are not involved
-  in a proof.
-
-\item[$classes$]
-  is a series of class declarations.  Declaring {\tt$id$ < $id@1$ \dots\
-    $id@n$} makes $id$ a subclass of the existing classes $id@1\dots
-  id@n$.  This rules out cyclic class structures.  Isabelle automatically
-  computes the transitive closure of subclass hierarchies; it is not
-  necessary to declare \texttt{c < e} in addition to \texttt{c < d} and \texttt{d <
-    e}.
-
-\item[$default$]
-  introduces $sort$ as the new default sort for type variables.  This applies
-  to unconstrained type variables in an input string but not to type
-  variables created internally.  If omitted, the default sort is the listwise
-  union of the default sorts of the parent theories (i.e.\ their logical
-  intersection).
-  
-\item[$sort$] is a finite set of classes.  A single class $id$ abbreviates the
-  sort $\{id\}$.
-
-\item[$types$]
-  is a series of type declarations.  Each declares a new type constructor
-  or type synonym.  An $n$-place type constructor is specified by
-  $(\alpha@1,\dots,\alpha@n)name$, where the type variables serve only to
-  indicate the number~$n$.
-
-  A \textbf{type synonym}\indexbold{type synonyms} is an abbreviation
-  $(\alpha@1,\dots,\alpha@n)name = \tau$, where $name$ and $\tau$ can
-  be strings.
-
-\item[$infix$]
-  declares a type or constant to be an infix operator having priority $nat$
-  and associating to the left (\texttt{infixl}) or right (\texttt{infixr}).
-  Only 2-place type constructors can have infix status; an example is {\tt
-  ('a,'b)~"*"~(infixr~20)}, which may express binary product types.
-
-\item[$arities$] is a series of type arity declarations.  Each assigns
-  arities to type constructors.  The $name$ must be an existing type
-  constructor, which is given the additional arity $arity$.
-  
-\item[$nonterminals$]\index{*nonterminal symbols} declares purely
-  syntactic types to be used as nonterminal symbols of the context
-  free grammar.
-
-\item[$consts$] is a series of constant declarations.  Each new
-  constant $name$ is given the specified type.  The optional $mixfix$
-  annotations may attach concrete syntax to the constant.
-  
-\item[$syntax$] \index{*syntax section}\index{print mode} is a variant
-  of $consts$ which adds just syntax without actually declaring
-  logical constants.  This gives full control over a theory's context
-  free grammar.  The optional $mode$ specifies the print mode where the
-  mixfix productions should be added.  If there is no \texttt{output}
-  option given, all productions are also added to the input syntax
-  (regardless of the print mode).
-
-\item[$mixfix$] \index{mixfix declarations}
-  annotations can take three forms:
-  \begin{itemize}
-  \item A mixfix template given as a $string$ of the form
-    {\tt"}\dots{\tt\_}\dots{\tt\_}\dots{\tt"} where the $i$-th underscore
-    indicates the position where the $i$-th argument should go.  The list
-    of numbers gives the priority of each argument.  The final number gives
-    the priority of the whole construct.
-
-  \item A constant $f$ of type $\tau@1\To(\tau@2\To\tau)$ can be given {\bf
-    infix} status.
-
-  \item A constant $f$ of type $(\tau@1\To\tau@2)\To\tau$ can be given {\bf
-    binder} status.  The declaration \texttt{binder} $\cal Q$ $p$ causes
-  ${\cal Q}\,x.F(x)$ to be treated
-  like $f(F)$, where $p$ is the priority.
-  \end{itemize}
-
-\item[$trans$]
-  specifies syntactic translation rules (macros).  There are three forms:
-  parse rules (\texttt{=>}), print rules (\texttt{<=}), and parse/print rules ({\tt
-  ==}).
-
-\item[$rules$]
-  is a series of rule declarations.  Each has a name $id$ and the formula is
-  given by the $string$.  Rule names must be distinct within any single
-  theory.
-
-\item[$defs$] is a series of definitions.  They are just like $rules$, except
-  that every $string$ must be a definition (see below for details).
-
-\item[$constdefs$] combines the declaration of constants and their
-  definition.  The first $string$ is the type, the second the definition.
-  
-\item[$axclass$] \index{*axclass section} defines an \rmindex{axiomatic type
-    class} \cite{Wenzel:1997:TPHOL} as the intersection of existing classes,
-  with additional axioms holding.  Class axioms may not contain more than one
-  type variable.  The class axioms (with implicit sort constraints added) are
-  bound to the given names.  Furthermore a class introduction rule is
-  generated, which is automatically employed by $instance$ to prove
-  instantiations of this class.
-  
-\item[$instance$] \index{*instance section} proves class inclusions or
-  type arities at the logical level and then transfers these to the
-  type signature.  The instantiation is proven and checked properly.
-  The user has to supply sufficient witness information: theorems
-  ($longident$), axioms ($string$), or even arbitrary \ML{} tactic
-  code $verbatim$.
-
-\item[$oracle$] links the theory to a trusted external reasoner.  It is
-  allowed to create theorems, but each theorem carries a proof object
-  describing the oracle invocation.  See \S\ref{sec:oracles} for details.
-  
-\item[$local$, $global$] change the current name declaration mode.
-  Initially, theories start in $local$ mode, causing all names of
-  types, constants, axioms etc.\ to be automatically qualified by the
-  theory name.  Changing this to $global$ causes all names to be
-  declared as short base names only.
-  
-  The $local$ and $global$ declarations act like switches, affecting
-  all following theory sections until changed again explicitly.  Also
-  note that the final state at the end of the theory will persist.  In
-  particular, this determines how the names of theorems stored later
-  on are handled.
-  
-\item[$setup$]\index{*setup!theory} applies a list of ML functions to
-  the theory.  The argument should denote a value of type
-  \texttt{(theory -> theory) list}.  Typically, ML packages are
-  initialized in this way.
-
-\item[$ml$] \index{*ML section}
-  consists of \ML\ code, typically for parse and print translation functions.
-\end{description}
-%
-Chapters~\ref{Defining-Logics} and \ref{chap:syntax} explain mixfix
-declarations, translation rules and the \texttt{ML} section in more detail.
-
-
-\subsection{*Classes and arities}
-\index{classes!context conditions}\index{arities!context conditions}
-
-In order to guarantee principal types~\cite{nipkow-prehofer},
-arity declarations must obey two conditions:
-\begin{itemize}
-\item There must not be any two declarations $ty :: (\vec{r})c$ and
-  $ty :: (\vec{s})c$ with $\vec{r} \neq \vec{s}$.  For example, this
-  excludes the following:
-\begin{ttbox}
-arities
-  foo :: (\{logic{\}}) logic
-  foo :: (\{{\}})logic
-\end{ttbox}
-
-\item If there are two declarations $ty :: (s@1,\dots,s@n)c$ and $ty ::
-  (s@1',\dots,s@n')c'$ such that $c' < c$ then $s@i' \preceq s@i$ must hold
-  for $i=1,\dots,n$.  The relationship $\preceq$, defined as
-\[ s' \preceq s \iff \forall c\in s. \exists c'\in s'.~ c'\le c, \]
-expresses that the set of types represented by $s'$ is a subset of the
-set of types represented by $s$.  Assuming $term \preceq logic$, the
-following is forbidden:
-\begin{ttbox}
-arities
-  foo :: (\{logic{\}})logic
-  foo :: (\{{\}})term
-\end{ttbox}
-
-\end{itemize}
-
+\index{theories|(}
 
 \section{The theory loader}\label{sec:more-theories}
 \index{theories!reading}\index{files!reading}
@@ -247,13 +37,6 @@
   dispose a large number of theories at once.  Note that {\ML} bindings to
   theorems etc.\ of removed theories may still persist.
   
-\item[reset \ttindexbold{delete_tmpfiles};] processing theory files usually
-  involves temporary {\ML} files to be created.  By default, these are deleted
-  afterwards.  Resetting the \texttt{delete_tmpfiles} flag inhibits this,
-  leaving the generated code for debugging purposes.  The basic location for
-  temporary files is determined by the \texttt{ISABELLE_TMP} environment
-  variable (which is private to the running Isabelle process and may be
-  retrieved by \ttindex{getenv} from {\ML}).
 \end{ttdescription}
 
 \medskip Theory and {\ML} files are located by skimming through the
@@ -296,224 +79,6 @@
 temporarily appended to the load path, too.
 
 
-\section{Locales}
-\label{Locales}
-
-Locales \cite{kammueller-locales} are a concept of local proof contexts.  They
-are introduced as named syntactic objects within theories and can be
-opened in any descendant theory.
-
-\subsection{Declaring Locales}
-
-A locale is declared in a theory section that starts with the
-keyword \texttt{locale}.  It consists typically of three parts, the
-\texttt{fixes} part, the \texttt{assumes} part, and the \texttt{defines} part.
-Appendix \ref{app:TheorySyntax} presents the full syntax.
-
-\subsubsection{Parts of Locales}
-
-The subsection introduced by the keyword \texttt{fixes} declares the locale
-constants in a way that closely resembles a global \texttt{consts}
-declaration.  In particular, there may be an optional pretty printing syntax
-for the locale constants.
-
-The subsequent \texttt{assumes} part specifies the locale rules.  They are
-defined like \texttt{rules}: by an identifier followed by the rule
-given as a string.  Locale rules admit the statement of local assumptions
-about the locale constants.  The \texttt{assumes} part is optional.  Non-fixed
-variables in locale rules are automatically bound by the universal quantifier
-\texttt{!!} of the meta-logic.
-
-Finally, the \texttt{defines} part introduces the definitions that are
-available in the locale.  Locale constants declared in the \texttt{fixes}
-section are defined using the meta-equality \texttt{==}.  If the
-locale constant is a function then its definition can (as usual) have
-variables on the left-hand side acting as formal parameters; they are
-considered as schematic variables and are automatically generalized by
-universal quantification of the meta-logic.  The right hand side of a
-definition must not contain variables that are not already on the left hand
-side.  In this respect, locale definitions behave like theory-level definitions.
-However, the locale concept realizes \emph{dependent definitions}: any variable
-that is fixed as a locale constant can occur on the right hand side of
-definitions.  For an illustration of these dependent definitions see the
-occurrence of the locale constant \texttt{G} on the right hand side of the
-definitions of the locale \texttt{group} below.  Naturally, definitions can
-already use the syntax of the locale constants in the \texttt{fixes}
-subsection.  The \texttt{defines} part is, as the \texttt{assumes} part,
-optional.
-
-\subsubsection{Example for Definition}
-The concrete syntax of locale definitions is demonstrated by example below.
-
-Locale \texttt{group} assumes the definition of groups in a theory
-file\footnote{This and other examples are from \texttt{HOL/ex}.}.  A locale
-defining a convenient proof environment for group related proofs may be
-added to the theory as follows:
-\begin{ttbox}
-  locale group =
-    fixes 
-      G         :: "'a grouptype"
-      e         :: "'a"
-      binop     :: "'a => 'a => 'a"        (infixr "#" 80)
-      inv       :: "'a => 'a"              ("i(_)" [90] 91)
-    assumes
-      Group_G   "G: Group"
-    defines
-      e_def     "e == unit G"
-      binop_def "x # y == bin_op G x y"
-      inv_def   "i(x) == inverse G x"
-\end{ttbox}
-
-\subsubsection{Polymorphism}
-
-In contrast to polymorphic definitions in theories, the use of the
-same type variable for the declaration of different locale constants in the
-fixes part means \emph{the same} type.  In other words, the scope of the
-polymorphic variables is extended over all constant declarations of a locale.
-In the above example \texttt{'a} refers to the same type which is fixed inside
-the locale.  In an exported theorem (see \S\ref{sec:locale-export}) the
-constants of locale \texttt{group} are polymorphic, yet can only be
-instantiated simultaneously.
-
-\subsubsection{Nested Locales}
-
-A locale can be defined as the extension of a previously defined
-locale.  This operation of extension is optional and is syntactically
-expressed as 
-\begin{ttbox}
-locale foo = bar + ...
-\end{ttbox}
-The locale \texttt{foo} builds on the constants and syntax of the locale {\tt
-bar}.  That is, all contents of the locale \texttt{bar} can be used in
-definitions and rules of the corresponding parts of the locale {\tt
-foo}.  Although locale \texttt{foo} assumes the \texttt{fixes} part of \texttt{bar} it
-does not automatically subsume its rules and definitions.  Normally, one
-expects to use locale \texttt{foo} only if locale \texttt{bar} is already
-active.  These aspects of use and activation of locales are considered in the
-subsequent section.
-
-
-\subsection{Locale Scope}
-
-Locales are by default inactive, but they can be invoked.  The list of
-currently active locales is called \emph{scope}.  The process of activating
-them is called \emph{opening}; the reverse is \emph{closing}.
-
-\subsubsection{Scope}
-The locale scope is part of each theory.  It is a dynamic stack containing
-all active locales at a certain point in an interactive session.
-The scope lives until all locales are explicitly closed.  At one time there
-can be more than one locale open.  The contents of these various active
-locales are all visible in the scope.  In the case of nested locales, for
-example, the nesting is reflected in the scope, which contains the nested
-locales as layers.  To check the state of the scope during a development the
-function \texttt{Print\_scope} may be used.  It displays the names of all open
-locales on the scope.  The function \texttt{print\_locales} applied to a theory
-displays all locales contained in that theory and in addition also the
-current scope.
-
-The scope is manipulated by the commands for opening and closing of locales. 
-
-\subsubsection{Opening}
-Locales can be \emph{opened} at any point during a session where
-we want to prove theorems concerning the locale.  Opening a locale means
-making its contents visible by pushing it onto the scope of the current
-theory.  Inside a scope of opened locales, theorems can use all definitions and
-rules contained in the locales on the scope.  The rules and definitions may
-be accessed individually using the function \ttindex{thm}.  This function is
-applied to the names assigned to locale rules and definitions as
-strings.  The opening command is called \texttt{Open\_locale} and takes the 
-name of the locale to be opened as its argument.
-
-If one opens a locale \texttt{foo} that is defined by extension from locale
-\texttt{bar}, the function \texttt{Open\_locale} checks if locale \texttt{bar}
-is open.  If so, then it just opens \texttt{foo}, if not, then it prints a
-message and opens \texttt{bar} before opening \texttt{foo}.  Naturally, this
-carries on, if \texttt{bar} is again an extension.
-
-\subsubsection{Closing}
-
-\emph{Closing} means to cancel the most recently opened locale, popping it
-off the scope.  Theorems proved during the life cycle of this locale will be disabled,
-unless they have been explicitly exported, as described below.  However, when
-the same locale is opened again these theorems may be used again as well,
-provided that they were saved as theorems in the first place, using
-\texttt{qed} or ML assignment.  The command \texttt{Close\_locale} takes a
-locale name as a string and checks if this locale is actually the topmost
-locale on the scope.  If this is the case, it removes this locale, otherwise
-it prints a warning message and does not change the scope.
-
-\subsubsection{Export of Theorems}
-\label{sec:locale-export}
-
-Export of theorems transports theorems out of the scope of locales.  Locale
-rules that have been used in the proof of an exported theorem inside the
-locale are carried by the exported form of the theorem as its individual
-meta-assumptions.  The locale constants are universally quantified variables
-in these theorems, hence such theorems can be instantiated individually.
-Definitions become unfolded; locale constants that were merely used for
-definitions vanish.  Logically, exporting corresponds to a combined
-application of introduction rules for implication and universal
-quantification.  Exporting forms a kind of normalization of theorems in a
-locale scope.
-
-Because locales can be nested, there are two different forms of export.
-The first one is realized by the function \texttt{export} that
-exports theorems through all layers of opened locales of the scope.  Hence,
-the application of export to a theorem yields a theorem of the global level,
-that is, the current theory context without any local assumptions or
-definitions.
-
-When locales are nested we might want to export a theorem, not to the global
-level of the current theory but just to the previous level.  The other export
-function, \texttt{Export}, transports theorems one level up in the scope; the
-theorem still uses locale constants, definitions and rules of the locales
-underneath.
-
-\subsection{Functions for Locales}
-\label{Syntax}
-\index{locales!functions}
-
-Here is a quick reference list of locale functions.
-\begin{ttbox}
-  Open_locale  : xstring -> unit 
-  Close_locale : xstring -> unit
-  export       :     thm -> thm
-  Export       :     thm -> thm
-  thm          : xstring -> thm
-  Print_scope  :    unit -> unit
-  print_locales:  theory -> unit
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{Open_locale} $xstring$] 
-    opens the locale {\it xstring}, adding it to the scope of the theory of the
-  current context.  If the opened locale is built by extension, the ancestors
-  are opened automatically.
-  
-\item[\ttindexbold{Close_locale} $xstring$] eliminates the locale {\it
-    xstring} from the scope if it is the topmost item on it, otherwise it does
-  not change the scope and produces a warning.
-
-\item[\ttindexbold{export} $thm$] locale definitions become expanded in {\it
-    thm} and locale rules that were used in the proof of {\it thm} become part
-  of its individual assumptions.  This normalization happens with respect to
-  \emph{all open locales} on the scope.
-  
-\item[\ttindexbold{Export} $thm$] works like \texttt{export} but normalizes
-  theorems only up to the previous level of locales on the scope.
-  
-\item[\ttindexbold{thm} $xstring$] applied to the name of a locale definition
-  or rule, returns that definition or rule as a theorem.
-  
-\item[\ttindexbold{Print_scope}()] prints the names of the locales in the
-  current scope of the current theory context.
-  
-\item[\ttindexbold{print_locales} $theory$] prints all locales that are
-  contained in {\it theory} directly or indirectly.  It also displays the
-  current scope similar to \texttt{Print\_scope}.
-\end{ttdescription}
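-
-For instance, a session using the \texttt{group} locale declared
-earlier might be sketched as follows, where \texttt{my\_thm} stands for
-some theorem proved while the locale was open:
-\begin{ttbox}
-Open_locale "group";
-val Group_G = thm "Group_G";
-  {\it ...proofs using the locale's rules and definitions...}
-val my_thm' = export my_thm;
-Close_locale "group";
-\end{ttbox}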
-
-
 \section{Basic operations on theories}\label{BasicOperationsOnTheories}
 
 \subsection{*Theory inclusion}
@@ -905,111 +470,6 @@
 \end{ttdescription}
 
 
-\section{Oracles: calling trusted external reasoners}
-\label{sec:oracles}
-\index{oracles|(}
-
-Oracles allow Isabelle to take advantage of external reasoners such as
-arithmetic decision procedures, model checkers, fast tautology checkers or
-computer algebra systems.  Invoked as an oracle, an external reasoner can
-create arbitrary Isabelle theorems.  It is your responsibility to ensure that
-the external reasoner is as trustworthy as your application requires.
-Isabelle's proof objects~(\S\ref{sec:proofObjects}) record how each theorem
-depends upon oracle calls.
-
-\begin{ttbox}
-invoke_oracle     : theory -> xstring -> Sign.sg * object -> thm
-Theory.add_oracle : bstring * (Sign.sg * object -> term) -> theory 
-                    -> theory
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{invoke_oracle} $thy$ $name$ ($sign$, $data$)]
-  invokes the oracle $name$ of theory $thy$ passing the information
-  contained in the exception value $data$ and creating a theorem
-  having signature $sign$.  Note that type \ttindex{object} is just an
-  abbreviation for \texttt{exn}.  Errors arise if $thy$ does not have
-  an oracle called $name$, if the oracle rejects its arguments or if
-  its result is ill-typed.
-  
-\item[\ttindexbold{Theory.add_oracle} $name$ $fun$ $thy$] extends
-  $thy$ by oracle $fun$ called $name$.  It is seldom called
-  explicitly, as there is concrete syntax for oracles in theory files.
-\end{ttdescription}
-
-A curious feature of {\ML} exceptions is that they are ordinary constructors.
-The {\ML} type \texttt{exn} is a datatype that can be extended at any time.  (See
-my {\em {ML} for the Working Programmer}~\cite{paulson-ml2}, especially
-page~136.)  The oracle mechanism takes advantage of this to allow an oracle to
-take any information whatever.
-
-There must be some way of invoking the external reasoner from \ML, either
-because it is coded in {\ML} or via an operating system interface.  Isabelle
-expects the {\ML} function to take two arguments: a signature and an
-exception object.
-\begin{itemize}
-\item The signature will typically be that of a descendant of the theory
-  declaring the oracle.  The oracle will use it to distinguish constants from
-  variables, etc., and it will be attached to the generated theorems.
-
-\item The exception is used to pass arbitrary information to the oracle.  This
-  information must contain a full description of the problem to be solved by
-  the external reasoner, including any additional information that might be
-  required.  The oracle may raise the exception to indicate that it cannot
-  solve the specified problem.
-\end{itemize}
-
-A trivial example is provided in theory \texttt{FOL/ex/IffOracle}.  This
-oracle generates tautologies of the form $P\bimp\cdots\bimp P$, with
-an even number of $P$s.
-
-The \texttt{ML} section of \texttt{IffOracle.thy} begins by declaring
-a few auxiliary functions (suppressed below) for creating the
-tautologies.  Then it declares a new exception constructor for the
-information required by the oracle: here, just an integer. It finally
-defines the oracle function itself.
-\begin{ttbox}
-exception IffOracleExn of int;\medskip
-fun mk_iff_oracle (sign, IffOracleExn n) =
-  if n > 0 andalso n mod 2 = 0
-  then Trueprop \$ mk_iff n
-  else raise IffOracleExn n;
-\end{ttbox}
-Observe the function's two arguments, the signature \texttt{sign} and the
-exception given as a pattern.  The function checks its argument for
-validity.  If $n$ is positive and even then it creates a tautology
-containing $n$ occurrences of~$P$.  Otherwise it signals an error by
-raising its own exception (reusing that exception is merely a convenience).  Errors may be
-signalled by other means, such as returning the theorem \texttt{True}.
-Please ensure that the oracle's result is correctly typed; Isabelle
-will reject ill-typed theorems by raising a cryptic exception at top
-level.
-
-The \texttt{oracle} section of \texttt{IffOracle.thy} installs the above
-\texttt{ML} function as follows:
-\begin{ttbox}
-IffOracle = FOL +\medskip
-oracle
-  iff = mk_iff_oracle\medskip
-end
-\end{ttbox}
-
-Now in \texttt{IffOracle.ML} we first define a wrapper for invoking
-the oracle:
-\begin{ttbox}
-fun iff_oracle n = invoke_oracle IffOracle.thy "iff"
-                      (sign_of IffOracle.thy, IffOracleExn n);
-\end{ttbox}
-
-Here are some example applications of the \texttt{iff} oracle.  An
-argument of 10 is allowed, but one of 5 is forbidden:
-\begin{ttbox}
-iff_oracle 10;
-{\out  "P <-> P <-> P <-> P <-> P <-> P <-> P <-> P <-> P <-> P" : thm}
-iff_oracle 5;
-{\out Exception- IffOracleExn 5 raised}
-\end{ttbox}
-
-\index{oracles|)}
 \index{theories|)}
 
 
--- a/doc-src/Ref/theory-syntax.tex	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,178 +0,0 @@
-%% $Id$
-
-\appendix
-\newlinechar=-1  %mathsing.sty sets \newlinechar=`\|, which would cause mayhem
-
-\chapter{Syntax of Isabelle Theories}\label{app:TheorySyntax}
-
-Below we present the full syntax of theory definition files as provided by
-Pure Isabelle --- object-logics may add their own sections.
-\S\ref{sec:ref-defining-theories} explains the meanings of these constructs.
-The syntax obeys the following conventions:
-\begin{itemize}
-\item {\tt Typewriter font} denotes terminal symbols.
-  
-\item $id$, $tid$, $nat$, $string$ and $longident$ are the lexical
-  classes of identifiers, type identifiers, natural numbers, quoted
-  strings (without the need for \verb$\$\dots\verb$\$ between lines)
-  and long qualified \ML{} identifiers.
-  The categories $id$, $tid$, $nat$ are fully defined in \iflabelundefined{Defining-Logics}%
-  {{\it The Isabelle Reference Manual}, chapter `Defining Logics'}%
-  {\S\ref{Defining-Logics}}.
-  
-\item $text$ is all text from the current position to the end of file,
-  $verbatim$ is any text enclosed in \verb.{|.\dots\verb.|}.
-  
-\item Comments in theories take the form {\tt (*}\dots{\tt*)} and may
-  be nested, just as in \ML.
-\end{itemize}
-
-\begin{rail}
-
-theoryDef : id '=' (name + '+') ('+' extension | ())
-          ;
-
-name : id | string
-     ;
-
-extension : (section +) 'end' ( () | ml )
-          ;
-
-section : classes
-        | default
-        | types
-        | arities
-        | nonterminals
-        | consts
-        | syntax
-        | trans
-        | defs
-        | constdefs
-        | rules
-        | axclass
-        | instance
-        | oracle
-        | locale
-        | local
-        | global
-        | setup
-        ;
-
-classes : 'classes' ( classDecl + )
-        ;
-
-classDecl : (id (() | '<' (id + ',')))
-          ;
-
-default : 'default' sort 
-        ;
-
-sort :  id
-     | lbrace (id * ',') rbrace
-     ;
-
-types : 'types' ( ( typeDecl ( () | '(' infix ')' ) ) + )
-      ;
-
-infix : ( 'infixr' | 'infixl' ) (() | string) nat
-      ;
-
-typeDecl : typevarlist name
-           ( () | '=' ( string | type ) );
-
-typevarlist : () | tid | '(' ( tid + ',' ) ')';
-
-type : simpleType | '(' type ')' | type '=>' type |
-       '[' ( type + "," ) ']' '=>' type;
-
-simpleType: id | ( tid ( () | '::' id ) ) |
-            '(' ( type + "," ) ')' id | simpleType id
-          ;
-
-arities : 'arities' ((name + ',') '::' arity +)
-        ;
-
-arity : ( () | '(' (sort + ',') ')' ) sort
-      ;
-
-nonterminals : 'nonterminals' (name+)
-             ;
-
-consts : 'consts' ( mixfixConstDecl + )
-       ;
-
-syntax : 'syntax' (() | mode) ( mixfixConstDecl + );
-
-mode : '(' name (() | 'output') ')'
-     ;
-
-mixfixConstDecl : constDecl (() | ( '(' mixfix ')' ))
-                ;
-
-constDecl : ( name + ',') '::' (string | type);
-
-mixfix :  string ( () | ( () | ('[' (nat + ',') ']')) nat )
-       |  infix
-       | 'binder' string nat ;
-
-trans : 'translations' ( pat ( '==' | '=>' | '<=' ) pat + )
-      ;
-
-pat : ( () | ( '(' id ')' ) ) string;
-
-rules : 'rules' (( id string ) + )
-      ;
-
-defs : 'defs' (( id string ) + )
-     ;
-
-constdefs : 'constdefs' (name '::' (string | type) (() | mixfix) string +)
-          ;
-
-axclass : 'axclass' classDecl (() | ( id string ) +)
-        ;
-
-instance : 'instance' ( name '<' name | name '::' arity) witness
-         ;
-
-witness : (() | '(' ((string | id | longident) + ',') ')') (() | verbatim)
-        ;
-
-locale : 'locale' name '=' ( () | name '+' ) localeBody
-       ;
-
-localeBody : localeConsts ( () | localeAsms ) ( () | localeDefs )
-       ;
-
-localeConsts: ( 'fixes' ( ( (name '::' ( string | type )) ( () | '(' mixfix ')' ) ) + ) )
-       ;
-
-
-localeAsms:    ( 'assumes' ( ( id string ) + ) )
-       ;
-
-localeDefs:   ( 'defines' ( ( id string ) +) )
-       ;
-
-oracle : 'oracle' name '=' name
-       ;
-
-local : 'local'
-      ;
-
-global : 'global'
-       ;
-
-setup : 'setup' (id | longident)
-      ;
-
-ml : 'ML' text
-   ;
-
-\end{rail}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End: 
--- a/doc-src/Ref/thm.tex	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/Ref/thm.tex	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
 \chapter{Theorems and Forward Proof}
 \index{theorems|(}
 
@@ -13,19 +13,6 @@
 ignore such complexities --- and skip all but the first section of
 this chapter.
 
-The theorem operations do not print error messages.  Instead, they raise
-exception~\xdx{THM}\@.  Use \ttindex{print_exn} to display
-exceptions nicely:
-\begin{ttbox} 
-allI RS mp  handle e => print_exn e;
-{\out Exception THM raised:}
-{\out RSN: no unifiers -- premise 1}
-{\out (!!x. ?P(x)) ==> ALL x. ?P(x)}
-{\out [| ?P --> ?Q; ?P |] ==> ?Q}
-{\out}
-{\out uncaught exception THM}
-\end{ttbox}
-
 
 \section{Basic operations on theorems}
 \subsection{Pretty-printing a theorem}
--- a/doc-src/manual.bib	Mon Mar 02 08:15:32 2009 +0100
+++ b/doc-src/manual.bib	Mon Mar 02 08:15:54 2009 +0100
@@ -183,6 +183,16 @@
                   {F}ormal-{L}ogic {E}ngineering},
   crossref =     {tphols99}}
 
+
+@InProceedings{Bezem-Coquand:2005,
+  author = 	 {M.A. Bezem and T. Coquand},
+  title = 	 {Automating {Coherent Logic}},
+  booktitle = {LPAR-12},
+  editor = 	 {G. Sutcliffe and A. Voronkov},
+  volume = 	 3835,
+  series = 	 LNCS,
+  year = 	 2005,
+  publisher = Springer}
+
 @book{Bird-Wadler,author="Richard Bird and Philip Wadler",
 title="Introduction to Functional Programming",publisher=PH,year=1988}
 
--- a/etc/settings	Mon Mar 02 08:15:32 2009 +0100
+++ b/etc/settings	Mon Mar 02 08:15:54 2009 +0100
@@ -60,12 +60,6 @@
 #ML_OPTIONS=""
 #ML_PLATFORM=""
 
-# Alice 1.4 (experimental!)
-#ML_SYSTEM=alice
-#ML_HOME="/usr/local/alice/bin"
-#ML_OPTIONS=""
-#ML_PLATFORM=""
-
 
 ###
 ### JVM components (Scala or Java)
--- a/src/FOL/IFOL.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/FOL/IFOL.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      FOL/IFOL.thy
-    ID:         $Id$
     Author:     Lawrence C Paulson and Markus Wenzel
 *)
 
@@ -14,9 +13,10 @@
   "~~/src/Tools/IsaPlanner/isand.ML"
   "~~/src/Tools/IsaPlanner/rw_tools.ML"
   "~~/src/Tools/IsaPlanner/rw_inst.ML"
-  "~~/src/Provers/eqsubst.ML"
+  "~~/src/Tools/eqsubst.ML"
   "~~/src/Provers/quantifier1.ML"
-  "~~/src/Provers/project_rule.ML"
+  "~~/src/Tools/intuitionistic.ML"
+  "~~/src/Tools/project_rule.ML"
   "~~/src/Tools/atomize_elim.ML"
   ("fologic.ML")
   ("hypsubstdata.ML")
@@ -610,6 +610,8 @@
 
 subsection {* Intuitionistic Reasoning *}
 
+setup {* Intuitionistic.method_setup "iprover" *}
+
 lemma impE':
   assumes 1: "P --> Q"
     and 2: "Q ==> R"
--- a/src/FOL/IsaMakefile	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/FOL/IsaMakefile	Mon Mar 02 08:15:54 2009 +0100
@@ -32,12 +32,13 @@
   $(SRC)/Provers/clasimp.ML $(SRC)/Provers/classical.ML			\
   $(SRC)/Tools/IsaPlanner/zipper.ML $(SRC)/Tools/IsaPlanner/isand.ML	\
   $(SRC)/Tools/IsaPlanner/rw_tools.ML					\
-  $(SRC)/Tools/IsaPlanner/rw_inst.ML $(SRC)/Provers/eqsubst.ML		\
+  $(SRC)/Tools/IsaPlanner/rw_inst.ML $(SRC)/Tools/eqsubst.ML		\
   $(SRC)/Provers/hypsubst.ML $(SRC)/Tools/induct.ML			\
-  $(SRC)/Tools/atomize_elim.ML $(SRC)/Provers/project_rule.ML		\
-  $(SRC)/Provers/quantifier1.ML $(SRC)/Provers/splitter.ML FOL.thy	\
-  IFOL.thy ROOT.ML blastdata.ML cladata.ML document/root.tex		\
-  fologic.ML hypsubstdata.ML intprover.ML simpdata.ML
+  $(SRC)/Tools/intuitionistic.ML $(SRC)/Tools/atomize_elim.ML		\
+  $(SRC)/Tools/project_rule.ML $(SRC)/Provers/quantifier1.ML		\
+  $(SRC)/Provers/splitter.ML FOL.thy IFOL.thy ROOT.ML blastdata.ML	\
+  cladata.ML document/root.tex fologic.ML hypsubstdata.ML intprover.ML	\
+  simpdata.ML
 	@$(ISABELLE_TOOL) usedir -p 2 -b $(OUT)/Pure FOL
 
 
--- a/src/HOL/Divides.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Divides.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -44,10 +44,10 @@
 by (simp add: mod_div_equality2)
 
 lemma mod_by_0 [simp]: "a mod 0 = a"
-  using mod_div_equality [of a zero] by simp
+using mod_div_equality [of a zero] by simp
 
 lemma mod_0 [simp]: "0 mod a = 0"
-  using mod_div_equality [of zero a] div_0 by simp 
+using mod_div_equality [of zero a] div_0 by simp
 
 lemma div_mult_self2 [simp]:
   assumes "b \<noteq> 0"
@@ -342,6 +342,25 @@
   unfolding diff_minus using assms
   by (intro mod_add_cong mod_minus_cong)
 
+lemma dvd_neg_div: "y dvd x \<Longrightarrow> -x div y = - (x div y)"
+apply (case_tac "y = 0") apply simp
+apply (auto simp add: dvd_def)
+apply (subgoal_tac "-(y * k) = y * - k")
+ apply (erule ssubst)
+ apply (erule div_mult_self1_is_id)
+apply simp
+done
+
+lemma dvd_div_neg: "y dvd x \<Longrightarrow> x div -y = - (x div y)"
+apply (case_tac "y = 0") apply simp
+apply (auto simp add: dvd_def)
+apply (subgoal_tac "y * k = -y * -k")
+ apply (erule ssubst)
+ apply (rule div_mult_self1_is_id)
+ apply simp
+apply simp
+done
+
 end
 
 
--- a/src/HOL/HOL.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/HOL.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -12,14 +12,15 @@
   "~~/src/Tools/IsaPlanner/isand.ML"
   "~~/src/Tools/IsaPlanner/rw_tools.ML"
   "~~/src/Tools/IsaPlanner/rw_inst.ML"
-  "~~/src/Provers/project_rule.ML"
+  "~~/src/Tools/intuitionistic.ML"
+  "~~/src/Tools/project_rule.ML"
   "~~/src/Provers/hypsubst.ML"
   "~~/src/Provers/splitter.ML"
   "~~/src/Provers/classical.ML"
   "~~/src/Provers/blast.ML"
   "~~/src/Provers/clasimp.ML"
-  "~~/src/Provers/coherent.ML"
-  "~~/src/Provers/eqsubst.ML"
+  "~~/src/Tools/coherent.ML"
+  "~~/src/Tools/eqsubst.ML"
   "~~/src/Provers/quantifier1.ML"
   ("Tools/simpdata.ML")
   "~~/src/Tools/random_word.ML"
@@ -39,6 +40,9 @@
   ("Tools/recfun_codegen.ML")
 begin
 
+setup {* Intuitionistic.method_setup "iprover" *}
+
+
 subsection {* Primitive logic *}
 
 subsubsection {* Core syntax *}
--- a/src/HOL/Induct/SList.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Induct/SList.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,15 +1,10 @@
-(* *********************************************************************** *)
-(*                                                                         *)
-(* Title:      SList.thy (Extended List Theory)                            *)
-(* Based on:   $Id$      *)
-(* Author:     Lawrence C Paulson, Cambridge University Computer Laboratory*)
-(* Author:     B. Wolff, University of Bremen                              *)
-(* Purpose:    Enriched theory of lists                                    *)
-(*	       mutual indirect recursive data-types                        *)
-(*                                                                         *)
-(* *********************************************************************** *)
+(*  Title:      SList.thy
+    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
+    Author:     B. Wolff, University of Bremen
 
-(* Definition of type 'a list (strict lists) by a least fixed point
+Enriched theory of lists; mutual indirect recursive data-types.
+
+Definition of type 'a list (strict lists) by a least fixed point
 
 We use          list(A) == lfp(%Z. {NUMB(0)} <+> A <*> Z)
 and not         list    == lfp(%Z. {NUMB(0)} <+> range(Leaf) <*> Z)
@@ -24,6 +19,8 @@
 Tidied by lcp.  Still needs removal of nat_rec.
 *)
 
+header {* Extended List Theory (old) *}
+
 theory SList
 imports Sexp
 begin
@@ -79,12 +76,12 @@
 
 (*Declaring the abstract list constructors*)
 
-(*<*)no_translations
+no_translations
   "[x, xs]" == "x#[xs]"
   "[x]" == "x#[]"
-no_syntax
-  Nil :: "'a list"  ("[]")
-  Cons :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"  (infixr "#" 65)(*>*)
+no_notation
+  Nil  ("[]") and
+  Cons (infixr "#" 65)
 
 definition
   Nil       :: "'a list"                               ("[]") where
@@ -149,8 +146,8 @@
   ttl       :: "'a list => 'a list" where
   "ttl xs   = list_rec xs [] (%x xs r. xs)"
 
-(*<*)no_syntax
-    member :: "'a \<Rightarrow> 'a list \<Rightarrow> bool" (infixl "mem" 55)(*>*)
+no_notation member  (infixl "mem" 55)
+
 definition
   member :: "['a, 'a list] => bool"    (infixl "mem" 55) where
   "x mem xs = list_rec xs False (%y ys r. if y=x then True else r)"
@@ -163,8 +160,8 @@
   map       :: "('a=>'b) => ('a list => 'b list)" where
   "map f xs = list_rec xs [] (%x l r. f(x)#r)"
 
-(*<*)no_syntax
-  "\<^const>List.append" :: "'a list => 'a list => 'a list" (infixr "@" 65)(*>*)
+no_notation append  (infixr "@" 65)
+
 definition
   append    :: "['a list, 'a list] => 'a list"   (infixr "@" 65) where
   "xs@ys = list_rec xs ys (%x l r. x#r)"
@@ -342,14 +339,14 @@
 
 
 lemma not_CONS_self: "N: list(A) ==> !M. N ~= CONS M N"
-by (erule list.induct, simp_all)
+apply (erule list.induct) apply simp_all done
 
 lemma not_Cons_self2: "\<forall>x. l ~= x#l"
-by (induct_tac "l" rule: list_induct, simp_all)
+by (induct l rule: list_induct) simp_all
 
 
 lemma neq_Nil_conv2: "(xs ~= []) = (\<exists>y ys. xs = y#ys)"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
 
 (** Conversion rules for List_case: case analysis operator **)
 
@@ -491,7 +488,7 @@
 
 lemma expand_list_case: 
  "P(list_case a f xs) = ((xs=[] --> P a ) & (!y ys. xs=y#ys --> P(f y ys)))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 
 (**** Function definitions ****)
@@ -533,41 +530,44 @@
 (** @ - append **)
 
 lemma append_assoc [simp]: "(xs@ys)@zs = xs@(ys@zs)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma append_Nil2 [simp]: "xs @ [] = xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 (** mem **)
 
 lemma mem_append [simp]: "x mem (xs@ys) = (x mem xs | x mem ys)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma mem_filter [simp]: "x mem [x\<leftarrow>xs. P x ] = (x mem xs & P(x))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 (** list_all **)
 
 lemma list_all_True [simp]: "(Alls x:xs. True) = True"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma list_all_conj [simp]:
      "list_all p (xs@ys) = ((list_all p xs) & (list_all p ys))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma list_all_mem_conv: "(Alls x:xs. P(x)) = (!x. x mem xs --> P(x))"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct xs rule: list_induct)
+apply simp_all
 apply blast 
 done
 
 lemma nat_case_dist : "(! n. P n) = (P 0 & (! n. P (Suc n)))"
 apply auto
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
 done
 
 
 lemma alls_P_eq_P_nth: "(Alls u:A. P u) = (!n. n < length A --> P(nth n A))"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply simp_all
 apply (rule trans)
 apply (rule_tac [2] nat_case_dist [symmetric], simp_all)
 done
@@ -583,7 +583,7 @@
 lemma Abs_Rep_map: 
      "(!!x. f(x): sexp) ==>  
         Abs_map g (Rep_map f xs) = map (%t. g(f(t))) xs"
-apply (induct_tac "xs" rule: list_induct)
+apply (induct xs rule: list_induct)
 apply (simp_all add: Rep_map_type list_sexp [THEN subsetD])
 done
 
@@ -591,24 +591,25 @@
 (** Additional mapping lemmas **)
 
 lemma map_ident [simp]: "map(%x. x)(xs) = xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma map_append [simp]: "map f (xs@ys) = map f xs  @ map f ys"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma map_compose: "map(f o g)(xs) = map f (map g xs)"
 apply (simp add: o_def)
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct xs rule: list_induct)
+apply simp_all
 done
 
 
 lemma mem_map_aux1 [rule_format]:
      "x mem (map f q) --> (\<exists>y. y mem q & x = f y)"
-by (induct_tac "q" rule: list_induct, simp_all, blast)
+by (induct q rule: list_induct) auto
 
 lemma mem_map_aux2 [rule_format]: 
      "(\<exists>y. y mem q & x = f y) --> x mem (map f q)"
-by (induct_tac "q" rule: list_induct, auto)
+by (induct q rule: list_induct) auto
 
 lemma mem_map: "x mem (map f q) = (\<exists>y. y mem q & x = f y)"
 apply (rule iffI)
@@ -617,10 +618,10 @@
 done
 
 lemma hd_append [rule_format]: "A ~= [] --> hd(A @ B) = hd(A)"
-by (induct_tac "A" rule: list_induct, auto)
+by (induct A rule: list_induct) auto
 
 lemma tl_append [rule_format]: "A ~= [] --> tl(A @ B) = tl(A) @ B"
-by (induct_tac "A" rule: list_induct, auto)
+by (induct A rule: list_induct) auto
 
 
 (** take **)
@@ -638,8 +639,8 @@
 by (simp add: drop_def)
 
 lemma drop_Suc1 [simp]: "drop [] (Suc x) = []"
-apply (simp add: drop_def)
-apply (induct_tac "x", auto) 
+apply (induct x) 
+apply (simp_all add: drop_def)
 done
 
 lemma drop_Suc2 [simp]: "drop(a#xs)(Suc x) = drop xs x"
@@ -698,9 +699,7 @@
 
 
 lemma zipWith_Cons_Nil [simp]: "zipWith f (x,[])  = []"
-apply (simp add: zipWith_def)
-apply (induct_tac "x" rule: list_induct, simp_all)
-done
+by (induct x rule: list_induct) (simp_all add: zipWith_def)
 
 
 lemma zipWith_Nil_Cons [simp]: "zipWith f ([],x) = []"
@@ -722,23 +721,23 @@
 done
 
 lemma map_flat: "map f (flat S) = flat(map (map f) S)"
-by (induct_tac "S" rule: list_induct, simp_all)
+by (induct S rule: list_induct) simp_all
 
 lemma list_all_map_eq: "(Alls u:xs. f(u) = g(u)) --> map f xs = map g xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma filter_map_d: "filter p (map f xs) = map f (filter(p o f)(xs))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma filter_compose: "filter p (filter q xs) = filter(%x. p x & q x) xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 (* "filter(p, filter(q,xs)) = filter(q, filter(p,xs))",
    "filter(p, filter(p,xs)) = filter(p,xs)" BIRD's thms.*)
  
 lemma filter_append [rule_format, simp]:
      "\<forall>B. filter p (A @ B) = (filter p A @ filter p B)"
-by (induct_tac "A" rule: list_induct, simp_all)
+by (induct A rule: list_induct) simp_all
 
 
 (* inits(xs) == map(fst,splits(xs)), 
@@ -749,44 +748,50 @@
    x mem xs & y mem ys = <x,y> mem diag(xs,ys) *)
 
 lemma length_append: "length(xs@ys) = length(xs)+length(ys)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 lemma length_map: "length(map f xs) = length(xs)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
 
 
 lemma take_Nil [simp]: "take [] n = []"
-by (induct_tac "n", simp_all)
+by (induct n) simp_all
 
 lemma take_take_eq [simp]: "\<forall>n. take (take xs n) n = take xs n"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct xs rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
 done
 
 lemma take_take_Suc_eq1 [rule_format]:
      "\<forall>n. take (take xs(Suc(n+m))) n = take xs n"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct_tac xs rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
 done
 
 declare take_Suc [simp del]
 
 lemma take_take_1: "take (take xs (n+m)) n = take xs n"
-apply (induct_tac "m")
+apply (induct m)
 apply (simp_all add: take_take_Suc_eq1)
 done
 
 lemma take_take_Suc_eq2 [rule_format]:
      "\<forall>n. take (take xs n)(Suc(n+m)) = take xs n"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct_tac xs rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
 done
 
 lemma take_take_2: "take(take xs n)(n+m) = take xs n"
-apply (induct_tac "m")
+apply (induct m)
 apply (simp_all add: take_take_Suc_eq2)
 done
 
@@ -794,29 +799,33 @@
 (* length(drop(xs,n)) = length(xs) - n *)
 
 lemma drop_Nil [simp]: "drop  [] n  = []"
-by (induct_tac "n", auto)
+by (induct n) auto
 
 lemma drop_drop [rule_format]: "\<forall>xs. drop (drop xs m) n = drop xs(m+n)"
-apply (induct_tac "m", auto) 
-apply (induct_tac "xs" rule: list_induct, auto) 
+apply (induct_tac m)
+apply auto
+apply (induct_tac xs rule: list_induct)
+apply auto
 done
 
 lemma take_drop [rule_format]: "\<forall>xs. (take xs n) @ (drop xs n) = xs"
-apply (induct_tac "n", auto) 
-apply (induct_tac "xs" rule: list_induct, auto) 
+apply (induct_tac n)
+apply auto
+apply (induct_tac xs rule: list_induct)
+apply auto
 done
 
 lemma copy_copy: "copy x n @ copy x m = copy x (n+m)"
-by (induct_tac "n", auto)
+by (induct n) auto
 
 lemma length_copy: "length(copy x n)  = n"
-by (induct_tac "n", auto)
+by (induct n) auto
 
 lemma length_take [rule_format, simp]:
      "\<forall>xs. length(take xs n) = min (length xs) n"
-apply (induct_tac "n")
+apply (induct n)
  apply auto
-apply (induct_tac "xs" rule: list_induct)
+apply (induct_tac xs rule: list_induct)
  apply auto
 done
 
@@ -824,85 +833,93 @@
 by (simp only: length_append [symmetric] take_drop)
 
 lemma take_append [rule_format]: "\<forall>A. length(A) = n --> take(A@B) n = A"
-apply (induct_tac "n")
+apply (induct n)
 apply (rule allI)
 apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct, simp_all)
 done
 
 lemma take_append2 [rule_format]:
      "\<forall>A. length(A) = n --> take(A@B) (n+k) = A @ take B k"
-apply (induct_tac "n")
+apply (induct n)
 apply (rule allI)
 apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct, simp_all)
 done
 
 lemma take_map [rule_format]: "\<forall>n. take (map f A) n = map f (take A n)"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "n", simp_all)
+apply (induct_tac n)
+apply simp_all
 done
 
 lemma drop_append [rule_format]: "\<forall>A. length(A) = n --> drop(A@B)n = B"
-apply (induct_tac "n")
+apply (induct n)
 apply (rule allI)
 apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply simp_all
 done
 
 lemma drop_append2 [rule_format]:
      "\<forall>A. length(A) = n --> drop(A@B)(n+k) = drop B k"
-apply (induct_tac "n")
+apply (induct n)
 apply (rule allI)
 apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply simp_all
 done
 
 
 lemma drop_all [rule_format]: "\<forall>A. length(A) = n --> drop A n = []"
-apply (induct_tac "n")
+apply (induct n)
 apply (rule allI)
 apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, auto)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply auto
 done
 
 lemma drop_map [rule_format]: "\<forall>n. drop (map f A) n = map f (drop A n)"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "n", simp_all)
+apply (induct_tac n)
+apply simp_all
 done
 
 lemma take_all [rule_format]: "\<forall>A. length(A) = n --> take A n = A"
-apply (induct_tac "n")
+apply (induct n)
 apply (rule allI)
 apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, auto) 
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply auto
 done
 
 lemma foldl_single: "foldl f a [b] = f a b"
 by simp_all
 
-lemma foldl_append [rule_format, simp]:
-     "\<forall>a. foldl f a (A @ B) = foldl f (foldl f a A) B"
-by (induct_tac "A" rule: list_induct, simp_all)
+lemma foldl_append [simp]:
+  "\<And>a. foldl f a (A @ B) = foldl f (foldl f a A) B"
+by (induct A rule: list_induct) simp_all
 
-lemma foldl_map [rule_format]:
-     "\<forall>e. foldl f e (map g S) = foldl (%x y. f x (g y)) e S"
-by (induct_tac "S" rule: list_induct, simp_all)
+lemma foldl_map:
+  "\<And>e. foldl f e (map g S) = foldl (%x y. f x (g y)) e S"
+by (induct S rule: list_induct) simp_all
 
 lemma foldl_neutr_distr [rule_format]:
   assumes r_neutr: "\<forall>a. f a e = a" 
       and r_neutl: "\<forall>a. f e a = a"
       and assoc:   "\<forall>a b c. f a (f b c) = f(f a b) c"
   shows "\<forall>y. f y (foldl f e A) = foldl f y A"
-apply (induct_tac "A" rule: list_induct)
+apply (induct A rule: list_induct)
 apply (simp_all add: r_neutr r_neutl, clarify) 
 apply (erule all_dupE) 
 apply (rule trans) 
@@ -923,95 +940,98 @@
 
 lemma foldr_append [rule_format, simp]:
      "\<forall>a. foldr f a (A @ B) = foldr f (foldr f a B) A"
-apply (induct_tac "A" rule: list_induct, simp_all)
-done
+by (induct A rule: list_induct) simp_all
 
 
-lemma foldr_map [rule_format]: "\<forall>e. foldr f e (map g S) = foldr (f o g) e S"
-apply (simp add: o_def)
-apply (induct_tac "S" rule: list_induct, simp_all)
-done
+lemma foldr_map: "\<And>e. foldr f e (map g S) = foldr (f o g) e S"
+by (induct S rule: list_induct) (simp_all add: o_def)
 
 lemma foldr_Un_eq_UN: "foldr op Un {} S = (UN X: {t. t mem S}.X)"
-by (induct_tac "S" rule: list_induct, auto)
+by (induct S rule: list_induct) auto
 
 lemma foldr_neutr_distr:
      "[| !a. f e a = a; !a b c. f a (f b c) = f(f a b) c |]    
       ==> foldr f y S = f (foldr f e S) y"
-by (induct_tac "S" rule: list_induct, auto)
+by (induct S rule: list_induct) auto
 
 lemma foldr_append2: 
     "[| !a. f e a = a; !a b c. f a (f b c) = f(f a b) c |]
      ==> foldr f e (A @ B) = f (foldr f e A) (foldr f e B)"
 apply auto
-apply (rule foldr_neutr_distr, auto)
+apply (rule foldr_neutr_distr)
+apply auto
 done
 
 lemma foldr_flat: 
     "[| !a. f e a = a; !a b c. f a (f b c) = f(f a b) c |] ==>  
       foldr f e (flat S) = (foldr f e)(map (foldr f e) S)"
-apply (induct_tac "S" rule: list_induct)
+apply (induct S rule: list_induct)
 apply (simp_all del: foldr_append add: foldr_append2)
 done
 
 
 lemma list_all_map: "(Alls x:map f xs .P(x)) = (Alls x:xs.(P o f)(x))"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
 
 lemma list_all_and: 
      "(Alls x:xs. P(x)&Q(x)) = ((Alls x:xs. P(x))&(Alls x:xs. Q(x)))"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
 
 
 lemma nth_map [rule_format]:
      "\<forall>i. i < length(A)  --> nth i (map f A) = f(nth i A)"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "i", auto) 
+apply (induct_tac i)
+apply auto
 done
 
 lemma nth_app_cancel_right [rule_format]:
      "\<forall>i. i < length(A)  --> nth i(A@B) = nth i A"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
 apply (rule allI)
-apply (induct_tac "i", simp_all)
+apply (induct_tac i)
+apply simp_all
 done
 
 lemma nth_app_cancel_left [rule_format]:
      "\<forall>n. n = length(A) --> nth(n+i)(A@B) = nth i B"
-by (induct_tac "A" rule: list_induct, simp_all)
+by (induct A rule: list_induct) simp_all
 
 
 (** flat **)
 
 lemma flat_append [simp]: "flat(xs@ys) = flat(xs) @ flat(ys)"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
 
 lemma filter_flat: "filter p (flat S) = flat(map (filter p) S)"
-by (induct_tac "S" rule: list_induct, auto)
+by (induct S rule: list_induct) auto
 
 
 (** rev **)
 
 lemma rev_append [simp]: "rev(xs@ys) = rev(ys) @ rev(xs)"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
 
 lemma rev_rev_ident [simp]: "rev(rev l) = l"
-by (induct_tac "l" rule: list_induct, auto)
+by (induct l rule: list_induct) auto
 
 lemma rev_flat: "rev(flat ls) = flat (map rev (rev ls))"
-by (induct_tac "ls" rule: list_induct, auto)
+by (induct ls rule: list_induct) auto
 
 lemma rev_map_distrib: "rev(map f l) = map f (rev l)"
-by (induct_tac "l" rule: list_induct, auto)
+by (induct l rule: list_induct) auto
 
 lemma foldl_rev: "foldl f b (rev l) = foldr (%x y. f y x) b l"
-by (induct_tac "l" rule: list_induct, auto)
+by (induct l rule: list_induct) auto
 
 lemma foldr_rev: "foldr f b (rev l) = foldl (%x y. f y x) b l"
 apply (rule sym)
 apply (rule trans)
-apply (rule_tac [2] foldl_rev, simp)
+apply (rule_tac [2] foldl_rev)
+apply simp
 done
 
 end
--- a/src/HOL/IntDiv.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/IntDiv.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -689,9 +689,6 @@
 apply (blast intro: divmod_rel_div_mod [THEN zmult1_lemma, THEN divmod_rel_mod])
 done
 
-lemma zdiv_zmult_self1 [simp]: "b \<noteq> (0::int) ==> (a*b) div b = a"
-by (simp add: zdiv_zmult1_eq)
-
 lemma zmod_zdiv_trivial: "(a mod b) div b = (0::int)"
 apply (case_tac "b = 0", simp)
 apply (auto simp add: linorder_neq_iff div_pos_pos_trivial div_neg_neg_trivial)
@@ -717,7 +714,7 @@
   assume not0: "b \<noteq> 0"
   show "(a + c * b) div b = c + a div b"
     unfolding zdiv_zadd1_eq [of a "c * b"] using not0 
-      by (simp add: zmod_zmult1_eq zmod_zdiv_trivial)
+      by (simp add: zmod_zmult1_eq zmod_zdiv_trivial zdiv_zmult1_eq)
 qed auto
 
 lemma posDivAlg_div_mod:
@@ -1225,6 +1222,9 @@
 apply (auto simp add: IntDiv.divmod_rel_def of_nat_mult)
 done
 
+lemma abs_div: "(y::int) dvd x \<Longrightarrow> abs (x div y) = abs x div abs y"
+by (unfold dvd_def, cases "y=0", auto simp add: abs_mult)
+
 text{*Suggested by Matthias Daum*}
 lemma int_power_div_base:
      "\<lbrakk>0 < m; 0 < k\<rbrakk> \<Longrightarrow> k ^ m div k = (k::int) ^ (m - Suc 0)"
--- a/src/HOL/IsaMakefile	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/IsaMakefile	Mon Mar 02 08:15:54 2009 +0100
@@ -78,38 +78,39 @@
 $(OUT)/Pure: Pure
 
 BASE_DEPENDENCIES = $(OUT)/Pure \
+  $(SRC)/Provers/blast.ML \
+  $(SRC)/Provers/clasimp.ML \
+  $(SRC)/Provers/classical.ML \
+  $(SRC)/Provers/hypsubst.ML \
+  $(SRC)/Provers/quantifier1.ML \
+  $(SRC)/Provers/splitter.ML \
+  $(SRC)/Tools/IsaPlanner/isand.ML \
+  $(SRC)/Tools/IsaPlanner/rw_inst.ML \
+  $(SRC)/Tools/IsaPlanner/rw_tools.ML \
+  $(SRC)/Tools/IsaPlanner/zipper.ML \
+  $(SRC)/Tools/atomize_elim.ML \
+  $(SRC)/Tools/code/code_funcgr.ML \
+  $(SRC)/Tools/code/code_haskell.ML \
+  $(SRC)/Tools/code/code_ml.ML \
+  $(SRC)/Tools/code/code_name.ML \
+  $(SRC)/Tools/code/code_printer.ML \
+  $(SRC)/Tools/code/code_target.ML \
+  $(SRC)/Tools/code/code_thingol.ML \
+  $(SRC)/Tools/code/code_wellsorted.ML \
+  $(SRC)/Tools/coherent.ML \
+  $(SRC)/Tools/eqsubst.ML \
+  $(SRC)/Tools/induct.ML \
+  $(SRC)/Tools/intuitionistic.ML \
+  $(SRC)/Tools/induct_tacs.ML \
+  $(SRC)/Tools/nbe.ML \
+  $(SRC)/Tools/project_rule.ML \
+  $(SRC)/Tools/random_word.ML \
+  $(SRC)/Tools/value.ML \
   Code_Setup.thy \
   HOL.thy \
   Tools/hologic.ML \
   Tools/recfun_codegen.ML \
   Tools/simpdata.ML \
-  $(SRC)/Tools/atomize_elim.ML \
-  $(SRC)/Tools/code/code_funcgr.ML \
-  $(SRC)/Tools/code/code_wellsorted.ML \
-  $(SRC)/Tools/code/code_name.ML \
-  $(SRC)/Tools/code/code_printer.ML \
-  $(SRC)/Tools/code/code_target.ML \
-  $(SRC)/Tools/code/code_ml.ML \
-  $(SRC)/Tools/code/code_haskell.ML \
-  $(SRC)/Tools/code/code_thingol.ML \
-  $(SRC)/Tools/induct.ML \
-  $(SRC)/Tools/induct_tacs.ML \
-  $(SRC)/Tools/IsaPlanner/isand.ML \
-  $(SRC)/Tools/IsaPlanner/rw_inst.ML \
-  $(SRC)/Tools/IsaPlanner/rw_tools.ML \
-  $(SRC)/Tools/IsaPlanner/zipper.ML \
-  $(SRC)/Tools/nbe.ML \
-  $(SRC)/Tools/random_word.ML \
-  $(SRC)/Tools/value.ML \
-  $(SRC)/Provers/blast.ML \
-  $(SRC)/Provers/clasimp.ML \
-  $(SRC)/Provers/classical.ML \
-  $(SRC)/Provers/coherent.ML \
-  $(SRC)/Provers/eqsubst.ML \
-  $(SRC)/Provers/hypsubst.ML \
-  $(SRC)/Provers/project_rule.ML \
-  $(SRC)/Provers/quantifier1.ML \
-  $(SRC)/Provers/splitter.ML \
 
 $(OUT)/HOL-Base: base.ML $(BASE_DEPENDENCIES)
 	@$(ISABELLE_TOOL) usedir -b -f base.ML -d false -g false $(OUT)/Pure HOL-Base
@@ -810,34 +811,31 @@
 HOL-ex: HOL $(LOG)/HOL-ex.gz
 
 $(LOG)/HOL-ex.gz: $(OUT)/HOL Library/Commutative_Ring.thy		\
-  Library/Primes.thy							\
-  ex/Abstract_NAT.thy ex/Antiquote.thy ex/Arith_Examples.thy ex/BT.thy	\
-  ex/BinEx.thy ex/CTL.thy ex/Chinese.thy ex/Classical.thy		\
-  ex/Coherent.thy ex/Dense_Linear_Order_Ex.thy ex/Eval_Examples.thy	\
-  ex/Groebner_Examples.thy ex/Quickcheck_Generators.thy		\
-  ex/Codegenerator.thy ex/Codegenerator_Pretty.thy			\
-  ex/CodegenSML_Test.thy ex/Formal_Power_Series_Examples.thy						\
-  ex/Commutative_RingEx.thy ex/Efficient_Nat_examples.thy		\
-  ex/Hex_Bin_Examples.thy ex/Commutative_Ring_Complete.thy		\
-  ex/ExecutableContent.thy ex/Fundefs.thy ex/Guess.thy ex/Hebrew.thy	\
-  ex/Binary.thy ex/Higher_Order_Logic.thy ex/Hilbert_Classical.thy	\
+  Library/Primes.thy ex/Abstract_NAT.thy ex/Antiquote.thy		\
+  ex/ApproximationEx.thy ex/Arith_Examples.thy				\
+  ex/Arithmetic_Series_Complex.thy ex/BT.thy ex/BinEx.thy		\
+  ex/Binary.thy ex/CTL.thy ex/Chinese.thy ex/Classical.thy		\
+  ex/CodegenSML_Test.thy ex/Codegenerator.thy				\
+  ex/Codegenerator_Pretty.thy ex/Coherent.thy				\
+  ex/Commutative_RingEx.thy ex/Commutative_Ring_Complete.thy		\
+  ex/Dense_Linear_Order_Ex.thy ex/Efficient_Nat_examples.thy		\
+  ex/Eval_Examples.thy ex/ExecutableContent.thy				\
+  ex/Formal_Power_Series_Examples.thy ex/Fundefs.thy			\
+  ex/Groebner_Examples.thy ex/Guess.thy ex/HarmonicSeries.thy		\
+  ex/Hebrew.thy ex/Hex_Bin_Examples.thy ex/Higher_Order_Logic.thy	\
+  ex/Hilbert_Classical.thy ex/ImperativeQuicksort.thy			\
   ex/Induction_Scheme.thy ex/InductiveInvariant.thy			\
   ex/InductiveInvariant_examples.thy ex/Intuitionistic.thy		\
-  ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy		\
-  ex/MergeSort.thy ex/MonoidGroup.thy ex/Multiquote.thy ex/NatSum.thy	\
+  ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy ex/MergeSort.thy		\
+  ex/Meson_Test.thy ex/MonoidGroup.thy ex/Multiquote.thy ex/NatSum.thy	\
   ex/Numeral.thy ex/PER.thy ex/PresburgerEx.thy ex/Primrec.thy		\
-  ex/Quickcheck_Examples.thy	\
-  ex/ReflectionEx.thy ex/ROOT.ML ex/Recdefs.thy ex/Records.thy		\
+  ex/Quickcheck_Examples.thy ex/Quickcheck_Generators.thy ex/ROOT.ML	\
+  ex/Recdefs.thy ex/Records.thy ex/ReflectionEx.thy			\
   ex/Refute_Examples.thy ex/SAT_Examples.thy ex/SVC_Oracle.thy		\
-  ex/Subarray.thy ex/Sublist.thy                                        \
-  ex/Sudoku.thy ex/Tarski.thy ex/Termination.thy ex/Term_Of_Syntax.thy	\
-  ex/Unification.thy ex/document/root.bib			        \
-  ex/document/root.tex ex/Meson_Test.thy ex/set.thy	\
-  ex/svc_funcs.ML ex/svc_test.thy	\
-  ex/ImperativeQuicksort.thy	\
-  ex/Arithmetic_Series_Complex.thy ex/HarmonicSeries.thy	\
-  ex/Sqrt.thy ex/Sqrt_Script.thy \
-  ex/ApproximationEx.thy
+  ex/Serbian.thy ex/Sqrt.thy ex/Sqrt_Script.thy ex/Subarray.thy		\
+  ex/Sublist.thy ex/Sudoku.thy ex/Tarski.thy ex/Term_Of_Syntax.thy	\
+  ex/Termination.thy ex/Unification.thy ex/document/root.bib		\
+  ex/document/root.tex ex/set.thy ex/svc_funcs.ML ex/svc_test.thy
 	@$(ISABELLE_TOOL) usedir $(OUT)/HOL ex
 
 
--- a/src/HOL/Library/Float.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Library/Float.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1093,7 +1093,7 @@
   { have "2^(prec - 1) * m \<le> 2^(prec - 1) * 2^?b" using `m < 2^?b`[THEN less_imp_le] by (rule mult_left_mono, auto)
     also have "\<dots> = 2 ^ nat (int prec + bitlen m - 1)" unfolding pow_split zpower_zadd_distrib by auto
     finally have "2^(prec - 1) * m div m \<le> 2 ^ nat (int prec + bitlen m - 1) div m" using `0 < m` by (rule zdiv_mono1)
-    hence "2^(prec - 1) \<le> 2 ^ nat (int prec + bitlen m - 1) div m" unfolding zdiv_zmult_self1[OF `m \<noteq> 0`] .
+    hence "2^(prec - 1) \<le> 2 ^ nat (int prec + bitlen m - 1) div m" unfolding div_mult_self2_is_id[OF `m \<noteq> 0`] .
     hence "2^(prec - 1) * inverse (2 ^ nat (int prec + bitlen m - 1)) \<le> ?d"
       unfolding real_of_int_le_iff[of "2^(prec - 1)", symmetric] by auto }
   from mult_left_mono[OF this[unfolded pow_split power_add inverse_mult_distrib real_mult_assoc[symmetric] right_inverse[OF pow_not0] real_mult_1], of "2^?e"]
--- a/src/HOL/Library/Polynomial.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Library/Polynomial.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -106,6 +106,7 @@
 translations
   "[:x, xs:]" == "CONST pCons x [:xs:]"
   "[:x:]" == "CONST pCons x 0"
+  "[:x:]" <= "CONST pCons x (_constrain 0 t)"
 
 lemma Poly_nat_case: "f \<in> Poly \<Longrightarrow> nat_case a f \<in> Poly"
   unfolding Poly_def by (auto split: nat.split)
--- a/src/HOL/Library/reflection.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Library/reflection.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -88,17 +88,12 @@
 
 fun dest_listT (Type ("List.list", [T])) = T;
 
-fun partition P [] = ([],[])
-  | partition P (x::xs) = 
-     let val (yes,no) = partition P xs
-     in if P x then (x::yes,no) else (yes, x::no) end
-
 fun rearrange congs = 
 let 
  fun P (_, th) = 
   let val @{term "Trueprop"}$(Const ("op =",_) $l$_) = concl_of th
   in can dest_Var l end
- val (yes,no) = partition P congs 
+ val (yes,no) = List.partition P congs 
  in no @ yes end
 
 fun genreif ctxt raw_eqs t =
--- a/src/HOL/Tools/Qelim/langford.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Tools/Qelim/langford.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -113,11 +113,6 @@
   val eqI = instantiate' [] [SOME ll, SOME rr] @{thm iffI}
  in implies_elim (implies_elim eqI thl) thr |> mk_meta_eq end;
 
-fun partition f [] = ([],[])
-  | partition f (x::xs) = 
-      let val (yes,no) = partition f xs 
-      in if f x then (x::yes,no) else (yes, x::no) end;
-
 fun contains x ct = member (op aconv) (OldTerm.term_frees (term_of ct)) (term_of x);
 
 fun is_eqx x eq = case term_of eq of
@@ -132,11 +127,11 @@
     val e = Thm.dest_fun ct
     val (x,p) = Thm.dest_abs (SOME xn) (Thm.dest_arg ct)
     val Pp = Thm.capply @{cterm "Trueprop"} p 
-    val (eqs,neqs) = partition (is_eqx x) (all_conjuncts p)
+    val (eqs,neqs) = List.partition (is_eqx x) (all_conjuncts p)
    in case eqs of
       [] => 
         let 
-         val (dx,ndx) = partition (contains x) neqs
+         val (dx,ndx) = List.partition (contains x) neqs
          in case ndx of [] => NONE
                       | _ =>
             conj_aci_rule (Thm.mk_binop @{cterm "op == :: prop => _"} Pp 
--- a/src/HOL/Tools/atp_wrapper.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Tools/atp_wrapper.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -96,7 +96,7 @@
 
 fun tptp_prover_opts_full max_new theory_const full command =
   external_prover
-    (ResAtp.write_problem_files ResHolClause.tptp_write_file max_new theory_const)
+    (ResAtp.write_problem_files false max_new theory_const)
     command
     ResReconstruct.find_failure_e_vamp_spass
     (if full then ResReconstruct.structured_proof else ResReconstruct.lemma_list_tstp);
@@ -153,7 +153,7 @@
 (* SPASS *)
 
 fun spass_opts max_new theory_const = external_prover
-  (ResAtp.write_problem_files ResHolClause.dfg_write_file max_new theory_const)
+  (ResAtp.write_problem_files true max_new theory_const)
   (Path.explode "$SPASS_HOME/SPASS", "-Auto -SOS=1 -PGiven=0 -PProblem=0 -Splits=0 -FullRed=0 -DocProof")
   ResReconstruct.find_failure_e_vamp_spass
   ResReconstruct.lemma_list_dfg;
--- a/src/HOL/Tools/res_atp.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Tools/res_atp.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -6,10 +6,7 @@
   val tvar_classes_of_terms : term list -> string list
   val tfree_classes_of_terms : term list -> string list
   val type_consts_of_terms : theory -> term list -> string list
-  val write_problem_files : (theory -> bool -> Thm.thm list -> string ->
-  (thm * (ResHolClause.axiom_name * ResHolClause.clause_id)) list * ResClause.classrelClause list *
-    ResClause.arityClause list -> string list -> ResHolClause.axiom_name list)
-    -> int -> bool
+  val write_problem_files : bool -> int -> bool
     -> (int -> Path.T) -> Proof.context * thm list * thm
     -> string list * ResHolClause.axiom_name Vector.vector list
 end;
@@ -524,11 +521,10 @@
 (* TODO: problem file for *one* subgoal would be sufficient *)
 (*Write out problem files for each subgoal.
   Argument probfile generates filenames from subgoal-number
-  Argument writer is either a tptp_write_file or dfg_write_file from ResHolClause
   Arguments max_new and theory_const control relevance_filter
     (necessary for different provers)
-  *)
-fun write_problem_files writer max_new theory_const probfile (ctxt, chain_ths, th) =
+*)
+fun write_problem_files dfg max_new theory_const probfile (ctxt, chain_ths, th) =
   let val goals = Thm.prems_of th
       val thy = ProofContext.theory_of ctxt
       fun get_neg_subgoals [] _ = []
@@ -548,6 +544,7 @@
       val white_cls = ResAxioms.cnf_rules_pairs thy white_thms
       (*clauses relevant to goal gl*)
       val axcls_list = map (fn ngcls => white_cls @ relevance_filter max_new theory_const thy included_cls (map prop_of ngcls)) goal_cls
+      val writer = if dfg then ResHolClause.dfg_write_file else ResHolClause.tptp_write_file
       fun write_all [] [] _ = []
         | write_all (ccls::ccls_list) (axcls::axcls_list) k =
             let val fname = File.platform_path (probfile k)
@@ -561,7 +558,7 @@
                 and supers = tvar_classes_of_terms axtms
                 and tycons = type_consts_of_terms thy (ccltms@axtms)
                 (*TFrees in conjecture clauses; TVars in axiom clauses*)
-                val (supers',arity_clauses) = ResClause.make_arity_clauses thy tycons supers
+                val (supers',arity_clauses) = ResClause.make_arity_clauses_dfg dfg thy tycons supers
                 val classrel_clauses = ResClause.make_classrel_clauses thy subs supers'
                 val clnames = writer thy isFO ccls fname (axcls,classrel_clauses,arity_clauses) []
                 val thm_names = Vector.fromList clnames
--- a/src/HOL/Tools/res_clause.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Tools/res_clause.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -27,9 +27,8 @@
   val make_fixed_var : string -> string
   val make_schematic_type_var : string * int -> string
   val make_fixed_type_var : string -> string
-  val dfg_format: bool ref
-  val make_fixed_const : string -> string
-  val make_fixed_type_const : string -> string
+  val make_fixed_const : bool -> string -> string
+  val make_fixed_type_const : bool -> string -> string
   val make_type_class : string -> string
   datatype kind = Axiom | Conjecture
   type axiom_name = string
@@ -50,6 +49,7 @@
   datatype classrelClause = ClassrelClause of
    {axiom_name: axiom_name, subclass: class, superclass: class}
   val make_classrel_clauses: theory -> class list -> class list -> classrelClause list
+  val make_arity_clauses_dfg: bool -> theory -> string list -> class list -> class list * arityClause list
   val make_arity_clauses: theory -> string list -> class list -> class list * arityClause list
   val add_type_sort_preds: typ * int Symtab.table -> int Symtab.table
   val add_classrelClause_preds : classrelClause * int Symtab.table -> int Symtab.table
@@ -197,28 +197,26 @@
 fun make_fixed_type_var x = tfree_prefix ^ (ascii_of (trim_type_var x));
 
 (*HACK because SPASS truncates identifiers to 63 characters :-(( *)
-val dfg_format = ref false;
-
 (*32-bit hash, so we expect no collisions unless there are around 65536 long identifiers...*)
-fun controlled_length s =
-  if size s > 60 andalso !dfg_format
+fun controlled_length dfg_format s =
+  if size s > 60 andalso dfg_format
   then Word.toString (Polyhash.hashw_string(s,0w0))
   else s;
 
-fun lookup_const c =
+fun lookup_const dfg c =
     case Symtab.lookup const_trans_table c of
         SOME c' => c'
-      | NONE => controlled_length (ascii_of c);
+      | NONE => controlled_length dfg (ascii_of c);
 
-fun lookup_type_const c =
+fun lookup_type_const dfg c =
     case Symtab.lookup type_const_trans_table c of
         SOME c' => c'
-      | NONE => controlled_length (ascii_of c);
+      | NONE => controlled_length dfg (ascii_of c);
 
-fun make_fixed_const "op =" = "equal"   (*MUST BE "equal" because it's built-in to ATPs*)
-  | make_fixed_const c      = const_prefix ^ lookup_const c;
+fun make_fixed_const _ "op =" = "equal"   (*MUST BE "equal" because it's built-in to ATPs*)
+  | make_fixed_const dfg c      = const_prefix ^ lookup_const dfg c;
 
-fun make_fixed_type_const c = tconst_prefix ^ lookup_type_const c;
+fun make_fixed_type_const dfg c = tconst_prefix ^ lookup_type_const dfg c;
 
 fun make_type_class clas = class_prefix ^ ascii_of clas;
 
@@ -251,13 +249,13 @@
 
 (*Flatten a type to a fol_type while accumulating sort constraints on the TFrees and
   TVars it contains.*)
-fun type_of (Type (a, Ts)) =
-      let val (folTyps, ts) = types_of Ts
-          val t = make_fixed_type_const a
+fun type_of dfg (Type (a, Ts)) =
+      let val (folTyps, ts) = types_of dfg Ts
+          val t = make_fixed_type_const dfg a
       in (Comp(t,folTyps), ts) end
-  | type_of T = (atomic_type T, [T])
-and types_of Ts =
-      let val (folTyps,ts) = ListPair.unzip (map type_of Ts)
+  | type_of dfg T = (atomic_type T, [T])
+and types_of dfg Ts =
+      let val (folTyps,ts) = ListPair.unzip (map (type_of dfg) Ts)
       in (folTyps, union_all ts) end;
 
 (*Make literals for sorted type variables*)
@@ -317,12 +315,12 @@
   | pack_sort(tvar, cls::srt) =  (cls, tvar) :: pack_sort(tvar, srt);
 
 (*Arity of type constructor tcon :: (arg1,...,argN)res*)
-fun make_axiom_arity_clause (tcons, axiom_name, (cls,args)) =
+fun make_axiom_arity_clause dfg (tcons, axiom_name, (cls,args)) =
    let val tvars = gen_TVars (length args)
        val tvars_srts = ListPair.zip (tvars,args)
    in
       ArityClause {axiom_name = axiom_name, 
-                   conclLit = TConsLit (cls, make_fixed_type_const tcons, tvars),
+                   conclLit = TConsLit (cls, make_fixed_type_const dfg tcons, tvars),
                    premLits = map TVarLit (union_all(map pack_sort tvars_srts))}
    end;
 
@@ -354,20 +352,20 @@
 
 (** Isabelle arities **)
 
-fun arity_clause _ _ (tcons, []) = []
-  | arity_clause seen n (tcons, ("HOL.type",_)::ars) =  (*ignore*)
-      arity_clause seen n (tcons,ars)
-  | arity_clause seen n (tcons, (ar as (class,_)) :: ars) =
+fun arity_clause dfg _ _ (tcons, []) = []
+  | arity_clause dfg seen n (tcons, ("HOL.type",_)::ars) =  (*ignore*)
+      arity_clause dfg seen n (tcons,ars)
+  | arity_clause dfg seen n (tcons, (ar as (class,_)) :: ars) =
       if class mem_string seen then (*multiple arities for the same tycon, class pair*)
-          make_axiom_arity_clause (tcons, lookup_type_const tcons ^ "_" ^ class ^ "_" ^ Int.toString n, ar) ::
-          arity_clause seen (n+1) (tcons,ars)
+          make_axiom_arity_clause dfg (tcons, lookup_type_const dfg tcons ^ "_" ^ class ^ "_" ^ Int.toString n, ar) ::
+          arity_clause dfg seen (n+1) (tcons,ars)
       else
-          make_axiom_arity_clause (tcons, lookup_type_const tcons ^ "_" ^ class, ar) ::
-          arity_clause (class::seen) n (tcons,ars)
+          make_axiom_arity_clause dfg (tcons, lookup_type_const dfg tcons ^ "_" ^ class, ar) ::
+          arity_clause dfg (class::seen) n (tcons,ars)
 
-fun multi_arity_clause [] = []
-  | multi_arity_clause ((tcons,ars) :: tc_arlists) =
-      arity_clause [] 1 (tcons, ars)  @  multi_arity_clause tc_arlists
+fun multi_arity_clause dfg [] = []
+  | multi_arity_clause dfg ((tcons,ars) :: tc_arlists) =
+      arity_clause dfg [] 1 (tcons, ars)  @  multi_arity_clause dfg tc_arlists
 
 (*Generate all pairs (tycon,class,sorts) such that tycon belongs to class in theory thy
   provided its arguments have the corresponding sorts.*)
@@ -390,10 +388,10 @@
           val (classes', cpairs') = iter_type_class_pairs thy tycons newclasses
       in  (classes' union classes, cpairs' union cpairs)  end;
 
-fun make_arity_clauses thy tycons classes =
+fun make_arity_clauses_dfg dfg thy tycons classes =
   let val (classes', cpairs) = iter_type_class_pairs thy tycons classes
-  in  (classes', multi_arity_clause cpairs)  end;
-
+  in  (classes', multi_arity_clause dfg cpairs)  end;
+val make_arity_clauses = make_arity_clauses_dfg false;
 
 (**** Find occurrences of predicates in clauses ****)
 
--- a/src/HOL/Tools/res_hol_clause.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/Tools/res_hol_clause.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -13,8 +13,8 @@
   val comb_C: thm
   val comb_S: thm
   datatype type_level = T_FULL | T_CONST
-  val typ_level: type_level ref
-  val minimize_applies: bool ref
+  val typ_level: type_level
+  val minimize_applies: bool
   type axiom_name = string
   type polarity = bool
   type clause_id = int
@@ -53,22 +53,18 @@
 (*The different translations of types*)
 datatype type_level = T_FULL | T_CONST;
 
-val typ_level = ref T_CONST;
+val typ_level = T_CONST;
 
 (*If true, each function will be directly applied to as many arguments as possible, avoiding
   use of the "apply" operator. Use of hBOOL is also minimized.*)
-val minimize_applies = ref true;
-
-val const_min_arity = ref (Symtab.empty : int Symtab.table);
+val minimize_applies = true;
 
-val const_needs_hBOOL = ref (Symtab.empty : bool Symtab.table);
-
-fun min_arity_of c = getOpt (Symtab.lookup(!const_min_arity) c, 0);
+fun min_arity_of const_min_arity c = getOpt (Symtab.lookup const_min_arity c, 0);
 
 (*True if the constant ever appears outside of the top-level position in literals.
   If false, the constant always receives all of its arguments and is used as a predicate.*)
-fun needs_hBOOL c = not (!minimize_applies) orelse
-                    getOpt (Symtab.lookup(!const_needs_hBOOL) c, false);
+fun needs_hBOOL const_needs_hBOOL c = not minimize_applies orelse
+                    getOpt (Symtab.lookup const_needs_hBOOL c, false);
 
 
 (******************************************************)
@@ -110,67 +106,68 @@
 
 fun isTaut (Clause {literals,...}) = exists isTrue literals;
 
-fun type_of (Type (a, Ts)) =
-      let val (folTypes,ts) = types_of Ts
-      in  (RC.Comp(RC.make_fixed_type_const a, folTypes), ts)  end
-  | type_of (tp as (TFree(a,s))) =
+fun type_of dfg (Type (a, Ts)) =
+      let val (folTypes,ts) = types_of dfg Ts
+      in  (RC.Comp(RC.make_fixed_type_const dfg a, folTypes), ts)  end
+  | type_of dfg (tp as (TFree(a,s))) =
       (RC.AtomF (RC.make_fixed_type_var a), [tp])
-  | type_of (tp as (TVar(v,s))) =
+  | type_of dfg (tp as (TVar(v,s))) =
       (RC.AtomV (RC.make_schematic_type_var v), [tp])
-and types_of Ts =
-      let val (folTyps,ts) = ListPair.unzip (map type_of Ts)
+and types_of dfg Ts =
+      let val (folTyps,ts) = ListPair.unzip (map (type_of dfg) Ts)
       in  (folTyps, RC.union_all ts)  end;
 
 (* same as above, but no gathering of sort information *)
-fun simp_type_of (Type (a, Ts)) =
-      RC.Comp(RC.make_fixed_type_const a, map simp_type_of Ts)
-  | simp_type_of (TFree (a,s)) = RC.AtomF(RC.make_fixed_type_var a)
-  | simp_type_of (TVar (v,s)) = RC.AtomV(RC.make_schematic_type_var v);
+fun simp_type_of dfg (Type (a, Ts)) =
+      RC.Comp(RC.make_fixed_type_const dfg a, map (simp_type_of dfg) Ts)
+  | simp_type_of dfg (TFree (a,s)) = RC.AtomF(RC.make_fixed_type_var a)
+  | simp_type_of dfg (TVar (v,s)) = RC.AtomV(RC.make_schematic_type_var v);
 
 
-fun const_type_of thy (c,t) =
-      let val (tp,ts) = type_of t
-      in  (tp, ts, map simp_type_of (Sign.const_typargs thy (c,t))) end;
+fun const_type_of dfg thy (c,t) =
+      let val (tp,ts) = type_of dfg t
+      in  (tp, ts, map (simp_type_of dfg) (Sign.const_typargs thy (c,t))) end;
 
 (* convert a Term.term (with combinators) into a combterm, also accumulate sort info *)
-fun combterm_of thy (Const(c,t)) =
-      let val (tp,ts,tvar_list) = const_type_of thy (c,t)
-          val c' = CombConst(RC.make_fixed_const c, tp, tvar_list)
+fun combterm_of dfg thy (Const(c,t)) =
+      let val (tp,ts,tvar_list) = const_type_of dfg thy (c,t)
+          val c' = CombConst(RC.make_fixed_const dfg c, tp, tvar_list)
       in  (c',ts)  end
-  | combterm_of thy (Free(v,t)) =
-      let val (tp,ts) = type_of t
+  | combterm_of dfg thy (Free(v,t)) =
+      let val (tp,ts) = type_of dfg t
           val v' = CombConst(RC.make_fixed_var v, tp, [])
       in  (v',ts)  end
-  | combterm_of thy (Var(v,t)) =
-      let val (tp,ts) = type_of t
+  | combterm_of dfg thy (Var(v,t)) =
+      let val (tp,ts) = type_of dfg t
           val v' = CombVar(RC.make_schematic_var v,tp)
       in  (v',ts)  end
-  | combterm_of thy (P $ Q) =
-      let val (P',tsP) = combterm_of thy P
-          val (Q',tsQ) = combterm_of thy Q
+  | combterm_of dfg thy (P $ Q) =
+      let val (P',tsP) = combterm_of dfg thy P
+          val (Q',tsQ) = combterm_of dfg thy Q
       in  (CombApp(P',Q'), tsP union tsQ)  end
-  | combterm_of thy (t as Abs _) = raise RC.CLAUSE("HOL CLAUSE",t);
+  | combterm_of _ thy (t as Abs _) = raise RC.CLAUSE("HOL CLAUSE",t);
 
-fun predicate_of thy ((Const("Not",_) $ P), polarity) = predicate_of thy (P, not polarity)
-  | predicate_of thy (t,polarity) = (combterm_of thy (Envir.eta_contract t), polarity);
+fun predicate_of dfg thy ((Const("Not",_) $ P), polarity) = predicate_of dfg thy (P, not polarity)
+  | predicate_of dfg thy (t,polarity) = (combterm_of dfg thy (Envir.eta_contract t), polarity);
 
-fun literals_of_term1 thy args (Const("Trueprop",_) $ P) = literals_of_term1 thy args P
-  | literals_of_term1 thy args (Const("op |",_) $ P $ Q) =
-      literals_of_term1 thy (literals_of_term1 thy args P) Q
-  | literals_of_term1 thy (lits,ts) P =
-      let val ((pred,ts'),pol) = predicate_of thy (P,true)
+fun literals_of_term1 dfg thy args (Const("Trueprop",_) $ P) = literals_of_term1 dfg thy args P
+  | literals_of_term1 dfg thy args (Const("op |",_) $ P $ Q) =
+      literals_of_term1 dfg thy (literals_of_term1 dfg thy args P) Q
+  | literals_of_term1 dfg thy (lits,ts) P =
+      let val ((pred,ts'),pol) = predicate_of dfg thy (P,true)
       in
           (Literal(pol,pred)::lits, ts union ts')
       end;
 
-fun literals_of_term thy P = literals_of_term1 thy ([],[]) P;
+fun literals_of_term_dfg dfg thy P = literals_of_term1 dfg thy ([],[]) P;
+val literals_of_term = literals_of_term_dfg false;
 
 (* Problem too trivial for resolution (empty clause) *)
 exception TOO_TRIVIAL;
 
 (* making axiom and conjecture clauses *)
-fun make_clause thy (clause_id,axiom_name,kind,th) =
-    let val (lits,ctypes_sorts) = literals_of_term thy (prop_of th)
+fun make_clause dfg thy (clause_id,axiom_name,kind,th) =
+    let val (lits,ctypes_sorts) = literals_of_term_dfg dfg thy (prop_of th)
     in
         if forall isFalse lits
         then raise TOO_TRIVIAL
@@ -180,20 +177,20 @@
     end;
 
 
-fun add_axiom_clause thy ((th,(name,id)), pairs) =
-  let val cls = make_clause thy (id, name, RC.Axiom, th)
+fun add_axiom_clause dfg thy ((th,(name,id)), pairs) =
+  let val cls = make_clause dfg thy (id, name, RC.Axiom, th)
   in
       if isTaut cls then pairs else (name,cls)::pairs
   end;
 
-fun make_axiom_clauses thy = foldl (add_axiom_clause thy) [];
+fun make_axiom_clauses dfg thy = foldl (add_axiom_clause dfg thy) [];
 
-fun make_conjecture_clauses_aux _ _ [] = []
-  | make_conjecture_clauses_aux thy n (th::ths) =
-      make_clause thy (n,"conjecture", RC.Conjecture, th) ::
-      make_conjecture_clauses_aux thy (n+1) ths;
+fun make_conjecture_clauses_aux dfg _ _ [] = []
+  | make_conjecture_clauses_aux dfg thy n (th::ths) =
+      make_clause dfg thy (n,"conjecture", RC.Conjecture, th) ::
+      make_conjecture_clauses_aux dfg thy (n+1) ths;
 
-fun make_conjecture_clauses thy = make_conjecture_clauses_aux thy 0;
+fun make_conjecture_clauses dfg thy = make_conjecture_clauses_aux dfg thy 0;
 
 
 (**********************************************************************)
@@ -218,11 +215,11 @@
 
 val type_wrapper = "ti";
 
-fun head_needs_hBOOL (CombConst(c,_,_)) = needs_hBOOL c
-  | head_needs_hBOOL _ = true;
+fun head_needs_hBOOL const_needs_hBOOL (CombConst(c,_,_)) = needs_hBOOL const_needs_hBOOL c
+  | head_needs_hBOOL const_needs_hBOOL _ = true;
 
 fun wrap_type (s, tp) =
-  if !typ_level=T_FULL then
+  if typ_level=T_FULL then
       type_wrapper ^ RC.paren_pack [s, RC.string_of_fol_type tp]
   else s;
 
@@ -235,43 +232,43 @@
 
 (*Apply an operator to the argument strings, using either the "apply" operator or
   direct function application.*)
-fun string_of_applic (CombConst(c,tp,tvars), args) =
+fun string_of_applic cma (CombConst(c,tp,tvars), args) =
       let val c = if c = "equal" then "c_fequal" else c
-          val nargs = min_arity_of c
+          val nargs = min_arity_of cma c
           val args1 = List.take(args, nargs)
             handle Subscript => error ("string_of_applic: " ^ c ^ " has arity " ^
                                          Int.toString nargs ^ " but is applied to " ^
                                          space_implode ", " args)
           val args2 = List.drop(args, nargs)
-          val targs = if !typ_level = T_CONST then map RC.string_of_fol_type tvars
+          val targs = if typ_level = T_CONST then map RC.string_of_fol_type tvars
                       else []
       in
           string_apply (c ^ RC.paren_pack (args1@targs), args2)
       end
-  | string_of_applic (CombVar(v,tp), args) = string_apply (v, args)
-  | string_of_applic _ = error "string_of_applic";
+  | string_of_applic cma (CombVar(v,tp), args) = string_apply (v, args)
+  | string_of_applic _ _ = error "string_of_applic";
 
-fun wrap_type_if (head, s, tp) = if head_needs_hBOOL head then wrap_type (s, tp) else s;
+fun wrap_type_if cnh (head, s, tp) = if head_needs_hBOOL cnh head then wrap_type (s, tp) else s;
 
-fun string_of_combterm t =
+fun string_of_combterm cma cnh t =
   let val (head, args) = strip_comb t
-  in  wrap_type_if (head,
-                    string_of_applic (head, map string_of_combterm args),
+  in  wrap_type_if cnh (head,
+                    string_of_applic cma (head, map (string_of_combterm cma cnh) args),
                     type_of_combterm t)
   end;
 
 (*Boolean-valued terms are here converted to literals.*)
-fun boolify t = "hBOOL" ^ RC.paren_pack [string_of_combterm t];
+fun boolify cma cnh t = "hBOOL" ^ RC.paren_pack [string_of_combterm cma cnh t];
 
-fun string_of_predicate t =
+fun string_of_predicate cma cnh t =
   case t of
       (CombApp(CombApp(CombConst("equal",_,_), t1), t2)) =>
           (*DFG only: new TPTP prefers infix equality*)
-          ("equal" ^ RC.paren_pack [string_of_combterm t1, string_of_combterm t2])
+          ("equal" ^ RC.paren_pack [string_of_combterm cma cnh t1, string_of_combterm cma cnh t2])
     | _ =>
           case #1 (strip_comb t) of
-              CombConst(c,_,_) => if needs_hBOOL c then boolify t else string_of_combterm t
-            | _ => boolify t;
+              CombConst(c,_,_) => if needs_hBOOL cnh c then boolify cma cnh t else string_of_combterm cma cnh t
+            | _ => boolify cma cnh t;
 
 fun string_of_clausename (cls_id,ax_name) =
     RC.clause_prefix ^ RC.ascii_of ax_name ^ "_" ^ Int.toString cls_id;
@@ -282,23 +279,23 @@
 
 (*** tptp format ***)
 
-fun tptp_of_equality pol (t1,t2) =
+fun tptp_of_equality cma cnh pol (t1,t2) =
   let val eqop = if pol then " = " else " != "
-  in  string_of_combterm t1 ^ eqop ^ string_of_combterm t2  end;
+  in  string_of_combterm cma cnh t1 ^ eqop ^ string_of_combterm cma cnh t2  end;
 
-fun tptp_literal (Literal(pol, CombApp(CombApp(CombConst("equal",_,_), t1), t2))) =
-      tptp_of_equality pol (t1,t2)
-  | tptp_literal (Literal(pol,pred)) =
-      RC.tptp_sign pol (string_of_predicate pred);
+fun tptp_literal cma cnh (Literal(pol, CombApp(CombApp(CombConst("equal",_,_), t1), t2))) =
+      tptp_of_equality cma cnh pol (t1,t2)
+  | tptp_literal cma cnh (Literal(pol,pred)) =
+      RC.tptp_sign pol (string_of_predicate cma cnh pred);
 
 (*Given a clause, returns its literals paired with a list of literals concerning TFrees;
   the latter should only occur in conjecture clauses.*)
-fun tptp_type_lits pos (Clause{literals, ctypes_sorts, ...}) =
-      (map tptp_literal literals, 
+fun tptp_type_lits cma cnh pos (Clause{literals, ctypes_sorts, ...}) =
+      (map (tptp_literal cma cnh) literals, 
        map (RC.tptp_of_typeLit pos) (RC.add_typs ctypes_sorts));
 
-fun clause2tptp (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
-  let val (lits,tylits) = tptp_type_lits (kind = RC.Conjecture) cls
+fun clause2tptp cma cnh (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
+  let val (lits,tylits) = tptp_type_lits cma cnh (kind = RC.Conjecture) cls
   in
       (RC.gen_tptp_cls(clause_id,axiom_name,kind,lits,tylits), tylits)
   end;
@@ -306,10 +303,10 @@
 
 (*** dfg format ***)
 
-fun dfg_literal (Literal(pol,pred)) = RC.dfg_sign pol (string_of_predicate pred);
+fun dfg_literal cma cnh (Literal(pol,pred)) = RC.dfg_sign pol (string_of_predicate cma cnh pred);
 
-fun dfg_type_lits pos (Clause{literals, ctypes_sorts, ...}) =
-      (map dfg_literal literals, 
+fun dfg_type_lits cma cnh pos (Clause{literals, ctypes_sorts, ...}) =
+      (map (dfg_literal cma cnh) literals, 
        map (RC.dfg_of_typeLit pos) (RC.add_typs ctypes_sorts));
 
 fun get_uvars (CombConst _) vars = vars
@@ -320,8 +317,8 @@
 
 fun dfg_vars (Clause {literals,...}) = RC.union_all (map get_uvars_l literals);
 
-fun clause2dfg (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
-  let val (lits,tylits) = dfg_type_lits (kind = RC.Conjecture) cls
+fun clause2dfg cma cnh (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
+  let val (lits,tylits) = dfg_type_lits cma cnh (kind = RC.Conjecture) cls
       val vars = dfg_vars cls
       val tvars = RC.get_tvar_strs ctypes_sorts
   in
@@ -333,30 +330,30 @@
 
 fun addtypes tvars tab = foldl RC.add_foltype_funcs tab tvars;
 
-fun add_decls (CombConst(c,tp,tvars), (funcs,preds)) =
+fun add_decls cma cnh (CombConst(c,tp,tvars), (funcs,preds)) =
       if c = "equal" then (addtypes tvars funcs, preds)
       else
-	let val arity = min_arity_of c
-	    val ntys = if !typ_level = T_CONST then length tvars else 0
+	let val arity = min_arity_of cma c
+	    val ntys = if typ_level = T_CONST then length tvars else 0
 	    val addit = Symtab.update(c, arity+ntys)
 	in
-	    if needs_hBOOL c then (addtypes tvars (addit funcs), preds)
+	    if needs_hBOOL cnh c then (addtypes tvars (addit funcs), preds)
 	    else (addtypes tvars funcs, addit preds)
 	end
-  | add_decls (CombVar(_,ctp), (funcs,preds)) =
+  | add_decls _ _ (CombVar(_,ctp), (funcs,preds)) =
       (RC.add_foltype_funcs (ctp,funcs), preds)
-  | add_decls (CombApp(P,Q),decls) = add_decls(P,add_decls (Q,decls));
+  | add_decls cma cnh (CombApp(P,Q),decls) = add_decls cma cnh (P,add_decls cma cnh (Q,decls));
 
-fun add_literal_decls (Literal(_,c), decls) = add_decls (c,decls);
+fun add_literal_decls cma cnh (Literal(_,c), decls) = add_decls cma cnh (c,decls);
 
-fun add_clause_decls (Clause {literals, ...}, decls) =
-    foldl add_literal_decls decls literals
+fun add_clause_decls cma cnh (Clause {literals, ...}, decls) =
+    foldl (add_literal_decls cma cnh) decls literals
     handle Symtab.DUP a => error ("function " ^ a ^ " has multiple arities")
 
-fun decls_of_clauses clauses arity_clauses =
+fun decls_of_clauses cma cnh clauses arity_clauses =
   let val init_functab = Symtab.update (type_wrapper,2) (Symtab.update ("hAPP",2) RC.init_functab)
       val init_predtab = Symtab.update ("hBOOL",1) Symtab.empty
-      val (functab,predtab) = (foldl add_clause_decls (init_functab, init_predtab) clauses)
+      val (functab,predtab) = (foldl (add_clause_decls cma cnh) (init_functab, init_predtab) clauses)
   in
       (Symtab.dest (foldl RC.add_arityClause_funcs functab arity_clauses),
        Symtab.dest predtab)
@@ -402,7 +399,7 @@
 fun cnf_helper_thms thy =
   ResAxioms.cnf_rules_pairs thy o map ResAxioms.pairname
 
-fun get_helper_clauses thy isFO (conjectures, axclauses, user_lemmas) =
+fun get_helper_clauses dfg thy isFO (conjectures, axclauses, user_lemmas) =
   if isFO then []  (*first-order*)
   else
     let val ct0 = foldl count_clause init_counters conjectures
@@ -419,66 +416,67 @@
                 else []
         val other = cnf_helper_thms thy [ext,fequal_imp_equal,equal_imp_fequal]
     in
-        map #2 (make_axiom_clauses thy (other @ IK @ BC @ S))
+        map #2 (make_axiom_clauses dfg thy (other @ IK @ BC @ S))
     end;
 
 (*Find the minimal arity of each function mentioned in the term. Also, note which uses
   are not at top level, to see if hBOOL is needed.*)
-fun count_constants_term toplev t =
+fun count_constants_term toplev t (const_min_arity, const_needs_hBOOL) =
   let val (head, args) = strip_comb t
       val n = length args
-      val _ = List.app (count_constants_term false) args
+      val (const_min_arity, const_needs_hBOOL) = fold (count_constants_term false) args (const_min_arity, const_needs_hBOOL)
   in
       case head of
           CombConst (a,_,_) => (*predicate or function version of "equal"?*)
             let val a = if a="equal" andalso not toplev then "c_fequal" else a
+            val const_min_arity = Symtab.map_default (a,n) (curry Int.min n) const_min_arity
             in
-              const_min_arity := Symtab.map_default (a,n) (curry Int.min n) (!const_min_arity);
-              if toplev then ()
-              else const_needs_hBOOL := Symtab.update (a,true) (!const_needs_hBOOL)
+              if toplev then (const_min_arity, const_needs_hBOOL)
+              else (const_min_arity, Symtab.update (a,true) (const_needs_hBOOL))
             end
-        | ts => ()
+        | ts => (const_min_arity, const_needs_hBOOL)
   end;
 
 (*A literal is a top-level term*)
-fun count_constants_lit (Literal (_,t)) = count_constants_term true t;
-
-fun count_constants_clause (Clause{literals,...}) = List.app count_constants_lit literals;
+fun count_constants_lit (Literal (_,t)) (const_min_arity, const_needs_hBOOL) =
+  count_constants_term true t (const_min_arity, const_needs_hBOOL);
 
-fun display_arity (c,n) =
+fun count_constants_clause (Clause{literals,...}) (const_min_arity, const_needs_hBOOL) =
+  fold count_constants_lit literals (const_min_arity, const_needs_hBOOL);
+
+fun display_arity const_needs_hBOOL (c,n) =
   Output.debug (fn () => "Constant: " ^ c ^ " arity:\t" ^ Int.toString n ^
-                (if needs_hBOOL c then " needs hBOOL" else ""));
+                (if needs_hBOOL const_needs_hBOOL c then " needs hBOOL" else ""));
 
 fun count_constants (conjectures, axclauses, helper_clauses) =
-  if !minimize_applies then
-    (const_min_arity := Symtab.empty;
-     const_needs_hBOOL := Symtab.empty;
-     List.app count_constants_clause conjectures;
-     List.app count_constants_clause axclauses;
-     List.app count_constants_clause helper_clauses;
-     List.app display_arity (Symtab.dest (!const_min_arity)))
-  else ();
+  if minimize_applies then
+     let val (const_min_arity, const_needs_hBOOL) =
+          fold count_constants_clause conjectures (Symtab.empty, Symtab.empty)
+       |> fold count_constants_clause axclauses
+       |> fold count_constants_clause helper_clauses
+     val _ = List.app (display_arity const_needs_hBOOL) (Symtab.dest (const_min_arity))
+     in (const_min_arity, const_needs_hBOOL) end
+  else (Symtab.empty, Symtab.empty);
 
 (* tptp format *)
 
 (* write TPTP format to a single file *)
 fun tptp_write_file thy isFO thms filename (ax_tuples,classrel_clauses,arity_clauses) user_lemmas =
     let val _ = Output.debug (fn () => ("Preparing to write the TPTP file " ^ filename))
-        val _ = RC.dfg_format := false
-        val conjectures = make_conjecture_clauses thy thms
-        val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses thy ax_tuples)
-        val helper_clauses = get_helper_clauses thy isFO (conjectures, axclauses, user_lemmas)
-        val _ = count_constants (conjectures, axclauses, helper_clauses);
-        val (tptp_clss,tfree_litss) = ListPair.unzip (map clause2tptp conjectures)
+        val conjectures = make_conjecture_clauses false thy thms
+        val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses false thy ax_tuples)
+        val helper_clauses = get_helper_clauses false thy isFO (conjectures, axclauses, user_lemmas)
+        val (const_min_arity, const_needs_hBOOL) = count_constants (conjectures, axclauses, helper_clauses);
+        val (tptp_clss,tfree_litss) = ListPair.unzip (map (clause2tptp const_min_arity const_needs_hBOOL) conjectures)
         val tfree_clss = map RC.tptp_tfree_clause (foldl (op union_string) [] tfree_litss)
         val out = TextIO.openOut filename
     in
-        List.app (curry TextIO.output out o #1 o clause2tptp) axclauses;
+        List.app (curry TextIO.output out o #1 o (clause2tptp const_min_arity const_needs_hBOOL)) axclauses;
         RC.writeln_strs out tfree_clss;
         RC.writeln_strs out tptp_clss;
         List.app (curry TextIO.output out o RC.tptp_classrelClause) classrel_clauses;
         List.app (curry TextIO.output out o RC.tptp_arity_clause) arity_clauses;
-        List.app (curry TextIO.output out o #1 o clause2tptp) helper_clauses;
+        List.app (curry TextIO.output out o #1 o (clause2tptp const_min_arity const_needs_hBOOL)) helper_clauses;
         TextIO.closeOut out;
         clnames
     end;
@@ -488,18 +486,17 @@
 
 fun dfg_write_file thy isFO thms filename (ax_tuples,classrel_clauses,arity_clauses) user_lemmas =
     let val _ = Output.debug (fn () => ("Preparing to write the DFG file " ^ filename))
-        val _ = RC.dfg_format := true
-        val conjectures = make_conjecture_clauses thy thms
-        val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses thy ax_tuples)
-        val helper_clauses = get_helper_clauses thy isFO (conjectures, axclauses, user_lemmas)
-        val _ = count_constants (conjectures, axclauses, helper_clauses);
-        val (dfg_clss, tfree_litss) = ListPair.unzip (map clause2dfg conjectures)
+        val conjectures = make_conjecture_clauses true thy thms
+        val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses true thy ax_tuples)
+        val helper_clauses = get_helper_clauses true thy isFO (conjectures, axclauses, user_lemmas)
+        val (const_min_arity, const_needs_hBOOL) = count_constants (conjectures, axclauses, helper_clauses);
+        val (dfg_clss, tfree_litss) = ListPair.unzip (map (clause2dfg const_min_arity const_needs_hBOOL) conjectures)
         and probname = Path.implode (Path.base (Path.explode filename))
-        val axstrs = map (#1 o clause2dfg) axclauses
+        val axstrs = map (#1 o (clause2dfg const_min_arity const_needs_hBOOL)) axclauses
         val tfree_clss = map RC.dfg_tfree_clause (RC.union_all tfree_litss)
         val out = TextIO.openOut filename
-        val helper_clauses_strs = map (#1 o clause2dfg) helper_clauses
-        val (funcs,cl_preds) = decls_of_clauses (helper_clauses @ conjectures @ axclauses) arity_clauses
+        val helper_clauses_strs = map (#1 o (clause2dfg const_min_arity const_needs_hBOOL)) helper_clauses
+        val (funcs,cl_preds) = decls_of_clauses const_min_arity const_needs_hBOOL (helper_clauses @ conjectures @ axclauses) arity_clauses
         and ty_preds = preds_of_clauses axclauses classrel_clauses arity_clauses
     in
         TextIO.output (out, RC.string_of_start probname);
--- a/src/HOL/ex/ROOT.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOL/ex/ROOT.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -93,4 +93,5 @@
   use_thy "Sudoku"
 else ();
 
-HTML.with_charset "utf-8" (no_document use_thys) ["Hebrew", "Chinese"];
+HTML.with_charset "utf-8" (no_document use_thys)
+  ["Hebrew", "Chinese", "Serbian"];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/ex/Serbian.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,217 @@
+(* -*- coding: utf-8 -*- :encoding=utf-8:  
+    Author:     Filip Maric
+
+Example theory involving Unicode characters (UTF-8 encoding) -- 
+Conversion between Serbian cyrillic and latin letters (српска ћирилица и латиница)
+*)
+
+header {* A Serbian theory *}
+
+theory Serbian
+imports Main
+begin
+
+text{* Serbian cyrillic letters *}
+datatype azbuka =
+  azbA   ("А")
+| azbB   ("Б")
+| azbV   ("В")
+| azbG   ("Г")
+| azbD   ("Д")
+| azbDj  ("Ђ")
+| azbE   ("Е")
+| azbZv  ("Ж")
+| azbZ   ("З")
+| azbI   ("И")
+| azbJ   ("Ј")
+| azbK   ("К")
+| azbL   ("Л")
+| azbLj  ("Љ")
+| azbM   ("М")
+| azbN   ("Н")
+| azbNj  ("Њ")
+| azbO   ("О")
+| azbP   ("П")
+| azbR   ("Р")
+| azbS   ("С")
+| azbT   ("Т")
+| azbC'  ("Ћ")
+| azbU   ("У")
+| azbF   ("Ф")
+| azbH   ("Х")
+| azbC   ("Ц")
+| azbCv  ("Ч")
+| azbDzv ("Џ")
+| azbSv  ("Ш")
+| azbSpc
+
+thm azbuka.induct
+
+text{* Serbian latin letters *}
+datatype abeceda =
+  abcA   ("A")
+| abcB   ("B")
+| abcC   ("C")
+| abcCv  ("Č")
+| abcC'  ("Ć")
+| abcD   ("D")
+| abcE   ("E")
+| abcF   ("F")
+| abcG   ("G")
+| abcH   ("H")
+| abcI   ("I")
+| abcJ   ("J")
+| abcK   ("K")
+| abcL   ("L")
+| abcM   ("M")
+| abcN   ("N")
+| abcO   ("O")
+| abcP   ("P")
+| abcR   ("R")
+| abcS   ("S")
+| abcSv  ("Š")
+| abcT   ("T")
+| abcU   ("U")
+| abcV   ("V")
+| abcZ   ("Z")
+| abcvZ  ("Ž")
+| abcSpc
+
+thm abeceda.induct
+
+
+text{* Conversion from cyrillic to latin - 
+       this conversion is valid in all cases *}
+primrec azb2abc_aux :: "azbuka \<Rightarrow> abeceda list"
+where
+  "azb2abc_aux А = [A]"
+| "azb2abc_aux Б = [B]"
+| "azb2abc_aux В = [V]"
+| "azb2abc_aux Г = [G]"
+| "azb2abc_aux Д = [D]"
+| "azb2abc_aux Ђ = [D, J]"
+| "azb2abc_aux Е = [E]"
+| "azb2abc_aux Ж = [Ž]"
+| "azb2abc_aux З = [Z]"
+| "azb2abc_aux И = [I]"
+| "azb2abc_aux Ј = [J]"
+| "azb2abc_aux К = [K]"
+| "azb2abc_aux Л = [L]"
+| "azb2abc_aux Љ = [L, J]"
+| "azb2abc_aux М = [M]"
+| "azb2abc_aux Н = [N]"
+| "azb2abc_aux Њ = [N, J]"
+| "azb2abc_aux О = [O]"
+| "azb2abc_aux П = [P]"
+| "azb2abc_aux Р = [R]"
+| "azb2abc_aux С = [S]"
+| "azb2abc_aux Т = [T]"
+| "azb2abc_aux Ћ = [Ć]"
+| "azb2abc_aux У = [U]"
+| "azb2abc_aux Ф = [F]"
+| "azb2abc_aux Х = [H]"
+| "azb2abc_aux Ц = [C]"
+| "azb2abc_aux Ч = [Č]"
+| "azb2abc_aux Џ = [D, Ž]"
+| "azb2abc_aux Ш = [Š]"
+| "azb2abc_aux azbSpc = [abcSpc]"
+
+primrec azb2abc :: "azbuka list \<Rightarrow> abeceda list"
+where
+  "azb2abc [] = []"
+| "azb2abc (x # xs) = azb2abc_aux x @ azb2abc xs"
+
+value "azb2abc [Д, О, Б, А, Р, azbSpc, Д, А, Н, azbSpc, С, В, И, М, А]"
+value "azb2abc [Љ, У, Б, И, Ч, И, Ц, А, azbSpc, Н, А, azbSpc, П, О, Љ, У]"
+
+text{* The conversion from latin to cyrillic - 
+       this conversion is valid in most cases but there are some exceptions *}
+primrec abc2azb_aux :: "abeceda \<Rightarrow> azbuka"
+where
+   "abc2azb_aux A = А"
+|  "abc2azb_aux B = Б"
+|  "abc2azb_aux C = Ц"
+|  "abc2azb_aux Č = Ч"
+|  "abc2azb_aux Ć = Ћ"
+|  "abc2azb_aux D = Д"
+|  "abc2azb_aux E = Е"
+|  "abc2azb_aux F = Ф"
+|  "abc2azb_aux G = Г"
+|  "abc2azb_aux H = Х"
+|  "abc2azb_aux I = И"
+|  "abc2azb_aux J = Ј"
+|  "abc2azb_aux K = К"
+|  "abc2azb_aux L = Л"
+|  "abc2azb_aux M = М"
+|  "abc2azb_aux N = Н"
+|  "abc2azb_aux O = О"
+|  "abc2azb_aux P = П"
+|  "abc2azb_aux R = Р"
+|  "abc2azb_aux S = С"
+|  "abc2azb_aux Š = Ш"
+|  "abc2azb_aux T = Т"
+|  "abc2azb_aux U = У"
+|  "abc2azb_aux V = В"
+|  "abc2azb_aux Z = З"
+|  "abc2azb_aux Ž = Ж"
+|  "abc2azb_aux abcSpc = azbSpc"
+
+fun abc2azb :: "abeceda list \<Rightarrow> azbuka list"
+where
+  "abc2azb [] = []"
+| "abc2azb [x] = [abc2azb_aux x]"
+| "abc2azb (x1 # x2 # xs) = 
+       (if x1 = D \<and> x2 = J then
+           Ђ # abc2azb xs
+        else if x1 = L \<and> x2 = J then
+           Љ # abc2azb xs
+        else if x1 = N \<and> x2 = J then
+           Њ # abc2azb xs
+        else if x1 = D \<and> x2 = Ž then
+           Џ # abc2azb xs
+        else
+           abc2azb_aux x1 # abc2azb (x2 # xs)
+       )"
+
+value "abc2azb [D, O, B, A, R, abcSpc, D, A, N, abcSpc, S, V, I, M, A]"
+value "abc2azb [L, J, U, B, I, Č, I, C, A, abcSpc, N, A, abcSpc, P, O, L, J, U]"
+
+text{* Here are some invalid conversions *}
+lemma "abc2azb [N, A, D, Ž, I, V, E, T, I] = [Н, А, Џ, И, В, Е, Т, И]"
+  by simp
+text{* but it should be: НАДЖИВЕТИ *}
+lemma "abc2azb [I, N, J, E, K, C, I, J, A] = [И, Њ, Е, К, Ц, И, Ј, А]"
+  by simp
+text{* but it should be: ИНЈЕКЦИЈА *}
+
+text{* The conversion fails for all cyrillic words that contain НЈ ЛЈ ДЈ ДЖ *}
+
+
+text{* Idempotency in one direction *}
+lemma [simp]: "azb2abc_aux (abc2azb_aux x) = [x]"
+  by (cases x) auto
+
+lemma [simp]: "abc2azb (Ž # xs) = Ж # abc2azb xs"
+  by (cases xs) auto
+
+lemma [simp]: "abc2azb (J # xs) = Ј # abc2azb xs"
+  by (cases xs) auto
+
+theorem "azb2abc (abc2azb x) = x"
+proof (induct x)
+  case (Cons x1 xs)
+  thus ?case
+  proof (cases xs)
+    case (Cons x2 xss)
+    thus ?thesis
+      using `azb2abc (abc2azb xs) = xs`
+      by auto
+  qed simp
+qed simp
+
+text{* Idempotency in the other direction does not hold *}
+lemma "abc2azb (azb2abc [И, Н, Ј, Е, К, Ц, И, Ј, А]) \<noteq> [И, Н, Ј, Е, К, Ц, И, Ј, А]"
+  by simp
+text{* It fails for all cyrillic words that contain НЈ ЛЈ ДЈ ДЖ *}
+
+end
--- a/src/HOLCF/Tools/fixrec_package.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOLCF/Tools/fixrec_package.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -9,8 +9,12 @@
   val legacy_infer_term: theory -> term -> term
   val legacy_infer_prop: theory -> term -> term
 
-  val add_fixrec: bool -> (Attrib.binding * string) list list -> theory -> theory
-  val add_fixrec_i: bool -> ((binding * attribute list) * term) list list -> theory -> theory
+  val add_fixrec: bool -> (binding * string option * mixfix) list
+    -> (Attrib.binding * string) list -> local_theory -> local_theory
+
+  val add_fixrec_i: bool -> (binding * typ option * mixfix) list
+    -> (Attrib.binding * term) list -> local_theory -> local_theory
+
   val add_fixpat: Attrib.binding * string list -> theory -> theory
   val add_fixpat_i: (binding * attribute list) * term list -> theory -> theory
   val add_matchers: (string * string) list -> theory -> theory
@@ -166,30 +170,34 @@
 (************* fixed-point definitions and unfolding theorems ************)
 (*************************************************************************)
 
-fun add_fixdefs eqs thy =
+fun add_fixdefs
+  (fixes : ((binding * typ) * mixfix) list)
+  (spec : (Attrib.binding * term) list)
+  (lthy : local_theory) =
   let
-    val (lhss,rhss) = ListPair.unzip (map dest_eqs eqs);
+    val names = map (Binding.base_name o fst o fst) fixes;
+    val all_names = space_implode "_" names;
+    val (lhss,rhss) = ListPair.unzip (map (dest_eqs o snd) spec);
     val fixpoint = mk_fix (lambda_ctuple lhss (mk_ctuple rhss));
     
-    fun one_def (l as Const(n,T)) r =
-          let val b = Sign.base_name n in (b, (b^"_def", l == r)) end
+    fun one_def (l as Free(n,_)) r =
+          let val b = Sign.base_name n
+          in ((Binding.name (b^"_def"), []), r) end
       | one_def _ _ = fixrec_err "fixdefs: lhs not of correct form";
     fun defs [] _ = []
       | defs (l::[]) r = [one_def l r]
       | defs (l::ls) r = one_def l (mk_cfst r) :: defs ls (mk_csnd r);
-    val (names, fixdefs) = ListPair.unzip (defs lhss fixpoint);
-    
-    val (fixdef_thms, thy') =
-      PureThy.add_defs false (map (Thm.no_attributes o apfst Binding.name) fixdefs) thy;
-    val ctuple_fixdef_thm = foldr1 (fn (x,y) => @{thm cpair_equalI} OF [x,y]) fixdef_thms;
-    
-    val ctuple_unfold = mk_trp (mk_ctuple lhss === mk_ctuple rhss);
-    val ctuple_unfold_thm = Goal.prove_global thy' [] [] ctuple_unfold
-          (fn _ => EVERY [rtac (ctuple_fixdef_thm RS fix_eq2 RS trans) 1,
-                    simp_tac (simpset_of thy') 1]);
-    val ctuple_induct_thm =
-          (space_implode "_" names ^ "_induct", ctuple_fixdef_thm RS def_fix_ind);
-    
+    val fixdefs = defs lhss fixpoint;
+    val define_all = fold_map (LocalTheory.define Thm.definitionK);
+    val (fixdef_thms : (term * (string * thm)) list, lthy') = lthy
+      |> define_all (map (apfst fst) fixes ~~ fixdefs);
+    fun cpair_equalI (thm1, thm2) = @{thm cpair_equalI} OF [thm1, thm2];
+    val ctuple_fixdef_thm = foldr1 cpair_equalI (map (snd o snd) fixdef_thms);
+    val ctuple_induct_thm = ctuple_fixdef_thm RS def_fix_ind;
+    val ctuple_unfold_thm =
+      Goal.prove lthy' [] [] (mk_trp (mk_ctuple lhss === mk_ctuple rhss))
+        (fn _ => EVERY [rtac (ctuple_fixdef_thm RS fix_eq2 RS trans) 1,
+                   simp_tac (local_simpset_of lthy') 1]);
     fun unfolds [] thm = []
       | unfolds (n::[]) thm = [(n^"_unfold", thm)]
       | unfolds (n::ns) thm = let
@@ -197,10 +205,12 @@
           val thmR = thm RS @{thm cpair_eqD2};
         in (n^"_unfold", thmL) :: unfolds ns thmR end;
     val unfold_thms = unfolds names ctuple_unfold_thm;
-    val thms = ctuple_induct_thm :: unfold_thms;
-    val (_, thy'') = PureThy.add_thms (map (Thm.no_attributes o apfst Binding.name) thms) thy';
+    fun mk_note (n, thm) = ((Binding.name n, []), [thm]);
+    val (thmss, lthy'') = lthy'
+      |> fold_map (LocalTheory.note Thm.theoremK o mk_note)
+        ((all_names ^ "_induct", ctuple_induct_thm) :: unfold_thms);
   in
-    (thy'', names, fixdef_thms, map snd unfold_thms)
+    (lthy'', names, fixdef_thms, map snd unfold_thms)
   end;
 
 (*************************************************************************)
@@ -218,13 +228,16 @@
 (* associate match functions with pattern constants *)
 fun add_matchers ms = FixrecMatchData.map (fold Symtab.update ms);
 
-fun add_names (Const(a,_), bs) = insert (op =) (Sign.base_name a) bs
-  | add_names (Free(a,_) , bs) = insert (op =) a bs
-  | add_names (f $ u     , bs) = add_names (f, add_names(u, bs))
-  | add_names (Abs(a,_,t), bs) = add_names (t, insert (op =) a bs)
-  | add_names (_         , bs) = bs;
-
-fun add_terms ts xs = foldr add_names xs ts;
+fun taken_names (t : term) : bstring list =
+  let
+    fun taken (Const(a,_), bs) = insert (op =) (Sign.base_name a) bs
+      | taken (Free(a,_) , bs) = insert (op =) a bs
+      | taken (f $ u     , bs) = taken (f, taken (u, bs))
+      | taken (Abs(a,_,t), bs) = taken (t, insert (op =) a bs)
+      | taken (_         , bs) = bs;
+  in
+    taken (t, [])
+  end;
 
 (* builds a monadic term for matching a constructor pattern *)
 fun pre_build match_name pat rhs vs taken =
@@ -257,14 +270,19 @@
   | Const(@{const_name Rep_CFun}, _)$f$x =>
       let val (rhs', v, taken') = pre_build match_name x rhs [] taken;
       in building match_name f rhs' (v::vs) taken' end
-  | Const(name,_) => (pat, length vs, big_lambdas vs rhs)
-  | _ => fixrec_err "function is not declared as constant in theory";
+  | Free(_,_) => ((pat, length vs), big_lambdas vs rhs)
+  | Const(_,_) => ((pat, length vs), big_lambdas vs rhs)
+  | _ => fixrec_err ("function is not declared as constant in theory: "
+                    ^ ML_Syntax.print_term pat);
 
-fun match_eq match_name eq = 
-  let val (lhs,rhs) = dest_eqs eq;
+fun strip_alls t =
+  if Logic.is_all t then strip_alls (snd (Logic.dest_all t)) else t;
+
+fun match_eq match_name eq =
+  let
+    val (lhs,rhs) = dest_eqs (Logic.strip_imp_concl (strip_alls eq));
   in
-    building match_name lhs (mk_return rhs) []
-      (add_terms [eq] [])
+    building match_name lhs (mk_return rhs) [] (taken_names eq)
   end;
 
 (* returns the sum (using +++) of the terms in ms *)
@@ -286,15 +304,11 @@
     reLAM (rev Ts, dest_maybeT U) (mk_run msum)
   end;
 
-fun unzip3 [] = ([],[],[])
-  | unzip3 ((x,y,z)::ts) =
-      let val (xs,ys,zs) = unzip3 ts
-      in (x::xs, y::ys, z::zs) end;
-
 (* this is the pattern-matching compiler function *)
-fun compile_pats match_name eqs = 
+fun compile_pats match_name eqs =
   let
-    val ((n::names),(a::arities),mats) = unzip3 (map (match_eq match_name) eqs);
+    val (((n::names),(a::arities)),mats) =
+      apfst ListPair.unzip (ListPair.unzip (map (match_eq match_name) eqs));
     val cname = if forall (fn x => n=x) names then n
           else fixrec_err "all equations in block must define the same function";
     val arity = if forall (fn x => a=x) arities then a
@@ -309,11 +323,13 @@
 (*************************************************************************)
 
 (* proves a block of pattern matching equations as theorems, using unfold *)
-fun make_simps thy (unfold_thm, eqns) =
+fun make_simps lthy (unfold_thm, eqns : (Attrib.binding * term) list) =
   let
-    val tacs = [rtac (unfold_thm RS @{thm ssubst_lhs}) 1, asm_simp_tac (simpset_of thy) 1];
-    fun prove_term t = Goal.prove_global thy [] [] t (K (EVERY tacs));
-    fun prove_eqn ((name, eqn_t), atts) = ((name, prove_term eqn_t), atts);
+    val tacs =
+      [rtac (unfold_thm RS @{thm ssubst_lhs}) 1,
+       asm_simp_tac (local_simpset_of lthy) 1];
+    fun prove_term t = Goal.prove lthy [] [] t (K (EVERY tacs));
+    fun prove_eqn (bind, eqn_t) = (bind, prove_term eqn_t);
   in
     map prove_eqn eqns
   end;
@@ -322,48 +338,77 @@
 (************************* Main fixrec function **************************)
 (*************************************************************************)
 
-fun gen_add_fixrec prep_prop prep_attrib strict blocks thy =
+local
+(* code adapted from HOL/Tools/primrec_package.ML *)
+
+fun prepare_spec prep_spec ctxt raw_fixes raw_spec =
+  let
+    val ((fixes, spec), _) = prep_spec
+      raw_fixes (map (single o apsnd single) raw_spec) ctxt
+  in (fixes, map (apsnd the_single) spec) end;
+
+fun gen_fixrec
+  (set_group : bool)
+  (prep_spec : (binding * 'a option * mixfix) list ->
+       (Attrib.binding * 'b list) list list ->
+      Proof.context ->
+   (((binding * typ) * mixfix) list * (Attrib.binding * term list) list)
+    * Proof.context
+  )
+  (strict : bool)
+  raw_fixes
+  raw_spec
+  (lthy : local_theory) =
   let
-    val eqns = List.concat blocks;
-    val lengths = map length blocks;
-    
-    val ((bindings, srcss), strings) = apfst split_list (split_list eqns);
-    val names = map Binding.base_name bindings;
-    val atts = map (map (prep_attrib thy)) srcss;
-    val eqn_ts = map (prep_prop thy) strings;
-    val rec_ts = map (fn eq => chead_of (fst (dest_eqs (Logic.strip_imp_concl eq)))
-      handle TERM _ => fixrec_eq_err thy "not a proper equation" eq) eqn_ts;
-    val (_, eqn_ts') = OldPrimrecPackage.unify_consts thy rec_ts eqn_ts;
-    
-    fun unconcat [] _ = []
-      | unconcat (n::ns) xs = List.take (xs,n) :: unconcat ns (List.drop (xs,n));
-    val matcher_tab = FixrecMatchData.get thy;
+    val (fixes : ((binding * typ) * mixfix) list,
+         spec : (Attrib.binding * term) list) =
+          prepare_spec prep_spec lthy raw_fixes raw_spec;
+    val chead_of_spec =
+      chead_of o fst o dest_eqs o Logic.strip_imp_concl o strip_alls o snd;
+    fun name_of (Free (n, _)) = n
+      | name_of t = fixrec_err ("unknown term");
+    val all_names = map (name_of o chead_of_spec) spec;
+    val names = distinct (op =) all_names;
+    fun block_of_name n =
+      map_filter
+        (fn (m,eq) => if m = n then SOME eq else NONE)
+        (all_names ~~ spec);
+    val blocks = map block_of_name names;
+
+    val matcher_tab = FixrecMatchData.get (ProofContext.theory_of lthy);
     fun match_name c =
-          case Symtab.lookup matcher_tab c of SOME m => m
-            | NONE => fixrec_err ("unknown pattern constructor: " ^ c);
+      case Symtab.lookup matcher_tab c of SOME m => m
+        | NONE => fixrec_err ("unknown pattern constructor: " ^ c);
 
-    val pattern_blocks = unconcat lengths (map Logic.strip_imp_concl eqn_ts');
-    val compiled_ts =
-          map (compile_pats match_name) pattern_blocks;
-    val (thy', cnames, fixdef_thms, unfold_thms) = add_fixdefs compiled_ts thy;
+    val matches = map (compile_pats match_name) (map (map snd) blocks);
+    val spec' = map (pair Attrib.empty_binding) matches;
+    val (lthy', cnames, fixdef_thms, unfold_thms) =
+      add_fixdefs fixes spec' lthy;
   in
     if strict then let (* only prove simp rules if strict = true *)
-      val eqn_blocks = unconcat lengths ((names ~~ eqn_ts') ~~ atts);
-      val simps = maps (make_simps thy') (unfold_thms ~~ eqn_blocks);
-      val (simp_thms, thy'') = PureThy.add_thms ((map o apfst o apfst) Binding.name simps) thy';
-      
-      val simp_names = map (fn name => name^"_simps") cnames;
-      val simp_attribute = rpair [Simplifier.simp_add];
-      val simps' = map simp_attribute (simp_names ~~ unconcat lengths simp_thms);
+      val simps : (Attrib.binding * thm) list list =
+        map (make_simps lthy') (unfold_thms ~~ blocks);
+      fun mk_bind n : Attrib.binding =
+       (Binding.name (n ^ "_simps"),
+         [Attrib.internal (K Simplifier.simp_add)]);
+      val simps1 : (Attrib.binding * thm list) list =
+        map (fn (n,xs) => (mk_bind n, map snd xs)) (names ~~ simps);
+      val simps2 : (Attrib.binding * thm list) list =
+        map (apsnd (fn thm => [thm])) (List.concat simps);
+      val (_, lthy'') = lthy'
+        |> fold_map (LocalTheory.note Thm.theoremK) (simps1 @ simps2);
     in
-      (snd o PureThy.add_thmss ((map o apfst o apfst) Binding.name simps')) thy''
+      lthy''
     end
-    else thy'
+    else lthy'
   end;
 
-val add_fixrec = gen_add_fixrec Syntax.read_prop_global Attrib.attribute;
-val add_fixrec_i = gen_add_fixrec Sign.cert_prop (K I);
+in
 
+val add_fixrec_i = gen_fixrec false Specification.check_specification;
+val add_fixrec = gen_fixrec true Specification.read_specification;
+
+end; (* local *)
 
 (*************************************************************************)
 (******************************** Fixpat *********************************)
@@ -399,17 +444,34 @@
 
 local structure P = OuterParse and K = OuterKeyword in
 
-val fixrec_eqn = SpecParse.opt_thm_name ":" -- P.prop;
-
+(* bool parser *)
 val fixrec_strict = P.opt_keyword "permissive" >> not;
 
-val fixrec_decl = fixrec_strict -- P.and_list1 (Scan.repeat1 fixrec_eqn);
+fun pipe_error t = P.!!! (Scan.fail_with (K
+  (cat_lines ["Equations must be separated by " ^ quote "|", quote t])));
+
+(* (Attrib.binding * string) parser *)
+val statement = SpecParse.opt_thm_name ":" -- P.prop --| Scan.ahead
+  ((P.term :-- pipe_error) || Scan.succeed ("",""));
+
+(* ((Attrib.binding * string) list) parser *)
+val statements = P.enum1 "|" statement;
+
+(* (((xstring option * bool) * (Binding.binding * string option * Mixfix.mixfix) list)
+   * (Attrib.binding * string) list) parser *)
+val fixrec_decl =
+  P.opt_target -- fixrec_strict -- P.fixes --| P.$$$ "where" -- statements;
 
 (* this builds a parser for a new keyword, fixrec, whose functionality 
 is defined by add_fixrec *)
 val _ =
-  OuterSyntax.command "fixrec" "define recursive functions (HOLCF)" K.thy_decl
-    (fixrec_decl >> (Toplevel.theory o uncurry add_fixrec));
+  let
+    val desc = "define recursive functions (HOLCF)";
+    fun fixrec (((opt_target, strict), raw_fixes), raw_spec) =
+      Toplevel.local_theory opt_target (add_fixrec strict raw_fixes raw_spec);
+  in
+    OuterSyntax.command "fixrec" desc K.thy_decl (fixrec_decl >> fixrec)
+  end;
 
 (* fixpat parser *)
 val fixpat_decl = SpecParse.opt_thm_name ":" -- Scan.repeat1 P.prop;
@@ -417,7 +479,7 @@
 val _ =
   OuterSyntax.command "fixpat" "define rewrites for fixrec functions" K.thy_decl
     (fixpat_decl >> (Toplevel.theory o add_fixpat));
-
+  
 end; (* local structure *)
 
 val setup = FixrecMatchData.init;
--- a/src/HOLCF/ex/Fixrec_ex.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOLCF/ex/Fixrec_ex.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      HOLCF/ex/Fixrec_ex.thy
-    ID:         $Id$
     Author:     Brian Huffman
 *)
 
@@ -19,18 +18,18 @@
 
 text {* typical usage is with lazy constructors *}
 
-consts down :: "'a u \<rightarrow> 'a"
-fixrec "down\<cdot>(up\<cdot>x) = x"
+fixrec down :: "'a u \<rightarrow> 'a"
+where "down\<cdot>(up\<cdot>x) = x"
 
 text {* with strict constructors, rewrite rules may require side conditions *}
 
-consts from_sinl :: "'a \<oplus> 'b \<rightarrow> 'a"
-fixrec "x \<noteq> \<bottom> \<Longrightarrow> from_sinl\<cdot>(sinl\<cdot>x) = x"
+fixrec from_sinl :: "'a \<oplus> 'b \<rightarrow> 'a"
+where "x \<noteq> \<bottom> \<Longrightarrow> from_sinl\<cdot>(sinl\<cdot>x) = x"
 
 text {* lifting can turn a strict constructor into a lazy one *}
 
-consts from_sinl_up :: "'a u \<oplus> 'b \<rightarrow> 'a"
-fixrec "from_sinl_up\<cdot>(sinl\<cdot>(up\<cdot>x)) = x"
+fixrec from_sinl_up :: "'a u \<oplus> 'b \<rightarrow> 'a"
+where "from_sinl_up\<cdot>(sinl\<cdot>(up\<cdot>x)) = x"
 
 
 subsection {* fixpat examples *}
@@ -41,13 +40,13 @@
 
 text {* zip function for lazy lists *}
 
-consts lzip :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
-
 text {* notice that the patterns are not exhaustive *}
 
 fixrec
+  lzip :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
+where
   "lzip\<cdot>(lCons\<cdot>x\<cdot>xs)\<cdot>(lCons\<cdot>y\<cdot>ys) = lCons\<cdot><x,y>\<cdot>(lzip\<cdot>xs\<cdot>ys)"
-  "lzip\<cdot>lNil\<cdot>lNil = lNil"
+| "lzip\<cdot>lNil\<cdot>lNil = lNil"
 
 text {* fixpat is useful for producing strictness theorems *}
 text {* note that pattern matching is done in left-to-right order *}
@@ -68,8 +67,6 @@
 
 text {* another zip function for lazy lists *}
 
-consts lzip2 :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
-
 text {*
   Notice that this version has overlapping patterns.
   The second equation cannot be proved as a theorem
@@ -77,8 +74,10 @@
 *}
 
 fixrec (permissive)
+  lzip2 :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
+where
   "lzip2\<cdot>(lCons\<cdot>x\<cdot>xs)\<cdot>(lCons\<cdot>y\<cdot>ys) = lCons\<cdot><x,y>\<cdot>(lzip\<cdot>xs\<cdot>ys)"
-  "lzip2\<cdot>xs\<cdot>ys = lNil"
+| "lzip2\<cdot>xs\<cdot>ys = lNil"
 
 text {*
   Usually fixrec tries to prove all equations as theorems.
@@ -105,21 +104,20 @@
 domain 'a tree = Leaf (lazy 'a) | Branch (lazy "'a forest")
 and    'a forest = Empty | Trees (lazy "'a tree") "'a forest"
 
-consts
-  map_tree :: "('a \<rightarrow> 'b) \<rightarrow> ('a tree \<rightarrow> 'b tree)"
-  map_forest :: "('a \<rightarrow> 'b) \<rightarrow> ('a forest \<rightarrow> 'b forest)"
-
 text {*
   To define mutually recursive functions, separate the equations
   for each function using the keyword "and".
 *}
 
 fixrec
-  "map_tree\<cdot>f\<cdot>(Leaf\<cdot>x) = Leaf\<cdot>(f\<cdot>x)"
-  "map_tree\<cdot>f\<cdot>(Branch\<cdot>ts) = Branch\<cdot>(map_forest\<cdot>f\<cdot>ts)"
+  map_tree :: "('a \<rightarrow> 'b) \<rightarrow> ('a tree \<rightarrow> 'b tree)"
 and
-  "map_forest\<cdot>f\<cdot>Empty = Empty"
-  "ts \<noteq> \<bottom> \<Longrightarrow>
+  map_forest :: "('a \<rightarrow> 'b) \<rightarrow> ('a forest \<rightarrow> 'b forest)"
+where
+  "map_tree\<cdot>f\<cdot>(Leaf\<cdot>x) = Leaf\<cdot>(f\<cdot>x)"
+| "map_tree\<cdot>f\<cdot>(Branch\<cdot>ts) = Branch\<cdot>(map_forest\<cdot>f\<cdot>ts)"
+| "map_forest\<cdot>f\<cdot>Empty = Empty"
+| "ts \<noteq> \<bottom> \<Longrightarrow>
     map_forest\<cdot>f\<cdot>(Trees\<cdot>t\<cdot>ts) = Trees\<cdot>(map_tree\<cdot>f\<cdot>t)\<cdot>(map_forest\<cdot>f\<cdot>ts)"
 
 fixpat map_tree_strict [simp]: "map_tree\<cdot>f\<cdot>\<bottom>"
--- a/src/HOLCF/ex/Powerdomain_ex.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/HOLCF/ex/Powerdomain_ex.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -53,20 +53,20 @@
   Node (lazy "'a tree") (lazy "'a tree") |
   Leaf (lazy "'a")
 
-consts
+fixrec
   mirror :: "'a tree \<rightarrow> 'a tree"
-  pick :: "'a tree \<rightarrow> 'a convex_pd"
-
-fixrec
+where
   mirror_Leaf: "mirror\<cdot>(Leaf\<cdot>a) = Leaf\<cdot>a"
-  mirror_Node: "mirror\<cdot>(Node\<cdot>l\<cdot>r) = Node\<cdot>(mirror\<cdot>r)\<cdot>(mirror\<cdot>l)"
+| mirror_Node: "mirror\<cdot>(Node\<cdot>l\<cdot>r) = Node\<cdot>(mirror\<cdot>r)\<cdot>(mirror\<cdot>l)"
 
 fixpat
   mirror_strict [simp]: "mirror\<cdot>\<bottom>"
 
 fixrec
+  pick :: "'a tree \<rightarrow> 'a convex_pd"
+where
   pick_Leaf: "pick\<cdot>(Leaf\<cdot>a) = {a}\<natural>"
-  pick_Node: "pick\<cdot>(Node\<cdot>l\<cdot>r) = pick\<cdot>l +\<natural> pick\<cdot>r"
+| pick_Node: "pick\<cdot>(Node\<cdot>l\<cdot>r) = pick\<cdot>l +\<natural> pick\<cdot>r"
 
 fixpat
   pick_strict [simp]: "pick\<cdot>\<bottom>"
@@ -75,22 +75,17 @@
 by (induct t rule: tree.ind)
    (simp_all add: convex_plus_ac)
 
-consts
-  tree1 :: "int lift tree"
-  tree2 :: "int lift tree"
-  tree3 :: "int lift tree"
+fixrec tree1 :: "int lift tree"
+where "tree1 = Node\<cdot>(Node\<cdot>(Leaf\<cdot>(Def 1))\<cdot>(Leaf\<cdot>(Def 2)))
+                   \<cdot>(Node\<cdot>(Leaf\<cdot>(Def 3))\<cdot>(Leaf\<cdot>(Def 4)))"
 
-fixrec
-  "tree1 = Node\<cdot>(Node\<cdot>(Leaf\<cdot>(Def 1))\<cdot>(Leaf\<cdot>(Def 2)))
-               \<cdot>(Node\<cdot>(Leaf\<cdot>(Def 3))\<cdot>(Leaf\<cdot>(Def 4)))"
+fixrec tree2 :: "int lift tree"
+where "tree2 = Node\<cdot>(Node\<cdot>(Leaf\<cdot>(Def 1))\<cdot>(Leaf\<cdot>(Def 2)))
+                   \<cdot>(Node\<cdot>\<bottom>\<cdot>(Leaf\<cdot>(Def 4)))"
 
-fixrec
-  "tree2 = Node\<cdot>(Node\<cdot>(Leaf\<cdot>(Def 1))\<cdot>(Leaf\<cdot>(Def 2)))
-               \<cdot>(Node\<cdot>\<bottom>\<cdot>(Leaf\<cdot>(Def 4)))"
-
-fixrec
-  "tree3 = Node\<cdot>(Node\<cdot>(Leaf\<cdot>(Def 1))\<cdot>tree3)
-               \<cdot>(Node\<cdot>(Leaf\<cdot>(Def 3))\<cdot>(Leaf\<cdot>(Def 4)))"
+fixrec tree3 :: "int lift tree"
+where "tree3 = Node\<cdot>(Node\<cdot>(Leaf\<cdot>(Def 1))\<cdot>tree3)
+                   \<cdot>(Node\<cdot>(Leaf\<cdot>(Def 3))\<cdot>(Leaf\<cdot>(Def 4)))"
 
 declare tree1_simps tree2_simps tree3_simps [simp del]
 
--- a/src/Provers/README	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Provers/README	Mon Mar 02 08:15:54 2009 +0100
@@ -2,19 +2,13 @@
 
 This directory contains ML sources of generic theorem proving tools.
 Typically, they can be applied to various logics, provided rules of a
-certain form are derivable.  Some of these are documented in the
-Reference Manual.
+certain form are derivable.
 
   blast.ML              generic tableau prover with proof reconstruction
   clasimp.ML		combination of classical reasoner and simplifier
   classical.ML          theorem prover for classical logics
   hypsubst.ML           tactic to substitute in the hypotheses
-  ind.ML                a simple induction package
-  induct_method.ML      proof by cases and induction on sets and types (Isar)
-  linorder.ML		transitivity reasoner for linear (total) orders
   quantifier1.ML	simplification procedures for "1 point rules"
-  simp.ML               powerful but slow simplifier
-  split_paired_all.ML	turn surjective pairing into split rule
   splitter.ML           performs case splits for simplifier
   typedsimp.ML          basic simplifier for explicitly typed logics
 
--- a/src/Provers/blast.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Provers/blast.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -913,7 +913,7 @@
 
 fun printStats (State {ntried, nclosed, ...}) (b, start, tacs) =
   if b then
-    writeln (end_timing start ^ " for search.  Closed: "
+    writeln (#message (end_timing start) ^ " for search.  Closed: "
              ^ Int.toString (!nclosed) ^
              " tried: " ^ Int.toString (!ntried) ^
              " tactics: " ^ Int.toString (length tacs))
@@ -1264,7 +1264,7 @@
                        else ();
                        backtrack choices)
             | cell => (if (!trace orelse !stats)
-                       then writeln (end_timing start ^ " for reconstruction")
+                       then writeln (#message (end_timing start) ^ " for reconstruction")
                        else ();
                        Seq.make(fn()=> cell))
           end
--- a/src/Provers/coherent.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,233 +0,0 @@
-(*  Title:      Provers/coherent.ML
-    Author:     Stefan Berghofer, TU Muenchen
-    Author:     Marc Bezem, Institutt for Informatikk, Universitetet i Bergen 
-
-Prover for coherent logic, see e.g.
-
-  Marc Bezem and Thierry Coquand, Automating Coherent Logic, LPAR 2005
-
-for a description of the algorithm.
-*)
-
-signature COHERENT_DATA =
-sig
-  val atomize_elimL: thm
-  val atomize_exL: thm
-  val atomize_conjL: thm
-  val atomize_disjL: thm
-  val operator_names: string list
-end;
-
-signature COHERENT =
-sig
-  val verbose: bool ref
-  val show_facts: bool ref
-  val coherent_tac: thm list -> Proof.context -> int -> tactic
-  val coherent_meth: thm list -> Proof.context -> Proof.method
-  val setup: theory -> theory
-end;
-
-functor CoherentFun(Data: COHERENT_DATA) : COHERENT =
-struct
-
-val verbose = ref false;
-
-fun message f = if !verbose then tracing (f ()) else ();
-
-datatype cl_prf =
-  ClPrf of thm * (Type.tyenv * Envir.tenv) * ((indexname * typ) * term) list *
-  int list * (term list * cl_prf) list;
-
-val is_atomic = not o exists_Const (member (op =) Data.operator_names o #1);
-
-local open Conv in
-
-fun rulify_elim_conv ct =
-  if is_atomic (Logic.strip_imp_concl (term_of ct)) then all_conv ct
-  else concl_conv (length (Logic.strip_imp_prems (term_of ct)))
-    (rewr_conv (symmetric Data.atomize_elimL) then_conv
-     MetaSimplifier.rewrite true (map symmetric
-       [Data.atomize_exL, Data.atomize_conjL, Data.atomize_disjL])) ct
-
-end;
-
-fun rulify_elim th = MetaSimplifier.norm_hhf (Conv.fconv_rule rulify_elim_conv th);
-
-(* Decompose elimination rule of the form
-   A1 ==> ... ==> Am ==> (!!xs1. Bs1 ==> P) ==> ... ==> (!!xsn. Bsn ==> P) ==> P
-*)
-fun dest_elim prop =
-  let
-    val prems = Logic.strip_imp_prems prop;
-    val concl = Logic.strip_imp_concl prop;
-    val (prems1, prems2) =
-      take_suffix (fn t => Logic.strip_assums_concl t = concl) prems;
-  in
-    (prems1,
-     if null prems2 then [([], [concl])]
-     else map (fn t =>
-       (map snd (Logic.strip_params t), Logic.strip_assums_hyp t)) prems2)
-  end;
-
-fun mk_rule th =
-  let
-    val th' = rulify_elim th;
-    val (prems, cases) = dest_elim (prop_of th')
-  in (th', prems, cases) end;
-
-fun mk_dom ts = fold (fn t =>
-  Typtab.map_default (fastype_of t, []) (fn us => us @ [t])) ts Typtab.empty;
-
-val empty_env = (Vartab.empty, Vartab.empty);
-
-(* Find matcher that makes conjunction valid in given state *)
-fun valid_conj ctxt facts env [] = Seq.single (env, [])
-  | valid_conj ctxt facts env (t :: ts) =
-      Seq.maps (fn (u, x) => Seq.map (apsnd (cons x))
-        (valid_conj ctxt facts
-           (Pattern.match (ProofContext.theory_of ctxt) (t, u) env) ts
-         handle Pattern.MATCH => Seq.empty))
-          (Seq.of_list (sort (int_ord o pairself snd) (Net.unify_term facts t)));
-
-(* Instantiate variables that only occur free in conclusion *)
-fun inst_extra_vars ctxt dom cs =
-  let
-    val vs = fold Term.add_vars (maps snd cs) [];
-    fun insts [] inst = Seq.single inst
-      | insts ((ixn, T) :: vs') inst = Seq.maps
-          (fn t => insts vs' (((ixn, T), t) :: inst))
-          (Seq.of_list (case Typtab.lookup dom T of
-             NONE => error ("Unknown domain: " ^
-               Syntax.string_of_typ ctxt T ^ "\nfor term(s) " ^
-               commas (maps (map (Syntax.string_of_term ctxt) o snd) cs))
-           | SOME ts => ts))
-  in Seq.map (fn inst =>
-    (inst, map (apsnd (map (subst_Vars (map (apfst fst) inst)))) cs))
-      (insts vs [])
-  end;
-
-(* Check whether disjunction is valid in given state *)
-fun is_valid_disj ctxt facts [] = false
-  | is_valid_disj ctxt facts ((Ts, ts) :: ds) =
-      let val vs = rev (map_index (fn (i, T) => Var (("x", i), T)) Ts)
-      in case Seq.pull (valid_conj ctxt facts empty_env
-        (map (fn t => subst_bounds (vs, t)) ts)) of
-          SOME _ => true
-        | NONE => is_valid_disj ctxt facts ds
-      end;
-
-val show_facts = ref false;
-
-fun string_of_facts ctxt s facts = space_implode "\n"
-  (s :: map (Syntax.string_of_term ctxt)
-     (map fst (sort (int_ord o pairself snd) (Net.content facts)))) ^ "\n\n";
-
-fun print_facts ctxt facts =
-  if !show_facts then message (fn () => string_of_facts ctxt "Facts:" facts)
-  else ();
-
-fun valid ctxt rules goal dom facts nfacts nparams =
-  let val seq = Seq.of_list rules |> Seq.maps (fn (th, ps, cs) =>
-    valid_conj ctxt facts empty_env ps |> Seq.maps (fn (env as (tye, _), is) =>
-      let val cs' = map (fn (Ts, ts) =>
-        (map (Envir.typ_subst_TVars tye) Ts, map (Envir.subst_vars env) ts)) cs
-      in
-        inst_extra_vars ctxt dom cs' |>
-          Seq.map_filter (fn (inst, cs'') =>
-            if is_valid_disj ctxt facts cs'' then NONE
-            else SOME (th, env, inst, is, cs''))
-      end))
-  in
-    case Seq.pull seq of
-      NONE => (tracing (string_of_facts ctxt "Countermodel found:" facts); NONE)
-    | SOME ((th, env, inst, is, cs), _) =>
-        if cs = [([], [goal])] then SOME (ClPrf (th, env, inst, is, []))
-        else
-          (case valid_cases ctxt rules goal dom facts nfacts nparams cs of
-             NONE => NONE
-           | SOME prfs => SOME (ClPrf (th, env, inst, is, prfs)))
-  end
-
-and valid_cases ctxt rules goal dom facts nfacts nparams [] = SOME []
-  | valid_cases ctxt rules goal dom facts nfacts nparams ((Ts, ts) :: ds) =
-      let
-        val _ = message (fn () => "case " ^ commas (map (Syntax.string_of_term ctxt) ts));
-        val params = rev (map_index (fn (i, T) =>
-          Free ("par" ^ string_of_int (nparams + i), T)) Ts);
-        val ts' = map_index (fn (i, t) =>
-          (subst_bounds (params, t), nfacts + i)) ts;
-        val dom' = fold (fn (T, p) =>
-          Typtab.map_default (T, []) (fn ps => ps @ [p]))
-            (Ts ~~ params) dom;
-        val facts' = fold (fn (t, i) => Net.insert_term op =
-          (t, (t, i))) ts' facts
-      in
-        case valid ctxt rules goal dom' facts'
-          (nfacts + length ts) (nparams + length Ts) of
-          NONE => NONE
-        | SOME prf => (case valid_cases ctxt rules goal dom facts nfacts nparams ds of
-            NONE => NONE
-          | SOME prfs => SOME ((params, prf) :: prfs))
-      end;
-
-(** proof replaying **)
-
-fun thm_of_cl_prf thy goal asms (ClPrf (th, (tye, env), insts, is, prfs)) =
-  let
-    val _ = message (fn () => space_implode "\n"
-      ("asms:" :: map Display.string_of_thm asms) ^ "\n\n");
-    val th' = Drule.implies_elim_list
-      (Thm.instantiate
-         (map (fn (ixn, (S, T)) =>
-            (Thm.ctyp_of thy (TVar ((ixn, S))), Thm.ctyp_of thy T))
-               (Vartab.dest tye),
-          map (fn (ixn, (T, t)) =>
-            (Thm.cterm_of thy (Var (ixn, Envir.typ_subst_TVars tye T)),
-             Thm.cterm_of thy t)) (Vartab.dest env) @
-          map (fn (ixnT, t) =>
-            (Thm.cterm_of thy (Var ixnT), Thm.cterm_of thy t)) insts) th)
-      (map (nth asms) is);
-    val (_, cases) = dest_elim (prop_of th')
-  in
-    case (cases, prfs) of
-      ([([], [_])], []) => th'
-    | ([([], [_])], [([], prf)]) => thm_of_cl_prf thy goal (asms @ [th']) prf
-    | _ => Drule.implies_elim_list
-        (Thm.instantiate (Thm.match
-           (Drule.strip_imp_concl (cprop_of th'), goal)) th')
-        (map (thm_of_case_prf thy goal asms) (prfs ~~ cases))
-  end
-
-and thm_of_case_prf thy goal asms ((params, prf), (_, asms')) =
-  let
-    val cparams = map (cterm_of thy) params;
-    val asms'' = map (cterm_of thy o curry subst_bounds (rev params)) asms'
-  in
-    Drule.forall_intr_list cparams (Drule.implies_intr_list asms''
-      (thm_of_cl_prf thy goal (asms @ map Thm.assume asms'') prf))
-  end;
-
-
-(** external interface **)
-
-fun coherent_tac rules ctxt = SUBPROOF (fn {prems, concl, params, context, ...} =>
-  rtac (rulify_elim_conv concl RS equal_elim_rule2) 1 THEN
-  SUBPROOF (fn {prems = prems', concl, context, ...} =>
-    let val xs = map term_of params @
-      map (fn (_, s) => Free (s, the (Variable.default_type context s)))
-        (Variable.fixes_of context)
-    in
-      case valid context (map mk_rule (prems' @ prems @ rules)) (term_of concl)
-           (mk_dom xs) Net.empty 0 0 of
-         NONE => no_tac
-       | SOME prf =>
-           rtac (thm_of_cl_prf (ProofContext.theory_of context) concl [] prf) 1
-    end) context 1) ctxt;
-
-fun coherent_meth rules ctxt =
-  Method.METHOD (fn facts => coherent_tac (facts @ rules) ctxt 1);
-
-val setup = Method.add_method
-  ("coherent", Method.thms_ctxt_args coherent_meth, "Prove coherent formula");
-
-end;
--- a/src/Provers/eqsubst.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,575 +0,0 @@
-(*  Title:      Provers/eqsubst.ML
-    Author:     Lucas Dixon, University of Edinburgh
-
-A proof method to perform a substitution using an equation.
-*)
-
-signature EQSUBST =
-sig
-  (* a type abbreviation for match information *)
-  type match =
-       ((indexname * (sort * typ)) list (* type instantiations *)
-        * (indexname * (typ * term)) list) (* term instantiations *)
-       * (string * typ) list (* fake named type abs env *)
-       * (string * typ) list (* type abs env *)
-       * term (* outer term *)
-
-  type searchinfo =
-       theory
-       * int (* maxidx *)
-       * Zipper.T (* focusterm to search under *)
-
-    exception eqsubst_occL_exp of
-       string * int list * Thm.thm list * int * Thm.thm
-    
-    (* low level substitution functions *)
-    val apply_subst_in_asm :
-       int ->
-       Thm.thm ->
-       Thm.thm ->
-       (Thm.cterm list * int * 'a * Thm.thm) * match -> Thm.thm Seq.seq
-    val apply_subst_in_concl :
-       int ->
-       Thm.thm ->
-       Thm.cterm list * Thm.thm ->
-       Thm.thm -> match -> Thm.thm Seq.seq
-
-    (* matching/unification within zippers *)
-    val clean_match_z :
-       Context.theory -> Term.term -> Zipper.T -> match option
-    val clean_unify_z :
-       Context.theory -> int -> Term.term -> Zipper.T -> match Seq.seq
-
-    (* skipping things in seq seq's *)
-
-   (* skipping non-empty sub-sequences; when we reach the end of the
-      seq, remember how much we still have left to skip. *)
-    datatype 'a skipseq = SkipMore of int
-      | SkipSeq of 'a Seq.seq Seq.seq;
-
-    val skip_first_asm_occs_search :
-       ('a -> 'b -> 'c Seq.seq Seq.seq) ->
-       'a -> int -> 'b -> 'c skipseq
-    val skip_first_occs_search :
-       int -> ('a -> 'b -> 'c Seq.seq Seq.seq) -> 'a -> 'b -> 'c Seq.seq
-    val skipto_skipseq : int -> 'a Seq.seq Seq.seq -> 'a skipseq
-
-    (* tactics *)
-    val eqsubst_asm_tac :
-       Proof.context ->
-       int list -> Thm.thm list -> int -> Thm.thm -> Thm.thm Seq.seq
-    val eqsubst_asm_tac' :
-       Proof.context ->
-       (searchinfo -> int -> Term.term -> match skipseq) ->
-       int -> Thm.thm -> int -> Thm.thm -> Thm.thm Seq.seq
-    val eqsubst_tac :
-       Proof.context ->
-       int list -> (* list of occurrences to rewrite, use [0] for any *)
-       Thm.thm list -> int -> Thm.thm -> Thm.thm Seq.seq
-    val eqsubst_tac' :
-       Proof.context -> (* proof context *)
-       (searchinfo -> Term.term -> match Seq.seq) (* search function *)
-       -> Thm.thm (* equation theorem to rewrite with *)
-       -> int (* subgoal number in goal theorem *)
-       -> Thm.thm (* goal theorem *)
-       -> Thm.thm Seq.seq (* rewritten goal theorem *)
-
-
-    val fakefree_badbounds :
-       (string * Term.typ) list ->
-       Term.term ->
-       (string * Term.typ) list * (string * Term.typ) list * Term.term
-
-    val mk_foo_match :
-       (Term.term -> Term.term) ->
-       ('a * Term.typ) list -> Term.term -> Term.term
-
-    (* preparing substitution *)
-    val prep_meta_eq : Proof.context -> Thm.thm -> Thm.thm list
-    val prep_concl_subst :
-       int -> Thm.thm -> (Thm.cterm list * Thm.thm) * searchinfo
-    val prep_subst_in_asm :
-       int -> Thm.thm -> int ->
-       (Thm.cterm list * int * int * Thm.thm) * searchinfo
-    val prep_subst_in_asms :
-       int -> Thm.thm ->
-       ((Thm.cterm list * int * int * Thm.thm) * searchinfo) list
-    val prep_zipper_match :
-       Zipper.T -> Term.term * ((string * Term.typ) list * (string * Term.typ) list * Term.term)
-
-    (* search for substitutions *)
-    val valid_match_start : Zipper.T -> bool
-    val search_lr_all : Zipper.T -> Zipper.T Seq.seq
-    val search_lr_valid : (Zipper.T -> bool) -> Zipper.T -> Zipper.T Seq.seq
-    val searchf_lr_unify_all :
-       searchinfo -> Term.term -> match Seq.seq Seq.seq
-    val searchf_lr_unify_valid :
-       searchinfo -> Term.term -> match Seq.seq Seq.seq
-    val searchf_bt_unify_valid :
-       searchinfo -> Term.term -> match Seq.seq Seq.seq
-
-    (* syntax tools *)
-    val ith_syntax : Args.T list -> int list * Args.T list
-    val options_syntax : Args.T list -> bool * Args.T list
-
-    (* Isar level hooks *)
-    val eqsubst_asm_meth : Proof.context -> int list -> Thm.thm list -> Proof.method
-    val eqsubst_meth : Proof.context -> int list -> Thm.thm list -> Proof.method
-    val subst_meth : Method.src -> Proof.context -> Proof.method
-    val setup : theory -> theory
-
-end;
-
-structure EqSubst
-: EQSUBST
-= struct
-
-structure Z = Zipper;
-
-(* changes object "=" to meta "==" which prepares a given rewrite rule *)
-fun prep_meta_eq ctxt =
-  let val (_, {mk_rews = {mk, ...}, ...}) = Simplifier.rep_ss (Simplifier.local_simpset_of ctxt)
-  in mk #> map Drule.zero_var_indexes end;
-
-
-  (* a type abbreviation for match information *)
-  type match =
-       ((indexname * (sort * typ)) list (* type instantiations *)
-        * (indexname * (typ * term)) list) (* term instantiations *)
-       * (string * typ) list (* fake named type abs env *)
-       * (string * typ) list (* type abs env *)
-       * term (* outer term *)
-
-  type searchinfo =
-       theory
-       * int (* maxidx *)
-       * Zipper.T (* focusterm to search under *)
-
-
-(* skipping non-empty sub-sequences; when we reach the end of the
-   seq, remember how much we still have left to skip. *)
-datatype 'a skipseq = SkipMore of int
-  | SkipSeq of 'a Seq.seq Seq.seq;
-(* given a seqseq, skip the first m non-empty seq's, note deficit *)
-fun skipto_skipseq m s = 
-    let 
-      fun skip_occs n sq = 
-          case Seq.pull sq of 
-            NONE => SkipMore n
-          | SOME (h,t) => 
-            (case Seq.pull h of NONE => skip_occs n t
-             | SOME _ => if n <= 1 then SkipSeq (Seq.cons h t)
-                         else skip_occs (n - 1) t)
-    in (skip_occs m s) end;
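The deficit bookkeeping above may be easier to follow on plain lists; the following is only a sketch with hypothetical names (skipto_list, SkipLMore, SkipLRest) standing in for the Seq.seq version: return the tail starting at the n-th non-empty sub-list, or report how many skips are still owed when the input runs out first.

    datatype 'a skiplist = SkipLMore of int | SkipLRest of 'a list list;
    fun skipto_list n [] = SkipLMore n
      | skipto_list n ([] :: xss) = skipto_list n xss
      | skipto_list n (xs :: xss) =
          if n <= 1 then SkipLRest (xs :: xss) else skipto_list (n - 1) xss;
    (* skipto_list 2 [[1], [], [2], [3]] = SkipLRest [[2], [3]]
       skipto_list 5 [[1], [2]]          = SkipLMore 3 *)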
-
-(* note: outerterm is the target with the match replaced by a bound
-         variable, i.e. "P lhs" becomes "%x. P x";
-         insts is the types of instantiations of vars in lhs
-         and typinsts is the type instantiations of types in the lhs
-         Note: the final rule is the rule lifted into the context of the
-         target thm. *)
-fun mk_foo_match mkuptermfunc Ts t = 
-    let 
-      val ty = Term.type_of t
-      val bigtype = (rev (map snd Ts)) ---> ty
-      fun mk_foo 0 t = t
-        | mk_foo i t = mk_foo (i - 1) (t $ (Bound (i - 1)))
-      val num_of_bnds = (length Ts)
-      (* foo_term = "fooabs y0 ... yn" where y's are local bounds *)
-      val foo_term = mk_foo num_of_bnds (Bound num_of_bnds)
-    in Abs("fooabs", bigtype, mkuptermfunc foo_term) end;
-
-(* T is outer bound vars, n is number of locally bound vars *)
-(* THINK: is order of Ts correct...? or reversed? *)
-fun fakefree_badbounds Ts t = 
-    let val (FakeTs,Ts,newnames) = 
-            List.foldr (fn ((n,ty),(FakeTs,Ts,usednames)) => 
-                           let val newname = Name.variant usednames n
-                           in ((RWTools.mk_fake_bound_name newname,ty)::FakeTs,
-                               (newname,ty)::Ts, 
-                               newname::usednames) end)
-                       ([],[],[])
-                       Ts
-    in (FakeTs, Ts, Term.subst_bounds (map Free FakeTs, t)) end;
-
-(* before matching we need to fake the bound vars that are missing an
-abstraction. In this function we additionally construct the
-abstraction environment, and an outer context term (with the focus
-abstracted out) for use in rewriting with RWInst.rw *)
-fun prep_zipper_match z = 
-    let 
-      val t = Z.trm z  
-      val c = Z.ctxt z
-      val Ts = Z.C.nty_ctxt c
-      val (FakeTs', Ts', t') = fakefree_badbounds Ts t
-      val absterm = mk_foo_match (Z.C.apply c) Ts' t'
-    in
-      (t', (FakeTs', Ts', absterm))
-    end;
-
-(* Matching and Unification with exception handled *)
-fun clean_match thy (a as (pat, t)) =
-  let val (tyenv, tenv) = Pattern.match thy a (Vartab.empty, Vartab.empty)
-  in SOME (Vartab.dest tyenv, Vartab.dest tenv)
-  end handle Pattern.MATCH => NONE;
-
-(* given theory, max var index, pat, tgt; returns Seq of instantiations *)
-fun clean_unify thry ix (a as (pat, tgt)) =
-    let
-      (* type info will be re-derived, maybe this can be cached
-         for efficiency? *)
-      val pat_ty = Term.type_of pat;
-      val tgt_ty = Term.type_of tgt;
-      (* is it OK to ignore the type instantiation info?
-         or should I be using it? *)
-      val typs_unify =
-          SOME (Sign.typ_unify thry (pat_ty, tgt_ty) (Vartab.empty, ix))
-            handle Type.TUNIFY => NONE;
-    in
-      case typs_unify of
-        SOME (typinsttab, ix2) =>
-        let
-      (* is it right to throw away the flexes?
-         or should I be using them somehow? *)
-          fun mk_insts env =
-            (Vartab.dest (Envir.type_env env),
-             Envir.alist_of env);
-          val initenv = Envir.Envir {asol = Vartab.empty,
-                                     iTs = typinsttab, maxidx = ix2};
-          val useq = Unify.smash_unifiers thry [a] initenv
-	            handle UnequalLengths => Seq.empty
-		               | Term.TERM _ => Seq.empty;
-          fun clean_unify' useq () =
-              (case (Seq.pull useq) of
-                 NONE => NONE
-               | SOME (h,t) => SOME (mk_insts h, Seq.make (clean_unify' t)))
-	            handle UnequalLengths => NONE
-                   | Term.TERM _ => NONE
-        in
-          (Seq.make (clean_unify' useq))
-        end
-      | NONE => Seq.empty
-    end;
-
-(* Matching and Unification for zippers *)
-(* Note: Ts is a modified version of the original names of the outer
-bound variables. New names have been introduced to make sure they are
-unique w.r.t. all names in the term and each other. usednames' is
-oldnames + new names. *)
-fun clean_match_z thy pat z = 
-    let val (t, (FakeTs,Ts,absterm)) = prep_zipper_match z in
-      case clean_match thy (pat, t) of 
-        NONE => NONE 
-      | SOME insts => SOME (insts, FakeTs, Ts, absterm) end;
-(* ix = max var index *)
-fun clean_unify_z sgn ix pat z = 
-    let val (t, (FakeTs, Ts,absterm)) = prep_zipper_match z in
-    Seq.map (fn insts => (insts, FakeTs, Ts, absterm)) 
-            (clean_unify sgn ix (t, pat)) end;
-
-
-(* FOR DEBUGGING...
-type trace_subst_errT = int (* subgoal *)
-        * thm (* thm with all goals *)
-        * (Thm.cterm list (* certified free var placeholders for vars *)
-           * thm)  (* trivial thm of goal concl *)
-            (* possible matches/unifiers *)
-        * thm (* rule *)
-        * (((indexname * typ) list (* type instantiations *)
-              * (indexname * term) list ) (* term instantiations *)
-             * (string * typ) list (* Type abs env *)
-             * term) (* outer term *);
-
-val trace_subst_err = (ref NONE : trace_subst_errT option ref);
-val trace_subst_search = ref false;
-exception trace_subst_exp of trace_subst_errT;
-*)
-
-
-fun bot_left_leaf_of (l $ r) = bot_left_leaf_of l
-  | bot_left_leaf_of (Abs(s,ty,t)) = bot_left_leaf_of t
-  | bot_left_leaf_of x = x;
-
-(* Avoid considering replacing terms which have a var at the head as
-   they always succeed trivially, and uninterestingly. *)
-fun valid_match_start z =
-    (case bot_left_leaf_of (Z.trm z) of 
-      Var _ => false 
-      | _ => true);
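A toy illustration (hypothetical mini term datatype; abstractions omitted for brevity) of the head check above: bot_left_leaf_of walks down the left spine of applications, so a position whose head is a schematic variable is rejected as a match start.

    datatype trm = Cnst of string | Vr of string | App of trm * trm;
    fun bot_left_leaf (App (l, _)) = bot_left_leaf l
      | bot_left_leaf t = t;
    fun valid_start t = (case bot_left_leaf t of Vr _ => false | _ => true);
    (* valid_start (App (App (Cnst "f", Cnst "x"), Cnst "y")) = true
       valid_start (App (Vr "P", Cnst "x"))                   = false *)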
-
-(* search from top, left to right, then down *)
-val search_lr_all = ZipperSearch.all_bl_ur;
-
-(* search from top, left to right, then down *)
-fun search_lr_valid validf =
-    let 
-      fun sf_valid_td_lr z = 
-          let val here = if validf z then [Z.Here z] else [] in
-            case Z.trm z 
-             of _ $ _ => [Z.LookIn (Z.move_down_left z)] 
-                         @ here 
-                         @ [Z.LookIn (Z.move_down_right z)]
-              | Abs _ => here @ [Z.LookIn (Z.move_down_abs z)]
-              | _ => here
-          end;
-    in Z.lzy_search sf_valid_td_lr end;
-
-(* search from bottom to top, left to right *)
-
-fun search_bt_valid validf =
-    let 
-      fun sf_valid_td_lr z = 
-          let val here = if validf z then [Z.Here z] else [] in
-            case Z.trm z 
-             of _ $ _ => [Z.LookIn (Z.move_down_left z), 
-                          Z.LookIn (Z.move_down_right z)] @ here
-              | Abs _ => [Z.LookIn (Z.move_down_abs z)] @ here
-              | _ => here
-          end;
-    in Z.lzy_search sf_valid_td_lr end;
-
-fun searchf_unify_gen f (sgn, maxidx, z) lhs =
-    Seq.map (clean_unify_z sgn maxidx lhs) 
-            (Z.limit_apply f z);
-
-(* search all unifications *)
-val searchf_lr_unify_all =
-    searchf_unify_gen search_lr_all;
-
-(* search only for 'valid' unifiers (non abs subterms and non vars) *)
-val searchf_lr_unify_valid = 
-    searchf_unify_gen (search_lr_valid valid_match_start);
-
-val searchf_bt_unify_valid =
-    searchf_unify_gen (search_bt_valid valid_match_start);
-
-(* apply a substitution in the conclusion of the theorem th *)
-(* cfvs are certified free var placeholders for goal params *)
-(* conclthm is a theorem of for just the conclusion *)
-(* m is instantiation/match information *)
-(* rule is the equation for substitution *)
-fun apply_subst_in_concl i th (cfvs, conclthm) rule m =
-    (RWInst.rw m rule conclthm)
-      |> IsaND.unfix_frees cfvs
-      |> RWInst.beta_eta_contract
-      |> (fn r => Tactic.rtac r i th);
-
-(* substitute within the conclusion of goal i of gth, using a meta
-equation rule. Note that we assume rule has var indices zero'd *)
-fun prep_concl_subst i gth =
-    let
-      val th = Thm.incr_indexes 1 gth;
-      val tgt_term = Thm.prop_of th;
-
-      val sgn = Thm.theory_of_thm th;
-      val ctermify = Thm.cterm_of sgn;
-      val trivify = Thm.trivial o ctermify;
-
-      val (fixedbody, fvs) = IsaND.fix_alls_term i tgt_term;
-      val cfvs = rev (map ctermify fvs);
-
-      val conclterm = Logic.strip_imp_concl fixedbody;
-      val conclthm = trivify conclterm;
-      val maxidx = Thm.maxidx_of th;
-      val ft = ((Z.move_down_right (* ==> *)
-                 o Z.move_down_left (* Trueprop *)
-                 o Z.mktop
-                 o Thm.prop_of) conclthm)
-    in
-      ((cfvs, conclthm), (sgn, maxidx, ft))
-    end;
-
-(* substitute using an object or meta level equality *)
-fun eqsubst_tac' ctxt searchf instepthm i th =
-    let
-      val (cvfsconclthm, searchinfo) = prep_concl_subst i th;
-      val stepthms = Seq.of_list (prep_meta_eq ctxt instepthm);
-      fun rewrite_with_thm r =
-          let val (lhs,_) = Logic.dest_equals (Thm.concl_of r);
-          in searchf searchinfo lhs
-             |> Seq.maps (apply_subst_in_concl i th cvfsconclthm r) end;
-    in stepthms |> Seq.maps rewrite_with_thm end;
-
-
-(* distinct subgoals *)
-fun distinct_subgoals th =
-  the_default th (SINGLE distinct_subgoals_tac th);
-
-(* General substitution of multiple occurrences using one of
-   the given theorems *)
-
-
-exception eqsubst_occL_exp of
-          string * (int list) * (thm list) * int * thm;
-fun skip_first_occs_search occ srchf sinfo lhs =
-    case (skipto_skipseq occ (srchf sinfo lhs)) of
-      SkipMore _ => Seq.empty
-    | SkipSeq ss => Seq.flat ss;
-
-(* The occL is a list of integers indicating which occurrences,
-w.r.t. the search order, to rewrite. Backtracking will also find later
-occurrences, but all earlier ones are skipped. Thus you can use [0] to
-just find all rewrites. *)
-
-fun eqsubst_tac ctxt occL thms i th =
-    let val nprems = Thm.nprems_of th in
-      if nprems < i then Seq.empty else
-      let val thmseq = (Seq.of_list thms)
-        fun apply_occ occ th =
-            thmseq |> Seq.maps
-                    (fn r => eqsubst_tac' 
-                               ctxt 
-                               (skip_first_occs_search
-                                  occ searchf_lr_unify_valid) r
-                                 (i + ((Thm.nprems_of th) - nprems))
-                                 th);
-        val sortedoccL =
-            Library.sort (Library.rev_order o Library.int_ord) occL;
-      in
-        Seq.map distinct_subgoals (Seq.EVERY (map apply_occ sortedoccL) th)
-      end
-    end
-    handle THM _ => raise eqsubst_occL_exp ("THM",occL,thms,i,th);
-
-
-(* inthms are the given arguments in Isar, and treated as eqstep with
-   the first one, then the second etc *)
-fun eqsubst_meth ctxt occL inthms =
-    Method.SIMPLE_METHOD' (eqsubst_tac ctxt occL inthms);
-
-(* apply a substitution inside assumption j, keeps asm in the same place *)
-fun apply_subst_in_asm i th rule ((cfvs, j, ngoalprems, pth),m) =
-    let
-      val th2 = Thm.rotate_rule (j - 1) i th; (* put premise first *)
-      val preelimrule =
-          (RWInst.rw m rule pth)
-            |> (Seq.hd o prune_params_tac)
-            |> Thm.permute_prems 0 ~1 (* put old asm first *)
-            |> IsaND.unfix_frees cfvs (* unfix any global params *)
-            |> RWInst.beta_eta_contract; (* normal form *)
-  (*    val elimrule =
-          preelimrule
-            |> Tactic.make_elim (* make into elim rule *)
-            |> Thm.lift_rule (th2, i); (* lift into context *)
-   *)
-    in
-      (* ~j because new asm starts at back, thus we subtract 1 *)
-      Seq.map (Thm.rotate_rule (~j) ((Thm.nprems_of rule) + i))
-      (Tactic.dtac preelimrule i th2)
-
-      (* (Thm.bicompose
-                 false (* use unification *)
-                 (true, (* elim resolution *)
-                  elimrule, (2 + (Thm.nprems_of rule)) - ngoalprems)
-                 i th2) *)
-    end;
-
-
-(* prepare to substitute within the j'th premise of subgoal i of gth,
-using a meta-level equation. Note that we assume rule has var indices
-zero'd. Note that we also assume that premt is the j'th premise of
-subgoal i of gth. Note the repetition of work done for each
-assumption, i.e. this can be made more efficient for search over
-multiple assumptions.  *)
-fun prep_subst_in_asm i gth j =
-    let
-      val th = Thm.incr_indexes 1 gth;
-      val tgt_term = Thm.prop_of th;
-
-      val sgn = Thm.theory_of_thm th;
-      val ctermify = Thm.cterm_of sgn;
-      val trivify = Thm.trivial o ctermify;
-
-      val (fixedbody, fvs) = IsaND.fix_alls_term i tgt_term;
-      val cfvs = rev (map ctermify fvs);
-
-      val asmt = nth (Logic.strip_imp_prems fixedbody) (j - 1);
-      val asm_nprems = length (Logic.strip_imp_prems asmt);
-
-      val pth = trivify asmt;
-      val maxidx = Thm.maxidx_of th;
-
-      val ft = ((Z.move_down_right (* trueprop *)
-                 o Z.mktop
-                 o Thm.prop_of) pth)
-    in ((cfvs, j, asm_nprems, pth), (sgn, maxidx, ft)) end;
-
-(* prepare subst in every possible assumption *)
-fun prep_subst_in_asms i gth =
-    map (prep_subst_in_asm i gth)
-        ((fn l => Library.upto (1, length l))
-           (Logic.prems_of_goal (Thm.prop_of gth) i));
-
-
-(* substitute in an assumption using an object or meta level equality *)
-fun eqsubst_asm_tac' ctxt searchf skipocc instepthm i th =
-    let
-      val asmpreps = prep_subst_in_asms i th;
-      val stepthms = Seq.of_list (prep_meta_eq ctxt instepthm);
-      fun rewrite_with_thm r =
-          let val (lhs,_) = Logic.dest_equals (Thm.concl_of r)
-            fun occ_search occ [] = Seq.empty
-              | occ_search occ ((asminfo, searchinfo)::moreasms) =
-                (case searchf searchinfo occ lhs of
-                   SkipMore i => occ_search i moreasms
-                 | SkipSeq ss =>
-                   Seq.append (Seq.map (Library.pair asminfo) (Seq.flat ss))
-                               (occ_search 1 moreasms))
-                              (* find later substs also *)
-          in
-            occ_search skipocc asmpreps |> Seq.maps (apply_subst_in_asm i th r)
-          end;
-    in stepthms |> Seq.maps rewrite_with_thm end;
-
-
-fun skip_first_asm_occs_search searchf sinfo occ lhs =
-    skipto_skipseq occ (searchf sinfo lhs);
-
-fun eqsubst_asm_tac ctxt occL thms i th =
-    let val nprems = Thm.nprems_of th
-    in
-      if nprems < i then Seq.empty else
-      let val thmseq = (Seq.of_list thms)
-        fun apply_occ occK th =
-            thmseq |> Seq.maps
-                    (fn r =>
-                        eqsubst_asm_tac' ctxt (skip_first_asm_occs_search
-                                            searchf_lr_unify_valid) occK r
-                                         (i + ((Thm.nprems_of th) - nprems))
-                                         th);
-        val sortedoccs =
-            Library.sort (Library.rev_order o Library.int_ord) occL
-      in
-        Seq.map distinct_subgoals
-                (Seq.EVERY (map apply_occ sortedoccs) th)
-      end
-    end
-    handle THM _ => raise eqsubst_occL_exp ("THM",occL,thms,i,th);
-
-(* inthms are the given arguments in Isar, and treated as eqstep with
-   the first one, then the second etc *)
-fun eqsubst_asm_meth ctxt occL inthms =
-    Method.SIMPLE_METHOD' (eqsubst_asm_tac ctxt occL inthms);
-
-(* syntax for options: given "(asm)" this gives back true; without
-   it, gives back false *)
-val options_syntax =
-    (Args.parens (Args.$$$ "asm") >> (K true)) ||
-     (Scan.succeed false);
-
-val ith_syntax =
-    Scan.optional (Args.parens (Scan.repeat OuterParse.nat)) [0];
-
-(* combination method that takes a flag (true indicates that subst
-should be done to an assumption, false = apply to the conclusion of
-the goal) as well as the theorems to use *)
-fun subst_meth src =
-  Method.syntax ((Scan.lift options_syntax) -- (Scan.lift ith_syntax) -- Attrib.thms) src
-  #> (fn (((asmflag, occL), inthms), ctxt) =>
-    (if asmflag then eqsubst_asm_meth else eqsubst_meth) ctxt occL inthms);
-
-
-val setup =
-  Method.add_method ("subst", subst_meth, "single-step substitution");
-
-end;
--- a/src/Provers/project_rule.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-(*  Title:      Provers/project_rule.ML
-    ID:         $Id$
-    Author:     Makarius
-
-Transform mutual rule:
-  HH ==> (x1:A1 --> P1 x1) & ... & (xn:An --> Pn xn)
-into projection:
-  xi:Ai ==> HH ==> Pi xi
-*)
-
-signature PROJECT_RULE_DATA =
-sig
-  val conjunct1: thm
-  val conjunct2: thm
-  val mp: thm
-end;
-
-signature PROJECT_RULE =
-sig
-  val project: Proof.context -> int -> thm -> thm
-  val projects: Proof.context -> int list -> thm -> thm list
-  val projections: Proof.context -> thm -> thm list
-end;
-
-functor ProjectRuleFun(Data: PROJECT_RULE_DATA): PROJECT_RULE =
-struct
-
-fun conj1 th = th RS Data.conjunct1;
-fun conj2 th = th RS Data.conjunct2;
-fun imp th = th RS Data.mp;
-
-fun projects ctxt is raw_rule =
-  let
-    fun proj 1 th = the_default th (try conj1 th)
-      | proj k th = proj (k - 1) (conj2 th);
-    fun prems k th =
-      (case try imp th of
-        NONE => (k, th)
-      | SOME th' => prems (k + 1) th');
-    val ((_, [rule]), ctxt') = Variable.import_thms true [raw_rule] ctxt;
-    fun result i =
-      rule
-      |> proj i
-      |> prems 0 |-> (fn k =>
-        Thm.permute_prems 0 (~ k)
-        #> singleton (Variable.export ctxt' ctxt)
-        #> Drule.zero_var_indexes
-        #> RuleCases.save raw_rule
-        #> RuleCases.add_consumes k);
-  in map result is end;
-
-fun project ctxt i th = hd (projects ctxt [i] th);
-
-fun projections ctxt raw_rule =
-  let
-    fun projs k th =
-      (case try conj2 th of
-        NONE => k
-      | SOME th' => projs (k + 1) th');
-    val ((_, [rule]), _) = Variable.import_thms true [raw_rule] ctxt;
-  in projects ctxt (1 upto projs 1 rule) raw_rule end;
-
-end;
--- a/src/Pure/General/output.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/General/output.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -135,7 +135,7 @@
     let
       val start = start_timing ();
       val result = Exn.capture e ();
-      val end_msg = end_timing start;
+      val end_msg = #message (end_timing start);
       val _ = warning (if msg = "" then end_msg else msg ^ "\n" ^ end_msg);
     in Exn.release result end
   else e ();
--- a/src/Pure/IsaMakefile	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/IsaMakefile	Mon Mar 02 08:15:54 2009 +0100
@@ -19,8 +19,7 @@
 
 ## Pure
 
-BOOTSTRAP_FILES = ML-Systems/alice.ML ML-Systems/exn.ML			\
-  ML-Systems/install_pp_polyml.ML ML-Systems/ml_name_space.ML		\
+BOOTSTRAP_FILES = ML-Systems/exn.ML ML-Systems/ml_name_space.ML		\
   ML-Systems/mosml.ML ML-Systems/multithreading.ML			\
   ML-Systems/multithreading_polyml.ML ML-Systems/overloading_smlnj.ML	\
   ML-Systems/polyml-4.1.3.ML ML-Systems/polyml-4.1.4.ML			\
@@ -59,18 +58,18 @@
   Isar/attrib.ML Isar/auto_bind.ML Isar/calculation.ML Isar/class.ML	\
   Isar/class_target.ML Isar/code.ML Isar/code_unit.ML			\
   Isar/constdefs.ML Isar/context_rules.ML Isar/element.ML		\
-  Isar/expression.ML Isar/isar.ML Isar/isar_cmd.ML			\
-  Isar/isar_document.ML Isar/isar_syn.ML Isar/local_defs.ML		\
-  Isar/local_syntax.ML Isar/local_theory.ML Isar/locale.ML		\
-  Isar/method.ML Isar/net_rules.ML Isar/object_logic.ML Isar/obtain.ML	\
-  Isar/outer_keyword.ML Isar/outer_lex.ML Isar/outer_parse.ML		\
-  Isar/outer_syntax.ML Isar/overloading.ML Isar/proof.ML		\
-  Isar/proof_context.ML Isar/proof_display.ML Isar/proof_node.ML	\
-  Isar/rule_cases.ML Isar/rule_insts.ML Isar/session.ML			\
-  Isar/skip_proof.ML Isar/spec_parse.ML Isar/specification.ML		\
-  Isar/theory_target.ML Isar/toplevel.ML Isar/value_parse.ML		\
-  ML/ml_antiquote.ML ML/ml_context.ML ML/ml_lex.ML ML/ml_parse.ML	\
-  ML/ml_syntax.ML ML/ml_thms.ML Proof/extraction.ML			\
+  Isar/expression.ML Isar/isar_cmd.ML Isar/isar_document.ML		\
+  Isar/isar_syn.ML Isar/local_defs.ML Isar/local_syntax.ML		\
+  Isar/local_theory.ML Isar/locale.ML Isar/method.ML Isar/net_rules.ML	\
+  Isar/object_logic.ML Isar/obtain.ML Isar/outer_keyword.ML		\
+  Isar/outer_lex.ML Isar/outer_parse.ML Isar/outer_syntax.ML		\
+  Isar/overloading.ML Isar/proof.ML Isar/proof_context.ML		\
+  Isar/proof_display.ML Isar/proof_node.ML Isar/rule_cases.ML		\
+  Isar/rule_insts.ML Isar/skip_proof.ML Isar/spec_parse.ML		\
+  Isar/specification.ML Isar/theory_target.ML Isar/toplevel.ML		\
+  Isar/value_parse.ML ML/ml_antiquote.ML ML/ml_context.ML ML/ml_lex.ML	\
+  ML/ml_parse.ML ML/ml_syntax.ML ML/ml_thms.ML				\
+  ML-Systems/install_pp_polyml.ML Proof/extraction.ML			\
   Proof/proof_rewrite_rules.ML Proof/proof_syntax.ML			\
   Proof/proofchecker.ML Proof/reconstruct.ML ProofGeneral/ROOT.ML	\
   ProofGeneral/pgip.ML ProofGeneral/pgip_input.ML			\
@@ -81,11 +80,12 @@
   ProofGeneral/proof_general_pgip.ML Pure.thy ROOT.ML Syntax/ast.ML	\
   Syntax/lexicon.ML Syntax/mixfix.ML Syntax/parser.ML			\
   Syntax/printer.ML Syntax/simple_syntax.ML Syntax/syn_ext.ML		\
-  Syntax/syn_trans.ML Syntax/syntax.ML Syntax/type_ext.ML Thy/html.ML	\
-  Thy/latex.ML Thy/present.ML Thy/term_style.ML Thy/thm_deps.ML		\
-  Thy/thy_header.ML Thy/thy_info.ML Thy/thy_load.ML Thy/thy_output.ML	\
-  Thy/thy_syntax.ML Tools/ROOT.ML Tools/find_consts.ML			\
-  Tools/find_theorems.ML Tools/isabelle_process.ML Tools/named_thms.ML	\
+  Syntax/syn_trans.ML Syntax/syntax.ML Syntax/type_ext.ML		\
+  System/isabelle_process.ML System/isar.ML System/session.ML		\
+  Thy/html.ML Thy/latex.ML Thy/present.ML Thy/term_style.ML		\
+  Thy/thm_deps.ML Thy/thy_header.ML Thy/thy_info.ML Thy/thy_load.ML	\
+  Thy/thy_output.ML Thy/thy_syntax.ML Tools/ROOT.ML			\
+  Tools/find_consts.ML Tools/find_theorems.ML Tools/named_thms.ML	\
   Tools/xml_syntax.ML assumption.ML axclass.ML codegen.ML config.ML	\
   conjunction.ML consts.ML context.ML context_position.ML conv.ML	\
   defs.ML display.ML drule.ML envir.ML facts.ML goal.ML			\
@@ -119,8 +119,8 @@
   General/position.scala General/swing.scala General/symbol.scala	\
   General/xml.scala General/yxml.scala Isar/isar.scala			\
   Isar/isar_document.scala Isar/outer_keyword.scala			\
-  Thy/thy_header.scala Tools/isabelle_process.scala			\
-  Tools/isabelle_syntax.scala Tools/isabelle_system.scala
+  System/isabelle_process.scala System/isabelle_system.scala		\
+  Thy/thy_header.scala Tools/isabelle_syntax.scala
 
 
 SCALA_TARGET = $(ISABELLE_HOME)/lib/classes/Pure.jar
--- a/src/Pure/Isar/ROOT.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Isar/ROOT.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -82,8 +82,6 @@
 use "../old_goals.ML";
 use "outer_syntax.ML";
 use "../Thy/thy_info.ML";
-use "session.ML";
-use "isar.ML";
 use "isar_document.ML";
 
 (*theory and proof operations*)
@@ -91,3 +89,5 @@
 use "../Thy/thm_deps.ML";
 use "isar_cmd.ML";
 use "isar_syn.ML";
+
+
--- a/src/Pure/Isar/calculation.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Isar/calculation.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -15,7 +15,7 @@
   val symmetric: attribute
   val also: (Facts.ref * Attrib.src list) list option -> bool -> Proof.state -> Proof.state Seq.seq
   val also_i: thm list option -> bool -> Proof.state -> Proof.state Seq.seq
-  val finally_: (Facts.ref * Attrib.src list) list option -> bool ->
+  val finally: (Facts.ref * Attrib.src list) list option -> bool ->
     Proof.state -> Proof.state Seq.seq
   val finally_i: thm list option -> bool -> Proof.state -> Proof.state Seq.seq
   val moreover: bool -> Proof.state -> Proof.state
@@ -150,7 +150,7 @@
 
 val also = calculate Proof.get_thmss false;
 val also_i = calculate (K I) false;
-val finally_ = calculate Proof.get_thmss true;
+val finally = calculate Proof.get_thmss true;
 val finally_i = calculate (K I) true;
 
 
--- a/src/Pure/Isar/expression.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Isar/expression.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -726,14 +726,14 @@
   | defines_to_notes _ e = e;
 
 fun gen_add_locale prep_decl
-    bname raw_predicate_bname raw_imprt raw_body thy =
+    bname raw_predicate_bname raw_import raw_body thy =
   let
     val name = Sign.full_bname thy bname;
     val _ = Locale.defined thy name andalso
       error ("Duplicate definition of locale " ^ quote name);
 
     val ((fixed, deps, body_elems), (parms, ctxt')) =
-      prep_decl raw_imprt I raw_body (ProofContext.init thy);
+      prep_decl raw_import I raw_body (ProofContext.init thy);
     val text as (((_, exts'), _), defs) = eval ctxt' deps body_elems;
 
     val predicate_bname = if raw_predicate_bname = "" then bname
--- a/src/Pure/Isar/isar.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,379 +0,0 @@
-(*  Title:      Pure/Isar/isar.ML
-    Author:     Makarius
-
-The global Isabelle/Isar state and main read-eval-print loop.
-*)
-
-signature ISAR =
-sig
-  val init: unit -> unit
-  val exn: unit -> (exn * string) option
-  val state: unit -> Toplevel.state
-  val context: unit -> Proof.context
-  val goal: unit -> thm
-  val print: unit -> unit
-  val >> : Toplevel.transition -> bool
-  val >>> : Toplevel.transition list -> unit
-  val linear_undo: int -> unit
-  val undo: int -> unit
-  val kill: unit -> unit
-  val kill_proof: unit -> unit
-  val crashes: exn list ref
-  val toplevel_loop: {init: bool, welcome: bool, sync: bool, secure: bool} -> unit
-  val loop: unit -> unit
-  val main: unit -> unit
-
-  type id = string
-  val no_id: id
-  val create_command: Toplevel.transition -> id
-  val insert_command: id -> id -> unit
-  val remove_command: id -> unit
-end;
-
-structure Isar: ISAR =
-struct
-
-
-(** TTY model -- SINGLE-THREADED! **)
-
-(* the global state *)
-
-type history = (Toplevel.state * Toplevel.transition) list;
-  (*previous state, state transition -- regular commands only*)
-
-local
-  val global_history = ref ([]: history);
-  val global_state = ref Toplevel.toplevel;
-  val global_exn = ref (NONE: (exn * string) option);
-in
-
-fun edit_history count f = NAMED_CRITICAL "Isar" (fn () =>
-  let
-    fun edit 0 (st, hist) = (global_history := hist; global_state := st; global_exn := NONE)
-      | edit n (st, hist) = edit (n - 1) (f st hist);
-  in edit count (! global_state, ! global_history) end);
-
-fun state () = NAMED_CRITICAL "Isar" (fn () => ! global_state);
-fun set_state state = NAMED_CRITICAL "Isar" (fn () => global_state := state);
-
-fun exn () = NAMED_CRITICAL "Isar" (fn () => ! global_exn);
-fun set_exn exn =  NAMED_CRITICAL "Isar" (fn () => global_exn := exn);
-
-end;
-
-
-fun init () = edit_history 1 (K (K (Toplevel.toplevel, [])));
-
-fun context () = Toplevel.context_of (state ())
-  handle Toplevel.UNDEF => error "Unknown context";
-
-fun goal () = #2 (#2 (Proof.get_goal (Toplevel.proof_of (state ()))))
-  handle Toplevel.UNDEF => error "No goal present";
-
-fun print () = Toplevel.print_state false (state ());
-
-
-(* history navigation *)
-
-local
-
-fun find_and_undo _ [] = error "Undo history exhausted"
-  | find_and_undo which ((prev, tr) :: hist) =
-      ((case Toplevel.init_of tr of SOME name => ThyInfo.kill_thy name | NONE => ());
-        if which (Toplevel.name_of tr) then (prev, hist) else find_and_undo which hist);
-
-in
-
-fun linear_undo n = edit_history n (K (find_and_undo (K true)));
-
-fun undo n = edit_history n (fn st => fn hist =>
-  find_and_undo (if Toplevel.is_proof st then K true else OuterKeyword.is_theory) hist);
-
-fun kill () = edit_history 1 (fn st => fn hist =>
-  find_and_undo
-    (if Toplevel.is_proof st then OuterKeyword.is_theory else OuterKeyword.is_theory_begin) hist);
-
-fun kill_proof () = edit_history 1 (fn st => fn hist =>
-  if Toplevel.is_proof st then find_and_undo OuterKeyword.is_theory hist
-  else raise Toplevel.UNDEF);
-
-end;
-
-
-(* interactive state transformations *)
-
-fun op >> tr =
-  (case Toplevel.transition true tr (state ()) of
-    NONE => false
-  | SOME (_, SOME err) => (set_exn (SOME err); Toplevel.error_msg tr err; true)
-  | SOME (st', NONE) =>
-      let
-        val name = Toplevel.name_of tr;
-        val _ = if OuterKeyword.is_theory_begin name then init () else ();
-        val _ =
-          if OuterKeyword.is_regular name
-          then edit_history 1 (fn st => fn hist => (st', (st, tr) :: hist)) else ();
-      in true end);
-
-fun op >>> [] = ()
-  | op >>> (tr :: trs) = if op >> tr then op >>> trs else ();
-
-
-(* toplevel loop *)
-
-val crashes = ref ([]: exn list);
-
-local
-
-fun raw_loop secure src =
-  let
-    fun check_secure () =
-      (if secure then warning "Secure loop -- cannot exit to ML" else (); secure);
-  in
-    (case Source.get_single (Source.set_prompt Source.default_prompt src) of
-      NONE => if secure then quit () else ()
-    | SOME (tr, src') => if op >> tr orelse check_secure () then raw_loop secure src' else ())
-    handle exn =>
-      (Output.error_msg (Toplevel.exn_message exn)
-        handle crash =>
-          (CRITICAL (fn () => change crashes (cons crash));
-            warning "Recovering from Isar toplevel crash -- see also Isar.crashes");
-          raw_loop secure src)
-  end;
-
-in
-
-fun toplevel_loop {init = do_init, welcome, sync, secure} =
- (Context.set_thread_data NONE;
-  if do_init then init () else ();  (* FIXME init editor model *)
-  if welcome then writeln (Session.welcome ()) else ();
-  uninterruptible (fn _ => fn () => raw_loop secure (OuterSyntax.isar sync)) ());
-
-end;
-
-fun loop () =
-  toplevel_loop {init = false, welcome = false, sync = false, secure = Secure.is_secure ()};
-
-fun main () =
-  toplevel_loop {init = true, welcome = true, sync = false, secure = Secure.is_secure ()};
-
-
-
-(** individual toplevel commands **)
-
-(* unique identification *)
-
-type id = string;
-val no_id : id = "";
-
-
-(* command category *)
-
-datatype category = Empty | Theory | Proof | Diag | Control;
-
-fun category_of tr =
-  let val name = Toplevel.name_of tr in
-    if name = "" then Empty
-    else if OuterKeyword.is_theory name then Theory
-    else if OuterKeyword.is_proof name then Proof
-    else if OuterKeyword.is_diag name then Diag
-    else Control
-  end;
-
-val is_theory = fn Theory => true | _ => false;
-val is_proper = fn Theory => true | Proof => true | _ => false;
-val is_regular = fn Control => false | _ => true;
-
-
-(* command status *)
-
-datatype status =
-  Unprocessed |
-  Running |
-  Failed of exn * string |
-  Finished of Toplevel.state;
-
-fun status_markup Unprocessed = Markup.unprocessed
-  | status_markup Running = (Markup.runningN, [])
-  | status_markup (Failed _) = Markup.failed
-  | status_markup (Finished _) = Markup.finished;
-
-fun run int tr state =
-  (case Toplevel.transition int tr state of
-    NONE => NONE
-  | SOME (_, SOME err) => (Toplevel.error_msg tr err; SOME (Failed err))
-  | SOME (state', NONE) => SOME (Finished state'));
-
-
-(* datatype command *)
-
-datatype command = Command of
- {category: category,
-  transition: Toplevel.transition,
-  status: status};
-
-fun make_command (category, transition, status) =
-  Command {category = category, transition = transition, status = status};
-
-val empty_command =
-  make_command (Empty, Toplevel.empty, Finished Toplevel.toplevel);
-
-fun map_command f (Command {category, transition, status}) =
-  make_command (f (category, transition, status));
-
-fun map_status f = map_command (fn (category, transition, status) =>
-  (category, transition, f status));
-
-
-(* global collection of identified commands *)
-
-fun err_dup id = sys_error ("Duplicate command " ^ quote id);
-fun err_undef id = sys_error ("Unknown command " ^ quote id);
-
-local val global_commands = ref (Graph.empty: command Graph.T) in
-
-fun change_commands f = NAMED_CRITICAL "Isar" (fn () => change global_commands f)
-  handle Graph.DUP bad => err_dup bad | Graph.UNDEF bad => err_undef bad;
-
-fun get_commands () = NAMED_CRITICAL "Isar" (fn () => ! global_commands);
-
-end;
-
-fun add_edge (id1, id2) =
-  if id1 = no_id orelse id2 = no_id then I else Graph.add_edge (id1, id2);
-
-
-fun init_commands () = change_commands (K Graph.empty);
-
-fun the_command id =
-  let val Command cmd =
-    if id = no_id then empty_command
-    else (Graph.get_node (get_commands ()) id handle Graph.UNDEF bad => err_undef bad)
-  in cmd end;
-
-fun prev_command id =
-  if id = no_id then no_id
-  else
-    (case Graph.imm_preds (get_commands ()) id handle Graph.UNDEF bad => err_undef bad of
-      [] => no_id
-    | [prev] => prev
-    | _ => sys_error ("Non-linear command dependency " ^ quote id));
-
-fun next_commands id =
-  if id = no_id then []
-  else Graph.imm_succs (get_commands ()) id handle Graph.UNDEF bad => err_undef bad;
-
-fun descendant_commands ids =
-  Graph.all_succs (get_commands ()) (distinct (op =) (filter_out (fn id => id = no_id) ids))
-    handle Graph.UNDEF bad => err_undef bad;
-
-
-(* maintain status *)
-
-fun report_status markup id = Toplevel.status (#transition (the_command id)) markup;
-
-fun update_status status id = change_commands (Graph.map_node id (map_status (K status)));
-
-fun report_update_status status id =
-  change_commands (Graph.map_node id (map_status (fn old_status =>
-    let val markup = status_markup status
-    in if markup <> status_markup old_status then report_status markup id else (); status end)));
-
-
-(* create and dispose commands *)
-
-fun create_command raw_tr =
-  let
-    val (id, tr) =
-      (case Toplevel.get_id raw_tr of
-        SOME id => (id, raw_tr)
-      | NONE =>
-          let val id =
-            if ! Toplevel.debug then "isabelle:" ^ Toplevel.name_of raw_tr ^ serial_string ()
-            else "isabelle:" ^ serial_string ()
-          in (id, Toplevel.put_id id raw_tr) end);
-
-    val cmd = make_command (category_of tr, tr, Unprocessed);
-    val _ = change_commands (Graph.new_node (id, cmd));
-  in id end;
-
-fun dispose_commands ids =
-  let
-    val desc = descendant_commands ids;
-    val _ = List.app (report_status Markup.disposed) desc;
-    val _ = change_commands (Graph.del_nodes desc);
-  in () end;
-
-
-(* final state *)
-
-fun the_state id =
-  (case the_command id of
-    {status = Finished state, ...} => state
-  | {transition, ...} => error ("Unfinished command " ^ Toplevel.str_of transition));
-
-
-
-(** editor model **)
-
-(* run commands *)
-
-fun try_run id =
-  (case try the_state (prev_command id) of
-    NONE => ()
-  | SOME state =>
-      (case run true (#transition (the_command id)) state of
-        NONE => ()
-      | SOME status => report_update_status status id));
-
-fun rerun_commands ids =
-  (List.app (report_update_status Unprocessed) ids; List.app try_run ids);
-
-
-(* modify document *)
-
-fun insert_command prev id = NAMED_CRITICAL "Isar" (fn () =>
-  let
-    val nexts = next_commands prev;
-    val _ = change_commands
-     (fold (fn next => Graph.del_edge (prev, next)) nexts #> add_edge (prev, id) #>
-      fold (fn next => Graph.add_edge (id, next)) nexts);
-  in descendant_commands [id] end) |> rerun_commands;
-
-fun remove_command id = NAMED_CRITICAL "Isar" (fn () =>
-  let
-    val prev = prev_command id;
-    val nexts = next_commands id;
-    val _ = change_commands
-     (fold (fn next => Graph.del_edge (id, next)) nexts #>
-      fold (fn next => add_edge (prev, next)) nexts);
-  in descendant_commands nexts end) |> rerun_commands;
-
-
-(* concrete syntax *)
-
-local
-
-structure P = OuterParse;
-val op >> = Scan.>>;
-
-in
-
-val _ =
-  OuterSyntax.internal_command "Isar.command"
-    (P.string -- P.string >> (fn (id, text) =>
-      Toplevel.imperative (fn () =>
-        ignore (create_command (OuterSyntax.prepare_command (Position.id id) text)))));
-
-val _ =
-  OuterSyntax.internal_command "Isar.insert"
-    (P.string -- P.string >> (fn (prev, id) =>
-      Toplevel.imperative (fn () => insert_command prev id)));
-
-val _ =
-  OuterSyntax.internal_command "Isar.remove"
-    (P.string >> (fn id => Toplevel.imperative (fn () => remove_command id)));
-
-end;
-
-end;
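The TTY entry points above are ordinary ML functions; the old Pure/Isar/isar.ML deleted here reappears as Pure/System/isar.ML later in this changeset with the same interface. A minimal sketch of starting the read-eval-print loop from a raw ML toplevel, assuming nothing beyond the ISAR signature:

(*minimal sketch: enter the interactive Isar loop from raw ML*)
Isar.main ();   (*init, print welcome message, run the TTY loop*)
Isar.loop ();   (*re-enter the loop later without re-initialization*)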
--- a/src/Pure/Isar/isar_cmd.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Isar/isar_cmd.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -32,7 +32,6 @@
   val skip_proof: Toplevel.transition -> Toplevel.transition
   val init_theory: string * string list * (string * bool) list ->
     Toplevel.transition -> Toplevel.transition
-  val welcome: Toplevel.transition -> Toplevel.transition
   val exit: Toplevel.transition -> Toplevel.transition
   val quit: Toplevel.transition -> Toplevel.transition
   val pr: string list * (int option * int option) -> Toplevel.transition -> Toplevel.transition
@@ -265,8 +264,6 @@
       if ThyInfo.check_known_thy (Context.theory_name thy)
       then ThyInfo.end_theory thy else ());
 
-val welcome = Toplevel.imperative (writeln o Session.welcome);
-
 val exit = Toplevel.keep (fn state =>
  (Context.set_thread_data (try Toplevel.generic_theory_of state);
   raise Toplevel.TERMINATE));
--- a/src/Pure/Isar/isar_syn.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Isar/isar_syn.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -693,7 +693,7 @@
 val _ =
   OuterSyntax.command "finally" "combine calculation and current facts, exhibit result"
     (K.tag_proof K.prf_chain)
-    (calc_args >> (Toplevel.proofs' o Calculation.finally_));
+    (calc_args >> (Toplevel.proofs' o Calculation.finally));
 
 val _ =
   OuterSyntax.command "moreover" "augment calculation by current facts"
@@ -729,39 +729,6 @@
       handle ERROR msg => Scan.fail_with (K msg)));
 
 
-(* global history commands *)
-
-val _ =
-  OuterSyntax.improper_command "init_toplevel" "init toplevel point-of-interest" K.control
-    (Scan.succeed (Toplevel.no_timing o Toplevel.imperative Isar.init));
-
-val _ =
-  OuterSyntax.improper_command "linear_undo" "undo commands" K.control
-    (Scan.optional P.nat 1 >>
-      (fn n => Toplevel.no_timing o Toplevel.imperative (fn () => Isar.linear_undo n)));
-
-val _ =
-  OuterSyntax.improper_command "undo" "undo commands (skipping closed proofs)" K.control
-    (Scan.optional P.nat 1 >>
-      (fn n => Toplevel.no_timing o Toplevel.imperative (fn () => Isar.undo n)));
-
-val _ =
-  OuterSyntax.improper_command "undos_proof" "undo commands (skipping closed proofs)" K.control
-    (Scan.optional P.nat 1 >> (fn n => Toplevel.no_timing o
-      Toplevel.keep (fn state =>
-        if Toplevel.is_proof state then (Isar.undo n; Isar.print ()) else raise Toplevel.UNDEF)));
-
-val _ =
-  OuterSyntax.improper_command "cannot_undo" "partial undo -- Proof General legacy" K.control
-    (P.name >>
-      (fn "end" => Toplevel.no_timing o Toplevel.imperative (fn () => Isar.undo 1)
-        | txt => Toplevel.imperative (fn () => error ("Cannot undo " ^ quote txt))));
-
-val _ =
-  OuterSyntax.improper_command "kill" "kill partial proof or theory development" K.control
-    (Scan.succeed (Toplevel.no_timing o Toplevel.imperative Isar.kill));
-
-
 
 (** diagnostic commands (for interactive mode only) **)
 
@@ -974,9 +941,5 @@
   OuterSyntax.improper_command "exit" "exit Isar loop" K.control
     (Scan.succeed (Toplevel.no_timing o IsarCmd.exit));
 
-val _ =
-  OuterSyntax.improper_command "welcome" "print welcome message" K.diag
-    (Scan.succeed (Toplevel.no_timing o IsarCmd.welcome));
-
 end;
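The outer-syntax history commands removed above are thin wrappers; their ML-level counterparts in the Isar structure remain available, exactly as invoked by the deleted parsers (sketch):

(*ML equivalents of the removed commands*)
Isar.init ();          (*init_toplevel*)
Isar.linear_undo 1;    (*linear_undo*)
Isar.undo 1;           (*undo*)
Isar.kill ();          (*kill*)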
 
--- a/src/Pure/Isar/method.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Isar/method.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -49,7 +49,6 @@
   val erule: int -> thm list -> method
   val drule: int -> thm list -> method
   val frule: int -> thm list -> method
-  val iprover_tac: Proof.context -> int option -> int -> tactic
   val set_tactic: (thm list -> tactic) -> Proof.context -> Proof.context
   val tactic: string * Position.T -> Proof.context -> method
   val raw_tactic: string * Position.T -> Proof.context -> method
@@ -296,55 +295,6 @@
     THEN Tactic.distinct_subgoals_tac;
 
 
-(* iprover -- intuitionistic proof search *)
-
-local
-
-val remdups_tac = SUBGOAL (fn (g, i) =>
-  let val prems = Logic.strip_assums_hyp g in
-    REPEAT_DETERM_N (length prems - length (distinct op aconv prems))
-    (Tactic.ematch_tac [Drule.remdups_rl] i THEN Tactic.eq_assume_tac i)
-  end);
-
-fun REMDUPS tac = tac THEN_ALL_NEW remdups_tac;
-
-val bires_tac = Tactic.biresolution_from_nets_tac ContextRules.orderlist;
-
-fun safe_step_tac ctxt =
-  ContextRules.Swrap ctxt
-   (eq_assume_tac ORELSE'
-    bires_tac true (ContextRules.netpair_bang ctxt));
-
-fun unsafe_step_tac ctxt =
-  ContextRules.wrap ctxt
-   (assume_tac APPEND'
-    bires_tac false (ContextRules.netpair_bang ctxt) APPEND'
-    bires_tac false (ContextRules.netpair ctxt));
-
-fun step_tac ctxt i =
-  REPEAT_DETERM1 (REMDUPS (safe_step_tac ctxt) i) ORELSE
-  REMDUPS (unsafe_step_tac ctxt) i;
-
-fun intprover_tac ctxt gs d lim = SUBGOAL (fn (g, i) => if d > lim then no_tac else
-  let
-    val ps = Logic.strip_assums_hyp g;
-    val c = Logic.strip_assums_concl g;
-  in
-    if member (fn ((ps1, c1), (ps2, c2)) =>
-        c1 aconv c2 andalso
-        length ps1 = length ps2 andalso
-        gen_eq_set (op aconv) (ps1, ps2)) gs (ps, c) then no_tac
-    else (step_tac ctxt THEN_ALL_NEW intprover_tac ctxt ((ps, c) :: gs) (d + 1) lim) i
-  end);
-
-in
-
-fun iprover_tac ctxt opt_lim =
-  SELECT_GOAL (DEEPEN (2, the_default 20 opt_lim) (intprover_tac ctxt [] 0) 4 1);
-
-end;
-
-
 (* ML tactics *)
 
 structure TacticData = ProofDataFun
@@ -511,39 +461,6 @@
 end;
 
 
-(* iprover syntax *)
-
-local
-
-val introN = "intro";
-val elimN = "elim";
-val destN = "dest";
-val ruleN = "rule";
-
-fun modifier name kind kind' att =
-  Args.$$$ name |-- (kind >> K NONE || kind' |-- P.nat --| Args.colon >> SOME)
-    >> (pair (I: Proof.context -> Proof.context) o att);
-
-val iprover_modifiers =
- [modifier destN Args.bang_colon Args.bang ContextRules.dest_bang,
-  modifier destN Args.colon (Scan.succeed ()) ContextRules.dest,
-  modifier elimN Args.bang_colon Args.bang ContextRules.elim_bang,
-  modifier elimN Args.colon (Scan.succeed ()) ContextRules.elim,
-  modifier introN Args.bang_colon Args.bang ContextRules.intro_bang,
-  modifier introN Args.colon (Scan.succeed ()) ContextRules.intro,
-  Args.del -- Args.colon >> K (I, ContextRules.rule_del)];
-
-in
-
-val iprover_meth =
-  bang_sectioned_args' iprover_modifiers (Scan.lift (Scan.option P.nat))
-    (fn n => fn prems => fn ctxt => METHOD (fn facts =>
-      HEADGOAL (insert_tac (prems @ facts) THEN'
-      ObjectLogic.atomize_prems_tac THEN' iprover_tac ctxt n)));
-
-end;
-
-
 (* tactic syntax *)
 
 fun nat_thms_args f = uncurry f oo
@@ -599,7 +516,6 @@
     ("fold", thms_ctxt_args fold_meth, "fold definitions"),
     ("atomize", (atomize o fst) oo syntax (Args.mode "full"),
       "present local premises as object-level statements"),
-    ("iprover", iprover_meth, "intuitionistic proof search"),
     ("rule", thms_ctxt_args some_rule, "apply some intro/elim rule"),
     ("erule", nat_thms_args erule, "apply rule in elimination manner (improper)"),
     ("drule", nat_thms_args drule, "apply rule in destruct manner (improper)"),
--- a/src/Pure/Isar/session.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,105 +0,0 @@
-(*  Title:      Pure/Isar/session.ML
-    Author:     Markus Wenzel, TU Muenchen
-
-Session management -- maintain state of logic images.
-*)
-
-signature SESSION =
-sig
-  val id: unit -> string list
-  val name: unit -> string
-  val welcome: unit -> string
-  val use_dir: string -> bool -> string list -> bool -> bool -> string -> bool -> string list ->
-    string -> string -> bool * string -> string -> int -> bool -> int -> int -> bool -> unit
-  val finish: unit -> unit
-end;
-
-structure Session: SESSION =
-struct
-
-
-(* session state *)
-
-val session = ref ([Context.PureN]: string list);
-val session_path = ref ([]: string list);
-val session_finished = ref false;
-val remote_path = ref (NONE: Url.T option);
-
-
-(* access path *)
-
-fun id () = ! session;
-fun path () = ! session_path;
-
-fun str_of [] = Context.PureN
-  | str_of elems = space_implode "/" elems;
-
-fun name () = "Isabelle/" ^ str_of (path ());
-
-fun welcome () =
-  if Distribution.is_official then
-    "Welcome to " ^ name () ^ " (" ^ Distribution.version ^ ")"
-  else
-    "Unofficial version of " ^ name () ^ " (" ^ Distribution.version ^ ")" ^
-    (if Distribution.changelog <> "" then "\nSee also " ^ Distribution.changelog else "");
-
-
-(* add_path *)
-
-fun add_path reset s =
-  let val sess = ! session @ [s] in
-    (case duplicates (op =) sess of
-      [] => (session := sess; session_path := ((if reset then [] else ! session_path) @ [s]))
-    | dups => error ("Duplicate session identifiers " ^ commas_quote dups ^ " in " ^ str_of sess))
-  end;
-
-
-(* init *)
-
-fun init reset parent name =
-  if not (member (op =) (! session) parent) orelse not (! session_finished) then
-    error ("Unfinished parent session " ^ quote parent ^ " for " ^ quote name)
-  else (add_path reset name; session_finished := false);
-
-
-(* finish *)
-
-fun finish () =
-  (Output.accumulated_time ();
-    ThyInfo.finish ();
-    Present.finish ();
-    Future.shutdown ();
-    session_finished := true);
-
-
-(* use_dir *)
-
-fun get_rpath rpath =
-  (if rpath = "" then () else
-     if is_some (! remote_path) then
-       error "Path for remote theory browsing information may only be set once"
-     else
-       remote_path := SOME (Url.explode rpath);
-   (! remote_path, rpath <> ""));
-
-fun dumping (_, "") = NONE
-  | dumping (cp, path) = SOME (cp, Path.explode path);
-
-fun use_dir root build modes reset info doc doc_graph doc_versions
-    parent name dump rpath level verbose max_threads trace_threads parallel_proofs =
-  ((fn () =>
-     (init reset parent name;
-      Present.init build info doc doc_graph doc_versions (path ()) name
-        (dumping dump) (get_rpath rpath) verbose (map ThyInfo.get_theory (ThyInfo.get_names ()));
-      use root;
-      finish ()))
-    |> setmp_noncritical Proofterm.proofs level
-    |> setmp_noncritical print_mode (modes @ print_mode_value ())
-    |> setmp_noncritical Goal.parallel_proofs parallel_proofs
-    |> setmp_noncritical Multithreading.trace trace_threads
-    |> setmp_noncritical Multithreading.max_threads
-      (if Multithreading.available then max_threads
-       else (if max_threads = 1 then () else warning "Multithreading support unavailable"; 1))) ()
-  handle exn => (Output.error_msg (Toplevel.exn_message exn); exit 1);
-
-end;
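For illustration only: the welcome line produced by Session.welcome (the file moves to Pure/System/session.ML in this changeset) has the following shape for an official build; the session name and version below are made up:

(*shape: "Welcome to " ^ name () ^ " (" ^ Distribution.version ^ ")"*)
val example_welcome = "Welcome to Isabelle/HOL (Isabelle2009)";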
--- a/src/Pure/ML-Systems/alice.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,220 +0,0 @@
-(*  Title:      Pure/ML-Systems/alice.ML
-
-Compatibility file for Alice 1.4.
-
-NOTE: there is no wrapper script; may run it interactively as follows:
-
-$ cd Isabelle/src/Pure
-$ env ALICE_JIT_MODE=0 ISABELLE_HOME=$(cd ../..; pwd) alice
-- val ml_system = "alice";
-- use "ML-Systems/exn.ML";
-- use "ML-Systems/universal.ML";
-- use "ML-Systems/multithreading.ML";
-- use "ML-Systems/time_limit.ML";
-- use "ML-Systems/alice.ML";
-- use "ROOT.ML";
-- Session.finish ();
-*)
-
-val ml_system_fix_ints = false;
-
-fun forget_structure _ = ();
-
-fun exit 0 = (OS.Process.exit OS.Process.success): unit
-  | exit _ = OS.Process.exit OS.Process.failure;
-
-
-(** ML system related **)
-
-(*low-level pointer equality*)
-fun pointer_eq (_: 'a, _: 'a) = false;
-
-
-(* integer compatibility -- downgraded IntInf *)
-
-structure Time =
-struct
-  open Time;
-  val fromMilliseconds = Time.fromMilliseconds o IntInf.fromInt;
-  val fromSeconds = Time.fromSeconds o IntInf.fromInt;
-end;
-
-structure IntInf =
-struct
-  fun divMod (x, y) = (x div y, x mod y);
-  open Int;
-end;
-
-
-(* restore old-style character / string functions *)
-
-exception Ord;
-fun ord "" = raise Ord
-  | ord s = Char.ord (String.sub (s, 0));
-
-val chr = String.str o chr;
-val explode = map String.str o String.explode;
-val implode = String.concat;
-
-
-(* Poly/ML emulation *)
-
-fun quit () = exit 0;
-
-fun get_print_depth () = ! Print.depth;
-fun print_depth n = Print.depth := n;
-
-
-(* compiler-independent timing functions *)
-
-structure Timer =
-struct
-  open Timer;
-  type cpu_timer = unit;
-  fun startCPUTimer () = ();
-  fun checkCPUTimer () = {sys = Time.zeroTime, usr = Time.zeroTime};
-  fun checkGCTime () = Time.zeroTime;
-end;
-
-fun start_timing () =
-  let val CPUtimer = Timer.startCPUTimer();
-      val time = Timer.checkCPUTimer(CPUtimer)
-  in  (CPUtimer,time)  end;
-
-fun end_timing (CPUtimer, {sys,usr}) =
-  let open Time  (*...for Time.toString, Time.+ and Time.- *)
-      val {sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
-  in  "User " ^ toString (usr2-usr) ^
-      "  All "^ toString (sys2-sys + usr2-usr) ^
-      " secs"
-      handle Time => ""
-  end;
-
-fun check_timer timer =
-  let
-    val {sys, usr} = Timer.checkCPUTimer timer;
-    val gc = Timer.checkGCTime timer;    (* FIXME already included in usr? *)
-  in (sys, usr, gc) end;
-
-
-(*prompts*)
-fun ml_prompts p1 p2 = ();
-
-(*dummy implementation*)
-fun profile (n: int) f x = f x;
-
-(*dummy implementation*)
-fun exception_trace f = f ();
-
-(*dummy implementation*)
-fun print x = x;
-
-
-(* toplevel pretty printing (see also Pure/pure_setup.ML) *)
-
-fun make_pp path pprint = (path, pprint);
-fun install_pp (path, pp) = ();
-
-
-(* ML command execution *)
-
-fun use_text _ _ _ _ _ txt = (Compiler.eval txt; ());
-fun use_file _ _ _ _ name = use name;
-
-
-
-(** interrupts **)
-
-exception Interrupt;
-
-fun interruptible f x = f x;
-fun uninterruptible f x = f (fn (g: 'c -> 'd) => g) x;
-
-
-(* basis library fixes *)
-
-structure TextIO =
-struct
-  open TextIO;
-  fun inputLine is = TextIO.inputLine is
-    handle IO.Io _ => raise Interrupt;
-end;
-
-
-
-(** OS related **)
-
-structure OS =
-struct
-  open OS;
-  structure FileSys =
-  struct
-    open FileSys;
-    fun tmpName () =
-      let val name = FileSys.tmpName () in
-        if String.isSuffix "\000" name
-        then String.substring (name, 0, size name - 1)
-        else name
-      end;
-  end;
-end;
-
-val cd = OS.FileSys.chDir;
-val pwd = OS.FileSys.getDir;
-
-local
-
-fun read_file name =
-  let val is = TextIO.openIn name
-  in Exn.release (Exn.capture TextIO.inputAll is before TextIO.closeIn is) end;
-
-fun write_file name txt =
-  let val os = TextIO.openOut name
-  in Exn.release (Exn.capture TextIO.output (os, txt) before TextIO.closeOut os) end;
-
-in
-
-fun system_out script =
-  let
-    val script_name = OS.FileSys.tmpName ();
-    val _ = write_file script_name script;
-
-    val output_name = OS.FileSys.tmpName ();
-
-    val status =
-      OS.Process.system ("perl -w \"$ISABELLE_HOME/lib/scripts/system.pl\" nogroup " ^
-        script_name ^ " /dev/null " ^ output_name);
-    val rc = if OS.Process.isSuccess status then 0 else 1;
-
-    val output = read_file output_name handle IO.Io _ => "";
-    val _ = OS.FileSys.remove script_name handle OS.SysErr _ => ();
-    val _ = OS.FileSys.remove output_name handle OS.SysErr _ => ();
-  in (output, rc) end;
-
-end;
-
-structure OS =
-struct
-  open OS;
-  structure FileSys =
-  struct
-    fun fileId name =
-      (case system_out ("perl -e '@_ = stat(q:" ^ name ^ ":); print $_[1]'") of
-        ("", _) => raise Fail "OS.FileSys.fileId"   (* FIXME IO.Io!? *)
-      | (s, _) => (case Int.fromString s of NONE => raise Fail "OS.FileSys.fileId" | SOME i => i));
-    val compare = Int.compare;
-    fun fullPath name =
-      (case system_out ("FILE='" ^ name ^
-        "' && cd \"$(dirname \"$FILE\")\" && echo -n \"$(pwd -P)/$(basename \"$FILE\")\"") of
-        ("", _) => raise SysErr ("Bad file", NONE)
-      | (s, _) => s);
-    open FileSys;
-  end;
-end;
-
-fun process_id () = raise Fail "process_id undefined";
-
-fun getenv var =
-  (case OS.Process.getEnv var of
-    NONE => ""
-  | SOME txt => txt);
--- a/src/Pure/ML-Systems/mosml.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/ML-Systems/mosml.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -141,19 +141,19 @@
 load "Timer";
 
 fun start_timing () =
-  let val CPUtimer = Timer.startCPUTimer();
-      val time = Timer.checkCPUTimer(CPUtimer)
-  in  (CPUtimer,time)  end;
+  let
+    val timer = Timer.startCPUTimer ();
+    val time = Timer.checkCPUTimer timer;
+  in (timer, time) end;
 
-fun end_timing (CPUtimer, {gc,sys,usr}) =
-  let open Time  (*...for Time.toString, Time.+ and Time.- *)
-      val {gc=gc2,sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
-  in  "User " ^ toString (usr2-usr) ^
-      "  GC " ^ toString (gc2-gc) ^
-      "  All "^ toString (sys2-sys + usr2-usr + gc2-gc) ^
-      " secs"
-      handle Time => ""
-  end;
+fun end_timing (timer, {gc, sys, usr}) =
+  let
+    open Time;  (*...for Time.toString, Time.+ and Time.- *)
+    val {gc = gc2, sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
+    val user = usr2 - usr + gc2 - gc;
+    val all = user + sys2 - sys;
+    val message = "User " ^ toString user ^ "  All "^ toString all ^ " secs" handle Time => "";
+  in {message = message, user = user, all = all} end;
 
 fun check_timer timer =
   let val {sys, usr, gc} = Timer.checkCPUTimer timer
--- a/src/Pure/ML-Systems/polyml_common.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/ML-Systems/polyml_common.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -47,18 +47,19 @@
 (* compiler-independent timing functions *)
 
 fun start_timing () =
-  let val CPUtimer = Timer.startCPUTimer();
-      val time = Timer.checkCPUTimer(CPUtimer)
-  in  (CPUtimer,time)  end;
+  let
+    val timer = Timer.startCPUTimer ();
+    val time = Timer.checkCPUTimer timer;
+  in (timer, time) end;
 
-fun end_timing (CPUtimer, {sys,usr}) =
-  let open Time  (*...for Time.toString, Time.+ and Time.- *)
-      val {sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
-  in  "User " ^ toString (usr2-usr) ^
-      "  All "^ toString (sys2-sys + usr2-usr) ^
-      " secs"
-      handle Time => ""
-  end;
+fun end_timing (timer, {sys, usr}) =
+  let
+    open Time;  (*...for Time.toString, Time.+ and Time.- *)
+    val {sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
+    val user = usr2 - usr;
+    val all = user + sys2 - sys;
+    val message = "User " ^ toString user ^ "  All "^ toString all ^ " secs" handle Time => "";
+  in {message = message, user = user, all = all} end;
 
 fun check_timer timer =
   let
--- a/src/Pure/ML-Systems/smlnj.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/ML-Systems/smlnj.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -59,18 +59,19 @@
 (* compiler-independent timing functions *)
 
 fun start_timing () =
-  let val CPUtimer = Timer.startCPUTimer();
-      val time = Timer.checkCPUTimer(CPUtimer)
-  in  (CPUtimer,time)  end;
+  let
+    val timer = Timer.startCPUTimer ();
+    val time = Timer.checkCPUTimer timer;
+  in (timer, time) end;
 
-fun end_timing (CPUtimer, {sys,usr}) =
-  let open Time  (*...for Time.toString, Time.+ and Time.- *)
-      val {sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
-  in  "User " ^ toString (usr2-usr) ^
-      "  All "^ toString (sys2-sys + usr2-usr) ^
-      " secs"
-      handle Time => ""
-  end;
+fun end_timing (timer, {sys, usr}) =
+  let
+    open Time;  (*...for Time.toString, Time.+ and Time.- *)
+    val {sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
+    val user = usr2 - usr;
+    val all = user + sys2 - sys;
+    val message = "User " ^ toString user ^ "  All "^ toString all ^ " secs" handle Time => "";
+  in {message = message, user = user, all = all} end;
 
 fun check_timer timer =
   let
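The mosml, polyml_common and smlnj versions of end_timing now agree on a {message, user, all} record instead of a bare string. A minimal sketch of consuming it; the report_timing wrapper itself is hypothetical:

(*hypothetical wrapper around the refactored timing interface*)
fun report_timing f x =
  let
    val start = start_timing ();
    val result = f x;
    val {message, user, all} = end_timing start;
      (*user, all are Time.time; message is e.g. "User 0.123  All 0.456 secs"*)
    val _ = TextIO.output (TextIO.stdOut, message ^ "\n");
  in result end;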
--- a/src/Pure/Proof/proofchecker.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Proof/proofchecker.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -56,7 +56,7 @@
       | thm_of _ _ (PAxm (name, _, SOME Ts)) =
           thm_of_atom (Thm.axiom thy name) Ts
 
-      | thm_of _ Hs (PBound i) = List.nth (Hs, i)
+      | thm_of _ Hs (PBound i) = nth Hs i
 
       | thm_of (vs, names) Hs (Abst (s, SOME T, prf)) =
           let
--- a/src/Pure/Proof/reconstruct.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Proof/reconstruct.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -98,7 +98,7 @@
           let val (env3, V) = mk_tvar (env2, [])
           in (t' $ u', V, vTs2, unifyT thy env3 T (U --> V)) end)
       end
-  | infer_type thy env Ts vTs (t as Bound i) = ((t, List.nth (Ts, i), vTs, env)
+  | infer_type thy env Ts vTs (t as Bound i) = ((t, nth Ts i, vTs, env)
       handle Subscript => error ("infer_type: bad variable index " ^ string_of_int i));
 
 fun cantunify thy (t, u) = error ("Non-unifiable terms:\n" ^
@@ -152,7 +152,7 @@
     fun head_norm (prop, prf, cnstrts, env, vTs) =
       (Envir.head_norm env prop, prf, cnstrts, env, vTs);
 
-    fun mk_cnstrts env _ Hs vTs (PBound i) = ((List.nth (Hs, i), PBound i, [], env, vTs)
+    fun mk_cnstrts env _ Hs vTs (PBound i) = ((nth Hs i, PBound i, [], env, vTs)
           handle Subscript => error ("mk_cnstrts: bad variable index " ^ string_of_int i))
       | mk_cnstrts env Ts Hs vTs (Abst (s, opT, cprf)) =
           let
@@ -304,7 +304,7 @@
 
 val head_norm = Envir.head_norm (Envir.empty 0);
 
-fun prop_of0 Hs (PBound i) = List.nth (Hs, i)
+fun prop_of0 Hs (PBound i) = nth Hs i
   | prop_of0 Hs (Abst (s, SOME T, prf)) =
       Term.all T $ (Abs (s, T, prop_of0 Hs prf))
   | prop_of0 Hs (AbsP (s, SOME t, prf)) =
--- a/src/Pure/ROOT.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/ROOT.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -81,12 +81,18 @@
 use "goal.ML";
 use "axclass.ML";
 
-(*the main Isar system*)
+(*main Isar stuff*)
 cd "Isar"; use "ROOT.ML"; cd "..";
 use "subgoal.ML";
 
 use "Proof/extraction.ML";
 
+(*Isabelle/Isar system*)
+use "System/session.ML";
+use "System/isar.ML";
+use "System/isabelle_process.ML";
+
+(*additional tools*)
 cd "Tools"; use "ROOT.ML"; cd "..";
 
 use "codegen.ML";
--- a/src/Pure/Syntax/parser.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Syntax/parser.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -73,10 +73,10 @@
       val chain_from = case (pri, rhs) of (~1, [Nonterminal (id, ~1)]) => SOME id | _ => NONE;
 
       (*store chain if it does not already exist*)
-      val (new_chain, chains') = case chain_from of NONE => (NONE, chains) | SOME from_ =>
-        let val old_tos = these (AList.lookup (op =) chains from_) in
+      val (new_chain, chains') = case chain_from of NONE => (NONE, chains) | SOME from =>
+        let val old_tos = these (AList.lookup (op =) chains from) in
           if member (op =) old_tos lhs then (NONE, chains)
-          else (SOME from_, AList.update (op =) (from_, insert (op =) lhs old_tos) chains)
+          else (SOME from, AList.update (op =) (from, insert (op =) lhs old_tos) chains)
         end;
 
       (*propagate new chain in lookahead and lambda lists;
@@ -410,7 +410,7 @@
 
     fun pretty_nt (name, tag) =
       let
-        fun prod_of_chain from_ = ([Nonterminal (from_, ~1)], "", ~1);
+        fun prod_of_chain from = ([Nonterminal (from, ~1)], "", ~1);
 
         val nt_prods =
           Library.foldl (gen_union op =) ([], map snd (snd (Array.sub (prods, tag)))) @
@@ -552,8 +552,8 @@
           val to_tag = convert_tag to;
 
           fun make [] result = result
-            | make (from_ :: froms) result = make froms ((to_tag,
-                ([Nonterminal (convert_tag from_, ~1)], "", ~1)) :: result);
+            | make (from :: froms) result = make froms ((to_tag,
+                ([Nonterminal (convert_tag from, ~1)], "", ~1)) :: result);
         in mk_chain_prods cs (make froms [] @ result) end;
 
     val chain_prods = mk_chain_prods chains2 [];
--- a/src/Pure/Syntax/syn_ext.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Syntax/syn_ext.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -26,7 +26,7 @@
   val logic: string
   val args: string
   val cargs: string
-  val any_: string
+  val any: string
   val sprop: string
   val typ_to_nonterm: typ -> string
   datatype xsymb =
@@ -108,8 +108,8 @@
 val sprop = "#prop";
 val spropT = Type (sprop, []);
 
-val any_ = "any";
-val anyT = Type (any_, []);
+val any = "any";
+val anyT = Type (any, []);
 
 
 
@@ -181,7 +181,7 @@
   | typ_to_nt default _ = default;
 
 (*get nonterminal for rhs*)
-val typ_to_nonterm = typ_to_nt any_;
+val typ_to_nonterm = typ_to_nt any;
 
 (*get nonterminal for lhs*)
 val typ_to_nonterm1 = typ_to_nt logic;
--- a/src/Pure/Syntax/syn_trans.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Syntax/syn_trans.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -222,7 +222,7 @@
 (* implicit structures *)
 
 fun the_struct structs i =
-  if 1 <= i andalso i <= length structs then List.nth (structs, i - 1)
+  if 1 <= i andalso i <= length structs then nth structs (i - 1)
   else raise error ("Illegal reference to implicit structure #" ^ string_of_int i);
 
 fun struct_tr structs (*"_struct"*) [Const ("_indexdefault", _)] =
--- a/src/Pure/Syntax/syntax.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Syntax/syntax.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -390,7 +390,7 @@
 val basic_nonterms =
   (Lexicon.terminals @ [SynExt.logic, "type", "types", "sort", "classes",
     SynExt.args, SynExt.cargs, "pttrn", "pttrns", "idt", "idts", "aprop",
-    "asms", SynExt.any_, SynExt.sprop, "num_const", "float_const",
+    "asms", SynExt.any, SynExt.sprop, "num_const", "float_const",
     "index", "struct"]);
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/System/isabelle_process.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,138 @@
+(*  Title:      Pure/System/isabelle_process.ML
+    Author:     Makarius
+
+Isabelle process wrapper -- interaction via external program.
+
+General format of process output:
+
+  (1) unmarked stdout/stderr, no line structure (output should be
+  processed immediately as it arrives);
+
+  (2) properly marked-up messages, e.g. for writeln channel
+
+  "\002A" ^ props ^ "\002,\n" ^ text ^ "\002.\n"
+
+  where the props consist of name=value lines terminated by "\002,\n"
+  each, and the remaining text is any number of lines (output is
+  supposed to be processed in one piece);
+
+  (3) special init message holds "pid" and "session" property;
+
+  (4) message content is encoded in YXML format.
+*)
+
+signature ISABELLE_PROCESS =
+sig
+  val isabelle_processN: string
+  val init: string -> unit
+end;
+
+structure IsabelleProcess: ISABELLE_PROCESS =
+struct
+
+(* print modes *)
+
+val isabelle_processN = "isabelle_process";
+
+val _ = Output.add_mode isabelle_processN Output.default_output Output.default_escape;
+val _ = Markup.add_mode isabelle_processN YXML.output_markup;
+
+
+(* message markup *)
+
+fun special ch = Symbol.STX ^ ch;
+val special_sep = special ",";
+val special_end = special ".";
+
+local
+
+fun clean_string bad str =
+  if exists_string (member (op =) bad) str then
+    translate_string (fn c => if member (op =) bad c then Symbol.DEL else c) str
+  else str;
+
+fun message_props props =
+  let val clean = clean_string [Symbol.STX, "\n", "\r"]
+  in implode (map (fn (x, y) => clean x ^ "=" ^ clean y ^ special_sep ^ "\n") props) end;
+
+fun message_pos trees = trees |> get_first
+  (fn XML.Elem (name, atts, ts) =>
+        if name = Markup.positionN then SOME (Position.of_properties atts)
+        else message_pos ts
+    | _ => NONE);
+
+fun output out_stream s = NAMED_CRITICAL "IO" (fn () =>
+  (TextIO.output (out_stream, s); TextIO.output (out_stream, "\n")));
+
+in
+
+fun message _ _ "" = ()
+  | message out_stream ch body =
+      let
+        val pos = the_default Position.none (message_pos (YXML.parse_body body));
+        val props =
+          Position.properties_of (Position.thread_data ())
+          |> Position.default_properties pos;
+        val txt = clean_string [Symbol.STX] body;
+      in output out_stream (special ch ^ message_props props ^ txt ^ special_end) end;
+
+fun init_message out_stream =
+  let
+    val pid = (Markup.pidN, process_id ());
+    val session = (Markup.sessionN, List.last (Session.id ()) handle List.Empty => "unknown");
+    val text = Session.welcome ();
+  in output out_stream (special "A" ^ message_props [pid, session] ^ text ^ special_end) end;
+
+end;
+
+
+(* channels *)
+
+local
+
+fun auto_flush stream =
+  let
+    val _ = TextIO.StreamIO.setBufferMode (TextIO.getOutstream stream, IO.BLOCK_BUF);
+    fun loop () =
+      (OS.Process.sleep (Time.fromMilliseconds 50); try TextIO.flushOut stream; loop ());
+  in loop end;
+
+in
+
+fun setup_channels out =
+  let
+    val out_stream =
+      if out = "-" then TextIO.stdOut
+      else
+        let
+          val path = File.platform_path (Path.explode out);
+          val out_stream = TextIO.openOut path;  (*fifo blocks until reader is ready*)
+          val _ = OS.FileSys.remove path;  (*prevent alien access, indicate writer is ready*)
+          val _ = SimpleThread.fork false (auto_flush TextIO.stdOut);
+        in out_stream end;
+    val _ = SimpleThread.fork false (auto_flush out_stream);
+    val _ = SimpleThread.fork false (auto_flush TextIO.stdErr);
+  in
+    Output.status_fn   := message out_stream "B";
+    Output.writeln_fn  := message out_stream "C";
+    Output.priority_fn := message out_stream "D";
+    Output.tracing_fn  := message out_stream "E";
+    Output.warning_fn  := message out_stream "F";
+    Output.error_fn    := message out_stream "G";
+    Output.debug_fn    := message out_stream "H";
+    Output.prompt_fn   := ignore;
+    out_stream
+  end;
+
+end;
+
+
+(* init *)
+
+fun init out =
+ (change print_mode (update (op =) isabelle_processN);
+  setup_channels out |> init_message;
+  OuterKeyword.report ();
+  Isar.toplevel_loop {init = true, welcome = false, sync = true, secure = true});
+
+end;
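To make the wire format of the header comment concrete: a writeln message with a single property, assembled exactly as the message function above does; the property value is made up:

(*a complete writeln message ("C" channel, cf. setup_channels) with one property*)
val example_message =
  "\002C" ^               (*STX plus channel code*)
  "id=42" ^ "\002,\n" ^   (*one name=value property line*)
  "Hello world" ^         (*message body, YXML-encoded in practice*)
  "\002.\n";              (*terminator "\002." plus the newline appended by output*)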
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/System/isabelle_process.scala	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,435 @@
+/*  Title:      Pure/System/isabelle_process.scala
+    Author:     Makarius
+    Options:    :folding=explicit:collapseFolds=1:
+
+Isabelle process management -- always reactive due to multi-threaded I/O.
+*/
+
+package isabelle
+
+import java.util.concurrent.LinkedBlockingQueue
+import java.io.{BufferedReader, BufferedWriter, InputStreamReader, OutputStreamWriter,
+  InputStream, OutputStream, IOException}
+
+
+object IsabelleProcess {
+
+  /* results */
+
+  object Kind extends Enumeration {
+    //{{{ values and codes
+    // internal system notification
+    val SYSTEM = Value("SYSTEM")
+    // Posix channels/events
+    val STDIN = Value("STDIN")
+    val STDOUT = Value("STDOUT")
+    val SIGNAL = Value("SIGNAL")
+    val EXIT = Value("EXIT")
+    // Isabelle messages
+    val INIT = Value("INIT")
+    val STATUS = Value("STATUS")
+    val WRITELN = Value("WRITELN")
+    val PRIORITY = Value("PRIORITY")
+    val TRACING = Value("TRACING")
+    val WARNING = Value("WARNING")
+    val ERROR = Value("ERROR")
+    val DEBUG = Value("DEBUG")
+    // message codes
+    val code = Map(
+      ('A' : Int) -> Kind.INIT,
+      ('B' : Int) -> Kind.STATUS,
+      ('C' : Int) -> Kind.WRITELN,
+      ('D' : Int) -> Kind.PRIORITY,
+      ('E' : Int) -> Kind.TRACING,
+      ('F' : Int) -> Kind.WARNING,
+      ('G' : Int) -> Kind.ERROR,
+      ('H' : Int) -> Kind.DEBUG,
+      ('0' : Int) -> Kind.SYSTEM,
+      ('1' : Int) -> Kind.STDIN,
+      ('2' : Int) -> Kind.STDOUT,
+      ('3' : Int) -> Kind.SIGNAL,
+      ('4' : Int) -> Kind.EXIT)
+    // message markup
+    val markup = Map(
+      Kind.INIT -> Markup.INIT,
+      Kind.STATUS -> Markup.STATUS,
+      Kind.WRITELN -> Markup.WRITELN,
+      Kind.PRIORITY -> Markup.PRIORITY,
+      Kind.TRACING -> Markup.TRACING,
+      Kind.WARNING -> Markup.WARNING,
+      Kind.ERROR -> Markup.ERROR,
+      Kind.DEBUG -> Markup.DEBUG,
+      Kind.SYSTEM -> Markup.SYSTEM,
+      Kind.STDIN -> Markup.STDIN,
+      Kind.STDOUT -> Markup.STDOUT,
+      Kind.SIGNAL -> Markup.SIGNAL,
+      Kind.EXIT -> Markup.EXIT)
+    //}}}
+    def is_raw(kind: Value) =
+      kind == STDOUT
+    def is_control(kind: Value) =
+      kind == SYSTEM ||
+      kind == SIGNAL ||
+      kind == EXIT
+    def is_system(kind: Value) =
+      kind == SYSTEM ||
+      kind == STDIN ||
+      kind == SIGNAL ||
+      kind == EXIT ||
+      kind == STATUS
+  }
+
+  class Result(val kind: Kind.Value, val props: List[(String, String)], val result: String) {
+    override def toString = {
+      val trees = YXML.parse_body_failsafe(result)
+      val res =
+        if (kind == Kind.STATUS) trees.map(_.toString).mkString
+        else trees.flatMap(XML.content(_).mkString).mkString
+      if (props.isEmpty)
+        kind.toString + " [[" + res + "]]"
+      else
+        kind.toString + " " +
+          (for ((x, y) <- props) yield x + "=" + y).mkString("{", ",", "}") + " [[" + res + "]]"
+    }
+    def is_raw = Kind.is_raw(kind)
+    def is_control = Kind.is_control(kind)
+    def is_system = Kind.is_system(kind)
+  }
+
+  def parse_message(isabelle_system: IsabelleSystem, result: Result) =
+  {
+    XML.Elem(Markup.MESSAGE, (Markup.CLASS, Kind.markup(result.kind)) :: result.props,
+      YXML.parse_body_failsafe(isabelle_system.symbols.decode(result.result)))
+  }
+}
+
+
+class IsabelleProcess(isabelle_system: IsabelleSystem,
+  results: EventBus[IsabelleProcess.Result], args: String*)
+{
+  import IsabelleProcess._
+
+
+  /* demo constructor */
+
+  def this(args: String*) =
+    this(new IsabelleSystem, new EventBus[IsabelleProcess.Result] + Console.println, args: _*)
+
+
+  /* process information */
+
+  @volatile private var proc: Process = null
+  @volatile private var closing = false
+  @volatile private var pid: String = null
+  @volatile private var the_session: String = null
+  def session = the_session
+
+
+  /* results */
+
+  def parse_message(result: Result): XML.Tree =
+    IsabelleProcess.parse_message(isabelle_system, result)
+
+  private val result_queue = new LinkedBlockingQueue[Result]
+
+  private def put_result(kind: Kind.Value, props: List[(String, String)], result: String)
+  {
+    if (kind == Kind.INIT) {
+      val map = Map(props: _*)
+      if (map.isDefinedAt(Markup.PID)) pid = map(Markup.PID)
+      if (map.isDefinedAt(Markup.SESSION)) the_session = map(Markup.SESSION)
+    }
+    result_queue.put(new Result(kind, props, result))
+  }
+
+  private class ResultThread extends Thread("isabelle: results") {
+    override def run() = {
+      var finished = false
+      while (!finished) {
+        val result =
+          try { result_queue.take }
+          catch { case _: NullPointerException => null }
+
+        if (result != null) {
+          results.event(result)
+          if (result.kind == Kind.EXIT) finished = true
+        }
+        else finished = true
+      }
+    }
+  }
+
+
+  /* signals */
+
+  def interrupt() = synchronized {
+    if (proc == null) error("Cannot interrupt Isabelle: no process")
+    if (pid == null) put_result(Kind.SYSTEM, Nil, "Cannot interrupt: unknown pid")
+    else {
+      try {
+        if (isabelle_system.execute(true, "kill", "-INT", pid).waitFor == 0)
+          put_result(Kind.SIGNAL, Nil, "INT")
+        else
+          put_result(Kind.SYSTEM, Nil, "Cannot interrupt: kill command failed")
+      }
+      catch { case e: IOException => error("Cannot interrupt Isabelle: " + e.getMessage) }
+    }
+  }
+
+  def kill() = synchronized {
+    if (proc == null) error("Cannot kill Isabelle: no process")
+    else {
+      try_close()
+      Thread.sleep(500)
+      put_result(Kind.SIGNAL, Nil, "KILL")
+      proc.destroy
+      proc = null
+      pid = null
+    }
+  }
+
+
+  /* output being piped into the process */
+
+  private val output = new LinkedBlockingQueue[String]
+
+  private def output_raw(text: String) = synchronized {
+    if (proc == null) error("Cannot output to Isabelle: no process")
+    if (closing) error("Cannot output to Isabelle: already closing")
+    output.put(text)
+  }
+
+  def output_sync(text: String) =
+    output_raw(" \\<^sync>\n; " + text + " \\<^sync>;\n")
+
+
+  def command(text: String) =
+    output_sync("Isabelle.command " + IsabelleSyntax.encode_string(text))
+
+  def command(props: List[(String, String)], text: String) =
+    output_sync("Isabelle.command " + IsabelleSyntax.encode_properties(props) + " " +
+      IsabelleSyntax.encode_string(text))
+
+  def ML(text: String) =
+    output_sync("ML_val " + IsabelleSyntax.encode_string(text))
+
+  def close() = synchronized {    // FIXME watchdog/timeout
+    output_raw("\u0000")
+    closing = true
+  }
+
+  def try_close() = synchronized {
+    if (proc != null && !closing) {
+      try { close() }
+      catch { case _: RuntimeException => }
+    }
+  }
+
+
+  /* stdin */
+
+  private class StdinThread(out_stream: OutputStream) extends Thread("isabelle: stdin") {
+    override def run() = {
+      val writer = new BufferedWriter(new OutputStreamWriter(out_stream, isabelle_system.charset))
+      var finished = false
+      while (!finished) {
+        try {
+          //{{{
+          val s = output.take
+          if (s == "\u0000") {
+            writer.close
+            finished = true
+          }
+          else {
+            put_result(Kind.STDIN, Nil, s)
+            writer.write(s)
+            writer.flush
+          }
+          //}}}
+        }
+        catch {
+          case e: IOException => put_result(Kind.SYSTEM, Nil, "Stdin thread: " + e.getMessage)
+        }
+      }
+      put_result(Kind.SYSTEM, Nil, "Stdin thread terminated")
+    }
+  }
+
+
+  /* stdout */
+
+  private class StdoutThread(in_stream: InputStream) extends Thread("isabelle: stdout") {
+    override def run() = {
+      val reader = new BufferedReader(new InputStreamReader(in_stream, isabelle_system.charset))
+      var result = new StringBuilder(100)
+
+      var finished = false
+      while (!finished) {
+        try {
+          //{{{
+          var c = -1
+          var done = false
+          while (!done && (result.length == 0 || reader.ready)) {
+            c = reader.read
+            if (c >= 0) result.append(c.asInstanceOf[Char])
+            else done = true
+          }
+          if (result.length > 0) {
+            put_result(Kind.STDOUT, Nil, result.toString)
+            result.length = 0
+          }
+          else {
+            reader.close
+            finished = true
+            try_close()
+          }
+          //}}}
+        }
+        catch {
+          case e: IOException => put_result(Kind.SYSTEM, Nil, "Stdout thread: " + e.getMessage)
+        }
+      }
+      put_result(Kind.SYSTEM, Nil, "Stdout thread terminated")
+    }
+  }
+
+
+  /* messages */
+
+  private class MessageThread(fifo: String) extends Thread("isabelle: messages") {
+    override def run() = {
+      val reader = isabelle_system.fifo_reader(fifo)
+      var kind: Kind.Value = null
+      var props: List[(String, String)] = Nil
+      var result = new StringBuilder
+
+      var finished = false
+      while (!finished) {
+        try {
+          if (kind == null) {
+            //{{{ Char mode -- resync
+            var c = -1
+            do {
+              c = reader.read
+              if (c >= 0 && c != 2) result.append(c.asInstanceOf[Char])
+            } while (c >= 0 && c != 2)
+
+            if (result.length > 0) {
+              put_result(Kind.SYSTEM, Nil, "Malformed message:\n" + result.toString)
+              result.length = 0
+            }
+            if (c < 0) {
+              reader.close
+              finished = true
+              try_close()
+            }
+            else {
+              c = reader.read
+              if (Kind.code.isDefinedAt(c)) kind = Kind.code(c)
+              else kind = null
+            }
+            //}}}
+          }
+          else {
+            //{{{ Line mode
+            val line = reader.readLine
+            if (line == null) {
+              reader.close
+              finished = true
+              try_close()
+            }
+            else {
+              val len = line.length
+              // property
+              if (line.endsWith("\u0002,")) {
+                val i = line.indexOf('=')
+                if (i > 0) {
+                  val name = line.substring(0, i)
+                  val value = line.substring(i + 1, len - 2)
+                  props = (name, value) :: props
+                }
+              }
+              // last text line
+              else if (line.endsWith("\u0002.")) {
+                result.append(line.substring(0, len - 2))
+                put_result(kind, props.reverse, result.toString)
+                kind = null
+                props = Nil
+                result.length = 0
+              }
+              // text line
+              else {
+                result.append(line)
+                result.append('\n')
+              }
+            }
+            //}}}
+          }
+        }
+        catch {
+          case e: IOException => put_result(Kind.SYSTEM, Nil, "Message thread: " + e.getMessage)
+        }
+      }
+      put_result(Kind.SYSTEM, Nil, "Message thread terminated")
+    }
+  }
+
+
+
+  /** main **/
+
+  {
+    /* isabelle version */
+
+    {
+      val (msg, rc) = isabelle_system.isabelle_tool("version")
+      if (rc != 0) error("Version check failed -- bad Isabelle installation:\n" + msg)
+      put_result(Kind.SYSTEM, Nil, msg)
+    }
+
+
+    /* messages */
+
+    val message_fifo = isabelle_system.mk_fifo()
+    def rm_fifo() = isabelle_system.rm_fifo(message_fifo)
+
+    val message_thread = new MessageThread(message_fifo)
+    message_thread.start
+
+    new ResultThread().start
+
+
+    /* exec process */
+
+    try {
+      val cmdline =
+        List(isabelle_system.getenv_strict("ISABELLE_PROCESS"), "-W", message_fifo) ++ args
+      proc = isabelle_system.execute(true, cmdline: _*)
+    }
+    catch {
+      case e: IOException =>
+        rm_fifo()
+        error("Failed to execute Isabelle process: " + e.getMessage)
+    }
+
+
+    /* stdin/stdout */
+
+    new StdinThread(proc.getOutputStream).start
+    new StdoutThread(proc.getInputStream).start
+
+
+    /* exit */
+
+    new Thread("isabelle: exit") {
+      override def run() = {
+        val rc = proc.waitFor()
+        Thread.sleep(300)
+        put_result(Kind.SYSTEM, Nil, "Exit thread terminated")
+        put_result(Kind.EXIT, Nil, Integer.toString(rc))
+        rm_fifo()
+      }
+    }.start
+
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/System/isabelle_system.scala	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,158 @@
+/*  Title:      Pure/System/isabelle_system.scala
+    Author:     Makarius
+
+Isabelle system support -- basic Cygwin/Posix compatibility.
+*/
+
+package isabelle
+
+import java.util.regex.{Pattern, Matcher}
+import java.io.{BufferedReader, InputStreamReader, FileInputStream, File, IOException}
+
+import scala.io.Source
+
+
+class IsabelleSystem {
+
+  val charset = "UTF-8"
+
+
+  /* Isabelle environment settings */
+
+  private val environment = System.getenv
+
+  def getenv(name: String) = {
+    val value = environment.get(if (name == "HOME") "HOME_JVM" else name)
+    if (value != null) value else ""
+  }
+
+  def getenv_strict(name: String) = {
+    val value = getenv(name)
+    if (value != "") value else error("Undefined environment variable: " + name)
+  }
+
+  val is_cygwin = Pattern.matches(".*-cygwin", getenv_strict("ML_PLATFORM"))
+
+
+  /* file path specifications */
+
+  private val cygdrive_pattern = Pattern.compile("/cygdrive/([a-zA-Z])($|/.*)")
+
+  def platform_path(source_path: String) = {
+    val result_path = new StringBuilder
+
+    def init(path: String) = {
+      val cygdrive = cygdrive_pattern.matcher(path)
+      if (cygdrive.matches) {
+        result_path.length = 0
+        result_path.append(cygdrive.group(1))
+        result_path.append(":")
+        result_path.append(File.separator)
+        cygdrive.group(2)
+      }
+      else if (path.startsWith("/")) {
+        result_path.length = 0
+        result_path.append(getenv_strict("ISABELLE_ROOT_JVM"))
+        path.substring(1)
+      }
+      else path
+    }
+    def append(path: String) = {
+      for (p <- init(path).split("/")) {
+        if (p != "") {
+          val len = result_path.length
+          if (len > 0 && result_path(len - 1) != File.separatorChar)
+            result_path.append(File.separator)
+          result_path.append(p)
+        }
+      }
+    }
+    for (p <- init(source_path).split("/")) {
+      if (p.startsWith("$")) append(getenv_strict(p.substring(1)))
+      else if (p == "~") append(getenv_strict("HOME"))
+      else if (p == "~~") append(getenv_strict("ISABELLE_HOME"))
+      else append(p)
+    }
+    result_path.toString
+  }
+
+  def platform_file(path: String) =
+    new File(platform_path(path))
+
+
+  /* processes */
+
+  def execute(redirect: Boolean, args: String*): Process = {
+    val cmdline = new java.util.LinkedList[String]
+    if (is_cygwin) cmdline.add(platform_path("/bin/env"))
+    for (s <- args) cmdline.add(s)
+
+    val proc = new ProcessBuilder(cmdline)
+    proc.environment.clear
+    proc.environment.putAll(environment)
+    proc.redirectErrorStream(redirect)
+    proc.start
+  }
+
+
+  /* Isabelle tools (non-interactive) */
+
+  def isabelle_tool(args: String*) = {
+    val proc =
+      try { execute(true, (List(getenv_strict("ISABELLE_TOOL")) ++ args): _*) }
+      catch { case e: IOException => error(e.getMessage) }
+    proc.getOutputStream.close
+    val output = Source.fromInputStream(proc.getInputStream, charset).mkString
+    val rc = proc.waitFor
+    (output, rc)
+  }
+
+
+  /* named pipes */
+
+  def mk_fifo() = {
+    val (result, rc) = isabelle_tool("mkfifo")
+    if (rc == 0) result.trim
+    else error(result)
+  }
+
+  def rm_fifo(fifo: String) = {
+    val (result, rc) = isabelle_tool("rmfifo", fifo)
+    if (rc != 0) error(result)
+  }
+
+  def fifo_reader(fifo: String) = {
+    // blocks until writer is ready
+    val stream =
+      if (is_cygwin) execute(false, "cat", fifo).getInputStream
+      else new FileInputStream(fifo)
+    new BufferedReader(new InputStreamReader(stream, charset))
+  }
+
+
+  /* find logics */
+
+  def find_logics() = {
+    val ml_ident = getenv_strict("ML_IDENTIFIER")
+    var logics: Set[String] = Set()
+    for (dir <- getenv_strict("ISABELLE_PATH").split(":")) {
+      val files = platform_file(dir + "/" + ml_ident).listFiles()
+      if (files != null) {
+        for (file <- files if file.isFile) logics += file.getName
+      }
+    }
+    logics.toList.sort(_ < _)
+  }
+
+
+  /* symbols */
+
+  private def read_symbols(path: String) = {
+    val file = new File(platform_path(path))
+    if (file.canRead) Source.fromFile(file).getLines
+    else Iterator.empty
+  }
+  val symbols = new Symbol.Interpretation(
+    read_symbols("$ISABELLE_HOME/etc/symbols") ++
+    read_symbols("$ISABELLE_HOME_USER/etc/symbols"))
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/System/isar.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,415 @@
+(*  Title:      Pure/System/isar.ML
+    Author:     Makarius
+
+The global Isabelle/Isar state and main read-eval-print loop.
+*)
+
+signature ISAR =
+sig
+  val init: unit -> unit
+  val exn: unit -> (exn * string) option
+  val state: unit -> Toplevel.state
+  val context: unit -> Proof.context
+  val goal: unit -> thm
+  val print: unit -> unit
+  val >> : Toplevel.transition -> bool
+  val >>> : Toplevel.transition list -> unit
+  val linear_undo: int -> unit
+  val undo: int -> unit
+  val kill: unit -> unit
+  val kill_proof: unit -> unit
+  val crashes: exn list ref
+  val toplevel_loop: {init: bool, welcome: bool, sync: bool, secure: bool} -> unit
+  val loop: unit -> unit
+  val main: unit -> unit
+
+  type id = string
+  val no_id: id
+  val create_command: Toplevel.transition -> id
+  val insert_command: id -> id -> unit
+  val remove_command: id -> unit
+end;
+
+structure Isar: ISAR =
+struct
+
+
+(** TTY model -- SINGLE-THREADED! **)
+
+(* the global state *)
+
+type history = (Toplevel.state * Toplevel.transition) list;
+  (*previous state, state transition -- regular commands only*)
+
+local
+  val global_history = ref ([]: history);
+  val global_state = ref Toplevel.toplevel;
+  val global_exn = ref (NONE: (exn * string) option);
+in
+
+fun edit_history count f = NAMED_CRITICAL "Isar" (fn () =>
+  let
+    fun edit 0 (st, hist) = (global_history := hist; global_state := st; global_exn := NONE)
+      | edit n (st, hist) = edit (n - 1) (f st hist);
+  in edit count (! global_state, ! global_history) end);
+
+fun state () = NAMED_CRITICAL "Isar" (fn () => ! global_state);
+fun set_state state = NAMED_CRITICAL "Isar" (fn () => global_state := state);
+
+fun exn () = NAMED_CRITICAL "Isar" (fn () => ! global_exn);
+fun set_exn exn =  NAMED_CRITICAL "Isar" (fn () => global_exn := exn);
+
+end;
+
+
+fun init () = edit_history 1 (K (K (Toplevel.toplevel, [])));
+
+fun context () = Toplevel.context_of (state ())
+  handle Toplevel.UNDEF => error "Unknown context";
+
+fun goal () = #2 (#2 (Proof.get_goal (Toplevel.proof_of (state ()))))
+  handle Toplevel.UNDEF => error "No goal present";
+
+fun print () = Toplevel.print_state false (state ());
+
+
+(* history navigation *)
+
+local
+
+fun find_and_undo _ [] = error "Undo history exhausted"
+  | find_and_undo which ((prev, tr) :: hist) =
+      ((case Toplevel.init_of tr of SOME name => ThyInfo.kill_thy name | NONE => ());
+        if which (Toplevel.name_of tr) then (prev, hist) else find_and_undo which hist);
+
+in
+
+fun linear_undo n = edit_history n (K (find_and_undo (K true)));
+
+fun undo n = edit_history n (fn st => fn hist =>
+  find_and_undo (if Toplevel.is_proof st then K true else OuterKeyword.is_theory) hist);
+
+fun kill () = edit_history 1 (fn st => fn hist =>
+  find_and_undo
+    (if Toplevel.is_proof st then OuterKeyword.is_theory else OuterKeyword.is_theory_begin) hist);
+
+fun kill_proof () = edit_history 1 (fn st => fn hist =>
+  if Toplevel.is_proof st then find_and_undo OuterKeyword.is_theory hist
+  else raise Toplevel.UNDEF);
+
+end;
+
+
+(* interactive state transformations *)
+
+fun op >> tr =
+  (case Toplevel.transition true tr (state ()) of
+    NONE => false
+  | SOME (_, SOME err) => (set_exn (SOME err); Toplevel.error_msg tr err; true)
+  | SOME (st', NONE) =>
+      let
+        val name = Toplevel.name_of tr;
+        val _ = if OuterKeyword.is_theory_begin name then init () else ();
+        val _ =
+          if OuterKeyword.is_regular name
+          then edit_history 1 (fn st => fn hist => (st', (st, tr) :: hist)) else ();
+      in true end);
+
+fun op >>> [] = ()
+  | op >>> (tr :: trs) = if op >> tr then op >>> trs else ();
+
+
+(* toplevel loop *)
+
+val crashes = ref ([]: exn list);
+
+local
+
+fun raw_loop secure src =
+  let
+    fun check_secure () =
+      (if secure then warning "Secure loop -- cannot exit to ML" else (); secure);
+  in
+    (case Source.get_single (Source.set_prompt Source.default_prompt src) of
+      NONE => if secure then quit () else ()
+    | SOME (tr, src') => if op >> tr orelse check_secure () then raw_loop secure src' else ())
+    handle exn =>
+      (Output.error_msg (Toplevel.exn_message exn)
+        handle crash =>
+          (CRITICAL (fn () => change crashes (cons crash));
+            warning "Recovering from Isar toplevel crash -- see also Isar.crashes");
+          raw_loop secure src)
+  end;
+
+in
+
+fun toplevel_loop {init = do_init, welcome, sync, secure} =
+ (Context.set_thread_data NONE;
+  if do_init then init () else ();  (* FIXME init editor model *)
+  if welcome then writeln (Session.welcome ()) else ();
+  uninterruptible (fn _ => fn () => raw_loop secure (OuterSyntax.isar sync)) ());
+
+end;
+
+fun loop () =
+  toplevel_loop {init = false, welcome = false, sync = false, secure = Secure.is_secure ()};
+
+fun main () =
+  toplevel_loop {init = true, welcome = true, sync = false, secure = Secure.is_secure ()};
+
+
+
+(** individual toplevel commands **)
+
+(* unique identification *)
+
+type id = string;
+val no_id : id = "";
+
+
+(* command category *)
+
+datatype category = Empty | Theory | Proof | Diag | Control;
+
+fun category_of tr =
+  let val name = Toplevel.name_of tr in
+    if name = "" then Empty
+    else if OuterKeyword.is_theory name then Theory
+    else if OuterKeyword.is_proof name then Proof
+    else if OuterKeyword.is_diag name then Diag
+    else Control
+  end;
+
+val is_theory = fn Theory => true | _ => false;
+val is_proper = fn Theory => true | Proof => true | _ => false;
+val is_regular = fn Control => false | _ => true;
+
+
+(* command status *)
+
+datatype status =
+  Unprocessed |
+  Running |
+  Failed of exn * string |
+  Finished of Toplevel.state;
+
+fun status_markup Unprocessed = Markup.unprocessed
+  | status_markup Running = (Markup.runningN, [])
+  | status_markup (Failed _) = Markup.failed
+  | status_markup (Finished _) = Markup.finished;
+
+fun run int tr state =
+  (case Toplevel.transition int tr state of
+    NONE => NONE
+  | SOME (_, SOME err) => (Toplevel.error_msg tr err; SOME (Failed err))
+  | SOME (state', NONE) => SOME (Finished state'));
+
+
+(* datatype command *)
+
+datatype command = Command of
+ {category: category,
+  transition: Toplevel.transition,
+  status: status};
+
+fun make_command (category, transition, status) =
+  Command {category = category, transition = transition, status = status};
+
+val empty_command =
+  make_command (Empty, Toplevel.empty, Finished Toplevel.toplevel);
+
+fun map_command f (Command {category, transition, status}) =
+  make_command (f (category, transition, status));
+
+fun map_status f = map_command (fn (category, transition, status) =>
+  (category, transition, f status));
+
+
+(* global collection of identified commands *)
+
+fun err_dup id = sys_error ("Duplicate command " ^ quote id);
+fun err_undef id = sys_error ("Unknown command " ^ quote id);
+
+local val global_commands = ref (Graph.empty: command Graph.T) in
+
+fun change_commands f = NAMED_CRITICAL "Isar" (fn () => change global_commands f)
+  handle Graph.DUP bad => err_dup bad | Graph.UNDEF bad => err_undef bad;
+
+fun get_commands () = NAMED_CRITICAL "Isar" (fn () => ! global_commands);
+
+end;
+
+fun add_edge (id1, id2) =
+  if id1 = no_id orelse id2 = no_id then I else Graph.add_edge (id1, id2);
+
+
+fun init_commands () = change_commands (K Graph.empty);
+
+fun the_command id =
+  let val Command cmd =
+    if id = no_id then empty_command
+    else (Graph.get_node (get_commands ()) id handle Graph.UNDEF bad => err_undef bad)
+  in cmd end;
+
+fun prev_command id =
+  if id = no_id then no_id
+  else
+    (case Graph.imm_preds (get_commands ()) id handle Graph.UNDEF bad => err_undef bad of
+      [] => no_id
+    | [prev] => prev
+    | _ => sys_error ("Non-linear command dependency " ^ quote id));
+
+fun next_commands id =
+  if id = no_id then []
+  else Graph.imm_succs (get_commands ()) id handle Graph.UNDEF bad => err_undef bad;
+
+fun descendant_commands ids =
+  Graph.all_succs (get_commands ()) (distinct (op =) (filter_out (fn id => id = no_id) ids))
+    handle Graph.UNDEF bad => err_undef bad;
+
+
+(* maintain status *)
+
+fun report_status markup id = Toplevel.status (#transition (the_command id)) markup;
+
+fun update_status status id = change_commands (Graph.map_node id (map_status (K status)));
+
+fun report_update_status status id =
+  change_commands (Graph.map_node id (map_status (fn old_status =>
+    let val markup = status_markup status
+    in if markup <> status_markup old_status then report_status markup id else (); status end)));
+
+
+(* create and dispose commands *)
+
+fun create_command raw_tr =
+  let
+    val (id, tr) =
+      (case Toplevel.get_id raw_tr of
+        SOME id => (id, raw_tr)
+      | NONE =>
+          let val id =
+            if ! Toplevel.debug then "isabelle:" ^ Toplevel.name_of raw_tr ^ serial_string ()
+            else "isabelle:" ^ serial_string ()
+          in (id, Toplevel.put_id id raw_tr) end);
+
+    val cmd = make_command (category_of tr, tr, Unprocessed);
+    val _ = change_commands (Graph.new_node (id, cmd));
+  in id end;
+
+fun dispose_commands ids =
+  let
+    val desc = descendant_commands ids;
+    val _ = List.app (report_status Markup.disposed) desc;
+    val _ = change_commands (Graph.del_nodes desc);
+  in () end;
+
+
+(* final state *)
+
+fun the_state id =
+  (case the_command id of
+    {status = Finished state, ...} => state
+  | {transition, ...} => error ("Unfinished command " ^ Toplevel.str_of transition));
+
+
+
+(** editor model **)
+
+(* run commands *)
+
+fun try_run id =
+  (case try the_state (prev_command id) of
+    NONE => ()
+  | SOME state =>
+      (case run true (#transition (the_command id)) state of
+        NONE => ()
+      | SOME status => report_update_status status id));
+
+fun rerun_commands ids =
+  (List.app (report_update_status Unprocessed) ids; List.app try_run ids);
+
+
+(* modify document *)
+
+fun insert_command prev id = NAMED_CRITICAL "Isar" (fn () =>
+  let
+    val nexts = next_commands prev;
+    val _ = change_commands
+     (fold (fn next => Graph.del_edge (prev, next)) nexts #> add_edge (prev, id) #>
+      fold (fn next => Graph.add_edge (id, next)) nexts);
+  in descendant_commands [id] end) |> rerun_commands;
+
+fun remove_command id = NAMED_CRITICAL "Isar" (fn () =>
+  let
+    val prev = prev_command id;
+    val nexts = next_commands id;
+    val _ = change_commands
+     (fold (fn next => Graph.del_edge (id, next)) nexts #>
+      fold (fn next => add_edge (prev, next)) nexts);
+  in descendant_commands nexts end) |> rerun_commands;
+
+
+
+(** command syntax **)
+
+local
+
+structure P = OuterParse and K = OuterKeyword;
+val op >> = Scan.>>;
+
+in
+
+(* global history *)
+
+val _ =
+  OuterSyntax.improper_command "init_toplevel" "init toplevel point-of-interest" K.control
+    (Scan.succeed (Toplevel.no_timing o Toplevel.imperative init));
+
+val _ =
+  OuterSyntax.improper_command "linear_undo" "undo commands" K.control
+    (Scan.optional P.nat 1 >>
+      (fn n => Toplevel.no_timing o Toplevel.imperative (fn () => linear_undo n)));
+
+val _ =
+  OuterSyntax.improper_command "undo" "undo commands (skipping closed proofs)" K.control
+    (Scan.optional P.nat 1 >>
+      (fn n => Toplevel.no_timing o Toplevel.imperative (fn () => undo n)));
+
+val _ =
+  OuterSyntax.improper_command "undos_proof" "undo commands (skipping closed proofs)" K.control
+    (Scan.optional P.nat 1 >> (fn n => Toplevel.no_timing o
+      Toplevel.keep (fn state =>
+        if Toplevel.is_proof state then (undo n; print ()) else raise Toplevel.UNDEF)));
+
+val _ =
+  OuterSyntax.improper_command "cannot_undo" "partial undo -- Proof General legacy" K.control
+    (P.name >>
+      (fn "end" => Toplevel.no_timing o Toplevel.imperative (fn () => undo 1)
+        | txt => Toplevel.imperative (fn () => error ("Cannot undo " ^ quote txt))));
+
+val _ =
+  OuterSyntax.improper_command "kill" "kill partial proof or theory development" K.control
+    (Scan.succeed (Toplevel.no_timing o Toplevel.imperative kill));
+
+
+(* editor model *)
+
+val _ =
+  OuterSyntax.internal_command "Isar.command"
+    (P.string -- P.string >> (fn (id, text) =>
+      Toplevel.imperative (fn () =>
+        ignore (create_command (OuterSyntax.prepare_command (Position.id id) text)))));
+
+val _ =
+  OuterSyntax.internal_command "Isar.insert"
+    (P.string -- P.string >> (fn (prev, id) =>
+      Toplevel.imperative (fn () => insert_command prev id)));
+
+val _ =
+  OuterSyntax.internal_command "Isar.remove"
+    (P.string >> (fn id => Toplevel.imperative (fn () => remove_command id)));
+
+end;
+
+end;
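
The TTY model in the new isar.ML keeps a linear history of (previous state,
transition) pairs and implements undo by popping entries until a suitable
command is found. A minimal Standard ML sketch of that idea (a toy model with
placeholder names, not code from this changeset):

  datatype state = State of int;                       (* stand-in for Toplevel.state *)

  val current : state ref = ref (State 0);
  val history : (state * string) list ref = ref [];    (* previous state, command name *)

  fun commit name next =                               (* record a successful transition *)
    (history := (!current, name) :: !history; current := next);

  fun undo () =
    (case !history of
      [] => raise Fail "Undo history exhausted"
    | (prev, _) :: rest => (history := rest; current := prev));
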
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/System/session.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,112 @@
+(*  Title:      Pure/System/session.ML
+    Author:     Markus Wenzel, TU Muenchen
+
+Session management -- maintain state of logic images.
+*)
+
+signature SESSION =
+sig
+  val id: unit -> string list
+  val name: unit -> string
+  val welcome: unit -> string
+  val use_dir: string -> bool -> string list -> bool -> bool -> string -> bool -> string list ->
+    string -> string -> bool * string -> string -> int -> bool -> int -> int -> bool -> unit
+  val finish: unit -> unit
+end;
+
+structure Session: SESSION =
+struct
+
+
+(* session state *)
+
+val session = ref ([Context.PureN]: string list);
+val session_path = ref ([]: string list);
+val session_finished = ref false;
+val remote_path = ref (NONE: Url.T option);
+
+
+(* access path *)
+
+fun id () = ! session;
+fun path () = ! session_path;
+
+fun str_of [] = Context.PureN
+  | str_of elems = space_implode "/" elems;
+
+fun name () = "Isabelle/" ^ str_of (path ());
+
+
+(* welcome *)
+
+fun welcome () =
+  if Distribution.is_official then
+    "Welcome to " ^ name () ^ " (" ^ Distribution.version ^ ")"
+  else
+    "Unofficial version of " ^ name () ^ " (" ^ Distribution.version ^ ")" ^
+    (if Distribution.changelog <> "" then "\nSee also " ^ Distribution.changelog else "");
+
+val _ =
+  OuterSyntax.improper_command "welcome" "print welcome message" OuterKeyword.diag
+    (Scan.succeed (Toplevel.no_timing o Toplevel.imperative (writeln o welcome)));
+
+
+(* add_path *)
+
+fun add_path reset s =
+  let val sess = ! session @ [s] in
+    (case duplicates (op =) sess of
+      [] => (session := sess; session_path := ((if reset then [] else ! session_path) @ [s]))
+    | dups => error ("Duplicate session identifiers " ^ commas_quote dups ^ " in " ^ str_of sess))
+  end;
+
+
+(* init *)
+
+fun init reset parent name =
+  if not (member (op =) (! session) parent) orelse not (! session_finished) then
+    error ("Unfinished parent session " ^ quote parent ^ " for " ^ quote name)
+  else (add_path reset name; session_finished := false);
+
+
+(* finish *)
+
+fun finish () =
+  (Output.accumulated_time ();
+    ThyInfo.finish ();
+    Present.finish ();
+    Future.shutdown ();
+    session_finished := true);
+
+
+(* use_dir *)
+
+fun get_rpath rpath =
+  (if rpath = "" then () else
+     if is_some (! remote_path) then
+       error "Path for remote theory browsing information may only be set once"
+     else
+       remote_path := SOME (Url.explode rpath);
+   (! remote_path, rpath <> ""));
+
+fun dumping (_, "") = NONE
+  | dumping (cp, path) = SOME (cp, Path.explode path);
+
+fun use_dir root build modes reset info doc doc_graph doc_versions
+    parent name dump rpath level verbose max_threads trace_threads parallel_proofs =
+  ((fn () =>
+     (init reset parent name;
+      Present.init build info doc doc_graph doc_versions (path ()) name
+        (dumping dump) (get_rpath rpath) verbose (map ThyInfo.get_theory (ThyInfo.get_names ()));
+      use root;
+      finish ()))
+    |> setmp_noncritical Proofterm.proofs level
+    |> setmp_noncritical print_mode (modes @ print_mode_value ())
+    |> setmp_noncritical Goal.parallel_proofs parallel_proofs
+    |> setmp_noncritical Multithreading.trace trace_threads
+    |> setmp_noncritical Multithreading.max_threads
+      (if Multithreading.available then max_threads
+       else (if max_threads = 1 then () else warning "Multithreading support unavailable"; 1))) ()
+  handle exn => (Output.error_msg (Toplevel.exn_message exn); exit 1);
+
+end;
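
In use_dir above, the session body is wrapped in several setmp_noncritical
applications, which temporarily rebind global references (proof terms, print
mode, thread limits) for the duration of the build. A simplified sketch of such
a "set temporarily, then restore" combinator in plain Standard ML (illustrative
only, not the actual library code):

  (* run f x with r set to value, restoring the old contents afterwards,
     even if f raises an exception *)
  fun setmp r value f x =
    let
      val old = !r
      val _ = r := value
      val result = f x handle exn => (r := old; raise exn)
      val _ = r := old
    in result end;

  (* example: temporarily raise a verbosity level *)
  val verbosity = ref 0;
  val n = setmp verbosity 5 (fn () => !verbosity) ();   (* n = 5; verbosity is 0 again *)
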
--- a/src/Pure/Tools/ROOT.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Tools/ROOT.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -4,7 +4,6 @@
 *)
 
 use "named_thms.ML";
-use "isabelle_process.ML";
 
 (*basic XML support*)
 use "xml_syntax.ML";
--- a/src/Pure/Tools/find_consts.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Tools/find_consts.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -119,9 +119,7 @@
       |> sort (rev_order o int_ord o pairself snd)
       |> map ((apsnd fst) o fst);
 
-    val end_msg = " in " ^
-                  (List.nth (String.tokens Char.isSpace (end_timing start), 3))
-                  ^ " secs"
+    val end_msg = " in " ^ Time.toString (#all (end_timing start)) ^ " secs";
   in
     Pretty.big_list "searched for:" (map pretty_criterion raw_criteria)
       :: Pretty.str ""
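
The timing fix above relies on end_timing now returning a record, so the
elapsed time can be formatted directly instead of being fished out of a message
string. The formatting itself is plain SML Basis functionality, e.g. (made-up
value):

  val elapsed = Time.fromMilliseconds 1234;
  val end_msg = " in " ^ Time.toString elapsed ^ " secs";   (* " in 1.234 secs" *)
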
--- a/src/Pure/Tools/find_theorems.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/Tools/find_theorems.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -6,16 +6,14 @@
 
 signature FIND_THEOREMS =
 sig
-  val limit: int ref
-  val tac_limit: int ref
-
   datatype 'term criterion =
     Name of string | Intro | Elim | Dest | Solves | Simp of 'term |
     Pattern of 'term
-
+  val tac_limit: int ref
+  val limit: int ref
   val find_theorems: Proof.context -> thm option -> bool ->
     (bool * string criterion) list -> (Facts.ref * thm) list
-
+  val pretty_thm: Proof.context -> Facts.ref * thm -> Pretty.T
   val print_theorems: Proof.context -> thm option -> int option -> bool ->
     (bool * string criterion) list -> unit
 end;
@@ -344,8 +342,7 @@
 
 fun find_theorems ctxt opt_goal rem_dups raw_criteria =
   let
-    val add_prems = Seq.hd o (TRY (Method.insert_tac
-                                     (Assumption.prems_of ctxt) 1));
+    val add_prems = Seq.hd o (TRY (Method.insert_tac (Assumption.prems_of ctxt) 1));
     val opt_goal' = Option.map add_prems opt_goal;
 
     val criteria = map (apsnd (read_criterion ctxt)) raw_criteria;
@@ -359,6 +356,11 @@
       else raw_matches;
   in matches end;
 
+
+fun pretty_thm ctxt (thmref, thm) = Pretty.block
+  [Pretty.str (Facts.string_of_ref thmref), Pretty.str ":", Pretty.brk 1,
+    ProofContext.pretty_thm ctxt thm];
+
 fun print_theorems ctxt opt_goal opt_limit rem_dups raw_criteria =
   let
     val start = start_timing ();
@@ -370,9 +372,7 @@
     val lim = the_default (! limit) opt_limit;
     val thms = Library.drop (len - lim, matches);
 
-    val end_msg = " in " ^
-                  (List.nth (String.tokens Char.isSpace (end_timing start), 3))
-                  ^ " secs"
+    val end_msg = " in " ^ Time.toString (#all (end_timing start)) ^ " secs";
   in
     Pretty.big_list "searched for:" (map (pretty_criterion ctxt) criteria)
         :: Pretty.str "" ::
@@ -382,7 +382,7 @@
           (if len <= lim then ""
            else " (" ^ string_of_int lim ^ " displayed)")
            ^ end_msg ^ ":"), Pretty.str ""] @
-        map Display.pretty_fact thms)
+        map (pretty_thm ctxt) thms)
     |> Pretty.chunks |> Pretty.writeln
   end;
 
--- a/src/Pure/Tools/isabelle_process.ML	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-(*  Title:      Pure/Tools/isabelle_process.ML
-    Author:     Makarius
-
-Isabelle process wrapper -- interaction via external program.
-
-General format of process output:
-
-  (1) unmarked stdout/stderr, no line structure (output should be
-  processed immediately as it arrives);
-
-  (2) properly marked-up messages, e.g. for writeln channel
-
-  "\002A" ^ props ^ "\002,\n" ^ text ^ "\002.\n"
-
-  where the props consist of name=value lines terminated by "\002,\n"
-  each, and the remaining text is any number of lines (output is
-  supposed to be processed in one piece);
-
-  (3) special init message holds "pid" and "session" property;
-
-  (4) message content is encoded in YXML format.
-*)
-
-signature ISABELLE_PROCESS =
-sig
-  val isabelle_processN: string
-  val init: string -> unit
-end;
-
-structure IsabelleProcess: ISABELLE_PROCESS =
-struct
-
-(* print modes *)
-
-val isabelle_processN = "isabelle_process";
-
-val _ = Output.add_mode isabelle_processN Output.default_output Output.default_escape;
-val _ = Markup.add_mode isabelle_processN YXML.output_markup;
-
-
-(* message markup *)
-
-fun special ch = Symbol.STX ^ ch;
-val special_sep = special ",";
-val special_end = special ".";
-
-local
-
-fun clean_string bad str =
-  if exists_string (member (op =) bad) str then
-    translate_string (fn c => if member (op =) bad c then Symbol.DEL else c) str
-  else str;
-
-fun message_props props =
-  let val clean = clean_string [Symbol.STX, "\n", "\r"]
-  in implode (map (fn (x, y) => clean x ^ "=" ^ clean y ^ special_sep ^ "\n") props) end;
-
-fun message_pos trees = trees |> get_first
-  (fn XML.Elem (name, atts, ts) =>
-        if name = Markup.positionN then SOME (Position.of_properties atts)
-        else message_pos ts
-    | _ => NONE);
-
-fun output out_stream s = NAMED_CRITICAL "IO" (fn () =>
-  (TextIO.output (out_stream, s); TextIO.output (out_stream, "\n")));
-
-in
-
-fun message _ _ "" = ()
-  | message out_stream ch body =
-      let
-        val pos = the_default Position.none (message_pos (YXML.parse_body body));
-        val props =
-          Position.properties_of (Position.thread_data ())
-          |> Position.default_properties pos;
-        val txt = clean_string [Symbol.STX] body;
-      in output out_stream (special ch ^ message_props props ^ txt ^ special_end) end;
-
-fun init_message out_stream =
-  let
-    val pid = (Markup.pidN, process_id ());
-    val session = (Markup.sessionN, List.last (Session.id ()) handle List.Empty => "unknown");
-    val text = Session.welcome ();
-  in output out_stream (special "A" ^ message_props [pid, session] ^ text ^ special_end) end;
-
-end;
-
-
-(* channels *)
-
-local
-
-fun auto_flush stream =
-  let
-    val _ = TextIO.StreamIO.setBufferMode (TextIO.getOutstream stream, IO.BLOCK_BUF);
-    fun loop () =
-      (OS.Process.sleep (Time.fromMilliseconds 50); try TextIO.flushOut stream; loop ());
-  in loop end;
-
-in
-
-fun setup_channels out =
-  let
-    val out_stream =
-      if out = "-" then TextIO.stdOut
-      else
-        let
-          val path = File.platform_path (Path.explode out);
-          val out_stream = TextIO.openOut path;  (*fifo blocks until reader is ready*)
-          val _ = OS.FileSys.remove path;  (*prevent alien access, indicate writer is ready*)
-          val _ = SimpleThread.fork false (auto_flush TextIO.stdOut);
-        in out_stream end;
-    val _ = SimpleThread.fork false (auto_flush out_stream);
-    val _ = SimpleThread.fork false (auto_flush TextIO.stdErr);
-  in
-    Output.status_fn   := message out_stream "B";
-    Output.writeln_fn  := message out_stream "C";
-    Output.priority_fn := message out_stream "D";
-    Output.tracing_fn  := message out_stream "E";
-    Output.warning_fn  := message out_stream "F";
-    Output.error_fn    := message out_stream "G";
-    Output.debug_fn    := message out_stream "H";
-    Output.prompt_fn   := ignore;
-    out_stream
-  end;
-
-end;
-
-
-(* init *)
-
-fun init out =
- (change print_mode (update (op =) isabelle_processN);
-  setup_channels out |> init_message;
-  OuterKeyword.report ();
-  Isar.toplevel_loop {init = true, welcome = false, sync = true, secure = true});
-
-end;
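
The header of the removed isabelle_process.ML above documents the wire format:
each message is an STX-prefixed channel letter, followed by name=value
properties each terminated by "\002,\n", then the message text, then "\002.\n";
setup_channels assigns the letters B..H to the status, writeln, priority,
tracing, warning, error and debug channels. A toy Standard ML sketch of that
framing (illustration only, not the implementation itself):

  val STX = str (chr 2);          (* the \002 marker *)

  fun frame ch props text =
    STX ^ ch ^
    String.concat (map (fn (x, y) => x ^ "=" ^ y ^ STX ^ ",\n") props) ^
    text ^ STX ^ ".\n";

  (* the writeln channel is "C", so e.g.
     frame "C" [("id", "42")] "hello" = "\002C" ^ "id=42\002,\n" ^ "hello" ^ "\002.\n" *)
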
--- a/src/Pure/Tools/isabelle_process.scala	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,435 +0,0 @@
-/*  Title:      Pure/Tools/isabelle_process.ML
-    Author:     Makarius
-    Options:    :folding=explicit:collapseFolds=1:
-
-Isabelle process management -- always reactive due to multi-threaded I/O.
-*/
-
-package isabelle
-
-import java.util.concurrent.LinkedBlockingQueue
-import java.io.{BufferedReader, BufferedWriter, InputStreamReader, OutputStreamWriter,
-  InputStream, OutputStream, IOException}
-
-
-object IsabelleProcess {
-
-  /* results */
-
-  object Kind extends Enumeration {
-    //{{{ values and codes
-    // internal system notification
-    val SYSTEM = Value("SYSTEM")
-    // Posix channels/events
-    val STDIN = Value("STDIN")
-    val STDOUT = Value("STDOUT")
-    val SIGNAL = Value("SIGNAL")
-    val EXIT = Value("EXIT")
-    // Isabelle messages
-    val INIT = Value("INIT")
-    val STATUS = Value("STATUS")
-    val WRITELN = Value("WRITELN")
-    val PRIORITY = Value("PRIORITY")
-    val TRACING = Value("TRACING")
-    val WARNING = Value("WARNING")
-    val ERROR = Value("ERROR")
-    val DEBUG = Value("DEBUG")
-    // messages codes
-    val code = Map(
-      ('A' : Int) -> Kind.INIT,
-      ('B' : Int) -> Kind.STATUS,
-      ('C' : Int) -> Kind.WRITELN,
-      ('D' : Int) -> Kind.PRIORITY,
-      ('E' : Int) -> Kind.TRACING,
-      ('F' : Int) -> Kind.WARNING,
-      ('G' : Int) -> Kind.ERROR,
-      ('H' : Int) -> Kind.DEBUG,
-      ('0' : Int) -> Kind.SYSTEM,
-      ('1' : Int) -> Kind.STDIN,
-      ('2' : Int) -> Kind.STDOUT,
-      ('3' : Int) -> Kind.SIGNAL,
-      ('4' : Int) -> Kind.EXIT)
-    // message markup
-    val markup = Map(
-      Kind.INIT -> Markup.INIT,
-      Kind.STATUS -> Markup.STATUS,
-      Kind.WRITELN -> Markup.WRITELN,
-      Kind.PRIORITY -> Markup.PRIORITY,
-      Kind.TRACING -> Markup.TRACING,
-      Kind.WARNING -> Markup.WARNING,
-      Kind.ERROR -> Markup.ERROR,
-      Kind.DEBUG -> Markup.DEBUG,
-      Kind.SYSTEM -> Markup.SYSTEM,
-      Kind.STDIN -> Markup.STDIN,
-      Kind.STDOUT -> Markup.STDOUT,
-      Kind.SIGNAL -> Markup.SIGNAL,
-      Kind.EXIT -> Markup.EXIT)
-    //}}}
-    def is_raw(kind: Value) =
-      kind == STDOUT
-    def is_control(kind: Value) =
-      kind == SYSTEM ||
-      kind == SIGNAL ||
-      kind == EXIT
-    def is_system(kind: Value) =
-      kind == SYSTEM ||
-      kind == STDIN ||
-      kind == SIGNAL ||
-      kind == EXIT ||
-      kind == STATUS
-  }
-
-  class Result(val kind: Kind.Value, val props: List[(String, String)], val result: String) {
-    override def toString = {
-      val trees = YXML.parse_body_failsafe(result)
-      val res =
-        if (kind == Kind.STATUS) trees.map(_.toString).mkString
-        else trees.flatMap(XML.content(_).mkString).mkString
-      if (props.isEmpty)
-        kind.toString + " [[" + res + "]]"
-      else
-        kind.toString + " " +
-          (for ((x, y) <- props) yield x + "=" + y).mkString("{", ",", "}") + " [[" + res + "]]"
-    }
-    def is_raw = Kind.is_raw(kind)
-    def is_control = Kind.is_control(kind)
-    def is_system = Kind.is_system(kind)
-  }
-
-  def parse_message(isabelle_system: IsabelleSystem, result: Result) =
-  {
-    XML.Elem(Markup.MESSAGE, (Markup.CLASS, Kind.markup(result.kind)) :: result.props,
-      YXML.parse_body_failsafe(isabelle_system.symbols.decode(result.result)))
-  }
-}
-
-
-class IsabelleProcess(isabelle_system: IsabelleSystem,
-  results: EventBus[IsabelleProcess.Result], args: String*)
-{
-  import IsabelleProcess._
-
-
-  /* demo constructor */
-
-  def this(args: String*) =
-    this(new IsabelleSystem, new EventBus[IsabelleProcess.Result] + Console.println, args: _*)
-
-
-  /* process information */
-
-  @volatile private var proc: Process = null
-  @volatile private var closing = false
-  @volatile private var pid: String = null
-  @volatile private var the_session: String = null
-  def session = the_session
-
-
-  /* results */
-
-  def parse_message(result: Result): XML.Tree =
-    IsabelleProcess.parse_message(isabelle_system, result)
-
-  private val result_queue = new LinkedBlockingQueue[Result]
-
-  private def put_result(kind: Kind.Value, props: List[(String, String)], result: String)
-  {
-    if (kind == Kind.INIT) {
-      val map = Map(props: _*)
-      if (map.isDefinedAt(Markup.PID)) pid = map(Markup.PID)
-      if (map.isDefinedAt(Markup.SESSION)) the_session = map(Markup.SESSION)
-    }
-    result_queue.put(new Result(kind, props, result))
-  }
-
-  private class ResultThread extends Thread("isabelle: results") {
-    override def run() = {
-      var finished = false
-      while (!finished) {
-        val result =
-          try { result_queue.take }
-          catch { case _: NullPointerException => null }
-
-        if (result != null) {
-          results.event(result)
-          if (result.kind == Kind.EXIT) finished = true
-        }
-        else finished = true
-      }
-    }
-  }
-
-
-  /* signals */
-
-  def interrupt() = synchronized {
-    if (proc == null) error("Cannot interrupt Isabelle: no process")
-    if (pid == null) put_result(Kind.SYSTEM, Nil, "Cannot interrupt: unknown pid")
-    else {
-      try {
-        if (isabelle_system.execute(true, "kill", "-INT", pid).waitFor == 0)
-          put_result(Kind.SIGNAL, Nil, "INT")
-        else
-          put_result(Kind.SYSTEM, Nil, "Cannot interrupt: kill command failed")
-      }
-      catch { case e: IOException => error("Cannot interrupt Isabelle: " + e.getMessage) }
-    }
-  }
-
-  def kill() = synchronized {
-    if (proc == 0) error("Cannot kill Isabelle: no process")
-    else {
-      try_close()
-      Thread.sleep(500)
-      put_result(Kind.SIGNAL, Nil, "KILL")
-      proc.destroy
-      proc = null
-      pid = null
-    }
-  }
-
-
-  /* output being piped into the process */
-
-  private val output = new LinkedBlockingQueue[String]
-
-  private def output_raw(text: String) = synchronized {
-    if (proc == null) error("Cannot output to Isabelle: no process")
-    if (closing) error("Cannot output to Isabelle: already closing")
-    output.put(text)
-  }
-
-  def output_sync(text: String) =
-    output_raw(" \\<^sync>\n; " + text + " \\<^sync>;\n")
-
-
-  def command(text: String) =
-    output_sync("Isabelle.command " + IsabelleSyntax.encode_string(text))
-
-  def command(props: List[(String, String)], text: String) =
-    output_sync("Isabelle.command " + IsabelleSyntax.encode_properties(props) + " " +
-      IsabelleSyntax.encode_string(text))
-
-  def ML(text: String) =
-    output_sync("ML_val " + IsabelleSyntax.encode_string(text))
-
-  def close() = synchronized {    // FIXME watchdog/timeout
-    output_raw("\u0000")
-    closing = true
-  }
-
-  def try_close() = synchronized {
-    if (proc != null && !closing) {
-      try { close() }
-      catch { case _: RuntimeException => }
-    }
-  }
-
-
-  /* stdin */
-
-  private class StdinThread(out_stream: OutputStream) extends Thread("isabelle: stdin") {
-    override def run() = {
-      val writer = new BufferedWriter(new OutputStreamWriter(out_stream, isabelle_system.charset))
-      var finished = false
-      while (!finished) {
-        try {
-          //{{{
-          val s = output.take
-          if (s == "\u0000") {
-            writer.close
-            finished = true
-          }
-          else {
-            put_result(Kind.STDIN, Nil, s)
-            writer.write(s)
-            writer.flush
-          }
-          //}}}
-        }
-        catch {
-          case e: IOException => put_result(Kind.SYSTEM, Nil, "Stdin thread: " + e.getMessage)
-        }
-      }
-      put_result(Kind.SYSTEM, Nil, "Stdin thread terminated")
-    }
-  }
-
-
-  /* stdout */
-
-  private class StdoutThread(in_stream: InputStream) extends Thread("isabelle: stdout") {
-    override def run() = {
-      val reader = new BufferedReader(new InputStreamReader(in_stream, isabelle_system.charset))
-      var result = new StringBuilder(100)
-
-      var finished = false
-      while (!finished) {
-        try {
-          //{{{
-          var c = -1
-          var done = false
-          while (!done && (result.length == 0 || reader.ready)) {
-            c = reader.read
-            if (c >= 0) result.append(c.asInstanceOf[Char])
-            else done = true
-          }
-          if (result.length > 0) {
-            put_result(Kind.STDOUT, Nil, result.toString)
-            result.length = 0
-          }
-          else {
-            reader.close
-            finished = true
-            try_close()
-          }
-          //}}}
-        }
-        catch {
-          case e: IOException => put_result(Kind.SYSTEM, Nil, "Stdout thread: " + e.getMessage)
-        }
-      }
-      put_result(Kind.SYSTEM, Nil, "Stdout thread terminated")
-    }
-  }
-
-
-  /* messages */
-
-  private class MessageThread(fifo: String) extends Thread("isabelle: messages") {
-    override def run() = {
-      val reader = isabelle_system.fifo_reader(fifo)
-      var kind: Kind.Value = null
-      var props: List[(String, String)] = Nil
-      var result = new StringBuilder
-
-      var finished = false
-      while (!finished) {
-        try {
-          if (kind == null) {
-            //{{{ Char mode -- resync
-            var c = -1
-            do {
-              c = reader.read
-              if (c >= 0 && c != 2) result.append(c.asInstanceOf[Char])
-            } while (c >= 0 && c != 2)
-
-            if (result.length > 0) {
-              put_result(Kind.SYSTEM, Nil, "Malformed message:\n" + result.toString)
-              result.length = 0
-            }
-            if (c < 0) {
-              reader.close
-              finished = true
-              try_close()
-            }
-            else {
-              c = reader.read
-              if (Kind.code.isDefinedAt(c)) kind = Kind.code(c)
-              else kind = null
-            }
-            //}}}
-          }
-          else {
-            //{{{ Line mode
-            val line = reader.readLine
-            if (line == null) {
-              reader.close
-              finished = true
-              try_close()
-            }
-            else {
-              val len = line.length
-              // property
-              if (line.endsWith("\u0002,")) {
-                val i = line.indexOf('=')
-                if (i > 0) {
-                  val name = line.substring(0, i)
-                  val value = line.substring(i + 1, len - 2)
-                  props = (name, value) :: props
-                }
-              }
-              // last text line
-              else if (line.endsWith("\u0002.")) {
-                result.append(line.substring(0, len - 2))
-                put_result(kind, props.reverse, result.toString)
-                kind = null
-                props = Nil
-                result.length = 0
-              }
-              // text line
-              else {
-                result.append(line)
-                result.append('\n')
-              }
-            }
-            //}}}
-          }
-        }
-        catch {
-          case e: IOException => put_result(Kind.SYSTEM, Nil, "Message thread: " + e.getMessage)
-        }
-      }
-      put_result(Kind.SYSTEM, Nil, "Message thread terminated")
-    }
-  }
-
-
-
-  /** main **/
-
-  {
-    /* isabelle version */
-
-    {
-      val (msg, rc) = isabelle_system.isabelle_tool("version")
-      if (rc != 0) error("Version check failed -- bad Isabelle installation:\n" + msg)
-      put_result(Kind.SYSTEM, Nil, msg)
-    }
-
-
-    /* messages */
-
-    val message_fifo = isabelle_system.mk_fifo()
-    def rm_fifo() = isabelle_system.rm_fifo(message_fifo)
-
-    val message_thread = new MessageThread(message_fifo)
-    message_thread.start
-
-    new ResultThread().start
-
-
-    /* exec process */
-
-    try {
-      val cmdline =
-        List(isabelle_system.getenv_strict("ISABELLE_PROCESS"), "-W", message_fifo) ++ args
-      proc = isabelle_system.execute(true, cmdline: _*)
-    }
-    catch {
-      case e: IOException =>
-        rm_fifo()
-        error("Failed to execute Isabelle process: " + e.getMessage)
-    }
-
-
-    /* stdin/stdout */
-
-    new StdinThread(proc.getOutputStream).start
-    new StdoutThread(proc.getInputStream).start
-
-
-    /* exit */
-
-    new Thread("isabelle: exit") {
-      override def run() = {
-        val rc = proc.waitFor()
-        Thread.sleep(300)
-        put_result(Kind.SYSTEM, Nil, "Exit thread terminated")
-        put_result(Kind.EXIT, Nil, Integer.toString(rc))
-        rm_fifo()
-      }
-    }.start
-
-  }
-}
--- a/src/Pure/Tools/isabelle_system.scala	Mon Mar 02 08:15:32 2009 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,158 +0,0 @@
-/*  Title:      Pure/Tools/isabelle_system.scala
-    Author:     Makarius
-
-Isabelle system support -- basic Cygwin/Posix compatibility.
-*/
-
-package isabelle
-
-import java.util.regex.{Pattern, Matcher}
-import java.io.{BufferedReader, InputStreamReader, FileInputStream, File, IOException}
-
-import scala.io.Source
-
-
-class IsabelleSystem {
-
-  val charset = "UTF-8"
-
-
-  /* Isabelle environment settings */
-
-  private val environment = System.getenv
-
-  def getenv(name: String) = {
-    val value = environment.get(if (name == "HOME") "HOME_JVM" else name)
-    if (value != null) value else ""
-  }
-
-  def getenv_strict(name: String) = {
-    val value = environment.get(name)
-    if (value != "") value else error("Undefined environment variable: " + name)
-  }
-
-  val is_cygwin = Pattern.matches(".*-cygwin", getenv_strict("ML_PLATFORM"))
-
-
-  /* file path specifications */
-
-  private val cygdrive_pattern = Pattern.compile("/cygdrive/([a-zA-Z])($|/.*)")
-
-  def platform_path(source_path: String) = {
-    val result_path = new StringBuilder
-
-    def init(path: String) = {
-      val cygdrive = cygdrive_pattern.matcher(path)
-      if (cygdrive.matches) {
-        result_path.length = 0
-        result_path.append(cygdrive.group(1))
-        result_path.append(":")
-        result_path.append(File.separator)
-        cygdrive.group(2)
-      }
-      else if (path.startsWith("/")) {
-        result_path.length = 0
-        result_path.append(getenv_strict("ISABELLE_ROOT_JVM"))
-        path.substring(1)
-      }
-      else path
-    }
-    def append(path: String) = {
-      for (p <- init(path).split("/")) {
-        if (p != "") {
-          val len = result_path.length
-          if (len > 0 && result_path(len - 1) != File.separatorChar)
-            result_path.append(File.separator)
-          result_path.append(p)
-        }
-      }
-    }
-    for (p <- init(source_path).split("/")) {
-      if (p.startsWith("$")) append(getenv_strict(p.substring(1)))
-      else if (p == "~") append(getenv_strict("HOME"))
-      else if (p == "~~") append(getenv_strict("ISABELLE_HOME"))
-      else append(p)
-    }
-    result_path.toString
-  }
-
-  def platform_file(path: String) =
-    new File(platform_path(path))
-
-
-  /* processes */
-
-  def execute(redirect: Boolean, args: String*): Process = {
-    val cmdline = new java.util.LinkedList[String]
-    if (is_cygwin) cmdline.add(platform_path("/bin/env"))
-    for (s <- args) cmdline.add(s)
-
-    val proc = new ProcessBuilder(cmdline)
-    proc.environment.clear
-    proc.environment.putAll(environment)
-    proc.redirectErrorStream(redirect)
-    proc.start
-  }
-
-
-  /* Isabelle tools (non-interactive) */
-
-  def isabelle_tool(args: String*) = {
-    val proc =
-      try { execute(true, (List(getenv_strict("ISABELLE_TOOL")) ++ args): _*) }
-      catch { case e: IOException => error(e.getMessage) }
-    proc.getOutputStream.close
-    val output = Source.fromInputStream(proc.getInputStream, charset).mkString
-    val rc = proc.waitFor
-    (output, rc)
-  }
-
-
-  /* named pipes */
-
-  def mk_fifo() = {
-    val (result, rc) = isabelle_tool("mkfifo")
-    if (rc == 0) result.trim
-    else error(result)
-  }
-
-  def rm_fifo(fifo: String) = {
-    val (result, rc) = isabelle_tool("rmfifo", fifo)
-    if (rc != 0) error(result)
-  }
-
-  def fifo_reader(fifo: String) = {
-    // blocks until writer is ready
-    val stream =
-      if (is_cygwin) execute(false, "cat", fifo).getInputStream
-      else new FileInputStream(fifo)
-    new BufferedReader(new InputStreamReader(stream, charset))
-  }
-
-
-  /* find logics */
-
-  def find_logics() = {
-    val ml_ident = getenv_strict("ML_IDENTIFIER")
-    var logics: Set[String] = Set()
-    for (dir <- getenv_strict("ISABELLE_PATH").split(":")) {
-      val files = platform_file(dir + "/" + ml_ident).listFiles()
-      if (files != null) {
-        for (file <- files if file.isFile) logics += file.getName
-      }
-    }
-    logics.toList.sort(_ < _)
-  }
-
-
-  /* symbols */
-
-  private def read_symbols(path: String) = {
-    val file = new File(platform_path(path))
-    if (file.canRead) Source.fromFile(file).getLines
-    else Iterator.empty
-  }
-  val symbols = new Symbol.Interpretation(
-    read_symbols("$ISABELLE_HOME/etc/symbols") ++
-    read_symbols("$ISABELLE_HOME_USER/etc/symbols"))
-}
--- a/src/Pure/display.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/display.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -20,7 +20,6 @@
   val pretty_thm_aux: Pretty.pp -> bool -> bool -> term list -> thm -> Pretty.T
   val pretty_thm: thm -> Pretty.T
   val string_of_thm: thm -> string
-  val pretty_fact: Facts.ref * thm -> Pretty.T
   val pretty_thms: thm list -> Pretty.T
   val pretty_thm_sg: theory -> thm -> Pretty.T
   val pretty_thms_sg: theory -> thm list -> Pretty.T
@@ -93,10 +92,6 @@
 
 val string_of_thm = Pretty.string_of o pretty_thm;
 
-fun pretty_fact (thmref, thm) = Pretty.block
-  [Pretty.str (Facts.string_of_ref thmref), Pretty.str ":", Pretty.brk 1,
-   pretty_thm thm];
-
 fun pretty_thms [th] = pretty_thm th
   | pretty_thms ths = Pretty.block (Pretty.fbreaks (map pretty_thm ths));
 
--- a/src/Pure/envir.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/envir.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -265,7 +265,7 @@
       | fast Ts (Const (_, T)) = T
       | fast Ts (Free (_, T)) = T
       | fast Ts (Bound i) =
-        (List.nth (Ts, i)
+        (nth Ts i
          handle Subscript => raise TERM ("fastype: Bound", [Bound i]))
       | fast Ts (Var (_, T)) = T
       | fast Ts (Abs (_, T, u)) = T --> fast (T :: Ts) u
--- a/src/Pure/proofterm.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/proofterm.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -470,8 +470,8 @@
     val n = length args;
     fun subst' lev (Bound i) =
          (if i<lev then raise SAME    (*var is locally bound*)
-          else  incr_boundvars lev (List.nth (args, i-lev))
-                  handle Subscript => Bound (i-n)  (*loose: change it*))
+          else  incr_boundvars lev (nth args (i-lev))
+                  handle Subscript => Bound (i-n))  (*loose: change it*)
       | subst' lev (Abs (a, T, body)) = Abs (a, T,  subst' (lev+1) body)
       | subst' lev (f $ t) = (subst' lev f $ substh' lev t
           handle SAME => f $ subst' lev t)
@@ -494,7 +494,7 @@
     val n = length args;
     fun subst (PBound i) Plev tlev =
          (if i < Plev then raise SAME    (*var is locally bound*)
-          else incr_pboundvars Plev tlev (List.nth (args, i-Plev))
+          else incr_pboundvars Plev tlev (nth args (i-Plev))
                  handle Subscript => PBound (i-n)  (*loose: change it*))
       | subst (AbsP (a, t, body)) Plev tlev = AbsP (a, t, subst body (Plev+1) tlev)
       | subst (Abst (a, T, body)) Plev tlev = Abst (a, T, subst body Plev (tlev+1))
@@ -935,7 +935,7 @@
           in (is, ch orelse ch', ts',
               if ch orelse ch' then prf' % t' else prf) end
       | shrink' ls lev ts prfs (prf as PBound i) =
-          (if exists (fn SOME (Bound j) => lev-j <= List.nth (ls, i) | _ => true) ts
+          (if exists (fn SOME (Bound j) => lev-j <= nth ls i | _ => true) ts
              orelse has_duplicates (op =)
                (Library.foldl (fn (js, SOME (Bound j)) => j :: js | (js, _) => js) ([], ts))
              orelse exists #1 prfs then [i] else [], false, map (pair false) ts, prf)
--- a/src/Pure/sign.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/sign.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -338,7 +338,7 @@
     fun typ_of (_, Const (_, T)) = T
       | typ_of (_, Free  (_, T)) = T
       | typ_of (_, Var (_, T)) = T
-      | typ_of (bs, Bound i) = snd (List.nth (bs, i) handle Subscript =>
+      | typ_of (bs, Bound i) = snd (nth bs i handle Subscript =>
           raise TYPE ("Loose bound variable: B." ^ string_of_int i, [], [Bound i]))
       | typ_of (bs, Abs (x, T, body)) = T --> typ_of ((x, T) :: bs, body)
       | typ_of (bs, t $ u) =
--- a/src/Pure/tctical.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/tctical.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -349,15 +349,13 @@
 (*Returns all states that have changed in subgoal i, counted from the LAST
   subgoal.  For stac, for example.*)
 fun CHANGED_GOAL tac i st =
-    let val np = nprems_of st
+    let val np = Thm.nprems_of st
         val d = np-i                 (*distance from END*)
-        val t = List.nth(prems_of st, i-1)
+        val t = Thm.term_of (Thm.cprem_of st i)
         fun diff st' =
-            nprems_of st' - d <= 0   (*the subgoal no longer exists*)
+            Thm.nprems_of st' - d <= 0   (*the subgoal no longer exists*)
             orelse
-             not (Pattern.aeconv (t,
-                                  List.nth(prems_of st',
-                                           nprems_of st' - d - 1)))
+             not (Pattern.aeconv (t, Thm.term_of (Thm.cprem_of st' (Thm.nprems_of st' - d))))
     in  Seq.filter diff (tac i st)  end
     handle Subscript => Seq.empty  (*no subgoal i*);
 
--- a/src/Pure/term.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/term.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -297,7 +297,7 @@
   Ts = [T0,T1,...] holds types of bound variables 0, 1, ...*)
 fun type_of1 (Ts, Const (_,T)) = T
   | type_of1 (Ts, Free  (_,T)) = T
-  | type_of1 (Ts, Bound i) = (List.nth (Ts,i)
+  | type_of1 (Ts, Bound i) = (nth Ts i
         handle Subscript => raise TYPE("type_of: bound variable", [], [Bound i]))
   | type_of1 (Ts, Var (_,T)) = T
   | type_of1 (Ts, Abs (_,T,body)) = T --> type_of1(T::Ts, body)
@@ -322,7 +322,7 @@
         | _ => raise TERM("fastype_of: expected function type", [f$u]))
   | fastype_of1 (_, Const (_,T)) = T
   | fastype_of1 (_, Free (_,T)) = T
-  | fastype_of1 (Ts, Bound i) = (List.nth(Ts,i)
+  | fastype_of1 (Ts, Bound i) = (nth Ts i
          handle Subscript => raise TERM("fastype_of: Bound", [Bound i]))
   | fastype_of1 (_, Var (_,T)) = T
   | fastype_of1 (Ts, Abs (_,T,u)) = T --> fastype_of1 (T::Ts, u);
@@ -387,17 +387,17 @@
 (*number of atoms and abstractions in a term*)
 fun size_of_term tm =
   let
-    fun add_size (t $ u, n) = add_size (t, add_size (u, n))
-      | add_size (Abs (_ ,_, t), n) = add_size (t, n + 1)
-      | add_size (_, n) = n + 1;
-  in add_size (tm, 0) end;
+    fun add_size (t $ u) n = add_size t (add_size u n)
+      | add_size (Abs (_ ,_, t)) n = add_size t (n + 1)
+      | add_size _ n = n + 1;
+  in add_size tm 0 end;
 
-(*number of tfrees, tvars, and constructors in a type*)
+(*number of atoms and constructors in a type*)
 fun size_of_typ ty =
   let
-    fun add_size (Type (_, ars), n) = foldl add_size (n + 1) ars
-      | add_size (_, n) = n + 1;
-  in add_size (ty, 0) end;
+    fun add_size (Type (_, tys)) n = fold add_size tys (n + 1)
+      | add_size _ n = n + 1;
+  in add_size ty 0 end;
 
 fun map_atyps f (Type (a, Ts)) = Type (a, map (map_atyps f) Ts)
   | map_atyps f T = f T;
@@ -638,7 +638,7 @@
     val n = length args;
     fun subst (t as Bound i, lev) =
          (if i < lev then raise SAME   (*var is locally bound*)
-          else incr_boundvars lev (List.nth (args, i - lev))
+          else incr_boundvars lev (nth args (i - lev))
             handle Subscript => Bound (i - n))  (*loose: change it*)
       | subst (Abs (a, T, body), lev) = Abs (a, T, subst (body, lev + 1))
       | subst (f $ t, lev) =
--- a/src/Pure/type_infer.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Pure/type_infer.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -369,7 +369,7 @@
     fun inf _ (PConst (_, T)) = T
       | inf _ (PFree (_, T)) = T
       | inf _ (PVar (_, T)) = T
-      | inf bs (PBound i) = snd (List.nth (bs, i) handle Subscript => err_loose i)
+      | inf bs (PBound i) = snd (nth bs i handle Subscript => err_loose i)
       | inf bs (PAbs (x, T, t)) = PType ("fun", [T, inf ((x, T) :: bs) t])
       | inf bs (PAppl (t, u)) =
           let
--- a/src/Tools/Compute_Oracle/Compute_Oracle.thy	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Compute_Oracle/Compute_Oracle.thy	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/Compute_Oracle/Compute_Oracle.thy
-    ID:         $Id$
     Author:     Steven Obua, TU Munich
 
 Steven Obua's evaluator.
--- a/src/Tools/Compute_Oracle/am_compiler.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_compiler.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/Compute_Oracle/am_compiler.ML
-    ID:         $Id$
     Author:     Steven Obua
 *)
 
--- a/src/Tools/Compute_Oracle/am_ghc.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_ghc.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/Compute_Oracle/am_ghc.ML
-    ID:         $Id$
     Author:     Steven Obua
 *)
 
--- a/src/Tools/Compute_Oracle/am_interpreter.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_interpreter.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/Compute_Oracle/am_interpreter.ML
-    ID:         $Id$
     Author:     Steven Obua
 *)
 
--- a/src/Tools/Compute_Oracle/am_sml.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_sml.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/Compute_Oracle/am_sml.ML
-    ID:         $Id$
     Author:     Steven Obua
 
     ToDO: "parameterless rewrite cannot be used in pattern": In a lot of cases it CAN be used, and these cases should be handled properly; 
--- a/src/Tools/Compute_Oracle/report.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Compute_Oracle/report.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -13,7 +13,7 @@
     let
 	val t1 = start_timing ()
 	val x = f ()
-	val t2 = end_timing t1
+	val t2 = #message (end_timing t1)
 	val _ = writeln ((report_space ()) ^ "--> "^t2)
     in
 	x	
--- a/src/Tools/IsaPlanner/README	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/IsaPlanner/README	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,3 @@
-ID:         $Id$
 Author:     Lucas Dixon, University of Edinburgh
 
 Support files for IsaPlanner (see http://isaplanner.sourceforge.net).
--- a/src/Tools/IsaPlanner/rw_tools.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/IsaPlanner/rw_tools.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/IsaPlanner/rw_tools.ML
-    ID:		$Id$
     Author:     Lucas Dixon, University of Edinburgh
 
 Term related tools used for rewriting.
--- a/src/Tools/IsaPlanner/zipper.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/IsaPlanner/zipper.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/IsaPlanner/zipper.ML
-    ID:		$Id$
     Author:     Lucas Dixon, University of Edinburgh
 
 A notion roughly based on Huet's Zippers for Isabelle terms.
--- a/src/Tools/Metis/make-metis	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Metis/make-metis	Mon Mar 02 08:15:54 2009 +0100
@@ -1,7 +1,5 @@
 #!/usr/bin/env bash
 #
-# $Id$
-#
 # make-metis - turn original Metis files into Isabelle ML source.
 #
 # Structure declarations etc. are made local by wrapping into a
@@ -11,8 +9,6 @@
 THIS=$(cd "$(dirname "$0")"; echo $PWD)
 
 (
-  echo -n '(* $'
-  echo 'Id$ *)'
   cat <<EOF
 (******************************************************************)
 (* GENERATED FILE -- DO NOT EDIT -- GENERATED FILE -- DO NOT EDIT *)
--- a/src/Tools/Metis/metis.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/Metis/metis.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,4 +1,3 @@
-(* $Id$ *)
 (******************************************************************)
 (* GENERATED FILE -- DO NOT EDIT -- GENERATED FILE -- DO NOT EDIT *)
 (* GENERATED FILE -- DO NOT EDIT -- GENERATED FILE -- DO NOT EDIT *)
--- a/src/Tools/README	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/README	Mon Mar 02 08:15:54 2009 +0100
@@ -4,5 +4,3 @@
 This directory contains ML sources of generic tools.  Typically, they
 can be applied to various logics.
 
-
-$Id$
--- a/src/Tools/atomize_elim.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/atomize_elim.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/atomize_elim.ML
-    ID:         $Id$
     Author:     Alexander Krauss, TU Muenchen
 
 Turn elimination rules into atomic expressions in the object logic.
--- a/src/Tools/auto_solve.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/auto_solve.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,89 +1,91 @@
-(*  Title:      auto_solve.ML
+(*  Title:      Tools/auto_solve.ML
     Author:     Timothy Bourke and Gerwin Klein, NICTA
 
-    Check whether a newly stated theorem can be solved directly
-    by an existing theorem. Duplicate lemmas can be detected in
-    this way.
+Check whether a newly stated theorem can be solved directly by an
+existing theorem. Duplicate lemmas can be detected in this way.
 
-    The implemenation is based in part on Berghofer and
-    Haftmann's Pure/codegen.ML. It relies critically on
-    the FindTheorems solves feature.
+The implementation is based in part on Berghofer and Haftmann's
+Pure/codegen.ML. It relies critically on the FindTheorems solves
+feature.
 *)
 
 signature AUTO_SOLVE =
 sig
-  val auto : bool ref;
-  val auto_time_limit : int ref;
+  val auto : bool ref
+  val auto_time_limit : int ref
 
-  val seek_solution : bool -> Proof.state -> Proof.state;
+  val seek_solution : bool -> Proof.state -> Proof.state
 end;
 
 structure AutoSolve : AUTO_SOLVE =
 struct
-  structure FT = FindTheorems;
 
-  val auto = ref false;
-  val auto_time_limit = ref 2500;
+val auto = ref false;
+val auto_time_limit = ref 2500;
 
-  fun seek_solution int state = let
-      val ctxt = Proof.context_of state;
+fun seek_solution int state =
+  let
+    val ctxt = Proof.context_of state;
 
-      fun conj_to_list [] = []
-        | conj_to_list (t::ts) =
-          (Conjunction.dest_conjunction t
-           |> (fn (t1, t2) => conj_to_list (t1::t2::ts)))
-          handle TERM _ => t::conj_to_list ts;
+    fun conj_to_list [] = []
+      | conj_to_list (t::ts) =
+        (Conjunction.dest_conjunction t
+         |> (fn (t1, t2) => conj_to_list (t1::t2::ts)))
+        handle TERM _ => t::conj_to_list ts;
 
-      val crits = [(true, FT.Solves)];
-      fun find g = (NONE, FT.find_theorems ctxt g true crits);
-      fun find_cterm g = (SOME g, FT.find_theorems ctxt
-                                    (SOME (Goal.init g)) true crits);
+    val crits = [(true, FindTheorems.Solves)];
+    fun find g = (NONE, FindTheorems.find_theorems ctxt g true crits);
+    fun find_cterm g = (SOME g, FindTheorems.find_theorems ctxt
+                                  (SOME (Goal.init g)) true crits);
 
-      fun prt_result (goal, results) = let
-          val msg = case goal of
-                      NONE => "The current goal"
-                    | SOME g => Syntax.string_of_term ctxt (term_of g);
-        in
-          Pretty.big_list (msg ^ " could be solved directly with:")
-                          (map Display.pretty_fact results)
-        end;
+    fun prt_result (goal, results) =
+      let
+        val msg = case goal of
+                    NONE => "The current goal"
+                  | SOME g => Syntax.string_of_term ctxt (term_of g);
+      in
+        Pretty.big_list (msg ^ " could be solved directly with:")
+                        (map (FindTheorems.pretty_thm ctxt) results)
+      end;
 
-      fun seek_against_goal () = let
-          val goal = try Proof.get_goal state
-                     |> Option.map (#2 o #2);
+    fun seek_against_goal () =
+      let
+        val goal = try Proof.get_goal state
+                   |> Option.map (#2 o #2);
 
-          val goals = goal
-                      |> Option.map (fn g => cprem_of g 1)
-                      |> the_list
-                      |> conj_to_list;
+        val goals = goal
+                    |> Option.map (fn g => cprem_of g 1)
+                    |> the_list
+                    |> conj_to_list;
 
-          val rs = if length goals = 1
-                   then [find goal]
-                   else map find_cterm goals;
-          val frs = filter_out (null o snd) rs;
+        val rs = if length goals = 1
+                 then [find goal]
+                 else map find_cterm goals;
+        val frs = filter_out (null o snd) rs;
 
-        in if null frs then NONE else SOME frs end;
+      in if null frs then NONE else SOME frs end;
 
-      fun go () = let
-          val res = TimeLimit.timeLimit
-                      (Time.fromMilliseconds (!auto_time_limit))
-                      (try seek_against_goal) ();
-        in
-          case Option.join res of
-            NONE => state
-          | SOME results => (Proof.goal_message
-                              (fn () => Pretty.chunks [Pretty.str "",
-                                Pretty.markup Markup.hilite
-                                (Library.separate (Pretty.brk 0)
-                                                  (map prt_result results))])
-                                state)
-        end handle TimeLimit.TimeOut => (warning "AutoSolve: timeout."; state);
-    in
-      if int andalso !auto andalso not (!Toplevel.quiet)
-      then go ()
-      else state
-    end;
-    
+    fun go () =
+      let
+        val res = TimeLimit.timeLimit
+                    (Time.fromMilliseconds (! auto_time_limit))
+                    (try seek_against_goal) ();
+      in
+        case Option.join res of
+          NONE => state
+        | SOME results => (Proof.goal_message
+                            (fn () => Pretty.chunks [Pretty.str "",
+                              Pretty.markup Markup.hilite
+                              (Library.separate (Pretty.brk 0)
+                                                (map prt_result results))])
+                              state)
+      end handle TimeLimit.TimeOut => (warning "AutoSolve: timeout."; state);
+  in
+    if int andalso ! auto andalso not (! Toplevel.quiet)
+    then go ()
+    else state
+  end;
+
 end;
 
 val _ = Context.>> (Specification.add_theorem_hook AutoSolve.seek_solution);
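
The cleaned-up AutoSolve structure is configured through the two references in
its signature; the hook only runs interactively when auto is set. For example,
from the ML toplevel (a usage sketch):

  AutoSolve.auto := true;              (* enable checking of newly stated theorems *)
  AutoSolve.auto_time_limit := 1000;   (* time limit per check, in milliseconds *)
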
--- a/src/Tools/code/code_haskell.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/code/code_haskell.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/code/code_haskell.ML
-    ID:         $Id$
     Author:     Florian Haftmann, TU Muenchen
 
 Serializer for Haskell.
--- a/src/Tools/code/code_name.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/code/code_name.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/code/code_name.ML
-    ID:         $Id$
     Author:     Florian Haftmann, TU Muenchen
 
 Some code generator infrastructure concerning names.
--- a/src/Tools/code/code_printer.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/code/code_printer.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/code/code_printer.ML
-    ID:         $Id$
     Author:     Florian Haftmann, TU Muenchen
 
 Generic operations for pretty printing of target language code.
--- a/src/Tools/code/code_target.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/code/code_target.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/code/code_target.ML
-    ID:         $Id$
     Author:     Florian Haftmann, TU Muenchen
 
 Serializer from intermediate language ("Thin-gol") to target languages.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/coherent.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,233 @@
+(*  Title:      Tools/coherent.ML
+    Author:     Stefan Berghofer, TU Muenchen
+    Author:     Marc Bezem, Institutt for Informatikk, Universitetet i Bergen 
+
+Prover for coherent logic, see e.g.
+
+  Marc Bezem and Thierry Coquand, Automating Coherent Logic, LPAR 2005
+
+for a description of the algorithm.
+*)
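+
+(* Editorial sketch (not part of the original sources): coherent rules are
+   iterated implications of atomic formulas whose conclusion is a disjunction
+   of (possibly existentially quantified) conjunctions of atoms, e.g., in
+   HOL-style syntax,
+
+     "R x y ==> R y z ==> R x z"
+     "A x ==> (EX y. B x y) | C x"
+
+   Rules of this shape can be given as arguments to the "coherent" method
+   set up at the end of this file. *)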
+
+signature COHERENT_DATA =
+sig
+  val atomize_elimL: thm
+  val atomize_exL: thm
+  val atomize_conjL: thm
+  val atomize_disjL: thm
+  val operator_names: string list
+end;
+
+signature COHERENT =
+sig
+  val verbose: bool ref
+  val show_facts: bool ref
+  val coherent_tac: thm list -> Proof.context -> int -> tactic
+  val coherent_meth: thm list -> Proof.context -> Proof.method
+  val setup: theory -> theory
+end;
+
+functor CoherentFun(Data: COHERENT_DATA) : COHERENT =
+struct
+
+val verbose = ref false;
+
+fun message f = if !verbose then tracing (f ()) else ();
+
+datatype cl_prf =
+  ClPrf of thm * (Type.tyenv * Envir.tenv) * ((indexname * typ) * term) list *
+  int list * (term list * cl_prf) list;
+
+val is_atomic = not o exists_Const (member (op =) Data.operator_names o #1);
+
+local open Conv in
+
+fun rulify_elim_conv ct =
+  if is_atomic (Logic.strip_imp_concl (term_of ct)) then all_conv ct
+  else concl_conv (length (Logic.strip_imp_prems (term_of ct)))
+    (rewr_conv (symmetric Data.atomize_elimL) then_conv
+     MetaSimplifier.rewrite true (map symmetric
+       [Data.atomize_exL, Data.atomize_conjL, Data.atomize_disjL])) ct
+
+end;
+
+fun rulify_elim th = MetaSimplifier.norm_hhf (Conv.fconv_rule rulify_elim_conv th);
+
+(* Decompose elimination rule of the form
+   A1 ==> ... ==> Am ==> (!!xs1. Bs1 ==> P) ==> ... ==> (!!xsn. Bsn ==> P) ==> P
+*)
+fun dest_elim prop =
+  let
+    val prems = Logic.strip_imp_prems prop;
+    val concl = Logic.strip_imp_concl prop;
+    val (prems1, prems2) =
+      take_suffix (fn t => Logic.strip_assums_concl t = concl) prems;
+  in
+    (prems1,
+     if null prems2 then [([], [concl])]
+     else map (fn t =>
+       (map snd (Logic.strip_params t), Logic.strip_assums_hyp t)) prems2)
+  end;
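+
+(* Editorial illustration: for an elimination rule of the disjE shape
+     "P | Q ==> (P ==> R) ==> (Q ==> R) ==> R"
+   dest_elim returns the non-case premises [P | Q] together with the case
+   list [([], [P]), ([], [Q])]; cases with !!-bound parameters contribute
+   non-empty parameter type lists. *)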
+
+fun mk_rule th =
+  let
+    val th' = rulify_elim th;
+    val (prems, cases) = dest_elim (prop_of th')
+  in (th', prems, cases) end;
+
+fun mk_dom ts = fold (fn t =>
+  Typtab.map_default (fastype_of t, []) (fn us => us @ [t])) ts Typtab.empty;
+
+val empty_env = (Vartab.empty, Vartab.empty);
+
+(* Find matcher that makes conjunction valid in given state *)
+fun valid_conj ctxt facts env [] = Seq.single (env, [])
+  | valid_conj ctxt facts env (t :: ts) =
+      Seq.maps (fn (u, x) => Seq.map (apsnd (cons x))
+        (valid_conj ctxt facts
+           (Pattern.match (ProofContext.theory_of ctxt) (t, u) env) ts
+         handle Pattern.MATCH => Seq.empty))
+          (Seq.of_list (sort (int_ord o pairself snd) (Net.unify_term facts t)));
+
+(* Instantiate variables that only occur free in the conclusion *)
+fun inst_extra_vars ctxt dom cs =
+  let
+    val vs = fold Term.add_vars (maps snd cs) [];
+    fun insts [] inst = Seq.single inst
+      | insts ((ixn, T) :: vs') inst = Seq.maps
+          (fn t => insts vs' (((ixn, T), t) :: inst))
+          (Seq.of_list (case Typtab.lookup dom T of
+             NONE => error ("Unknown domain: " ^
+               Syntax.string_of_typ ctxt T ^ "\nfor term(s) " ^
+               commas (maps (map (Syntax.string_of_term ctxt) o snd) cs))
+           | SOME ts => ts))
+  in Seq.map (fn inst =>
+    (inst, map (apsnd (map (subst_Vars (map (apfst fst) inst)))) cs))
+      (insts vs [])
+  end;
+
+(* Check whether disjunction is valid in given state *)
+fun is_valid_disj ctxt facts [] = false
+  | is_valid_disj ctxt facts ((Ts, ts) :: ds) =
+      let val vs = rev (map_index (fn (i, T) => Var (("x", i), T)) Ts)
+      in case Seq.pull (valid_conj ctxt facts empty_env
+        (map (fn t => subst_bounds (vs, t)) ts)) of
+          SOME _ => true
+        | NONE => is_valid_disj ctxt facts ds
+      end;
+
+val show_facts = ref false;
+
+fun string_of_facts ctxt s facts = space_implode "\n"
+  (s :: map (Syntax.string_of_term ctxt)
+     (map fst (sort (int_ord o pairself snd) (Net.content facts)))) ^ "\n\n";
+
+fun print_facts ctxt facts =
+  if !show_facts then message (fn () => string_of_facts ctxt "Facts:" facts)
+  else ();
+
+fun valid ctxt rules goal dom facts nfacts nparams =
+  let val seq = Seq.of_list rules |> Seq.maps (fn (th, ps, cs) =>
+    valid_conj ctxt facts empty_env ps |> Seq.maps (fn (env as (tye, _), is) =>
+      let val cs' = map (fn (Ts, ts) =>
+        (map (Envir.typ_subst_TVars tye) Ts, map (Envir.subst_vars env) ts)) cs
+      in
+        inst_extra_vars ctxt dom cs' |>
+          Seq.map_filter (fn (inst, cs'') =>
+            if is_valid_disj ctxt facts cs'' then NONE
+            else SOME (th, env, inst, is, cs''))
+      end))
+  in
+    case Seq.pull seq of
+      NONE => (tracing (string_of_facts ctxt "Countermodel found:" facts); NONE)
+    | SOME ((th, env, inst, is, cs), _) =>
+        if cs = [([], [goal])] then SOME (ClPrf (th, env, inst, is, []))
+        else
+          (case valid_cases ctxt rules goal dom facts nfacts nparams cs of
+             NONE => NONE
+           | SOME prfs => SOME (ClPrf (th, env, inst, is, prfs)))
+  end
+
+and valid_cases ctxt rules goal dom facts nfacts nparams [] = SOME []
+  | valid_cases ctxt rules goal dom facts nfacts nparams ((Ts, ts) :: ds) =
+      let
+        val _ = message (fn () => "case " ^ commas (map (Syntax.string_of_term ctxt) ts));
+        val params = rev (map_index (fn (i, T) =>
+          Free ("par" ^ string_of_int (nparams + i), T)) Ts);
+        val ts' = map_index (fn (i, t) =>
+          (subst_bounds (params, t), nfacts + i)) ts;
+        val dom' = fold (fn (T, p) =>
+          Typtab.map_default (T, []) (fn ps => ps @ [p]))
+            (Ts ~~ params) dom;
+        val facts' = fold (fn (t, i) => Net.insert_term op =
+          (t, (t, i))) ts' facts
+      in
+        case valid ctxt rules goal dom' facts'
+          (nfacts + length ts) (nparams + length Ts) of
+          NONE => NONE
+        | SOME prf => (case valid_cases ctxt rules goal dom facts nfacts nparams ds of
+            NONE => NONE
+          | SOME prfs => SOME ((params, prf) :: prfs))
+      end;
+
+(** proof replaying **)
+
+fun thm_of_cl_prf thy goal asms (ClPrf (th, (tye, env), insts, is, prfs)) =
+  let
+    val _ = message (fn () => space_implode "\n"
+      ("asms:" :: map Display.string_of_thm asms) ^ "\n\n");
+    val th' = Drule.implies_elim_list
+      (Thm.instantiate
+         (map (fn (ixn, (S, T)) =>
+            (Thm.ctyp_of thy (TVar ((ixn, S))), Thm.ctyp_of thy T))
+               (Vartab.dest tye),
+          map (fn (ixn, (T, t)) =>
+            (Thm.cterm_of thy (Var (ixn, Envir.typ_subst_TVars tye T)),
+             Thm.cterm_of thy t)) (Vartab.dest env) @
+          map (fn (ixnT, t) =>
+            (Thm.cterm_of thy (Var ixnT), Thm.cterm_of thy t)) insts) th)
+      (map (nth asms) is);
+    val (_, cases) = dest_elim (prop_of th')
+  in
+    case (cases, prfs) of
+      ([([], [_])], []) => th'
+    | ([([], [_])], [([], prf)]) => thm_of_cl_prf thy goal (asms @ [th']) prf
+    | _ => Drule.implies_elim_list
+        (Thm.instantiate (Thm.match
+           (Drule.strip_imp_concl (cprop_of th'), goal)) th')
+        (map (thm_of_case_prf thy goal asms) (prfs ~~ cases))
+  end
+
+and thm_of_case_prf thy goal asms ((params, prf), (_, asms')) =
+  let
+    val cparams = map (cterm_of thy) params;
+    val asms'' = map (cterm_of thy o curry subst_bounds (rev params)) asms'
+  in
+    Drule.forall_intr_list cparams (Drule.implies_intr_list asms''
+      (thm_of_cl_prf thy goal (asms @ map Thm.assume asms'') prf))
+  end;
+
+
+(** external interface **)
+
+fun coherent_tac rules ctxt = SUBPROOF (fn {prems, concl, params, context, ...} =>
+  rtac (rulify_elim_conv concl RS equal_elim_rule2) 1 THEN
+  SUBPROOF (fn {prems = prems', concl, context, ...} =>
+    let val xs = map term_of params @
+      map (fn (_, s) => Free (s, the (Variable.default_type context s)))
+        (Variable.fixes_of context)
+    in
+      case valid context (map mk_rule (prems' @ prems @ rules)) (term_of concl)
+           (mk_dom xs) Net.empty 0 0 of
+         NONE => no_tac
+       | SOME prf =>
+           rtac (thm_of_cl_prf (ProofContext.theory_of context) concl [] prf) 1
+    end) context 1) ctxt;
+
+fun coherent_meth rules ctxt =
+  Method.METHOD (fn facts => coherent_tac (facts @ rules) ctxt 1);
+
+val setup = Method.add_method
+  ("coherent", Method.thms_ctxt_args coherent_meth, "prove coherent formula");
+
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/eqsubst.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,575 @@
+(*  Title:      Tools/eqsubst.ML
+    Author:     Lucas Dixon, University of Edinburgh
+
+A proof method to perform a substitution using an equation.
+*)
+
+signature EQSUBST =
+sig
+  (* a type abbreviation for match information *)
+  type match =
+       ((indexname * (sort * typ)) list (* type instantiations *)
+        * (indexname * (typ * term)) list) (* term instantiations *)
+       * (string * typ) list (* fake named type abs env *)
+       * (string * typ) list (* type abs env *)
+       * term (* outer term *)
+
+  type searchinfo =
+       theory
+       * int (* maxidx *)
+       * Zipper.T (* focusterm to search under *)
+
+    exception eqsubst_occL_exp of
+       string * int list * Thm.thm list * int * Thm.thm
+    
+    (* low level substitution functions *)
+    val apply_subst_in_asm :
+       int ->
+       Thm.thm ->
+       Thm.thm ->
+       (Thm.cterm list * int * 'a * Thm.thm) * match -> Thm.thm Seq.seq
+    val apply_subst_in_concl :
+       int ->
+       Thm.thm ->
+       Thm.cterm list * Thm.thm ->
+       Thm.thm -> match -> Thm.thm Seq.seq
+
+    (* matching/unification within zippers *)
+    val clean_match_z :
+       Context.theory -> Term.term -> Zipper.T -> match option
+    val clean_unify_z :
+       Context.theory -> int -> Term.term -> Zipper.T -> match Seq.seq
+
+    (* skipping things in seq seq's *)
+
+   (* skip over non-empty sub-sequences; when we reach the end of the seq,
+      remember how much is left to skip. *)
+    datatype 'a skipseq = SkipMore of int
+      | SkipSeq of 'a Seq.seq Seq.seq;
+
+    val skip_first_asm_occs_search :
+       ('a -> 'b -> 'c Seq.seq Seq.seq) ->
+       'a -> int -> 'b -> 'c skipseq
+    val skip_first_occs_search :
+       int -> ('a -> 'b -> 'c Seq.seq Seq.seq) -> 'a -> 'b -> 'c Seq.seq
+    val skipto_skipseq : int -> 'a Seq.seq Seq.seq -> 'a skipseq
+
+    (* tactics *)
+    val eqsubst_asm_tac :
+       Proof.context ->
+       int list -> Thm.thm list -> int -> Thm.thm -> Thm.thm Seq.seq
+    val eqsubst_asm_tac' :
+       Proof.context ->
+       (searchinfo -> int -> Term.term -> match skipseq) ->
+       int -> Thm.thm -> int -> Thm.thm -> Thm.thm Seq.seq
+    val eqsubst_tac :
+       Proof.context ->
+       int list -> (* list of occurrences to rewrite, use [0] for any *)
+       Thm.thm list -> int -> Thm.thm -> Thm.thm Seq.seq
+    val eqsubst_tac' :
+       Proof.context -> (* proof context *)
+       (searchinfo -> Term.term -> match Seq.seq) (* search function *)
+       -> Thm.thm (* equation theorem to rewrite with *)
+       -> int (* subgoal number in goal theorem *)
+       -> Thm.thm (* goal theorem *)
+       -> Thm.thm Seq.seq (* rewritten goal theorem *)
+
+
+    val fakefree_badbounds :
+       (string * Term.typ) list ->
+       Term.term ->
+       (string * Term.typ) list * (string * Term.typ) list * Term.term
+
+    val mk_foo_match :
+       (Term.term -> Term.term) ->
+       ('a * Term.typ) list -> Term.term -> Term.term
+
+    (* preparing substitution *)
+    val prep_meta_eq : Proof.context -> Thm.thm -> Thm.thm list
+    val prep_concl_subst :
+       int -> Thm.thm -> (Thm.cterm list * Thm.thm) * searchinfo
+    val prep_subst_in_asm :
+       int -> Thm.thm -> int ->
+       (Thm.cterm list * int * int * Thm.thm) * searchinfo
+    val prep_subst_in_asms :
+       int -> Thm.thm ->
+       ((Thm.cterm list * int * int * Thm.thm) * searchinfo) list
+    val prep_zipper_match :
+       Zipper.T -> Term.term * ((string * Term.typ) list * (string * Term.typ) list * Term.term)
+
+    (* search for substitutions *)
+    val valid_match_start : Zipper.T -> bool
+    val search_lr_all : Zipper.T -> Zipper.T Seq.seq
+    val search_lr_valid : (Zipper.T -> bool) -> Zipper.T -> Zipper.T Seq.seq
+    val searchf_lr_unify_all :
+       searchinfo -> Term.term -> match Seq.seq Seq.seq
+    val searchf_lr_unify_valid :
+       searchinfo -> Term.term -> match Seq.seq Seq.seq
+    val searchf_bt_unify_valid :
+       searchinfo -> Term.term -> match Seq.seq Seq.seq
+
+    (* syntax tools *)
+    val ith_syntax : Args.T list -> int list * Args.T list
+    val options_syntax : Args.T list -> bool * Args.T list
+
+    (* Isar level hooks *)
+    val eqsubst_asm_meth : Proof.context -> int list -> Thm.thm list -> Proof.method
+    val eqsubst_meth : Proof.context -> int list -> Thm.thm list -> Proof.method
+    val subst_meth : Method.src -> Proof.context -> Proof.method
+    val setup : theory -> theory
+
+end;
+
+structure EqSubst
+: EQSUBST
+= struct
+
+structure Z = Zipper;
+
+(* changes object "=" to meta "==" which prepares a given rewrite rule *)
+fun prep_meta_eq ctxt =
+  let val (_, {mk_rews = {mk, ...}, ...}) = Simplifier.rep_ss (Simplifier.local_simpset_of ctxt)
+  in mk #> map Drule.zero_var_indexes end;
+
+
+  (* a type abbreviation for match information *)
+  type match =
+       ((indexname * (sort * typ)) list (* type instantiations *)
+        * (indexname * (typ * term)) list) (* term instantiations *)
+       * (string * typ) list (* fake named type abs env *)
+       * (string * typ) list (* type abs env *)
+       * term (* outer term *)
+
+  type searchinfo =
+       theory
+       * int (* maxidx *)
+       * Zipper.T (* focusterm to search under *)
+
+
+(* skip over non-empty sub-sequences; when we reach the end of the seq,
+   remember how much is left to skip. *)
+datatype 'a skipseq = SkipMore of int
+  | SkipSeq of 'a Seq.seq Seq.seq;
+(* given a seqseq, skip the first m non-empty seqs, noting any remaining deficit *)
+fun skipto_skipseq m s = 
+    let 
+      fun skip_occs n sq = 
+          case Seq.pull sq of 
+            NONE => SkipMore n
+          | SOME (h,t) => 
+            (case Seq.pull h of NONE => skip_occs n t
+             | SOME _ => if n <= 1 then SkipSeq (Seq.cons h t)
+                         else skip_occs (n - 1) t)
+    in (skip_occs m s) end;
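+
+(* For example (editorial note): applied to the sequence of sequences
+   [[a], [], [b, c], [d]], "skipto_skipseq 2" skips the leading [a] (ignoring
+   the empty sequence) and yields SkipSeq [[b, c], [d]]; "skipto_skipseq 3"
+   applied to [[a], [b]] runs out of occurrences and yields SkipMore 1. *)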
+
+(* note: outerterm is the target with the match replaced by a bound
+         variable, i.e. "P lhs" becomes "%x. P x";
+         insts is the instantiations of the vars in the lhs
+         and typinsts is the type instantiations of the types in the lhs.
+         Note: the final rule is the rule lifted into the context of the
+         target thm. *)
+fun mk_foo_match mkuptermfunc Ts t = 
+    let 
+      val ty = Term.type_of t
+      val bigtype = (rev (map snd Ts)) ---> ty
+      fun mk_foo 0 t = t
+        | mk_foo i t = mk_foo (i - 1) (t $ (Bound (i - 1)))
+      val num_of_bnds = (length Ts)
+      (* foo_term = "fooabs y0 ... yn" where y's are local bounds *)
+      val foo_term = mk_foo num_of_bnds (Bound num_of_bnds)
+    in Abs("fooabs", bigtype, mkuptermfunc foo_term) end;
+
+(* T is outer bound vars, n is number of locally bound vars *)
+(* THINK: is order of Ts correct...? or reversed? *)
+fun fakefree_badbounds Ts t = 
+    let val (FakeTs,Ts,newnames) = 
+            List.foldr (fn ((n,ty),(FakeTs,Ts,usednames)) => 
+                           let val newname = Name.variant usednames n
+                           in ((RWTools.mk_fake_bound_name newname,ty)::FakeTs,
+                               (newname,ty)::Ts, 
+                               newname::usednames) end)
+                       ([],[],[])
+                       Ts
+    in (FakeTs, Ts, Term.subst_bounds (map Free FakeTs, t)) end;
+
+(* before matching we need to fake the bound vars that are missing an
+abstraction. In this function we additionally construct the
+abstraction environment, and an outer context term (with the focus
+abstracted out) for use in rewriting with RWInst.rw *)
+fun prep_zipper_match z = 
+    let 
+      val t = Z.trm z  
+      val c = Z.ctxt z
+      val Ts = Z.C.nty_ctxt c
+      val (FakeTs', Ts', t') = fakefree_badbounds Ts t
+      val absterm = mk_foo_match (Z.C.apply c) Ts' t'
+    in
+      (t', (FakeTs', Ts', absterm))
+    end;
+
+(* Matching and Unification with exception handled *)
+fun clean_match thy (a as (pat, t)) =
+  let val (tyenv, tenv) = Pattern.match thy a (Vartab.empty, Vartab.empty)
+  in SOME (Vartab.dest tyenv, Vartab.dest tenv)
+  end handle Pattern.MATCH => NONE;
+
+(* given theory, max var index, pat, tgt; returns Seq of instantiations *)
+fun clean_unify thry ix (a as (pat, tgt)) =
+    let
+      (* type info will be re-derived, maybe this can be cached
+         for efficiency? *)
+      val pat_ty = Term.type_of pat;
+      val tgt_ty = Term.type_of tgt;
+      (* is it OK to ignore the type instantiation info?
+         or should I be using it? *)
+      val typs_unify =
+          SOME (Sign.typ_unify thry (pat_ty, tgt_ty) (Vartab.empty, ix))
+            handle Type.TUNIFY => NONE;
+    in
+      case typs_unify of
+        SOME (typinsttab, ix2) =>
+        let
+      (* is it right to throw away the flexes?
+         or should I be using them somehow? *)
+          fun mk_insts env =
+            (Vartab.dest (Envir.type_env env),
+             Envir.alist_of env);
+          val initenv = Envir.Envir {asol = Vartab.empty,
+                                     iTs = typinsttab, maxidx = ix2};
+          val useq = Unify.smash_unifiers thry [a] initenv
+	            handle UnequalLengths => Seq.empty
+		               | Term.TERM _ => Seq.empty;
+          fun clean_unify' useq () =
+              (case (Seq.pull useq) of
+                 NONE => NONE
+               | SOME (h,t) => SOME (mk_insts h, Seq.make (clean_unify' t)))
+	            handle UnequalLengths => NONE
+                   | Term.TERM _ => NONE
+        in
+          (Seq.make (clean_unify' useq))
+        end
+      | NONE => Seq.empty
+    end;
+
+(* Matching and Unification for zippers *)
+(* Note: Ts is a modified version of the original names of the outer
+bound variables. New names have been introduced to make sure they are
+unique w.r.t all names in the term and each other. usednames' is
+oldnames + new names. *)
+fun clean_match_z thy pat z = 
+    let val (t, (FakeTs,Ts,absterm)) = prep_zipper_match z in
+      case clean_match thy (pat, t) of 
+        NONE => NONE 
+      | SOME insts => SOME (insts, FakeTs, Ts, absterm) end;
+(* ix = max var index *)
+fun clean_unify_z sgn ix pat z = 
+    let val (t, (FakeTs, Ts,absterm)) = prep_zipper_match z in
+    Seq.map (fn insts => (insts, FakeTs, Ts, absterm)) 
+            (clean_unify sgn ix (t, pat)) end;
+
+
+(* FOR DEBUGGING...
+type trace_subst_errT = int (* subgoal *)
+        * thm (* thm with all goals *)
+        * (Thm.cterm list (* certified free var placeholders for vars *)
+           * thm)  (* trivial thm of goal concl *)
+            (* possible matches/unifiers *)
+        * thm (* rule *)
+        * (((indexname * typ) list (* type instantiations *)
+              * (indexname * term) list ) (* term instantiations *)
+             * (string * typ) list (* Type abs env *)
+             * term) (* outer term *);
+
+val trace_subst_err = (ref NONE : trace_subst_errT option ref);
+val trace_subst_search = ref false;
+exception trace_subst_exp of trace_subst_errT;
+*)
+
+
+fun bot_left_leaf_of (l $ r) = bot_left_leaf_of l
+  | bot_left_leaf_of (Abs(s,ty,t)) = bot_left_leaf_of t
+  | bot_left_leaf_of x = x;
+
+(* Avoid considering replacing terms which have a var at the head as
+   they always succeed trivially, and uninterestingly. *)
+fun valid_match_start z =
+    (case bot_left_leaf_of (Z.trm z) of 
+      Var _ => false 
+      | _ => true);
+
+(* search from top, left to right, then down *)
+val search_lr_all = ZipperSearch.all_bl_ur;
+
+(* search from top, left to right, then down *)
+fun search_lr_valid validf =
+    let 
+      fun sf_valid_td_lr z = 
+          let val here = if validf z then [Z.Here z] else [] in
+            case Z.trm z 
+             of _ $ _ => [Z.LookIn (Z.move_down_left z)] 
+                         @ here 
+                         @ [Z.LookIn (Z.move_down_right z)]
+              | Abs _ => here @ [Z.LookIn (Z.move_down_abs z)]
+              | _ => here
+          end;
+    in Z.lzy_search sf_valid_td_lr end;
+
+(* search from bottom to top, left to right *)
+
+fun search_bt_valid validf =
+    let 
+      fun sf_valid_td_lr z = 
+          let val here = if validf z then [Z.Here z] else [] in
+            case Z.trm z 
+             of _ $ _ => [Z.LookIn (Z.move_down_left z), 
+                          Z.LookIn (Z.move_down_right z)] @ here
+              | Abs _ => [Z.LookIn (Z.move_down_abs z)] @ here
+              | _ => here
+          end;
+    in Z.lzy_search sf_valid_td_lr end;
+
+fun searchf_unify_gen f (sgn, maxidx, z) lhs =
+    Seq.map (clean_unify_z sgn maxidx lhs) 
+            (Z.limit_apply f z);
+
+(* search all unifications *)
+val searchf_lr_unify_all =
+    searchf_unify_gen search_lr_all;
+
+(* search only for 'valid' unifiers (non abs subterms and non vars) *)
+val searchf_lr_unify_valid = 
+    searchf_unify_gen (search_lr_valid valid_match_start);
+
+val searchf_bt_unify_valid =
+    searchf_unify_gen (search_bt_valid valid_match_start);
+
+(* apply a substitution in the conclusion of the theorem th *)
+(* cfvs are certified free var placeholders for goal params *)
+(* conclthm is a theorem for just the conclusion *)
+(* m is instantiation/match information *)
+(* rule is the equation for substitution *)
+fun apply_subst_in_concl i th (cfvs, conclthm) rule m =
+    (RWInst.rw m rule conclthm)
+      |> IsaND.unfix_frees cfvs
+      |> RWInst.beta_eta_contract
+      |> (fn r => Tactic.rtac r i th);
+
+(* substitute within the conclusion of goal i of gth, using a meta
+equation rule. Note that we assume the rule has its var indices zeroed *)
+fun prep_concl_subst i gth =
+    let
+      val th = Thm.incr_indexes 1 gth;
+      val tgt_term = Thm.prop_of th;
+
+      val sgn = Thm.theory_of_thm th;
+      val ctermify = Thm.cterm_of sgn;
+      val trivify = Thm.trivial o ctermify;
+
+      val (fixedbody, fvs) = IsaND.fix_alls_term i tgt_term;
+      val cfvs = rev (map ctermify fvs);
+
+      val conclterm = Logic.strip_imp_concl fixedbody;
+      val conclthm = trivify conclterm;
+      val maxidx = Thm.maxidx_of th;
+      val ft = ((Z.move_down_right (* ==> *)
+                 o Z.move_down_left (* Trueprop *)
+                 o Z.mktop
+                 o Thm.prop_of) conclthm)
+    in
+      ((cfvs, conclthm), (sgn, maxidx, ft))
+    end;
+
+(* substitute using an object or meta level equality *)
+fun eqsubst_tac' ctxt searchf instepthm i th =
+    let
+      val (cvfsconclthm, searchinfo) = prep_concl_subst i th;
+      val stepthms = Seq.of_list (prep_meta_eq ctxt instepthm);
+      fun rewrite_with_thm r =
+          let val (lhs,_) = Logic.dest_equals (Thm.concl_of r);
+          in searchf searchinfo lhs
+             |> Seq.maps (apply_subst_in_concl i th cvfsconclthm r) end;
+    in stepthms |> Seq.maps rewrite_with_thm end;
+
+
+(* distinct subgoals *)
+fun distinct_subgoals th =
+  the_default th (SINGLE distinct_subgoals_tac th);
+
+(* General substitution of multiple occurrences using one of
+   the given theorems *)
+
+
+exception eqsubst_occL_exp of
+          string * (int list) * (thm list) * int * thm;
+fun skip_first_occs_search occ srchf sinfo lhs =
+    case (skipto_skipseq occ (srchf sinfo lhs)) of
+      SkipMore _ => Seq.empty
+    | SkipSeq ss => Seq.flat ss;
+
+(* occL is a list of integers indicating which occurrences,
+w.r.t. the search order, to rewrite. Backtracking will also find later
+occurrences, but all earlier ones are skipped. Thus you can use [0] to
+just find all rewrites. *)
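+
+(* For instance (editorial note): occL = [0] enumerates every occurrence on
+   backtracking, while occL = [3] skips the first two occurrences in the
+   search order and rewrites the third (and later ones on backtracking). *)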
+
+fun eqsubst_tac ctxt occL thms i th =
+    let val nprems = Thm.nprems_of th in
+      if nprems < i then Seq.empty else
+      let val thmseq = (Seq.of_list thms)
+        fun apply_occ occ th =
+            thmseq |> Seq.maps
+                    (fn r => eqsubst_tac' 
+                               ctxt 
+                               (skip_first_occs_search
+                                  occ searchf_lr_unify_valid) r
+                                 (i + ((Thm.nprems_of th) - nprems))
+                                 th);
+        val sortedoccL =
+            Library.sort (Library.rev_order o Library.int_ord) occL;
+      in
+        Seq.map distinct_subgoals (Seq.EVERY (map apply_occ sortedoccL) th)
+      end
+    end
+    handle THM _ => raise eqsubst_occL_exp ("THM",occL,thms,i,th);
+
+
+(* inthms are the arguments given in Isar; the substitution step is tried
+   with the first one, then the second, etc. *)
+fun eqsubst_meth ctxt occL inthms =
+    Method.SIMPLE_METHOD' (eqsubst_tac ctxt occL inthms);
+
+(* apply a substitution inside assumption j, keeps asm in the same place *)
+fun apply_subst_in_asm i th rule ((cfvs, j, ngoalprems, pth),m) =
+    let
+      val th2 = Thm.rotate_rule (j - 1) i th; (* put premise first *)
+      val preelimrule =
+          (RWInst.rw m rule pth)
+            |> (Seq.hd o prune_params_tac)
+            |> Thm.permute_prems 0 ~1 (* put old asm first *)
+            |> IsaND.unfix_frees cfvs (* unfix any global params *)
+            |> RWInst.beta_eta_contract; (* normal form *)
+  (*    val elimrule =
+          preelimrule
+            |> Tactic.make_elim (* make into elim rule *)
+            |> Thm.lift_rule (th2, i); (* lift into context *)
+   *)
+    in
+      (* ~j because new asm starts at back, thus we subtract 1 *)
+      Seq.map (Thm.rotate_rule (~j) ((Thm.nprems_of rule) + i))
+      (Tactic.dtac preelimrule i th2)
+
+      (* (Thm.bicompose
+                 false (* use unification *)
+                 (true, (* elim resolution *)
+                  elimrule, (2 + (Thm.nprems_of rule)) - ngoalprems)
+                 i th2) *)
+    end;
+
+
+(* prepare to substitute within the j'th premise of subgoal i of gth,
+using a meta-level equation. Note that we assume the rule has its var
+indices zeroed. Note that we also assume that premt is the j'th premise of
+subgoal i of gth. Note the repetition of work done for each
+assumption, i.e. this could be made more efficient for search over
+multiple assumptions. *)
+fun prep_subst_in_asm i gth j =
+    let
+      val th = Thm.incr_indexes 1 gth;
+      val tgt_term = Thm.prop_of th;
+
+      val sgn = Thm.theory_of_thm th;
+      val ctermify = Thm.cterm_of sgn;
+      val trivify = Thm.trivial o ctermify;
+
+      val (fixedbody, fvs) = IsaND.fix_alls_term i tgt_term;
+      val cfvs = rev (map ctermify fvs);
+
+      val asmt = nth (Logic.strip_imp_prems fixedbody) (j - 1);
+      val asm_nprems = length (Logic.strip_imp_prems asmt);
+
+      val pth = trivify asmt;
+      val maxidx = Thm.maxidx_of th;
+
+      val ft = ((Z.move_down_right (* trueprop *)
+                 o Z.mktop
+                 o Thm.prop_of) pth)
+    in ((cfvs, j, asm_nprems, pth), (sgn, maxidx, ft)) end;
+
+(* prepare subst in every possible assumption *)
+fun prep_subst_in_asms i gth =
+    map (prep_subst_in_asm i gth)
+        ((fn l => Library.upto (1, length l))
+           (Logic.prems_of_goal (Thm.prop_of gth) i));
+
+
+(* substitute in an assumption using an object or meta level equality *)
+fun eqsubst_asm_tac' ctxt searchf skipocc instepthm i th =
+    let
+      val asmpreps = prep_subst_in_asms i th;
+      val stepthms = Seq.of_list (prep_meta_eq ctxt instepthm);
+      fun rewrite_with_thm r =
+          let val (lhs,_) = Logic.dest_equals (Thm.concl_of r)
+            fun occ_search occ [] = Seq.empty
+              | occ_search occ ((asminfo, searchinfo)::moreasms) =
+                (case searchf searchinfo occ lhs of
+                   SkipMore i => occ_search i moreasms
+                 | SkipSeq ss =>
+                   Seq.append (Seq.map (Library.pair asminfo) (Seq.flat ss))
+                               (occ_search 1 moreasms))
+                              (* find later substs also *)
+          in
+            occ_search skipocc asmpreps |> Seq.maps (apply_subst_in_asm i th r)
+          end;
+    in stepthms |> Seq.maps rewrite_with_thm end;
+
+
+fun skip_first_asm_occs_search searchf sinfo occ lhs =
+    skipto_skipseq occ (searchf sinfo lhs);
+
+fun eqsubst_asm_tac ctxt occL thms i th =
+    let val nprems = Thm.nprems_of th
+    in
+      if nprems < i then Seq.empty else
+      let val thmseq = (Seq.of_list thms)
+        fun apply_occ occK th =
+            thmseq |> Seq.maps
+                    (fn r =>
+                        eqsubst_asm_tac' ctxt (skip_first_asm_occs_search
+                                            searchf_lr_unify_valid) occK r
+                                         (i + ((Thm.nprems_of th) - nprems))
+                                         th);
+        val sortedoccs =
+            Library.sort (Library.rev_order o Library.int_ord) occL
+      in
+        Seq.map distinct_subgoals
+                (Seq.EVERY (map apply_occ sortedoccs) th)
+      end
+    end
+    handle THM _ => raise eqsubst_occL_exp ("THM",occL,thms,i,th);
+
+(* inthms are the arguments given in Isar; the substitution step is tried
+   with the first one, then the second, etc. *)
+fun eqsubst_asm_meth ctxt occL inthms =
+    Method.SIMPLE_METHOD' (eqsubst_asm_tac ctxt occL inthms);
+
+(* syntax for options: given "(asm)", this yields true; without it,
+   it yields false *)
+val options_syntax =
+    (Args.parens (Args.$$$ "asm") >> (K true)) ||
+     (Scan.succeed false);
+
+val ith_syntax =
+    Scan.optional (Args.parens (Scan.repeat OuterParse.nat)) [0];
+
+(* combination method that takes a flag (true means the substitution is
+applied to an assumption, false means it is applied to the conclusion of
+the goal) as well as the theorems to use *)
+fun subst_meth src =
+  Method.syntax ((Scan.lift options_syntax) -- (Scan.lift ith_syntax) -- Attrib.thms) src
+  #> (fn (((asmflag, occL), inthms), ctxt) =>
+    (if asmflag then eqsubst_asm_meth else eqsubst_meth) ctxt occL inthms);
+
+
+val setup =
+  Method.add_method ("subst", subst_meth, "single-step substitution");
+
+end;
--- a/src/Tools/float.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/float.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/float.ML
-    ID:         $Id$
     Author:     Steven Obua, Florian Haftmann, TU Muenchen
 
 Implementation of real numbers as mantisse-exponent pairs.
--- a/src/Tools/induct_tacs.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/induct_tacs.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/induct_tacs.ML
-    ID:         $Id$
     Author:     Makarius
 
 Unstructured induction and cases analysis.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/intuitionistic.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,100 @@
+(*  Title:      Tools/intuitionistic.ML
+    Author:     Stefan Berghofer, TU Muenchen
+
+Simple intuitionistic proof search.
+*)
+
+signature INTUITIONISTIC =
+sig
+  val prover_tac: Proof.context -> int option -> int -> tactic
+  val method_setup: bstring -> theory -> theory
+end;
+
+structure Intuitionistic: INTUITIONISTIC =
+struct
+
+(* main tactic *)
+
+local
+
+val remdups_tac = SUBGOAL (fn (g, i) =>
+  let val prems = Logic.strip_assums_hyp g in
+    REPEAT_DETERM_N (length prems - length (distinct op aconv prems))
+    (Tactic.ematch_tac [Drule.remdups_rl] i THEN Tactic.eq_assume_tac i)
+  end);
+
+fun REMDUPS tac = tac THEN_ALL_NEW remdups_tac;
+
+val bires_tac = Tactic.biresolution_from_nets_tac ContextRules.orderlist;
+
+fun safe_step_tac ctxt =
+  ContextRules.Swrap ctxt
+   (eq_assume_tac ORELSE'
+    bires_tac true (ContextRules.netpair_bang ctxt));
+
+fun unsafe_step_tac ctxt =
+  ContextRules.wrap ctxt
+   (assume_tac APPEND'
+    bires_tac false (ContextRules.netpair_bang ctxt) APPEND'
+    bires_tac false (ContextRules.netpair ctxt));
+
+fun step_tac ctxt i =
+  REPEAT_DETERM1 (REMDUPS (safe_step_tac ctxt) i) ORELSE
+  REMDUPS (unsafe_step_tac ctxt) i;
+
+fun intprover_tac ctxt gs d lim = SUBGOAL (fn (g, i) => if d > lim then no_tac else
+  let
+    val ps = Logic.strip_assums_hyp g;
+    val c = Logic.strip_assums_concl g;
+  in
+    if member (fn ((ps1, c1), (ps2, c2)) =>
+        c1 aconv c2 andalso
+        length ps1 = length ps2 andalso
+        gen_eq_set (op aconv) (ps1, ps2)) gs (ps, c) then no_tac
+    else (step_tac ctxt THEN_ALL_NEW intprover_tac ctxt ((ps, c) :: gs) (d + 1) lim) i
+  end);
+
+in
+
+fun prover_tac ctxt opt_lim =
+  SELECT_GOAL (DEEPEN (2, the_default 20 opt_lim) (intprover_tac ctxt [] 0) 4 1);
+
+end;
+
+
+(* method setup *)
+
+local
+
+val introN = "intro";
+val elimN = "elim";
+val destN = "dest";
+val ruleN = "rule";
+
+fun modifier name kind kind' att =
+  Args.$$$ name |-- (kind >> K NONE || kind' |-- OuterParse.nat --| Args.colon >> SOME)
+    >> (pair (I: Proof.context -> Proof.context) o att);
+
+val modifiers =
+ [modifier destN Args.bang_colon Args.bang ContextRules.dest_bang,
+  modifier destN Args.colon (Scan.succeed ()) ContextRules.dest,
+  modifier elimN Args.bang_colon Args.bang ContextRules.elim_bang,
+  modifier elimN Args.colon (Scan.succeed ()) ContextRules.elim,
+  modifier introN Args.bang_colon Args.bang ContextRules.intro_bang,
+  modifier introN Args.colon (Scan.succeed ()) ContextRules.intro,
+  Args.del -- Args.colon >> K (I, ContextRules.rule_del)];
+
+val method =
+  Method.bang_sectioned_args' modifiers (Scan.lift (Scan.option OuterParse.nat))
+    (fn n => fn prems => fn ctxt => Method.METHOD (fn facts =>
+      HEADGOAL (Method.insert_tac (prems @ facts) THEN'
+      ObjectLogic.atomize_prems_tac THEN' prover_tac ctxt n)));
+
+in
+
+fun method_setup name = Method.add_method (name, method, "intuitionistic proof search");
+
+end;
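+
+(* Editorial note: a client theory installs the method under a name of its
+   choice, e.g. (hypothetically)  Intuitionistic.method_setup "iprover",
+   after which it can be invoked as, e.g., "by (iprover intro: rules)". *)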
+
+end;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/project_rule.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -0,0 +1,65 @@
+(*  Title:      Tools/project_rule.ML
+    Author:     Makarius
+
+Transform mutual rule:
+
+  HH ==> (x1:A1 --> P1 x1) & ... & (xn:An --> Pn xn)
+
+into projection:
+
+  xi:Ai ==> HH ==> Pi xi
+*)
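+
+(* Editorial illustration of the schema above: from a mutual rule
+     HH ==> (x : even --> P x) & (y : odd --> Q y)
+   "project ctxt 1" yields "x : even ==> HH ==> P x", "project ctxt 2" yields
+   "y : odd ==> HH ==> Q y", and "projections" returns both. *)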
+
+signature PROJECT_RULE_DATA =
+sig
+  val conjunct1: thm
+  val conjunct2: thm
+  val mp: thm
+end;
+
+signature PROJECT_RULE =
+sig
+  val project: Proof.context -> int -> thm -> thm
+  val projects: Proof.context -> int list -> thm -> thm list
+  val projections: Proof.context -> thm -> thm list
+end;
+
+functor ProjectRuleFun(Data: PROJECT_RULE_DATA): PROJECT_RULE =
+struct
+
+fun conj1 th = th RS Data.conjunct1;
+fun conj2 th = th RS Data.conjunct2;
+fun imp th = th RS Data.mp;
+
+fun projects ctxt is raw_rule =
+  let
+    fun proj 1 th = the_default th (try conj1 th)
+      | proj k th = proj (k - 1) (conj2 th);
+    fun prems k th =
+      (case try imp th of
+        NONE => (k, th)
+      | SOME th' => prems (k + 1) th');
+    val ((_, [rule]), ctxt') = Variable.import_thms true [raw_rule] ctxt;
+    fun result i =
+      rule
+      |> proj i
+      |> prems 0 |-> (fn k =>
+        Thm.permute_prems 0 (~ k)
+        #> singleton (Variable.export ctxt' ctxt)
+        #> Drule.zero_var_indexes
+        #> RuleCases.save raw_rule
+        #> RuleCases.add_consumes k);
+  in map result is end;
+
+fun project ctxt i th = hd (projects ctxt [i] th);
+
+fun projections ctxt raw_rule =
+  let
+    fun projs k th =
+      (case try conj2 th of
+        NONE => k
+      | SOME th' => projs (k + 1) th');
+    val ((_, [rule]), _) = Variable.import_thms true [raw_rule] ctxt;
+  in projects ctxt (1 upto projs 1 rule) raw_rule end;
+
+end;
--- a/src/Tools/random_word.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/random_word.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/random_word.ML
-    ID:         $Id$
     Author:     Makarius
 
 Simple generator for pseudo-random numbers, using unboxed word
--- a/src/Tools/rat.ML	Mon Mar 02 08:15:32 2009 +0100
+++ b/src/Tools/rat.ML	Mon Mar 02 08:15:54 2009 +0100
@@ -1,5 +1,4 @@
 (*  Title:      Tools/rat.ML
-    ID:         $Id$
     Author:     Tobias Nipkow, Florian Haftmann, TU Muenchen
 
 Canonical implementation of exact rational numbers.