closer correspondence of document and session names, while maintaining document names for external reference
author haftmann
Sat, 05 Apr 2014 11:37:00 +0200
changeset 56420 b266e7a86485
parent 56419 f47de9e82b0f
child 56431 4eb88149c7b2
closer correspondence of document and session names, while maintaining document names for external reference
src/Doc/Implementation/Base.thy
src/Doc/Implementation/Eq.thy
src/Doc/Implementation/Integration.thy
src/Doc/Implementation/Isar.thy
src/Doc/Implementation/Local_Theory.thy
src/Doc/Implementation/Logic.thy
src/Doc/Implementation/ML.thy
src/Doc/Implementation/Prelim.thy
src/Doc/Implementation/Proof.thy
src/Doc/Implementation/Syntax.thy
src/Doc/Implementation/Tactic.thy
src/Doc/Implementation/document/build
src/Doc/Implementation/document/root.tex
src/Doc/Implementation/document/style.sty
src/Doc/Isar-Ref/Base.thy
src/Doc/Isar-Ref/Document_Preparation.thy
src/Doc/Isar-Ref/First_Order_Logic.thy
src/Doc/Isar-Ref/Framework.thy
src/Doc/Isar-Ref/Generic.thy
src/Doc/Isar-Ref/HOL_Specific.thy
src/Doc/Isar-Ref/Inner_Syntax.thy
src/Doc/Isar-Ref/ML_Tactic.thy
src/Doc/Isar-Ref/Misc.thy
src/Doc/Isar-Ref/Outer_Syntax.thy
src/Doc/Isar-Ref/Preface.thy
src/Doc/Isar-Ref/Proof.thy
src/Doc/Isar-Ref/Quick_Reference.thy
src/Doc/Isar-Ref/Spec.thy
src/Doc/Isar-Ref/Symbols.thy
src/Doc/Isar-Ref/Synopsis.thy
src/Doc/Isar-Ref/document/build
src/Doc/Isar-Ref/document/isar-vm.pdf
src/Doc/Isar-Ref/document/isar-vm.svg
src/Doc/Isar-Ref/document/root.tex
src/Doc/Isar-Ref/document/showsymbols
src/Doc/Isar-Ref/document/style.sty
src/Doc/IsarImplementation/Base.thy
src/Doc/IsarImplementation/Eq.thy
src/Doc/IsarImplementation/Integration.thy
src/Doc/IsarImplementation/Isar.thy
src/Doc/IsarImplementation/Local_Theory.thy
src/Doc/IsarImplementation/Logic.thy
src/Doc/IsarImplementation/ML.thy
src/Doc/IsarImplementation/Prelim.thy
src/Doc/IsarImplementation/Proof.thy
src/Doc/IsarImplementation/Syntax.thy
src/Doc/IsarImplementation/Tactic.thy
src/Doc/IsarImplementation/document/build
src/Doc/IsarImplementation/document/root.tex
src/Doc/IsarImplementation/document/style.sty
src/Doc/IsarRef/Base.thy
src/Doc/IsarRef/Document_Preparation.thy
src/Doc/IsarRef/First_Order_Logic.thy
src/Doc/IsarRef/Framework.thy
src/Doc/IsarRef/Generic.thy
src/Doc/IsarRef/HOL_Specific.thy
src/Doc/IsarRef/Inner_Syntax.thy
src/Doc/IsarRef/ML_Tactic.thy
src/Doc/IsarRef/Misc.thy
src/Doc/IsarRef/Outer_Syntax.thy
src/Doc/IsarRef/Preface.thy
src/Doc/IsarRef/Proof.thy
src/Doc/IsarRef/Quick_Reference.thy
src/Doc/IsarRef/Spec.thy
src/Doc/IsarRef/Symbols.thy
src/Doc/IsarRef/Synopsis.thy
src/Doc/IsarRef/document/build
src/Doc/IsarRef/document/isar-vm.pdf
src/Doc/IsarRef/document/isar-vm.svg
src/Doc/IsarRef/document/root.tex
src/Doc/IsarRef/document/showsymbols
src/Doc/IsarRef/document/style.sty
src/Doc/JEdit/document/build
src/Doc/LaTeXsugar/Sugar.thy
src/Doc/LaTeXsugar/document/build
src/Doc/LaTeXsugar/document/mathpartir.sty
src/Doc/LaTeXsugar/document/root.bib
src/Doc/LaTeXsugar/document/root.tex
src/Doc/Logics-ZF/FOL_examples.thy
src/Doc/Logics-ZF/IFOL_examples.thy
src/Doc/Logics-ZF/If.thy
src/Doc/Logics-ZF/ZF_Isar.thy
src/Doc/Logics-ZF/ZF_examples.thy
src/Doc/Logics-ZF/document/FOL.tex
src/Doc/Logics-ZF/document/ZF.tex
src/Doc/Logics-ZF/document/build
src/Doc/Logics-ZF/document/logics.sty
src/Doc/Logics-ZF/document/root.tex
src/Doc/Prog-Prove/Basics.thy
src/Doc/Prog-Prove/Bool_nat_list.thy
src/Doc/Prog-Prove/Isar.thy
src/Doc/Prog-Prove/LaTeXsugar.thy
src/Doc/Prog-Prove/Logic.thy
src/Doc/Prog-Prove/MyList.thy
src/Doc/Prog-Prove/Types_and_funs.thy
src/Doc/Prog-Prove/document/bang.pdf
src/Doc/Prog-Prove/document/build
src/Doc/Prog-Prove/document/intro-isabelle.tex
src/Doc/Prog-Prove/document/mathpartir.sty
src/Doc/Prog-Prove/document/prelude.tex
src/Doc/Prog-Prove/document/root.bib
src/Doc/Prog-Prove/document/root.tex
src/Doc/Prog-Prove/document/svmono.cls
src/Doc/ProgProve/Basics.thy
src/Doc/ProgProve/Bool_nat_list.thy
src/Doc/ProgProve/Isar.thy
src/Doc/ProgProve/LaTeXsugar.thy
src/Doc/ProgProve/Logic.thy
src/Doc/ProgProve/MyList.thy
src/Doc/ProgProve/Types_and_funs.thy
src/Doc/ProgProve/document/bang.pdf
src/Doc/ProgProve/document/build
src/Doc/ProgProve/document/intro-isabelle.tex
src/Doc/ProgProve/document/mathpartir.sty
src/Doc/ProgProve/document/prelude.tex
src/Doc/ProgProve/document/root.bib
src/Doc/ProgProve/document/root.tex
src/Doc/ProgProve/document/svmono.cls
src/Doc/ROOT
src/Doc/Sugar/Sugar.thy
src/Doc/Sugar/document/build
src/Doc/Sugar/document/mathpartir.sty
src/Doc/Sugar/document/root.bib
src/Doc/Sugar/document/root.tex
src/Doc/System/document/build
src/Doc/ZF/FOL_examples.thy
src/Doc/ZF/IFOL_examples.thy
src/Doc/ZF/If.thy
src/Doc/ZF/ZF_Isar.thy
src/Doc/ZF/ZF_examples.thy
src/Doc/ZF/document/FOL.tex
src/Doc/ZF/document/ZF.tex
src/Doc/ZF/document/build
src/Doc/ZF/document/logics.sty
src/Doc/ZF/document/root.tex
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Base.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,7 @@
+theory Base
+imports Main
+begin
+
+ML_file "../antiquote_setup.ML"
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Eq.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,126 @@
+theory Eq
+imports Base
+begin
+
+chapter {* Equational reasoning *}
+
+text {* Equality is one of the most fundamental concepts of
+  mathematics.  The Isabelle/Pure logic (\chref{ch:logic}) provides a
+  builtin relation @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} that expresses equality
+  of arbitrary terms (or propositions) at the framework level, as
+  expressed by certain basic inference rules (\secref{sec:eq-rules}).
+
+  Equational reasoning means to replace equals by equals, using
+  reflexivity and transitivity to form chains of replacement steps,
+  and congruence rules to access sub-structures.  Conversions
+  (\secref{sec:conv}) provide a convenient framework to compose basic
+  equational steps to build specific equational reasoning tools.
+
+  Higher-order matching is able to provide suitable instantiations for
+  given equality rules, which leads to the versatile concept of
+  @{text "\<lambda>"}-term rewriting (\secref{sec:rewriting}).  Internally
+  this is based on the general-purpose Simplifier engine of Isabelle,
+  which is more specific and more efficient than plain conversions.
+
+  Object-logics usually introduce specific notions of equality or
+  equivalence, and relate them to Pure equality.  This makes it possible
+  to re-use the Pure tools for equational reasoning for particular
+  object-logic connectives as well.
+*}
+
+
+section {* Basic equality rules \label{sec:eq-rules} *}
+
+text {* Isabelle/Pure uses @{text "\<equiv>"} for equality of arbitrary
+  terms, which includes equivalence of propositions of the logical
+  framework.  The conceptual axiomatization of the constant @{text "\<equiv>
+  :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} is given in \figref{fig:pure-equality}.  The
+  inference kernel presents slightly different equality rules, which
+  may be understood as derived rules from this minimal axiomatization.
+  The Pure theory also provides some theorems that express the same
+  reasoning schemes as theorems that can be composed like object-level
+  rules as explained in \secref{sec:obj-rules}.
+
+  For example, @{ML Thm.symmetric} as Pure inference is an ML function
+  that maps a theorem @{text "th"} stating @{text "t \<equiv> u"} to one
+  stating @{text "u \<equiv> t"}.  In contrast, @{thm [source]
+  Pure.symmetric} as Pure theorem expresses the same reasoning in
+  declarative form.  If used like @{text "th [THEN Pure.symmetric]"}
+  in Isar source notation, it achieves a similar effect to the ML
+  inference function, although the rule attribute @{attribute THEN} or
+  the ML operator @{ML "op RS"} involves the full machinery of higher-order
+  unification (modulo @{text "\<beta>\<eta>"}-conversion) and lifting of @{text
+  "\<And>/\<Longrightarrow>"} contexts. *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Thm.reflexive: "cterm -> thm"} \\
+  @{index_ML Thm.symmetric: "thm -> thm"} \\
+  @{index_ML Thm.transitive: "thm -> thm -> thm"} \\
+  @{index_ML Thm.abstract_rule: "string -> cterm -> thm -> thm"} \\
+  @{index_ML Thm.combination: "thm -> thm -> thm"} \\[0.5ex]
+  @{index_ML Thm.equal_intr: "thm -> thm -> thm"} \\
+  @{index_ML Thm.equal_elim: "thm -> thm -> thm"} \\
+  \end{mldecls}
+
+  See also @{file "~~/src/Pure/thm.ML" } for further description of
+  these inference rules, and a few more for primitive @{text "\<beta>"} and
+  @{text "\<eta>"} conversions.  Note that @{text "\<alpha>"} conversion is
+  implicit due to the representation of terms with de Bruijn indices
+  (\secref{sec:terms}). *}
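+
+text %mlex {* A minimal sketch that composes the primitive inferences in ML
+  (the free variable @{text "a"} is an arbitrary choice): *}
+
+ML_val {*
+  (*"a \<equiv> a" by reflexivity on an arbitrary free variable*)
+  val refl_th = Thm.reflexive @{cterm "a::'a"};
+  (*symmetry and transitivity yield "a \<equiv> a" again*)
+  val th = Thm.transitive (Thm.symmetric refl_th) refl_th;
+*}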
+
+
+section {* Conversions \label{sec:conv} *}
+
+text {*
+  %FIXME
+
+  The classic article that introduces the concept of conversion (for
+  Cambridge LCF) is \cite{paulson:1983}.
+*}
+
+
+section {* Rewriting \label{sec:rewriting} *}
+
+text {* Rewriting normalizes a given term (theorem or goal) by
+  replacing instances of given equalities @{text "t \<equiv> u"} in subterms.
+  Rewriting continues until no rewrites are applicable to any subterm.
+  This may be used to unfold simple definitions of the form @{text "f
+  x\<^sub>1 \<dots> x\<^sub>n \<equiv> u"}, but is slightly more general than that.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML rewrite_rule: "Proof.context -> thm list -> thm -> thm"} \\
+  @{index_ML rewrite_goals_rule: "Proof.context -> thm list -> thm -> thm"} \\
+  @{index_ML rewrite_goal_tac: "Proof.context -> thm list -> int -> tactic"} \\
+  @{index_ML rewrite_goals_tac: "Proof.context -> thm list -> tactic"} \\
+  @{index_ML fold_goals_tac: "Proof.context -> thm list -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML rewrite_rule}~@{text "ctxt rules thm"} rewrites the whole
+  theorem by the given rules.
+
+  \item @{ML rewrite_goals_rule}~@{text "ctxt rules thm"} rewrites the
+  outer premises of the given theorem.  Interpreting the theorem as a
+  goal state (\secref{sec:tactical-goals}), this means that all
+  subgoals are rewritten (in the same manner as @{ML rewrite_goals_tac}).
+
+  \item @{ML rewrite_goal_tac}~@{text "ctxt rules i"} rewrites subgoal
+  @{text "i"} by the given rewrite rules.
+
+  \item @{ML rewrite_goals_tac}~@{text "ctxt rules"} rewrites all subgoals
+  by the given rewrite rules.
+
+  \item @{ML fold_goals_tac}~@{text "ctxt rules"} essentially uses @{ML
+  rewrite_goals_tac} with the symmetric form of each member of @{text
+  "rules"}, re-ordered to fold longer expression first.  This supports
+  to idea to fold primitive definitions that appear in expended form
+  in the proof state.
+
+  \end{description}
+*}
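+
+text %mlex {* A minimal sketch of goal rewriting: a hypothetical local
+  definition @{text "f \<equiv> g"} is passed as goal fact to the @{method tactic}
+  method and used with @{ML rewrite_goal_tac}. *}
+
+notepad
+begin
+  (*hypothetical local definition, assumed for illustration*)
+  fix f g :: "nat \<Rightarrow> nat"
+  assume f_def: "f \<equiv> g"
+  have "f 0 = g 0"
+    using f_def
+    apply (tactic {* rewrite_goal_tac @{context} facts 1 *})
+    apply (rule refl)
+    done
+end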
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Integration.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,291 @@
+theory Integration
+imports Base
+begin
+
+chapter {* System integration *}
+
+section {* Isar toplevel \label{sec:isar-toplevel} *}
+
+text {* The Isar toplevel may be considered the central hub of the
+  Isabelle/Isar system, where all key components and sub-systems are
+  integrated into a single read-eval-print loop of Isar commands,
+  which also incorporates the underlying ML compiler.
+
+  Isabelle/Isar departs from the original ``LCF system architecture''
+  where ML was really The Meta Language for defining theories and
+  conducting proofs.  Instead, ML now only serves as the
+  implementation language for the system (and user extensions), while
+  the specific Isar toplevel supports the concepts of theory and proof
+  development natively.  This includes the graph structure of theories
+  and the block structure of proofs, support for unlimited undo,
+  facilities for tracing, debugging, timing, profiling etc.
+
+  \medskip The toplevel maintains an implicit state, which is
+  transformed by a sequence of transitions -- either interactively or
+  in batch-mode.
+
+  The toplevel state is a disjoint sum of an empty @{text toplevel}, a
+  @{text theory}, or a @{text proof}.  On entering the main Isar loop we
+  start with an empty toplevel.  A theory is commenced by giving a
+  @{text \<THEORY>} header; within a theory we may issue theory
+  commands such as @{text \<DEFINITION>}, or state a @{text
+  \<THEOREM>} to be proven.  Now we are within a proof state, with a
+  rich collection of Isar proof commands for structured proof
+  composition, or unstructured proof scripts.  When the proof is
+  concluded we get back to the theory, which is then updated by
+  storing the resulting fact.  Further theory declarations or theorem
+  statements with proofs may follow, until we eventually conclude the
+  theory development by issuing @{text \<END>}.  The resulting theory
+  is then stored within the theory database and we are back to the
+  empty toplevel.
+
+  In addition to these proper state transformations, there are also
+  some diagnostic commands for peeking at the toplevel state without
+  modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term},
+  \isakeyword{print-cases}).
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Toplevel.state} \\
+  @{index_ML_exception Toplevel.UNDEF} \\
+  @{index_ML Toplevel.is_toplevel: "Toplevel.state -> bool"} \\
+  @{index_ML Toplevel.theory_of: "Toplevel.state -> theory"} \\
+  @{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
+  @{index_ML Toplevel.timing: "bool Unsynchronized.ref"} \\
+  @{index_ML Toplevel.profiling: "int Unsynchronized.ref"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Toplevel.state} represents Isar toplevel
+  states, which are normally manipulated through the concept of
+  toplevel transitions only (\secref{sec:toplevel-transition}).  Also
+  note that a raw toplevel state is subject to the same linearity
+  restrictions as a theory context (cf.~\secref{sec:context-theory}).
+
+  \item @{ML Toplevel.UNDEF} is raised for undefined toplevel
+  operations.  Many operations work only partially for certain cases,
+  since @{ML_type Toplevel.state} is a sum type.
+
+  \item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
+  toplevel state.
+
+  \item @{ML Toplevel.theory_of}~@{text "state"} selects the
+  background theory of @{text "state"}; it raises @{ML Toplevel.UNDEF}
+  for an empty toplevel state.
+
+  \item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
+  state if available, otherwise raises @{ML Toplevel.UNDEF}.
+
+  \item @{ML "Toplevel.timing := true"} makes the toplevel print timing
+  information for each Isar command being executed.
+
+  \item @{ML Toplevel.profiling}~@{ML_text ":="}~@{text "n"} controls
+  low-level profiling of the underlying ML runtime system.  For
+  Poly/ML, @{text "n = 1"} means time and @{text "n = 2"} space
+  profiling.
+
+  \end{description}
+*}
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "Isar.state"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{text "@{Isar.state}"} refers to Isar toplevel state at that
+  point --- as abstract value.
+
+  This only works for diagnostic ML commands, such as @{command
+  ML_val} or @{command ML_command}.
+
+  \end{description}
+*}
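+
+text %mlex {* A minimal sketch: at this point of the theory text the
+  toplevel state is a theory state, so @{ML Toplevel.is_toplevel} yields
+  @{ML false} and the background theory is accessible. *}
+
+ML_val {*
+  (*the state at this point is a theory state, not the empty toplevel*)
+  val st = @{Isar.state};
+  @{assert} (not (Toplevel.is_toplevel st));
+  val thy = Toplevel.theory_of st;
+*}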
+
+
+subsection {* Toplevel transitions \label{sec:toplevel-transition} *}
+
+text {*
+  An Isar toplevel transition consists of a partial function on the
+  toplevel state, with additional information for diagnostics and
+  error reporting: there are fields for command name, source position,
+  optional source text, as well as flags for interactive-only commands
+  (which issue a warning in batch-mode), printing of result state,
+  etc.
+
+  The operational part is represented as the sequential union of a
+  list of partial functions, which are tried in turn until the first
+  one succeeds.  This acts like an outer case-expression for various
+  alternative state transitions.  For example, \isakeyword{qed} works
+  differently for local proofs vs.\ the global ending of the main
+  proof.
+
+  Toplevel transitions are composed via transition transformers.
+  Internally, Isar commands are put together from an empty transition
+  extended by name and source position.  It is then left to the
+  individual command parser to turn the given concrete syntax into a
+  suitable transition transformer that adjoins actual operations on a
+  theory or proof state etc.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Toplevel.print: "Toplevel.transition -> Toplevel.transition"} \\
+  @{index_ML Toplevel.keep: "(Toplevel.state -> unit) ->
+  Toplevel.transition -> Toplevel.transition"} \\
+  @{index_ML Toplevel.theory: "(theory -> theory) ->
+  Toplevel.transition -> Toplevel.transition"} \\
+  @{index_ML Toplevel.theory_to_proof: "(theory -> Proof.state) ->
+  Toplevel.transition -> Toplevel.transition"} \\
+  @{index_ML Toplevel.proof: "(Proof.state -> Proof.state) ->
+  Toplevel.transition -> Toplevel.transition"} \\
+  @{index_ML Toplevel.proofs: "(Proof.state -> Proof.state Seq.result Seq.seq) ->
+  Toplevel.transition -> Toplevel.transition"} \\
+  @{index_ML Toplevel.end_proof: "(bool -> Proof.state -> Proof.context) ->
+  Toplevel.transition -> Toplevel.transition"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Toplevel.print}~@{text "tr"} sets the print flag, which
+  causes the toplevel loop to echo the result state (in interactive
+  mode).
+
+  \item @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
+  function.
+
+  \item @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
+  transformer.
+
+  \item @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
+  goal function, which turns a theory into a proof state.  The theory
+  may be changed before entering the proof; the generic Isar goal
+  setup includes an argument that specifies how to apply the proven
+  result to the theory, when the proof is finished.
+
+  \item @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
+  proof command, with a singleton result.
+
+  \item @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
+  command, with zero or more result states (represented as a lazy
+  list).
+
+  \item @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
+  proof command that returns the resulting theory, after storing the
+  resulting facts in the context etc.
+
+  \end{description}
+*}
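+
+text %mlex {* A minimal sketch of a transition transformer (the name
+  @{text "report_thy"} is a hypothetical choice, and the transformer is
+  merely defined here, not registered as an actual command): *}
+
+ML_val {*
+  (*hypothetical transformer: adjoin a diagnostic function that prints
+    the name of the background theory*)
+  val report_thy: Toplevel.transition -> Toplevel.transition =
+    Toplevel.keep (fn st =>
+      writeln (Context.theory_name (Toplevel.theory_of st)));
+*}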
+
+
+section {* Theory database \label{sec:theory-database} *}
+
+text {*
+  The theory database maintains a collection of theories, together
+  with some administrative information about their original sources,
+  which are held in an external store (i.e.\ some directory within the
+  regular file system).
+
+  The theory database is organized as a directed acyclic graph;
+  entries are referenced by theory name.  Although some additional
+  interfaces allow to include a directory specification as well, this
+  is only a hint to the underlying theory loader.  The internal theory
+  name space is flat!
+
+  Theory @{text A} is associated with the main theory file @{text
+  A}\verb,.thy,, which needs to be accessible through the theory
+  loader path.  Any number of additional ML source files may be
+  associated with each theory, by declaring these dependencies in the
+  theory header as @{text \<USES>}, and loading them consecutively
+  within the theory context.  The system keeps track of incoming ML
+  sources and associates them with the current theory.
+
+  The basic internal actions of the theory database are @{text
+  "update"} and @{text "remove"}:
+
+  \begin{itemize}
+
+  \item @{text "update A"} introduces a link of @{text "A"} with a
+  @{text "theory"} value of the same name; it asserts that the theory
+  sources are now consistent with that value;
+
+  \item @{text "remove A"} deletes entry @{text "A"} from the theory
+  database.
+  
+  \end{itemize}
+
+  These actions are propagated to sub- or super-graphs of a theory
+  entry as expected, in order to preserve global consistency of the
+  state of all loaded theories with the sources of the external store.
+  This implies certain causalities between actions: @{text "update"}
+  or @{text "remove"} of an entry will @{text "remove"} all
+  descendants.
+
+  \medskip There are separate user-level interfaces to operate on the
+  theory database directly or indirectly.  The primitive actions then
+  just happen automatically while working with the system.  In
+  particular, processing a theory header @{text "\<THEORY> A
+  \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"} ensures that the
+  sub-graph of the collective imports @{text "B\<^sub>1 \<dots> B\<^sub>n"}
+  is up-to-date, too.  Earlier theories are reloaded as required, with
+  @{text update} actions proceeding in topological order according to
+  theory dependencies.  There may also be a wave of implied @{text
+  remove} actions for derived theory nodes until a stable situation
+  is eventually reached.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML use_thy: "string -> unit"} \\
+  @{index_ML use_thys: "string list -> unit"} \\
+  @{index_ML Thy_Info.get_theory: "string -> theory"} \\
+  @{index_ML Thy_Info.remove_thy: "string -> unit"} \\[1ex]
+  @{index_ML Thy_Info.register_thy: "theory -> unit"} \\[1ex]
+  @{ML_text "datatype action = Update | Remove"} \\
+  @{index_ML Thy_Info.add_hook: "(Thy_Info.action -> string -> unit) -> unit"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
+  up-to-date wrt.\ the external file store, reloading outdated
+  ancestors as required.  In batch mode, the simultaneous @{ML
+  use_thys} should be used exclusively.
+
+  \item @{ML use_thys} is similar to @{ML use_thy}, but handles
+  several theories simultaneously.  Thus it acts like processing the
+  import header of a theory, without performing the merge of the
+  result.  By loading a whole sub-graph of theories like that, the
+  intrinsic parallelism can be exploited by the system, to speed up
+  loading.
+
+  \item @{ML Thy_Info.get_theory}~@{text A} retrieves the theory value
+  presently associated with name @{text A}.  Note that the result
+  might be outdated.
+
+  \item @{ML Thy_Info.remove_thy}~@{text A} deletes theory @{text A} and all
+  descendants from the theory database.
+
+  \item @{ML Thy_Info.register_thy}~@{text "thy"} registers an
+  existing theory value with the theory loader database and updates
+  source version information according to the current file-system
+  state.
+
+  \item @{ML "Thy_Info.add_hook"}~@{text f} registers function @{text
+  f} as a hook for theory database actions.  The function will be
+  invoked with the action and theory name being involved; thus derived
+  actions may be performed in associated system components, e.g.\
+  maintaining the state of an editor for the theory sources.
+
+  The kind and order of actions occurring in practice depends both on
+  user interactions and the internal process of resolving theory
+  imports.  Hooks should not rely on a particular policy here!  Any
+  exceptions raised by the hook are ignored.
+
+  \end{description}
+*}
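+
+text %mlex {* A minimal sketch of a theory-loader hook that merely traces
+  actions (the name @{text "trace_action"} is a hypothetical choice); it
+  could be registered via @{ML Thy_Info.add_hook}: *}
+
+ML_val {*
+  (*hypothetical hook body: print each action together with the theory name*)
+  fun trace_action action name =
+    writeln
+      ((case action of Thy_Info.Update => "update " | Thy_Info.Remove => "remove ") ^ name);
+*}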
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Isar.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,585 @@
+theory Isar
+imports Base
+begin
+
+chapter {* Isar language elements *}
+
+text {* The Isar proof language (see also
+  \cite[\S2]{isabelle-isar-ref}) consists of three main categories of
+  language elements as follows.
+
+  \begin{enumerate}
+
+  \item Proof \emph{commands} define the primary language of
+  transactions of the underlying Isar/VM interpreter.  Typical
+  examples are @{command "fix"}, @{command "assume"}, @{command
+  "show"}, @{command "proof"}, and @{command "qed"}.
+
+  Composing proof commands according to the rules of the Isar/VM leads
+  to expressions of structured proof text, such that both the machine
+  and the human reader can give it a meaning as formal reasoning.
+
+  \item Proof \emph{methods} define a secondary language of mixed
+  forward-backward refinement steps involving facts and goals.
+  Typical examples are @{method rule}, @{method unfold}, and @{method
+  simp}.
+
+  Methods can occur in certain well-defined parts of the Isar proof
+  language, say as arguments to @{command "proof"}, @{command "qed"},
+  or @{command "by"}.
+
+  \item \emph{Attributes} define a tertiary language of small
+  annotations to theorems being defined or referenced.  Attributes can
+  modify both the context and the theorem.
+
+  Typical examples are @{attribute intro} (which affects the context),
+  and @{attribute symmetric} (which affects the theorem).
+
+  \end{enumerate}
+*}
+
+
+section {* Proof commands *}
+
+text {* A \emph{proof command} is a state transition of the Isar/VM
+  proof interpreter.
+
+  In principle, Isar proof commands could be defined in user-space as
+  well.  The system is built like that in the first place: one part of
+  the commands are primitive, the other part is defined as derived
+  elements.  Adding to the genuine structured proof language requires
+  profound understanding of the Isar/VM machinery, though, so this is
+  beyond the scope of this manual.
+
+  What can be done realistically is to define some diagnostic commands
+  that inspect the general state of the Isar/VM, and report some
+  feedback to the user.  Typically this involves checking the
+  linguistic \emph{mode} of a proof state, or peeking at the pending
+  goals (if available).
+
+  Another common application is to define a toplevel command that
+  poses a problem to the user as Isar proof state and processes the
+  final result relative to the context.  Thus a proof can be
+  incorporated into the context of some user-space tool, without
+  modifying the Isar proof language itself.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Proof.state} \\
+  @{index_ML Proof.assert_forward: "Proof.state -> Proof.state"} \\
+  @{index_ML Proof.assert_chain: "Proof.state -> Proof.state"} \\
+  @{index_ML Proof.assert_backward: "Proof.state -> Proof.state"} \\
+  @{index_ML Proof.simple_goal: "Proof.state -> {context: Proof.context, goal: thm}"} \\
+  @{index_ML Proof.goal: "Proof.state ->
+  {context: Proof.context, facts: thm list, goal: thm}"} \\
+  @{index_ML Proof.raw_goal: "Proof.state ->
+  {context: Proof.context, facts: thm list, goal: thm}"} \\
+  @{index_ML Proof.theorem: "Method.text option ->
+  (thm list list -> Proof.context -> Proof.context) ->
+  (term * term list) list list -> Proof.context -> Proof.state"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Proof.state} represents Isar proof states.
+  This is a block-structured configuration with proof context,
+  linguistic mode, and optional goal.  The latter consists of goal
+  context, goal facts (``@{text "using"}''), and tactical goal state
+  (see \secref{sec:tactical-goals}).
+
+  The general idea is that the facts shall contribute to the
+  refinement of some parts of the tactical goal --- how exactly is
+  defined by the proof method that is applied in that situation.
+
+  \item @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
+  Proof.assert_backward} are partial identity functions that fail
+  unless a certain linguistic mode is active, namely ``@{text
+  "proof(state)"}'', ``@{text "proof(chain)"}'', ``@{text
+  "proof(prove)"}'', respectively (using the terminology of
+  \cite{isabelle-isar-ref}).
+
+  It is advisable to study the implementations of existing proof
+  commands for suitable modes to be asserted.
+
+  \item @{ML Proof.simple_goal}~@{text "state"} returns the structured
+  Isar goal (if available) in the form seen by ``simple'' methods
+  (like @{method simp} or @{method blast}).  The Isar goal facts are
+  already inserted as premises into the subgoals, which are presented
+  individually as in @{ML Proof.goal}.
+
+  \item @{ML Proof.goal}~@{text "state"} returns the structured Isar
+  goal (if available) in the form seen by regular methods (like
+  @{method rule}).  The auxiliary internal encoding of Pure
+  conjunctions is split into individual subgoals as usual.
+
+  \item @{ML Proof.raw_goal}~@{text "state"} returns the structured
+  Isar goal (if available) in the raw internal form seen by ``raw''
+  methods (like @{method induct}).  This form is rarely appropriate
+  for diagnostic tools; @{ML Proof.simple_goal} or @{ML Proof.goal}
+  should be used in most situations.
+
+  \item @{ML Proof.theorem}~@{text "before_qed after_qed statement ctxt"}
+  initializes a toplevel Isar proof state within a given context.
+
+  The optional @{text "before_qed"} method is applied at the end of
+  the proof, just before extracting the result (this feature is rarely
+  used).
+
+  The @{text "after_qed"} continuation receives the extracted result
+  in order to apply it to the final context in a suitable way (e.g.\
+  storing named facts).  Note that at this generic level the target
+  context is specified as @{ML_type Proof.context}, but the usual
+  wrapping of toplevel proofs into command transactions will provide a
+  @{ML_type local_theory} here (\chref{ch:local-theory}).  This
+  affects the way results are stored.
+
+  The @{text "statement"} is given as a nested list of terms, each
+  associated with optional @{keyword "is"} patterns as usual in the
+  Isar source language.  The original nested list structure over terms
+  is turned into one over theorems when @{text "after_qed"} is
+  invoked.
+
+  \end{description}
+*}
+
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "Isar.goal"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{text "@{Isar.goal}"} refers to the regular goal state (if
+  available) of the current proof state managed by the Isar toplevel
+  --- as an abstract value.
+
+  This only works for diagnostic ML commands, such as @{command
+  ML_val} or @{command ML_command}.
+
+  \end{description}
+*}
+
+text %mlex {* The following example peeks at a certain goal configuration. *}
+
+notepad
+begin
+  have A and B and C
+    ML_val {*
+      val n = Thm.nprems_of (#goal @{Isar.goal});
+      @{assert} (n = 3);
+    *}
+    oops
+
+text {* Here we see 3 individual subgoals in the same way as regular
+  proof methods would do.  *}
+
+
+section {* Proof methods *}
+
+text {* A @{text "method"} is a function @{text "context \<rightarrow> thm\<^sup>* \<rightarrow> goal
+  \<rightarrow> (cases \<times> goal)\<^sup>*\<^sup>*"} that operates on the full Isar goal
+  configuration with context, goal facts, and tactical goal state and
+  enumerates possible follow-up goal states, with the potential
+  addition of named extensions of the proof context (\emph{cases}).
+  The latter feature is rarely used.
+
+  This means a proof method is like a structurally enhanced tactic
+  (cf.\ \secref{sec:tactics}).  The well-formedness conditions for
+  tactics need to hold for methods accordingly, with the following
+  additions.
+
+  \begin{itemize}
+
+  \item Goal addressing is further limited to operate either
+  uniformly on \emph{all} subgoals, or specifically on the
+  \emph{first} subgoal.
+
+  Exception: old-style tactic emulations that are embedded into the
+  method space, e.g.\ @{method rule_tac}.
+
+  \item A non-trivial method always needs to make progress: an
+  identical follow-up goal state has to be avoided.\footnote{This
+  enables the user to write method expressions like @{text "meth\<^sup>+"}
+  without looping, while the trivial do-nothing case can be recovered
+  via @{text "meth\<^sup>?"}.}
+
+  Exception: trivial stuttering steps, such as ``@{method -}'' or
+  @{method succeed}.
+
+  \item Goal facts passed to the method must not be ignored.  If there
+  is no sensible use of facts outside the goal state, facts should be
+  inserted into the subgoals that are addressed by the method.
+
+  \end{itemize}
+
+  \medskip Syntactically, the language of proof methods appears as
+  arguments to Isar commands like @{command "by"} or @{command apply}.
+  User-space additions are reasonably easy by plugging suitable
+  method-valued parser functions into the framework, using the
+  @{command method_setup} command, for example.
+
+  To get a better idea about the range of possibilities, consider the
+  following Isar proof schemes.  This is the general form of
+  structured proof text:
+
+  \medskip
+  \begin{tabular}{l}
+  @{command from}~@{text "facts\<^sub>1"}~@{command have}~@{text "props"}~@{command using}~@{text "facts\<^sub>2"} \\
+  @{command proof}~@{text "(initial_method)"} \\
+  \quad@{text "body"} \\
+  @{command qed}~@{text "(terminal_method)"} \\
+  \end{tabular}
+  \medskip
+
+  The goal configuration consists of @{text "facts\<^sub>1"} and
+  @{text "facts\<^sub>2"} appended in that order, and various @{text
+  "props"} being claimed.  The @{text "initial_method"} is invoked
+  with facts and goals together and refines the problem to something
+  that is handled recursively in the proof @{text "body"}.  The @{text
+  "terminal_method"} has another chance to finish any remaining
+  subgoals, but it does not see the facts of the initial step.
+
+  \medskip This pattern illustrates unstructured proof scripts:
+
+  \medskip
+  \begin{tabular}{l}
+  @{command have}~@{text "props"} \\
+  \quad@{command using}~@{text "facts\<^sub>1"}~@{command apply}~@{text "method\<^sub>1"} \\
+  \quad@{command apply}~@{text "method\<^sub>2"} \\
+  \quad@{command using}~@{text "facts\<^sub>3"}~@{command apply}~@{text "method\<^sub>3"} \\
+  \quad@{command done} \\
+  \end{tabular}
+  \medskip
+
+  The @{text "method\<^sub>1"} operates on the original claim while
+  using @{text "facts\<^sub>1"}.  Since the @{command apply} command
+  structurally resets the facts, the @{text "method\<^sub>2"} will
+  operate on the remaining goal state without facts.  The @{text
+  "method\<^sub>3"} will see again a collection of @{text
+  "facts\<^sub>3"} that has been inserted into the script explicitly.
+
+  \medskip Empirically, any Isar proof method can be categorized as
+  follows.
+
+  \begin{enumerate}
+
+  \item \emph{Special method with cases} with named context additions
+  associated with the follow-up goal state.
+
+  Example: @{method "induct"}, which is also a ``raw'' method since it
+  operates on the internal representation of simultaneous claims as
+  Pure conjunction (\secref{sec:logic-aux}), instead of separate
+  subgoals (\secref{sec:tactical-goals}).
+
+  \item \emph{Structured method} with strong emphasis on facts outside
+  the goal state.
+
+  Example: @{method "rule"}, which captures the key ideas behind
+  structured reasoning in Isar in purest form.
+
+  \item \emph{Simple method} with weaker emphasis on facts, which are
+  inserted into subgoals to emulate old-style tactical reasoning as
+  ``premises''.
+
+  Examples: @{method "simp"}, @{method "blast"}, @{method "auto"}.
+
+  \item \emph{Old-style tactic emulation} with detailed numeric goal
+  addressing and explicit references to entities of the internal goal
+  state (which are otherwise invisible from proper Isar proof text).
+  The naming convention @{text "foo_tac"} makes this special
+  non-standard status clear.
+
+  Example: @{method "rule_tac"}.
+
+  \end{enumerate}
+
+  When implementing proof methods, it is advisable to study existing
+  implementations carefully and imitate the typical ``boiler plate''
+  for context-sensitive parsing and further combinators to wrap up
+  tactic expressions as methods.\footnote{Aliases or abbreviations of
+  the standard method combinators should be avoided.  Note that from
+  Isabelle99 until Isabelle2009 the system did provide various odd
+  combinations of method wrappers that made user applications more
+  complicated than necessary.}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Proof.method} \\
+  @{index_ML METHOD_CASES: "(thm list -> cases_tactic) -> Proof.method"} \\
+  @{index_ML METHOD: "(thm list -> tactic) -> Proof.method"} \\
+  @{index_ML SIMPLE_METHOD: "tactic -> Proof.method"} \\
+  @{index_ML SIMPLE_METHOD': "(int -> tactic) -> Proof.method"} \\
+  @{index_ML Method.insert_tac: "thm list -> int -> tactic"} \\
+  @{index_ML Method.setup: "binding -> (Proof.context -> Proof.method) context_parser ->
+  string -> theory -> theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Proof.method} represents proof methods as
+  abstract type.
+
+  \item @{ML METHOD_CASES}~@{text "(fn facts => cases_tactic)"} wraps
+  @{text cases_tactic} depending on goal facts as proof method with
+  cases; the goal context is passed via method syntax.
+
+  \item @{ML METHOD}~@{text "(fn facts => tactic)"} wraps @{text
+  tactic} depending on goal facts as regular proof method; the goal
+  context is passed via method syntax.
+
+  \item @{ML SIMPLE_METHOD}~@{text "tactic"} wraps a tactic that
+  addresses all subgoals uniformly as simple proof method.  Goal facts
+  are already inserted into all subgoals before @{text "tactic"} is
+  applied.
+
+  \item @{ML SIMPLE_METHOD'}~@{text "tactic"} wraps a tactic that
+  addresses a specific subgoal as simple proof method that operates on
+  subgoal 1.  Goal facts are inserted into the subgoal before the @{text
+  "tactic"} is applied.
+
+  \item @{ML Method.insert_tac}~@{text "facts i"} inserts @{text
+  "facts"} into subgoal @{text "i"}.  This is convenient to reproduce
+  part of the @{ML SIMPLE_METHOD} or @{ML SIMPLE_METHOD'} wrapping
+  within regular @{ML METHOD}, for example.
+
+  \item @{ML Method.setup}~@{text "name parser description"} provides
+  the functionality of the Isar command @{command method_setup} as ML
+  function.
+
+  \end{description}
+*}
+
+text %mlex {* See also @{command method_setup} in
+  \cite{isabelle-isar-ref} which includes some abstract examples.
+
+  \medskip The following toy examples illustrate how the goal facts
+  and state are passed to proof methods.  The pre-defined proof method
+  called ``@{method tactic}'' wraps ML source of type @{ML_type
+  tactic} (abstracted over @{ML_text facts}).  This allows immediate
+  experimentation without parsing of concrete syntax. *}
+
+notepad
+begin
+  assume a: A and b: B
+
+  have "A \<and> B"
+    apply (tactic {* rtac @{thm conjI} 1 *})
+    using a apply (tactic {* resolve_tac facts 1 *})
+    using b apply (tactic {* resolve_tac facts 1 *})
+    done
+
+  have "A \<and> B"
+    using a and b
+    ML_val "@{Isar.goal}"
+    apply (tactic {* Method.insert_tac facts 1 *})
+    apply (tactic {* (rtac @{thm conjI} THEN_ALL_NEW atac) 1 *})
+    done
+end
+
+text {* \medskip The next example implements a method that simplifies
+  the first subgoal by rewrite rules given as arguments.  *}
+
+method_setup my_simp = {*
+  Attrib.thms >> (fn thms => fn ctxt =>
+    SIMPLE_METHOD' (fn i =>
+      CHANGED (asm_full_simp_tac
+        (put_simpset HOL_basic_ss ctxt addsimps thms) i)))
+*} "rewrite subgoal by given rules"
+
+text {* The concrete syntax wrapping of @{command method_setup} always
+  passes through the proof context at the end of parsing, but it is
+  not used in this example.
+
+  The @{ML Attrib.thms} parser produces a list of theorems from the
+  usual Isar syntax involving attribute expressions etc.\ (syntax
+  category @{syntax thmrefs}) \cite{isabelle-isar-ref}.  The resulting
+  @{ML_text thms} are added to @{ML HOL_basic_ss} which already
+  contains the basic Simplifier setup for HOL.
+
+  The tactic @{ML asm_full_simp_tac} is the one that is also used in
+  method @{method simp} by default.  The extra wrapping by the @{ML
+  CHANGED} tactical ensures progress of simplification: identical goal
+  states are filtered out explicitly to make the raw tactic conform to
+  standard Isar method behaviour.
+
+  \medskip Method @{method my_simp} can be used in Isar proofs like
+  this:
+*}
+
+notepad
+begin
+  fix a b c
+  assume a: "a = b"
+  assume b: "b = c"
+  have "a = c" by (my_simp a b)
+end
+
+text {* Here is a similar method that operates on all subgoals,
+  instead of just the first one. *}
+
+method_setup my_simp_all = {*
+  Attrib.thms >> (fn thms => fn ctxt =>
+    SIMPLE_METHOD
+      (CHANGED
+        (ALLGOALS (asm_full_simp_tac
+          (put_simpset HOL_basic_ss ctxt addsimps thms)))))
+*} "rewrite all subgoals by given rules"
+
+notepad
+begin
+  fix a b c
+  assume a: "a = b"
+  assume b: "b = c"
+  have "a = c" and "c = b" by (my_simp_all a b)
+end
+
+text {* \medskip Apart from explicit arguments, common proof methods
+  typically work with a default configuration provided by the context.
+  As a shortcut to rule management we use a cheap solution via functor
+  @{ML_functor Named_Thms} (see also @{file
+  "~~/src/Pure/Tools/named_thms.ML"}).  *}
+
+ML {*
+  structure My_Simps =
+    Named_Thms
+      (val name = @{binding my_simp} val description = "my_simp rule")
+*}
+setup My_Simps.setup
+
+text {* This provides ML access to a list of theorems in canonical
+  declaration order via @{ML My_Simps.get}.  The user can add or
+  delete rules via the attribute @{attribute my_simp}.  The actual
+  proof method is now defined as before, but we append the explicit
+  arguments and the rules from the context.  *}
+
+method_setup my_simp' = {*
+  Attrib.thms >> (fn thms => fn ctxt =>
+    SIMPLE_METHOD' (fn i =>
+      CHANGED (asm_full_simp_tac
+        (put_simpset HOL_basic_ss ctxt
+          addsimps (thms @ My_Simps.get ctxt)) i)))
+*} "rewrite subgoal by given rules and my_simp rules from the context"
+
+text {*
+  \medskip Method @{method my_simp'} can be used in Isar proofs
+  like this:
+*}
+
+notepad
+begin
+  fix a b c
+  assume [my_simp]: "a \<equiv> b"
+  assume [my_simp]: "b \<equiv> c"
+  have "a \<equiv> c" by my_simp'
+end
+
+text {* \medskip The @{method my_simp} variants defined above are
+  ``simple'' methods, i.e.\ the goal facts are merely inserted as goal
+  premises by the @{ML SIMPLE_METHOD'} or @{ML SIMPLE_METHOD} wrapper.
+  For proof methods that are similar to the standard collection of
+  @{method simp}, @{method blast}, @{method fast}, @{method auto}
+  there is little more that can be done.
+
+  Note that using the primary goal facts in the same manner as the
+  method arguments obtained via concrete syntax or the context does
+  not meet the requirement of ``strong emphasis on facts'' of regular
+  proof methods, because rewrite rules as used above can be easily
+  ignored.  A proof text ``@{command using}~@{text "foo"}~@{command
+  "by"}~@{text "my_simp"}'' where @{text "foo"} is not used would
+  deceive the reader.
+
+  \medskip The technical treatment of rules from the context requires
+  further attention.  Above we rebuild a fresh @{ML_type simpset} from
+  the arguments and \emph{all} rules retrieved from the context on
+  every invocation of the method.  This does not scale to really large
+  collections of rules, which easily emerges in the context of a big
+  theory library, for example.
+
+  This is an inherent limitation of the simplistic rule management via
+  functor @{ML_functor Named_Thms}, because it lacks tool-specific
+  storage and retrieval.  More realistic applications require
+  efficient index-structures that organize theorems in a customized
+  manner, such as a discrimination net that is indexed by the
+  left-hand sides of rewrite rules.  For variations on the Simplifier,
+  re-use of the existing type @{ML_type simpset} is adequate, but
+  scalability would require it to be maintained statically within the
+  context data, not dynamically on each tool invocation.  *}
+
+
+section {* Attributes \label{sec:attributes} *}
+
+text {* An \emph{attribute} is a function @{text "context \<times> thm \<rightarrow>
+  context \<times> thm"}, which means both a (generic) context and a theorem
+  can be modified simultaneously.  In practice this mixed form is very
+  rare, instead attributes are presented either as \emph{declaration
+  attribute:} @{text "thm \<rightarrow> context \<rightarrow> context"} or \emph{rule
+  attribute:} @{text "context \<rightarrow> thm \<rightarrow> thm"}.
+
+  Attributes can have additional arguments via concrete syntax.  There
+  is a collection of context-sensitive parsers for various logical
+  entities (types, terms, theorems).  These already take care of
+  applying morphisms to the arguments when attribute expressions are
+  moved into a different context (see also \secref{sec:morphisms}).
+
+  When implementing declaration attributes, it is important to operate
+  exactly on the variant of the generic context that is provided by
+  the system, which is either global theory context or local proof
+  context.  In particular, the background theory of a local context
+  must not be modified in this situation! *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type attribute} \\
+  @{index_ML Thm.rule_attribute: "(Context.generic -> thm -> thm) -> attribute"} \\
+  @{index_ML Thm.declaration_attribute: "
+  (thm -> Context.generic -> Context.generic) -> attribute"} \\
+  @{index_ML Attrib.setup: "binding -> attribute context_parser ->
+  string -> theory -> theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type attribute} represents attributes as concrete
+  type alias.
+
+  \item @{ML Thm.rule_attribute}~@{text "(fn context => rule)"} wraps
+  a context-dependent rule (mapping on @{ML_type thm}) as attribute.
+
+  \item @{ML Thm.declaration_attribute}~@{text "(fn thm => decl)"}
+  wraps a theorem-dependent declaration (mapping on @{ML_type
+  Context.generic}) as attribute.
+
+  \item @{ML Attrib.setup}~@{text "name parser description"} provides
+  the functionality of the Isar command @{command attribute_setup} as
+  ML function.
+
+  \end{description}
+*}
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def attributes} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation attributes} attributes
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{attributes [\<dots>]}"} embeds attribute source
+  representation into the ML text, which is particularly useful with
+  declarations like @{ML Local_Theory.note}.  Attribute names are
+  internalized at compile time, but the source is unevaluated.  This
+  means attributes with formal arguments (types, terms, theorems) may
+  be subject to odd effects of dynamic scoping!
+
+  \end{description}
+*}
+
+text %mlex {* See also @{command attribute_setup} in
+  \cite{isabelle-isar-ref} which includes some abstract examples. *}
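+
+text {* A minimal concrete sketch in that spirit (the attribute name
+  @{text "my_sym"} and the use of the HOL rule @{thm [source] sym} are
+  illustrative choices): *}
+
+attribute_setup my_sym = {*
+  (*rule attribute: resolve the given theorem with HOL's symmetry rule*)
+  Scan.succeed (Thm.rule_attribute (fn _ => fn th => th RS @{thm sym}))
+*} "resolution with the HOL symmetry rule"
+
+notepad
+begin
+  (*hypothetical local fact, merely to exercise the attribute*)
+  fix a b :: 'a
+  assume ab: "a = b"
+  note ab [my_sym]
+end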
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Local_Theory.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,168 @@
+theory Local_Theory
+imports Base
+begin
+
+chapter {* Local theory specifications \label{ch:local-theory} *}
+
+text {*
+  A \emph{local theory} combines aspects of both theory and proof
+  context (cf.\ \secref{sec:context}), such that definitional
+  specifications may be given relatively to parameters and
+  assumptions.  A local theory is represented as a regular proof
+  context, augmented by administrative data about the \emph{target
+  context}.
+
+  The target is usually derived from the background theory by adding
+  local @{text "\<FIX>"} and @{text "\<ASSUME>"} elements, plus
+  suitable modifications of non-logical context data (e.g.\ a special
+  type-checking discipline).  Once initialized, the target is ready to
+  absorb definitional primitives: @{text "\<DEFINE>"} for terms and
+  @{text "\<NOTE>"} for theorems.  Such definitions may get
+  transformed in a target-specific way, but the programming interface
+  hides such details.
+
+  Isabelle/Pure provides target mechanisms for locales, type-classes,
+  type-class instantiations, and general overloading.  In principle,
+  users can implement new targets as well, but this rather arcane
+  discipline is beyond the scope of this manual.  In contrast,
+  implementing derived definitional packages to be used within a local
+  theory context is quite easy: the interfaces are even simpler and
+  more abstract than the underlying primitives for raw theories.
+
+  Many definitional packages for local theories are available in
+  Isabelle.  Although a few old packages only work for global
+  theories, the standard way of implementing definitional packages in
+  Isabelle is via the local theory interface.
+*}
+
+
+section {* Definitional elements *}
+
+text {*
+  There are separate elements @{text "\<DEFINE> c \<equiv> t"} for terms, and
+  @{text "\<NOTE> b = thm"} for theorems.  Types are treated
+  implicitly, according to Hindley-Milner discipline (cf.\
+  \secref{sec:variables}).  These definitional primitives essentially
+  act like @{text "let"}-bindings within a local context that may
+  already contain earlier @{text "let"}-bindings and some initial
+  @{text "\<lambda>"}-bindings.  Thus we gain \emph{dependent definitions}
+  that are relative to an initial axiomatic context.  The following
+  diagram illustrates this idea of axiomatic elements versus
+  definitional elements:
+
+  \begin{center}
+  \begin{tabular}{|l|l|l|}
+  \hline
+  & @{text "\<lambda>"}-binding & @{text "let"}-binding \\
+  \hline
+  types & fixed @{text "\<alpha>"} & arbitrary @{text "\<beta>"} \\
+  terms & @{text "\<FIX> x :: \<tau>"} & @{text "\<DEFINE> c \<equiv> t"} \\
+  theorems & @{text "\<ASSUME> a: A"} & @{text "\<NOTE> b = \<^BG>B\<^EN>"} \\
+  \hline
+  \end{tabular}
+  \end{center}
+
+  A user package merely needs to produce suitable @{text "\<DEFINE>"}
+  and @{text "\<NOTE>"} elements according to the application.  For
+  example, a package for inductive definitions might first @{text
+  "\<DEFINE>"} a certain predicate as some fixed-point construction,
+  then @{text "\<NOTE>"} a proven result about monotonicity of the
+  functor involved here, and then produce further derived concepts via
+  additional @{text "\<DEFINE>"} and @{text "\<NOTE>"} elements.
+
+  The cumulative sequence of @{text "\<DEFINE>"} and @{text "\<NOTE>"}
+  produced at package runtime is managed by the local theory
+  infrastructure by means of an \emph{auxiliary context}.  Thus the
+  system maintains the impression of working within a fully abstract
+  situation with hypothetical entities: @{text "\<DEFINE> c \<equiv> t"}
+  always results in a literal fact @{text "\<^BG>c \<equiv> t\<^EN>"}, where
+  @{text "c"} is a fixed variable @{text "c"}.  The details about
+  global constants, name spaces etc. are handled internally.
+
+  So the general structure of a local theory is a sandwich of three
+  layers:
+
+  \begin{center}
+  \framebox{\quad auxiliary context \quad\framebox{\quad target context \quad\framebox{\quad background theory\quad}}}
+  \end{center}
+
+  When a definitional package is finished, the auxiliary context is
+  reset to the target context.  The target now holds definitions for
+  terms and theorems that stem from the hypothetical @{text
+  "\<DEFINE>"} and @{text "\<NOTE>"} elements, transformed by the
+  particular target policy (see \cite[\S4--5]{Haftmann-Wenzel:2009}
+  for details).  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type local_theory: Proof.context} \\
+  @{index_ML Named_Target.init: "(local_theory -> local_theory) ->
+    string -> theory -> local_theory"} \\[1ex]
+  @{index_ML Local_Theory.define: "(binding * mixfix) * (Attrib.binding * term) ->
+    local_theory -> (term * (string * thm)) * local_theory"} \\
+  @{index_ML Local_Theory.note: "Attrib.binding * thm list ->
+    local_theory -> (string * thm list) * local_theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type local_theory} represents local theories.
+  Although this is merely an alias for @{ML_type Proof.context}, it is
+  semantically a subtype of the same: a @{ML_type local_theory} holds
+  target information as special context data.  Subtyping means that
+  any value @{text "lthy:"}~@{ML_type local_theory} can be also used
+  with operations on expecting a regular @{text "ctxt:"}~@{ML_type
+  Proof.context}.
+
+  \item @{ML Named_Target.init}~@{text "before_exit name thy"}
+  initializes a local theory derived from the given background theory.
+  An empty name refers to a \emph{global theory} context, and a
+  non-empty name refers to a @{command locale} or @{command class}
+  context (a fully-qualified internal name is expected here).  This is
+  useful for experimentation --- normally the Isar toplevel already
+  takes care to initialize the local theory context.  The given @{text
+  "before_exit"} function is invoked before leaving the context; in
+  most situations plain identity @{ML I} is sufficient.
+
+  \item @{ML Local_Theory.define}~@{text "((b, mx), (a, rhs))
+  lthy"} defines a local entity according to the specification that is
+  given relative to the current @{text "lthy"} context.  In
+  particular the term of the RHS may refer to earlier local entities
+  from the auxiliary context, or hypothetical parameters from the
+  target context.  The result is the newly defined term (which is
+  always a fixed variable with exactly the same name as specified for
+  the LHS), together with an equational theorem that states the
+  definition as a hypothetical fact.
+
+  Unless an explicit name binding is given for the RHS, the resulting
+  fact will be called @{text "b_def"}.  Any given attributes are
+  applied to that same fact --- immediately in the auxiliary context
+  \emph{and} in any transformed versions stemming from target-specific
+  policies or any later interpretations of results from the target
+  context (think of @{command locale} and @{command interpretation},
+  for example).  This means that attributes should be usually plain
+  declarations such as @{attribute simp}, while non-trivial rules like
+  @{attribute simplified} are better avoided.
+
+  \item @{ML Local_Theory.note}~@{text "(a, ths) lthy"} is
+  analogous to @{ML Local_Theory.define}, but defines facts instead of
+  terms.  There is also a slightly more general variant @{ML
+  Local_Theory.notes} that defines several facts (with attribute
+  expressions) simultaneously.
+
+  This is essentially the internal version of the @{command lemmas}
+  command, or @{command declare} if an empty name binding is given.
+
+  \end{description}
+*}
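+
+text %mlex {* A minimal sketch, for experimentation only: enter a global
+  target of the current background theory via @{ML Named_Target.init} and
+  issue a single hypothetical definition (the binding @{text "my_id"} and
+  the right-hand side are arbitrary choices).  Since @{command ML_val} is
+  diagnostic, the resulting local theory is discarded again. *}
+
+ML_val {*
+  (*global target of the present background theory*)
+  val lthy = Named_Target.init I "" @{theory};
+  (*hypothetical definition: "my_id \<equiv> \<lambda>x. x"*)
+  val ((t, (_, def_th)), lthy') =
+    Local_Theory.define
+      ((@{binding my_id}, NoSyn), ((Binding.empty, []), @{term "\<lambda>x::nat. x"})) lthy;
+*}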
+
+
+section {* Morphisms and declarations \label{sec:morphisms} *}
+
+text {*
+  %FIXME
+
+  See also \cite{Chaieb-Wenzel:2007}.
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Logic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1462 @@
+theory Logic
+imports Base
+begin
+
+chapter {* Primitive logic \label{ch:logic} *}
+
+text {*
+  The logical foundations of Isabelle/Isar are those of the Pure logic,
+  which has been introduced as a Natural Deduction framework in
+  \cite{paulson700}.  This is essentially the same logic as ``@{text
+  "\<lambda>HOL"}'' in the more abstract setting of Pure Type Systems (PTS)
+  \cite{Barendregt-Geuvers:2001}, although there are some key
+  differences in the specific treatment of simple types in
+  Isabelle/Pure.
+
+  Following type-theoretic parlance, the Pure logic consists of three
+  levels of @{text "\<lambda>"}-calculus with corresponding arrows, @{text
+  "\<Rightarrow>"} for syntactic function space (terms depending on terms), @{text
+  "\<And>"} for universal quantification (proofs depending on terms), and
+  @{text "\<Longrightarrow>"} for implication (proofs depending on proofs).
+
+  Derivations are relative to a logical theory, which declares type
+  constructors, constants, and axioms.  Theory declarations support
+  schematic polymorphism, which is strictly speaking outside the
+  logic.\footnote{This is the deeper logical reason why the theory
+  context @{text "\<Theta>"} is separate from the proof context @{text "\<Gamma>"}
+  of the core calculus: type constructors, term constants, and facts
+  (proof constants) may involve arbitrary type schemes, but the type
+  of a locally fixed term parameter is also fixed!}
+*}
+
+
+section {* Types \label{sec:types} *}
+
+text {*
+  The language of types is an uninterpreted order-sorted first-order
+  algebra; types are qualified by ordered type classes.
+
+  \medskip A \emph{type class} is an abstract syntactic entity
+  declared in the theory context.  The \emph{subclass relation} @{text
+  "c\<^sub>1 \<subseteq> c\<^sub>2"} is specified by stating an acyclic
+  generating relation; the transitive closure is maintained
+  internally.  The resulting relation is an ordering: reflexive,
+  transitive, and antisymmetric.
+
+  A \emph{sort} is a list of type classes written as @{text "s = {c\<^sub>1,
+  \<dots>, c\<^sub>m}"}; it represents a symbolic intersection.  Notationally, the
+  curly braces are omitted for singleton intersections, i.e.\ any
+  class @{text "c"} may be read as a sort @{text "{c}"}.  The ordering
+  on type classes is extended to sorts according to the meaning of
+  intersections: @{text "{c\<^sub>1, \<dots>, c\<^sub>m} \<subseteq> {d\<^sub>1, \<dots>, d\<^sub>n}"} iff @{text
+  "\<forall>j. \<exists>i. c\<^sub>i \<subseteq> d\<^sub>j"}.  The empty intersection @{text "{}"} refers to
+  the universal sort, which is the largest element wrt.\ the sort
+  order.  Thus @{text "{}"} represents the ``full sort'', not the
+  empty one!  The intersection of all (finitely many) classes declared
+  in the current theory is the least element wrt.\ the sort ordering.
+
+  \medskip A \emph{fixed type variable} is a pair of a basic name
+  (starting with a @{text "'"} character) and a sort constraint, e.g.\
+  @{text "('a, s)"} which is usually printed as @{text "\<alpha>\<^sub>s"}.
+  A \emph{schematic type variable} is a pair of an indexname and a
+  sort constraint, e.g.\ @{text "(('a, 0), s)"} which is usually
+  printed as @{text "?\<alpha>\<^sub>s"}.
+
+  Note that \emph{all} syntactic components contribute to the identity
+  of type variables: basic name, index, and sort constraint.  The core
+  logic handles type variables with the same name but different sorts
+  as different, although the type-inference layer (which is outside
+  the core) rejects anything like that.
+
+  A \emph{type constructor} @{text "\<kappa>"} is a @{text "k"}-ary operator
+  on types declared in the theory.  Type constructor application is
+  written postfix as @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>k)\<kappa>"}.  For
+  @{text "k = 0"} the argument tuple is omitted, e.g.\ @{text "prop"}
+  instead of @{text "()prop"}.  For @{text "k = 1"} the parentheses
+  are omitted, e.g.\ @{text "\<alpha> list"} instead of @{text "(\<alpha>)list"}.
+  Further notation is provided for specific constructors, notably the
+  right-associative infix @{text "\<alpha> \<Rightarrow> \<beta>"} instead of @{text "(\<alpha>,
+  \<beta>)fun"}.
+  
+  The logical category \emph{type} is defined inductively over type
+  variables and type constructors as follows: @{text "\<tau> = \<alpha>\<^sub>s | ?\<alpha>\<^sub>s |
+  (\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"}.
+
+  A \emph{type abbreviation} is a syntactic definition @{text
+  "(\<^vec>\<alpha>)\<kappa> = \<tau>"} of an arbitrary type expression @{text "\<tau>"} over
+  variables @{text "\<^vec>\<alpha>"}.  Type abbreviations appear as type
+  constructors in the syntax, but are expanded before entering the
+  logical core.
+
+  A \emph{type arity} declares the image behavior of a type
+  constructor wrt.\ the algebra of sorts: @{text "\<kappa> :: (s\<^sub>1, \<dots>,
+  s\<^sub>k)s"} means that @{text "(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"} is
+  of sort @{text "s"} if every argument type @{text "\<tau>\<^sub>i"} is
+  of sort @{text "s\<^sub>i"}.  Arity declarations are implicitly
+  completed, i.e.\ @{text "\<kappa> :: (\<^vec>s)c"} entails @{text "\<kappa> ::
+  (\<^vec>s)c'"} for any @{text "c' \<supseteq> c"}.
+
+  \medskip The sort algebra is always maintained as \emph{coregular},
+  which means that type arities are consistent with the subclass
+  relation: for any type constructor @{text "\<kappa>"}, classes @{text
+  "c\<^sub>1 \<subseteq> c\<^sub>2"}, and arities @{text "\<kappa> ::
+  (\<^vec>s\<^sub>1)c\<^sub>1"} and @{text "\<kappa> ::
+  (\<^vec>s\<^sub>2)c\<^sub>2"}, it holds that @{text "\<^vec>s\<^sub>1 \<subseteq>
+  \<^vec>s\<^sub>2"} component-wise.
+
+  The key property of a coregular order-sorted algebra is that sort
+  constraints can be solved in a most general fashion: for each type
+  constructor @{text "\<kappa>"} and sort @{text "s"} there is a most general
+  vector of argument sorts @{text "(s\<^sub>1, \<dots>, s\<^sub>k)"} such
+  that a type scheme @{text "(\<alpha>\<^bsub>s\<^sub>1\<^esub>, \<dots>,
+  \<alpha>\<^bsub>s\<^sub>k\<^esub>)\<kappa>"} is of sort @{text "s"}.
+  Consequently, type unification has most general solutions (modulo
+  equivalence of sorts), so type-inference produces principal types as
+  expected \cite{nipkow-prehofer}.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type class: string} \\
+  @{index_ML_type sort: "class list"} \\
+  @{index_ML_type arity: "string * sort list * sort"} \\
+  @{index_ML_type typ} \\
+  @{index_ML Term.map_atyps: "(typ -> typ) -> typ -> typ"} \\
+  @{index_ML Term.fold_atyps: "(typ -> 'a -> 'a) -> typ -> 'a -> 'a"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML Sign.subsort: "theory -> sort * sort -> bool"} \\
+  @{index_ML Sign.of_sort: "theory -> typ * sort -> bool"} \\
+  @{index_ML Sign.add_type: "Proof.context -> binding * int * mixfix -> theory -> theory"} \\
+  @{index_ML Sign.add_type_abbrev: "Proof.context ->
+  binding * string list * typ -> theory -> theory"} \\
+  @{index_ML Sign.primitive_class: "binding * class list -> theory -> theory"} \\
+  @{index_ML Sign.primitive_classrel: "class * class -> theory -> theory"} \\
+  @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type class} represents type classes.
+
+  \item Type @{ML_type sort} represents sorts, i.e.\ finite
+  intersections of classes.  The empty list @{ML "[]: sort"} refers to
+  the empty class intersection, i.e.\ the ``full sort''.
+
+  \item Type @{ML_type arity} represents type arities.  A triple
+  @{text "(\<kappa>, \<^vec>s, s) : arity"} represents @{text "\<kappa> ::
+  (\<^vec>s)s"} as described above.
+
+  \item Type @{ML_type typ} represents types; this is a datatype with
+  constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
+
+  \item @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
+  "f"} to all atomic types (@{ML TFree}, @{ML TVar}) occurring in
+  @{text "\<tau>"}.
+
+  \item @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
+  @{text "f"} over all occurrences of atomic types (@{ML TFree}, @{ML
+  TVar}) in @{text "\<tau>"}; the type structure is traversed from left to
+  right.
+
+  \item @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
+  tests the subsort relation @{text "s\<^sub>1 \<subseteq> s\<^sub>2"}.
+
+  \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
+  @{text "\<tau>"} is of sort @{text "s"}.
+
+  \item @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
+  new type constructor @{text "\<kappa>"} with @{text "k"} arguments and
+  optional mixfix syntax.
+
+  \item @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
+  defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"}.
+
+  \item @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
+  c\<^sub>n])"} declares a new class @{text "c"}, together with class
+  relations @{text "c \<subseteq> c\<^sub>i"}, for @{text "i = 1, \<dots>, n"}.
+
+  \item @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
+  c\<^sub>2)"} declares the class relation @{text "c\<^sub>1 \<subseteq>
+  c\<^sub>2"}.
+
+  \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
+  the arity @{text "\<kappa> :: (\<^vec>s)s"}.
+
+  \end{description}
+*}
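+
+text %mlex {*
+  The following snippet is a minimal sketch of some of these operations
+  at work.  It assumes that the background theory is based on
+  Isabelle/HOL, so that the classes @{text "order"} and @{text
+  "linorder"} and the type @{text "bool"} are available.
+*}
+
+ML {*
+  (*the sort {linorder} is a subsort of {order}*)
+  val _ = @{assert} (Sign.subsort @{theory} (@{sort linorder}, @{sort order}));
+  (*every type belongs to the empty ("full") sort*)
+  val _ = @{assert} (Sign.of_sort @{theory} (@{typ bool}, @{sort "{}"}));
+*}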
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "class"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "sort"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "type_name"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "type_abbrev"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "nonterminal"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "typ"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation class} nameref
+  ;
+  @@{ML_antiquotation sort} sort
+  ;
+  (@@{ML_antiquotation type_name} |
+   @@{ML_antiquotation type_abbrev} |
+   @@{ML_antiquotation nonterminal}) nameref
+  ;
+  @@{ML_antiquotation typ} type
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{class c}"} inlines the internalized class @{text
+  "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{sort s}"} inlines the internalized sort @{text "s"}
+  --- as @{ML_type "string list"} literal.
+
+  \item @{text "@{type_name c}"} inlines the internalized type
+  constructor @{text "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{type_abbrev c}"} inlines the internalized type
+  abbreviation @{text "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{nonterminal c}"} inlines the internalized syntactic
+  type~/ grammar nonterminal @{text "c"} --- as @{ML_type string}
+  literal.
+
+  \item @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
+  --- as constructor term for datatype @{ML_type typ}.
+
+  \end{description}
+*}
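+
+text %mlex {*
+  For example, the @{text "typ"} antiquotation inlines an ordinary
+  constructor term of the ML datatype @{ML_type typ}:
+*}
+
+ML {*
+  (*the Pure type prop as a value of type typ*)
+  @{assert} (@{typ prop} = Type ("prop", []))
+*}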
+
+
+section {* Terms \label{sec:terms} *}
+
+text {*
+  The language of terms is that of simply-typed @{text "\<lambda>"}-calculus
+  with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
+  or \cite{paulson-ml2}), with the types being determined by the
+  corresponding binders.  In contrast, free variables and constants
+  have an explicit name and type in each occurrence.
+
+  \medskip A \emph{bound variable} is a natural number @{text "b"},
+  which accounts for the number of intermediate binders between the
+  variable occurrence in the body and its binding position.  For
+  example, the de-Bruijn term @{text "\<lambda>\<^bsub>bool\<^esub>. \<lambda>\<^bsub>bool\<^esub>. 1 \<and> 0"} would
+  correspond to @{text "\<lambda>x\<^bsub>bool\<^esub>. \<lambda>y\<^bsub>bool\<^esub>. x \<and> y"} in a named
+  representation.  Note that a bound variable may be represented by
+  different de-Bruijn indices at different occurrences, depending on
+  the nesting of abstractions.
+
+  A \emph{loose variable} is a bound variable that is outside the
+  scope of local binders.  The types (and names) for loose variables
+  can be managed as a separate context that is maintained as a stack
+  of hypothetical binders.  The core logic operates on closed terms,
+  without any loose variables.
+
+  A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
+  @{text "(x, \<tau>)"} which is usually printed @{text "x\<^sub>\<tau>"} here.  A
+  \emph{schematic variable} is a pair of an indexname and a type,
+  e.g.\ @{text "((x, 0), \<tau>)"} which is likewise printed as @{text
+  "?x\<^sub>\<tau>"}.
+
+  \medskip A \emph{constant} is a pair of a basic name and a type,
+  e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text "c\<^sub>\<tau>"}
+  here.  Constants are declared in the context as polymorphic families
+  @{text "c :: \<sigma>"}, meaning that all substitution instances @{text
+  "c\<^sub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
+
+  The vector of \emph{type arguments} of constant @{text "c\<^sub>\<tau>"} wrt.\
+  the declaration @{text "c :: \<sigma>"} is defined as the codomain of the
+  matcher @{text "\<vartheta> = {?\<alpha>\<^sub>1 \<mapsto> \<tau>\<^sub>1, \<dots>, ?\<alpha>\<^sub>n \<mapsto> \<tau>\<^sub>n}"} presented in
+  canonical order @{text "(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n)"}, corresponding to the
+  left-to-right occurrences of the @{text "\<alpha>\<^sub>i"} in @{text "\<sigma>"}.
+  Within a given theory context, there is a one-to-one correspondence
+  between any constant @{text "c\<^sub>\<tau>"} and the application @{text "c(\<tau>\<^sub>1,
+  \<dots>, \<tau>\<^sub>n)"} of its type arguments.  For example, with @{text "plus :: \<alpha>
+  \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow> nat\<^esub>"} corresponds to
+  @{text "plus(nat)"}.
+
+  Constant declarations @{text "c :: \<sigma>"} may contain sort constraints
+  for type variables in @{text "\<sigma>"}.  These are observed by
+  type-inference as expected, but \emph{ignored} by the core logic.
+  This means the primitive logic is able to reason with instances of
+  polymorphic constants that the user-level type-checker would reject
+  due to violation of type class restrictions.
+
+  \medskip An \emph{atomic term} is either a variable or constant.
+  The logical category \emph{term} is defined inductively over atomic
+  terms, with abstraction and application as follows: @{text "t = b |
+  x\<^sub>\<tau> | ?x\<^sub>\<tau> | c\<^sub>\<tau> | \<lambda>\<^sub>\<tau>. t | t\<^sub>1 t\<^sub>2"}.  Parsing and printing take care of
+  converting between the internal de-Bruijn representation and an
+  external representation with named bound variables.  Subsequently,
+  we shall use the latter notation instead of the internal one.
+
+  The inductive relation @{text "t :: \<tau>"} assigns a (unique) type to a
+  term according to the structure of atomic terms, abstractions, and
+  applications:
+  \[
+  \infer{@{text "a\<^sub>\<tau> :: \<tau>"}}{}
+  \qquad
+  \infer{@{text "(\<lambda>x\<^sub>\<tau>. t) :: \<tau> \<Rightarrow> \<sigma>"}}{@{text "t :: \<sigma>"}}
+  \qquad
+  \infer{@{text "t u :: \<sigma>"}}{@{text "t :: \<tau> \<Rightarrow> \<sigma>"} & @{text "u :: \<tau>"}}
+  \]
+  A \emph{well-typed term} is a term that can be typed according to these rules.
+
+  Typing information can be omitted: type-inference is able to
+  reconstruct the most general type of a raw term, while assigning
+  most general types to all of its variables and constants.
+  Type-inference depends on a context of type constraints for fixed
+  variables, and declarations for polymorphic constants.
+
+  The identity of atomic terms consists both of the name and the type
+  component.  This means that different variables @{text
+  "x\<^bsub>\<tau>\<^sub>1\<^esub>"} and @{text "x\<^bsub>\<tau>\<^sub>2\<^esub>"} may become the same after
+  type instantiation.  Type-inference rejects variables of the same
+  name but different types.  In contrast, mixed instances of
+  polymorphic constants occur routinely.
+
+  \medskip The \emph{hidden polymorphism} of a term @{text "t :: \<sigma>"}
+  is the set of type variables occurring in @{text "t"}, but not in
+  its type @{text "\<sigma>"}.  This means that the term implicitly depends
+  on type arguments that are not accounted for in the result type, i.e.\
+  there are different type instances @{text "t\<vartheta> :: \<sigma>"} and
+  @{text "t\<vartheta>' :: \<sigma>"} with the same type.  This slightly
+  pathological situation notoriously demands additional care.
+
+  \medskip A \emph{term abbreviation} is a syntactic definition @{text
+  "c\<^sub>\<sigma> \<equiv> t"} of a closed term @{text "t"} of type @{text "\<sigma>"},
+  without any hidden polymorphism.  A term abbreviation looks like a
+  constant in the syntax, but is expanded before entering the logical
+  core.  Abbreviations are usually reverted when printing terms, using
+  @{text "t \<rightarrow> c\<^sub>\<sigma>"} as rules for higher-order rewriting.
+
+  \medskip Canonical operations on @{text "\<lambda>"}-terms include @{text
+  "\<alpha>\<beta>\<eta>"}-conversion: @{text "\<alpha>"}-conversion refers to capture-free
+  renaming of bound variables; @{text "\<beta>"}-conversion contracts an
+  abstraction applied to an argument term, substituting the argument
+  in the body: @{text "(\<lambda>x. b)a"} becomes @{text "b[a/x]"}; @{text
+  "\<eta>"}-conversion contracts vacuous application-abstraction: @{text
+  "\<lambda>x. f x"} becomes @{text "f"}, provided that the bound variable
+  does not occur in @{text "f"}.
+
+  Terms are normally treated modulo @{text "\<alpha>"}-conversion, which is
+  implicit in the de-Bruijn representation.  Names for bound variables
+  in abstractions are maintained separately as (meaningless) comments,
+  mostly for parsing and printing.  Full @{text "\<alpha>\<beta>\<eta>"}-conversion is
+  commonplace in various standard operations (\secref{sec:obj-rules})
+  that are based on higher-order unification and matching.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type term} \\
+  @{index_ML_op "aconv": "term * term -> bool"} \\
+  @{index_ML Term.map_types: "(typ -> typ) -> term -> term"} \\
+  @{index_ML Term.fold_types: "(typ -> 'a -> 'a) -> term -> 'a -> 'a"} \\
+  @{index_ML Term.map_aterms: "(term -> term) -> term -> term"} \\
+  @{index_ML Term.fold_aterms: "(term -> 'a -> 'a) -> term -> 'a -> 'a"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML fastype_of: "term -> typ"} \\
+  @{index_ML lambda: "term -> term -> term"} \\
+  @{index_ML betapply: "term * term -> term"} \\
+  @{index_ML incr_boundvars: "int -> term -> term"} \\
+  @{index_ML Sign.declare_const: "Proof.context ->
+  (binding * typ) * mixfix -> theory -> term * theory"} \\
+  @{index_ML Sign.add_abbrev: "string -> binding * term ->
+  theory -> (term * term) * theory"} \\
+  @{index_ML Sign.const_typargs: "theory -> string * typ -> typ list"} \\
+  @{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type term} represents de-Bruijn terms, with comments
+  in abstractions, and explicitly named free variables and constants;
+  this is a datatype with constructors @{index_ML Bound}, @{index_ML
+  Free}, @{index_ML Var}, @{index_ML Const}, @{index_ML Abs},
+  @{index_ML_op "$"}.
+
+  \item @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
+  "\<alpha>"}-equivalence of two terms.  This is the basic equality relation
+  on type @{ML_type term}; raw datatype equality should only be used
+  for operations related to parsing or printing!
+
+  \item @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
+  "f"} to all types occurring in @{text "t"}.
+
+  \item @{ML Term.fold_types}~@{text "f t"} iterates the operation
+  @{text "f"} over all occurrences of types in @{text "t"}; the term
+  structure is traversed from left to right.
+
+  \item @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
+  "f"} to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
+  Const}) occurring in @{text "t"}.
+
+  \item @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
+  @{text "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML
+  Free}, @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
+  traversed from left to right.
+
+  \item @{ML fastype_of}~@{text "t"} determines the type of a
+  well-typed term.  This operation is relatively slow, despite the
+  omission of any sanity checks.
+
+  \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
+  "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
+  body @{text "b"} are replaced by bound variables.
+
+  \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
+  "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
+  abstraction.
+
+  \item @{ML incr_boundvars}~@{text "j"} increments a term's dangling
+  bound variables by the offset @{text "j"}.  This is required when
+  moving a subterm into a context where it is enclosed by a different
+  number of abstractions.  Bound variables with a matching abstraction
+  are unaffected.
+
+  \item @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
+  a new constant @{text "c :: \<sigma>"} with optional mixfix syntax.
+
+  \item @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
+  introduces a new term abbreviation @{text "c \<equiv> t"}.
+
+  \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
+  Sign.const_instance}~@{text "thy (c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])"}
+  convert between two representations of polymorphic constants: full
+  type instance vs.\ compact type arguments form.
+
+  \end{description}
+*}
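+
+text %mlex {*
+  The following sketch illustrates some of these operations; it assumes
+  an Isabelle/HOL context that provides the type @{text "bool"} and the
+  constant @{text "True"}.
+*}
+
+ML {*
+  val x = Free ("x", @{typ bool});
+  val id_abs = lambda x x;                    (*the identity abstraction*)
+  val _ = @{assert} (fastype_of id_abs = @{typ "bool \<Rightarrow> bool"});
+  (*topmost beta-reduction of an application*)
+  val _ = @{assert} (betapply (id_abs, @{term True}) aconv @{term True});
+*}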
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "const_name"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "const_abbrev"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "const"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "term"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "prop"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  (@@{ML_antiquotation const_name} |
+   @@{ML_antiquotation const_abbrev}) nameref
+  ;
+  @@{ML_antiquotation const} ('(' (type + ',') ')')?
+  ;
+  @@{ML_antiquotation term} term
+  ;
+  @@{ML_antiquotation prop} prop
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{const_name c}"} inlines the internalized logical
+  constant name @{text "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{const_abbrev c}"} inlines the internalized
+  abbreviated constant name @{text "c"} --- as @{ML_type string}
+  literal.
+
+  \item @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
+  constant @{text "c"} with precise type instantiation in the sense of
+  @{ML Sign.const_instance} --- as @{ML Const} constructor term for
+  datatype @{ML_type term}.
+
+  \item @{text "@{term t}"} inlines the internalized term @{text "t"}
+  --- as constructor term for datatype @{ML_type term}.
+
+  \item @{text "@{prop \<phi>}"} inlines the internalized proposition
+  @{text "\<phi>"} --- as constructor term for datatype @{ML_type term}.
+
+  \end{description}
+*}
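+
+text %mlex {*
+  For example (assuming an Isabelle/HOL context), terms and
+  propositions are inlined as constructor terms of datatype @{ML_type
+  term}:
+*}
+
+ML {*
+  val t = @{term "\<lambda>x::bool. x"};        (*an Abs with a Bound body*)
+  val A = @{prop "x = (x::bool)"};       (*wrapped into HOL's Trueprop*)
+  val _ = @{assert} (fastype_of A = @{typ prop});
+*}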
+
+
+section {* Theorems \label{sec:thms} *}
+
+text {*
+  A \emph{proposition} is a well-typed term of type @{text "prop"}, a
+  \emph{theorem} is a proven proposition (depending on a context of
+  hypotheses and the background theory).  Primitive inferences include
+  plain Natural Deduction rules for the primary connectives @{text
+  "\<And>"} and @{text "\<Longrightarrow>"} of the framework.  There is also a builtin
+  notion of equality/equivalence @{text "\<equiv>"}.
+*}
+
+
+subsection {* Primitive connectives and rules \label{sec:prim-rules} *}
+
+text {*
+  The theory @{text "Pure"} contains constant declarations for the
+  primitive connectives @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"} of
+  the logical framework, see \figref{fig:pure-connectives}.  The
+  derivability judgment @{text "A\<^sub>1, \<dots>, A\<^sub>n \<turnstile> B"} is
+  defined inductively by the primitive inferences given in
+  \figref{fig:prim-rules}, with the global restriction that the
+  hypotheses must \emph{not} contain any schematic variables.  The
+  builtin equality is conceptually axiomatized as shown in
+  \figref{fig:pure-equality}, although the implementation works
+  directly with derived inferences.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{ll}
+  @{text "all :: (\<alpha> \<Rightarrow> prop) \<Rightarrow> prop"} & universal quantification (binder @{text "\<And>"}) \\
+  @{text "\<Longrightarrow> :: prop \<Rightarrow> prop \<Rightarrow> prop"} & implication (right associative infix) \\
+  @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} & equality relation (infix) \\
+  \end{tabular}
+  \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
+  \end{center}
+  \end{figure}
+
+  \begin{figure}[htb]
+  \begin{center}
+  \[
+  \infer[@{text "(axiom)"}]{@{text "\<turnstile> A"}}{@{text "A \<in> \<Theta>"}}
+  \qquad
+  \infer[@{text "(assume)"}]{@{text "A \<turnstile> A"}}{}
+  \]
+  \[
+  \infer[@{text "(\<And>\<hyphen>intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. B[x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
+  \qquad
+  \infer[@{text "(\<And>\<hyphen>elim)"}]{@{text "\<Gamma> \<turnstile> B[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. B[x]"}}
+  \]
+  \[
+  \infer[@{text "(\<Longrightarrow>\<hyphen>intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+  \qquad
+  \infer[@{text "(\<Longrightarrow>\<hyphen>elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+  \]
+  \caption{Primitive inferences of Pure}\label{fig:prim-rules}
+  \end{center}
+  \end{figure}
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{ll}
+  @{text "\<turnstile> (\<lambda>x. b[x]) a \<equiv> b[a]"} & @{text "\<beta>"}-conversion \\
+  @{text "\<turnstile> x \<equiv> x"} & reflexivity \\
+  @{text "\<turnstile> x \<equiv> y \<Longrightarrow> P x \<Longrightarrow> P y"} & substitution \\
+  @{text "\<turnstile> (\<And>x. f x \<equiv> g x) \<Longrightarrow> f \<equiv> g"} & extensionality \\
+  @{text "\<turnstile> (A \<Longrightarrow> B) \<Longrightarrow> (B \<Longrightarrow> A) \<Longrightarrow> A \<equiv> B"} & logical equivalence \\
+  \end{tabular}
+  \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
+  \end{center}
+  \end{figure}
+
+  The introduction and elimination rules for @{text "\<And>"} and @{text
+  "\<Longrightarrow>"} are analogous to formation of dependently typed @{text
+  "\<lambda>"}-terms representing the underlying proof objects.  Proof terms
+  are irrelevant in the Pure logic, though; they cannot occur within
+  propositions.  The system provides a runtime option to record
+  explicit proof terms for primitive inferences, see also
+  \secref{sec:proof-terms}.  Thus all three levels of @{text
+  "\<lambda>"}-calculus become explicit: @{text "\<Rightarrow>"} for terms, and @{text
+  "\<And>/\<Longrightarrow>"} for proofs (cf.\ \cite{Berghofer-Nipkow:2000:TPHOL}).
+
+  Observe that locally fixed parameters (as in @{text
+  "\<And>\<hyphen>intro"}) need not be recorded in the hypotheses, because
+  the simple syntactic types of Pure are always inhabitable.
+  ``Assumptions'' @{text "x :: \<tau>"} for type-membership are only
+  present as long as some @{text "x\<^sub>\<tau>"} occurs in the statement
+  body.\footnote{This is the key difference to ``@{text "\<lambda>HOL"}'' in
+  the PTS framework \cite{Barendregt-Geuvers:2001}, where hypotheses
+  @{text "x : A"} are treated uniformly for propositions and types.}
+
+  \medskip The axiomatization of a theory is implicitly closed by
+  forming all instances of type and term variables: @{text "\<turnstile>
+  A\<vartheta>"} holds for any substitution instance of an axiom
+  @{text "\<turnstile> A"}.  By pushing substitutions through derivations
+  inductively, we also get admissible @{text "generalize"} and @{text
+  "instantiate"} rules as shown in \figref{fig:subst-rules}.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \[
+  \infer{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}{@{text "\<Gamma> \<turnstile> B[\<alpha>]"} & @{text "\<alpha> \<notin> \<Gamma>"}}
+  \quad
+  \infer[\quad@{text "(generalize)"}]{@{text "\<Gamma> \<turnstile> B[?x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
+  \]
+  \[
+  \infer{@{text "\<Gamma> \<turnstile> B[\<tau>]"}}{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}
+  \quad
+  \infer[\quad@{text "(instantiate)"}]{@{text "\<Gamma> \<turnstile> B[t]"}}{@{text "\<Gamma> \<turnstile> B[?x]"}}
+  \]
+  \caption{Admissible substitution rules}\label{fig:subst-rules}
+  \end{center}
+  \end{figure}
+
+  Note that @{text "instantiate"} does not require an explicit
+  side-condition, because @{text "\<Gamma>"} may never contain schematic
+  variables.
+
+  In principle, variables could be substituted in hypotheses as well,
+  but this would disrupt the monotonicity of reasoning: deriving
+  @{text "\<Gamma>\<vartheta> \<turnstile> B\<vartheta>"} from @{text "\<Gamma> \<turnstile> B"} is
+  correct, but @{text "\<Gamma>\<vartheta> \<supseteq> \<Gamma>"} does not necessarily hold:
+  the result belongs to a different proof context.
+
+  \medskip An \emph{oracle} is a function that produces axioms on the
+  fly.  Logically, this is an instance of the @{text "axiom"} rule
+  (\figref{fig:prim-rules}), but there is an operational difference.
+  The system always records oracle invocations within derivations of
+  theorems by a unique tag.
+
+  Axiomatizations should be limited to the bare minimum, typically as
+  part of the initial logical basis of an object-logic formalization.
+  Later on, theories are usually developed in a strictly definitional
+  fashion, by stating only certain equalities over new constants.
+
+  A \emph{simple definition} consists of a constant declaration @{text
+  "c :: \<sigma>"} together with an axiom @{text "\<turnstile> c \<equiv> t"}, where @{text "t
+  :: \<sigma>"} is a closed term without any hidden polymorphism.  The RHS
+  may depend on further defined constants, but not @{text "c"} itself.
+  Definitions of functions may be presented as @{text "c \<^vec>x \<equiv>
+  t"} instead of the puristic @{text "c \<equiv> \<lambda>\<^vec>x. t"}.
+
+  An \emph{overloaded definition} consists of a collection of axioms
+  for the same constant, with zero or one equations @{text
+  "c((\<^vec>\<alpha>)\<kappa>) \<equiv> t"} for each type constructor @{text "\<kappa>"} (for
+  distinct variables @{text "\<^vec>\<alpha>"}).  The RHS may mention
+  previously defined constants as above, or arbitrary constants @{text
+  "d(\<alpha>\<^sub>i)"} for some @{text "\<alpha>\<^sub>i"} projected from @{text
+  "\<^vec>\<alpha>"}.  Thus overloaded definitions essentially work by
+  primitive recursion over the syntactic structure of a single type
+  argument.  See also \cite[\S4.3]{Haftmann-Wenzel:2006:classes}.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Logic.all: "term -> term -> term"} \\
+  @{index_ML Logic.mk_implies: "term * term -> term"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type ctyp} \\
+  @{index_ML_type cterm} \\
+  @{index_ML Thm.ctyp_of: "theory -> typ -> ctyp"} \\
+  @{index_ML Thm.cterm_of: "theory -> term -> cterm"} \\
+  @{index_ML Thm.apply: "cterm -> cterm -> cterm"} \\
+  @{index_ML Thm.lambda: "cterm -> cterm -> cterm"} \\
+  @{index_ML Thm.all: "cterm -> cterm -> cterm"} \\
+  @{index_ML Drule.mk_implies: "cterm * cterm -> cterm"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type thm} \\
+  @{index_ML Thm.peek_status: "thm -> {oracle: bool, unfinished: bool, failed: bool}"} \\
+  @{index_ML Thm.transfer: "theory -> thm -> thm"} \\
+  @{index_ML Thm.assume: "cterm -> thm"} \\
+  @{index_ML Thm.forall_intr: "cterm -> thm -> thm"} \\
+  @{index_ML Thm.forall_elim: "cterm -> thm -> thm"} \\
+  @{index_ML Thm.implies_intr: "cterm -> thm -> thm"} \\
+  @{index_ML Thm.implies_elim: "thm -> thm -> thm"} \\
+  @{index_ML Thm.generalize: "string list * string list -> int -> thm -> thm"} \\
+  @{index_ML Thm.instantiate: "(ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm"} \\
+  @{index_ML Thm.add_axiom: "Proof.context ->
+  binding * term -> theory -> (string * thm) * theory"} \\
+  @{index_ML Thm.add_oracle: "binding * ('a -> cterm) -> theory ->
+  (string * ('a -> thm)) * theory"} \\
+  @{index_ML Thm.add_def: "Proof.context -> bool -> bool ->
+  binding * term -> theory -> (string * thm) * theory"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML Theory.add_deps: "Proof.context -> string ->
+  string * typ -> (string * typ) list -> theory -> theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Thm.peek_status}~@{text "thm"} informs about the current
+  status of the derivation object behind the given theorem.  This is a
+  snapshot of a potentially ongoing (parallel) evaluation of proofs.
+  The three Boolean values indicate the following: @{verbatim oracle}
+  if the finished part contains some oracle invocation; @{verbatim
+  unfinished} if some future proofs are still pending; @{verbatim
+  failed} if some future proof has failed, rendering the theorem
+  invalid!
+
+  \item @{ML Logic.all}~@{text "a B"} produces a Pure quantification
+  @{text "\<And>a. B"}, where occurrences of the atomic term @{text "a"} in
+  the body proposition @{text "B"} are replaced by bound variables.
+  (See also @{ML lambda} on terms.)
+
+  \item @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
+  implication @{text "A \<Longrightarrow> B"}.
+
+  \item Types @{ML_type ctyp} and @{ML_type cterm} represent certified
+  types and terms, respectively.  These are abstract datatypes that
+  guarantee that their values have passed the full well-formedness (and
+  well-typedness) checks, relative to the declarations of type
+  constructors, constants etc.\ in the background theory.  The
+  abstract types @{ML_type ctyp} and @{ML_type cterm} are part of the
+  same inference kernel that is mainly responsible for @{ML_type thm}.
+  Thus syntactic operations on @{ML_type ctyp} and @{ML_type cterm}
+  are located in the @{ML_structure Thm} module, even though theorems are
+  not yet involved at that stage.
+
+  \item @{ML Thm.ctyp_of}~@{text "thy \<tau>"} and @{ML
+  Thm.cterm_of}~@{text "thy t"} explicitly check types and terms,
+  respectively.  This also involves some basic normalizations, such as
+  expansion of type and term abbreviations from the theory context.
+  Full re-certification is relatively slow and should be avoided in
+  tight reasoning loops.
+
+  \item @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
+  Drule.mk_implies} etc.\ compose certified terms (or propositions)
+  incrementally.  This is equivalent to @{ML Thm.cterm_of} after
+  unchecked @{ML_op "$"}, @{ML lambda}, @{ML Logic.all}, @{ML
+  Logic.mk_implies} etc., but there can be a big difference in
+  performance when large existing entities are composed by a few extra
+  constructions on top.  There are separate operations to decompose
+  certified terms and theorems to produce certified terms again.
+
+  \item Type @{ML_type thm} represents proven propositions.  This is
+  an abstract datatype that guarantees that its values have been
+  constructed by basic principles of the @{ML_structure Thm} module.
+  Every @{ML_type thm} value refers to its background theory,
+  cf.\ \secref{sec:context-theory}.
+
+  \item @{ML Thm.transfer}~@{text "thy thm"} transfers the given
+  theorem to a \emph{larger} theory, see also \secref{sec:context}.
+  This formal adjustment of the background context has no logical
+  significance, but is occasionally required for formal reasons, e.g.\
+  when theorems that are imported from more basic theories are used in
+  the current situation.
+
+  \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
+  Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
+  correspond to the primitive inferences of \figref{fig:prim-rules}.
+
+  \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
+  corresponds to the @{text "generalize"} rules of
+  \figref{fig:subst-rules}.  Here collections of type and term
+  variables are generalized simultaneously, specified by the given
+  basic names.
+
+  \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
+  \<^vec>x\<^sub>\<tau>)"} corresponds to the @{text "instantiate"} rules
+  of \figref{fig:subst-rules}.  Type variables are substituted before
+  term variables.  Note that the types in @{text "\<^vec>x\<^sub>\<tau>"}
+  refer to the instantiated versions.
+
+  \item @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
+  arbitrary proposition as axiom, and retrieves it as a theorem from
+  the resulting theory, cf.\ @{text "axiom"} in
+  \figref{fig:prim-rules}.  Note that the low-level representation in
+  the axiom table may differ slightly from the returned theorem.
+
+  \item @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
+  oracle rule, essentially generating arbitrary axioms on the fly,
+  cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
+
+  \item @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
+  \<^vec>x \<equiv> t)"} states a definitional axiom for an existing constant
+  @{text "c"}.  Dependencies are recorded via @{ML Theory.add_deps},
+  unless the @{text "unchecked"} option is set.  Note that the
+  low-level representation in the axiom table may differ slightly from
+  the returned theorem.
+
+  \item @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
+  declares dependencies of a named specification for constant @{text
+  "c\<^sub>\<tau>"}, relative to existing specifications for constants @{text
+  "\<^vec>d\<^sub>\<sigma>"}.
+
+  \end{description}
+*}
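+
+text %mlex {*
+  As a minimal sketch of these primitives (assuming an Isabelle/HOL
+  context for the type @{text "bool"}), the trivial rule @{text "A \<Longrightarrow> A"}
+  is derived by @{text "assume"} followed by @{text "\<Longrightarrow>\<hyphen>intro"}:
+*}
+
+ML {*
+  val A = @{cprop "A::bool"};                    (*certified proposition*)
+  val triv = Thm.implies_intr A (Thm.assume A);  (*from A \<turnstile> A to \<turnstile> A \<Longrightarrow> A*)
+*}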
+
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "ctyp"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "cterm"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "cprop"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "thm"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "thms"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "lemma"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation ctyp} typ
+  ;
+  @@{ML_antiquotation cterm} term
+  ;
+  @@{ML_antiquotation cprop} prop
+  ;
+  @@{ML_antiquotation thm} thmref
+  ;
+  @@{ML_antiquotation thms} thmrefs
+  ;
+  @@{ML_antiquotation lemma} ('(' @'open' ')')? ((prop +) + @'and') \<newline>
+    @'by' method method?
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
+  current background theory --- as abstract value of type @{ML_type
+  ctyp}.
+
+  \item @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
+  certified term wrt.\ the current background theory --- as abstract
+  value of type @{ML_type cterm}.
+
+  \item @{text "@{thm a}"} produces a singleton fact --- as abstract
+  value of type @{ML_type thm}.
+
+  \item @{text "@{thms a}"} produces a general fact --- as abstract
+  value of type @{ML_type "thm list"}.
+
+  \item @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
+  the spot according to the minimal proof, which imitates a terminal
+  Isar proof.  The result is an abstract value of type @{ML_type thm}
+  or @{ML_type "thm list"}, depending on the number of propositions
+  given here.
+
+  The internal derivation object lacks a proper theorem name, but it
+  is formally closed, unless the @{text "(open)"} option is specified
+  (this may impact performance of applications with proof terms).
+
+  Since ML antiquotations are always evaluated at compile-time, there
+  is no run-time overhead even for non-trivial proofs.  Nonetheless,
+  the justification is syntactically limited to a single @{command
+  "by"} step.  More complex Isar proofs should be done in regular
+  theory source, before compiling the corresponding ML text that uses
+  the result.
+
+  \end{description}
+
+*}
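+
+text %mlex {*
+  For example (assuming an Isabelle/HOL context with the usual rule
+  @{text "refl"}), a fact may be proven on the spot and bound to an ML
+  value:
+*}
+
+ML {*
+  val triv_eq = @{lemma "x = x" by (rule refl)};   (*a value of type thm*)
+*}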
+
+
+subsection {* Auxiliary connectives \label{sec:logic-aux} *}
+
+text {* Theory @{text "Pure"} provides a few auxiliary connectives
+  that are defined on top of the primitive ones, see
+  \figref{fig:pure-aux}.  These special constants are useful in
+  certain internal encodings, and are normally not directly exposed to
+  the user.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{ll}
+  @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&&&"}) \\
+  @{text "\<turnstile> A &&& B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
+  @{text "prop :: prop \<Rightarrow> prop"} & (prefix @{text "#"}, suppressed) \\
+  @{text "#A \<equiv> A"} \\[1ex]
+  @{text "term :: \<alpha> \<Rightarrow> prop"} & (prefix @{text "TERM"}) \\
+  @{text "term x \<equiv> (\<And>A. A \<Longrightarrow> A)"} \\[1ex]
+  @{text "type :: \<alpha> itself"} & (prefix @{text "TYPE"}) \\
+  @{text "(unspecified)"} \\
+  \end{tabular}
+  \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
+  \end{center}
+  \end{figure}
+
+  The introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &&& B"}, and eliminations
+  (projections) @{text "A &&& B \<Longrightarrow> A"} and @{text "A &&& B \<Longrightarrow> B"} are
+  available as derived rules.  Conjunction allows simultaneous
+  assumptions and conclusions to be treated uniformly, e.g.\ consider
+  @{text "A \<Longrightarrow> B \<Longrightarrow> C &&& D"}.  In particular, the goal mechanism
+  represents multiple claims as explicit conjunction internally, but
+  this is refined (via backwards introduction) into separate sub-goals
+  before the user commences the proof; the final result is projected
+  into a list of theorems using eliminations (cf.\
+  \secref{sec:tactical-goals}).
+
+  The @{text "prop"} marker (@{text "#"}) makes arbitrarily complex
+  propositions appear as atomic, without changing the meaning: @{text
+  "\<Gamma> \<turnstile> A"} and @{text "\<Gamma> \<turnstile> #A"} are interchangeable.  See
+  \secref{sec:tactical-goals} for specific operations.
+
+  The @{text "term"} marker turns any well-typed term into a derivable
+  proposition: @{text "\<turnstile> TERM t"} holds unconditionally.  Although
+  this is logically vacuous, it allows terms and proofs to be treated
+  uniformly, similar to a type-theoretic framework.
+
+  The @{text "TYPE"} constructor is the canonical representative of
+  the unspecified type @{text "\<alpha> itself"}; it essentially injects the
+  language of types into that of terms.  There is specific notation
+  @{text "TYPE(\<tau>)"} for @{text "TYPE\<^bsub>\<tau> itself\<^esub>"}.
+  Although devoid of any particular meaning, the term @{text
+  "TYPE(\<tau>)"} accounts for the type @{text "\<tau>"} within the term
+  language.  In particular, @{text "TYPE(\<alpha>)"} may be used as formal
+  argument in primitive definitions, in order to circumvent hidden
+  polymorphism (cf.\ \secref{sec:terms}).  For example, @{text "c
+  TYPE(\<alpha>) \<equiv> A[\<alpha>]"} defines @{text "c :: \<alpha> itself \<Rightarrow> prop"} in terms of
+  a proposition @{text "A"} that depends on an additional type
+  argument, which is essentially a predicate on types.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Conjunction.intr: "thm -> thm -> thm"} \\
+  @{index_ML Conjunction.elim: "thm -> thm * thm"} \\
+  @{index_ML Drule.mk_term: "cterm -> thm"} \\
+  @{index_ML Drule.dest_term: "thm -> cterm"} \\
+  @{index_ML Logic.mk_type: "typ -> term"} \\
+  @{index_ML Logic.dest_type: "term -> typ"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
+  "A"} and @{text "B"}.
+
+  \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
+  from @{text "A &&& B"}.
+
+  \item @{ML Drule.mk_term} derives @{text "TERM t"}.
+
+  \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
+  "TERM t"}.
+
+  \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
+  "TYPE(\<tau>)"}.
+
+  \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
+  @{text "\<tau>"}.
+
+  \end{description}
+*}
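+
+text %mlex {*
+  For example, a type can be injected into the term language and
+  recovered again; this sketch only uses the Pure type @{text "prop"}:
+*}
+
+ML {*
+  @{assert} (Logic.dest_type (Logic.mk_type @{typ prop}) = @{typ prop})
+*}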
+
+
+subsection {* Sort hypotheses *}
+
+text {* Type variables are decorated with sorts, as explained in
+  \secref{sec:types}.  This constrains type instantiation to certain
+  ranges of types: variable @{text "\<alpha>\<^sub>s"} may only be assigned to types
+  @{text "\<tau>"} that belong to sort @{text "s"}.  Within the logic, sort
+  constraints act like implicit preconditions on the result @{text
+  "\<lparr>\<alpha>\<^sub>1 : s\<^sub>1\<rparr>, \<dots>, \<lparr>\<alpha>\<^sub>n : s\<^sub>n\<rparr>, \<Gamma> \<turnstile> \<phi>"} where the type variables @{text
+  "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} cover the propositions @{text "\<Gamma>"}, @{text "\<phi>"}, as
+  well as the proof of @{text "\<Gamma> \<turnstile> \<phi>"}.
+
+  These \emph{sort hypotheses} of a theorem are passed monotonically
+  through further derivations.  They are redundant, as long as the
+  statement of a theorem still contains the type variables that are
+  accounted here.  The logical significance of sort hypotheses is
+  limited to the boundary case where type variables disappear from the
+  proposition, e.g.\ @{text "\<lparr>\<alpha>\<^sub>s : s\<rparr> \<turnstile> \<phi>"}.  Since such dangling type
+  variables can be renamed arbitrarily without changing the
+  proposition @{text "\<phi>"}, the inference kernel maintains sort
+  hypotheses in anonymous form @{text "s \<turnstile> \<phi>"}.
+
+  In most practical situations, such extra sort hypotheses may be
+  stripped in a final bookkeeping step, e.g.\ at the end of a proof:
+  they are typically left over from intermediate reasoning with type
+  classes that can be satisfied by some concrete type @{text "\<tau>"} of
+  sort @{text "s"} to replace the hypothetical type variable @{text
+  "\<alpha>\<^sub>s"}.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Thm.extra_shyps: "thm -> sort list"} \\
+  @{index_ML Thm.strip_shyps: "thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
+  sort hypotheses of the given theorem, i.e.\ the sorts that are not
+  present within type variables of the statement.
+
+  \item @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
+  sort hypotheses that can be witnessed from the type signature.
+
+  \end{description}
+*}
+
+text %mlex {* The following artificial example demonstrates the
+  derivation of @{prop False} with a pending sort hypothesis involving
+  a logically empty sort.  *}
+
+class empty =
+  assumes bad: "\<And>(x::'a) y. x \<noteq> y"
+
+theorem (in empty) false: False
+  using bad by blast
+
+ML {*
+  @{assert} (Thm.extra_shyps @{thm false} = [@{sort empty}])
+*}
+
+text {* Thanks to the inference kernel managing sort hypotheses
+  according to their logical significance, this example is merely an
+  instance of \emph{ex falso quodlibet consequitur} --- not a collapse
+  of the logical framework! *}
+
+
+section {* Object-level rules \label{sec:obj-rules} *}
+
+text {*
+  The primitive inferences covered so far mostly serve foundational
+  purposes.  User-level reasoning usually works via object-level rules
+  that are represented as theorems of Pure.  Composition of rules
+  involves \emph{backchaining}, \emph{higher-order unification} modulo
+  @{text "\<alpha>\<beta>\<eta>"}-conversion of @{text "\<lambda>"}-terms, and so-called
+  \emph{lifting} of rules into a context of @{text "\<And>"} and @{text
+  "\<Longrightarrow>"} connectives.  Thus the full power of higher-order Natural
+  Deduction in Isabelle/Pure becomes readily available.
+*}
+
+
+subsection {* Hereditary Harrop Formulae *}
+
+text {*
+  The idea of object-level rules is to model Natural Deduction
+  inferences in the style of Gentzen \cite{Gentzen:1935}, but we allow
+  arbitrary nesting similar to \cite{extensions91}.  The most basic
+  rule format is that of a \emph{Horn Clause}:
+  \[
+  \infer{@{text "A"}}{@{text "A\<^sub>1"} & @{text "\<dots>"} & @{text "A\<^sub>n"}}
+  \]
+  where @{text "A, A\<^sub>1, \<dots>, A\<^sub>n"} are atomic propositions
+  of the framework, usually of the form @{text "Trueprop B"}, where
+  @{text "B"} is a (compound) object-level statement.  This
+  object-level inference corresponds to an iterated implication in
+  Pure like this:
+  \[
+  @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"}
+  \]
+  As an example consider conjunction introduction: @{text "A \<Longrightarrow> B \<Longrightarrow> A \<and>
+  B"}.  Any parameters occurring in such rule statements are
+  conceptually treated as arbitrary:
+  \[
+  @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. A\<^sub>1 x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> \<dots> A\<^sub>n x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> A x\<^sub>1 \<dots> x\<^sub>m"}
+  \]
+
+  Nesting of rules means that the positions of @{text "A\<^sub>i"} may
+  again hold compound rules, not just atomic propositions.
+  Propositions of this format are called \emph{Hereditary Harrop
+  Formulae} in the literature \cite{Miller:1991}.  Here we give an
+  inductive characterization as follows:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "\<^bold>x"} & set of variables \\
+  @{text "\<^bold>A"} & set of atomic propositions \\
+  @{text "\<^bold>H  =  \<And>\<^bold>x\<^sup>*. \<^bold>H\<^sup>* \<Longrightarrow> \<^bold>A"} & set of Hereditary Harrop Formulas \\
+  \end{tabular}
+  \medskip
+
+  Thus we essentially impose nesting levels on propositions formed
+  from @{text "\<And>"} and @{text "\<Longrightarrow>"}.  At each level there is a prefix
+  of parameters and compound premises, concluding an atomic
+  proposition.  Typical examples are @{text "\<longrightarrow>"}-introduction @{text
+  "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B"} or mathematical induction @{text "P 0 \<Longrightarrow> (\<And>n. P n
+  \<Longrightarrow> P (Suc n)) \<Longrightarrow> P n"}.  Even deeper nesting occurs in well-founded
+  induction @{text "(\<And>x. (\<And>y. y \<prec> x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P x"}, but this
+  already marks the limit of rule complexity that is usually seen in
+  practice.
+
+  \medskip Regular user-level inferences in Isabelle/Pure always
+  maintain the following canonical form of results:
+
+  \begin{itemize}
+
+  \item Normalization by @{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"},
+  which is a theorem of Pure, means that quantifiers are pushed in
+  front of implication at each level of nesting.  The normal form is a
+  Hereditary Harrop Formula.
+
+  \item The outermost prefix of parameters is represented via
+  schematic variables: instead of @{text "\<And>\<^vec>x. \<^vec>H \<^vec>x
+  \<Longrightarrow> A \<^vec>x"} we have @{text "\<^vec>H ?\<^vec>x \<Longrightarrow> A ?\<^vec>x"}.
+  Note that this representation loses information about the order of
+  parameters, and vacuous quantifiers vanish automatically.
+
+  \end{itemize}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
+  theorem according to the canonical form specified above.  This is
+  occasionally helpful to repair some low-level tools that do not
+  handle Hereditary Harrop Formulae properly.
+
+  \end{description}
+*}
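+
+text %mlex {*
+  For example (assuming Isabelle/HOL's rule @{text "allI"}), a rule can
+  be normalized explicitly like this; a rule that is already in
+  canonical form is left essentially unchanged:
+*}
+
+ML {*
+  (*normalize a rule; allI is already in hereditary Harrop form*)
+  val thm = Simplifier.norm_hhf @{context} @{thm allI};
+*}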
+
+
+subsection {* Rule composition *}
+
+text {*
+  The rule calculus of Isabelle/Pure provides two main inferences:
+  @{inference resolution} (i.e.\ back-chaining of rules) and
+  @{inference assumption} (i.e.\ closing a branch), both modulo
+  higher-order unification.  There are also combined variants, notably
+  @{inference elim_resolution} and @{inference dest_resolution}.
+
+  To understand the all-important @{inference resolution} principle,
+  we first consider raw @{inference_def composition} (modulo
+  higher-order unification with substitution @{text "\<vartheta>"}):
+  \[
+  \infer[(@{inference_def composition})]{@{text "\<^vec>A\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {@{text "\<^vec>A \<Longrightarrow> B"} & @{text "B' \<Longrightarrow> C"} & @{text "B\<vartheta> = B'\<vartheta>"}}
+  \]
+  Here the conclusion of the first rule is unified with the premise of
+  the second; the resulting rule instance inherits the premises of the
+  first and conclusion of the second.  Note that @{text "C"} can again
+  consist of iterated implications.  We can also permute the premises
+  of the second rule back-and-forth in order to compose with @{text
+  "B'"} in any position (subsequently we shall always refer to
+  position 1 w.l.o.g.).
+
+  In @{inference composition} the internal structure of the common
+  part @{text "B"} and @{text "B'"} is not taken into account.  For
+  proper @{inference resolution} we require @{text "B"} to be atomic,
+  and explicitly observe the structure @{text "\<And>\<^vec>x. \<^vec>H
+  \<^vec>x \<Longrightarrow> B' \<^vec>x"} of the premise of the second rule.  The
+  idea is to adapt the first rule by ``lifting'' it into this context,
+  by means of iterated application of the following inferences:
+  \[
+  \infer[(@{inference_def imp_lift})]{@{text "(\<^vec>H \<Longrightarrow> \<^vec>A) \<Longrightarrow> (\<^vec>H \<Longrightarrow> B)"}}{@{text "\<^vec>A \<Longrightarrow> B"}}
+  \]
+  \[
+  \infer[(@{inference_def all_lift})]{@{text "(\<And>\<^vec>x. \<^vec>A (?\<^vec>a \<^vec>x)) \<Longrightarrow> (\<And>\<^vec>x. B (?\<^vec>a \<^vec>x))"}}{@{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"}}
+  \]
+  By combining raw composition with lifting, we get full @{inference
+  resolution} as follows:
+  \[
+  \infer[(@{inference_def resolution})]
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (?\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {\begin{tabular}{l}
+    @{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"} \\
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "(\<lambda>\<^vec>x. B (?\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+   \end{tabular}}
+  \]
+
+  Continued resolution of rules allows a problem to be back-chained
+  towards more and more sub-problems.  Branches are closed either by resolving with
+  a rule of 0 premises, or by producing a ``short-circuit'' within a
+  solved situation (again modulo unification):
+  \[
+  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text i})}}
+  \]
+
+  %FIXME @{inference_def elim_resolution}, @{inference_def dest_resolution}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_op "RSN": "thm * (int * thm) -> thm"} \\
+  @{index_ML_op "RS": "thm * thm -> thm"} \\
+
+  @{index_ML_op "RLN": "thm list * (int * thm list) -> thm list"} \\
+  @{index_ML_op "RL": "thm list * thm list -> thm list"} \\
+
+  @{index_ML_op "MRS": "thm list * thm -> thm"} \\
+  @{index_ML_op "OF": "thm * thm list -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
+  @{text "rule\<^sub>1"} with the @{text i}-th premise of @{text "rule\<^sub>2"},
+  according to the @{inference resolution} principle explained above.
+  Unless there is precisely one resolvent it raises exception @{ML
+  THM}.
+
+  This corresponds to the rule attribute @{attribute THEN} in Isar
+  source language.
+
+  \item @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
+  rule\<^sub>2)"}.
+
+  \item @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
+  every @{text "rule\<^sub>1"} in @{text "rules\<^sub>1"} and @{text "rule\<^sub>2"} in
+  @{text "rules\<^sub>2"}, it resolves the conclusion of @{text "rule\<^sub>1"} with
+  the @{text "i"}-th premise of @{text "rule\<^sub>2"}, accumulating multiple
+  results in one big list.  Note that such strict enumerations of
+  higher-order unifications can be inefficient compared to the lazy
+  variant seen in elementary tactics like @{ML resolve_tac}.
+
+  \item @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
+  rules\<^sub>2)"}.
+
+  \item @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
+  against premise @{text "i"} of @{text "rule"}, for @{text "i = n, \<dots>,
+  1"}.  By working from right to left, newly emerging premises are
+  concatenated in the result, without interfering.
+
+  \item @{text "rule OF rules"} is an alternative notation for @{text
+  "rules MRS rule"}, which makes rule composition look more like
+  function application.  Note that the argument @{text "rules"} need
+  not be atomic.
+
+  This corresponds to the rule attribute @{attribute OF} in Isar
+  source language.
+
+  \end{description}
+*}
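+
+text %mlex {*
+  A small sketch of rule composition in ML (assuming the usual
+  Isabelle/HOL rules @{text "refl"}, @{text "sym"}, and @{text "conjI"}):
+*}
+
+ML {*
+  val thm1 = @{thm refl} RS @{thm sym};    (*again of the form ?t = ?t*)
+  val thm2 = @{thm conjI} OF [@{thm refl}, @{thm refl}];
+    (*a conjunction of two trivial equations*)
+*}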
+
+
+section {* Proof terms \label{sec:proof-terms} *}
+
+text {* The Isabelle/Pure inference kernel can record the proof of
+  each theorem as a proof term that contains all logical inferences in
+  detail.  Rule composition by resolution (\secref{sec:obj-rules}) and
+  type-class reasoning is broken down to primitive rules of the
+  logical framework.  The proof term can be inspected by a separate
+  proof-checker, for example.
+
+  According to the well-known \emph{Curry-Howard isomorphism}, a proof
+  can be viewed as a @{text "\<lambda>"}-term. Following this idea, proofs in
+  Isabelle are internally represented by a datatype similar to the one
+  for terms described in \secref{sec:terms}.  On top of these
+  syntactic terms, two more layers of @{text "\<lambda>"}-calculus are added,
+  which correspond to @{text "\<And>x :: \<alpha>. B x"} and @{text "A \<Longrightarrow> B"}
+  according to the propositions-as-types principle.  The resulting
+  3-level @{text "\<lambda>"}-calculus resembles ``@{text "\<lambda>HOL"}'' in the
+  more abstract setting of Pure Type Systems (PTS)
+  \cite{Barendregt-Geuvers:2001}, if some fine points like schematic
+  polymorphism and type classes are ignored.
+
+  \medskip\emph{Proof abstractions} of the form @{text "\<^bold>\<lambda>x :: \<alpha>. prf"}
+  or @{text "\<^bold>\<lambda>p : A. prf"} correspond to introduction of @{text
+  "\<And>"}/@{text "\<Longrightarrow>"}, and \emph{proof applications} of the form @{text
+  "p \<cdot> t"} or @{text "p \<bullet> q"} correspond to elimination of @{text
+  "\<And>"}/@{text "\<Longrightarrow>"}.  Actual types @{text "\<alpha>"}, propositions @{text
+  "A"}, and terms @{text "t"} might be suppressed and reconstructed
+  from the overall proof term.
+
+  \medskip Various atomic proofs indicate special situations within
+  the proof construction as follows.
+
+  A \emph{bound proof variable} is a natural number @{text "b"} that
+  acts as a de-Bruijn index for proof term abstractions.
+
+  A \emph{minimal proof} ``@{text "?"}'' is a dummy proof term.  This
+  indicates some unrecorded part of the proof.
+
+  @{text "Hyp A"} refers to some pending hypothesis by giving its
+  proposition.  This indicates an open context of implicit hypotheses,
+  similar to loose bound variables or free variables within a term
+  (\secref{sec:terms}).
+
+  An \emph{axiom} or \emph{oracle} @{text "a : A[\<^vec>\<tau>]"} refers to
+  some postulated @{text "proof constant"}, which is subject to
+  schematic polymorphism of theory content, and the particular type
+  instantiation may be given explicitly.  The vector of types @{text
+  "\<^vec>\<tau>"} refers to the schematic type variables in the generic
+  proposition @{text "A"} in canonical order.
+
+  A \emph{proof promise} @{text "a : A[\<^vec>\<tau>]"} is a placeholder
+  for some proof of polymorphic proposition @{text "A"}, with explicit
+  type instantiation as given by the vector @{text "\<^vec>\<tau>"}, as
+  above.  Unlike axioms or oracles, proof promises may be
+  \emph{fulfilled} eventually, by substituting @{text "a"} by some
+  particular proof @{text "q"} at the corresponding type instance.
+  This acts like Hindley-Milner @{text "let"}-polymorphism: a generic
+  local proof definition may get used at different type instances, and
+  is replaced by the concrete instance eventually.
+
+  A \emph{named theorem} wraps up some concrete proof as a closed
+  formal entity, in the manner of constant definitions for proof
+  terms.  The \emph{proof body} of such boxed theorems involves some
+  digest about oracles and promises occurring in the original proof.
+  This allows the inference kernel to manage this critical information
+  without the full overhead of explicit proof terms.
+*}
+
+
+subsection {* Reconstructing and checking proof terms *}
+
+text {* Fully explicit proof terms can be large, but most of this
+  information is redundant and can be reconstructed from the context.
+  Therefore, the Isabelle/Pure inference kernel records only
+  \emph{implicit} proof terms, by omitting all typing information in
+  terms, all term and type labels of proof abstractions, and some
+  argument terms of applications @{text "p \<cdot> t"} (if possible).
+
+  There are separate operations to reconstruct the full proof term
+  later on, using \emph{higher-order pattern unification}
+  \cite{nipkow-patterns,Berghofer-Nipkow:2000:TPHOL}.
+
+  The \emph{proof checker} expects a fully reconstructed proof term,
+  and can turn it into a theorem by replaying its primitive inferences
+  within the kernel.  *}
+
+
+subsection {* Concrete syntax of proof terms *}
+
+text {* The concrete syntax of proof terms is a slight extension of
+  the regular inner syntax of Isabelle/Pure \cite{isabelle-isar-ref}.
+  Its main syntactic category @{syntax (inner) proof} is defined as
+  follows:
+
+  \begin{center}
+  \begin{supertabular}{rclr}
+
+  @{syntax_def (inner) proof} & = & @{verbatim Lam} @{text params} @{verbatim "."} @{text proof} \\
+    & @{text "|"} & @{text "\<^bold>\<lambda>"} @{text "params"} @{verbatim "."} @{text proof} \\
+    & @{text "|"} & @{text proof} @{verbatim "%"} @{text any} \\
+    & @{text "|"} & @{text proof} @{text "\<cdot>"} @{text any} \\
+    & @{text "|"} & @{text proof} @{verbatim "%%"} @{text proof} \\
+    & @{text "|"} & @{text proof} @{text "\<bullet>"} @{text proof} \\
+    & @{text "|"} & @{text "id  |  longid"} \\
+  \\
+
+  @{text param} & = & @{text idt} \\
+    & @{text "|"} & @{text idt} @{verbatim ":"} @{text prop} \\
+    & @{text "|"} & @{verbatim "("} @{text param} @{verbatim ")"} \\
+  \\
+
+  @{text params} & = & @{text param} \\
+    & @{text "|"} & @{text param} @{text params} \\
+
+  \end{supertabular}
+  \end{center}
+
+  Implicit term arguments in partial proofs are indicated by ``@{text
+  "_"}''.  Type arguments for theorems and axioms may be specified
+  using @{text "p \<cdot> TYPE(type)"} (they must appear before any other
+  term argument of a theorem or axiom, but may be omitted altogether).
+
+  \medskip There are separate read and print operations for proof
+  terms, in order to avoid conflicts with the regular term language.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type proof} \\
+  @{index_ML_type proof_body} \\
+  @{index_ML proofs: "int Unsynchronized.ref"} \\
+  @{index_ML Reconstruct.reconstruct_proof:
+  "theory -> term -> proof -> proof"} \\
+  @{index_ML Reconstruct.expand_proof: "theory ->
+  (string * term option) list -> proof -> proof"} \\
+  @{index_ML Proof_Checker.thm_of_proof: "theory -> proof -> thm"} \\
+  @{index_ML Proof_Syntax.read_proof: "theory -> bool -> bool -> string -> proof"} \\
+  @{index_ML Proof_Syntax.pretty_proof: "Proof.context -> proof -> Pretty.T"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type proof} represents proof terms; this is a
+  datatype with constructors @{index_ML Abst}, @{index_ML AbsP},
+  @{index_ML_op "%"}, @{index_ML_op "%%"}, @{index_ML PBound},
+  @{index_ML MinProof}, @{index_ML Hyp}, @{index_ML PAxm}, @{index_ML
+  Oracle}, @{index_ML Promise}, @{index_ML PThm} as explained above.
+  %FIXME OfClass (!?)
+
+  \item Type @{ML_type proof_body} represents the nested proof
+  information of a named theorem, consisting of a digest of oracles
+  and named theorems over some proof term.  The digest only covers the
+  directly visible part of the proof: in order to get the full
+  information, the implicit graph of nested theorems needs to be
+  traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
+
+  \item @{ML Thm.proof_of}~@{text "thm"} and @{ML
+  Thm.proof_body_of}~@{text "thm"} produce the proof term or proof
+  body (with digest of oracles and theorems) from a given theorem.
+  Note that this involves a full join of internal futures that fulfill
+  pending proof promises, and thus disrupts the natural bottom-up
+  construction of proofs by introducing dynamic ad-hoc dependencies.
+  Parallel performance may suffer by inspecting proof terms at
+  run-time.
+
+  \item @{ML proofs} specifies the detail of proof recording within
+  @{ML_type thm} values produced by the inference kernel: @{ML 0}
+  records only the names of oracles, @{ML 1} records oracle names and
+  propositions, @{ML 2} additionally records full proof terms.
+  Officially named theorems that contribute to a result are recorded
+  in any case.
+
+  \item @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
+  turns the implicit proof term @{text "prf"} into a full proof of the
+  given proposition.
+
+  Reconstruction may fail if @{text "prf"} is not a proof of @{text
+  "prop"}, or if it does not contain sufficient information for
+  reconstruction.  Failure may only happen for proofs that are
+  constructed manually, not for those produced automatically by
+  the inference kernel.
+
+  \item @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
+  prf"} expands and reconstructs the proofs of all specified theorems,
+  with the given (full) proof.  Theorems that are not uniquely
+  specified by their name may be disambiguated by giving their
+  proposition.
+
+  \item @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
+  given (full) proof into a theorem, by replaying it using only
+  primitive rules of the inference kernel.
+
+  \item @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
+  proof term. The Boolean flags indicate the use of sort and type
+  information.  Usually, typing information is left implicit and is
+  inferred during proof reconstruction.  %FIXME eliminate flags!?
+
+  \item @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
+  pretty-prints the given proof term.
+
+  \end{description}
+*}
+
+text %mlex {* Detailed proof information of a theorem may be retrieved
+  as follows: *}
+
+lemma ex: "A \<and> B \<longrightarrow> B \<and> A"
+proof
+  assume "A \<and> B"
+  then obtain B and A ..
+  then show "B \<and> A" ..
+qed
+
+ML_val {*
+  (*proof body with digest*)
+  val body = Proofterm.strip_thm (Thm.proof_body_of @{thm ex});
+
+  (*proof term only*)
+  val prf = Proofterm.proof_of body;
+  Pretty.writeln (Proof_Syntax.pretty_proof @{context} prf);
+
+  (*all theorems used in the graph of nested proofs*)
+  val all_thms =
+    Proofterm.fold_body_thms
+      (fn (name, _, _) => insert (op =) name) [body] [];
+*}
+
+text {* The result refers to various basic facts of Isabelle/HOL:
+  @{thm [source] HOL.impI}, @{thm [source] HOL.conjE}, @{thm [source]
+  HOL.conjI} etc.  The combinator @{ML Proofterm.fold_body_thms}
+  recursively explores the graph of the proofs of all theorems being
+  used here.
+
+  \medskip Alternatively, we may produce a proof term manually, and
+  turn it into a theorem as follows: *}
+
+ML_val {*
+  val thy = @{theory};
+  val prf =
+    Proof_Syntax.read_proof thy true false
+      "impI \<cdot> _ \<cdot> _ \<bullet> \
+      \   (\<^bold>\<lambda>H: _. \
+      \     conjE \<cdot> _ \<cdot> _ \<cdot> _ \<bullet> H \<bullet> \
+      \       (\<^bold>\<lambda>(H: _) Ha: _. conjI \<cdot> _ \<cdot> _ \<bullet> Ha \<bullet> H))";
+  val thm =
+    prf
+    |> Reconstruct.reconstruct_proof thy @{prop "A \<and> B \<longrightarrow> B \<and> A"}
+    |> Proof_Checker.thm_of_proof thy
+    |> Drule.export_without_context;
+*}
+
+text {* \medskip See also @{file "~~/src/HOL/Proofs/ex/XML_Data.thy"}
+  for further examples, with export and import of proof terms via
+  XML/ML data representation.
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/ML.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,2116 @@
+theory "ML"
+imports Base
+begin
+
+chapter {* Isabelle/ML *}
+
+text {* Isabelle/ML is best understood as a certain culture based on
+  Standard ML.  Thus it is not a new programming language, but a
+  certain way to use SML at an advanced level within the Isabelle
+  environment.  This covers a variety of aspects that are geared
+  towards an efficient and robust platform for applications of formal
+  logic with fully foundational proof construction --- according to
+  the well-known \emph{LCF principle}.  There is specific
+  infrastructure with library modules to address the needs of this
+  difficult task.  For example, the raw parallel programming model of
+  Poly/ML is presented as considerably more abstract concept of
+  \emph{future values}, which is then used to augment the inference
+  kernel, proof interpreter, and theory loader accordingly.
+
+  The main aspects of Isabelle/ML are introduced below.  These
+  first-hand explanations should help to understand how proper
+  Isabelle/ML is to be read and written, and to get access to the
+  wealth of experience that is expressed in the source text and its
+  history of changes.\footnote{See
+  @{url "http://isabelle.in.tum.de/repos/isabelle"} for the full
+  Mercurial history.  There are symbolic tags to refer to official
+  Isabelle releases, as opposed to arbitrary \emph{tip} versions that
+  merely reflect snapshots that are never really up-to-date.}  *}
+
+
+section {* Style and orthography *}
+
+text {* The sources of Isabelle/Isar are optimized for
+  \emph{readability} and \emph{maintainability}.  The main purpose is
+  to tell an informed reader what is really going on and how things
+  really work.  This is a non-trivial aim, but it is supported by a
+  certain style of writing Isabelle/ML that has emerged from long
+  years of system development.\footnote{See also the interesting style
+  guide for OCaml
+  @{url "http://caml.inria.fr/resources/doc/guides/guidelines.en.html"}
+  which shares many of our means and ends.}
+
+  The main principle behind any coding style is \emph{consistency}.
+  For a single author of a small program this merely means ``choose
+  your style and stick to it''.  A complex project like Isabelle, with
+  long years of development and different contributors, requires more
+  standardization.  A coding style that is changed every few years or
+  with every new contributor is no style at all, because consistency
+  is quickly lost.  Global consistency is hard to achieve, though.
+  Nonetheless, one should always strive at least for local consistency
+  of modules and sub-systems, without deviating from some general
+  principles of how to write Isabelle/ML.
+
+  In a sense, good coding style is like an \emph{orthography} for the
+  sources: it helps to read quickly over the text and see through the
+  main points, without getting distracted by accidental presentation
+  of free-style code.
+*}
+
+
+subsection {* Header and sectioning *}
+
+text {* Isabelle source files have a certain standardized header
+  format (with precise spacing) that follows ancient traditions
+  reaching back to the earliest versions of the system by Larry
+  Paulson.  See @{file "~~/src/Pure/thm.ML"}, for example.
+
+  The header includes at least @{verbatim Title} and @{verbatim
+  Author} entries, followed by a prose description of the purpose of
+  the module.  The latter can range from a single line to several
+  paragraphs of explanations.
+
+  The rest of the file is divided into sections, subsections,
+  subsubsections, paragraphs etc.\ using a simple layout via ML
+  comments as follows.
+
+\begin{verbatim}
+(*** section ***)
+
+(** subsection **)
+
+(* subsubsection *)
+
+(*short paragraph*)
+
+(*
+  long paragraph,
+  with more text
+*)
+\end{verbatim}
+
+  As in regular typography, there is some extra space \emph{before}
+  section headings that are adjacent to plain text (not other headings
+  as in the example above).
+
+  \medskip The precise wording of the prose text given in these
+  headings is chosen carefully to introduce the main theme of the
+  subsequent formal ML text.
+*}
+
+
+subsection {* Naming conventions *}
+
+text {* Since ML is the primary medium to express the meaning of the
+  source text, naming of ML entities requires special care.
+
+  \paragraph{Notation.}  A name consists of 1--3 \emph{words} (rarely
+  4, but not more) that are separated by underscores.  There are three
+  variants concerning upper or lower case letters, which are used for
+  certain ML categories as follows:
+
+  \medskip
+  \begin{tabular}{lll}
+  variant & example & ML categories \\\hline
+  lower-case & @{ML_text foo_bar} & values, types, record fields \\
+  capitalized & @{ML_text Foo_Bar} & datatype constructors, structures, functors \\
+  upper-case & @{ML_text FOO_BAR} & special values, exception constructors, signatures \\
+  \end{tabular}
+  \medskip
+
+  For historical reasons, many capitalized names omit underscores,
+  e.g.\ old-style @{ML_text FooBar} instead of @{ML_text Foo_Bar}.
+  Genuine mixed-case names are \emph{not} used, because clear division
+  of words is essential for readability.\footnote{Camel-case was
+  invented to work around the lack of underscore in some early
+  non-ASCII character sets.  Later it became habitual in some language
+  communities that are now strong in numbers.}
+
+  A single (capital) character does not count as a ``word'' in this
+  respect: some Isabelle/ML names are suffixed by extra markers like
+  this: @{ML_text foo_barT}.
+
+  Name variants are produced by adding 1--3 primes, e.g.\ @{ML_text
+  foo'}, @{ML_text foo''}, or @{ML_text foo'''}, but not @{ML_text
+  foo''''} or more.  Decimal digits scale better to larger numbers,
+  e.g.\ @{ML_text foo0}, @{ML_text foo1}, @{ML_text foo42}.
+
+  \paragraph{Scopes.}  Apart from very basic library modules, ML
+  structures are not ``opened'', but names are referenced with
+  explicit qualification, as in @{ML Syntax.string_of_term} for
+  example.  When devising names for structures and their components it
+  is important to aim at eye-catching compositions of both parts, because
+  this is how they are seen in the sources and documentation.  For the
+  same reasons, aliases of well-known library functions should be
+  avoided.
+
+  Local names of function abstraction or case/let bindings are
+  typically shorter, sometimes using only rudiments of ``words'',
+  while still avoiding cryptic shorthands.  An auxiliary function
+  called @{ML_text helper}, @{ML_text aux}, or @{ML_text f} is
+  considered bad style.
+
+  Example:
+
+  \begin{verbatim}
+  (* RIGHT *)
+
+  fun print_foo ctxt foo =
+    let
+      fun print t = ... Syntax.string_of_term ctxt t ...
+    in ... end;
+
+
+  (* RIGHT *)
+
+  fun print_foo ctxt foo =
+    let
+      val string_of_term = Syntax.string_of_term ctxt;
+      fun print t = ... string_of_term t ...
+    in ... end;
+
+
+  (* WRONG *)
+
+  val string_of_term = Syntax.string_of_term;
+
+  fun print_foo ctxt foo =
+    let
+      fun aux t = ... string_of_term ctxt t ...
+    in ... end;
+
+  \end{verbatim}
+
+
+  \paragraph{Specific conventions.} Here are some specific name forms
+  that occur frequently in the sources.
+
+  \begin{itemize}
+
+  \item A function that maps @{ML_text foo} to @{ML_text bar} is
+  called @{ML_text foo_to_bar} or @{ML_text bar_of_foo} (never
+  @{ML_text foo2bar}, @{ML_text bar_from_foo}, @{ML_text
+  bar_for_foo}, or @{ML_text bar4foo}).
+
+  \item The name component @{ML_text legacy} means that the operation
+  is about to be discontinued soon.
+
+  \item The name component @{ML_text old} means that this is historic
+  material that might disappear at some later stage.
+
+  \item The name component @{ML_text global} means that this works
+  with the background theory instead of the regular local context
+  (\secref{sec:context}), sometimes for historical reasons, sometimes
+  due to a genuine lack of locality of the concept involved, sometimes as
+  a fall-back for the lack of a proper context in the application
+  code.  Whenever there is a non-global variant available, the
+  application should be migrated to use it with a proper local
+  context.
+
+  \item Variables of the main context types of the Isabelle/Isar
+  framework (\secref{sec:context} and \chref{ch:local-theory}) have
+  firm naming conventions as follows:
+
+  \begin{itemize}
+
+  \item theories are called @{ML_text thy}, rarely @{ML_text theory}
+  (never @{ML_text thry})
+
+  \item proof contexts are called @{ML_text ctxt}, rarely @{ML_text
+  context} (never @{ML_text ctx})
+
+  \item generic contexts are called @{ML_text context}, rarely
+  @{ML_text ctxt}
+
+  \item local theories are called @{ML_text lthy}, except for local
+  theories that are treated as proof context (which is a semantic
+  super-type)
+
+  \end{itemize}
+
+  Variations with primed or decimal numbers are always possible, as
+  well as semantic prefixes like @{ML_text foo_thy} or @{ML_text
+  bar_ctxt}, but the base conventions above need to be preserved.
+  This makes it possible to visualize their data flow via plain
+  regular expressions in the editor.
+
+  \item The main logical entities (\secref{ch:logic}) have established
+  naming conventions as follows:
+
+  \begin{itemize}
+
+  \item sorts are called @{ML_text S}
+
+  \item types are called @{ML_text T}, @{ML_text U}, or @{ML_text
+  ty} (never @{ML_text t})
+
+  \item terms are called @{ML_text t}, @{ML_text u}, or @{ML_text
+  tm} (never @{ML_text trm})
+
+  \item certified types are called @{ML_text cT}, rarely @{ML_text
+  T}, with variants as for types
+
+  \item certified terms are called @{ML_text ct}, rarely @{ML_text
+  t}, with variants as for terms (never @{ML_text ctrm})
+
+  \item theorems are called @{ML_text th}, or @{ML_text thm}
+
+  \end{itemize}
+
+  Proper semantic names override these conventions completely.  For
+  example, the left-hand side of an equation (as a term) can be called
+  @{ML_text lhs} (not @{ML_text lhs_tm}).  Or a term that is known
+  to be a variable can be called @{ML_text v} or @{ML_text x}.
+
+  \item Tactics (\secref{sec:tactics}) are sufficiently important to
+  have specific naming conventions.  The name of a basic tactic
+  definition always has a @{ML_text "_tac"} suffix, the subgoal index
+  (if applicable) is always called @{ML_text i}, and the goal state
+  (if made explicit) is usually called @{ML_text st} instead of the
+  somewhat misleading @{ML_text thm}.  Any other arguments are given
+  before the latter two, and the general context is given first.
+  Example:
+
+  \begin{verbatim}
+  fun my_tac ctxt arg1 arg2 i st = ...
+  \end{verbatim}
+
+  Note that the goal state @{ML_text st} above is rarely made
+  explicit, if tactic combinators (tacticals) are used as usual.
+
+  \end{itemize}
+*}
+
+
+subsection {* General source layout *}
+
+text {* The general Isabelle/ML source layout imitates regular
+  type-setting to some extent, augmented by the requirements for
+  deeply nested expressions that are commonplace in functional
+  programming.
+
+  \paragraph{Line length} is 80 characters according to ancient
+  standards, but we allow as much as 100 characters (not
+  more).\footnote{Readability requires keeping the beginning of a line
+  in view while watching its end.  Modern wide-screen displays do not
+  change the way the human brain works.  Sources also need to be
+  printable on plain paper with reasonable font-size.} The extra 20
+  characters acknowledge the space requirements due to qualified
+  library references in Isabelle/ML.
+
+  \paragraph{White-space} is used to emphasize the structure of
+  expressions, following mostly standard conventions for mathematical
+  typesetting, as can be seen in plain {\TeX} or {\LaTeX}.  This
+  defines positioning of spaces for parentheses, punctuation, and
+  infixes as illustrated here:
+
+  \begin{verbatim}
+  val x = y + z * (a + b);
+  val pair = (a, b);
+  val record = {foo = 1, bar = 2};
+  \end{verbatim}
+
+  Lines are normally broken \emph{after} an infix operator or
+  punctuation character.  For example:
+
+  \begin{verbatim}
+  val x =
+    a +
+    b +
+    c;
+
+  val tuple =
+   (a,
+    b,
+    c);
+  \end{verbatim}
+
+  Some special infixes (e.g.\ @{ML_text "|>"}) work better at the
+  start of the line, but punctuation is always at the end.
+
+  Function application follows the tradition of @{text "\<lambda>"}-calculus,
+  not informal mathematics.  For example: @{ML_text "f a b"} for a
+  curried function, or @{ML_text "g (a, b)"} for a tupled function.
+  Note that the space between @{ML_text g} and the pair @{ML_text
+  "(a, b)"} follows the important principle of
+  \emph{compositionality}: the layout of @{ML_text "g p"} does not
+  change when @{ML_text "p"} is refined to the concrete pair
+  @{ML_text "(a, b)"}.
+
+  \paragraph{Indentation} uses plain spaces, never hard
+  tabulators.\footnote{Tabulators were invented to move the carriage
+  of a type-writer to certain predefined positions.  In software they
+  could be used as a primitive run-length compression of consecutive
+  spaces, but the precise result would depend on non-standardized
+  editor configuration.}
+
+  Each level of nesting is indented by 2 spaces, sometimes 1, very
+  rarely 4, never 8 or any other odd number.
+
+  Indentation follows a simple logical format that only depends on the
+  nesting depth, not the accidental length of the text that initiates
+  a level of nesting.  Example:
+
+  \begin{verbatim}
+  (* RIGHT *)
+
+  if b then
+    expr1_part1
+    expr1_part2
+  else
+    expr2_part1
+    expr2_part2
+
+
+  (* WRONG *)
+
+  if b then expr1_part1
+            expr1_part2
+  else expr2_part1
+       expr2_part2
+  \end{verbatim}
+
+  The second form has many problems: it assumes a fixed-width font
+  when viewing the sources, it uses more space on the line and thus
+  makes it hard to observe its strict length limit (working against
+  \emph{readability}), it requires extra editing to adapt the layout
+  to changes of the initial text (working against
+  \emph{maintainability}) etc.
+
+  \medskip For similar reasons, any kind of two-dimensional or tabular
+  layouts, ASCII-art with lines or boxes of asterisks etc.\ should be
+  avoided.
+
+  \paragraph{Complex expressions} that consist of multi-clausal
+  function definitions, @{ML_text handle}, @{ML_text case},
+  @{ML_text let} (and combinations) require special attention.  The
+  syntax of Standard ML is quite ambitious and admits a lot of
+  variance that can distort the meaning of the text.
+
+  Clauses of @{ML_text fun}, @{ML_text fn}, @{ML_text handle},
+  @{ML_text case} get extra indentation to indicate the nesting
+  clearly.  Example:
+
+  \begin{verbatim}
+  (* RIGHT *)
+
+  fun foo p1 =
+        expr1
+    | foo p2 =
+        expr2
+
+
+  (* WRONG *)
+
+  fun foo p1 =
+    expr1
+    | foo p2 =
+    expr2
+  \end{verbatim}
+
+  Body expressions consisting of @{ML_text case} or @{ML_text let}
+  require care to maintain compositionality, to prevent loss of
+  logical indentation where it is especially important to see the
+  structure of the text.  Example:
+
+  \begin{verbatim}
+  (* RIGHT *)
+
+  fun foo p1 =
+        (case e of
+          q1 => ...
+        | q2 => ...)
+    | foo p2 =
+        let
+          ...
+        in
+          ...
+        end
+
+
+  (* WRONG *)
+
+  fun foo p1 = case e of
+      q1 => ...
+    | q2 => ...
+    | foo p2 =
+    let
+      ...
+    in
+      ...
+    end
+  \end{verbatim}
+
+  Extra parentheses around @{ML_text case} expressions are optional,
+  but help to analyse the nesting based on character matching in the
+  editor.
+
+  \medskip There are two main exceptions to the overall principle of
+  compositionality in the layout of complex expressions.
+
+  \begin{enumerate}
+
+  \item @{ML_text "if"} expressions are iterated as if there would be
+  a multi-branch conditional in SML, e.g.
+
+  \begin{verbatim}
+  (* RIGHT *)
+
+  if b1 then e1
+  else if b2 then e2
+  else e3
+  \end{verbatim}
+
+  \item @{ML_text fn} abstractions are often laid out as if they
+  lacked any structure by themselves.  This traditional form is
+  motivated by the possibility to shift function arguments back and
+  forth wrt.\ additional combinators.  Example:
+
+  \begin{verbatim}
+  (* RIGHT *)
+
+  fun foo x y = fold (fn z =>
+    expr)
+  \end{verbatim}
+
+  Here the visual appearance is that of three arguments @{ML_text x},
+  @{ML_text y}, @{ML_text z}.
+
+  \end{enumerate}
+
+  Such weakly structured layout should be used with great care.  Here
+  are some counter-examples involving @{ML_text let} expressions:
+
+  \begin{verbatim}
+  (* WRONG *)
+
+  fun foo x = let
+      val y = ...
+    in ... end
+
+
+  (* WRONG *)
+
+  fun foo x = let
+    val y = ...
+  in ... end
+
+
+  (* WRONG *)
+
+  fun foo x =
+  let
+    val y = ...
+  in ... end
+  \end{verbatim}
+
+  \medskip In general the source layout is meant to emphasize the
+  structure of complex language expressions, not to pretend that SML
+  had a completely different syntax (say that of Haskell or Java).
+*}
+
+
+section {* SML embedded into Isabelle/Isar *}
+
+text {* ML and Isar are intertwined via an open-ended bootstrap
+  process that provides more and more programming facilities and
+  logical content in an alternating manner.  Bootstrapping starts from
+  the raw environment of existing implementations of Standard ML
+  (mainly Poly/ML, but also SML/NJ).
+
+  Isabelle/Pure marks the point where the original ML toplevel is
+  superseded by the Isar toplevel that maintains a uniform context for
+  arbitrary ML values (see also \secref{sec:context}).  This formal
+  environment holds ML compiler bindings, logical entities, and many
+  other things.  Raw SML is never encountered again after the initial
+  bootstrap of Isabelle/Pure.
+
+  Object-logics like Isabelle/HOL are built within the
+  Isabelle/ML/Isar environment by introducing suitable theories with
+  associated ML modules, either inlined or as separate files.  Thus
+  Isabelle/HOL is defined as a regular user-space application within
+  the Isabelle framework.  Further add-on tools can be implemented in
+  ML within the Isar context in the same manner: ML is part of the
+  standard repertoire of Isabelle, and there is no distinction between
+  ``user'' and ``developer'' in this respect.
+*}
+
+
+subsection {* Isar ML commands *}
+
+text {* The primary Isar source language provides facilities to ``open
+  a window'' to the underlying ML compiler.  Especially see the Isar
+  commands @{command_ref "ML_file"} and @{command_ref "ML"}: both work the
+  same way, only the source text is provided via a file vs.\ inlined,
+  respectively.  Apart from embedding ML into the main theory
+  definition like that, there are many more commands that refer to ML
+  source, such as @{command_ref setup} or @{command_ref declaration}.
+  Even more fine-grained embedding of ML into Isar is encountered in
+  the proof method @{method_ref tactic}, which refines the pending
+  goal state via a given expression of type @{ML_type tactic}.
+*}
+
+text %mlex {* The following artificial example demonstrates some ML
+  toplevel declarations within the implicit Isar theory context.  This
+  is regular functional programming without referring to logical
+  entities yet.
+*}
+
+ML {*
+  fun factorial 0 = 1
+    | factorial n = n * factorial (n - 1)
+*}
+
+text {* Here the ML environment is already managed by Isabelle, i.e.\
+  the @{ML factorial} function is not yet accessible in the preceding
+  paragraph, nor in a different theory that is independent of the
+  current one in the import hierarchy.
+
+  Removing the above ML declaration from the source text will remove
+  any trace of this definition as expected.  The Isabelle/ML toplevel
+  environment is managed in a \emph{stateless} way: unlike the raw ML
+  toplevel there are no global side-effects involved
+  here.\footnote{Such a stateless compilation environment is also a
+  prerequisite for robust parallel compilation within independent
+  nodes of the implicit theory development graph.}
+
+  \medskip The next example shows how to embed ML into Isar proofs, using
+  @{command_ref "ML_prf"} instead of @{command_ref "ML"}.
+  As illustrated below, the effect on the ML environment is local to
+  the whole proof body, ignoring the block structure.
+*}
+
+notepad
+begin
+  ML_prf %"ML" {* val a = 1 *}
+  {
+    ML_prf %"ML" {* val b = a + 1 *}
+  } -- {* Isar block structure ignored by ML environment *}
+  ML_prf %"ML" {* val c = b + 1 *}
+end
+
+text {* By side-stepping the normal scoping rules for Isar proof
+  blocks, embedded ML code can refer to the different contexts and
+  manipulate corresponding entities, e.g.\ export a fact from a block
+  context.
+
+  \medskip Two further ML commands are useful in certain situations:
+  @{command_ref ML_val} and @{command_ref ML_command} are
+  \emph{diagnostic} in the sense that there is no effect on the
+  underlying environment, and can thus be used anywhere (even outside a
+  theory).  The examples below produce long strings of digits by
+  invoking @{ML factorial}: @{command ML_val} already takes care of
+  printing the ML toplevel result, but @{command ML_command} is silent
+  so we produce an explicit output message.  *}
+
+ML_val {* factorial 100 *}
+ML_command {* writeln (string_of_int (factorial 100)) *}
+
+notepad
+begin
+  ML_val {* factorial 100 *}
+  ML_command {* writeln (string_of_int (factorial 100)) *}
+end
+
+
+subsection {* Compile-time context *}
+
+text {* Whenever the ML compiler is invoked within Isabelle/Isar, the
+  formal context is passed as a thread-local reference variable.  Thus
+  ML code may access the theory context during compilation, by reading
+  or writing the (local) theory under construction.  Note that such
+  direct access to the compile-time context is rare.  In practice it
+  is typically done via some derived ML functions instead.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML ML_Context.the_generic_context: "unit -> Context.generic"} \\
+  @{index_ML "Context.>>": "(Context.generic -> Context.generic) -> unit"} \\
+  @{index_ML ML_Thms.bind_thms: "string * thm list -> unit"} \\
+  @{index_ML ML_Thms.bind_thm: "string * thm -> unit"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML "ML_Context.the_generic_context ()"} refers to the theory
+  context of the ML toplevel --- at compile time.  ML code needs to
+  take care to refer to @{ML "ML_Context.the_generic_context ()"}
+  correctly.  Recall that evaluation of a function body is delayed
+  until actual run-time.
+
+  \item @{ML "Context.>>"}~@{text f} applies context transformation
+  @{text f} to the implicit context of the ML toplevel.
+
+  \item @{ML ML_Thms.bind_thms}~@{text "(name, thms)"} stores a list of
+  theorems produced in ML both in the (global) theory context and the
+  ML toplevel, associating it with the provided name.  Theorems are
+  put into a global ``standard'' format before being stored.
+
+  \item @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but refers to a
+  singleton fact.
+
+  \end{description}
+
+  It is important to note that the above functions are really
+  restricted to the compile time, even though the ML compiler is
+  invoked at run-time.  The majority of ML code either uses static
+  antiquotations (\secref{sec:ML-antiq}) or refers to the theory or
+  proof context at run-time, by explicit functional abstraction.
+*}
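+
+text %mlex {* The following minimal sketch accesses the compile-time
+  context and stores a well-known fact under the ad-hoc name
+  @{text "my_refl"}, which is chosen here for illustration only: *}
+
+ML {*
+  (*the compile-time context of this ML block*)
+  val generic_context = ML_Context.the_generic_context ();
+
+  (*store a theorem both as a theory fact and as ML value "my_refl"*)
+  val _ = ML_Thms.bind_thm ("my_refl", @{thm refl});
+*}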
+
+
+subsection {* Antiquotations \label{sec:ML-antiq} *}
+
+text {* A very important consequence of embedding SML into Isar is the
+  concept of \emph{ML antiquotation}.  The standard token language of
+  ML is augmented by special syntactic entities of the following form:
+
+  @{rail \<open>
+  @{syntax_def antiquote}: '@{' nameref args '}'
+  \<close>}
+
+  Here @{syntax nameref} and @{syntax args} are regular outer syntax
+  categories \cite{isabelle-isar-ref}.  Attributes and proof methods
+  use similar syntax.
+
+  \medskip A regular antiquotation @{text "@{name args}"} processes
+  its arguments by the usual means of the Isar source language, and
+  produces corresponding ML source text, either as literal
+  \emph{inline} text (e.g. @{text "@{term t}"}) or abstract
+  \emph{value} (e.g. @{text "@{thm th}"}).  This pre-compilation
+  scheme makes it possible to refer to formal entities in a robust
+  manner, with proper static scoping and with some degree of logical
+  checking of small portions of the code.
+*}
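+
+text %mlex {* The following minimal sketch shows both kinds of
+  antiquotations; the term @{text "f x"} and the fact
+  @{thm [source] refl} are chosen arbitrarily for illustration: *}
+
+ML_val {*
+  (*inline text: an ML expression constructing a term of the context*)
+  val t = @{term "f x"};
+
+  (*abstract value: a theorem of the current context*)
+  val th = @{thm refl};
+*}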
+
+
+subsection {* Printing ML values *}
+
+text {* The ML compiler knows about the structure of values according
+  to their static type, and can print them in the manner of the
+  toplevel loop, although the details are non-portable.  The
+  antiquotations @{ML_antiquotation_def "make_string"} and
+  @{ML_antiquotation_def "print"} provide a quasi-portable way to
+  refer to this potential capability of the underlying ML system in
+  generic Isabelle/ML sources.
+
+  This is occasionally useful for diagnostic or demonstration
+  purposes.  Note that production-quality tools require proper
+  user-level error messages. *}
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "make_string"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "print"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation make_string}
+  ;
+  @@{ML_antiquotation print} @{syntax name}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{make_string}"} inlines a function to print arbitrary
+  values similar to the ML toplevel.  The result is compiler dependent
+  and may fall back on "?" in certain situations.
+
+  \item @{text "@{print f}"} uses the ML function @{text "f: string ->
+  unit"} to output the result of @{text "@{make_string}"} above,
+  together with the source position of the antiquotation.  The default
+  output function is @{ML writeln}.
+
+  \end{description}
+*}
+
+text %mlex {* The following artificial examples show how to produce
+  adhoc output of ML values for debugging purposes. *}
+
+ML {*
+  val x = 42;
+  val y = true;
+
+  writeln (@{make_string} {x = x, y = y});
+
+  @{print} {x = x, y = y};
+  @{print tracing} {x = x, y = y};
+*}
+
+
+section {* Canonical argument order \label{sec:canonical-argument-order} *}
+
+text {* Standard ML is a language in the tradition of @{text
+  "\<lambda>"}-calculus and \emph{higher-order functional programming},
+  similar to OCaml, Haskell, or Isabelle/Pure and HOL as logical
+  languages.  Getting acquainted with the native style of representing
+  functions in that setting can save a lot of extra boiler-plate of
+  redundant shuffling of arguments, auxiliary abstractions etc.
+
+  Functions are usually \emph{curried}: the idea of turning arguments
+  of type @{text "\<tau>\<^sub>i"} (for @{text "i \<in> {1, \<dots> n}"}) into a result of
+  type @{text "\<tau>"} is represented by the iterated function space
+  @{text "\<tau>\<^sub>1 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>"}.  This is isomorphic to the well-known
+  encoding via tuples @{text "\<tau>\<^sub>1 \<times> \<dots> \<times> \<tau>\<^sub>n \<rightarrow> \<tau>"}, but the curried
+  version fits more smoothly into the basic calculus.\footnote{The
+  difference is even more significant in higher-order logic, because
+  the redundant tuple structure needs to be accommodated by formal
+  reasoning.}
+
+  Currying gives some flexibility due to \emph{partial application}.  A
+  function @{text "f: \<tau>\<^sub>1 \<rightarrow> \<tau>\<^sub>2 \<rightarrow> \<tau>"} can be applied to @{text "x: \<tau>\<^sub>1"}
+  and the remaining @{text "(f x): \<tau>\<^sub>2 \<rightarrow> \<tau>"} passed to another function
+  etc.  How well this works in practice depends on the order of
+  arguments.  In the worst case, arguments are arranged erratically,
+  and using a function in a certain situation always requires some
+  glue code.  Thus we would get exponentially many opportunities to
+  decorate the code with meaningless permutations of arguments.
+
+  This can be avoided by \emph{canonical argument order}, which
+  observes certain standard patterns and minimizes adhoc permutations
+  in their application.  In Isabelle/ML, large portions of text can be
+  written without auxiliary operations like @{text "swap: \<alpha> \<times> \<beta> \<rightarrow> \<beta> \<times>
+  \<alpha>"} or @{text "C: (\<alpha> \<rightarrow> \<beta> \<rightarrow> \<gamma>) \<rightarrow> (\<beta> \<rightarrow> \<alpha> \<rightarrow> \<gamma>)"} (the latter not
+  present in the Isabelle/ML library).
+
+  \medskip The basic idea is that arguments that vary less are moved
+  further to the left than those that vary more.  Two particularly
+  important categories of functions are \emph{selectors} and
+  \emph{updates}.
+
+  The subsequent scheme is based on a hypothetical set-like container
+  of type @{text "\<beta>"} that manages elements of type @{text "\<alpha>"}.  Both
+  the names and types of the associated operations are canonical for
+  Isabelle/ML.
+
+  \begin{center}
+  \begin{tabular}{ll}
+  kind & canonical name and type \\\hline
+  selector & @{text "member: \<beta> \<rightarrow> \<alpha> \<rightarrow> bool"} \\
+  update & @{text "insert: \<alpha> \<rightarrow> \<beta> \<rightarrow> \<beta>"} \\
+  \end{tabular}
+  \end{center}
+
+  Given a container @{text "B: \<beta>"}, the partially applied @{text
+  "member B"} is a predicate over elements @{text "\<alpha> \<rightarrow> bool"}, and
+  thus represents the intended denotation directly.  It is customary
+  to pass the abstract predicate to further operations, not the
+  concrete container.  The argument order makes it easy to use other
+  combinators: @{text "forall (member B) list"} will check a list of
+  elements for membership in @{text "B"} etc. Often the explicit
+  @{text "list"} is pointless and can be contracted to @{text "forall
+  (member B)"} to get directly a predicate again.
+
+  In contrast, an update operation varies the container, so it moves
+  to the right: @{text "insert a"} is a function @{text "\<beta> \<rightarrow> \<beta>"} to
+  insert a value @{text "a"}.  These can be composed naturally as
+  @{text "insert c \<circ> insert b \<circ> insert a"}.  The slightly awkward
+  inversion of the composition order is due to conventional
+  mathematical notation, which can be easily amended as explained
+  below.
+*}
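+
+text %mlex {* A minimal sketch of this scheme, using plain lists as
+  the container type and the library operations @{ML member} and
+  @{ML insert} over structural equality: *}
+
+ML {*
+  val bs = [1, 2, 3];
+
+  (*partially applied selector: a predicate over elements*)
+  @{assert} (forall (member (op =) bs) [1, 3]);
+
+  (*composition of updates, applied to an initial container*)
+  @{assert} ((insert (op =) 4 o insert (op =) 5) bs = [4, 5, 1, 2, 3]);
+*}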
+
+
+subsection {* Forward application and composition *}
+
+text {* Regular function application and infix notation works best for
+  relatively deeply structured expressions, e.g.\ @{text "h (f x y + g
+  z)"}.  The important special case of \emph{linear transformation}
+  applies a cascade of functions @{text "f\<^sub>n (\<dots> (f\<^sub>1 x))"}.  This
+  becomes hard to read and maintain if the functions are themselves
+  given as complex expressions.  The notation can be significantly
+  improved by introducing \emph{forward} versions of application and
+  composition as follows:
+
+  \medskip
+  \begin{tabular}{lll}
+  @{text "x |> f"} & @{text "\<equiv>"} & @{text "f x"} \\
+  @{text "(f #> g) x"} & @{text "\<equiv>"} & @{text "x |> f |> g"} \\
+  \end{tabular}
+  \medskip
+
+  This makes it convenient to write @{text "x |> f\<^sub>1 |> \<dots> |> f\<^sub>n"} or
+  @{text "f\<^sub>1 #> \<dots> #> f\<^sub>n"} for its functional abstraction over @{text
+  "x"}.
+
+  \medskip There is an additional set of combinators to accommodate
+  multiple results (via pairs) that are passed on as multiple
+  arguments (via currying).
+
+  \medskip
+  \begin{tabular}{lll}
+  @{text "(x, y) |-> f"} & @{text "\<equiv>"} & @{text "f x y"} \\
+  @{text "(f #-> g) x"} & @{text "\<equiv>"} & @{text "x |> f |-> g"} \\
+  \end{tabular}
+  \medskip
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_op "|> ": "'a * ('a -> 'b) -> 'b"} \\
+  @{index_ML_op "|-> ": "('c * 'a) * ('c -> 'a -> 'b) -> 'b"} \\
+  @{index_ML_op "#> ": "('a -> 'b) * ('b -> 'c) -> 'a -> 'c"} \\
+  @{index_ML_op "#-> ": "('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd"} \\
+  \end{mldecls}
+*}
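+
+text %mlex {* A minimal sketch of forward application and composition,
+  using ad-hoc functions on integers: *}
+
+ML {*
+  (*linear transformation via forward application*)
+  val x = 1 |> (fn i => i + 1) |> (fn i => i * 10);
+  @{assert} (x = 20);
+
+  (*the same transformation as a composed function*)
+  val add_then_scale = (fn i => i + 1) #> (fn i => i * 10);
+  @{assert} (add_then_scale 1 = 20);
+*}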
+
+
+subsection {* Canonical iteration *}
+
+text {* As explained above, a function @{text "f: \<alpha> \<rightarrow> \<beta> \<rightarrow> \<beta>"} can be
+  understood as update on a configuration of type @{text "\<beta>"},
+  parametrized by arguments of type @{text "\<alpha>"}.  Given @{text "a: \<alpha>"}
+  the partial application @{text "(f a): \<beta> \<rightarrow> \<beta>"} operates
+  homogeneously on @{text "\<beta>"}.  This can be iterated naturally over a
+  list of parameters @{text "[a\<^sub>1, \<dots>, a\<^sub>n]"} as @{text "f a\<^sub>1 #> \<dots> #> f a\<^sub>n"}.
+  The latter expression is again a function @{text "\<beta> \<rightarrow> \<beta>"}.
+  It can be applied to an initial configuration @{text "b: \<beta>"} to
+  start the iteration over the given list of arguments: each @{text
+  "a"} in @{text "a\<^sub>1, \<dots>, a\<^sub>n"} is applied consecutively by updating a
+  cumulative configuration.
+
+  The @{text fold} combinator in Isabelle/ML lifts a function @{text
+  "f"} as above to its iterated version over a list of arguments.
+  Lifting can be repeated, e.g.\ @{text "(fold \<circ> fold) f"} iterates
+  over a list of lists as expected.
+
+  The variant @{text "fold_rev"} works inside-out over the list of
+  arguments, such that @{text "fold_rev f \<equiv> fold f \<circ> rev"} holds.
+
+  The @{text "fold_map"} combinator essentially performs @{text
+  "fold"} and @{text "map"} simultaneously: each application of @{text
+  "f"} produces an updated configuration together with a side-result;
+  the iteration collects all such side-results as a separate list.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML fold: "('a -> 'b -> 'b) -> 'a list -> 'b -> 'b"} \\
+  @{index_ML fold_rev: "('a -> 'b -> 'b) -> 'a list -> 'b -> 'b"} \\
+  @{index_ML fold_map: "('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML fold}~@{text f} lifts the parametrized update function
+  @{text "f"} to a list of parameters.
+
+  \item @{ML fold_rev}~@{text "f"} is similar to @{ML fold}~@{text
+  "f"}, but works inside-out.
+
+  \item @{ML fold_map}~@{text "f"} lifts the parametrized update
+  function @{text "f"} (with side-result) to a list of parameters and
+  cumulative side-results.
+
+  \end{description}
+
+  \begin{warn}
+  The literature on functional programming provides a multitude of
+  combinators called @{text "foldl"}, @{text "foldr"} etc.  SML97
+  provides its own variations as @{ML List.foldl} and @{ML
+  List.foldr}, while the classic Isabelle library also has the
+  historic @{ML Library.foldl} and @{ML Library.foldr}.  To avoid
+  unnecessary complication and confusion, all these historical
+  versions should be ignored, and @{ML fold} (or @{ML fold_rev}) used
+  exclusively.
+  \end{warn}
+*}
+
+text %mlex {* The following example shows how to fill a text buffer
+  incrementally by adding strings, either individually or from a given
+  list.
+*}
+
+ML {*
+  val s =
+    Buffer.empty
+    |> Buffer.add "digits: "
+    |> fold (Buffer.add o string_of_int) (0 upto 9)
+    |> Buffer.content;
+
+  @{assert} (s = "digits: 0123456789");
+*}
+
+text {* Note how @{ML "fold (Buffer.add o string_of_int)"} above saves
+  an extra @{ML "map"} over the given list.  This kind of peephole
+  optimization reduces both the code size and the tree structures in
+  memory (``deforestation''), but it requires some practice to read
+  and write fluently.
+
+  \medskip The next example elaborates the idea of canonical
+  iteration, demonstrating fast accumulation of tree content using a
+  text buffer.
+*}
+
+ML {*
+  datatype tree = Text of string | Elem of string * tree list;
+
+  fun slow_content (Text txt) = txt
+    | slow_content (Elem (name, ts)) =
+        "<" ^ name ^ ">" ^
+        implode (map slow_content ts) ^
+        "</" ^ name ^ ">"
+
+  fun add_content (Text txt) = Buffer.add txt
+    | add_content (Elem (name, ts)) =
+        Buffer.add ("<" ^ name ^ ">") #>
+        fold add_content ts #>
+        Buffer.add ("</" ^ name ^ ">");
+
+  fun fast_content tree =
+    Buffer.empty |> add_content tree |> Buffer.content;
+*}
+
+text {* The slow part of @{ML slow_content} is the @{ML implode} of
+  the recursive results, because it copies previously produced strings
+  again.
+
+  The incremental @{ML add_content} avoids this by operating on a
+  buffer that is passed through in a linear fashion.  Using @{ML_text
+  "#>"} and contraction over the actual buffer argument saves some
+  additional boiler-plate.  Of course, the two @{ML "Buffer.add"}
+  invocations with concatenated strings could have been split into
+  smaller parts, but this would have obfuscated the source without
+  making a big difference in allocations.  Here we have done some
+  peephole-optimization for the sake of readability.
+
+  Another benefit of @{ML add_content} is its ``open'' form as a
+  function on buffers that can be continued in further linear
+  transformations, folding etc.  Thus it is more compositional than
+  the naive @{ML slow_content}.  As a realistic example, compare the
+  old-style @{ML "Term.maxidx_of_term: term -> int"} with the newer
+  @{ML "Term.maxidx_term: term -> int -> int"} in Isabelle/Pure.
+
+  Note that @{ML fast_content} above is only defined as an example.  In
+  many practical situations, it is customary to provide the
+  incremental @{ML add_content} only and leave the initialization and
+  termination to the concrete application by the user.
+*}
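+
+text {* \medskip As a further minimal sketch, @{ML fold_map} threads a
+  configuration (here a running sum) through the list while collecting
+  side-results: *}
+
+ML {*
+  (*pair each list element with the sum accumulated so far*)
+  val (partial_sums, total) =
+    fold_map (fn x => fn acc => (acc + x, acc + x)) [1, 2, 3, 4] 0;
+
+  @{assert} (partial_sums = [1, 3, 6, 10]);
+  @{assert} (total = 10);
+*}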
+
+
+section {* Message output channels \label{sec:message-channels} *}
+
+text {* Isabelle provides output channels for different kinds of
+  messages: regular output, high-volume tracing information, warnings,
+  and errors.
+
+  Depending on the user interface involved, these messages may appear
+  in different text styles or colours.  The standard output for
+  terminal sessions prefixes each line of warnings by @{verbatim
+  "###"} and errors by @{verbatim "***"}, but leaves anything else
+  unchanged.
+
+  Messages are associated with the transaction context of the running
+  Isar command.  This enables the front-end to manage commands and
+  resulting messages together.  For example, after deleting a command
+  from a given theory document version, the corresponding message
+  output can be retracted from the display.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML writeln: "string -> unit"} \\
+  @{index_ML tracing: "string -> unit"} \\
+  @{index_ML warning: "string -> unit"} \\
+  @{index_ML error: "string -> 'a"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML writeln}~@{text "text"} outputs @{text "text"} as regular
+  message.  This is the primary message output operation of Isabelle
+  and should be used by default.
+
+  \item @{ML tracing}~@{text "text"} outputs @{text "text"} as special
+  tracing message, indicating potential high-volume output to the
+  front-end (hundreds or thousands of messages issued by a single
+  command).  The idea is to allow the user-interface to downgrade the
+  quality of message display to achieve higher throughput.
+
+  Note that the user might have to take special actions to see tracing
+  output, e.g.\ switch to a different output window.  So this channel
+  should not be used for regular output.
+
+  \item @{ML warning}~@{text "text"} outputs @{text "text"} as
+  warning, which typically means some extra emphasis on the front-end
+  side (color highlighting, icons, etc.).
+
+  \item @{ML error}~@{text "text"} raises exception @{ML ERROR}~@{text
+  "text"} and thus lets the Isar toplevel print @{text "text"} on the
+  error channel, which typically means some extra emphasis on the
+  front-end side (color highlighting, icons, etc.).
+
+  This assumes that the exception is not handled before the command
+  terminates.  Handling exception @{ML ERROR}~@{text "text"} is a
+  perfectly legal alternative: it means that the error is absorbed
+  without any message output.
+
+  \begin{warn}
+  The actual error channel is accessed via @{ML Output.error_message}, but
+  the old interaction protocol of Proof~General \emph{crashes} if that
+  function is used in regular ML code: error output and toplevel
+  command failure always need to coincide in classic TTY interaction.
+  \end{warn}
+
+  \end{description}
+
+  \begin{warn}
+  Regular Isabelle/ML code should output messages exclusively by the
+  official channels.  Using raw I/O on \emph{stdout} or \emph{stderr}
+  instead (e.g.\ via @{ML TextIO.output}) is apt to cause problems in
+  the presence of parallel and asynchronous processing of Isabelle
+  theories.  Such raw output might be displayed by the front-end in
+  some system console log, with a low chance that the user will ever
+  see it.  Moreover, as a genuine side-effect on global process
+  channels, there is no proper way to retract output when Isar command
+  transactions are reset by the system.
+  \end{warn}
+
+  \begin{warn}
+  The message channels should be used in a message-oriented manner.
+  This means that multi-line output that logically belongs together is
+  issued by a \emph{single} invocation of @{ML writeln} etc.\ with the
+  functional concatenation of all message constituents.
+  \end{warn}
+*}
+
+text %mlex {* The following example demonstrates a multi-line
+  warning.  Note that in some situations the user sees only the first
+  line, so the most important point should be made first.
+*}
+
+ML_command {*
+  warning (cat_lines
+   ["Beware the Jabberwock, my son!",
+    "The jaws that bite, the claws that catch!",
+    "Beware the Jubjub Bird, and shun",
+    "The frumious Bandersnatch!"]);
+*}
+
+
+section {* Exceptions \label{sec:exceptions} *}
+
+text {* The Standard ML semantics of strict functional evaluation
+  together with exceptions is rather well defined, but some delicate
+  points need to be observed to prevent ML programs from going wrong
+  despite static type-checking.  Exceptions in Isabelle/ML are
+  subsequently categorized as follows.
+
+  \paragraph{Regular user errors.}  These are meant to provide
+  informative feedback about malformed input etc.
+
+  The \emph{error} function raises the corresponding \emph{ERROR}
+  exception, with a plain text message as argument.  \emph{ERROR}
+  exceptions can be handled internally, in order to be ignored, turned
+  into other exceptions, or cascaded by appending messages.  If the
+  corresponding Isabelle/Isar command terminates with an \emph{ERROR}
+  exception state, the toplevel will print the result on the error
+  channel (see \secref{sec:message-channels}).
+
+  It is considered bad style to refer to internal function names or
+  values in ML source notation in user error messages.
+
+  Grammatical correctness of error messages can be improved by
+  \emph{omitting} final punctuation: messages are often concatenated
+  or put into a larger context (e.g.\ augmented with source position).
+  By not insisting on the final word at the origin of the error, the
+  system can perform its administrative tasks more easily and
+  robustly.
+
+  \paragraph{Program failures.}  There is a handful of standard
+  exceptions that indicate general failure situations, or failures of
+  core operations on logical entities (types, terms, theorems,
+  theories, see \chref{ch:logic}).
+
+  These exceptions indicate a genuine breakdown of the program, so the
+  main purpose is to determine quickly what has happened where.
+  Traditionally, the (short) exception message would include the name
+  of an ML function, although this is no longer necessary, because the
+  ML runtime system prints a detailed source position of the
+  corresponding @{ML_text raise} keyword.
+
+  \medskip User modules can always introduce their own custom
+  exceptions locally, e.g.\ to organize internal failures robustly
+  without overlapping with existing exceptions.  Exceptions that are
+  exposed in module signatures require extra care, though, and should
+  \emph{not} be introduced by default.  Surprise by users of a module
+  can often be minimized by using plain user errors instead.
+
+  \paragraph{Interrupts.}  These indicate arbitrary system events:
+  both the ML runtime system and the Isabelle/ML infrastructure signal
+  various exceptional situations by raising the special
+  \emph{Interrupt} exception in user code.
+
+  This is the one and only way that physical events can intrude on an
+  Isabelle/ML program.  Such an interrupt can mean out-of-memory,
+  stack overflow, timeout, internal signaling of threads, or the user
+  producing a console interrupt manually etc.  An Isabelle/ML program
+  that intercepts interrupts becomes dependent on physical effects of
+  the environment.  Even worse, exception handling patterns that are
+  too general by accident, e.g.\ by misspelled exception constructors,
+  will cover interrupts unintentionally and thus render the program
+  semantics ill-defined.
+
+  Note that the Interrupt exception dates back to the original SML90
+  language definition.  It was excluded from the SML97 version to
+  avoid its malign impact on ML program semantics, but without
+  providing a viable alternative.  Isabelle/ML recovers physical
+  interruptibility (which is an indispensable tool to implement
+  managed evaluation of command transactions), but requires user code
+  to be strictly transparent wrt.\ interrupts.
+
+  \begin{warn}
+  Isabelle/ML user code needs to terminate promptly on interruption,
+  without guessing at its meaning to the system infrastructure.
+  Temporary handling of interrupts for cleanup of global resources
+  etc.\ needs to be followed immediately by re-raising of the original
+  exception.
+  \end{warn}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML try: "('a -> 'b) -> 'a -> 'b option"} \\
+  @{index_ML can: "('a -> 'b) -> 'a -> bool"} \\
+  @{index_ML_exception ERROR: string} \\
+  @{index_ML_exception Fail: string} \\
+  @{index_ML Exn.is_interrupt: "exn -> bool"} \\
+  @{index_ML reraise: "exn -> 'a"} \\
+  @{index_ML Runtime.exn_trace: "(unit -> 'a) -> 'a"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML try}~@{text "f x"} makes the partiality of evaluating
+  @{text "f x"} explicit via the option datatype.  Interrupts are
+  \emph{not} handled here, i.e.\ this form serves as safe replacement
+  for the \emph{unsafe} version @{ML_text "(SOME"}~@{text "f
+  x"}~@{ML_text "handle _ => NONE)"} that is occasionally seen in
+  books about SML97, not Isabelle/ML.
+
+  \item @{ML can} is similar to @{ML try} with more abstract result.
+
+  \item @{ML ERROR}~@{text "msg"} represents user errors; this
+  exception is normally raised indirectly via the @{ML error} function
+  (see \secref{sec:message-channels}).
+
+  \item @{ML Fail}~@{text "msg"} represents general program failures.
+
+  \item @{ML Exn.is_interrupt} identifies interrupts robustly, without
+  mentioning concrete exception constructors in user code.  Handled
+  interrupts need to be re-raised promptly!
+
+  \item @{ML reraise}~@{text "exn"} raises exception @{text "exn"}
+  while preserving its implicit position information (if possible,
+  depending on the ML platform).
+
+  \item @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~@{text
+  "e"}@{ML_text ")"} evaluates expression @{text "e"} while printing
+  a full trace of its stack of nested exceptions (if possible,
+  depending on the ML platform).
+
+  Inserting @{ML Runtime.exn_trace} into ML code temporarily is
+  useful for debugging, but not suitable for production code.
+
+  \end{description}
+*}
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "assert"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{text "@{assert}"} inlines a function
+  @{ML_type "bool -> unit"} that raises @{ML Fail} if the argument is
+  @{ML false}.  Due to inlining the source position of failed
+  assertions is included in the error output.
+
+  \end{description}
+*}
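+
+text %mlex {* The following artificial example exercises @{ML try},
+  @{ML can}, and @{text "@{assert}"} on a partial function --- here
+  @{ML hd}, which fails on the empty list: *}
+
+ML_val {*
+  @{assert} (try hd ([]: int list) = NONE);
+  @{assert} (try hd [1, 2, 3] = SOME 1);
+  @{assert} (not (can hd ([]: int list)));
+  @{assert} (can hd [1, 2, 3]);
+*}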
+
+
+section {* Strings of symbols \label{sec:symbols} *}
+
+text {* A \emph{symbol} constitutes the smallest textual unit in
+  Isabelle/ML --- raw ML characters are normally not encountered at
+  all!  Isabelle strings consist of a sequence of symbols, represented
+  as a packed string or an exploded list of strings.  Each symbol is
+  in itself a small string, which has either one of the following
+  forms:
+
+  \begin{enumerate}
+
+  \item a single ASCII character ``@{text "c"}'', for example
+  ``\verb,a,'',
+
+  \item a codepoint according to UTF-8 (non-ASCII byte sequence),
+
+  \item a regular symbol ``\verb,\,\verb,<,@{text "ident"}\verb,>,'',
+  for example ``\verb,\,\verb,<alpha>,'',
+
+  \item a control symbol ``\verb,\,\verb,<^,@{text "ident"}\verb,>,'',
+  for example ``\verb,\,\verb,<^bold>,'',
+
+  \item a raw symbol ``\verb,\,\verb,<^raw:,@{text text}\verb,>,''
+  where @{text text} consists of printable characters excluding
+  ``\verb,.,'' and ``\verb,>,'', for example
+  ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
+
+  \item a numbered raw control symbol ``\verb,\,\verb,<^raw,@{text
+  n}\verb,>,'' where @{text n} consists of digits, for example
+  ``\verb,\,\verb,<^raw42>,''.
+
+  \end{enumerate}
+
+  The @{text "ident"} syntax for symbol names is @{text "letter
+  (letter | digit)\<^sup>*"}, where @{text "letter = A..Za..z"} and @{text
+  "digit = 0..9"}.  There are infinitely many regular symbols and
+  control symbols, but a fixed collection of standard symbols is
+  treated specifically.  For example, ``\verb,\,\verb,<alpha>,'' is
+  classified as a letter, which means it may occur within regular
+  Isabelle identifiers.
+
+  The character set underlying Isabelle symbols is 7-bit ASCII, but
+  8-bit character sequences are passed-through unchanged.  Unicode/UCS
+  data in UTF-8 encoding is processed in a non-strict fashion, such
+  that well-formed code sequences are recognized
+  accordingly.\footnote{Note that ISO-Latin-1 differs from UTF-8 only
+  in some special punctuation characters that even have replacements
+  within the standard collection of Isabelle symbols.  Text consisting
+  of ASCII plus accented letters can be processed in either encoding.}
+  Unicode provides its own collection of mathematical symbols, but
+  within the core Isabelle/ML world there is no link to the standard
+  collection of Isabelle regular symbols.
+
+  \medskip Output of Isabelle symbols depends on the print mode
+  \cite{isabelle-isar-ref}.  For example, the standard {\LaTeX}
+  setup of the Isabelle document preparation system would present
+  ``\verb,\,\verb,<alpha>,'' as @{text "\<alpha>"}, and
+  ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as @{text "\<^bold>\<alpha>"}.
+  On-screen rendering usually works by mapping a finite subset of
+  Isabelle symbols to suitable Unicode characters.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type "Symbol.symbol": string} \\
+  @{index_ML Symbol.explode: "string -> Symbol.symbol list"} \\
+  @{index_ML Symbol.is_letter: "Symbol.symbol -> bool"} \\
+  @{index_ML Symbol.is_digit: "Symbol.symbol -> bool"} \\
+  @{index_ML Symbol.is_quasi: "Symbol.symbol -> bool"} \\
+  @{index_ML Symbol.is_blank: "Symbol.symbol -> bool"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type "Symbol.sym"} \\
+  @{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type "Symbol.symbol"} represents individual Isabelle
+  symbols.
+
+  \item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
+  from the packed form.  This function supersedes @{ML
+  "String.explode"} for virtually all purposes of manipulating text in
+  Isabelle!\footnote{The runtime overhead for exploded strings is
+  mainly that of the list structure: individual symbols that happen to
+  be a singleton string do not require extra memory in Poly/ML.}
+
+  \item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
+  "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
+  symbols according to fixed syntactic conventions of Isabelle, cf.\
+  \cite{isabelle-isar-ref}.
+
+  \item Type @{ML_type "Symbol.sym"} is a concrete datatype that
+  represents the different kinds of symbols explicitly, with
+  constructors @{ML "Symbol.Char"}, @{ML "Symbol.Sym"}, @{ML
+  "Symbol.UTF8"}, @{ML "Symbol.Ctrl"}, @{ML "Symbol.Raw"}.
+
+  \item @{ML "Symbol.decode"} converts the string representation of a
+  symbol into the datatype version.
+
+  \end{description}
+
+  \paragraph{Historical note.} In the original SML90 standard the
+  primitive ML type @{ML_type char} did not exist, and @{ML_text
+  "explode: string -> string list"} produced a list of singleton
+  strings like @{ML "raw_explode: string -> string list"} in
+  Isabelle/ML today.  When SML97 came out, Isabelle did not adopt its
+  somewhat anachronistic 8-bit or 16-bit characters, but the idea of
+  exploding a string into a list of small strings was extended to
+  ``symbols'' as explained above.  Thus Isabelle sources can refer to
+  an infinite store of user-defined symbols, without having to worry
+  about the multitude of Unicode encodings that have emerged over the
+  years.  *}
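+
+text %mlex {* An artificial example, relying on the classification of
+  ``\verb,\,\verb,<alpha>,'' as a letter noted above: *}
+
+ML_val {*
+  val syms = Symbol.explode "a\<alpha>2";
+
+  @{assert} (length syms = 3);
+  @{assert} (Symbol.is_letter (nth syms 1));
+  @{assert} (Symbol.is_digit (nth syms 2));
+*}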
+
+
+section {* Basic data types *}
+
+text {* The basis library proposal of SML97 needs to be treated with
+  caution.  Many of its operations simply do not fit with important
+  Isabelle/ML conventions (like ``canonical argument order'', see
+  \secref{sec:canonical-argument-order}), others cause problems with
+  the parallel evaluation model of Isabelle/ML (such as @{ML
+  TextIO.print} or @{ML OS.Process.system}).
+
+  Subsequently we give a brief overview of important operations on
+  basic ML data types.
+*}
+
+
+subsection {* Characters *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type char} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type char} is \emph{not} used.  The smallest textual
+  unit in Isabelle is represented as a ``symbol'' (see
+  \secref{sec:symbols}).
+
+  \end{description}
+*}
+
+
+subsection {* Strings *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type string} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type string} represents immutable vectors of 8-bit
+  characters.  There are operations in SML to convert back and forth
+  to actual byte vectors, which are seldom used.
+
+  This historically important raw text representation is used for
+  Isabelle-specific purposes with the following implicit substructures
+  packed into the string content:
+
+  \begin{enumerate}
+
+  \item sequence of Isabelle symbols (see also \secref{sec:symbols}),
+  with @{ML Symbol.explode} as key operation;
+
+  \item XML tree structure via YXML (see also \cite{isabelle-sys}),
+  with @{ML YXML.parse_body} as key operation.
+
+  \end{enumerate}
+
+  Note that Isabelle/ML string literals may refer to Isabelle symbols
+  like ``\verb,\,\verb,<alpha>,'' natively, \emph{without} escaping
+  the backslash.  This is a consequence of Isabelle treating all
+  source text as strings of symbols, instead of raw characters.
+
+  \end{description}
+*}
+
+text %mlex {* The subsequent example illustrates the difference between
+  physical addressing of bytes and logical addressing of symbols in
+  Isabelle strings.
+*}
+
+ML_val {*
+  val s = "\<A>";
+
+  @{assert} (length (Symbol.explode s) = 1);
+  @{assert} (size s = 4);
+*}
+
+text {* Note that in Unicode renderings of the symbol @{text "\<A>"},
+  variations of encodings like UTF-8 or UTF-16 pose delicate questions
+  about the multi-byte representations of its codepoint, which lies
+  outside the 16-bit address space of the original Unicode standard
+  from the 1990s.  In Isabelle/ML it is just ``\verb,\,\verb,<A>,''
+  literally, using plain ASCII characters beyond any doubts. *}
+
+
+subsection {* Integers *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type int} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type int} represents regular mathematical integers,
+  which are \emph{unbounded}.  Overflow never happens in
+  practice.\footnote{The size limit for integer bit patterns in memory
+  is 64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.}
+  This works uniformly for all supported ML platforms (Poly/ML and
+  SML/NJ).
+
+  Literal integers in ML text are forced to be of this one true
+  integer type --- adhoc overloading of SML97 is disabled.
+
+  Structure @{ML_structure IntInf} of SML97 is obsolete and superseded by
+  @{ML_structure Int}.  Structure @{ML_structure Integer} in @{file
+  "~~/src/Pure/General/integer.ML"} provides some additional
+  operations.
+
+  \end{description}
+*}
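+
+text %mlex {* A small example of unbounded integer arithmetic: the
+  following product of 100 factors of 2 would overflow with fixed-size
+  machine integers, but is unproblematic here. *}
+
+ML_val {*
+  val big = fold (fn _ => fn k => 2 * k) (1 upto 100) 1;
+  @{assert} (big > 0);
+*}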
+
+
+subsection {* Time *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Time.time} \\
+  @{index_ML seconds: "real -> Time.time"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Time.time} represents time abstractly according
+  to the SML97 basis library definition.  This is adequate for
+  internal ML operations, but awkward in concrete time specifications.
+
+  \item @{ML seconds}~@{text "s"} turns the concrete scalar @{text
+  "s"} (measured in seconds) into an abstract time value.  Floating
+  point numbers are easy to use as configuration options in the
+  context (see \secref{sec:config-options}) or system preferences that
+  are maintained externally.
+
+  \end{description}
+*}
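+
+text %mlex {* For example, a timeout of 2.5 seconds may be specified as
+  follows; the conversion back to milliseconds uses the SML basis
+  operation @{ML Time.toMilliseconds} and is assumed to be exact for
+  this particular value. *}
+
+ML_val {*
+  val timeout = seconds 2.5;
+  @{assert} (Time.toMilliseconds timeout = 2500);
+*}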
+
+
+subsection {* Options *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Option.map: "('a -> 'b) -> 'a option -> 'b option"} \\
+  @{index_ML is_some: "'a option -> bool"} \\
+  @{index_ML is_none: "'a option -> bool"} \\
+  @{index_ML the: "'a option -> 'a"} \\
+  @{index_ML these: "'a list option -> 'a list"} \\
+  @{index_ML the_list: "'a option -> 'a list"} \\
+  @{index_ML the_default: "'a -> 'a option -> 'a"} \\
+  \end{mldecls}
+*}
+
+text {* Apart from @{ML Option.map}, most other operations defined in
+  structure @{ML_structure Option} are alien to Isabelle/ML and never
+  used.  The operations shown above are defined in @{file
+  "~~/src/Pure/General/basics.ML"}.  *}
+
+
+subsection {* Lists *}
+
+text {* Lists are ubiquitous in ML as simple and light-weight
+  ``collections'' for many everyday programming tasks.  Isabelle/ML
+  provides important additions and improvements over operations that
+  are predefined in the SML97 library. *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML cons: "'a -> 'a list -> 'a list"} \\
+  @{index_ML member: "('b * 'a -> bool) -> 'a list -> 'b -> bool"} \\
+  @{index_ML insert: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
+  @{index_ML remove: "('b * 'a -> bool) -> 'b -> 'a list -> 'a list"} \\
+  @{index_ML update: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML cons}~@{text "x xs"} evaluates to @{text "x :: xs"}.
+
+  Tupled infix operators are a historical accident in Standard ML.
+  The curried @{ML cons} amends this, but it should be only used when
+  partial application is required.
+
+  \item @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat
+  lists as a set-like container that maintains the order of elements.
+  See @{file "~~/src/Pure/library.ML"} for the full specifications
+  (written in ML).  There are some further derived operations like
+  @{ML union} or @{ML inter}.
+
+  Note that @{ML insert} is conservative about elements that are
+  already a @{ML member} of the list, while @{ML update} ensures that
+  the latest entry is always put in front.  The latter discipline is
+  often more appropriate in declarations of context data
+  (\secref{sec:context-data}) that are issued by the user in Isar
+  source: later declarations take precedence over earlier ones.
+
+  \end{description}
+*}
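+
+text %mlex {* A small sketch of these set-like list operations, using
+  plain equality @{text "(op =)"} as membership test; the behaviour
+  follows the specifications in @{file "~~/src/Pure/library.ML"}
+  referenced above. *}
+
+ML_val {*
+  val xs = [1, 2, 3];
+
+  @{assert} (member (op =) xs 2);
+  @{assert} (insert (op =) 2 xs = xs);
+  @{assert} (insert (op =) 4 xs = [4, 1, 2, 3]);
+  @{assert} (update (op =) 2 xs = [2, 1, 3]);
+  @{assert} (remove (op =) 2 xs = [1, 3]);
+*}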
+
+text %mlex {* Using canonical @{ML fold} together with @{ML cons} (or
+  similar standard operations) alternates the orientation of data.
+  This is quite natural and should not be altered forcibly by inserting
+  extra applications of @{ML rev}.  The alternative @{ML fold_rev} can
+  be used in the few situations where alternation should be
+  prevented.
+*}
+
+ML {*
+  val items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+  val list1 = fold cons items [];
+  @{assert} (list1 = rev items);
+
+  val list2 = fold_rev cons items [];
+  @{assert} (list2 = items);
+*}
+
+text {* The subsequent example demonstrates how to \emph{merge} two
+  lists in a natural way. *}
+
+ML {*
+  fun merge_lists eq (xs, ys) = fold_rev (insert eq) ys xs;
+*}
+
+text {* Here the first list is treated conservatively: only the new
+  elements from the second list are inserted.  The inside-out order of
+  insertion via @{ML fold_rev} attempts to preserve the order of
+  elements in the result.
+
+  This way of merging lists is typical for context data
+  (\secref{sec:context-data}).  See also @{ML merge} as defined in
+  @{file "~~/src/Pure/library.ML"}.
+*}
+
+
+subsection {* Association lists *}
+
+text {* The operations for association lists interpret a concrete list
+  of pairs as a finite function from keys to values.  Redundant
+  representations with multiple occurrences of the same key are
+  implicitly normalized: lookup and update only take the first
+  occurrence into account.
+*}
+
+text {*
+  \begin{mldecls}
+  @{index_ML AList.lookup: "('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option"} \\
+  @{index_ML AList.defined: "('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool"} \\
+  @{index_ML AList.update: "('a * 'a -> bool) -> 'a * 'b -> ('a * 'b) list -> ('a * 'b) list"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update}
+  implement the main ``framework operations'' for mappings in
+  Isabelle/ML, following standard conventions for their names and
+  types.
+
+  Note that a function called @{text lookup} is obliged to express its
+  partiality via an explicit option element.  It must not raise an
+  exception, unless the name is changed to something like
+  @{text "the_element"} or @{text "get"}.
+
+  The @{text "defined"} operation is essentially a contraction of @{ML
+  is_some} and @{text "lookup"}, but this is sufficiently frequent to
+  justify its independent existence.  This also gives the
+  implementation some opportunity for peep-hole optimization.
+
+  \end{description}
+
+  Association lists are adequate as simple and light-weight
+  implementation of finite mappings in many practical situations.  A
+  more heavy-duty table structure is defined in @{file
+  "~~/src/Pure/General/table.ML"}; that version scales easily to
+  thousands or millions of elements.
+*}
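+
+text %mlex {* A brief example of association list operations, again
+  using plain equality on the keys: *}
+
+ML_val {*
+  val table =
+    [] |> AList.update (op =) ("a", 1) |> AList.update (op =) ("b", 2);
+
+  @{assert} (AList.lookup (op =) table "a" = SOME 1);
+  @{assert} (AList.lookup (op =) table "c" = NONE);
+  @{assert} (AList.defined (op =) table "b");
+*}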
+
+
+subsection {* Unsynchronized references *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type "'a Unsynchronized.ref"} \\
+  @{index_ML Unsynchronized.ref: "'a -> 'a Unsynchronized.ref"} \\
+  @{index_ML "!": "'a Unsynchronized.ref -> 'a"} \\
+  @{index_ML_op ":=": "'a Unsynchronized.ref * 'a -> unit"} \\
+  \end{mldecls}
+*}
+
+text {* Due to ubiquitous parallelism in Isabelle/ML (see also
+  \secref{sec:multi-threading}), the mutable reference cells of
+  Standard ML are notorious for causing problems.  In a highly
+  parallel system, both correctness \emph{and} performance are easily
+  degraded when using mutable data.
+
+  The unwieldy name of @{ML Unsynchronized.ref} for the constructor
+  for references in Isabelle/ML emphasizes the inconveniences caused by
+  mutability.  Existing operations @{ML "!"}  and @{ML_op ":="} are
+  unchanged, but should be used with special precautions, say in a
+  strictly local situation that is guaranteed to be restricted to
+  sequential evaluation --- now and in the future.
+
+  \begin{warn}
+  Never @{ML_text "open Unsynchronized"}, not even in a local scope!
+  Pretending that mutable state is no problem is a very bad idea.
+  \end{warn}
+*}
+
+
+section {* Thread-safe programming \label{sec:multi-threading} *}
+
+text {* Multi-threaded execution has become an everyday reality in
+  Isabelle since Poly/ML 5.2.1 and Isabelle2008.  Isabelle/ML provides
+  implicit and explicit parallelism by default, and there is no way
+  for user-space tools to ``opt out''.  ML programs that are purely
+  functional, output messages only via the official channels
+  (\secref{sec:message-channels}), and do not intercept interrupts
+  (\secref{sec:exceptions}) can participate in the multi-threaded
+  environment immediately without further ado.
+
+  More ambitious tools with more fine-grained interaction with the
+  environment need to observe the principles explained below.
+*}
+
+
+subsection {* Multi-threading with shared memory *}
+
+text {* Multiple threads help to organize advanced operations of the
+  system, such as real-time conditions on command transactions,
+  sub-components with explicit communication, general asynchronous
+  interaction etc.  Moreover, parallel evaluation is a prerequisite to
+  make adequate use of the CPU resources that are available on
+  multi-core systems.\footnote{Multi-core computing does not mean that
+  there are ``spare cycles'' to be wasted.  It means that the
+  continued exponential speedup of CPU performance due to ``Moore's
+  Law'' follows different rules: clock frequency has reached its peak
+  around 2005, and applications need to be parallelized in order to
+  avoid a perceived loss of performance.  See also
+  \cite{Sutter:2005}.}
+
+  Isabelle/Isar exploits the inherent structure of theories and proofs
+  to support \emph{implicit parallelism} to a large extent.  LCF-style
+  theorem proving provides almost ideal conditions for that; see also
+  \cite{Wenzel:2009}.  This means that significant parts of theory and
+  proof checking are parallelized by default.  In Isabelle2013, a
+  maximum speedup-factor of 3.5 on 4 cores and 6.5 on 8 cores can be
+  expected.
+
+  \medskip ML threads lack the memory protection of separate
+  processes, and operate concurrently on shared heap memory.  This has
+  the advantage that results of independent computations are directly
+  available to other threads: abstract values can be passed without
+  copying or awkward serialization that is typically required for
+  separate processes.
+
+  To make shared-memory multi-threading work robustly and efficiently,
+  some programming guidelines need to be observed.  While the ML
+  system is responsible for maintaining basic integrity of the
+  representation of ML values in memory, the application programmer
+  needs to ensure that multi-threaded execution does not break the
+  intended semantics.
+
+  \begin{warn}
+  To participate in implicit parallelism, tools need to be
+  thread-safe.  A single ill-behaved tool can affect the stability and
+  performance of the whole system.
+  \end{warn}
+
+  Apart from observing the principles of thread-safeness passively,
+  advanced tools may also exploit parallelism actively, e.g.\ by using
+  ``future values'' (\secref{sec:futures}) or the more basic library
+  functions for parallel list operations (\secref{sec:parlist}).
+
+  \begin{warn}
+  Parallel computing resources are managed centrally by the
+  Isabelle/ML infrastructure.  User programs must not fork their own
+  ML threads to perform computations.
+  \end{warn}
+*}
+
+
+subsection {* Critical shared resources *}
+
+text {* Thread-safeness is mainly concerned with concurrent
+  read/write access to shared resources, which are outside the purely
+  functional world of ML.  This covers the following in particular.
+
+  \begin{itemize}
+
+  \item Global references (or arrays), i.e.\ mutable memory cells that
+  persist over several invocations of associated
+  operations.\footnote{This is independent of the visibility of such
+  mutable values in the toplevel scope.}
+
+  \item Global state of the running Isabelle/ML process, i.e.\ raw I/O
+  channels, environment variables, current working directory.
+
+  \item Writable resources in the file-system that are shared among
+  different threads or external processes.
+
+  \end{itemize}
+
+  Isabelle/ML provides various mechanisms to avoid critical shared
+  resources in most situations.  As last resort there are some
+  mechanisms for explicit synchronization.  The following guidelines
+  help to make Isabelle/ML programs work smoothly in a concurrent
+  environment.
+
+  \begin{itemize}
+
+  \item Avoid global references altogether.  Isabelle/Isar maintains a
+  uniform context that incorporates arbitrary data declared by user
+  programs (\secref{sec:context-data}).  This context is passed as
+  plain value and user tools can get/map their own data in a purely
+  functional manner.  Configuration options within the context
+  (\secref{sec:config-options}) provide simple drop-in replacements
+  for historic reference variables.
+
+  \item Keep components with local state information re-entrant.
+  Instead of poking initial values into (private) global references, a
+  new state record can be created on each invocation, and passed
+  through any auxiliary functions of the component.  The state record
+  may well contain mutable references, without requiring any special
+  synchronization, as long as each invocation gets its own copy.
+
+  \item Avoid raw output on @{text "stdout"} or @{text "stderr"}.  The
+  Poly/ML library is thread-safe for each individual output operation,
+  but the ordering of parallel invocations is arbitrary.  This means
+  raw output will appear on some system console with unpredictable
+  interleaving of atomic chunks.
+
+  Note that this does not affect regular message output channels
+  (\secref{sec:message-channels}).  An official message is associated
+  with the command transaction from where it originates, independently
+  of other transactions.  This means each running Isar command has
+  effectively its own set of message channels, and interleaving can
+  only happen when commands use parallelism internally (and only at
+  message boundaries).
+
+  \item Treat environment variables and the current working directory
+  of the running process as strictly read-only.
+
+  \item Restrict writing to the file-system to unique temporary files.
+  Isabelle already provides a temporary directory that is unique for
+  the running process, and there is a centralized source of unique
+  serial numbers in Isabelle/ML.  Thus temporary files that are passed
+  to some external process will always be disjoint, and thus
+  thread-safe.
+
+  \end{itemize}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML File.tmp_path: "Path.T -> Path.T"} \\
+  @{index_ML serial_string: "unit -> string"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML File.tmp_path}~@{text "path"} relocates the base
+  component of @{text "path"} into the unique temporary directory of
+  the running Isabelle/ML process.
+
+  \item @{ML serial_string}~@{text "()"} creates a new serial number
+  that is unique over the runtime of the Isabelle/ML process.
+
+  \end{description}
+*}
+
+text %mlex {* The following example shows how to create unique
+  temporary file names.
+*}
+
+ML {*
+  val tmp1 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
+  val tmp2 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
+  @{assert} (tmp1 <> tmp2);
+*}
+
+
+subsection {* Explicit synchronization *}
+
+text {* Isabelle/ML also provides some explicit synchronization
+  mechanisms, for the rare situations where mutable shared resources
+  are really required.  These are based on the synchronization
+  primitives of Poly/ML, which have been adapted to the specific
+  assumptions of the concurrent Isabelle/ML environment.  User code
+  must not use the Poly/ML primitives directly!
+
+  \medskip The most basic synchronization concept is a single
+  \emph{critical section} (also called ``monitor'' in the literature).
+  A thread that enters the critical section prevents all other threads
+  from doing the same.  A thread that is already within the critical
+  section may re-enter it in an idempotent manner.
+
+  Such centralized locking is convenient, because it prevents
+  deadlocks by construction.
+
+  \medskip More fine-grained locking works via \emph{synchronized
+  variables}.  An explicit state component is associated with
+  mechanisms for locking and signaling.  There are operations to
+  await a condition, change the state, and signal the change to all
+  other waiting threads.
+
+  Here the synchronized access to the state variable is \emph{not}
+  re-entrant: direct or indirect nesting within the same thread will
+  cause a deadlock!
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML NAMED_CRITICAL: "string -> (unit -> 'a) -> 'a"} \\
+  @{index_ML CRITICAL: "(unit -> 'a) -> 'a"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type "'a Synchronized.var"} \\
+  @{index_ML Synchronized.var: "string -> 'a -> 'a Synchronized.var"} \\
+  @{index_ML Synchronized.guarded_access: "'a Synchronized.var ->
+  ('a -> ('b * 'a) option) -> 'b"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML NAMED_CRITICAL}~@{text "name e"} evaluates @{text "e ()"}
+  within the central critical section of Isabelle/ML.  No other thread
+  may do so at the same time, but non-critical parallel execution will
+  continue.  The @{text "name"} argument is used for tracing and might
+  help to spot sources of congestion.
+
+  Entering the critical section without contention is very fast.  Each
+  thread should stay within the critical section only very briefly,
+  otherwise parallel performance may degrade.
+
+  \item @{ML CRITICAL} is the same as @{ML NAMED_CRITICAL} with empty
+  name argument.
+
+  \item Type @{ML_type "'a Synchronized.var"} represents synchronized
+  variables with state of type @{ML_type 'a}.
+
+  \item @{ML Synchronized.var}~@{text "name x"} creates a synchronized
+  variable that is initialized with value @{text "x"}.  The @{text
+  "name"} is used for tracing.
+
+  \item @{ML Synchronized.guarded_access}~@{text "var f"} lets the
+  function @{text "f"} operate within a critical section on the state
+  @{text "x"} as follows: if @{text "f x"} produces @{ML NONE}, it
+  continues to wait on the internal condition variable, expecting that
+  some other thread will eventually change the content in a suitable
+  manner; if @{text "f x"} produces @{ML SOME}~@{text "(y, x')"} it is
+  satisfied and assigns the new state value @{text "x'"}, broadcasts a
+  signal to all waiting threads on the associated condition variable,
+  and returns the result @{text "y"}.
+
+  \end{description}
+
+  There are some further variants of the @{ML
+  Synchronized.guarded_access} combinator, see @{file
+  "~~/src/Pure/Concurrent/synchronized.ML"} for details.
+*}
+
+text %mlex {* The following example implements a counter that produces
+  positive integers that are unique over the runtime of the Isabelle
+  process:
+*}
+
+ML {*
+  local
+    val counter = Synchronized.var "counter" 0;
+  in
+    fun next () =
+      Synchronized.guarded_access counter
+        (fn i =>
+          let val j = i + 1
+          in SOME (j, j) end);
+  end;
+*}
+
+ML {*
+  val a = next ();
+  val b = next ();
+  @{assert} (a <> b);
+*}
+
+text {* \medskip See @{file "~~/src/Pure/Concurrent/mailbox.ML"} how
+  to implement a mailbox as synchronized variable over a purely
+  functional queue. *}
+
+
+section {* Managed evaluation *}
+
+text {* Execution of Standard ML follows the model of strict
+  functional evaluation with optional exceptions.  Evaluation happens
+  whenever some function is applied to (sufficiently many)
+  arguments. The result is either an explicit value or an implicit
+  exception.
+
+  \emph{Managed evaluation} in Isabelle/ML organizes expressions and
+  results to control certain physical side-conditions, to say more
+  specifically when and how evaluation happens.  For example, the
+  Isabelle/ML library supports lazy evaluation with memoing, parallel
+  evaluation via futures, asynchronous evaluation via promises,
+  evaluation with time limit etc.
+
+  \medskip An \emph{unevaluated expression} is represented either as
+  unit abstraction @{verbatim "fn () => a"} of type
+  @{verbatim "unit -> 'a"} or as regular function
+  @{verbatim "fn a => b"} of type @{verbatim "'a -> 'b"}.  Both forms
+  occur routinely, and special care is required to tell them apart ---
+  the static type-system of SML is only of limited help here.
+
+  The first form is more intuitive: some combinator @{text "(unit ->
+  'a) -> 'a"} applies the given function to @{text "()"} to initiate
+  the postponed evaluation process.  The second form is more flexible:
+  some combinator @{text "('a -> 'b) -> 'a -> 'b"} acts like a
+  modified form of function application; several such combinators may
+  be cascaded to modify a given function, before it is ultimately
+  applied to some argument.
+
+  \medskip \emph{Reified results} make the disjoint sum of regular
+  values versus exceptional situations explicit as an ML datatype:
+  @{text "'a result = Res of 'a | Exn of exn"}.  This is typically
+  used for administrative purposes, to store the overall outcome of an
+  evaluation process.
+
+  \emph{Parallel exceptions} aggregate reified results, such that
+  multiple exceptions are digested as a collection in canonical form
+  that identifies exceptions according to their original occurrence.
+  This is particularly important for parallel evaluation via futures
+  (\secref{sec:futures}), which are organized as an acyclic graph of
+  evaluations that depend on other evaluations: exceptions stemming
+  from shared sub-graphs are exposed exactly once and in the order of
+  their original occurrence (e.g.\ when printed at the toplevel).
+  Interrupt counts as neutral element here: it is treated as minimal
+  information about some canceled evaluation process, and is absorbed
+  by the presence of regular program exceptions.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type "'a Exn.result"} \\
+  @{index_ML Exn.capture: "('a -> 'b) -> 'a -> 'b Exn.result"} \\
+  @{index_ML Exn.interruptible_capture: "('a -> 'b) -> 'a -> 'b Exn.result"} \\
+  @{index_ML Exn.release: "'a Exn.result -> 'a"} \\
+  @{index_ML Par_Exn.release_all: "'a Exn.result list -> 'a list"} \\
+  @{index_ML Par_Exn.release_first: "'a Exn.result list -> 'a list"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type "'a Exn.result"} represents the disjoint sum of
+  ML results explicitly, with constructor @{ML Exn.Res} for regular
+  values and @{ML "Exn.Exn"} for exceptions.
+
+  \item @{ML Exn.capture}~@{text "f x"} manages the evaluation of
+  @{text "f x"} such that exceptions are made explicit as @{ML
+  "Exn.Exn"}.  Note that this includes physical interrupts (see also
+  \secref{sec:exceptions}), so the same precautions apply to user
+  code: interrupts must not be absorbed accidentally!
+
+  \item @{ML Exn.interruptible_capture} is similar to @{ML
+  Exn.capture}, but interrupts are immediately re-raised as required
+  for user code.
+
+  \item @{ML Exn.release}~@{text "result"} releases the original
+  runtime result, exposing its regular value or raising the reified
+  exception.
+
+  \item @{ML Par_Exn.release_all}~@{text "results"} combines results
+  that were produced independently (e.g.\ by parallel evaluation).  If
+  all results are regular values, that list is returned.  Otherwise,
+  the collection of all exceptions is raised, wrapped-up as collective
+  parallel exception.  Note that the latter prevents access to
+  individual exceptions by conventional @{verbatim "handle"} of SML.
+
+  \item @{ML Par_Exn.release_first} is similar to @{ML
+  Par_Exn.release_all}, but only the first exception that has occurred
+  in the original evaluation process is raised again, the others are
+  ignored.  That single exception may get handled by conventional
+  means in SML.
+
+  \end{description}
+*}
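+
+text %mlex {* A minimal illustration of reified results: a successful
+  and a failing evaluation are captured, and released again later. *}
+
+ML_val {*
+  val ok = Exn.capture (fn () => 40 + 2) ();
+  val bad = Exn.capture (fn () => hd ([]: int list)) ();
+
+  @{assert} (Exn.release ok = 42);
+  @{assert} (is_none (try Exn.release bad));
+*}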
+
+
+subsection {* Parallel skeletons \label{sec:parlist} *}
+
+text {*
+  Algorithmic skeletons are combinators that operate on lists in
+  parallel, in the manner of well-known @{text map}, @{text exists},
+  @{text forall} etc.  Management of futures (\secref{sec:futures})
+  and their results as reified exceptions is wrapped up into simple
+  programming interfaces that resemble the sequential versions.
+
+  What remains is the application-specific problem to present
+  expressions with suitable \emph{granularity}: each list element
+  corresponds to one evaluation task.  If the granularity is too
+  coarse, the available CPUs are not saturated.  If it is too
+  fine-grained, CPU cycles are wasted due to the overhead of
+  organizing parallel processing.  In the worst case, parallel
+  performance will be less than the sequential counterpart!
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Par_List.map: "('a -> 'b) -> 'a list -> 'b list"} \\
+  @{index_ML Par_List.get_some: "('a -> 'b option) -> 'a list -> 'b option"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Par_List.map}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} is like @{ML
+  "map"}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"}, but the evaluation of @{text "f x\<^sub>i"}
+  for @{text "i = 1, \<dots>, n"} is performed in parallel.
+
+  An exception in any @{text "f x\<^sub>i"} cancels the overall evaluation
+  process.  The final result is produced via @{ML
+  Par_Exn.release_first} as explained above, which means the first
+  program exception that happened to occur in the parallel evaluation
+  is propagated, and all other failures are ignored.
+
+  \item @{ML Par_List.get_some}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} produces some
+  @{text "f x\<^sub>i"} that is of the form @{text "SOME y\<^sub>i"}, if that
+  exists, otherwise @{text "NONE"}.  Thus it is similar to @{ML
+  Library.get_first}, but subject to a non-deterministic parallel
+  choice process.  The first successful result cancels the overall
+  evaluation process; other exceptions are propagated as for @{ML
+  Par_List.map}.
+
+  This generic parallel choice combinator is the basis for derived
+  forms, such as @{ML Par_List.find_some}, @{ML Par_List.exists}, @{ML
+  Par_List.forall}.
+
+  \end{description}
+*}
+
+text %mlex {* Subsequently, the Ackermann function is evaluated in
+  parallel for some ranges of arguments. *}
+
+ML_val {*
+  fun ackermann 0 n = n + 1
+    | ackermann m 0 = ackermann (m - 1) 1
+    | ackermann m n = ackermann (m - 1) (ackermann m (n - 1));
+
+  Par_List.map (ackermann 2) (500 upto 1000);
+  Par_List.map (ackermann 3) (5 upto 10);
+*}
+
+
+subsection {* Lazy evaluation *}
+
+text {*
+  %FIXME
+
+  See also @{file "~~/src/Pure/Concurrent/lazy.ML"}.
+*}
+
+
+subsection {* Future values \label{sec:futures} *}
+
+text {*
+  %FIXME
+
+  See also @{file "~~/src/Pure/Concurrent/future.ML"}.
+*}
+
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Prelim.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1069 @@
+theory Prelim
+imports Base
+begin
+
+chapter {* Preliminaries *}
+
+section {* Contexts \label{sec:context} *}
+
+text {*
+  A logical context represents the background that is required for
+  formulating statements and composing proofs.  It acts as a medium to
+  produce formal content, depending on earlier material (declarations,
+  results etc.).
+
+  For example, derivations within the Isabelle/Pure logic can be
+  described as a judgment @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<phi>"}, which means that a
+  proposition @{text "\<phi>"} is derivable from hypotheses @{text "\<Gamma>"}
+  within the theory @{text "\<Theta>"}.  There are logical reasons for
+  keeping @{text "\<Theta>"} and @{text "\<Gamma>"} separate: theories can be
+  liberal about supporting type constructors and schematic
+  polymorphism of constants and axioms, while the inner calculus of
+  @{text "\<Gamma> \<turnstile> \<phi>"} is strictly limited to Simple Type Theory (with
+  fixed type variables in the assumptions).
+
+  \medskip Contexts and derivations are linked by the following key
+  principles:
+
+  \begin{itemize}
+
+  \item Transfer: monotonicity of derivations admits results to be
+  transferred into a \emph{larger} context, i.e.\ @{text "\<Gamma> \<turnstile>\<^sub>\<Theta>
+  \<phi>"} implies @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta>\<^sub>' \<phi>"} for contexts @{text "\<Theta>'
+  \<supseteq> \<Theta>"} and @{text "\<Gamma>' \<supseteq> \<Gamma>"}.
+
+  \item Export: discharge of hypotheses admits results to be exported
+  into a \emph{smaller} context, i.e.\ @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta> \<phi>"}
+  implies @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<Delta> \<Longrightarrow> \<phi>"} where @{text "\<Gamma>' \<supseteq> \<Gamma>"} and
+  @{text "\<Delta> = \<Gamma>' - \<Gamma>"}.  Note that @{text "\<Theta>"} remains unchanged here,
+  only the @{text "\<Gamma>"} part is affected.
+
+  \end{itemize}
+
+  \medskip By modeling the main characteristics of the primitive
+  @{text "\<Theta>"} and @{text "\<Gamma>"} above, and abstracting over any
+  particular logical content, we arrive at the fundamental notions of
+  \emph{theory context} and \emph{proof context} in Isabelle/Isar.
+  These implement a certain policy to manage arbitrary \emph{context
+  data}.  There is a strongly-typed mechanism to declare new kinds of
+  data at compile time.
+
+  The internal bootstrap process of Isabelle/Pure eventually reaches a
+  stage where certain data slots provide the logical content of @{text
+  "\<Theta>"} and @{text "\<Gamma>"} sketched above, but this does not stop there!
+  Various additional data slots support all kinds of mechanisms that
+  are not necessarily part of the core logic.
+
+  For example, there would be data for canonical introduction and
+  elimination rules for arbitrary operators (depending on the
+  object-logic and application), which enables users to perform
+  standard proof steps implicitly (cf.\ the @{text "rule"} method
+  \cite{isabelle-isar-ref}).
+
+  \medskip Thus Isabelle/Isar is able to bring forth more and more
+  concepts successively.  In particular, an object-logic like
+  Isabelle/HOL continues the Isabelle/Pure setup by adding specific
+  components for automated reasoning (classical reasoner, tableau
+  prover, structured induction etc.) and derived specification
+  mechanisms (inductive predicates, recursive functions etc.).  All of
+  this is ultimately based on the generic data management by theory
+  and proof contexts introduced here.
+*}
+
+
+subsection {* Theory context \label{sec:context-theory} *}
+
+text {* A \emph{theory} is a data container with explicit name and
+  unique identifier.  Theories are related by a (nominal) sub-theory
+  relation, which corresponds to the dependency graph of the original
+  construction; each theory is derived from a certain sub-graph of
+  ancestor theories.  To this end, the system maintains a set of
+  symbolic ``identification stamps'' within each theory.
+
+  The @{text "merge"} operation produces the least upper bound of two
+  theories, which actually degenerates into absorption of one theory
+  into the other (according to the nominal sub-theory relation).
+
+  The @{text "begin"} operation starts a new theory by importing
+  several parent theories and entering a special mode of nameless
+  incremental updates, until the final @{text "end"} operation is
+  performed.
+
+  \medskip The example in \figref{fig:ex-theory} below shows a theory
+  graph derived from @{text "Pure"}, with theory @{text "Length"}
+  importing @{text "Nat"} and @{text "List"}.  The body of @{text
+  "Length"} consists of a sequence of updates, resulting in locally a
+  linear sub-theory relation for each intermediate step.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{rcccl}
+        &            & @{text "Pure"} \\
+        &            & @{text "\<down>"} \\
+        &            & @{text "FOL"} \\
+        & $\swarrow$ &              & $\searrow$ & \\
+  @{text "Nat"} &    &              &            & @{text "List"} \\
+        & $\searrow$ &              & $\swarrow$ \\
+        &            & @{text "Length"} \\
+        &            & \multicolumn{3}{l}{~~@{keyword "begin"}} \\
+        &            & $\vdots$~~ \\
+        &            & \multicolumn{3}{l}{~~@{command "end"}} \\
+  \end{tabular}
+  \caption{A theory definition depending on ancestors}\label{fig:ex-theory}
+  \end{center}
+  \end{figure}
+
+  \medskip Derived formal entities may retain a reference to the
+  background theory in order to indicate the formal context from which
+  they were produced.  This provides an immutable certificate of the
+  background theory.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type theory} \\
+  @{index_ML Theory.eq_thy: "theory * theory -> bool"} \\
+  @{index_ML Theory.subthy: "theory * theory -> bool"} \\
+  @{index_ML Theory.merge: "theory * theory -> theory"} \\
+  @{index_ML Theory.begin_theory: "string * Position.T -> theory list -> theory"} \\
+  @{index_ML Theory.parents_of: "theory -> theory list"} \\
+  @{index_ML Theory.ancestors_of: "theory -> theory list"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type theory} represents theory contexts.
+
+  \item @{ML "Theory.eq_thy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} check strict
+  identity of two theories.
+
+  \item @{ML "Theory.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
+  according to the intrinsic graph structure of the construction.
+  This sub-theory relation is a nominal approximation of inclusion
+  (@{text "\<subseteq>"}) of the corresponding content (according to the
+  semantics of the ML modules that implement the data).
+
+  \item @{ML "Theory.merge"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} absorbs one theory
+  into the other.  This version of ad-hoc theory merge fails for
+  unrelated theories!
+
+  \item @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
+  a new theory based on the given parents.  This ML function is
+  normally not invoked directly.
+
+  \item @{ML "Theory.parents_of"}~@{text "thy"} returns the direct
+  ancestors of @{text thy}.
+
+  \item @{ML "Theory.ancestors_of"}~@{text "thy"} returns all
+  ancestors of @{text thy} (not including @{text thy} itself).
+
+  \end{description}
+*}
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "theory"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "theory_context"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation theory} nameref?
+  ;
+  @@{ML_antiquotation theory_context} nameref
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{theory}"} refers to the background theory of the
+  current context --- as abstract value.
+
+  \item @{text "@{theory A}"} refers to an explicitly named ancestor
+  theory @{text "A"} of the background theory of the current context
+  --- as abstract value.
+
+  \item @{text "@{theory_context A}"} is similar to @{text "@{theory
+  A}"}, but presents the result as initial @{ML_type Proof.context}
+  (see also @{ML Proof_Context.init_global}).
+
+  \end{description}
+*}
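+
+text %mlex {* The following example peeks at the background theory of
+  the current document position, and checks some basic properties of
+  the ancestor relation: *}
+
+ML_val {*
+  val thy = @{theory};
+
+  @{assert} (Theory.subthy (thy, thy));
+  @{assert}
+    (forall (fn parent => Theory.subthy (parent, thy)) (Theory.parents_of thy));
+*}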
+
+
+subsection {* Proof context \label{sec:context-proof} *}
+
+text {* A proof context is a container for pure data that refers to
+  the theory from which it is derived. The @{text "init"} operation
+  creates a proof context from a given theory. There is an explicit
+  @{text "transfer"} operation to force resynchronization with updates
+  to the background theory -- this is rarely required in practice.
+
+  Entities derived in a proof context need to record logical
+  requirements explicitly, since there is no separate context
+  identification or symbolic inclusion as for theories.  For example,
+  hypotheses used in primitive derivations (cf.\ \secref{sec:thms})
+  are recorded separately within the sequent @{text "\<Gamma> \<turnstile> \<phi>"}, just to
+  make double sure.  Results could still leak into an alien proof
+  context due to programming errors, but Isabelle/Isar includes some
+  extra validity checks in critical positions, notably at the end of a
+  sub-proof.
+
+  Proof contexts may be manipulated arbitrarily, although the common
+  discipline is to follow block structure as a mental model: a given
+  context is extended consecutively, and results are exported back
+  into the original context.  Note that an Isar proof state models
+  block-structured reasoning explicitly, using a stack of proof
+  contexts internally.  For various technical reasons, the background
+  theory of an Isar proof state must not be changed while the proof is
+  still under construction!
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Proof.context} \\
+  @{index_ML Proof_Context.init_global: "theory -> Proof.context"} \\
+  @{index_ML Proof_Context.theory_of: "Proof.context -> theory"} \\
+  @{index_ML Proof_Context.transfer: "theory -> Proof.context -> Proof.context"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Proof.context} represents proof contexts.
+
+  \item @{ML Proof_Context.init_global}~@{text "thy"} produces a proof
+  context derived from @{text "thy"}, initializing all data.
+
+  \item @{ML Proof_Context.theory_of}~@{text "ctxt"} selects the
+  background theory from @{text "ctxt"}.
+
+  \item @{ML Proof_Context.transfer}~@{text "thy ctxt"} promotes the
+  background theory of @{text "ctxt"} to the super theory @{text
+  "thy"}.
+
+  \end{description}
+*}
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "context"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{text "@{context}"} refers to \emph{the} context at
+  compile-time --- as abstract value.  Independently of (local) theory
+  or proof mode, this always produces a meaningful result.
+
+  This is probably the most common antiquotation in interactive
+  experimentation with ML inside Isar.
+
+  \end{description}
+*}
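+
+text %mlex {* The following example produces a fresh proof context from
+  the background theory of the compile-time context, and cross-checks
+  the result: *}
+
+ML_val {*
+  val thy = Proof_Context.theory_of @{context};
+  val ctxt = Proof_Context.init_global thy;
+
+  @{assert} (Theory.eq_thy (Proof_Context.theory_of ctxt, thy));
+*}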
+
+
+subsection {* Generic contexts \label{sec:generic-context} *}
+
+text {*
+  A generic context is the disjoint sum of either a theory or proof
+  context.  Occasionally, this enables uniform treatment of generic
+  context data, typically extra-logical information.  Operations on
+  generic contexts include the usual injections, partial selections,
+  and combinators for lifting operations on either component of the
+  disjoint sum.
+
+  Moreover, there are total operations @{text "theory_of"} and @{text
+  "proof_of"} to convert a generic context into either kind: a theory
+  can always be selected from the sum, while a proof context might
+  have to be constructed by an ad-hoc @{text "init"} operation, which
+  incurs a small runtime overhead.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Context.generic} \\
+  @{index_ML Context.theory_of: "Context.generic -> theory"} \\
+  @{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Context.generic} is the direct sum of @{ML_type
+  "theory"} and @{ML_type "Proof.context"}, with the datatype
+  constructors @{ML "Context.Theory"} and @{ML "Context.Proof"}.
+
+  \item @{ML Context.theory_of}~@{text "context"} always produces a
+  theory from the generic @{text "context"}, using @{ML
+  "Proof_Context.theory_of"} as required.
+
+  \item @{ML Context.proof_of}~@{text "context"} always produces a
+  proof context from the generic @{text "context"}, using @{ML
+  "Proof_Context.init_global"} as required (note that this re-initializes the
+  context data with each invocation).
+
+  \end{description}
+*}
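+
+text %mlex {* The injections and the total selectors fit together as
+  follows: *}
+
+ML_val {*
+  val ctxt = @{context};
+  val thy = Context.theory_of (Context.Proof ctxt);
+  val ctxt' = Context.proof_of (Context.Theory thy);
+
+  @{assert} (Theory.eq_thy (thy, Proof_Context.theory_of ctxt));
+  @{assert} (Theory.eq_thy (Proof_Context.theory_of ctxt', thy));
+*}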
+
+
+subsection {* Context data \label{sec:context-data} *}
+
+text {* The main purpose of theory and proof contexts is to manage
+  arbitrary (pure) data.  New data types can be declared incrementally
+  at compile time.  There are separate declaration mechanisms for any
+  of the three kinds of contexts: theory, proof, generic.
+
+  \paragraph{Theory data} declarations need to implement the following
+  SML signature:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "\<type> T"} & representing type \\
+  @{text "\<val> empty: T"} & empty default value \\
+  @{text "\<val> extend: T \<rightarrow> T"} & re-initialize on import \\
+  @{text "\<val> merge: T \<times> T \<rightarrow> T"} & join on import \\
+  \end{tabular}
+  \medskip
+
+  The @{text "empty"} value acts as initial default for \emph{any}
+  theory that does not declare actual data content; @{text "extend"}
+  acts like a unitary version of @{text "merge"}.
+
+  Implementing @{text "merge"} can be tricky.  The general idea is
+  that @{text "merge (data\<^sub>1, data\<^sub>2)"} inserts those parts of @{text
+  "data\<^sub>2"} into @{text "data\<^sub>1"} that are not yet present, while
+  keeping the general order of things.  The @{ML Library.merge}
+  function on plain lists may serve as canonical template.
+
+  Particularly note that shared parts of the data must not be
+  duplicated by naive concatenation, or a theory graph that is like a
+  chain of diamonds would cause an exponential blowup!
+
+  \paragraph{Proof context data} declarations need to implement the
+  following SML signature:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "\<type> T"} & representing type \\
+  @{text "\<val> init: theory \<rightarrow> T"} & produce initial value \\
+  \end{tabular}
+  \medskip
+
+  The @{text "init"} operation is supposed to produce a pure value
+  from the given background theory and should be somehow
+  ``immediate''.  Whenever a proof context is initialized, which
+  happens frequently, the system invokes the @{text "init"}
+  operation of \emph{all} proof data slots ever declared.  This also
+  means that one needs to be economic about the total number of proof
+  data declarations in the system, i.e.\ each ML module should declare
+  at most one, sometimes two data slots for its internal use.
+  Repeated data declarations to simulate a record type should be
+  avoided!
+
+  \paragraph{Generic data} provides a hybrid interface for both theory
+  and proof data.  The @{text "init"} operation for proof contexts is
+  predefined to select the current data value from the background
+  theory.
+
+  \bigskip Any of the above data declarations over type @{text "T"}
+  result in an ML structure with the following signature:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "get: context \<rightarrow> T"} \\
+  @{text "put: T \<rightarrow> context \<rightarrow> context"} \\
+  @{text "map: (T \<rightarrow> T) \<rightarrow> context \<rightarrow> context"} \\
+  \end{tabular}
+  \medskip
+
+  These operations provide exclusive access for the particular
+  kind of context (theory, proof, or generic context).  This interface
+  observes the ML discipline for types and scopes: there is no other
+  way to access the corresponding data slot of a context.  By keeping
+  these operations private, an Isabelle/ML module may maintain
+  abstract values authentically.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_functor Theory_Data} \\
+  @{index_ML_functor Proof_Data} \\
+  @{index_ML_functor Generic_Data} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML_functor Theory_Data}@{text "(spec)"} declares data for
+  type @{ML_type theory} according to the specification provided as
+  argument structure.  The resulting structure provides data init and
+  access operations as described above.
+
+  \item @{ML_functor Proof_Data}@{text "(spec)"} is analogous to
+  @{ML_functor Theory_Data} for type @{ML_type Proof.context}.
+
+  \item @{ML_functor Generic_Data}@{text "(spec)"} is analogous to
+  @{ML_functor Theory_Data} for type @{ML_type Context.generic}.
+
+  \end{description}
+*}
+
+text %mlex {*
+  The following artificial example demonstrates theory
+  data: we maintain a set of terms that are supposed to be wellformed
+  wrt.\ the enclosing theory.  The public interface is as follows:
+*}
+
+ML {*
+  signature WELLFORMED_TERMS =
+  sig
+    val get: theory -> term list
+    val add: term -> theory -> theory
+  end;
+*}
+
+text {* The implementation uses private theory data internally, and
+  only exposes an operation that involves explicit argument checking
+  wrt.\ the given theory. *}
+
+ML {*
+  structure Wellformed_Terms: WELLFORMED_TERMS =
+  struct
+
+  structure Terms = Theory_Data
+  (
+    type T = term Ord_List.T;
+    val empty = [];
+    val extend = I;
+    fun merge (ts1, ts2) =
+      Ord_List.union Term_Ord.fast_term_ord ts1 ts2;
+  );
+
+  val get = Terms.get;
+
+  fun add raw_t thy =
+    let
+      val t = Sign.cert_term thy raw_t;
+    in
+      Terms.map (Ord_List.insert Term_Ord.fast_term_ord t) thy
+    end;
+
+  end;
+*}
+
+text {* Type @{ML_type "term Ord_List.T"} is used for reasonably
+  efficient representation of a set of terms: all operations are
+  linear in the number of stored elements.  Here we assume that users
+  of this module do not care about the declaration order, since that
+  data structure forces its own arrangement of elements.
+
+  Observe how the @{ML_text merge} operation joins the data slots of
+  the two constituents: @{ML Ord_List.union} prevents duplication of
+  common data from different branches, thus avoiding the danger of
+  exponential blowup.  Plain list append etc.\ must never be used for
+  theory data merges!
+
+  \medskip Our intended invariant is achieved as follows:
+  \begin{enumerate}
+
+  \item @{ML Wellformed_Terms.add} only admits terms that have passed
+  the @{ML Sign.cert_term} check of the given theory at that point.
+
+  \item Wellformedness in the sense of @{ML Sign.cert_term} is
+  monotonic wrt.\ the sub-theory relation.  So our data can move
+  upwards in the hierarchy (via extension or merges), and maintain
+  wellformedness without further checks.
+
+  \end{enumerate}
+
+  Note that all basic operations of the inference kernel (which
+  includes @{ML Sign.cert_term}) observe this monotonicity principle,
+  but other user-space tools don't.  For example, fully-featured
+  type-inference via @{ML Syntax.check_term} (cf.\
+  \secref{sec:term-check}) is not necessarily monotonic wrt.\ the
+  background theory, since constraints of term constants can be
+  modified by later declarations, for example.
+
+  In most cases, user-space context data does not have to take such
+  invariants too seriously.  The situation is different in the
+  implementation of the inference kernel itself, which uses the very
+  same data mechanisms for types, constants, axioms etc.
+*}
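+
+text {* \medskip The following hypothetical usage sketch is not part of
+  the original example: the @{command setup} command applies @{ML
+  Wellformed_Terms.add} to the enclosing theory of this document; the
+  particular term is arbitrary. *}
+
+setup {* Wellformed_Terms.add @{term "\<lambda>x::nat. x"} *}
+
+ML_val {*
+  @{assert}
+    (member (op aconv) (Wellformed_Terms.get @{theory})
+      @{term "\<lambda>x::nat. x"});
+*}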
+
+
+subsection {* Configuration options \label{sec:config-options} *}
+
+text {* A \emph{configuration option} is a named optional value of
+  some basic type (Boolean, integer, string) that is stored in the
+  context.  It is a simple application of general context data
+  (\secref{sec:context-data}) that is sufficiently common to justify
+  customized setup, which includes some concrete declarations for
+  end-users using existing notation for attributes (cf.\
+  \secref{sec:attributes}).
+
+  For example, the predefined configuration option @{attribute
+  show_types} controls output of explicit type constraints for
+  variables in printed terms (cf.\ \secref{sec:read-print}).  Its
+  value can be modified within Isar text like this:
+*}
+
+declare [[show_types = false]]
+  -- {* declaration within (local) theory context *}
+
+notepad
+begin
+  note [[show_types = true]]
+    -- {* declaration within proof (forward mode) *}
+  term x
+
+  have "x = x"
+    using [[show_types = false]]
+      -- {* declaration within proof (backward mode) *}
+    ..
+end
+
+text {* Configuration options that are not set explicitly hold a
+  default value that can depend on the application context.  This
+  makes it possible to retrieve the value from another slot within
+  the context, or to fall back on a global preference mechanism, for
+  example.
+
+  The operations to declare configuration options and get/map their
+  values are modeled as direct replacements for historic global
+  references, except that the context is made explicit.  This allows
+  easy configuration of tools, without relying on the execution order
+  as required for old-style mutable references.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Config.get: "Proof.context -> 'a Config.T -> 'a"} \\
+  @{index_ML Config.map: "'a Config.T -> ('a -> 'a) -> Proof.context -> Proof.context"} \\
+  @{index_ML Attrib.setup_config_bool: "binding -> (Context.generic -> bool) ->
+  bool Config.T"} \\
+  @{index_ML Attrib.setup_config_int: "binding -> (Context.generic -> int) ->
+  int Config.T"} \\
+  @{index_ML Attrib.setup_config_real: "binding -> (Context.generic -> real) ->
+  real Config.T"} \\
+  @{index_ML Attrib.setup_config_string: "binding -> (Context.generic -> string) ->
+  string Config.T"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Config.get}~@{text "ctxt config"} gets the value of
+  @{text "config"} in the given context.
+
+  \item @{ML Config.map}~@{text "config f ctxt"} updates the context
+  by updating the value of @{text "config"}.
+
+  \item @{text "config ="}~@{ML Attrib.setup_config_bool}~@{text "name
+  default"} creates a named configuration option of type @{ML_type
+  bool}, with the given @{text "default"} depending on the application
+  context.  The resulting @{text "config"} can be used to get/map its
+  value in a given context.  There is an implicit update of the
+  background theory that registers the option as an attribute with some
+  concrete syntax.
+
+  \item @{ML Attrib.setup_config_int}, @{ML Attrib.setup_config_real},
+  and @{ML Attrib.setup_config_string} work like @{ML
+  Attrib.setup_config_bool}, but for types @{ML_type int}, @{ML_type
+  real}, and @{ML_type string}, respectively.
+
+  \end{description}
+*}
+
+text %mlex {* The following example shows how to declare and use a
+  Boolean configuration option called @{text "my_flag"} with constant
+  default value @{ML false}.  *}
+
+ML {*
+  val my_flag =
+    Attrib.setup_config_bool @{binding my_flag} (K false)
+*}
+
+text {* Now the user can refer to @{attribute my_flag} in
+  declarations, while ML tools can retrieve the current value from the
+  context via @{ML Config.get}.  *}
+
+ML_val {* @{assert} (Config.get @{context} my_flag = false) *}
+
+declare [[my_flag = true]]
+
+ML_val {* @{assert} (Config.get @{context} my_flag = true) *}
+
+notepad
+begin
+  {
+    note [[my_flag = false]]
+    ML_val {* @{assert} (Config.get @{context} my_flag = false) *}
+  }
+  ML_val {* @{assert} (Config.get @{context} my_flag = true) *}
+end
+
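+text {* \medskip From ML, the value can also be changed directly in a
+  given context via @{ML Config.map}.  The following hypothetical
+  snippet is not part of the original example; it merely flips the
+  flag declared above: *}
+
+ML_val {*
+  val ctxt' = Config.map my_flag not @{context};
+  @{assert} (Config.get @{context} my_flag = true);
+  @{assert} (Config.get ctxt' my_flag = false);
+*}
+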
+text {* Here is another example involving ML type @{ML_type real}
+  (floating-point numbers). *}
+
+ML {*
+  val airspeed_velocity =
+    Attrib.setup_config_real @{binding airspeed_velocity} (K 0.0)
+*}
+
+declare [[airspeed_velocity = 10]]
+declare [[airspeed_velocity = 9.9]]
+
+
+section {* Names \label{sec:names} *}
+
+text {* In principle, a name is just a string, but there are various
+  conventions for representing additional structure.  For example,
+  ``@{text "Foo.bar.baz"}'' is considered as a long name consisting of
+  qualifier @{text "Foo.bar"} and base name @{text "baz"}.  The
+  individual constituents of a name may have further substructure,
+  e.g.\ the string ``\verb,\,\verb,<alpha>,'' encodes as a single
+  symbol (\secref{sec:symbols}).
+
+  \medskip Subsequently, we shall introduce specific categories of
+  names.  Roughly speaking these correspond to logical entities as
+  follows:
+  \begin{itemize}
+
+  \item Basic names (\secref{sec:basic-name}): free and bound
+  variables.
+
+  \item Indexed names (\secref{sec:indexname}): schematic variables.
+
+  \item Long names (\secref{sec:long-name}): constants of any kind
+  (type constructors, term constants, other concepts defined in user
+  space).  Such entities are typically managed via name spaces
+  (\secref{sec:name-space}).
+
+  \end{itemize}
+*}
+
+
+subsection {* Basic names \label{sec:basic-name} *}
+
+text {*
+  A \emph{basic name} essentially consists of a single Isabelle
+  identifier.  There are conventions to mark separate classes of basic
+  names, by attaching a suffix of underscores: one underscore means
+  \emph{internal name}, two underscores mean \emph{Skolem name},
+  three underscores mean \emph{internal Skolem name}.
+
+  For example, the basic name @{text "foo"} has the internal version
+  @{text "foo_"}, with Skolem versions @{text "foo__"} and @{text
+  "foo___"}, respectively.
+
+  These special versions provide copies of the basic name space, apart
+  from anything that normally appears in the user text.  For example,
+  system-generated variables in Isar proof contexts are usually marked
+  as internal, which prevents mysterious names like @{text "xaa"} from
+  appearing in human-readable text.
+
+  \medskip Manipulating binding scopes often requires on-the-fly
+  renamings.  A \emph{name context} contains a collection of already
+  used names.  The @{text "declare"} operation adds names to the
+  context.
+
+  The @{text "invent"} operation derives a number of fresh names from
+  a given starting point.  For example, the first three names derived
+  from @{text "a"} are @{text "a"}, @{text "b"}, @{text "c"}.
+
+  The @{text "variant"} operation produces fresh names by
+  incrementing tentative names as base-26 numbers (with digits @{text
+  "a..z"}) until all clashes are resolved.  For example, name @{text
+  "foo"} results in variants @{text "fooa"}, @{text "foob"}, @{text
+  "fooc"}, \dots, @{text "fooaa"}, @{text "fooab"} etc.; each renaming
+  step picks the next unused variant from this sequence.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Name.internal: "string -> string"} \\
+  @{index_ML Name.skolem: "string -> string"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type Name.context} \\
+  @{index_ML Name.context: Name.context} \\
+  @{index_ML Name.declare: "string -> Name.context -> Name.context"} \\
+  @{index_ML Name.invent: "Name.context -> string -> int -> string list"} \\
+  @{index_ML Name.variant: "string -> Name.context -> string * Name.context"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML Variable.names_of: "Proof.context -> Name.context"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Name.internal}~@{text "name"} produces an internal name
+  by adding one underscore.
+
+  \item @{ML Name.skolem}~@{text "name"} produces a Skolem name by
+  adding two underscores.
+
+  \item Type @{ML_type Name.context} represents the context of already
+  used names; the initial value is @{ML "Name.context"}.
+
+  \item @{ML Name.declare}~@{text "name"} enters a used name into the
+  context.
+
+  \item @{ML Name.invent}~@{text "context name n"} produces @{text
+  "n"} fresh names derived from @{text "name"}.
+
+  \item @{ML Name.variant}~@{text "name context"} produces a fresh
+  variant of @{text "name"}; the result is declared to the context.
+
+  \item @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
+  of declared type and term variable names.  Projecting a proof
+  context down to a primitive name context is occasionally useful when
+  invoking lower-level operations.  Regular management of ``fresh
+  variables'' is done by suitable operations of structure @{ML_structure
+  Variable}, which is also able to provide an official status of
+  ``locally fixed variable'' within the logical environment (cf.\
+  \secref{sec:variables}).
+
+  \end{description}
+*}
+
+text %mlex {* The following simple examples demonstrate how to produce
+  fresh names from the initial @{ML Name.context}. *}
+
+ML {*
+  val list1 = Name.invent Name.context "a" 5;
+  @{assert} (list1 = ["a", "b", "c", "d", "e"]);
+
+  val list2 =
+    #1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] Name.context);
+  @{assert} (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
+*}
+
+text {* \medskip The same works relative to the formal context as
+  follows. *}
+
+locale ex = fixes a b c :: 'a
+begin
+
+ML {*
+  val names = Variable.names_of @{context};
+
+  val list1 = Name.invent names "a" 5;
+  @{assert} (list1 = ["d", "e", "f", "g", "h"]);
+
+  val list2 =
+    #1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] names);
+  @{assert} (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
+*}
+
+end
+
+
+subsection {* Indexed names \label{sec:indexname} *}
+
+text {*
+  An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic
+  name and a natural number.  This representation allows efficient
+  renaming by incrementing the second component only.  The canonical
+  way to rename two collections of indexnames apart from each other is
+  this: determine the maximum index @{text "maxidx"} of the first
+  collection, then increment all indexes of the second collection by
+  @{text "maxidx + 1"}; the maximum index of an empty collection is
+  @{text "-1"}.
+
+  Occasionally, basic names are injected into the same pair type of
+  indexed names: then @{text "(x, -1)"} is used to encode the basic
+  name @{text "x"}.
+
+  \medskip Isabelle syntax observes the following rules for
+  representing an indexname @{text "(x, i)"} as a packed string:
+
+  \begin{itemize}
+
+  \item @{text "?x"} if @{text "x"} does not end with a digit and @{text "i = 0"},
+
+  \item @{text "?xi"} if @{text "x"} does not end with a digit,
+
+  \item @{text "?x.i"} otherwise.
+
+  \end{itemize}
+
+  Indexnames may acquire large index numbers after several maxidx
+  shifts have been applied.  Results are usually normalized towards
+  @{text "0"} at certain checkpoints, notably at the end of a proof.
+  This works by producing variants of the corresponding basic name
+  components.  For example, the collection @{text "?x1, ?x7, ?x42"}
+  becomes @{text "?x, ?xa, ?xb"}.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type indexname: "string * int"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type indexname} represents indexed names.  This is
+  an abbreviation for @{ML_type "string * int"}.  The second component
+  is usually non-negative, except for situations where @{text "(x,
+  -1)"} is used to inject basic names into this type.  Other negative
+  indexes should not be used.
+
+  \end{description}
+*}
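+
+text %mlex {* The packed string representation described above can be
+  inspected via @{ML Term.string_of_vname}.  This is a hypothetical
+  illustration, not part of the original text; the expected results
+  (according to the rules above) are given as comments: *}
+
+ML_val {*
+  val s1 = Term.string_of_vname ("x", 0);   (*"?x"*)
+  val s2 = Term.string_of_vname ("x", 1);   (*"?x1"*)
+  val s3 = Term.string_of_vname ("x1", 2);  (*"?x1.2"*)
+*}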
+
+
+subsection {* Long names \label{sec:long-name} *}
+
+text {* A \emph{long name} consists of a sequence of non-empty name
+  components.  The packed representation uses a dot as separator, as
+  in ``@{text "A.b.c"}''.  The last component is called \emph{base
+  name}, the remaining prefix is called \emph{qualifier} (which may be
+  empty).  The qualifier can be understood as the access path to the
+  named entity while passing through some nested block-structure,
+  although our free-form long names do not really enforce any strict
+  discipline.
+
+  For example, an item named ``@{text "A.b.c"}'' may be understood as
+  a local entity @{text "c"}, within a local structure @{text "b"},
+  within a global structure @{text "A"}.  In practice, long names
+  usually represent 1--3 levels of qualification.  User ML code should
+  not make any assumptions about the particular structure of long
+  names!
+
+  The empty name is commonly used as an indication of unnamed
+  entities, or entities that are not entered into the corresponding
+  name space, whenever this makes any sense.  The basic operations on
+  long names map empty names again to empty names.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Long_Name.base_name: "string -> string"} \\
+  @{index_ML Long_Name.qualifier: "string -> string"} \\
+  @{index_ML Long_Name.append: "string -> string -> string"} \\
+  @{index_ML Long_Name.implode: "string list -> string"} \\
+  @{index_ML Long_Name.explode: "string -> string list"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Long_Name.base_name}~@{text "name"} returns the base name
+  of a long name.
+
+  \item @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
+  of a long name.
+
+  \item @{ML Long_Name.append}~@{text "name\<^sub>1 name\<^sub>2"} appends two long
+  names.
+
+  \item @{ML Long_Name.implode}~@{text "names"} and @{ML
+  Long_Name.explode}~@{text "name"} convert between the packed string
+  representation and the explicit list form of long names.
+
+  \end{description}
+*}
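+
+text %mlex {* The following hypothetical snippet, not part of the
+  original text, illustrates these operations on the packed string
+  representation; the concrete names are arbitrary: *}
+
+ML_val {*
+  @{assert} (Long_Name.base_name "A.b.c" = "c");
+  @{assert} (Long_Name.qualifier "A.b.c" = "A.b");
+  @{assert} (Long_Name.append "A.b" "c" = "A.b.c");
+  @{assert} (Long_Name.explode "A.b.c" = ["A", "b", "c"]);
+  @{assert} (Long_Name.implode ["A", "b", "c"] = "A.b.c");
+*}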
+
+
+subsection {* Name spaces \label{sec:name-space} *}
+
+text {* A @{text "name space"} manages a collection of long names,
+  together with a mapping between partially qualified external names
+  and fully qualified internal names (in both directions).  Note that
+  the corresponding @{text "intern"} and @{text "extern"} operations
+  are mostly used for parsing and printing only!  The @{text
+  "declare"} operation augments a name space according to the accesses
+  determined by a given binding, and a naming policy from the context.
+
+  \medskip A @{text "binding"} specifies details about the prospective
+  long name of a newly introduced formal entity.  It consists of a
+  base name, prefixes for qualification (separate ones for system
+  infrastructure and user-space mechanisms), a slot for the original
+  source position, and some additional flags.
+
+  \medskip A @{text "naming"} provides some additional details for
+  producing a long name from a binding.  Normally, the naming is
+  implicit in the theory or proof context.  The @{text "full"}
+  operation (and its variants for different context types) produces a
+  fully qualified internal name to be entered into a name space.  The
+  main equation of this ``chemical reaction'' when binding new
+  entities in a context is as follows:
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "binding + naming \<longrightarrow> long name + name space accesses"}
+  \end{tabular}
+
+  \bigskip As a general principle, there is a separate name space for
+  each kind of formal entity, e.g.\ fact, logical constant, type
+  constructor, type class.  It is usually clear from the occurrence in
+  concrete syntax (or from the scope) which kind of entity a name
+  refers to.  For example, the very same name @{text "c"} may be used
+  uniformly for a constant, type constructor, and type class.
+
+  There are common schemes to name derived entities systematically
+  according to the name of the main logical entity involved, e.g.\
+  fact @{text "c.intro"} for a canonical introduction rule related to
+  constant @{text "c"}.  This technique of mapping names from one
+  space into another requires some care in order to avoid conflicts.
+  In particular, theorem names derived from a type constructor or type
+  class should get an extra suffix in addition to the usual
+  qualification.  This leads to the following conventions for derived
+  names:
+
+  \medskip
+  \begin{tabular}{ll}
+  logical entity & fact name \\\hline
+  constant @{text "c"} & @{text "c.intro"} \\
+  type @{text "c"} & @{text "c_type.intro"} \\
+  class @{text "c"} & @{text "c_class.intro"} \\
+  \end{tabular}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type binding} \\
+  @{index_ML Binding.empty: binding} \\
+  @{index_ML Binding.name: "string -> binding"} \\
+  @{index_ML Binding.qualify: "bool -> string -> binding -> binding"} \\
+  @{index_ML Binding.prefix: "bool -> string -> binding -> binding"} \\
+  @{index_ML Binding.conceal: "binding -> binding"} \\
+  @{index_ML Binding.print: "binding -> string"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type Name_Space.naming} \\
+  @{index_ML Name_Space.default_naming: Name_Space.naming} \\
+  @{index_ML Name_Space.add_path: "string -> Name_Space.naming -> Name_Space.naming"} \\
+  @{index_ML Name_Space.full_name: "Name_Space.naming -> binding -> string"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type Name_Space.T} \\
+  @{index_ML Name_Space.empty: "string -> Name_Space.T"} \\
+  @{index_ML Name_Space.merge: "Name_Space.T * Name_Space.T -> Name_Space.T"} \\
+  @{index_ML Name_Space.declare: "Context.generic -> bool ->
+  binding -> Name_Space.T -> string * Name_Space.T"} \\
+  @{index_ML Name_Space.intern: "Name_Space.T -> string -> string"} \\
+  @{index_ML Name_Space.extern: "Proof.context -> Name_Space.T -> string -> string"} \\
+  @{index_ML Name_Space.is_concealed: "Name_Space.T -> string -> bool"}
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type binding} represents the abstract concept of
+  name bindings.
+
+  \item @{ML Binding.empty} is the empty binding.
+
+  \item @{ML Binding.name}~@{text "name"} produces a binding with base
+  name @{text "name"}.  Note that this lacks proper source position
+  information; see also the ML antiquotation @{ML_antiquotation
+  binding}.
+
+  \item @{ML Binding.qualify}~@{text "mandatory name binding"}
+  prefixes qualifier @{text "name"} to @{text "binding"}.  The @{text
+  "mandatory"} flag tells if this name component always needs to be
+  given in name space accesses --- this is mostly @{text "false"} in
+  practice.  Note that this part of qualification is typically used in
+  derived specification mechanisms.
+
+  \item @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
+  affects the system prefix.  This part of extra qualification is
+  typically used in the infrastructure for modular specifications,
+  notably ``local theory targets'' (see also \chref{ch:local-theory}).
+
+  \item @{ML Binding.conceal}~@{text "binding"} indicates that the
+  binding shall refer to an entity that serves foundational purposes
+  only.  This flag helps to mark implementation details of
+  specification mechanisms etc.  Other tools should not depend on the
+  particulars of concealed entities (cf.\ @{ML
+  Name_Space.is_concealed}).
+
+  \item @{ML Binding.print}~@{text "binding"} produces a string
+  representation for human-readable output, together with some formal
+  markup that might get used in GUI front-ends, for example.
+
+  \item Type @{ML_type Name_Space.naming} represents the abstract
+  concept of a naming policy.
+
+  \item @{ML Name_Space.default_naming} is the default naming policy.
+  In a theory context, this is usually augmented by a path prefix
+  consisting of the theory name.
+
+  \item @{ML Name_Space.add_path}~@{text "path naming"} augments the
+  naming policy by extending its path component.
+
+  \item @{ML Name_Space.full_name}~@{text "naming binding"} turns a
+  name binding (usually a basic name) into the fully qualified
+  internal name, according to the given naming policy.
+
+  \item Type @{ML_type Name_Space.T} represents name spaces.
+
+  \item @{ML Name_Space.empty}~@{text "kind"} and @{ML Name_Space.merge}~@{text
+  "(space\<^sub>1, space\<^sub>2)"} are the canonical operations for
+  maintaining name spaces according to theory data management
+  (\secref{sec:context-data}); @{text "kind"} is a formal comment
+  to characterize the purpose of a name space.
+
+  \item @{ML Name_Space.declare}~@{text "context strict binding
+  space"} enters a name binding as fully qualified internal name into
+  the name space, using the naming of the context.
+
+  \item @{ML Name_Space.intern}~@{text "space name"} internalizes a
+  (partially qualified) external name.
+
+  This operation is mostly for parsing!  Note that fully qualified
+  names stemming from declarations are produced via @{ML
+  "Name_Space.full_name"} and @{ML "Name_Space.declare"}
+  (or their derivatives for @{ML_type theory} and
+  @{ML_type Proof.context}).
+
+  \item @{ML Name_Space.extern}~@{text "ctxt space name"} externalizes a
+  (fully qualified) internal name.
+
+  This operation is mostly for printing!  User code should not rely on
+  the precise result too much.
+
+  \item @{ML Name_Space.is_concealed}~@{text "space name"} indicates
+  whether @{text "name"} refers to a strictly private entity that
+  other tools are supposed to ignore!
+
+  \end{description}
+*}
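+
+text %mlex {* The ``chemical reaction'' of binding and naming can be
+  observed directly in ML.  This hypothetical snippet is not part of
+  the original text; the path @{text "Foo"} and base name @{text "b"}
+  are arbitrary: *}
+
+ML_val {*
+  val naming = Name_Space.add_path "Foo" Name_Space.default_naming;
+  val name = Name_Space.full_name naming @{binding b};
+  @{assert} (name = "Foo.b");
+*}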
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "binding"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation binding} name
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{binding name}"} produces a binding with base name
+  @{text "name"} and the source position taken from the concrete
+  syntax of this antiquotation.  In many situations this is more
+  appropriate than the more basic @{ML Binding.name} function.
+
+  \end{description}
+*}
+
+text %mlex {* The following example yields the source position of some
+  concrete binding inlined into the text:
+*}
+
+ML {* Binding.pos_of @{binding here} *}
+
+text {* \medskip That position can be also printed in a message as
+  follows: *}
+
+ML_command {*
+  writeln
+    ("Look here" ^ Position.here (Binding.pos_of @{binding here}))
+*}
+
+text {* This illustrates a key virtue of formalized bindings as
+  opposed to raw specifications of base names: the system can use this
+  additional information for feedback given to the user (error
+  messages etc.).
+
+  \medskip The following example refers to its source position
+  directly, which is occasionally useful for experimentation and
+  diagnostic purposes: *}
+
+ML_command {*
+  warning ("Look here" ^ Position.here @{here})
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Proof.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,492 @@
+theory Proof
+imports Base
+begin
+
+chapter {* Structured proofs *}
+
+section {* Variables \label{sec:variables} *}
+
+text {*
+  Any variable that is not explicitly bound by @{text "\<lambda>"}-abstraction
+  is considered as ``free''.  Logically, free variables act like
+  outermost universal quantification at the sequent level: @{text
+  "A\<^sub>1(x), \<dots>, A\<^sub>n(x) \<turnstile> B(x)"} means that the result
+  holds \emph{for all} values of @{text "x"}.  Free variables for
+  terms (not types) can be fully internalized into the logic: @{text
+  "\<turnstile> B(x)"} and @{text "\<turnstile> \<And>x. B(x)"} are interchangeable, provided
+  that @{text "x"} does not occur elsewhere in the context.
+  Inspecting @{text "\<turnstile> \<And>x. B(x)"} more closely, we see that inside the
+  quantifier, @{text "x"} is essentially ``arbitrary, but fixed'',
+  while from outside it appears as a place-holder for instantiation
+  (thanks to @{text "\<And>"} elimination).
+
+  The Pure logic represents the idea of variables being either inside
+  or outside the current scope by providing separate syntactic
+  categories for \emph{fixed variables} (e.g.\ @{text "x"}) vs.\
+  \emph{schematic variables} (e.g.\ @{text "?x"}).  Incidentally, a
+  universal result @{text "\<turnstile> \<And>x. B(x)"} has the HHF normal form @{text
+  "\<turnstile> B(?x)"}, which represents its generality without requiring an
+  explicit quantifier.  The same principle works for type variables:
+  @{text "\<turnstile> B(?\<alpha>)"} represents the idea of ``@{text "\<turnstile> \<forall>\<alpha>. B(\<alpha>)"}''
+  without demanding a truly polymorphic framework.
+
+  \medskip Additional care is required to treat type variables in a
+  way that facilitates type-inference.  In principle, term variables
+  depend on type variables, which means that type variables would have
+  to be declared first.  For example, a raw type-theoretic framework
+  would demand the context to be constructed in stages as follows:
+  @{text "\<Gamma> = \<alpha>: type, x: \<alpha>, a: A(x\<^sub>\<alpha>)"}.
+
+  We allow a slightly less formalistic mode of operation: term
+  variables @{text "x"} are fixed without specifying a type yet
+  (essentially \emph{all} potential occurrences of some instance
+  @{text "x\<^sub>\<tau>"} are fixed); the first occurrence of @{text "x"}
+  within a specific term assigns its most general type, which is then
+  maintained consistently in the context.  The above example becomes
+  @{text "\<Gamma> = x: term, \<alpha>: type, A(x\<^sub>\<alpha>)"}, where type @{text
+  "\<alpha>"} is fixed \emph{after} term @{text "x"}, and the constraint
+  @{text "x :: \<alpha>"} is an implicit consequence of the occurrence of
+  @{text "x\<^sub>\<alpha>"} in the subsequent proposition.
+
+  This twist of dependencies is also accommodated by the reverse
+  operation of exporting results from a context: a type variable
+  @{text "\<alpha>"} is considered fixed as long as it occurs in some fixed
+  term variable of the context.  For example, exporting @{text "x:
+  term, \<alpha>: type \<turnstile> x\<^sub>\<alpha> \<equiv> x\<^sub>\<alpha>"} produces in the first step @{text "x: term
+  \<turnstile> x\<^sub>\<alpha> \<equiv> x\<^sub>\<alpha>"} for fixed @{text "\<alpha>"}, and only in the second step
+  @{text "\<turnstile> ?x\<^sub>?\<^sub>\<alpha> \<equiv> ?x\<^sub>?\<^sub>\<alpha>"} for schematic @{text "?x"} and @{text "?\<alpha>"}.
+  The following Isar source text illustrates this scenario.
+*}
+
+notepad
+begin
+  {
+    fix x  -- {* all potential occurrences of some @{text "x::\<tau>"} are fixed *}
+    {
+      have "x::'a \<equiv> x"  -- {* implicit type assignment by concrete occurrence *}
+        by (rule reflexive)
+    }
+    thm this  -- {* result still with fixed type @{text "'a"} *}
+  }
+  thm this  -- {* fully general result for arbitrary @{text "?x::?'a"} *}
+end
+
+text {* The Isabelle/Isar proof context manages the details of term
+  vs.\ type variables, with high-level principles for moving the
+  frontier between fixed and schematic variables.
+
+  The @{text "add_fixes"} operation explicitly declares fixed
+  variables; the @{text "declare_term"} operation absorbs a term into
+  a context by fixing new type variables and adding syntactic
+  constraints.
+
+  The @{text "export"} operation is able to perform the main work of
+  generalizing term and type variables as sketched above, assuming
+  that fixed variables and terms have been declared properly.
+
+  The @{text "import"} operation makes a generalized fact a genuine
+  part of the context, by inventing fixed variables for the schematic
+  ones.  The effect can be reversed by using @{text "export"} later,
+  potentially with an extended context; the result is equivalent to
+  the original modulo renaming of schematic variables.
+
+  The @{text "focus"} operation provides a variant of @{text "import"}
+  for nested propositions (with explicit quantification): @{text
+  "\<And>x\<^sub>1 \<dots> x\<^sub>n. B(x\<^sub>1, \<dots>, x\<^sub>n)"} is
+  decomposed by inventing fixed variables @{text "x\<^sub>1, \<dots>,
+  x\<^sub>n"} for the body.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Variable.add_fixes: "
+  string list -> Proof.context -> string list * Proof.context"} \\
+  @{index_ML Variable.variant_fixes: "
+  string list -> Proof.context -> string list * Proof.context"} \\
+  @{index_ML Variable.declare_term: "term -> Proof.context -> Proof.context"} \\
+  @{index_ML Variable.declare_constraints: "term -> Proof.context -> Proof.context"} \\
+  @{index_ML Variable.export: "Proof.context -> Proof.context -> thm list -> thm list"} \\
+  @{index_ML Variable.polymorphic: "Proof.context -> term list -> term list"} \\
+  @{index_ML Variable.import: "bool -> thm list -> Proof.context ->
+  (((ctyp * ctyp) list * (cterm * cterm) list) * thm list) * Proof.context"} \\
+  @{index_ML Variable.focus: "term -> Proof.context ->
+  ((string * (string * typ)) list * term) * Proof.context"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
+  variables @{text "xs"}, returning the resulting internal names.  By
+  default, the internal representation coincides with the external
+  one, which also means that the given variables must not be fixed
+  already.  There is a different policy within a local proof body: the
+  given names are just hints for newly invented Skolem variables.
+
+  \item @{ML Variable.variant_fixes} is similar to @{ML
+  Variable.add_fixes}, but always produces fresh variants of the given
+  names.
+
+  \item @{ML Variable.declare_term}~@{text "t ctxt"} declares term
+  @{text "t"} to belong to the context.  This automatically fixes new
+  type variables, but not term variables.  Syntactic constraints for
+  type and term variables are declared uniformly, though.
+
+  \item @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
+  syntactic constraints from term @{text "t"}, without making it part
+  of the context yet.
+
+  \item @{ML Variable.export}~@{text "inner outer thms"} generalizes
+  fixed type and term variables in @{text "thms"} according to the
+  difference of the @{text "inner"} and @{text "outer"} context,
+  following the principles sketched above.
+
+  \item @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
+  variables in @{text "ts"} as far as possible, even those occurring
+  in fixed term variables.  The default policy of type-inference is to
+  fix newly introduced type variables, which is essentially reversed
+  with @{ML Variable.polymorphic}: here the given terms are detached
+  from the context as far as possible.
+
+  \item @{ML Variable.import}~@{text "open thms ctxt"} invents fixed
+  type and term variables for the schematic ones occurring in @{text
+  "thms"}.  The @{text "open"} flag indicates whether the fixed names
+  should be accessible to the user, otherwise newly introduced names
+  are marked as ``internal'' (\secref{sec:names}).
+
+  \item @{ML Variable.focus}~@{text B} decomposes the outermost @{text
+  "\<And>"} prefix of proposition @{text "B"}.
+
+  \end{description}
+*}
+
+text %mlex {* The following example shows how to work with fixed term
+  and type parameters and with type-inference.  *}
+
+ML {*
+  (*static compile-time context -- for testing only*)
+  val ctxt0 = @{context};
+
+  (*locally fixed parameters -- no type assignment yet*)
+  val ([x, y], ctxt1) = ctxt0 |> Variable.add_fixes ["x", "y"];
+
+  (*t1: most general fixed type; t1': most general arbitrary type*)
+  val t1 = Syntax.read_term ctxt1 "x";
+  val t1' = singleton (Variable.polymorphic ctxt1) t1;
+
+  (*term u enforces specific type assignment*)
+  val u = Syntax.read_term ctxt1 "(x::nat) \<equiv> y";
+
+  (*official declaration of u -- propagates constraints etc.*)
+  val ctxt2 = ctxt1 |> Variable.declare_term u;
+  val t2 = Syntax.read_term ctxt2 "x";  (*x::nat is enforced*)
+*}
+
+text {* In the above example, the starting context is derived from the
+  toplevel theory, which means that fixed variables are internalized
+  literally: @{text "x"} is mapped again to @{text "x"}, and
+  attempting to fix it again in the subsequent context is an error.
+  Alternatively, fixed parameters can be renamed explicitly as
+  follows: *}
+
+ML {*
+  val ctxt0 = @{context};
+  val ([x1, x2, x3], ctxt1) =
+    ctxt0 |> Variable.variant_fixes ["x", "x", "x"];
+*}
+
+text {* The following ML code can now work with the invented names of
+  @{text x1}, @{text x2}, @{text x3}, without depending on
+  the details of the system policy for introducing these variants.
+  Recall that within a proof body the system always invents fresh
+  ``Skolem constants'', e.g.\ as follows: *}
+
+notepad
+begin
+  ML_prf %"ML" {*
+    val ctxt0 = @{context};
+
+    val ([x1], ctxt1) = ctxt0 |> Variable.add_fixes ["x"];
+    val ([x2], ctxt2) = ctxt1 |> Variable.add_fixes ["x"];
+    val ([x3], ctxt3) = ctxt2 |> Variable.add_fixes ["x"];
+
+    val ([y1, y2], ctxt4) =
+      ctxt3 |> Variable.variant_fixes ["y", "y"];
+  *}
+end
+
+text {* In this situation @{ML Variable.add_fixes} and @{ML
+  Variable.variant_fixes} are very similar, but identical name
+  proposals given in a row are only accepted by the second version.
+  *}
+
+
+section {* Assumptions \label{sec:assumptions} *}
+
+text {*
+  An \emph{assumption} is a proposition that is postulated in the
+  current context.  Local conclusions may use assumptions as
+  additional facts, but this imposes implicit hypotheses that weaken
+  the overall statement.
+
+  Assumptions are restricted to fixed non-schematic statements, i.e.\
+  all generality needs to be expressed by explicit quantifiers.
+  Nevertheless, the result will be in HHF normal form with outermost
+  quantifiers stripped.  For example, by assuming @{text "\<And>x :: \<alpha>. P
+  x"} we get @{text "\<And>x :: \<alpha>. P x \<turnstile> P ?x"} for schematic @{text "?x"}
+  of fixed type @{text "\<alpha>"}.  Local derivations accumulate more and
+  more explicit references to hypotheses: @{text "A\<^sub>1, \<dots>,
+  A\<^sub>n \<turnstile> B"} where @{text "A\<^sub>1, \<dots>, A\<^sub>n"} need to
+  be covered by the assumptions of the current context.
+
+  \medskip The @{text "add_assms"} operation augments the context by
+  local assumptions, which are parameterized by an arbitrary @{text
+  "export"} rule (see below).
+
+  The @{text "export"} operation moves facts from a (larger) inner
+  context into a (smaller) outer context, by discharging the
+  difference of the assumptions as specified by the associated export
+  rules.  Note that the discharged portion is determined by the
+  difference of contexts, not the facts being exported!  There is a
+  separate flag to indicate a goal context, where the result is meant
+  to refine an enclosing sub-goal of a structured proof state.
+
+  \medskip The most basic export rule discharges assumptions directly
+  by means of the @{text "\<Longrightarrow>"} introduction rule:
+  \[
+  \infer[(@{text "\<Longrightarrow>\<hyphen>intro"})]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+  \]
+
+  The variant for goal refinements marks the newly introduced
+  premises, which causes the canonical Isar goal refinement scheme to
+  enforce unification with local premises within the goal:
+  \[
+  \infer[(@{text "#\<Longrightarrow>\<hyphen>intro"})]{@{text "\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+  \]
+
+  \medskip Alternative versions of assumptions may perform arbitrary
+  transformations on export, as long as the corresponding portion of
+  hypotheses is removed from the given facts.  For example, a local
+  definition works by fixing @{text "x"} and assuming @{text "x \<equiv> t"},
+  with the following export rule to reverse the effect:
+  \[
+  \infer[(@{text "\<equiv>\<hyphen>expand"})]{@{text "\<Gamma> - (x \<equiv> t) \<turnstile> B t"}}{@{text "\<Gamma> \<turnstile> B x"}}
+  \]
+  This works, because the assumption @{text "x \<equiv> t"} was introduced in
+  a context with @{text "x"} being fresh, so @{text "x"} does not
+  occur in @{text "\<Gamma>"} here.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type Assumption.export} \\
+  @{index_ML Assumption.assume: "Proof.context -> cterm -> thm"} \\
+  @{index_ML Assumption.add_assms:
+    "Assumption.export ->
+  cterm list -> Proof.context -> thm list * Proof.context"} \\
+  @{index_ML Assumption.add_assumes: "
+  cterm list -> Proof.context -> thm list * Proof.context"} \\
+  @{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type Assumption.export} represents arbitrary export
+  rules, which is any function of type @{ML_type "bool -> cterm list
+  -> thm -> thm"}, where the @{ML_type "bool"} indicates goal mode,
+  and the @{ML_type "cterm list"} the collection of assumptions to be
+  discharged simultaneously.
+
+  \item @{ML Assumption.assume}~@{text "ctxt A"} turns proposition @{text
+  "A"} into a primitive assumption @{text "A \<turnstile> A'"}, where the
+  conclusion @{text "A'"} is in HHF normal form.
+
+  \item @{ML Assumption.add_assms}~@{text "r As"} augments the context
+  by assumptions @{text "As"} with export rule @{text "r"}.  The
+  resulting facts are hypothetical theorems as produced by the raw
+  @{ML Assumption.assume}.
+
+  \item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
+  @{ML Assumption.add_assms} where the export rule performs @{text
+  "\<Longrightarrow>\<hyphen>intro"} or @{text "#\<Longrightarrow>\<hyphen>intro"}, depending on goal
+  mode.
+
+  \item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
+  exports result @{text "thm"} from the @{text "inner"} context
+  back into the @{text "outer"} one; @{text "is_goal = true"} means
+  this is a goal context.  The result is in HHF normal form.  Note
+  that @{ML "Proof_Context.export"} combines @{ML "Variable.export"}
+  and @{ML "Assumption.export"} in the canonical way.
+
+  \end{description}
+*}
+
+text %mlex {* The following example demonstrates how rules can be
+  derived by building up a context of assumptions first, and exporting
+  some local fact afterwards.  We refer to @{theory Pure} equality
+  here for testing purposes.
+*}
+
+ML {*
+  (*static compile-time context -- for testing only*)
+  val ctxt0 = @{context};
+
+  val ([eq], ctxt1) =
+    ctxt0 |> Assumption.add_assumes [@{cprop "x \<equiv> y"}];
+  val eq' = Thm.symmetric eq;
+
+  (*back to original context -- discharges assumption*)
+  val r = Assumption.export false ctxt1 ctxt0 eq';
+*}
+
+text {* Note that the variables of the resulting rule are not
+  generalized.  This would have required fixing them properly in the
+  context beforehand, and exporting wrt.\ variables afterwards (cf.\ @{ML
+  Variable.export} or the combined @{ML "Proof_Context.export"}).  *}
+
+
+section {* Structured goals and results \label{sec:struct-goals} *}
+
+text {*
+  Local results are established by monotonic reasoning from facts
+  within a context.  This allows common combinations of theorems,
+  e.g.\ via @{text "\<And>/\<Longrightarrow>"} elimination, resolution rules, or equational
+  reasoning, see \secref{sec:thms}.  Unaccounted context manipulations
+  should be avoided, notably raw @{text "\<And>/\<Longrightarrow>"} introduction or ad-hoc
+  references to free variables or assumptions not present in the proof
+  context.
+
+  \medskip The @{text "SUBPROOF"} combinator makes it possible to
+  structure a tactical proof recursively by decomposing a selected sub-goal:
+  @{text "(\<And>x. A(x) \<Longrightarrow> B(x)) \<Longrightarrow> \<dots>"} is turned into @{text "B(x) \<Longrightarrow> \<dots>"}
+  after fixing @{text "x"} and assuming @{text "A(x)"}.  This means
+  the tactic needs to solve the conclusion, but may use the premise as
+  a local fact, for locally fixed variables.
+
+  The family of @{text "FOCUS"} combinators is similar to @{text
+  "SUBPROOF"}, but allows schematic variables and pending subgoals to
+  be retained in the resulting goal state.
+
+  The @{text "prove"} operation provides an interface for structured
+  backwards reasoning under program control, with some explicit sanity
+  checks of the result.  The goal context can be augmented by
+  additional fixed variables (cf.\ \secref{sec:variables}) and
+  assumptions (cf.\ \secref{sec:assumptions}), which will be available
+  as local facts during the proof and discharged into implications in
+  the result.  Type and term variables are generalized as usual,
+  according to the context.
+
+  The @{text "obtain"} operation produces results by eliminating
+  existing facts by means of a given tactic.  This acts like a dual
+  conclusion: the proof demonstrates that the context may be augmented
+  by parameters and assumptions, without affecting any conclusions
+  that do not mention these parameters.  See also
+  \cite{isabelle-isar-ref} for the user-level @{command obtain} and
+  @{command guess} elements.  Final results, which may not refer to
+  the parameters in the conclusion, need to be exported explicitly into
+  the original context.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML SUBPROOF: "(Subgoal.focus -> tactic) ->
+  Proof.context -> int -> tactic"} \\
+  @{index_ML Subgoal.FOCUS: "(Subgoal.focus -> tactic) ->
+  Proof.context -> int -> tactic"} \\
+  @{index_ML Subgoal.FOCUS_PREMS: "(Subgoal.focus -> tactic) ->
+  Proof.context -> int -> tactic"} \\
+  @{index_ML Subgoal.FOCUS_PARAMS: "(Subgoal.focus -> tactic) ->
+  Proof.context -> int -> tactic"} \\
+  @{index_ML Subgoal.focus: "Proof.context -> int -> thm -> Subgoal.focus * thm"} \\
+  @{index_ML Subgoal.focus_prems: "Proof.context -> int -> thm -> Subgoal.focus * thm"} \\
+  @{index_ML Subgoal.focus_params: "Proof.context -> int -> thm -> Subgoal.focus * thm"} \\
+  \end{mldecls}
+
+  \begin{mldecls}
+  @{index_ML Goal.prove: "Proof.context -> string list -> term list -> term ->
+  ({prems: thm list, context: Proof.context} -> tactic) -> thm"} \\
+  @{index_ML Goal.prove_multi: "Proof.context -> string list -> term list -> term list ->
+  ({prems: thm list, context: Proof.context} -> tactic) -> thm list"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML Obtain.result: "(Proof.context -> tactic) -> thm list ->
+  Proof.context -> ((string * cterm) list * thm list) * Proof.context"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
+  of the specified sub-goal, producing an extended context and a
+  reduced goal, which needs to be solved by the given tactic.  All
+  schematic parameters of the goal are imported into the context as
+  fixed ones, which may not be instantiated in the sub-proof.
+
+  \item @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
+  Subgoal.FOCUS_PARAMS} are similar to @{ML SUBPROOF}, but are
+  slightly more flexible: only the specified parts of the subgoal are
+  imported into the context, and the body tactic may introduce new
+  subgoals and schematic variables.
+
+  \item @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML
+  Subgoal.focus_params} extract the focus information from a goal
+  state in the same way as the corresponding tacticals above.  This is
+  occasionally useful to experiment without writing actual tactics
+  yet.
+
+  \item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
+  "C"} in the context augmented by fixed variables @{text "xs"} and
+  assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
+  it.  The latter may depend on the local assumptions being presented
+  as facts.  The result is in HHF normal form.
+
+  \item @{ML Goal.prove_multi} is similar to @{ML Goal.prove}, but
+  states several conclusions simultaneously.  The goal is encoded by
+  means of Pure conjunction; @{ML Goal.conjunction_tac} will turn this
+  into a collection of individual subgoals.
+
+  \item @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
+  given facts using a tactic, which results in additional fixed
+  variables and assumptions in the context.  Final results need to be
+  exported explicitly.
+
+  \end{description}
+*}
+
+text %mlex {* The following minimal example illustrates how to access
+  the focus information of a structured goal state. *}
+
+notepad
+begin
+  fix A B C :: "'a \<Rightarrow> bool"
+
+  have "\<And>x. A x \<Longrightarrow> B x \<Longrightarrow> C x"
+    ML_val
+    {*
+      val {goal, context = goal_ctxt, ...} = @{Isar.goal};
+      val (focus as {params, asms, concl, ...}, goal') =
+        Subgoal.focus goal_ctxt 1 goal;
+      val [A, B] = #prems focus;
+      val [(_, x)] = #params focus;
+    *}
+    oops
+
+text {* \medskip The next example demonstrates forward-elimination in
+  a local context, using @{ML Obtain.result}.  *}
+
+notepad
+begin
+  assume ex: "\<exists>x. B x"
+
+  ML_prf %"ML" {*
+    val ctxt0 = @{context};
+    val (([(_, x)], [B]), ctxt1) = ctxt0
+      |> Obtain.result (fn _ => etac @{thm exE} 1) [@{thm ex}];
+  *}
+  ML_prf %"ML" {*
+    singleton (Proof_Context.export ctxt1 ctxt0) @{thm refl};
+  *}
+  ML_prf %"ML" {*
+    Proof_Context.export ctxt1 ctxt0 [Thm.reflexive x]
+      handle ERROR msg => (warning msg; []);
+  *}
+end
+
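+text {* \medskip As a further hypothetical sketch, not part of the
+  original examples, @{ML Goal.prove} can be used to establish a
+  trivial statement under a local assumption; the proposition @{text
+  "P x"} is arbitrary.  The assumption is presented as a fact to the
+  tactic and discharged again in the exported result. *}
+
+ML {*
+  (*static compile-time context -- for testing only*)
+  val ctxt0 = @{context};
+
+  val thm =
+    Goal.prove ctxt0 ["x"] [@{prop "P x"}] @{prop "P x"}
+      (fn {prems, ...} => resolve_tac prems 1);
+*}
+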
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Syntax.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,151 @@
+theory Syntax
+imports Base
+begin
+
+chapter {* Concrete syntax and type-checking *}
+
+text {* Pure @{text "\<lambda>"}-calculus as introduced in \chref{ch:logic} is
+  an adequate foundation for logical languages --- in the tradition of
+  \emph{higher-order abstract syntax} --- but end-users require
+  additional means for reading and printing of terms and types.  This
+  important add-on outside the logical core is called \emph{inner
+  syntax} in Isabelle jargon, as opposed to the \emph{outer syntax} of
+  the theory and proof language (cf.\ \cite{isabelle-isar-ref}).
+
+  For example, according to \cite{church40} quantifiers are
+  represented as higher-order constants @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow>
+  bool"} such that @{text "All (\<lambda>x::'a. B x)"} faithfully represents
+  the idea that is displayed as @{text "\<forall>x::'a. B x"} via @{keyword
+  "binder"} notation.  Moreover, type-inference in the style of
+  Hindley-Milner \cite{hindleymilner} (and extensions) enables users
+  to write @{text "\<forall>x. B x"} concisely, when the type @{text "'a"} is
+  already clear from the context.\footnote{Type-inference taken to the
+  extreme can easily confuse users, though.  Beginners often stumble
+  over unexpectedly general types inferred by the system.}
+
+  \medskip The main inner syntax operations are \emph{read} for
+  parsing together with type-checking, and \emph{pretty} for formatted
+  output.  See also \secref{sec:read-print}.
+
+  Furthermore, the input and output syntax layers are sub-divided into
+  separate phases for \emph{concrete syntax} versus \emph{abstract
+  syntax}, see also \secref{sec:parse-unparse} and
+  \secref{sec:term-check}, respectively.  This results in the
+  following decomposition of the main operations:
+
+  \begin{itemize}
+
+  \item @{text "read = parse; check"}
+
+  \item @{text "pretty = uncheck; unparse"}
+
+  \end{itemize}
+
+  Some specification package might thus intercept syntax processing at
+  a well-defined stage after @{text "parse"}, to augment the
+  resulting pre-term before full type-reconstruction is performed by
+  @{text "check"}, for example.  Note that the formal status of bound
+  variables, versus free variables, versus constants must not be
+  changed here! *}
+
+
+section {* Reading and pretty printing \label{sec:read-print} *}
+
+text {* Read and print operations are roughly dual to each other, such
+  that for the user @{text "s' = pretty (read s)"} looks similar to
+  the original source text @{text "s"}, but the details depend on many
+  side-conditions.  There are also explicit options to control
+  the suppression of type information in the output.  The default
+  configuration routinely loses information, so @{text "t' = read
+  (pretty t)"} might fail, produce a differently typed term, or even a
+  completely different term in the face of syntactic overloading!  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Syntax.read_typ: "Proof.context -> string -> typ"} \\
+  @{index_ML Syntax.read_term: "Proof.context -> string -> term"} \\
+  @{index_ML Syntax.read_prop: "Proof.context -> string -> term"} \\
+  @{index_ML Syntax.pretty_typ: "Proof.context -> typ -> Pretty.T"} \\
+  @{index_ML Syntax.pretty_term: "Proof.context -> term -> Pretty.T"} \\
+  \end{mldecls}
+
+  %FIXME description
+*}
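+
+text %mlex {* The following hypothetical round trip is not part of the
+  original text: a term is read from source text and pretty printed
+  again within the static context of this document; the concrete term
+  is arbitrary. *}
+
+ML_val {*
+  val t = Syntax.read_term @{context} "\<lambda>x::nat. x + 1";
+  val s = Pretty.string_of (Syntax.pretty_term @{context} t);
+*}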
+
+
+section {* Parsing and unparsing \label{sec:parse-unparse} *}
+
+text {* Parsing and unparsing convert between actual source text and
+  a certain \emph{pre-term} format, where all bindings and scopes are
+  resolved faithfully.  Thus the names of free variables or constants
+  are already determined in the sense of the logical context, but type
+  information might still be missing.  Pre-terms support an explicit
+  language of \emph{type constraints} that may be augmented by user
+  code to guide the later \emph{check} phase, for example.
+
+  Actual parsing is based on traditional lexical analysis and Earley
+  parsing for arbitrary context-free grammars.  The user specifies the
+  grammar via mixfix annotations.  Moreover, there are \emph{syntax
+  translations} that can be augmented by the user, either
+  declaratively via @{command translations} or programmatically via
+  @{command parse_translation}, @{command print_translation} etc.  The
+  final scope resolution is performed by the system, according to name
+  spaces for types, constants etc.\ determined by the context.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Syntax.parse_typ: "Proof.context -> string -> typ"} \\
+  @{index_ML Syntax.parse_term: "Proof.context -> string -> term"} \\
+  @{index_ML Syntax.parse_prop: "Proof.context -> string -> term"} \\
+  @{index_ML Syntax.unparse_typ: "Proof.context -> typ -> Pretty.T"} \\
+  @{index_ML Syntax.unparse_term: "Proof.context -> term -> Pretty.T"} \\
+  \end{mldecls}
+
+  %FIXME description
+*}
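+
+text %mlex {* The following hypothetical snippet, not part of the
+  original text, spells out the slogan @{text "read = parse; check"}
+  on a fully constrained input: *}
+
+ML_val {*
+  val ctxt = @{context};
+  val pre_t = Syntax.parse_term ctxt "(x::nat) + y";  (*pre-term*)
+  val t = Syntax.check_term ctxt pre_t;  (*after type-reconstruction*)
+  @{assert} (t aconv Syntax.read_term ctxt "(x::nat) + y");
+*}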
+
+
+section {* Checking and unchecking \label{sec:term-check} *}
+
+text {* These operations define the transition between pre-terms and
+  fully-annotated terms in the sense of the logical core
+  (\chref{ch:logic}).
+
+  The \emph{check} phase is meant to subsume a variety of mechanisms
+  in the manner of ``type-inference'' or ``type-reconstruction'' or
+  ``type-improvement'', not just type-checking in the narrow sense.
+  The \emph{uncheck} phase is roughly dual: it prunes type-information
+  before pretty printing.
+
+  A typical add-on for the check/uncheck syntax layer is the @{command
+  abbreviation} mechanism.  Here the user specifies syntactic
+  definitions that are managed by the system as polymorphic @{text
+  "let"} bindings.  These are expanded during the @{text "check"}
+  phase, and contracted during the @{text "uncheck"} phase, without
+  affecting the type-assignment of the given terms.
+
+  \medskip The precise meaning of type checking depends on the context
+  --- additional check/uncheck plugins might be defined in user space!
+
+  For example, the @{command class} command defines a context where
+  @{text "check"} treats certain type instances of overloaded
+  constants according to the ``dictionary construction'' of its
+  logical foundation.  This involves ``type improvement''
+  (specialization of slightly too general types) and replacement by
+  certain locale parameters.  See also \cite{Haftmann-Wenzel:2009}.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Syntax.check_typs: "Proof.context -> typ list -> typ list"} \\
+  @{index_ML Syntax.check_terms: "Proof.context -> term list -> term list"} \\
+  @{index_ML Syntax.check_props: "Proof.context -> term list -> term list"} \\
+  @{index_ML Syntax.uncheck_typs: "Proof.context -> typ list -> typ list"} \\
+  @{index_ML Syntax.uncheck_terms: "Proof.context -> term list -> term list"} \\
+  \end{mldecls}
+
+  %FIXME description
+*}
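+
+text %mlex {* The following hypothetical snippet, not part of the
+  original text, indicates that checking of several terms happens
+  simultaneously: the type constraint on the first occurrence of
+  @{text "x"} is shared with the second one. *}
+
+ML_val {*
+  val ts = map (Syntax.parse_term @{context}) ["x :: nat", "x"];
+  val ts' = Syntax.check_terms @{context} ts;
+  @{assert} (forall (fn t => fastype_of t = @{typ nat}) ts');
+*}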
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Tactic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,939 @@
+theory Tactic
+imports Base
+begin
+
+chapter {* Tactical reasoning *}
+
+text {* Tactical reasoning works by refining an initial claim in a
+  backwards fashion, until a solved form is reached.  A @{text "goal"}
+  consists of several subgoals that need to be solved in order to
+  achieve the main statement; zero subgoals means that the proof may
+  be finished.  A @{text "tactic"} is a refinement operation that maps
+  a goal to a lazy sequence of potential successors.  A @{text
+  "tactical"} is a combinator for composing tactics.  *}
+
+
+section {* Goals \label{sec:tactical-goals} *}
+
+text {*
+  Isabelle/Pure represents a goal as a theorem stating that the
+  subgoals imply the main goal: @{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow>
+  C"}.  The outermost goal structure is that of a Horn Clause, i.e.\
+  an iterated implication without any quantifiers\footnote{Recall that
+  outermost @{text "\<And>x. \<phi>[x]"} is always represented via schematic
+  variables in the body: @{text "\<phi>[?x]"}.  These variables may get
+  instantiated during the course of reasoning.}.  For @{text "n = 0"}
+  a goal is called ``solved''.
+
+  The structure of each subgoal @{text "A\<^sub>i"} is that of a
+  general Hereditary Harrop Formula @{text "\<And>x\<^sub>1 \<dots>
+  \<And>x\<^sub>k. H\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> H\<^sub>m \<Longrightarrow> B"}.  Here @{text
+  "x\<^sub>1, \<dots>, x\<^sub>k"} are goal parameters, i.e.\
+  arbitrary-but-fixed entities of certain types, and @{text
+  "H\<^sub>1, \<dots>, H\<^sub>m"} are goal hypotheses, i.e.\ facts that may
+  be assumed locally.  Together, this forms the goal context of the
+  conclusion @{text B} to be established.  The goal hypotheses may be
+  again arbitrary Hereditary Harrop Formulas, although the level of
+  nesting rarely exceeds 1--2 in practice.
+
+  The main conclusion @{text C} is internally marked as a protected
+  proposition, which is represented explicitly by the notation @{text
+  "#C"} here.  This ensures that the decomposition into subgoals and
+  main conclusion is well-defined for arbitrarily structured claims.
+
+  \medskip Basic goal management is performed via the following
+  Isabelle/Pure rules:
+
+  \[
+  \infer[@{text "(init)"}]{@{text "C \<Longrightarrow> #C"}}{} \qquad
+  \infer[@{text "(finish)"}]{@{text "C"}}{@{text "#C"}}
+  \]
+
+  \medskip The following low-level variants admit general reasoning
+  with protected propositions:
+
+  \[
+  \infer[@{text "(protect n)"}]{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> #C"}}{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> C"}}
+  \]
+  \[
+  \infer[@{text "(conclude)"}]{@{text "A \<Longrightarrow> \<dots> \<Longrightarrow> C"}}{@{text "A \<Longrightarrow> \<dots> \<Longrightarrow> #C"}}
+  \]
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Goal.init: "cterm -> thm"} \\
+  @{index_ML Goal.finish: "Proof.context -> thm -> thm"} \\
+  @{index_ML Goal.protect: "int -> thm -> thm"} \\
+  @{index_ML Goal.conclude: "thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML "Goal.init"}~@{text C} initializes a tactical goal from
+  the well-formed proposition @{text C}.
+
+  \item @{ML "Goal.finish"}~@{text "ctxt thm"} checks whether theorem
+  @{text "thm"} is a solved goal (no subgoals), and concludes the
+  result by removing the goal protection.  The context is only
+  required for printing error messages.
+
+  \item @{ML "Goal.protect"}~@{text "n thm"} protects the statement
+  of theorem @{text "thm"}.  The parameter @{text n} indicates the
+  number of premises to be retained.
+
+  \item @{ML "Goal.conclude"}~@{text "thm"} removes the goal
+  protection, even if there are pending subgoals.
+
+  \end{description}
+*}
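+
+text %mlex {* The following hypothetical sketch, not part of the
+  original text, exercises the basic goal protocol on a trivial Pure
+  statement: the goal is initialized, refined by a single resolution
+  step, and finished. *}
+
+ML_val {*
+  val goal0 = Goal.init @{cprop "x \<equiv> x"};
+  val SOME (goal1, _) = Seq.pull (rtac @{thm reflexive} 1 goal0);
+  val res = Goal.finish @{context} goal1;
+*}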
+
+
+section {* Tactics\label{sec:tactics} *}
+
+text {* A @{text "tactic"} is a function @{text "goal \<rightarrow> goal\<^sup>*\<^sup>*"} that
+  maps a given goal state (represented as a theorem, cf.\
+  \secref{sec:tactical-goals}) to a lazy sequence of potential
+  successor states.  The underlying sequence implementation is lazy
+  both in head and tail, and is purely functional in \emph{not}
+  supporting memoing.\footnote{The lack of memoing and the strict
+  nature of SML requires some care when working with low-level
+  sequence operations, to avoid duplicate or premature evaluation of
+  results.  It also means that modified runtime behavior, such as
+  timeout, is very hard to achieve for general tactics.}
+
+  An \emph{empty result sequence} means that the tactic has failed: in
+  a compound tactic expression other tactics might be tried instead,
+  or the whole refinement step might fail outright, producing a
+  toplevel error message in the end.  When implementing tactics from
+  scratch, one should take care to observe the basic protocol of
+  mapping regular error conditions to an empty result; only serious
+  faults should emerge as exceptions.
+
+  By enumerating \emph{multiple results}, a tactic can easily express
+  the potential outcome of an internal search process.  There are also
+  combinators for building proof tools that involve search
+  systematically, see also \secref{sec:tacticals}.
+
+  \medskip As explained before, a goal state essentially consists of a
+  list of subgoals that imply the main goal (conclusion).  Tactics may
+  operate on all subgoals or on an explicitly specified subgoal, but
+  must not change the main conclusion (apart from instantiating
+  schematic goal variables).
+
+  Tactics with explicit \emph{subgoal addressing} are of the form
+  @{text "int \<rightarrow> tactic"} and may be applied to a particular subgoal
+  (counting from 1).  If the subgoal number is out of range, the
+  tactic should fail with an empty result sequence, but must not raise
+  an exception!
+
+  Operating on a particular subgoal means to replace it by an interval
+  of zero or more subgoals in the same place; other subgoals must not
+  be affected, apart from instantiating schematic variables ranging
+  over the whole goal state.
+
+  A common pattern of composing tactics with subgoal addressing is to
+  try the first one, and then the second one only if the subgoal has
+  not been solved yet.  Special care is required here to avoid bumping
+  into unrelated subgoals that happen to come after the original
+  subgoal.  Assuming that there is only a single initial subgoal is a
+  very common error when implementing tactics!
+
+  Tactics with internal subgoal addressing should expose the subgoal
+  index as @{text "int"} argument in full generality; a hardwired
+  subgoal 1 is not acceptable.
+  
+  \medskip The main well-formedness conditions for proper tactics are
+  summarized as follows.
+
+  \begin{itemize}
+
+  \item General tactic failure is indicated by an empty result, only
+  serious faults may produce an exception.
+
+  \item The main conclusion must not be changed, apart from
+  instantiating schematic variables.
+
+  \item A tactic operates either uniformly on all subgoals, or
+  specifically on a selected subgoal (without bumping into unrelated
+  subgoals).
+
+  \item Range errors in subgoal addressing produce an empty result.
+
+  \end{itemize}
+
+  Some of these conditions are checked by higher-level goal
+  infrastructure (\secref{sec:struct-goals}); others are not checked
+  explicitly, and violating them merely results in ill-behaved tactics
+  experienced by the user (e.g.\ tactics that insist on being
+  applicable only to singleton goals, or prevent composition via
+  standard tacticals such as @{ML REPEAT}).
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type tactic: "thm -> thm Seq.seq"} \\
+  @{index_ML no_tac: tactic} \\
+  @{index_ML all_tac: tactic} \\
+  @{index_ML print_tac: "string -> tactic"} \\[1ex]
+  @{index_ML PRIMITIVE: "(thm -> thm) -> tactic"} \\[1ex]
+  @{index_ML SUBGOAL: "(term * int -> tactic) -> int -> tactic"} \\
+  @{index_ML CSUBGOAL: "(cterm * int -> tactic) -> int -> tactic"} \\
+  @{index_ML SELECT_GOAL: "tactic -> int -> tactic"} \\
+  @{index_ML PREFER_GOAL: "tactic -> int -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type tactic} represents tactics.  The
+  well-formedness conditions described above need to be observed.  See
+  also @{file "~~/src/Pure/General/seq.ML"} for the underlying
+  implementation of lazy sequences.
+
+  \item Type @{ML_type "int -> tactic"} represents tactics with
+  explicit subgoal addressing, with well-formedness conditions as
+  described above.
+
+  \item @{ML no_tac} is a tactic that always fails, returning the
+  empty sequence.
+
+  \item @{ML all_tac} is a tactic that always succeeds, returning a
+  singleton sequence with unchanged goal state.
+
+  \item @{ML print_tac}~@{text "message"} is like @{ML all_tac}, but
+  prints a message together with the goal state on the tracing
+  channel.
+
+  \item @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
+  into a tactic with unique result.  Exception @{ML THM} is considered
+  a regular tactic failure and produces an empty result; other
+  exceptions are passed through.
+
+  \item @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
+  most basic form to produce a tactic with subgoal addressing.  The
+  given abstraction over the subgoal term and subgoal number allows one
+  to peek at the relevant information of the full goal state.  The
+  subgoal range is checked as required above.
+
+  \item @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
+  subgoal as @{ML_type cterm} instead of raw @{ML_type term}.  This
+  avoids expensive re-certification in situations where the subgoal is
+  used directly for primitive inferences.
+
+  \item @{ML SELECT_GOAL}~@{text "tac i"} confines a tactic to the
+  specified subgoal @{text "i"}.  This rearranges subgoals and the
+  main goal protection (\secref{sec:tactical-goals}), while retaining
+  the syntactic context of the overall goal state (concerning
+  schematic variables etc.).
+
+  \item @{ML PREFER_GOAL}~@{text "tac i"} rearranges subgoals to put
+  @{text "i"} in front.  This is similar to @{ML SELECT_GOAL}, but
+  without changing the main goal protection.
+
+  \end{description}
+*}
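+
+text %mlex {*
+  The following sketch (the name @{text assume_eq_tac} is invented for
+  illustration) uses @{ML SUBGOAL} to build a tactic with proper subgoal
+  addressing: it closes the addressed subgoal by assumption, but only if
+  the conclusion of that subgoal is a HOL equality; in all other cases,
+  including out-of-range subgoal numbers, it fails with an empty result.
+*}
+
+ML {*
+  val assume_eq_tac =
+    SUBGOAL (fn (goal, i) =>
+      (case Logic.strip_assums_concl goal of
+        Const (@{const_name Trueprop}, _) $
+          (Const (@{const_name HOL.eq}, _) $ _ $ _) => assume_tac i
+      | _ => no_tac));
+*}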
+
+
+subsection {* Resolution and assumption tactics \label{sec:resolve-assume-tac} *}
+
+text {* \emph{Resolution} is the most basic mechanism for refining a
+  subgoal using a theorem as object-level rule.
+  \emph{Elim-resolution} is particularly suited for elimination rules:
+  it resolves with a rule, proves its first premise by assumption, and
+  finally deletes that assumption from any new subgoals.
+  \emph{Destruct-resolution} is like elim-resolution, but the given
+  destruction rules are first turned into canonical elimination
+  format.  \emph{Forward-resolution} is like destruct-resolution, but
+  without deleting the selected assumption.  The @{text "r/e/d/f"}
+  naming convention is maintained for several different kinds of
+  resolution rules and tactics.
+
+  Assumption tactics close a subgoal by unifying some of its premises
+  against its conclusion.
+
+  \medskip All the tactics in this section operate on a subgoal
+  designated by a positive integer.  Other subgoals might be affected
+  indirectly, due to instantiation of schematic variables.
+
+  There are various sources of non-determinism, the tactic result
+  sequence enumerates all possibilities of the following choices (if
+  applicable):
+
+  \begin{enumerate}
+
+  \item selecting one of the rules given as argument to the tactic;
+
+  \item selecting a subgoal premise to eliminate, unifying it against
+  the first premise of the rule;
+
+  \item unifying the conclusion of the subgoal to the conclusion of
+  the rule.
+
+  \end{enumerate}
+
+  Recall that higher-order unification may produce multiple results
+  that are enumerated here.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML resolve_tac: "thm list -> int -> tactic"} \\
+  @{index_ML eresolve_tac: "thm list -> int -> tactic"} \\
+  @{index_ML dresolve_tac: "thm list -> int -> tactic"} \\
+  @{index_ML forward_tac: "thm list -> int -> tactic"} \\
+  @{index_ML biresolve_tac: "(bool * thm) list -> int -> tactic"} \\[1ex]
+  @{index_ML assume_tac: "int -> tactic"} \\
+  @{index_ML eq_assume_tac: "int -> tactic"} \\[1ex]
+  @{index_ML match_tac: "thm list -> int -> tactic"} \\
+  @{index_ML ematch_tac: "thm list -> int -> tactic"} \\
+  @{index_ML dmatch_tac: "thm list -> int -> tactic"} \\
+  @{index_ML bimatch_tac: "(bool * thm) list -> int -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML resolve_tac}~@{text "thms i"} refines the goal state
+  using the given theorems, which should normally be introduction
+  rules.  The tactic resolves a rule's conclusion with subgoal @{text
+  i}, replacing it by the corresponding versions of the rule's
+  premises.
+
+  \item @{ML eresolve_tac}~@{text "thms i"} performs elim-resolution
+  with the given theorems, which should normally be elimination rules.
+
+  Note that @{ML "eresolve_tac [asm_rl]"} is equivalent to @{ML
+  assume_tac}, which facilitates mixing of assumption steps with
+  genuine eliminations.
+
+  \item @{ML dresolve_tac}~@{text "thms i"} performs
+  destruct-resolution with the given theorems, which should normally
+  be destruction rules.  This replaces an assumption by the result of
+  applying one of the rules.
+
+  \item @{ML forward_tac} is like @{ML dresolve_tac} except that the
+  selected assumption is not deleted.  It applies a rule to an
+  assumption, adding the result as a new assumption.
+
+  \item @{ML biresolve_tac}~@{text "brls i"} refines the proof state
+  by resolution or elim-resolution on each rule, as indicated by its
+  flag.  It affects subgoal @{text "i"} of the proof state.
+
+  For each pair @{text "(flag, rule)"}, it applies resolution if the
+  flag is @{text "false"} and elim-resolution if the flag is @{text
+  "true"}.  A single tactic call handles a mixture of introduction and
+  elimination rules, which is useful to organize the search process
+  systematically in proof tools.
+
+  \item @{ML assume_tac}~@{text i} attempts to solve subgoal @{text i}
+  by assumption (modulo higher-order unification).
+
+  \item @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
+  only for immediate @{text "\<alpha>"}-convertibility instead of using
+  unification.  It succeeds (with a unique next state) if one of the
+  assumptions is equal to the subgoal's conclusion.  Since it does not
+  instantiate variables, it cannot make other subgoals unprovable.
+
+  \item @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
+  bimatch_tac} are similar to @{ML resolve_tac}, @{ML eresolve_tac},
+  @{ML dresolve_tac}, and @{ML biresolve_tac}, respectively, but do
+  not instantiate schematic variables in the goal state.%
+\footnote{Strictly speaking, matching means to treat the unknowns in the goal
+  state as constants, but these tactics merely discard unifiers that would
+  update the goal state. In rare situations (where the conclusion and 
+  goal state have flexible terms at the same position), the tactic
+  will fail even though an acceptable unifier exists.}
+  These tactics were written for a specific application within the classical reasoner.
+
+  Flexible subgoals are not updated at will, but are left alone.
+  \end{description}
+*}
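+
+text %mlex {*
+  For example, a single step of a naive propositional prover might
+  combine these tactics on the same subgoal as follows.  This is only a
+  sketch with an invented name, not the actual setup of any Isabelle
+  proof tool.
+*}
+
+ML {*
+  fun prop_step_tac intros elims =
+    assume_tac ORELSE' resolve_tac intros ORELSE' eresolve_tac elims;
+
+  (*hypothetical use:
+    REPEAT (FIRSTGOAL (prop_step_tac @{thms conjI} @{thms conjE}))*)
+*}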
+
+
+subsection {* Explicit instantiation within a subgoal context *}
+
+text {* The main resolution tactics (\secref{sec:resolve-assume-tac})
+  use higher-order unification, which works well in many practical
+  situations despite its daunting theoretical properties.
+  Nonetheless, there are important problem classes where unguided
+  higher-order unification is not so useful.  This typically involves
+  rules like universal elimination, existential introduction, or
+  equational substitution.  Here the unification problem involves
+  fully flexible @{text "?P ?x"} schemes, which are hard to manage
+  without further hints.
+
+  By providing a (small) rigid term for @{text "?x"} explicitly, the
+  remaining unification problem is to assign a (large) term to @{text
+  "?P"}, according to the shape of the given subgoal.  This is
+  sufficiently well-behaved in most practical situations.
+
+  \medskip Isabelle provides separate versions of the standard @{text
+  "r/e/d/f"} resolution tactics that allow to provide explicit
+  instantiations of unknowns of the given rule, wrt.\ terms that refer
+  to the implicit context of the selected subgoal.
+
+  An instantiation consists of a list of pairs of the form @{text
+  "(?x, t)"}, where @{text ?x} is a schematic variable occurring in
+  the given rule, and @{text t} is a term from the current proof
+  context, augmented by the local goal parameters of the selected
+  subgoal; cf.\ the @{text "focus"} operation described in
+  \secref{sec:variables}.
+
+  Entering the syntactic context of a subgoal is a brittle operation,
+  because its exact form is somewhat accidental, and the choice of
+  bound variable names depends on the presence of other local and
+  global names.  Explicit renaming of subgoal parameters prior to
+  explicit instantiation might help to achieve a bit more robustness.
+
+  Type instantiations may be given as well, via pairs like @{text
+  "(?'a, \<tau>)"}.  Type instantiations are distinguished from term
+  instantiations by the syntactic form of the schematic variable.
+  Types are instantiated before terms are.  Since term instantiation
+  already performs simple type-inference, explicit type instantiations
+  are seldom necessary.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML res_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+  @{index_ML eres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+  @{index_ML dres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+  @{index_ML forw_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+  @{index_ML subgoal_tac: "Proof.context -> string -> int -> tactic"} \\
+  @{index_ML thin_tac: "Proof.context -> string -> int -> tactic"} \\
+  @{index_ML rename_tac: "string list -> int -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
+  rule @{text thm} with the instantiations @{text insts}, as described
+  above, and then performs resolution on subgoal @{text i}.
+  
+  \item @{ML eres_inst_tac} is like @{ML res_inst_tac}, but performs
+  elim-resolution.
+
+  \item @{ML dres_inst_tac} is like @{ML res_inst_tac}, but performs
+  destruct-resolution.
+
+  \item @{ML forw_inst_tac} is like @{ML dres_inst_tac} except that
+  the selected assumption is not deleted.
+
+  \item @{ML subgoal_tac}~@{text "ctxt \<phi> i"} adds the proposition
+  @{text "\<phi>"} as local premise to subgoal @{text "i"}, and poses the
+  same as a new subgoal @{text "i + 1"} (in the original context).
+
+  \item @{ML thin_tac}~@{text "ctxt \<phi> i"} deletes the specified
+  premise from subgoal @{text i}.  Note that @{text \<phi>} may contain
+  schematic variables, to abbreviate the intended proposition; the
+  first matching subgoal premise will be deleted.  Removing useless
+  premises from a subgoal increases its readability and can make
+  search tactics run faster.
+
+  \item @{ML rename_tac}~@{text "names i"} renames the innermost
+  parameters of subgoal @{text i} according to the provided @{text
+  names} (which need to be distinct identifiers).
+
+  \end{description}
+
+  For historical reasons, the above instantiation tactics take
+  unparsed string arguments, which makes them hard to use in general
+  ML code.  The slightly more advanced @{ML Subgoal.FOCUS} combinator
+  of \secref{sec:struct-goals} allows one to refer to internal goal
+  structure with explicit context management.
+*}
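+
+text %mlex {*
+  For example, a tactic that introduces an explicit witness for an
+  existential claim might be sketched as follows (the name @{text
+  ex_witness_tac} is invented; the witness is given as a string to be
+  read in the context of the selected subgoal).
+*}
+
+ML {*
+  fun ex_witness_tac ctxt s =
+    res_inst_tac ctxt [(("x", 0), s)] @{thm exI};
+
+  (*hypothetical use: ex_witness_tac @{context} "0" 1*)
+*}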
+
+
+subsection {* Rearranging goal states *}
+
+text {* In rare situations there is a need to rearrange goal states:
+  either the overall collection of subgoals, or the local structure of
+  a subgoal.  Various administrative tactics make it possible to operate
+  on the concrete presentation of these conceptual sets of formulae. *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML rotate_tac: "int -> int -> tactic"} \\
+  @{index_ML distinct_subgoals_tac: tactic} \\
+  @{index_ML flexflex_tac: tactic} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML rotate_tac}~@{text "n i"} rotates the premises of subgoal
+  @{text i} by @{text n} positions: from right to left if @{text n} is
+  positive, and from left to right if @{text n} is negative.
+
+  \item @{ML distinct_subgoals_tac} removes duplicate subgoals from a
+  proof state.  This is potentially inefficient.
+
+  \item @{ML flexflex_tac} removes all flex-flex pairs from the proof
+  state by applying the trivial unifier.  This drastic step loses
+  information.  It is already part of the Isar infrastructure for
+  facts resulting from goals, and rarely needs to be invoked manually.
+
+  Flex-flex constraints arise from difficult cases of higher-order
+  unification.  To prevent this, use @{ML res_inst_tac} to instantiate
+  some variables in a rule.  Normally flex-flex constraints can be
+  ignored; they often disappear as unknowns get instantiated.
+
+  \end{description}
+*}
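+
+text %mlex {*
+  For example, a cleanup pass after some search tactic might remove
+  duplicate subgoals and any remaining flex-flex pairs (a sketch with an
+  invented name):
+*}
+
+ML {*
+  val cleanup_tac = distinct_subgoals_tac THEN flexflex_tac;
+*}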
+
+
+subsection {* Raw composition: resolution without lifting *}
+
+text {*
+  Raw composition of two rules means resolving them without prior
+  lifting or renaming of unknowns.  This low-level operation, which
+  underlies the resolution tactics, may occasionally be useful for
+  special effects.  Schematic variables are not renamed by default, so
+  beware of clashes!
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML compose_tac: "(bool * thm * int) -> int -> tactic"} \\
+  @{index_ML Drule.compose: "thm * int * thm -> thm"} \\
+  @{index_ML_op COMP: "thm * thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML compose_tac}~@{text "(flag, rule, m) i"} refines subgoal
+  @{text "i"} using @{text "rule"}, without lifting.  The @{text
+  "rule"} is taken to have the form @{text "\<psi>\<^sub>1 \<Longrightarrow> \<dots> \<psi>\<^sub>m \<Longrightarrow> \<psi>"}, where
+  @{text "\<psi>"} need not be atomic; thus @{text "m"} determines the
+  number of new subgoals.  If @{text "flag"} is @{text "true"} then it
+  performs elim-resolution --- it solves the first premise of @{text
+  "rule"} by assumption and deletes that assumption.
+
+  \item @{ML Drule.compose}~@{text "(thm\<^sub>1, i, thm\<^sub>2)"} uses @{text "thm\<^sub>1"},
+  regarded as an atomic formula, to solve premise @{text "i"} of
+  @{text "thm\<^sub>2"}.  Let @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} be @{text
+  "\<psi>"} and @{text "\<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>"}.  The unique @{text "s"} that
+  unifies @{text "\<psi>"} and @{text "\<phi>\<^sub>i"} yields the theorem @{text "(\<phi>\<^sub>1 \<Longrightarrow>
+  \<dots> \<phi>\<^sub>i\<^sub>-\<^sub>1 \<Longrightarrow> \<phi>\<^sub>i\<^sub>+\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>)s"}.  Multiple results are considered as
+  error (exception @{ML THM}).
+
+  \item @{text "thm\<^sub>1 COMP thm\<^sub>2"} is the same as @{text "Drule.compose
+  (thm\<^sub>1, 1, thm\<^sub>2)"}.
+
+  \end{description}
+
+  \begin{warn}
+  These low-level operations are stepping outside the structure
+  imposed by regular rule resolution.  Used without understanding of
+  the consequences, they may produce results that cause problems with
+  standard rules and tactics later on.
+  \end{warn}
+*}
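+
+text %mlex {*
+  As a small sketch (the name @{text true_conjI} is invented), @{ML_op
+  COMP} can pre-instantiate a premise of a rule by another theorem:
+*}
+
+ML {*
+  (*solve the first premise ?P of conjI by TrueI: "?Q ==> True & ?Q"*)
+  val true_conjI = @{thm TrueI} COMP @{thm conjI};
+*}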
+
+
+section {* Tacticals \label{sec:tacticals} *}
+
+text {* A \emph{tactical} is a functional combinator for building up
+  complex tactics from simpler ones.  Common tacticals perform
+  sequential composition, disjunctive choice, iteration, or goal
+  addressing.  Various search strategies may be expressed via
+  tacticals.
+*}
+
+
+subsection {* Combining tactics *}
+
+text {* Sequential composition and alternative choices are the most
+  basic ways to combine tactics, similarly to ``@{verbatim ","}'' and
+  ``@{verbatim "|"}'' in Isar method notation.  This corresponds to
+  @{ML_op "THEN"} and @{ML_op "ORELSE"} in ML, but there are further
+  possibilities for fine-tuning alternation of tactics such as @{ML_op
+  "APPEND"}.  Further details become visible in ML due to explicit
+  subgoal addressing.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_op "THEN": "tactic * tactic -> tactic"} \\
+  @{index_ML_op "ORELSE": "tactic * tactic -> tactic"} \\
+  @{index_ML_op "APPEND": "tactic * tactic -> tactic"} \\
+  @{index_ML "EVERY": "tactic list -> tactic"} \\
+  @{index_ML "FIRST": "tactic list -> tactic"} \\[0.5ex]
+
+  @{index_ML_op "THEN'": "('a -> tactic) * ('a -> tactic) -> 'a -> tactic"} \\
+  @{index_ML_op "ORELSE'": "('a -> tactic) * ('a -> tactic) -> 'a -> tactic"} \\
+  @{index_ML_op "APPEND'": "('a -> tactic) * ('a -> tactic) -> 'a -> tactic"} \\
+  @{index_ML "EVERY'": "('a -> tactic) list -> 'a -> tactic"} \\
+  @{index_ML "FIRST'": "('a -> tactic) list -> 'a -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{text "tac\<^sub>1"}~@{ML_op THEN}~@{text "tac\<^sub>2"} is the sequential
+  composition of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a goal
+  state, it returns all states reachable in two steps by applying
+  @{text "tac\<^sub>1"} followed by @{text "tac\<^sub>2"}.  First, it applies @{text
+  "tac\<^sub>1"} to the goal state, getting a sequence of possible next
+  states; then, it applies @{text "tac\<^sub>2"} to each of these and
+  concatenates the results to produce again one flat sequence of
+  states.
+
+  \item @{text "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "tac\<^sub>2"} makes a choice
+  between @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a state, it
+  tries @{text "tac\<^sub>1"} and returns the result if non-empty; if @{text
+  "tac\<^sub>1"} fails then it uses @{text "tac\<^sub>2"}.  This is a deterministic
+  choice: if @{text "tac\<^sub>1"} succeeds then @{text "tac\<^sub>2"} is excluded
+  from the result.
+
+  \item @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the
+  possible results of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Unlike
+  @{ML_op "ORELSE"} there is \emph{no commitment} to either tactic, so
+  @{ML_op "APPEND"} helps to avoid incompleteness during search, at
+  the cost of potential inefficiencies.
+
+  \item @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
+  "tac\<^sub>1"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac\<^sub>n"}.
+  Note that @{ML "EVERY []"} is the same as @{ML all_tac}: it always
+  succeeds.
+
+  \item @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
+  "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op "ORELSE"}~@{text
+  "tac\<^sub>n"}.  Note that @{ML "FIRST []"} is the same as @{ML no_tac}: it
+  always fails.
+
+  \item @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for
+  tactics with explicit subgoal addressing.  So @{text
+  "(tac\<^sub>1"}~@{ML_op THEN'}~@{text "tac\<^sub>2) i"} is the same as @{text
+  "(tac\<^sub>1 i"}~@{ML_op THEN}~@{text "tac\<^sub>2 i)"}.
+
+  The other primed tacticals work analogously.
+
+  \end{description}
+*}
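+
+text %mlex {*
+  For example, the following two tactics (with invented names) express
+  the same compound refinement, once with explicit subgoal numbers and
+  once via the lifted tacticals:
+*}
+
+ML {*
+  val conj_tac1 =
+    resolve_tac @{thms conjI} 1 THEN assume_tac 1 THEN assume_tac 1;
+
+  val conj_tac2 =
+    (resolve_tac @{thms conjI} THEN' assume_tac THEN' assume_tac) 1;
+*}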
+
+
+subsection {* Repetition tacticals *}
+
+text {* These tacticals provide further control over repetition of
+  tactics, beyond the stylized forms of ``@{verbatim "?"}''  and
+  ``@{verbatim "+"}'' in Isar method expressions. *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML "TRY": "tactic -> tactic"} \\
+  @{index_ML "REPEAT": "tactic -> tactic"} \\
+  @{index_ML "REPEAT1": "tactic -> tactic"} \\
+  @{index_ML "REPEAT_DETERM": "tactic -> tactic"} \\
+  @{index_ML "REPEAT_DETERM_N": "int -> tactic -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML TRY}~@{text "tac"} applies @{text "tac"} to the goal
+  state and returns the resulting sequence, if non-empty; otherwise it
+  returns the original state.  Thus, it applies @{text "tac"} at most
+  once.
+
+  Note that for tactics with subgoal addressing, the combinator can be
+  applied via functional composition: @{ML "TRY"}~@{ML_op o}~@{text
+  "tac"}.  There is no need for @{verbatim TRY'}.
+
+  \item @{ML REPEAT}~@{text "tac"} applies @{text "tac"} to the goal
+  state and, recursively, to each element of the resulting sequence.
+  The resulting sequence consists of those states that make @{text
+  "tac"} fail.  Thus, it applies @{text "tac"} as many times as
+  possible (including zero times), and allows backtracking over each
+  invocation of @{text "tac"}.  @{ML REPEAT} is more general than @{ML
+  REPEAT_DETERM}, but requires more space.
+
+  \item @{ML REPEAT1}~@{text "tac"} is like @{ML REPEAT}~@{text "tac"}
+  but it always applies @{text "tac"} at least once, failing if this
+  is impossible.
+
+  \item @{ML REPEAT_DETERM}~@{text "tac"} applies @{text "tac"} to the
+  goal state and, recursively, to the head of the resulting sequence.
+  It returns the first state to make @{text "tac"} fail.  It is
+  deterministic, discarding alternative outcomes.
+
+  \item @{ML REPEAT_DETERM_N}~@{text "n tac"} is like @{ML
+  REPEAT_DETERM}~@{text "tac"} but the number of repetitions is bound
+  by @{text "n"} (where @{ML "~1"} means @{text "\<infinity>"}).
+
+  \end{description}
+*}
+
+text %mlex {* The basic tactics and tacticals considered above follow
+  some algebraic laws:
+
+  \begin{itemize}
+
+  \item @{ML all_tac} is the identity element of the tactical @{ML_op
+  "THEN"}.
+
+  \item @{ML no_tac} is the identity element of @{ML_op "ORELSE"} and
+  @{ML_op "APPEND"}.  Also, it is a zero element for @{ML_op "THEN"},
+  which means that @{text "tac"}~@{ML_op THEN}~@{ML no_tac} is
+  equivalent to @{ML no_tac}.
+
+  \item @{ML TRY} and @{ML REPEAT} can be expressed as (recursive)
+  functions over more basic combinators (ignoring some internal
+  implementation tricks):
+
+  \end{itemize}
+*}
+
+ML {*
+  fun TRY tac = tac ORELSE all_tac;
+  fun REPEAT tac st = ((tac THEN REPEAT tac) ORELSE all_tac) st;
+*}
+
+text {* If @{text "tac"} can return multiple outcomes then so can @{ML
+  REPEAT}~@{text "tac"}.  @{ML REPEAT} uses @{ML_op "ORELSE"} and not
+  @{ML_op "APPEND"}, it applies @{text "tac"} as many times as
+  possible in each outcome.
+
+  \begin{warn}
+  Note the explicit abstraction over the goal state in the ML
+  definition of @{ML REPEAT}.  Recursive tacticals must be coded in
+  this awkward fashion to avoid infinite recursion of eager functional
+  evaluation in Standard ML.  The following attempt would make @{ML
+  REPEAT}~@{text "tac"} loop:
+  \end{warn}
+*}
+
+ML {*
+  (*BAD -- does not terminate!*)
+  fun REPEAT tac = (tac THEN REPEAT tac) ORELSE all_tac;
+*}
+
+
+subsection {* Applying tactics to subgoal ranges *}
+
+text {* Tactics with explicit subgoal addressing
+  @{ML_type "int -> tactic"} can be used together with tacticals that
+  act like ``subgoal quantifiers'': guided by success of the body
+  tactic a certain range of subgoals is covered.  Thus the body tactic
+  is applied to \emph{all} subgoals, \emph{some} subgoal etc.
+
+  Suppose that the goal state has @{text "n \<ge> 0"} subgoals.  Many of
+  these tacticals address subgoal ranges counting downwards from
+  @{text "n"} towards @{text "1"}.  This has the fortunate effect that
+  newly emerging subgoals are concatenated in the result, without
+  interfering with each other.  Nonetheless, there might be situations
+  where a different order is desired. *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML ALLGOALS: "(int -> tactic) -> tactic"} \\
+  @{index_ML SOMEGOAL: "(int -> tactic) -> tactic"} \\
+  @{index_ML FIRSTGOAL: "(int -> tactic) -> tactic"} \\
+  @{index_ML HEADGOAL: "(int -> tactic) -> tactic"} \\
+  @{index_ML REPEAT_SOME: "(int -> tactic) -> tactic"} \\
+  @{index_ML REPEAT_FIRST: "(int -> tactic) -> tactic"} \\
+  @{index_ML RANGE: "(int -> tactic) list -> int -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML ALLGOALS}~@{text "tac"} is equivalent to @{text "tac
+  n"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac 1"}.  It
+  applies @{text tac} to all subgoals, counting downwards.
+
+  \item @{ML SOMEGOAL}~@{text "tac"} is equivalent to @{text "tac
+  n"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac 1"}.  It
+  applies @{text "tac"} to one subgoal, counting downwards.
+
+  \item @{ML FIRSTGOAL}~@{text "tac"} is equivalent to @{text "tac
+  1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac n"}.  It
+  applies @{text "tac"} to one subgoal, counting upwards.
+
+  \item @{ML HEADGOAL}~@{text "tac"} is equivalent to @{text "tac 1"}.
+  It applies @{text "tac"} unconditionally to the first subgoal.
+
+  \item @{ML REPEAT_SOME}~@{text "tac"} applies @{text "tac"} once or
+  more to a subgoal, counting downwards.
+
+  \item @{ML REPEAT_FIRST}~@{text "tac"} applies @{text "tac"} once or
+  more to a subgoal, counting upwards.
+
+  \item @{ML RANGE}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>k] i"} is equivalent to
+  @{text "tac\<^sub>k (i + k - 1)"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op
+  THEN}~@{text "tac\<^sub>1 i"}.  It applies the given list of tactics to the
+  corresponding range of subgoals, counting downwards.
+
+  \end{description}
+*}
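+
+text %mlex {*
+  Typical uses of these subgoal quantifiers are sketched below (invented
+  names, for illustration only): closing all remaining subgoals by
+  assumption, or applying a set of rules to the first subgoal where one
+  of them fits.
+*}
+
+ML {*
+  val finish_by_assumption_tac = ALLGOALS assume_tac;
+  fun first_rule_tac rules = FIRSTGOAL (resolve_tac rules);
+*}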
+
+
+subsection {* Control and search tacticals *}
+
+text {* A predicate on theorems @{ML_type "thm -> bool"} can test
+  whether a goal state enjoys some desirable property --- such as
+  having no subgoals.  Tactics that search for satisfactory goal
+  states are easy to express.  The main search procedures,
+  depth-first, breadth-first and best-first, are provided as
+  tacticals.  They generate the search tree by repeatedly applying a
+  given tactic.  *}
+
+
+text %mlref ""
+
+subsubsection {* Filtering a tactic's results *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML FILTER: "(thm -> bool) -> tactic -> tactic"} \\
+  @{index_ML CHANGED: "tactic -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML FILTER}~@{text "sat tac"} applies @{text "tac"} to the
+  goal state and returns a sequence consisting of those result goal
+  states that are satisfactory in the sense of @{text "sat"}.
+
+  \item @{ML CHANGED}~@{text "tac"} applies @{text "tac"} to the goal
+  state and returns precisely those states that differ from the
+  original state (according to @{ML Thm.eq_thm}).  Thus @{ML
+  CHANGED}~@{text "tac"} always has some effect on the state.
+
+  \end{description}
+*}
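+
+text {*
+  For instance, @{ML TRY} never fails, but wrapping it in @{ML CHANGED}
+  restores failure whenever the attempted tactic had no visible effect
+  on the goal state (a sketch with an invented name):
+*}
+
+ML {*
+  fun changed_try_tac tac = CHANGED (TRY tac);
+*}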
+
+
+subsubsection {* Depth-first search *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML DEPTH_FIRST: "(thm -> bool) -> tactic -> tactic"} \\
+  @{index_ML DEPTH_SOLVE: "tactic -> tactic"} \\
+  @{index_ML DEPTH_SOLVE_1: "tactic -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML DEPTH_FIRST}~@{text "sat tac"} returns the goal state if
+  @{text "sat"} returns true.  Otherwise it applies @{text "tac"},
+  then recursively searches from each element of the resulting
+  sequence.  The code uses a stack for efficiency, in effect applying
+  @{text "tac"}~@{ML_op THEN}~@{ML DEPTH_FIRST}~@{text "sat tac"} to
+  the state.
+
+  \item @{ML DEPTH_SOLVE}~@{text "tac"} uses @{ML DEPTH_FIRST} to
+  search for states having no subgoals.
+
+  \item @{ML DEPTH_SOLVE_1}~@{text "tac"} uses @{ML DEPTH_FIRST} to
+  search for states having fewer subgoals than the given state.  Thus,
+  it insists upon solving at least one subgoal.
+
+  \end{description}
+*}
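+
+text {*
+  A naive depth-first prover might be sketched as follows (invented
+  name; a goal state counts as satisfactory when no premises, i.e.\
+  subgoals, remain):
+*}
+
+ML {*
+  fun depth_prove_tac rules =
+    DEPTH_FIRST (has_fewer_prems 1)
+      (FIRSTGOAL (assume_tac ORELSE' resolve_tac rules));
+*}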
+
+
+subsubsection {* Other search strategies *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML BREADTH_FIRST: "(thm -> bool) -> tactic -> tactic"} \\
+  @{index_ML BEST_FIRST: "(thm -> bool) * (thm -> int) -> tactic -> tactic"} \\
+  @{index_ML THEN_BEST_FIRST: "tactic -> (thm -> bool) * (thm -> int) -> tactic -> tactic"} \\
+  \end{mldecls}
+
+  These search strategies will find a solution if one exists.
+  However, they do not enumerate all solutions; they terminate after
+  the first satisfactory result from @{text "tac"}.
+
+  \begin{description}
+
+  \item @{ML BREADTH_FIRST}~@{text "sat tac"} uses breadth-first
+  search to find states for which @{text "sat"} is true.  For most
+  applications, it is too slow.
+
+  \item @{ML BEST_FIRST}~@{text "(sat, dist) tac"} does a heuristic
+  search, using @{text "dist"} to estimate the distance from a
+  satisfactory state (in the sense of @{text "sat"}).  It maintains a
+  list of states ordered by distance.  It applies @{text "tac"} to the
+  head of this list; if the result contains any satisfactory states,
+  then it returns them.  Otherwise, @{ML BEST_FIRST} adds the new
+  states to the list, and continues.
+
+  The distance function is typically @{ML size_of_thm}, which computes
+  the size of the state.  The smaller the state, the fewer and simpler
+  subgoals it has.
+
+  \item @{ML THEN_BEST_FIRST}~@{text "tac\<^sub>0 (sat, dist) tac"} is like
+  @{ML BEST_FIRST}, except that the priority queue initially contains
+  the result of applying @{text "tac\<^sub>0"} to the goal state.  This
+  tactical permits separate tactics for starting the search and
+  continuing the search.
+
+  \end{description}
+*}
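+
+text {*
+  A best-first variant of the same idea, guided by the size of the goal
+  state, might look as follows (again only a sketch with an invented
+  name):
+*}
+
+ML {*
+  fun best_prove_tac rules =
+    BEST_FIRST (has_fewer_prems 1, size_of_thm)
+      (FIRSTGOAL (resolve_tac rules));
+*}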
+
+
+subsubsection {* Auxiliary tacticals for searching *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML COND: "(thm -> bool) -> tactic -> tactic -> tactic"} \\
+  @{index_ML IF_UNSOLVED: "tactic -> tactic"} \\
+  @{index_ML SOLVE: "tactic -> tactic"} \\
+  @{index_ML DETERM: "tactic -> tactic"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML COND}~@{text "sat tac\<^sub>1 tac\<^sub>2"} applies @{text "tac\<^sub>1"} to
+  the goal state if it satisfies predicate @{text "sat"}, and applies
+  @{text "tac\<^sub>2"}.  It is a conditional tactical in that only one of
+  @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} is applied to a goal state.
+  However, both @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} are evaluated
+  because ML uses eager evaluation.
+
+  \item @{ML IF_UNSOLVED}~@{text "tac"} applies @{text "tac"} to the
+  goal state if it has any subgoals, and simply returns the goal state
+  otherwise.  Many common tactics, such as @{ML resolve_tac}, fail if
+  applied to a goal state that has no subgoals.
+
+  \item @{ML SOLVE}~@{text "tac"} applies @{text "tac"} to the goal
+  state and then fails iff there are subgoals left.
+
+  \item @{ML DETERM}~@{text "tac"} applies @{text "tac"} to the goal
+  state and returns the head of the resulting sequence.  @{ML DETERM}
+  limits the search space by making its argument deterministic.
+
+  \end{description}
+*}
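+
+text {*
+  For example, to commit to the first result of a search tactic and
+  insist that it actually solves the goal (invented name, for
+  illustration):
+*}
+
+ML {*
+  fun solved_det_tac tac = DETERM (SOLVE tac);
+*}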
+
+
+subsubsection {* Predicates and functions useful for searching *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML has_fewer_prems: "int -> thm -> bool"} \\
+  @{index_ML Thm.eq_thm: "thm * thm -> bool"} \\
+  @{index_ML Thm.eq_thm_prop: "thm * thm -> bool"} \\
+  @{index_ML size_of_thm: "thm -> int"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML has_fewer_prems}~@{text "n thm"} reports whether @{text
+  "thm"} has fewer than @{text "n"} premises.
+
+  \item @{ML Thm.eq_thm}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether @{text
+  "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.  Both theorems must have the
+  same conclusion, the same set of hypotheses, and the same set of sort
+  hypotheses.  Names of bound variables are ignored as usual.
+
+  \item @{ML Thm.eq_thm_prop}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether
+  the propositions of @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.
+  Names of bound variables are ignored.
+
+  \item @{ML size_of_thm}~@{text "thm"} computes the size of @{text
+  "thm"}, namely the number of variables, constants and abstractions
+  in its conclusion.  It may serve as a distance function for
+  @{ML BEST_FIRST}.
+
+  \end{description}
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -e
+
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_TOOL" logo Isar
+
+cp "$ISABELLE_HOME/src/Doc/iman.sty" .
+cp "$ISABELLE_HOME/src/Doc/extra.sty" .
+cp "$ISABELLE_HOME/src/Doc/isar.sty" .
+cp "$ISABELLE_HOME/src/Doc/ttbox.sty" .
+cp "$ISABELLE_HOME/src/Doc/underscore.sty" .
+cp "$ISABELLE_HOME/src/Doc/manual.bib" .
+
+"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/document/root.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,125 @@
+\documentclass[12pt,a4paper,fleqn]{report}
+\usepackage[T1]{fontenc}
+\usepackage{latexsym,graphicx}
+\usepackage[refpage]{nomencl}
+\usepackage{iman,extra,isar,proof}
+\usepackage[nohyphen,strings]{underscore}
+\usepackage{isabelle}
+\usepackage{isabellesym}
+\usepackage{railsetup}
+\usepackage{ttbox}
+\usepackage{supertabular}
+\usepackage{style}
+\usepackage{pdfsetup}
+
+
+\hyphenation{Isabelle}
+\hyphenation{Isar}
+
+\isadroptag{theory}
+\title{\includegraphics[scale=0.5]{isabelle_isar}
+  \\[4ex] The Isabelle/Isar Implementation}
+\author{\emph{Makarius Wenzel}  \\[3ex]
+  With Contributions by
+  Stefan Berghofer, \\
+  Florian Haftmann
+  and Larry Paulson
+}
+
+\makeindex
+
+
+\begin{document}
+
+\maketitle
+
+\begin{abstract}
+  We describe the key concepts underlying the Isabelle/Isar
+  implementation, including ML references for the most important
+  functions.  The aim is to give some insight into the overall system
+  architecture, and provide clues on implementing applications within
+  this framework.
+\end{abstract}
+
+\vspace*{2.5cm}
+\begin{quote}
+
+  {\small\em Isabelle was not designed; it evolved.  Not everyone
+    likes this idea.  Specification experts rightly abhor
+    trial-and-error programming.  They suggest that no one should
+    write a program without first writing a complete formal
+    specification. But university departments are not software houses.
+    Programs like Isabelle are not products: when they have served
+    their purpose, they are discarded.}
+
+  Lawrence C. Paulson, ``Isabelle: The Next 700 Theorem Provers''
+
+  \vspace*{1cm}
+
+  {\small\em As I did 20 years ago, I still fervently believe that the
+    only way to make software secure, reliable, and fast is to make it
+    small.  Fight features.}
+
+  Andrew S. Tanenbaum
+
+  \vspace*{1cm}
+
+  {\small\em One thing that UNIX does not need is more features. It is
+    successful in part because it has a small number of good ideas
+    that work well together. Merely adding features does not make it
+    easier for users to do things --- it just makes the manual
+    thicker. The right solution in the right place is always more
+    effective than haphazard hacking.}
+
+  Rob Pike and Brian W. Kernighan
+
+  \vspace*{1cm}
+
+  {\small\em If you look at software today, through the lens of the
+    history of engineering, it's certainly engineering of a sort--but
+    it's the kind of engineering that people without the concept of
+    the arch did. Most software today is very much like an Egyptian
+    pyramid with millions of bricks piled on top of each other, with
+    no structural integrity, but just done by brute force and
+    thousands of slaves.}
+
+  Alan Kay
+
+\end{quote}
+
+\thispagestyle{empty}\clearpage
+
+\pagenumbering{roman}
+\tableofcontents
+\listoffigures
+\clearfirst
+
+\setcounter{chapter}{-1}
+
+\input{ML.tex}
+\input{Prelim.tex}
+\input{Logic.tex}
+\input{Syntax.tex}
+\input{Tactic.tex}
+\input{Eq.tex}
+\input{Proof.tex}
+\input{Isar.tex}
+\input{Local_Theory.tex}
+\input{Integration.tex}
+
+\begingroup
+\tocentry{\bibname}
+\bibliographystyle{abbrv} \small\raggedright\frenchspacing
+\bibliography{manual}
+\endgroup
+
+\tocentry{\indexname}
+\printindex
+
+\end{document}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: t
+%%% End:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/document/style.sty	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,68 @@
+%% toc
+\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
+\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
+
+%% references
+\newcommand{\secref}[1]{\S\ref{#1}}
+\newcommand{\chref}[1]{chapter~\ref{#1}}
+\newcommand{\figref}[1]{figure~\ref{#1}}
+
+%% math
+\newcommand{\text}[1]{\mbox{#1}}
+\newcommand{\isasymvartheta}{\isamath{\theta}}
+\newcommand{\isactrlvec}[1]{\emph{$\vec{#1}$}}
+\newcommand{\isactrlBG}{\isacharbackquoteopen}
+\newcommand{\isactrlEN}{\isacharbackquoteclose}
+
+\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
+
+\pagestyle{headings}
+\sloppy
+\binperiod
+
+\parindent 0pt\parskip 0.5ex
+
+\renewcommand{\isadigit}[1]{\isamath{#1}}
+
+\newenvironment{mldecls}{\par\noindent\begingroup\footnotesize\def\isanewline{\\}\begin{tabular}{l}}{\end{tabular}\smallskip\endgroup}
+
+\isafoldtag{FIXME}
+
+\isakeeptag{mlref}
+\renewcommand{\isatagmlref}{\subsection*{\makebox[0pt][r]{\fbox{ML}~~}Reference}}
+\renewcommand{\endisatagmlref}{}
+
+\isakeeptag{mlantiq}
+\renewcommand{\isatagmlantiq}{\subsection*{\makebox[0pt][r]{\fbox{ML}~~}Antiquotations}}
+\renewcommand{\endisatagmlantiq}{}
+
+\isakeeptag{mlex}
+\renewcommand{\isatagmlex}{\subsection*{\makebox[0pt][r]{\fbox{ML}~~}Examples}}
+\renewcommand{\endisatagmlex}{}
+
+\renewcommand{\isatagML}{\begingroup\isabellestyle{default}\isastyle\def\isadigit##1{##1}}
+\renewcommand{\endisatagML}{\endgroup}
+
+\newcommand{\minorcmd}[1]{{\sf #1}}
+\newcommand{\isasymtype}{\minorcmd{type}}
+\newcommand{\isasymval}{\minorcmd{val}}
+
+\newcommand{\isasymFIX}{\isakeyword{fix}}
+\newcommand{\isasymASSUME}{\isakeyword{assume}}
+\newcommand{\isasymDEFINE}{\isakeyword{define}}
+\newcommand{\isasymNOTE}{\isakeyword{note}}
+\newcommand{\isasymGUESS}{\isakeyword{guess}}
+\newcommand{\isasymOBTAIN}{\isakeyword{obtain}}
+\newcommand{\isasymTHEORY}{\isakeyword{theory}}
+\newcommand{\isasymUSES}{\isakeyword{uses}}
+\newcommand{\isasymEND}{\isakeyword{end}}
+\newcommand{\isasymCONSTS}{\isakeyword{consts}}
+\newcommand{\isasymDEFS}{\isakeyword{defs}}
+\newcommand{\isasymTHEOREM}{\isakeyword{theorem}}
+\newcommand{\isasymDEFINITION}{\isakeyword{definition}}
+
+\isabellestyle{literal}
+
+\railtermfont{\isabellestyle{tt}}
+\railnontermfont{\isabellestyle{itunderscore}}
+\railnamefont{\isabellestyle{itunderscore}}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Base.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,7 @@
+theory Base
+imports Pure
+begin
+
+ML_file "../antiquote_setup.ML"
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Document_Preparation.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,589 @@
+theory Document_Preparation
+imports Base Main
+begin
+
+chapter {* Document preparation \label{ch:document-prep} *}
+
+text {* Isabelle/Isar provides a simple document preparation system
+  based on {PDF-\LaTeX}, with support for hyperlinks and bookmarks
+  within that format.  This makes it possible to produce papers, books,
+  theses etc.\ from Isabelle theory sources.
+
+  {\LaTeX} output is generated while processing a \emph{session} in
+  batch mode, as explained in the \emph{The Isabelle System Manual}
+  \cite{isabelle-sys}.  The main Isabelle tools to get started with
+  document preparation are @{tool_ref mkroot} and @{tool_ref build}.
+
+  The classic Isabelle/HOL tutorial \cite{isabelle-hol-book} also
+  explains some aspects of theory presentation.  *}
+
+
+section {* Markup commands \label{sec:markup} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "header"} & : & @{text "toplevel \<rightarrow> toplevel"} \\[0.5ex]
+    @{command_def "chapter"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "section"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "subsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "subsubsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "text"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "text_raw"} & : & @{text "local_theory \<rightarrow> local_theory"} \\[0.5ex]
+    @{command_def "sect"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "subsect"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "subsubsect"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "txt"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "txt_raw"} & : & @{text "proof \<rightarrow> proof"} \\
+  \end{matharray}
+
+  Markup commands provide a structured way to insert text into the
+  document generated from a theory.  Each markup command takes a
+  single @{syntax text} argument, which is passed as argument to a
+  corresponding {\LaTeX} macro.  The default macros provided by
+  @{file "~~/lib/texinputs/isabelle.sty"} can be redefined according
+  to the needs of the underlying document and {\LaTeX} styles.
+
+  Note that formal comments (\secref{sec:comments}) are similar to
+  markup commands, but have a different status within Isabelle/Isar
+  syntax.
+
+  @{rail \<open>
+    (@@{command chapter} | @@{command section} | @@{command subsection} |
+      @@{command subsubsection} | @@{command text}) @{syntax target}? @{syntax text}
+    ;
+    (@@{command header} | @@{command text_raw} | @@{command sect} | @@{command subsect} |
+      @@{command subsubsect} | @@{command txt} | @@{command txt_raw}) @{syntax text}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command header} provides plain text markup just preceding
+  the formal beginning of a theory.  The corresponding {\LaTeX} macro
+  is @{verbatim "\\isamarkupheader"}, which acts like @{command
+  section} by default.
+  
+  \item @{command chapter}, @{command section}, @{command subsection},
+  and @{command subsubsection} mark chapter and section headings
+  within the main theory body or local theory targets.  The
+  corresponding {\LaTeX} macros are @{verbatim "\\isamarkupchapter"},
+  @{verbatim "\\isamarkupsection"}, @{verbatim
+  "\\isamarkupsubsection"} etc.
+
+  \item @{command sect}, @{command subsect}, and @{command subsubsect}
+  mark section headings within proofs.  The corresponding {\LaTeX}
+  macros are @{verbatim "\\isamarkupsect"}, @{verbatim
+  "\\isamarkupsubsect"} etc.
+
+  \item @{command text} and @{command txt} specify paragraphs of plain
+  text.  This corresponds to a {\LaTeX} environment @{verbatim
+  "\\begin{isamarkuptext}"} @{text "\<dots>"} @{verbatim
+  "\\end{isamarkuptext}"} etc.
+
+  \item @{command text_raw} and @{command txt_raw} insert {\LaTeX}
+  source into the output, without additional markup.  Thus the full
+  range of document manipulations becomes available, at the risk of
+  messing up document output.
+
+  \end{description}
+
+  Except for @{command "text_raw"} and @{command "txt_raw"}, the text
+  passed to any of the above markup commands may refer to formal
+  entities via \emph{document antiquotations}, see also
+  \secref{sec:antiq}.  These are interpreted in the present theory or
+  proof context, or the named @{text "target"}.
+
+  \medskip The proof markup commands closely resemble those for theory
+  specifications, but have a different formal status and produce
+  different {\LaTeX} macros.  The default definitions coincide for
+  analogous commands such as @{command section} and @{command sect}.
+*}
+
+
+section {* Document Antiquotations \label{sec:antiq} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{antiquotation_def "theory"} & : & @{text antiquotation} \\
+    @{antiquotation_def "thm"} & : & @{text antiquotation} \\
+    @{antiquotation_def "lemma"} & : & @{text antiquotation} \\
+    @{antiquotation_def "prop"} & : & @{text antiquotation} \\
+    @{antiquotation_def "term"} & : & @{text antiquotation} \\
+    @{antiquotation_def term_type} & : & @{text antiquotation} \\
+    @{antiquotation_def typeof} & : & @{text antiquotation} \\
+    @{antiquotation_def const} & : & @{text antiquotation} \\
+    @{antiquotation_def abbrev} & : & @{text antiquotation} \\
+    @{antiquotation_def typ} & : & @{text antiquotation} \\
+    @{antiquotation_def type} & : & @{text antiquotation} \\
+    @{antiquotation_def class} & : & @{text antiquotation} \\
+    @{antiquotation_def "text"} & : & @{text antiquotation} \\
+    @{antiquotation_def goals} & : & @{text antiquotation} \\
+    @{antiquotation_def subgoals} & : & @{text antiquotation} \\
+    @{antiquotation_def prf} & : & @{text antiquotation} \\
+    @{antiquotation_def full_prf} & : & @{text antiquotation} \\
+    @{antiquotation_def ML} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_op} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_type} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_structure} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_functor} & : & @{text antiquotation} \\
+    @{antiquotation_def "file"} & : & @{text antiquotation} \\
+    @{antiquotation_def "url"} & : & @{text antiquotation} \\
+  \end{matharray}
+
+  The overall content of an Isabelle/Isar theory may alternate between
+  formal and informal text.  The main body consists of formal
+  specification and proof commands, interspersed with markup commands
+  (\secref{sec:markup}) or document comments (\secref{sec:comments}).
+  The argument of markup commands quotes informal text to be printed
+  in the resulting document, but may again refer to formal entities
+  via \emph{document antiquotations}.
+
+  For example, embedding of ``@{text [source=false] "@{term [show_types] \"f x = a + x\"}"}''
+  within a text block makes
+  \isa{{\isacharparenleft}f{\isasymColon}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isacharparenright}\ {\isacharparenleft}x{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}a{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharplus}\ x} appear in the final {\LaTeX} document.
+
+  Antiquotations usually spare the author tedious typing of logical
+  entities in full detail.  Even more importantly, some degree of
+  consistency-checking between the main body of formal text and its
+  informal explanation is achieved, since terms and types appearing in
+  antiquotations are checked within the current theory or proof
+  context.
+
+  %% FIXME less monolithic presentation, move to individual sections!?
+  @{rail \<open>
+    '@{' antiquotation '}'
+    ;
+    @{syntax_def antiquotation}:
+      @@{antiquotation theory} options @{syntax name} |
+      @@{antiquotation thm} options styles @{syntax thmrefs} |
+      @@{antiquotation lemma} options @{syntax prop} @'by' @{syntax method} @{syntax method}? |
+      @@{antiquotation prop} options styles @{syntax prop} |
+      @@{antiquotation term} options styles @{syntax term} |
+      @@{antiquotation (HOL) value} options styles @{syntax term} |
+      @@{antiquotation term_type} options styles @{syntax term} |
+      @@{antiquotation typeof} options styles @{syntax term} |
+      @@{antiquotation const} options @{syntax term} |
+      @@{antiquotation abbrev} options @{syntax term} |
+      @@{antiquotation typ} options @{syntax type} |
+      @@{antiquotation type} options @{syntax name} |
+      @@{antiquotation class} options @{syntax name} |
+      @@{antiquotation text} options @{syntax name}
+    ;
+    @{syntax antiquotation}:
+      @@{antiquotation goals} options |
+      @@{antiquotation subgoals} options |
+      @@{antiquotation prf} options @{syntax thmrefs} |
+      @@{antiquotation full_prf} options @{syntax thmrefs} |
+      @@{antiquotation ML} options @{syntax name} |
+      @@{antiquotation ML_op} options @{syntax name} |
+      @@{antiquotation ML_type} options @{syntax name} |
+      @@{antiquotation ML_structure} options @{syntax name} |
+      @@{antiquotation ML_functor} options @{syntax name} |
+      @@{antiquotation "file"} options @{syntax name} |
+      @@{antiquotation file_unchecked} options @{syntax name} |
+      @@{antiquotation url} options @{syntax name}
+    ;
+    options: '[' (option * ',') ']'
+    ;
+    option: @{syntax name} | @{syntax name} '=' @{syntax name}
+    ;
+    styles: '(' (style + ',') ')'
+    ;
+    style: (@{syntax name} +)
+  \<close>}
+
+  Note that the syntax of antiquotations may \emph{not} include source
+  comments @{verbatim "(*"}~@{text "\<dots>"}~@{verbatim "*)"} nor verbatim
+  text @{verbatim "{"}@{verbatim "*"}~@{text "\<dots>"}~@{verbatim
+  "*"}@{verbatim "}"}.
+
+  \begin{description}
+  
+  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
+  guaranteed to refer to a valid ancestor theory in the current
+  context.
+
+  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
+  Full fact expressions are allowed here, including attributes
+  (\secref{sec:syn-att}).
+
+  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
+  "\<phi>"}.
+
+  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
+  @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
+
+  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
+  
+  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
+  its result, see also @{command_ref (HOL) value}.
+
+  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
+  annotated with its type.
+
+  \item @{text "@{typeof t}"} prints the type of a well-typed term
+  @{text "t"}.
+
+  \item @{text "@{const c}"} prints a logical or syntactic constant
+  @{text "c"}.
+  
+  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
+  @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
+
+  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
+
+  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
+    constructor @{text "\<kappa>"}.
+
+  \item @{text "@{class c}"} prints a class @{text c}.
+
+  \item @{text "@{text s}"} prints uninterpreted source text @{text
+  s}.  This is particularly useful to print portions of text according
+  to the Isabelle document style, without demanding well-formedness,
+  e.g.\ small pieces of terms that should not be parsed or
+  type-checked yet.
+
+  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
+  state.  This is mainly for support of tactic-emulation scripts
+  within Isar.  Presentation of goal states does not conform to the
+  idea of human-readable proof documents!
+
+  When explaining proofs in detail it is usually better to spell out
+  the reasoning via proper Isar proof commands, instead of peeking at
+  the internal machine configuration.
+  
+  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
+  does not print the main goal.
+  
+  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
+  corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
+  requires proof terms to be switched on for the current logic
+  session.
+  
+  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
+  a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
+  information omitted in the compact proof term, which is denoted by
+  ``@{text _}'' placeholders there.
+  
+  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
+  s}"}, @{text "@{ML_structure s}"}, and @{text "@{ML_functor s}"}
+  check text @{text s} as ML value, infix operator, type, structure,
+  and functor respectively.  The source is printed verbatim.
+
+  \item @{text "@{file path}"} checks that @{text "path"} refers to a
+  file (or directory) and prints it verbatim.
+
+  \item @{text "@{file_unchecked path}"} is like @{text "@{file
+  path}"}, but does not check the existence of the @{text "path"}
+  within the file-system.
+
+  \item @{text "@{url name}"} produces markup for the given URL, which
+  results in an active hyperlink within the text.
+
+  \end{description}
+*}
+
+
+subsection {* Styled antiquotations *}
+
+text {* The antiquotations @{text thm}, @{text prop} and @{text
+  term} admit an extra \emph{style} specification to modify the
+  printed result.  A style is specified by a name with a possibly
+  empty list of arguments;  multiple styles can be sequenced with
+  commas.  The following standard styles are available:
+
+  \begin{description}
+  
+  \item @{text lhs} extracts the first argument of any application
+  form with at least two arguments --- typically meta-level or
+  object-level equality, or any other binary relation.
+  
+  \item @{text rhs} is like @{text lhs}, but extracts the second
+  argument.
+  
+  \item @{text "concl"} extracts the conclusion @{text C} from a rule
+  in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
+  
+  \item @{text "prem"} @{text n} extract premise number
+  @{text "n"} from from a rule in Horn-clause
+  normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
+
+  \end{description}
+*}
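+
+text {* For example, assuming an equational fact @{text foo} and a rule
+  @{text bar} in the current context, @{verbatim "@{thm (lhs) foo}"} and
+  @{verbatim "@{thm (rhs) foo}"} print the respective sides of the
+  equation, while @{verbatim "@{thm (prem 1) bar}"} and
+  @{verbatim "@{thm (concl) bar}"} print the first premise and the
+  conclusion of the rule; the names @{text foo} and @{text bar} are mere
+  placeholders. *}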
+
+
+subsection {* General options *}
+
+text {* The following options are available to tune the printed output
+  of antiquotations.  Note that many of these coincide with system and
+  configuration options of the same names.
+
+  \begin{description}
+
+  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
+  @{antiquotation_option_def show_sorts}~@{text "= bool"} control
+  printing of explicit type and sort constraints.
+
+  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
+  controls printing of implicit structures.
+
+  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
+  controls folding of abbreviations.
+
+  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
+  names of types and constants etc.\ to be printed in their fully
+  qualified internal form.
+
+  \item @{antiquotation_option_def names_short}~@{text "= bool"}
+  forces names of types and constants etc.\ to be printed unqualified.
+  Note that internalizing the output again in the current context may
+  well yield a different result.
+
+  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
+  determines whether the printed version of qualified names should be
+  made sufficiently long to avoid overlap with names declared further
+  back.  Set to @{text false} for more concise output.
+
+  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
+  prints terms in @{text \<eta>}-contracted form.
+
+  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
+  if the text is to be output as multi-line ``display material'',
+  rather than a small piece of text without line breaks (which is the
+  default).
+
+  In this mode the embedded entities are printed in the same style as
+  the main theory text.
+
+  \item @{antiquotation_option_def break}~@{text "= bool"} controls
+  line breaks in non-display material.
+
+  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
+  if the output should be enclosed in double quotes.
+
+  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
+  name} to the print mode to be used for presentation.  Note that the
+  standard setup for {\LaTeX} output is already present by default,
+  including the modes @{text latex} and @{text xsymbols}.
+
+  \item @{antiquotation_option_def margin}~@{text "= nat"} and
+  @{antiquotation_option_def indent}~@{text "= nat"} change the margin
+  or indentation for pretty printing of display material.
+
+  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
+  determines the maximum number of subgoals to be printed (for goal-based
+  antiquotation).
+
+  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
+  original source text of the antiquotation arguments, rather than their
+  internal representation.  Note that formal checking of
+  @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
+  enabled; use the @{antiquotation "text"} antiquotation for unchecked
+  output.
+
+  Regular @{text "term"} and @{text "typ"} antiquotations with @{text
+  "source = false"} involve a full round-trip from the original source
+  to an internalized logical entity back to a source form, according
+  to the syntax of the current context.  Thus the printed output is
+  not under direct control of the author; it may even fluctuate a bit
+  as the underlying theory is changed later on.
+
+  In contrast, @{antiquotation_option source}~@{text "= true"}
+  admits direct printing of the given source text, with the desirable
+  well-formedness check in the background, but without modification of
+  the printed text.
+
+  \end{description}
+
+  For boolean flags, ``@{text "name = true"}'' may be abbreviated as
+  ``@{text name}''.  All of the above flags are disabled by default,
+  unless changed specifically for a logic session in the corresponding
+  @{verbatim "ROOT"} file.  *}
+
+
+section {* Markup via command tags \label{sec:tags} *}
+
+text {* Each Isabelle/Isar command may be decorated by additional
+  presentation tags, to indicate some modification in the way it is
+  printed in the document.
+
+  @{rail \<open>
+    @{syntax_def tags}: ( tag * )
+    ;
+    tag: '%' (@{syntax ident} | @{syntax string})
+  \<close>}
+
+  Some tags are pre-declared for certain classes of commands, serving
+  as default markup if no tags are given in the text:
+
+  \medskip
+  \begin{tabular}{ll}
+    @{text "theory"} & theory begin/end \\
+    @{text "proof"} & all proof commands \\
+    @{text "ML"} & all commands involving ML code \\
+  \end{tabular}
+
+  \medskip The Isabelle document preparation system
+  \cite{isabelle-sys} allows tagged command regions to be presented
+  specifically, e.g.\ to fold proof texts, or drop parts of the text
+  completely.
+
+  For example ``@{command "by"}~@{text "%invisible auto"}'' causes
+  that piece of proof to be treated as @{text invisible} instead of
+  @{text "proof"} (the default), which may be shown or hidden
+  depending on the document setup.  In contrast, ``@{command
+  "by"}~@{text "%visible auto"}'' forces this text to be shown
+  invariably.
+
+  Explicit tag specifications within a proof apply to all subsequent
+  commands of the same level of nesting.  For example, ``@{command
+  "proof"}~@{text "%visible \<dots>"}~@{command "qed"}'' forces the whole
+  sub-proof to be typeset as @{text visible} (unless some of its parts
+  are tagged differently).
+
+  \medskip Command tags merely produce certain markup environments for
+  type-setting.  The meaning of these is determined by {\LaTeX}
+  macros, as defined in @{file "~~/lib/texinputs/isabelle.sty"} or
+  by the document author.  The Isabelle document preparation tools
+  also provide some high-level options to specify the meaning of
+  arbitrary tags to ``keep'', ``drop'', or ``fold'' the corresponding
+  parts of the text.  Logic sessions may also specify ``document
+  versions'', where given tags are interpreted in some particular way.
+  Again see \cite{isabelle-sys} for further details.
+*}
+
+
+section {* Railroad diagrams *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{antiquotation_def "rail"} & : & @{text antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+    'rail' (@{syntax string} | @{syntax cartouche})
+  \<close>}
+
+  The @{antiquotation rail} antiquotation supports the inclusion of syntax
+  diagrams in Isabelle documents.  {\LaTeX} requires the style file
+  @{file "~~/lib/texinputs/pdfsetup.sty"}, which can be used via
+  @{verbatim "\\usepackage{pdfsetup}"} in @{verbatim "root.tex"}, for
+  example.
+
+  The rail specification language is quoted here as Isabelle @{syntax
+  string} or text @{syntax "cartouche"}; it has its own grammar given
+  below.
+
+  \begingroup
+  \def\isasymnewline{\isatext{\tt\isacharbackslash<newline>}}
+  @{rail \<open>
+  rule? + ';'
+  ;
+  rule: ((identifier | @{syntax antiquotation}) ':')? body
+  ;
+  body: concatenation + '|'
+  ;
+  concatenation: ((atom '?'?) +) (('*' | '+') atom?)?
+  ;
+  atom: '(' body? ')' | identifier |
+    '@'? (string | @{syntax antiquotation}) |
+    '\<newline>'
+  \<close>}
+  \endgroup
+
+  The lexical syntax of @{text "identifier"} coincides with that of
+  @{syntax ident} in regular Isabelle syntax, but @{text string} uses
+  single quotes instead of double quotes of the standard @{syntax
+  string} category.
+
+  Each @{text rule} defines a formal language (with optional name),
+  using a notation that is similar to EBNF or regular expressions with
+  recursion.  The meaning and visual appearance of these rail language
+  elements are illustrated by the following representative examples.
+
+  \begin{itemize}
+
+  \item Empty @{verbatim "()"}
+
+  @{rail \<open>()\<close>}
+
+  \item Nonterminal @{verbatim "A"}
+
+  @{rail \<open>A\<close>}
+
+  \item Nonterminal via Isabelle antiquotation
+  @{verbatim "@{syntax method}"}
+
+  @{rail \<open>@{syntax method}\<close>}
+
+  \item Terminal @{verbatim "'xyz'"}
+
+  @{rail \<open>'xyz'\<close>}
+
+  \item Terminal in keyword style @{verbatim "@'xyz'"}
+
+  @{rail \<open>@'xyz'\<close>}
+
+  \item Terminal via Isabelle antiquotation
+  @{verbatim "@@{method rule}"}
+
+  @{rail \<open>@@{method rule}\<close>}
+
+  \item Concatenation @{verbatim "A B C"}
+
+  @{rail \<open>A B C\<close>}
+
+  \item Newline inside concatenation
+  @{verbatim "A B C \<newline> D E F"}
+
+  @{rail \<open>A B C \<newline> D E F\<close>}
+
+  \item Variants @{verbatim "A | B | C"}
+
+  @{rail \<open>A | B | C\<close>}
+
+  \item Option @{verbatim "A ?"}
+
+  @{rail \<open>A ?\<close>}
+
+  \item Repetition @{verbatim "A *"}
+
+  @{rail \<open>A *\<close>}
+
+  \item Repetition with separator @{verbatim "A * sep"}
+
+  @{rail \<open>A * sep\<close>}
+
+  \item Strict repetition @{verbatim "A +"}
+
+  @{rail \<open>A +\<close>}
+
+  \item Strict repetition with separator @{verbatim "A + sep"}
+
+  @{rail \<open>A + sep\<close>}
+
+  \end{itemize}
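+
+  These elements may be combined into larger rules.  As a purely
+  illustrative example, the hypothetical rule
+  @{verbatim "any: A | B C ? | (D +)"} is rendered as follows:
+
+  @{rail \<open>any: A | B C ? | (D +)\<close>}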
+*}
+
+
+section {* Draft presentation *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "display_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command display_drafts} (@{syntax name} +)
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "display_drafts"}~@{text paths} performs simple output of a
+  given list of raw source files. Only those symbols that do not require
+  additional {\LaTeX} packages are displayed properly; everything else is left
+  verbatim.
+
+  \end{description}
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/First_Order_Logic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,520 @@
+
+header {* Example: First-Order Logic *}
+
+theory %visible First_Order_Logic
+imports Base  (* FIXME Pure!? *)
+begin
+
+text {*
+  \noindent In order to commence a new object-logic within
+  Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
+  for individuals and @{text "o"} for object-propositions.  The latter
+  is embedded into the language of Pure propositions by means of a
+  separate judgment.
+*}
+
+typedecl i
+typedecl o
+
+judgment
+  Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
+
+text {*
+  \noindent Note that the object-logic judgment is implicit in the
+  syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
+  From the Pure perspective this means ``@{prop A} is derivable in the
+  object-logic''.
+*}
+
+
+subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
+
+text {*
+  Equality is axiomatized as a binary predicate on individuals, with
+  reflexivity as introduction, and substitution as elimination
+  principle.  Note that the latter is particularly convenient in a
+  framework like Isabelle, because syntactic congruences are
+  implicitly produced by unification of @{term "B x"} against
+  expressions containing occurrences of @{term x}.
+*}
+
+axiomatization
+  equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infix "=" 50)
+where
+  refl [intro]: "x = x" and
+  subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
+
+text {*
+  \noindent Substitution is very powerful, but also hard to control in
+  full generality.  We derive some common symmetry~/ transitivity
+  schemes of @{term equal} as particular consequences.
+*}
+
+theorem sym [sym]:
+  assumes "x = y"
+  shows "y = x"
+proof -
+  have "x = x" ..
+  with `x = y` show "y = x" ..
+qed
+
+theorem forw_subst [trans]:
+  assumes "y = x" and "B x"
+  shows "B y"
+proof -
+  from `y = x` have "x = y" ..
+  from this and `B x` show "B y" ..
+qed
+
+theorem back_subst [trans]:
+  assumes "B x" and "x = y"
+  shows "B y"
+proof -
+  from `x = y` and `B x`
+  show "B y" ..
+qed
+
+theorem trans [trans]:
+  assumes "x = y" and "y = z"
+  shows "x = z"
+proof -
+  from `y = z` and `x = y`
+  show "x = z" ..
+qed
+
+
+subsection {* Basic group theory *}
+
+text {*
+  As an example for equational reasoning we consider some bits of
+  group theory.  The subsequent locale definition postulates group
+  operations and axioms; we also derive some consequences of this
+  specification.
+*}
+
+locale group =
+  fixes prod :: "i \<Rightarrow> i \<Rightarrow> i"  (infix "\<circ>" 70)
+    and inv :: "i \<Rightarrow> i"  ("(_\<inverse>)" [1000] 999)
+    and unit :: i  ("1")
+  assumes assoc: "(x \<circ> y) \<circ> z = x \<circ> (y \<circ> z)"
+    and left_unit:  "1 \<circ> x = x"
+    and left_inv: "x\<inverse> \<circ> x = 1"
+begin
+
+theorem right_inv: "x \<circ> x\<inverse> = 1"
+proof -
+  have "x \<circ> x\<inverse> = 1 \<circ> (x \<circ> x\<inverse>)" by (rule left_unit [symmetric])
+  also have "\<dots> = (1 \<circ> x) \<circ> x\<inverse>" by (rule assoc [symmetric])
+  also have "1 = (x\<inverse>)\<inverse> \<circ> x\<inverse>" by (rule left_inv [symmetric])
+  also have "\<dots> \<circ> x = (x\<inverse>)\<inverse> \<circ> (x\<inverse> \<circ> x)" by (rule assoc)
+  also have "x\<inverse> \<circ> x = 1" by (rule left_inv)
+  also have "((x\<inverse>)\<inverse> \<circ> \<dots>) \<circ> x\<inverse> = (x\<inverse>)\<inverse> \<circ> (1 \<circ> x\<inverse>)" by (rule assoc)
+  also have "1 \<circ> x\<inverse> = x\<inverse>" by (rule left_unit)
+  also have "(x\<inverse>)\<inverse> \<circ> \<dots> = 1" by (rule left_inv)
+  finally show "x \<circ> x\<inverse> = 1" .
+qed
+
+theorem right_unit: "x \<circ> 1 = x"
+proof -
+  have "1 = x\<inverse> \<circ> x" by (rule left_inv [symmetric])
+  also have "x \<circ> \<dots> = (x \<circ> x\<inverse>) \<circ> x" by (rule assoc [symmetric])
+  also have "x \<circ> x\<inverse> = 1" by (rule right_inv)
+  also have "\<dots> \<circ> x = x" by (rule left_unit)
+  finally show "x \<circ> 1 = x" .
+qed
+
+text {*
+  \noindent Reasoning from basic axioms is often tedious.  Our proofs
+  work by producing various instances of the given rules (potentially
+  the symmetric form) using the pattern ``@{command have}~@{text
+  eq}~@{command "by"}~@{text "(rule r)"}'' and composing the chain of
+  results via @{command also}/@{command finally}.  These steps may
+  involve any of the transitivity rules declared in
+  \secref{sec:framework-ex-equal}, namely @{thm trans} in combining
+  the first two results in @{thm right_inv} and in the final steps of
+  both proofs, @{thm forw_subst} in the first combination of @{thm
+  right_unit}, and @{thm back_subst} in all other calculational steps.
+
+  Occasional substitutions in calculations are adequate, but should
+  not be over-emphasized.  The other extreme is to compose a chain by
+  plain transitivity only, with replacements always occurring in
+  topmost position. For example:
+*}
+
+(*<*)
+theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+  assume [symmetric, defn]: "\<And>x y. (x \<equiv> y) \<equiv> Trueprop (x = y)"
+(*>*)
+  have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" unfolding left_inv ..
+  also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" unfolding assoc ..
+  also have "\<dots> = 1 \<circ> x" unfolding right_inv ..
+  also have "\<dots> = x" unfolding left_unit ..
+  finally have "x \<circ> 1 = x" .
+(*<*)
+qed
+(*>*)
+
+text {*
+  \noindent Here we have re-used the built-in mechanism for unfolding
+  definitions in order to normalize each equational problem.  A more
+  realistic object-logic would include proper setup for the Simplifier
+  (\secref{sec:simplifier}), the main automated tool for equational
+  reasoning in Isabelle.  Then ``@{command unfolding}~@{thm
+  left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
+  "(simp only: left_inv)"}'' etc.
+*}
+
+end
+
+
+subsection {* Propositional logic \label{sec:framework-ex-prop} *}
+
+text {*
+  We axiomatize basic connectives of propositional logic: implication,
+  disjunction, and conjunction.  The associated rules are modeled
+  after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.
+*}
+
+axiomatization
+  imp :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<longrightarrow>" 25) where
+  impI [intro]: "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B" and
+  impD [dest]: "(A \<longrightarrow> B) \<Longrightarrow> A \<Longrightarrow> B"
+
+axiomatization
+  disj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<or>" 30) where
+  disjI\<^sub>1 [intro]: "A \<Longrightarrow> A \<or> B" and
+  disjI\<^sub>2 [intro]: "B \<Longrightarrow> A \<or> B" and
+  disjE [elim]: "A \<or> B \<Longrightarrow> (A \<Longrightarrow> C) \<Longrightarrow> (B \<Longrightarrow> C) \<Longrightarrow> C"
+
+axiomatization
+  conj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<and>" 35) where
+  conjI [intro]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" and
+  conjD\<^sub>1: "A \<and> B \<Longrightarrow> A" and
+  conjD\<^sub>2: "A \<and> B \<Longrightarrow> B"
+
+text {*
+  \noindent The conjunctive destructions have the disadvantage that
+  decomposing @{prop "A \<and> B"} involves an immediate decision about which
+  component should be projected.  The more convenient simultaneous
+  elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
+  follows:
+*}
+
+theorem conjE [elim]:
+  assumes "A \<and> B"
+  obtains A and B
+proof
+  from `A \<and> B` show A by (rule conjD\<^sub>1)
+  from `A \<and> B` show B by (rule conjD\<^sub>2)
+qed
+
+text {*
+  \noindent Here is an example of swapping conjuncts with a single
+  intermediate elimination step:
+*}
+
+(*<*)
+lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+(*>*)
+  assume "A \<and> B"
+  then obtain B and A ..
+  then have "B \<and> A" ..
+(*<*)
+qed
+(*>*)
+
+text {*
+  \noindent Note that the analogous elimination rule for disjunction
+  ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
+  the original axiomatization of @{thm disjE}.
+
+  \medskip We continue propositional logic by introducing absurdity
+  with its characteristic elimination.  Plain truth may then be
+  defined as a proposition that is trivially true.
+*}
+
+axiomatization
+  false :: o  ("\<bottom>") where
+  falseE [elim]: "\<bottom> \<Longrightarrow> A"
+
+definition
+  true :: o  ("\<top>") where
+  "\<top> \<equiv> \<bottom> \<longrightarrow> \<bottom>"
+
+theorem trueI [intro]: \<top>
+  unfolding true_def ..
+
+text {*
+  \medskip\noindent Now negation represents an implication towards
+  absurdity:
+*}
+
+definition
+  not :: "o \<Rightarrow> o"  ("\<not> _" [40] 40) where
+  "\<not> A \<equiv> A \<longrightarrow> \<bottom>"
+
+theorem notI [intro]:
+  assumes "A \<Longrightarrow> \<bottom>"
+  shows "\<not> A"
+unfolding not_def
+proof
+  assume A
+  then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
+qed
+
+theorem notE [elim]:
+  assumes "\<not> A" and A
+  shows B
+proof -
+  from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
+  from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
+  then show B ..
+qed
+
+
+subsection {* Classical logic *}
+
+text {*
+  Subsequently we state the principle of classical contradiction as a
+  local assumption.  Thus we refrain from forcing the object-logic
+  into the classical perspective.  Within that context, we may derive
+  well-known consequences of the classical principle.
+*}
+
+locale classical =
+  assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
+begin
+
+theorem double_negation:
+  assumes "\<not> \<not> C"
+  shows C
+proof (rule classical)
+  assume "\<not> C"
+  with `\<not> \<not> C` show C ..
+qed
+
+theorem tertium_non_datur: "C \<or> \<not> C"
+proof (rule double_negation)
+  show "\<not> \<not> (C \<or> \<not> C)"
+  proof
+    assume "\<not> (C \<or> \<not> C)"
+    have "\<not> C"
+    proof
+      assume C then have "C \<or> \<not> C" ..
+      with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+    qed
+    then have "C \<or> \<not> C" ..
+    with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+  qed
+qed
+
+text {*
+  \noindent These examples illustrate both classical reasoning and
+  non-trivial propositional proofs in general.  All three rules
+  characterize classical logic independently, but the original rule is
+  already the most convenient to use, because it leaves the conclusion
+  unchanged.  Note that @{prop "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"} fits again into our
+  format for eliminations, despite the additional twist that the
+  context refers to the main conclusion.  So we may write @{thm
+  classical} as the Isar statement ``@{text "\<OBTAINS> \<not> thesis"}''.
+  This also explains nicely how classical reasoning really works:
+  whatever the main @{text thesis} might be, we may always assume its
+  negation!
+*}
+
+end
+
+
+subsection {* Quantifiers \label{sec:framework-ex-quant} *}
+
+text {*
+  Representing quantifiers is easy, thanks to the higher-order nature
+  of the underlying framework.  According to the well-known technique
+  introduced by Church \cite{church40}, quantifiers are operators on
+  predicates, which are syntactically represented as @{text "\<lambda>"}-terms
+  of type @{typ "i \<Rightarrow> o"}.  Binder notation turns @{text "All (\<lambda>x. B
+  x)"} into @{text "\<forall>x. B x"} etc.
+*}
+
+axiomatization
+  All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) where
+  allI [intro]: "(\<And>x. B x) \<Longrightarrow> \<forall>x. B x" and
+  allD [dest]: "(\<forall>x. B x) \<Longrightarrow> B a"
+
+axiomatization
+  Ex :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<exists>" 10) where
+  exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
+  exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
+
+text {*
+  \noindent The statement of @{thm exE} corresponds to ``@{text
+  "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar.  In the
+  subsequent example we illustrate quantifier reasoning involving all
+  four rules:
+*}
+
+theorem
+  assumes "\<exists>x. \<forall>y. R x y"
+  shows "\<forall>y. \<exists>x. R x y"
+proof    -- {* @{text "\<forall>"} introduction *}
+  obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` ..    -- {* @{text "\<exists>"} elimination *}
+  fix y have "R x y" using `\<forall>y. R x y` ..    -- {* @{text "\<forall>"} destruction *}
+  then show "\<exists>x. R x y" ..    -- {* @{text "\<exists>"} introduction *}
+qed
+
+
+subsection {* Canonical reasoning patterns *}
+
+text {*
+  The main rules of first-order predicate logic from
+  \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
+  can now be summarized as follows, using the native Isar statement
+  format of \secref{sec:framework-stmt}.
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
+  @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
+
+  @{text "disjI\<^sub>1: \<ASSUMES> A \<SHOWS> A \<or> B"} \\
+  @{text "disjI\<^sub>2: \<ASSUMES> B \<SHOWS> A \<or> B"} \\
+  @{text "disjE: \<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"} \\[1ex]
+
+  @{text "conjI: \<ASSUMES> A \<AND> B \<SHOWS> A \<and> B"} \\
+  @{text "conjE: \<ASSUMES> A \<and> B \<OBTAINS> A \<AND> B"} \\[1ex]
+
+  @{text "falseE: \<ASSUMES> \<bottom> \<SHOWS> A"} \\
+  @{text "trueI: \<SHOWS> \<top>"} \\[1ex]
+
+  @{text "notI: \<ASSUMES> A \<Longrightarrow> \<bottom> \<SHOWS> \<not> A"} \\
+  @{text "notE: \<ASSUMES> \<not> A \<AND> A \<SHOWS> B"} \\[1ex]
+
+  @{text "allI: \<ASSUMES> \<And>x. B x \<SHOWS> \<forall>x. B x"} \\
+  @{text "allE: \<ASSUMES> \<forall>x. B x \<SHOWS> B a"} \\[1ex]
+
+  @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
+  @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
+  \end{tabular}
+  \medskip
+
+  \noindent This essentially provides a declarative reading of Pure
+  rules as Isar reasoning patterns: the rule statements tell how a
+  canonical proof outline is supposed to look.  Since the above rules have
+  already been declared as @{attribute (Pure) intro}, @{attribute
+  (Pure) elim}, @{attribute (Pure) dest} --- each according to its
+  particular shape --- we can immediately write Isar proof texts as
+  follows:
+*}
+
+(*<*)
+theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+(*>*)
+
+  txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<longrightarrow> B"
+  proof
+    assume A
+    show B sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<longrightarrow> B" and A sorry %noproof
+  then have B ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have A sorry %noproof
+  then have "A \<or> B" ..
+
+  have B sorry %noproof
+  then have "A \<or> B" ..
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<or> B" sorry %noproof
+  then have C
+  proof
+    assume A
+    then show C sorry %noproof
+  next
+    assume B
+    then show C sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have A and B sorry %noproof
+  then have "A \<and> B" ..
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<and> B" sorry %noproof
+  then obtain A and B ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<bottom>" sorry %noproof
+  then have A ..
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<top>" ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<not> A"
+  proof
+    assume A
+    then show "\<bottom>" sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<not> A" and A sorry %noproof
+  then have B ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<forall>x. B x"
+  proof
+    fix x
+    show "B x" sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<forall>x. B x" sorry %noproof
+  then have "B a" ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<exists>x. B x"
+  proof
+    show "B a" sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<exists>x. B x" sorry %noproof
+  then obtain a where "B a" ..
+
+  txt_raw {*\end{minipage}*}
+
+(*<*)
+qed
+(*>*)
+
+text {*
+  \bigskip\noindent Of course, these proofs are merely examples.  As
+  sketched in \secref{sec:framework-subproof}, there is a fair amount
+  of flexibility in expressing Pure deductions in Isar.  Here the user
+  is asked to express himself adequately, aiming at proof texts of
+  literary quality.
+*}
+
+end %visible
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Framework.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1016 @@
+theory Framework
+imports Base Main
+begin
+
+chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
+
+text {*
+  Isabelle/Isar
+  \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
+  is intended as a generic framework for developing formal
+  mathematical documents with full proof checking.  Definitions and
+  proofs are organized as theories.  An assembly of theory sources may
+  be presented as a printed document; see also
+  \chref{ch:document-prep}.
+
+  The main objective of Isar is the design of a human-readable
+  structured proof language, which is called the ``primary proof
+  format'' in Isar terminology.  Such a primary proof language is
+  somewhere in the middle between the extremes of primitive proof
+  objects and actual natural language.  In this respect, Isar is a bit
+  more formalistic than Mizar
+  \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
+  using logical symbols for certain reasoning schemes where Mizar
+  would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
+  further comparisons of these systems.
+
+  So Isar challenges the traditional way of recording informal proofs
+  in mathematical prose, as well as the common tendency to see fully
+  formal proofs directly as objects of some logical calculus (e.g.\
+  @{text "\<lambda>"}-terms in a version of type theory).  In fact, Isar is
+  better understood as an interpreter of a simple block-structured
+  language for describing the data flow of local facts and goals,
+  interspersed with occasional invocations of proof methods.
+  Everything is reduced to logical inferences internally, but these
+  steps are somewhat marginal compared to the overall bookkeeping of
+  the interpretation process.  Thanks to careful design of the syntax
+  and semantics of Isar language elements, a formal record of Isar
+  instructions may later appear as an intelligible text to the
+  attentive reader.
+
+  The Isar proof language has emerged from careful analysis of some
+  inherent virtues of the existing logical framework of Isabelle/Pure
+  \cite{paulson-found,paulson700}, notably composition of higher-order
+  natural deduction rules, which is a generalization of Gentzen's
+  original calculus \cite{Gentzen:1935}.  The approach of generic
+  inference systems in Pure is continued by Isar towards actual proof
+  texts.
+
+  Concrete applications require another intermediate layer: an
+  object-logic.  Isabelle/HOL \cite{isa-tutorial} (simply-typed
+  set-theory) is used most of the time; Isabelle/ZF
+  \cite{isabelle-ZF} is less extensively developed, although it would
+  probably fit better for classical mathematics.
+
+  \medskip In order to illustrate natural deduction in Isar, we shall
+  refer to the background theory and library of Isabelle/HOL.  This
+  includes common notions of predicate logic, naive set-theory etc.\
+  using fairly standard mathematical notation.  From the perspective
+  of generic natural deduction there is nothing special about the
+  logical connectives of HOL (@{text "\<and>"}, @{text "\<or>"}, @{text "\<forall>"},
+  @{text "\<exists>"}, etc.), only the resulting reasoning principles are
+  relevant to the user.  There are similar rules available for
+  set-theory operators (@{text "\<inter>"}, @{text "\<union>"}, @{text "\<Inter>"}, @{text
+  "\<Union>"}, etc.), or any other theory developed in the library (lattice
+  theory, topology etc.).
+
+  Subsequently we briefly review fragments of Isar proof texts
+  corresponding directly to such general deduction schemes.  The
+  examples shall refer to set-theory, to minimize the danger of
+  understanding connectives of predicate logic as something special.
+
+  \medskip The following deduction performs @{text "\<inter>"}-introduction,
+  working forwards from assumptions towards the conclusion.  We give
+  both the Isar text, and depict the primitive rule involved, as
+  determined by unification of the problem against rules that are
+  declared in the library context.
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> A" and "x \<in> B"
+    then have "x \<in> A \<inter> B" ..
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+  \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Note that @{command assume} augments the proof
+  context, @{command then} indicates that the current fact shall be
+  used in the next step, and @{command have} states an intermediate
+  goal.  The two dots ``@{command ".."}'' refer to a complete proof of
+  this claim, using the indicated facts and a canonical rule from the
+  context.  We could have been more explicit here by spelling out the
+  final proof step via the @{command "by"} command:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> A" and "x \<in> B"
+    then have "x \<in> A \<inter> B" by (rule IntI)
+(*<*)
+end
+(*>*)
+
+text {*
+  \noindent The format of the @{text "\<inter>"}-introduction rule represents
+  the most basic inference, which proceeds from given premises to a
+  conclusion, without any nested proof context involved.
+
+  The next example performs backwards introduction on @{term "\<Inter>\<A>"},
+  the intersection of all sets within a given set.  This requires a
+  nested proof of set membership within a local context, where @{term
+  A} is an arbitrary-but-fixed member of the collection:
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    have "x \<in> \<Inter>\<A>"
+    proof
+      fix A
+      assume "A \<in> \<A>"
+      show "x \<in> A" sorry %noproof
+    qed
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+  \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent This Isar reasoning pattern again refers to the
+  primitive rule depicted above.  The system determines it in the
+  ``@{command proof}'' step, which could have been spelt out more
+  explicitly as ``@{command proof}~@{text "(rule InterI)"}''.  Note
+  that the rule involves both a local parameter @{term "A"} and an
+  assumption @{prop "A \<in> \<A>"} in the nested reasoning.  This kind of
+  compound rule typically demands a genuine sub-proof in Isar, working
+  backwards rather than forwards as seen before.  In the proof body we
+  encounter the @{command fix}-@{command assume}-@{command show}
+  outline of nested sub-proofs that is typical for Isar.  The final
+  @{command show} is like @{command have} followed by an additional
+  refinement of the enclosing claim, using the rule derived from the
+  proof body.
+
+  \medskip The next example involves @{term "\<Union>\<A>"}, which can be
+  characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
+  \<in> A \<and> A \<in> \<A>"}.  The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
+  not mention @{text "\<exists>"} and @{text "\<and>"} at all, but admits obtaining
+  directly a local @{term "A"} such that @{prop "x \<in> A"} and @{prop "A
+  \<in> \<A>"} hold.  This corresponds to the following Isar proof and
+  inference rule, respectively:
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> \<Union>\<A>"
+    then have C
+    proof
+      fix A
+      assume "x \<in> A" and "A \<in> \<A>"
+      show C sorry %noproof
+    qed
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+  \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Although the Isar proof follows the natural
+  deduction rule closely, the text does not read as naturally as
+  anticipated.  There is a double occurrence of an arbitrary
+  conclusion @{prop "C"}, which represents the final result, but is
+  irrelevant for now.  This issue arises for any elimination rule
+  involving local parameters.  Isar provides the derived language
+  element @{command obtain}, which is able to perform the same
+  elimination proof more conveniently:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> \<Union>\<A>"
+    then obtain A where "x \<in> A" and "A \<in> \<A>" ..
+(*<*)
+end
+(*>*)
+
+text {*
+  \noindent Here we avoid mentioning the final conclusion @{prop "C"}
+  and return to plain forward reasoning.  The rule involved in the
+  ``@{command ".."}'' proof is the same as before.
+*}
+
+
+section {* The Pure framework \label{sec:framework-pure} *}
+
+text {*
+  The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
+  fragment of higher-order logic \cite{church40}.  In type-theoretic
+  parlance, there are three levels of @{text "\<lambda>"}-calculus with
+  corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
+  @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
+  @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
+  \end{tabular}
+  \medskip
+
+  \noindent Here only the types of syntactic terms, and the
+  propositions of proof terms have been shown.  The @{text
+  "\<lambda>"}-structure of proofs can be recorded as an optional feature of
+  the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
+  the formal system can never depend on them due to \emph{proof
+  irrelevance}.
+
+  On top of this most primitive layer of proofs, Pure implements a
+  generic calculus for nested natural deduction rules, similar to
+  \cite{Schroeder-Heister:1984}.  Here object-logic inferences are
+  internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
+  Combining such rule statements may involve higher-order unification
+  \cite{paulson-natural}.
+*}
+
+
+subsection {* Primitive inferences *}
+
+text {*
+  Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
+  \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
+  implicit thanks to type-inference; terms of type @{text "prop"} are
+  called propositions.  Logical statements are composed via @{text "\<And>x
+  :: \<alpha>. B(x)"} and @{text "A \<Longrightarrow> B"}.  Primitive reasoning operates on
+  judgments of the form @{text "\<Gamma> \<turnstile> \<phi>"}, with standard introduction
+  and elimination rules for @{text "\<And>"} and @{text "\<Longrightarrow>"} that refer to
+  fixed parameters @{text "x\<^sub>1, \<dots>, x\<^sub>m"} and hypotheses
+  @{text "A\<^sub>1, \<dots>, A\<^sub>n"} from the context @{text "\<Gamma>"};
+  the corresponding proof terms are left implicit.  The subsequent
+  inference rules define @{text "\<Gamma> \<turnstile> \<phi>"} inductively, relative to a
+  collection of axioms:
+
+  \[
+  \infer{@{text "\<turnstile> A"}}{(@{text "A"} \text{~axiom})}
+  \qquad
+  \infer{@{text "A \<turnstile> A"}}{}
+  \]
+
+  \[
+  \infer{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}{@{text "\<Gamma> \<turnstile> B(x)"} & @{text "x \<notin> \<Gamma>"}}
+  \qquad
+  \infer{@{text "\<Gamma> \<turnstile> B(a)"}}{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}
+  \]
+
+  \[
+  \infer{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+  \qquad
+  \infer{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+  \]
+
+  Furthermore, Pure provides a built-in equality @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow>
+  prop"} with axioms for reflexivity, substitution, extensionality,
+  and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
+
+  \medskip An object-logic introduces another layer on top of Pure,
+  e.g.\ with types @{text "i"} for individuals and @{text "o"} for
+  propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
+  (implicit) derivability judgment and connectives like @{text "\<and> :: o
+  \<Rightarrow> o \<Rightarrow> o"} or @{text "\<forall> :: (i \<Rightarrow> o) \<Rightarrow> o"}, and axioms for object-level
+  rules such as @{text "conjI: A \<Longrightarrow> B \<Longrightarrow> A \<and> B"} or @{text "allI: (\<And>x. B
+  x) \<Longrightarrow> \<forall>x. B x"}.  Derived object rules are represented as theorems of
+  Pure.  After the initial object-logic setup, further axiomatizations
+  are usually avoided; plain definitions and derived principles are
+  used exclusively.
+*}
+
+
+subsection {* Reasoning with rules \label{sec:framework-resolution} *}
+
+text {*
+  Primitive inferences mostly serve foundational purposes.  The main
+  reasoning mechanisms of Pure operate on nested natural deduction
+  rules expressed as formulae, using @{text "\<And>"} to bind local
+  parameters and @{text "\<Longrightarrow>"} to express entailment.  Multiple
+  parameters and premises are represented by repeating these
+  connectives in a right-associative manner.
+
+  Since @{text "\<And>"} and @{text "\<Longrightarrow>"} commute thanks to the theorem
+  @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}, we may assume w.l.o.g.\
+  that rule statements always observe the normal form where
+  quantifiers are pulled in front of implications at each level of
+  nesting.  This means that any Pure proposition may be presented as a
+  \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
+  form @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. H\<^sub>1 \<Longrightarrow> \<dots> H\<^sub>n \<Longrightarrow>
+  A"} for @{text "m, n \<ge> 0"}, and @{text "A"} atomic, and @{text
+  "H\<^sub>1, \<dots>, H\<^sub>n"} being recursively of the same format.
+  Following the convention that outermost quantifiers are implicit,
+  Horn clauses @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"} are a special
+  case of this.
+
+  For example, the @{text "\<inter>"}-introduction rule encountered before is
+  represented as a Pure theorem as follows:
+  \[
+  @{text "IntI:"}~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
+  \]
+
+  \noindent This is a plain Horn clause, since no further nesting on
+  the left is involved.  The general @{text "\<Inter>"}-introduction
+  corresponds to a Hereditary Harrop Formula with one additional level
+  of nesting:
+  \[
+  @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
+  \]
+
+  \medskip Goals are also represented as rules: @{text "A\<^sub>1 \<Longrightarrow>
+  \<dots> A\<^sub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^sub>1, \<dots>,
+  A\<^sub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
+  goal is finished.  To allow @{text "C"} to be a rule statement
+  itself, we introduce the protective marker @{text "# :: prop \<Rightarrow>
+  prop"}, which is defined as identity and hidden from the user.  We
+  initialize and finish goal states as follows:
+
+  \[
+  \begin{array}{c@ {\qquad}c}
+  \infer[(@{inference_def init})]{@{text "C \<Longrightarrow> #C"}}{} &
+  \infer[(@{inference_def finish})]{@{text C}}{@{text "#C"}}
+  \end{array}
+  \]
+
+  \noindent Goal states are refined in intermediate proof steps until
+  a finished form is achieved.  Here the two main reasoning principles
+  are @{inference resolution}, for back-chaining a rule against a
+  sub-goal (replacing it by zero or more sub-goals), and @{inference
+  assumption}, for solving a sub-goal (finding a short-circuit with
+  local assumptions).  Below @{text "\<^vec>x"} stands for @{text
+  "x\<^sub>1, \<dots>, x\<^sub>n"} (@{text "n \<ge> 0"}).
+
+  \[
+  \infer[(@{inference_def resolution})]
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {\begin{tabular}{rl}
+    @{text "rule:"} &
+    @{text "\<^vec>A \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
+    @{text "goal:"} &
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "goal unifier:"} &
+    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+   \end{tabular}}
+  \]
+
+  \medskip
+
+  \[
+  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
+  {\begin{tabular}{rl}
+    @{text "goal:"} &
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "assm unifier:"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text "H\<^sub>i"})} \\
+   \end{tabular}}
+  \]
+
+  The following trace illustrates goal-oriented reasoning in
+  Isabelle/Pure:
+
+  {\footnotesize
+  \medskip
+  \begin{tabular}{r@ {\quad}l}
+  @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
+  @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
+  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> B)"} \\
+  @{text "(A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(assumption)"} \\
+  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> A)"} \\
+  @{text "#\<dots>"} & @{text "(assumption)"} \\
+  @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
+  \end{tabular}
+  \medskip
+  }
+
+  Compositions of @{inference assumption} after @{inference
+  resolution} occur quite often, typically in elimination steps.
+  Traditional Isabelle tactics accommodate this by a combined
+  @{inference_def elim_resolution} principle.  In contrast, Isar uses
+  a slightly more refined combination, where the assumptions to be
+  closed are marked explicitly, using again the protective marker
+  @{text "#"}:
+
+  \[
+  \infer[(@{inference refinement})]
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>G' (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {\begin{tabular}{rl}
+    @{text "sub\<hyphen>proof:"} &
+    @{text "\<^vec>G \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
+    @{text "goal:"} &
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "goal unifier:"} &
+    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+    @{text "assm unifiers:"} &
+    @{text "(\<lambda>\<^vec>x. G\<^sub>j (\<^vec>a \<^vec>x))\<vartheta> = #H\<^sub>i\<vartheta>"} \\
+    & \quad (for each marked @{text "G\<^sub>j"} some @{text "#H\<^sub>i"}) \\
+   \end{tabular}}
+  \]
+
+  \noindent Here the @{text "sub\<hyphen>proof"} rule stems from the
+  main @{command fix}-@{command assume}-@{command show} outline of
+  Isar (cf.\ \secref{sec:framework-subproof}): each assumption
+  indicated in the text results in a marked premise @{text "G"} above.
+  The marking enforces resolution against one of the sub-goal's
+  premises.  Consequently, @{command fix}-@{command assume}-@{command
+  show} makes it possible to fit the result of a sub-proof quite robustly into a
+  pending sub-goal, while maintaining a good measure of flexibility.
+*}
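+
+text {*
+  The following trivial example sketches the @{command fix}-@{command
+  assume}-@{command show} outline in isolation: the sub-proof establishes
+  @{text "x \<in> A \<Longrightarrow> x \<in> A"} for arbitrary @{text "x"}, and its result is
+  composed with the pending goal according to @{inference refinement}:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    have "\<And>x. x \<in> A \<Longrightarrow> x \<in> A"
+    proof -
+      fix x
+      assume "x \<in> A"
+      then show "x \<in> A" .
+    qed
+(*<*)
+end
+(*>*)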
+
+
+section {* The Isar proof language \label{sec:framework-isar} *}
+
+text {*
+  Structured proofs are presented as high-level expressions for
+  composing entities of Pure (propositions, facts, and goals).  The
+  Isar proof language allows to organize reasoning within the
+  underlying rule calculus of Pure, but Isar is not another logical
+  calculus!
+
+  Isar is an exercise in sound minimalism.  Approximately half of the
+  language is introduced as primitive, the rest defined as derived
+  concepts.  The following grammar describes the core language
+  (category @{text "proof"}), which is embedded into theory
+  specification elements such as @{command theorem}; see also
+  \secref{sec:framework-stmt} for the separate category @{text
+  "statement"}.
+
+  \medskip
+  \begin{tabular}{rcl}
+    @{text "theory\<hyphen>stmt"} & = & @{command "theorem"}~@{text "statement proof  |"}~~@{command "definition"}~@{text "\<dots>  |  \<dots>"} \\[1ex]
+
+    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\[1ex]
+
+    @{text prfx} & = & @{command "using"}~@{text "facts"} \\
+    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
+
+    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
+    & @{text "|"} & @{command "next"} \\
+    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
+    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
+    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
+    & @{text "|"} & @{command assume}~@{text "\<guillemotleft>inference\<guillemotright> name: props"} \\
+    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
+    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
+    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
+  \end{tabular}
+
+  \medskip Simultaneous propositions or facts may be separated by the
+  @{keyword "and"} keyword.
+
+  \medskip The syntax for terms and propositions is inherited from
+  Pure (and the object-logic).  A @{text "pattern"} is a @{text
+  "term"} with schematic variables, to be bound by higher-order
+  matching.
+
+  \medskip Facts may be referenced by name or proposition.  For
+  example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
+  becomes available both as @{text "a"} and
+  \isacharbackquoteopen@{text "A"}\isacharbackquoteclose.  Moreover,
+  fact expressions may involve attributes that modify either the
+  theorem or the background context.  For example, the expression
+  ``@{text "a [OF b]"}'' refers to the composition of two facts
+  according to the @{inference resolution} inference of
+  \secref{sec:framework-resolution}, while ``@{text "a [intro]"}''
+  declares a fact as introduction rule in the context.
+
+  The special fact called ``@{fact this}'' always refers to the last
+  result, as produced by @{command note}, @{command assume}, @{command
+  have}, or @{command show}.  Since @{command note} occurs
+  frequently together with @{command then} we provide some
+  abbreviations:
+
+  \medskip
+  \begin{tabular}{rcl}
+    @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
+    @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
+  \end{tabular}
+  \medskip
+
+  The @{text "method"} category is essentially a parameter and may be
+  populated later.  Methods use the facts indicated by @{command
+  "then"} or @{command using}, and then operate on the goal state.
+  Some basic methods are predefined: ``@{method "-"}'' leaves the goal
+  unchanged, ``@{method this}'' applies the facts as rules to the
+  goal, ``@{method (Pure) "rule"}'' applies the facts to another rule and the
+  result to the goal (both ``@{method this}'' and ``@{method (Pure) rule}''
+  refer to @{inference resolution} of
+  \secref{sec:framework-resolution}).  The secondary arguments to
+  ``@{method (Pure) rule}'' may be specified explicitly as in ``@{text "(rule
+  a)"}'', or picked from the context.  In the latter case, the system
+  first tries rules declared as @{attribute (Pure) elim} or
+  @{attribute (Pure) dest}, followed by those declared as @{attribute
+  (Pure) intro}.
+
+  The default method for @{command proof} is ``@{method (Pure) rule}''
+  (arguments picked from the context), for @{command qed} it is
+  ``@{method "-"}''.  Further abbreviations for terminal proof steps
+  are ``@{command "by"}~@{text "method\<^sub>1 method\<^sub>2"}'' for
+  ``@{command proof}~@{text "method\<^sub>1"}~@{command qed}~@{text
+  "method\<^sub>2"}'', and ``@{command ".."}'' for ``@{command
+  "by"}~@{method (Pure) rule}, and ``@{command "."}'' for ``@{command
+  "by"}~@{method this}''.  The @{command unfolding} element operates
+  directly on the current facts and goal by applying equalities.
+
+  \medskip Block structure can be indicated explicitly by ``@{command
+  "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
+  already involves implicit nesting.  In any case, @{command next}
+  jumps into the next section of a block, i.e.\ it acts like closing
+  an implicit block scope and opening another one; there is no direct
+  correspondence to subgoals here.
+
+  The remaining elements @{command fix} and @{command assume} build up
+  a local context (see \secref{sec:framework-context}), while
+  @{command show} refines a pending sub-goal by the rule resulting
+  from a nested sub-proof (see \secref{sec:framework-subproof}).
+  Further derived concepts will support calculational reasoning (see
+  \secref{sec:framework-calc}).
+*}
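+
+text {*
+  For illustration, the @{text "\<inter>"}-introduction example from the
+  beginning of this chapter may be spelt out with an explicit
+  @{command note} followed by @{command then}, which is what
+  @{command from} abbreviates:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume a: "x \<in> A" and b: "x \<in> B"
+    note a and b
+    then have "x \<in> A \<inter> B" ..
+(*<*)
+end
+(*>*)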
+
+
+subsection {* Context elements \label{sec:framework-context} *}
+
+text {*
+  In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
+  essentially acts like a proof context.  Isar elaborates this idea
+  towards a higher-level notion, with additional information for
+  type-inference, term abbreviations, local facts, hypotheses etc.
+
+  The element @{command fix}~@{text "x :: \<alpha>"} declares a local
+  parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
+  results exported from the context, @{text "x"} may become anything.
+  The @{command assume}~@{text "\<guillemotleft>inference\<guillemotright>"} element provides a
+  general interface to hypotheses: ``@{command assume}~@{text
+  "\<guillemotleft>inference\<guillemotright> A"}'' produces @{text "A \<turnstile> A"} locally, while the
+  included inference tells how to discharge @{text A} from results
+  @{text "A \<turnstile> B"} later on.  There is no user-syntax for @{text
+  "\<guillemotleft>inference\<guillemotright>"}, i.e.\ it may only occur internally when derived
+  commands are defined in ML.
+
+  At the user-level, the default inference for @{command assume} is
+  @{inference discharge} as given below.  The additional variants
+  @{command presume} and @{command def} are defined as follows:
+
+  \medskip
+  \begin{tabular}{rcl}
+    @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<hyphen>discharge\<guillemotright> A"} \\
+    @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
+  \end{tabular}
+  \medskip
+
+  \[
+  \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
+  \]
+  \[
+  \infer[(@{inference_def "weak\<hyphen>discharge"})]{@{text "\<strut>\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
+  \]
+  \[
+  \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
+  \]
+
+  \medskip Note that @{inference discharge} and @{inference
+  "weak\<hyphen>discharge"} differ in the marker for @{prop A}, which is
+  relevant when the result of a @{command fix}-@{command
+  assume}-@{command show} outline is composed with a pending goal,
+  cf.\ \secref{sec:framework-subproof}.
+
+  The most interesting derived context element in Isar is @{command
+  obtain} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
+  elimination steps in a purely forward manner.  The @{command obtain}
+  command takes a specification of parameters @{text "\<^vec>x"} and
+  assumptions @{text "\<^vec>A"} to be added to the context, together
+  with a proof of a case rule stating that this extension is
+  conservative (i.e.\ may be removed from closed results later on):
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x  \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
+  \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
+  \quad @{command proof}~@{method "-"} \\
+  \qquad @{command fix}~@{text thesis} \\
+  \qquad @{command assume}~@{text "[intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis"} \\
+  \qquad @{command show}~@{text thesis}~@{command using}~@{text "\<langle>facts\<rangle> \<langle>proof\<rangle>"} \\
+  \quad @{command qed} \\
+  \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
+  \end{tabular}
+  \medskip
+
+  \[
+  \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
+    \begin{tabular}{rl}
+    @{text "case:"} &
+    @{text "\<Gamma> \<turnstile> \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\[0.2ex]
+    @{text "result:"} &
+    @{text "\<Gamma> \<union> \<^vec>A \<^vec>y \<turnstile> B"} \\[0.2ex]
+    \end{tabular}}
+  \]
+
+  \noindent Here the name ``@{text thesis}'' is a specific convention
+  for an arbitrary-but-fixed proposition; in the primitive natural
+  deduction rules shown before we have occasionally used @{text C}.
+  The whole statement of ``@{command obtain}~@{text x}~@{keyword
+  "where"}~@{text "A x"}'' may be read as a claim that @{text "A x"}
+  may be assumed for some arbitrary-but-fixed @{text "x"}.  Also note
+  that ``@{command obtain}~@{text "A \<AND> B"}'' without parameters
+  is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
+  latter involves multiple sub-goals.
+
+  \medskip The subsequent Isar proof texts explain all context
+  elements introduced above using the formal proof language itself.
+  After finishing a local proof within a block, we indicate the
+  exported result via @{command note}.
+*}
+
+(*<*)
+theorem True
+proof
+(*>*)
+  txt_raw {* \begin{minipage}[t]{0.45\textwidth} *}
+  {
+    fix x
+    have "B x" sorry %noproof
+  }
+  note `\<And>x. B x`
+  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  {
+    assume A
+    have B sorry %noproof
+  }
+  note `A \<Longrightarrow> B`
+  txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  {
+    def x \<equiv> a
+    have "B x" sorry %noproof
+  }
+  note `B a`
+  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  {
+    obtain x where "A x" sorry %noproof
+    have B sorry %noproof
+  }
+  note `B`
+  txt_raw {* \end{minipage} *}
+(*<*)
+qed
+(*>*)
+
+text {*
+  \bigskip\noindent This illustrates the meaning of Isar context
+  elements without goals getting in between.
+*}
+
+subsection {* Structured statements \label{sec:framework-stmt} *}
+
+text {*
+  The category @{text "statement"} of top-level theorem specifications
+  is defined as follows:
+
+  \medskip
+  \begin{tabular}{rcl}
+  @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
+  & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
+
+  @{text "context"} & @{text "\<equiv>"} & @{text "\<FIXES> vars \<AND> \<dots>"} \\
+  & @{text "|"} & @{text "\<ASSUMES> name: props \<AND> \<dots>"} \\
+
+  @{text "conclusion"} & @{text "\<equiv>"} & @{text "\<SHOWS> name: props \<AND> \<dots>"} \\
+  & @{text "|"} & @{text "\<OBTAINS> vars \<AND> \<dots> \<WHERE> name: props \<AND> \<dots>"} \\
+  & & \quad @{text "\<BBAR> \<dots>"} \\
+  \end{tabular}
+
+  \medskip\noindent A simple @{text "statement"} consists of named
+  propositions.  The full form admits local context elements followed
+  by the actual conclusions, such as ``@{keyword "fixes"}~@{text
+  x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
+  x"}''.  The final result emerges as a Pure rule after discharging
+  the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
+
+  The @{keyword "obtains"} variant is another abbreviation defined
+  below; unlike @{command obtain} (cf.\
+  \secref{sec:framework-context}) there may be several ``cases''
+  separated by ``@{text "\<BBAR>"}'', each consisting of several
+  parameters (@{text "vars"}) and several premises (@{text "props"}).
+  This specifies multi-branch elimination rules.
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x   \<BBAR>   \<dots>   \<equiv>"} \\[0.5ex]
+  \quad @{text "\<FIXES> thesis"} \\
+  \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis  \<AND>  \<dots>"} \\
+  \quad @{text "\<SHOWS> thesis"} \\
+  \end{tabular}
+  \medskip
+
+  Presenting structured statements in such an ``open'' format usually
+  simplifies the subsequent proof, because the outer structure of the
+  problem is already laid out directly.  E.g.\ consider the following
+  canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
+  respectively:
+*}
+
+text_raw {*\begin{minipage}{0.5\textwidth}*}
+
+theorem
+  fixes x and y
+  assumes "A x" and "B y"
+  shows "C x y"
+proof -
+  from `A x` and `B y`
+  show "C x y" sorry %noproof
+qed
+
+text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+
+theorem
+  obtains x and y
+  where "A x" and "B y"
+proof -
+  have "A a" and "B b" sorry %noproof
+  then show thesis ..
+qed
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
+  x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
+  y"}\isacharbackquoteclose\ are referenced immediately; there is no
+  need to decompose the logical rule structure again.  In the second
+  proof the final ``@{command then}~@{command show}~@{text
+  thesis}~@{command ".."}''  involves the local rule case @{text "\<And>x
+  y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
+  "a"} and @{text "b"} produced in the body.
+*}
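+
+text {*
+  \medskip A multi-branch @{text "\<OBTAINS>"} specification may be
+  illustrated by the following small sketch (in Isabelle/HOL
+  notation): the two cases of the elaborated statement are
+  established by case analysis on the natural number @{text n},
+  concluding each branch via the standard ``@{command
+  then}~@{command show}~@{text thesis}~@{command ".."}'' step.
+*}
+
+theorem
+  fixes n :: nat
+  obtains "n = 0" | m where "n = Suc m"
+proof (cases n)
+  case 0
+  then show thesis ..
+next
+  case (Suc m)
+  then show thesis ..
+qed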
+
+
+subsection {* Structured proof refinement \label{sec:framework-subproof} *}
+
+text {*
+  By breaking up the grammar for the Isar proof language, we may
+  understand a proof text as a linear sequence of individual proof
+  commands.  These are interpreted as transitions of the Isar virtual
+  machine (Isar/VM), which operates on a block-structured
+  configuration in single steps.  This allows users to write proof
+  texts in an incremental manner, and inspect intermediate
+  configurations for debugging.
+
+  The basic idea is analogous to evaluating algebraic expressions on a
+  stack machine: @{text "(a + b) \<cdot> c"} then corresponds to a sequence
+  of single transitions for each symbol @{text "(, a, +, b, ), \<cdot>, c"}.
+  In Isar the algebraic values are facts or goals, and the operations
+  are inferences.
+
+  \medskip The Isar/VM state maintains a stack of nodes, each node
+  contains the local proof context, the linguistic mode, and a pending
+  goal (optional).  The mode determines the type of transition that
+  may be performed next; it essentially alternates between forward and
+  backward reasoning, with an intermediate stage for chained facts
+  (see \figref{fig:isar-vm}).
+
+  \begin{figure}[htb]
+  \begin{center}
+  \includegraphics[width=0.8\textwidth]{isar-vm}
+  \end{center}
+  \caption{Isar/VM modes}\label{fig:isar-vm}
+  \end{figure}
+
+  For example, in @{text "state"} mode Isar acts like a mathematical
+  scratch-pad, accepting declarations like @{command fix}, @{command
+  assume}, and claims like @{command have}, @{command show}.  A goal
+  statement changes the mode to @{text "prove"}, which means that we
+  may now refine the problem via @{command unfolding} or @{command
+  proof}.  Then we are again in @{text "state"} mode of a proof body,
+  which may issue @{command show} statements to solve pending
+  sub-goals.  A concluding @{command qed} will return to the original
+  @{text "state"} mode one level upwards.  The subsequent Isar/VM
+  trace indicates block structure, linguistic mode, goal state, and
+  inferences:
+*}
+
+text_raw {* \begingroup\footnotesize *}
+(*<*)notepad begin
+(*>*)
+  txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
+  have "A \<longrightarrow> B"
+  proof
+    assume A
+    show B
+      sorry %noproof
+  qed
+  txt_raw {* \end{minipage}\quad
+\begin{minipage}[t]{0.06\textwidth}
+@{text "begin"} \\
+\\
+\\
+@{text "begin"} \\
+@{text "end"} \\
+@{text "end"} \\
+\end{minipage}
+\begin{minipage}[t]{0.08\textwidth}
+@{text "prove"} \\
+@{text "state"} \\
+@{text "state"} \\
+@{text "prove"} \\
+@{text "state"} \\
+@{text "state"} \\
+\end{minipage}\begin{minipage}[t]{0.35\textwidth}
+@{text "(A \<longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
+@{text "(A \<Longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
+\\
+\\
+@{text "#(A \<longrightarrow> B)"} \\
+@{text "A \<longrightarrow> B"} \\
+\end{minipage}\begin{minipage}[t]{0.4\textwidth}
+@{text "(init)"} \\
+@{text "(resolution impI)"} \\
+\\
+\\
+@{text "(refinement #A \<Longrightarrow> B)"} \\
+@{text "(finish)"} \\
+\end{minipage} *}
+(*<*)
+end
+(*>*)
+text_raw {* \endgroup *}
+
+text {*
+  \noindent Here the @{inference refinement} inference from
+  \secref{sec:framework-resolution} mediates composition of Isar
+  sub-proofs nicely.  Observe that this principle incorporates some
+  degree of freedom in proof composition.  In particular, the proof
+  body allows parameters and assumptions to be re-ordered, or commuted
+  according to Hereditary Harrop Form.  Moreover, context elements
+  that are not used in a sub-proof may be omitted altogether.  For
+  example:
+*}
+
+text_raw {*\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix x and y
+    assume "A x" and "B y"
+    show "C x y" sorry %noproof
+  qed
+
+txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+next
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix x assume "A x"
+    fix y assume "B y"
+    show "C x y" sorry %noproof
+  qed
+
+txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+next
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix y assume "B y"
+    fix x assume "A x"
+    show "C x y" sorry
+  qed
+
+txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+(*<*)
+next
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix y assume "B y"
+    fix x
+    show "C x y" sorry
+  qed
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Such ``peephole optimizations'' of Isar texts are
+  practically important to improve readability, by rearranging
+  context elements according to the natural flow of reasoning in the
+  body, while still observing the overall scoping rules.
+
+  \medskip This illustrates the basic idea of structured proof
+  processing in Isar.  The main mechanisms are based on natural
+  deduction rule composition within the Pure framework.  In
+  particular, there are no direct operations on goal states within the
+  proof body.  Moreover, there is no hidden automated reasoning
+  involved, just plain unification.
+*}
+
+
+subsection {* Calculational reasoning \label{sec:framework-calc} *}
+
+text {*
+  The existing Isar infrastructure is sufficiently flexible to support
+  calculational reasoning (chains of transitivity steps) as derived
+  concept.  The generic proof elements introduced below depend on
+  rules declared as @{attribute trans} in the context.  It is left to
+  the object-logic to provide a suitable rule collection for mixed
+  relations of @{text "="}, @{text "<"}, @{text "\<le>"}, @{text "\<subset>"},
+  @{text "\<subseteq>"} etc.  Due to the flexibility of rule composition
+  (\secref{sec:framework-resolution}), substitution of equals by
+  equals is covered as well, even substitution of inequalities
+  involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
+  and \cite{Bauer-Wenzel:2001}.
+
+  The generic calculational mechanism is based on the observation that
+  rules such as @{text "trans:"}~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"}
+  proceed from the premises towards the conclusion in a deterministic
+  fashion.  Thus we may reason in forward mode, feeding intermediate
+  results into rules selected from the context.  The course of
+  reasoning is organized by maintaining a secondary fact called
+  ``@{fact calculation}'', apart from the primary ``@{fact this}''
+  already provided by the Isar primitives.  In the definitions below,
+  @{attribute OF} refers to @{inference resolution}
+  (\secref{sec:framework-resolution}) with multiple rule arguments,
+  and @{text "trans"} represents to a suitable rule from the context:
+
+  \begin{matharray}{rcl}
+    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
+    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
+    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\
+  \end{matharray}
+
+  \noindent The start of a calculation is determined implicitly in the
+  text: here @{command also} sets @{fact calculation} to the current
+  result; any subsequent occurrence will update @{fact calculation} by
+  combination with the next result and a transitivity rule.  The
+  calculational sequence is concluded via @{command finally}, where
+  the final result is exposed for use in a concluding claim.
+
+  Here is a canonical proof pattern, using @{command have} to
+  establish the intermediate results:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  have "a = b" sorry
+  also have "\<dots> = c" sorry
+  also have "\<dots> = d" sorry
+  finally have "a = d" .
+(*<*)
+end
+(*>*)
+
+text {*
+  \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
+  provided by the Isabelle/Isar syntax layer: it statically refers to
+  the right-hand side argument of the previous statement given in the
+  text.  Thus it happens to coincide with relevant sub-expressions in
+  the calculational chain, but the exact correspondence depends on
+  the transitivity rules involved.
+
+  \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
+  transitivities with only one premise.  Isar maintains a separate
+  rule collection declared via the @{attribute sym} attribute, to be
+  used in fact expressions ``@{text "a [symmetric]"}'', or single-step
+  proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
+  have}~@{text "y = x"}~@{command ".."}''.
+*}
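+
+text {*
+  The following small sketch (in Isabelle/HOL notation) shows a mixed
+  calculation that combines @{text "\<le>"}, @{text "<"}, and @{text "="}
+  via the @{attribute trans} rules of the order classes:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  fix a b c d :: nat
+  have "a \<le> b" sorry
+  also have "b < c" sorry
+  also have "c = d" sorry
+  finally have "a < d" .
+(*<*)
+end
+(*>*)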
+
+end
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Generic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,2016 @@
+theory Generic
+imports Base Main
+begin
+
+chapter {* Generic tools and packages \label{ch:gen-tools} *}
+
+section {* Configuration options \label{sec:config} *}
+
+text {* Isabelle/Pure maintains a record of named configuration
+  options within the theory or proof context, with values of type
+  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
+  string}.  Tools may declare options in ML, and then refer to these
+  values (relative to the context).  Thus global reference variables
+  are easily avoided.  The user may change the value of a
+  configuration option by means of an associated attribute of the same
+  name.  This form of context declaration works particularly well with
+  commands such as @{command "declare"} or @{command "using"} like
+  this:
+*}
+
+declare [[show_main_goal = false]]
+
+notepad
+begin
+  note [[show_main_goal = true]]
+end
+
+text {* For historical reasons, some tools cannot take the full proof
+  context into account and merely refer to the background theory.
+  This is accommodated by configuration options being declared as
+  ``global'', which may not be changed within a local context.
+
+  \begin{matharray}{rcll}
+    @{command_def "print_options"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "print_options"} prints the available configuration
+  options, with names, types, and current values.
+  
+  \item @{text "name = value"} as an attribute expression modifies the
+  named option, with the syntax of the value depending on the option's
+  type.  For @{ML_type bool} the default value is @{text true}.  Any
+  attempt to change a global option in a local context is ignored.
+
+  \end{description}
+*}
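+
+text {* For Boolean options the value may be omitted, which means
+  @{text true} (a small illustration, analogous to the example
+  above): *}
+
+notepad
+begin
+  note [[show_main_goal]]
+end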
+
+
+section {* Basic proof tools *}
+
+subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def unfold} & : & @{text method} \\
+    @{method_def fold} & : & @{text method} \\
+    @{method_def insert} & : & @{text method} \\[0.5ex]
+    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def intro} & : & @{text method} \\
+    @{method_def elim} & : & @{text method} \\
+    @{method_def succeed} & : & @{text method} \\
+    @{method_def fail} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
+    ;
+    (@@{method erule} | @@{method drule} | @@{method frule})
+      ('(' @{syntax nat} ')')? @{syntax thmrefs}
+    ;
+    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
+  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
+  all goals; any chained facts provided are inserted into the goal and
+  subject to rewriting as well.
+
+  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
+  into all goals of the proof state.  Note that current facts
+  indicated for forward chaining are ignored.
+
+  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
+  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
+  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
+  method (see \secref{sec:pure-meth-att}), but apply rules by
+  elim-resolution, destruct-resolution, and forward-resolution,
+  respectively \cite{isabelle-implementation}.  The optional natural
+  number argument (default 0) specifies additional assumption steps to
+  be performed here.
+
+  Note that these methods are improper ones, mainly serving for
+  experimentation and tactic script emulation.  Different modes of
+  basic rule application are usually expressed in Isar at the proof
+  language level, rather than via implicit proof state manipulations.
+  For example, a proper single-step elimination would be done using
+  the plain @{method rule} method, with forward chaining of current
+  facts.
+
+  \item @{method intro} and @{method elim} repeatedly refine some goal
+  by intro- or elim-resolution, after having inserted any chained
+  facts.  Exactly the rules given as arguments are taken into account;
+  this allows fine-tuned decomposition of a proof problem, in contrast
+  to common automated tools.
+
+  \item @{method succeed} yields a single (unchanged) result; it is
+  the identity of the ``@{text ","}'' method combinator (cf.\
+  \secref{sec:proof-meth}).
+
+  \item @{method fail} yields an empty result sequence; it is the
+  identity of the ``@{text "|"}'' method combinator (cf.\
+  \secref{sec:proof-meth}).
+
+  \end{description}
+
+  \begin{matharray}{rcl}
+    @{attribute_def tagged} & : & @{text attribute} \\
+    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
+    @{attribute_def THEN} & : & @{text attribute} \\
+    @{attribute_def unfolded} & : & @{text attribute} \\
+    @{attribute_def folded} & : & @{text attribute} \\
+    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
+    @{attribute_def rotated} & : & @{text attribute} \\
+    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
+    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{attribute tagged} @{syntax name} @{syntax name}
+    ;
+    @@{attribute untagged} @{syntax name}
+    ;
+    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
+    ;
+    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
+    ;
+    @@{attribute rotated} @{syntax int}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{attribute tagged}~@{text "name value"} and @{attribute
+  untagged}~@{text name} add and remove \emph{tags} of some theorem.
+  Tags may be any list of string pairs that serve as formal comment.
+  The first string is considered the tag name, the second its value.
+  Note that @{attribute untagged} removes any tags of the same name.
+
+  \item @{attribute THEN}~@{text a} composes rules by resolution; it
+  resolves with the first premise of @{text a} (an alternative
+  position may be also specified).  See also @{ML_op "RS"} in
+  \cite{isabelle-implementation}.
+  
+  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
+  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
+  definitions throughout a rule.
+
+  \item @{attribute abs_def} turns an equation of the form @{prop "f x
+  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
+  simp} or @{method unfold} steps always expand it.  This also works
+  for object-logic equality.
+
+  \item @{attribute rotated}~@{text n} rotates the premises of a
+  theorem by @{text n} (default 1).
+
+  \item @{attribute (Pure) elim_format} turns a destruction rule into
+  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
+  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
+  
+  Note that the Classical Reasoner (\secref{sec:classical}) provides
+  its own version of this operation.
+
+  \item @{attribute no_vars} replaces schematic variables by free
+  ones; this is mainly for tuning output of pretty printed theorems.
+
+  \end{description}
+*}
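+
+text {* For example (a small sketch using standard HOL rules),
+  @{attribute rotated} swaps the two premises of @{thm [source]
+  conjI}, and @{attribute THEN} composes @{thm [source] mp} with
+  itself into a double modus ponens rule: *}
+
+notepad
+begin
+  note conjI [rotated]
+  note mp [THEN mp]
+end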
+
+
+subsection {* Low-level equational reasoning *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def subst} & : & @{text method} \\
+    @{method_def hypsubst} & : & @{text method} \\
+    @{method_def split} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thmref}
+    ;
+    @@{method split} @{syntax thmrefs}
+  \<close>}
+
+  These methods provide low-level facilities for equational reasoning
+  that are intended for specialized applications only.  Normally,
+  single step calculations would be performed in a structured text
+  (see also \secref{sec:calculation}), while the Simplifier methods
+  provide the canonical way for automated normalization (see
+  \secref{sec:simplifier}).
+
+  \begin{description}
+
+  \item @{method subst}~@{text eq} performs a single substitution step
+  using rule @{text eq}, which may be either a meta or object
+  equality.
+
+  \item @{method subst}~@{text "(asm) eq"} substitutes in an
+  assumption.
+
+  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
+  substitutions in the conclusion. The numbers @{text i} to @{text j}
+  indicate the positions to substitute at.  Positions are ordered from
+  the top of the term tree moving down from left to right. For
+  example, in @{text "(a + b) + (c + d)"} there are three positions
+  where commutativity of @{text "+"} is applicable: 1 refers to @{text
+  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.
+
+  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
+  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
+  assume all substitutions are performed simultaneously.  Otherwise
+  the behaviour of @{text subst} is not specified.
+
+  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
+  substitutions in the assumptions. The positions refer to the
+  assumptions in order from left to right.  For example, given in a
+  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
+  commutativity of @{text "+"} is the subterm @{text "a + b"} and
+  position 2 is the subterm @{text "c + d"}.
+
+  \item @{method hypsubst} performs substitution using some
+  assumption; this only works for equations of the form @{text "x =
+  t"} where @{text x} is a free or bound variable.
+
+  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
+  splitting using the given rules.  Splitting is performed in the
+  conclusion or some assumption of the subgoal, depending on the
+  structure of the rule.
+  
+  Note that the @{method simp} method already involves repeated
+  application of split rules as declared in the current context, using
+  @{attribute split}, for example.
+
+  \end{description}
+*}
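+
+text {* A small sketch of single-step substitution, using a local
+  assumption as rewrite rule (the statement is symmetric, so the
+  choice of redex in each step does not matter): *}
+
+notepad
+begin
+  fix f :: "nat \<Rightarrow> nat" and a b :: nat
+  assume eq: "f a = b"
+  have "f a + f a = b + b"
+    by (subst eq, subst eq) (rule refl)
+end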
+
+
+subsection {* Further tactic emulations \label{sec:tactics} *}
+
+text {*
+  The following improper proof methods emulate traditional tactics.
+  These admit direct access to the goal state, which is normally
+  considered harmful!  In particular, this may involve both numbered
+  goal addressing (default 1), and dynamic instantiation within the
+  scope of some subgoal.
+
+  \begin{warn}
+    Dynamic instantiations refer to universally quantified parameters
+    of a subgoal (the dynamic context) rather than fixed variables and
+    term abbreviations of a (static) Isar context.
+  \end{warn}
+
+  Tactic emulation methods, unlike their ML counterparts, admit
+  simultaneous instantiation from both dynamic and static contexts.
+  If names occur in both contexts goal parameters hide locally fixed
+  variables.  Likewise, schematic variables refer to term
+  abbreviations, if present in the static context.  Otherwise they
+  are left as schematic variables, to be solved by unification with
+  certain parts of the subgoal.
+
+  Note that the tactic emulation proof methods in Isabelle/Isar are
+  consistently named @{text foo_tac}.  Note also that variable names
+  occurring on left hand sides of instantiations must be preceded by a
+  question mark if they coincide with a keyword or contain dots.  This
+  is consistent with the attribute @{attribute "where"} (see
+  \secref{sec:pure-meth-att}).
+
+  \begin{matharray}{rcl}
+    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
+      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \<newline>
+    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
+    ;
+    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
+    ;
+    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
+    ;
+    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
+    ;
+    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
+    ;
+
+    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
+  \<close>}
+
+\begin{description}
+
+  \item @{method rule_tac} etc. do resolution of rules with explicit
+  instantiation.  This works the same way as the ML tactics @{ML
+  res_inst_tac} etc. (see \cite{isabelle-implementation}).
+
+  Multiple rules may only be given if there is no instantiation; then
+  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
+  \cite{isabelle-implementation}).
+
+  \item @{method cut_tac} inserts facts into the proof state as
+  assumption of a subgoal; instantiations may be given as well.  Note
+  that the scope of schematic variables is spread over the main goal
+  statement and rule premises are turned into new subgoals.  This is
+  in contrast to the regular method @{method insert} which inserts
+  closed rule statements.
+
+  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
+  from a subgoal.  Note that @{text \<phi>} may contain schematic
+  variables, to abbreviate the intended proposition; the first
+  matching subgoal premise will be deleted.  Removing useless premises
+  from a subgoal increases its readability and can make search tactics
+  run faster.
+
+  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
+  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
+  as new subgoals (in the original context).
+
+  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
+  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
+  \emph{suffix} of variables.
+
+  \item @{method rotate_tac}~@{text n} rotates the premises of a
+  subgoal by @{text n} positions: from right to left if @{text n} is
+  positive, and from left to right if @{text n} is negative; the
+  default value is 1.
+
+  \item @{method tactic}~@{text "text"} produces a proof method from
+  any ML text of type @{ML_type tactic}.  Apart from the usual ML
+  environment and the current proof context, the ML code may refer to
+  the locally bound values @{ML_text facts}, which indicates any
+  current facts used for forward-chaining.
+
+  \item @{method raw_tactic} is similar to @{method tactic}, but
+  presents the goal state in its raw internal form, where simultaneous
+  subgoals appear as conjunction of the logical framework instead of
+  the usual split into several subgoals.  While this feature is useful
+  for debugging complex method definitions, it should never appear in
+  production theories.
+
+  \end{description}
+*}
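+
+text {* A small sketch of tactic emulation methods in an unstructured
+  proof script: @{method rename_tac} first gives the goal parameters
+  readable names, then @{method rule_tac} applies @{thm [source]
+  conjI} with an explicit instantiation referring to these
+  parameters: *}
+
+lemma "\<And>x y. x \<and> y \<Longrightarrow> y \<and> x"
+  apply (rename_tac P Q)
+  apply (rule_tac P = Q and Q = P in conjI)
+  apply (erule conjunct2)
+  apply (erule conjunct1)
+  done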
+
+
+section {* The Simplifier \label{sec:simplifier} *}
+
+text {* The Simplifier performs conditional and unconditional
+  rewriting and uses contextual information: rule declarations in the
+  background theory or local proof context are taken into account, as
+  well as chained facts and subgoal premises (``local assumptions'').
+  There are several general hooks to modify the simplification
+  strategy, or to incorporate other proof tools that solve
+  sub-problems, produce rewrite rules on demand etc.
+
+  The rewriting strategy is always strictly bottom up, except for
+  congruence rules, which are applied while descending into a term.
+  Conditions in conditional rewrite rules are solved recursively
+  before the rewrite rule is applied.
+
+  The default Simplifier setup of major object logics (HOL, HOLCF,
+  FOL, ZF) makes the Simplifier ready for immediate use, without
+  engaging into the internal structures.  Thus it serves as
+  general-purpose proof tool with the main focus on equational
+  reasoning, and a bit more than that.
+*}
+
+
+subsection {* Simplification methods \label{sec:simp-meth} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def simp} & : & @{text method} \\
+    @{method_def simp_all} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
+    ;
+
+    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
+    ;
+    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | 'add' | 'del') |
+      'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
+  \<close>}
+
+  \begin{description}
+
+  \item @{method simp} invokes the Simplifier on the first subgoal,
+  after inserting chained facts as additional goal premises; further
+  rule declarations may be included via @{text "(simp add: facts)"}.
+  The proof method fails if the subgoal remains unchanged after
+  simplification.
+
+  Note that the original goal premises and chained facts are subject
+  to simplification themselves, while declarations via @{text
+  "add"}/@{text "del"} merely follow the policies of the object-logic
+  to extract rewrite rules from theorems, without further
+  simplification.  This may lead to slightly different behavior in
+  either case, which might be required precisely like that in some
+  boundary situations to perform the intended simplification step!
+
+  \medskip The @{text only} modifier first removes all other rewrite
+  rules, looper tactics (including split rules), congruence rules, and
+  then behaves like @{text add}.  Implicit solvers remain, which means
+  that trivial rules like reflexivity or introduction of @{text
+  "True"} are available to solve the simplified subgoals, but also
+  non-trivial tools like linear arithmetic in HOL.  The latter may
+  lead to some surprise of the meaning of ``only'' in Isabelle/HOL
+  compared to English!
+
+  \medskip The @{text split} modifiers add or delete rules for the
+  Splitter (see also \secref{sec:simp-strategies} on the looper).
+  This works only if the Simplifier method has been properly setup to
+  include the Splitter (all major object logics such as HOL, HOLCF, FOL,
+  ZF do this already).
+
+  There is also a separate @{method_ref split} method available for
+  single-step case splitting.  The effect of repeatedly applying
+  @{text "(split thms)"} can be imitated by ``@{text "(simp only:
+  split: thms)"}''.
+
+  \medskip The @{text cong} modifiers add or delete Simplifier
+  congruence rules (see also \secref{sec:simp-rules}); the default is
+  to add.
+
+  \item @{method simp_all} is similar to @{method simp}, but acts on
+  all goals, working backwards from the last to the first one as usual
+  in Isabelle.\footnote{The order is irrelevant for goals without
+  schematic variables, so simplification might actually be performed
+  in parallel here.}
+
+  Chained facts are inserted into all subgoals, before the
+  simplification process starts.  Further rule declarations are the
+  same as for @{method simp}.
+
+  The proof method fails if all subgoals remain unchanged after
+  simplification.
+
+  \end{description}
+
+  By default the Simplifier methods above take local assumptions fully
+  into account, using equational assumptions in the subsequent
+  normalization process, or simplifying assumptions themselves.
+  Further options allow the behavior of the Simplifier to be
+  fine-tuned in this respect, corresponding to a variety of ML
+  tactics as follows.\footnote{Unlike the corresponding Isar proof
+  methods, the ML tactics do not insist on changing the goal state.}
+
+  \begin{center}
+  \small
+  \begin{supertabular}{|l|l|p{0.3\textwidth}|}
+  \hline
+  Isar method & ML tactic & behavior \\\hline
+
+  @{text "(simp (no_asm))"} & @{ML simp_tac} & assumptions are ignored
+  completely \\\hline
+
+  @{text "(simp (no_asm_simp))"} & @{ML asm_simp_tac} & assumptions
+  are used in the simplification of the conclusion but are not
+  themselves simplified \\\hline
+
+  @{text "(simp (no_asm_use))"} & @{ML full_simp_tac} & assumptions
+  are simplified but are not used in the simplification of each other
+  or the conclusion \\\hline
+
+  @{text "(simp)"} & @{ML asm_full_simp_tac} & assumptions are used in
+  the simplification of the conclusion and to simplify other
+  assumptions \\\hline
+
+  @{text "(simp (asm_lr))"} & @{ML asm_lr_simp_tac} & compatibility
+  mode: an assumption is only used for simplifying assumptions which
+  are to the right of it \\\hline
+
+  \end{supertabular}
+  \end{center}
+*}
+
+
+subsubsection {* Examples *}
+
+text {* We consider basic algebraic simplifications in Isabelle/HOL.
+  The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
+  a good candidate to be solved by a single call of @{method simp}:
+*}
+
+lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops
+
+text {* The above attempt \emph{fails}, because @{term "0"} and @{term
+  "op +"} in the HOL library are declared as generic type class
+  operations, without stating any algebraic laws yet.  More specific
+  types are required to get access to certain standard simplifications
+  of the theory context, e.g.\ like this: *}
+
+lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
+lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
+lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp
+
+text {*
+  \medskip In many cases, assumptions of a subgoal are also needed in
+  the simplification process.  For example:
+*}
+
+lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
+lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
+lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp
+
+text {* As seen above, local assumptions that shall contribute to
+  simplification need to be part of the subgoal already, or indicated
+  explicitly for use by the subsequent method invocation.  Both too
+  little or too much information can make simplification fail, for
+  different reasons.
+
+  In the next example the malicious assumption @{prop "\<And>x::nat. f x =
+  g (f (g x))"} does not contribute to solve the problem, but makes
+  the default @{method simp} method loop: the rewrite rule @{text "f
+  ?x \<equiv> g (f (g ?x))"} extracted from the assumption does not
+  terminate.  The Simplifier notices certain simple forms of
+  nontermination, but not this one.  The problem can be solved
+  nonetheless, by ignoring assumptions via special options as
+  explained before:
+*}
+
+lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
+  by (simp (no_asm))
+
+text {* The latter form is typical for long unstructured proof
+  scripts, where the control over the goal content is limited.  In
+  structured proofs it is usually better to avoid pushing too many
+  facts into the goal state in the first place.  Assumptions in the
+  Isar proof context do not intrude on the reasoning if not used
+  explicitly.  This is illustrated for a toplevel statement and a
+  local proof body as follows:
+*}
+
+lemma
+  assumes "\<And>x::nat. f x = g (f (g x))"
+  shows "f 0 = f 0 + 0" by simp
+
+notepad
+begin
+  assume "\<And>x::nat. f x = g (f (g x))"
+  have "f 0 = f 0 + 0" by simp
+end
+
+text {* \medskip Because assumptions may simplify each other, there
+  can be very subtle cases of nontermination. For example, the regular
+  @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
+  \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
+  \[
+  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto}
+  @{text "P (f y)"} \stackrel{@{text "y \<equiv> x"}}{\longmapsto}
+  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto} \cdots
+  \]
+  whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
+  Q"} terminates (without solving the goal):
+*}
+
+lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
+  apply simp
+  oops
+
+text {* See also \secref{sec:simp-config} for options to enable
+  Simplifier trace mode, which often helps to diagnose problems with
+  rewrite systems.
+*}
+
+
+subsection {* Declaring rules \label{sec:simp-rules} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def simp} & : & @{text attribute} \\
+    @{attribute_def split} & : & @{text attribute} \\
+    @{attribute_def cong} & : & @{text attribute} \\
+    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{attribute simp} | @@{attribute split} | @@{attribute cong})
+      (() | 'add' | 'del')
+  \<close>}
+
+  \begin{description}
+
+  \item @{attribute simp} declares rewrite rules, by adding or
+  deleting them from the simpset within the theory or proof context.
+  Rewrite rules are theorems expressing some form of equality, for
+  example:
+
+  @{text "Suc ?m + ?n = ?m + Suc ?n"} \\
+  @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
+  @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}
+
+  \smallskip
+  Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
+  also permitted; the conditions can be arbitrary formulas.
+
+  \medskip Internally, all rewrite rules are translated into Pure
+  equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
+  simpset contains a function for extracting equalities from arbitrary
+  theorems, which is usually installed when the object-logic is
+  configured initially. For example, @{text "\<not> ?x \<in> {}"} could be
+  turned into @{text "?x \<in> {} \<equiv> False"}. Theorems that are declared as
+  @{attribute simp} and local assumptions within a goal are treated
+  uniformly in this respect.
+
+  The Simplifier accepts the following formats for the @{text "lhs"}
+  term:
+
+  \begin{enumerate}
+
+  \item First-order patterns, considering the sublanguage of
+  application of constant operators to variable operands, without
+  @{text "\<lambda>"}-abstractions or functional variables.
+  For example:
+
+  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
+  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
+
+  \item Higher-order patterns in the sense of \cite{nipkow-patterns}.
+  These are terms in @{text "\<beta>"}-normal form (this will always be the
+  case unless you have done something strange) where each occurrence
+  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
+  @{text "x\<^sub>i"} are distinct bound variables.
+
+  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
+  or its symmetric form, since the @{text "rhs"} is also a
+  higher-order pattern.
+
+  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
+  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
+  variables are treated like quasi-constant term material.
+
+  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
+  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
+  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
+  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
+  be replaced by adding new variables and conditions like this: @{text
+  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
+  rewrite rule of the second category since conditions can be
+  arbitrary terms.
+
+  \end{enumerate}
+
+  \item @{attribute split} declares case split rules.
+
+  \item @{attribute cong} declares congruence rules to the Simplifier
+  context.
+
+  Congruence rules are equalities of the form @{text [display]
+  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}
+
+  This controls the simplification of the arguments of @{text f}.  For
+  example, some arguments can be simplified under additional
+  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
+  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}
+
+  Given this rule, the simplifier assumes @{text "?Q\<^sub>1"} and extracts
+  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
+  assumptions are effective for rewriting formulae such as @{text "x =
+  0 \<longrightarrow> y + x = y"}.
+
+  %FIXME
+  %The local assumptions are also provided as theorems to the solver;
+  %see \secref{sec:simp-solver} below.
+
+  \medskip The following congruence rule for bounded quantifiers also
+  supplies contextual information --- about the bound variable:
+  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
+    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}
+
+  \medskip This congruence rule for conditional expressions can
+  supply contextual information for simplifying the arms:
+  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
+    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}
+
+  A congruence rule can also \emph{prevent} simplification of some
+  arguments.  Here is an alternative congruence rule for conditional
+  expressions that conforms to non-strict functional evaluation:
+  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}
+
+  Only the first argument is simplified; the others remain unchanged.
+  This can make simplification much faster, but may require an extra
+  case split over the condition @{text "?q"} to prove the goal.
+
+  \item @{command "print_simpset"} prints the collection of rules
+  declared to the Simplifier, which is also known as ``simpset''
+  internally.
+
+  For historical reasons, simpsets may occur independently from the
+  current context, but are conceptually dependent on it.  When the
+  Simplifier is invoked via one of its main entry points in the Isar
+  source language (as proof method \secref{sec:simp-meth} or rule
+  attribute \secref{sec:simp-meth}), its simpset is derived from the
+  current proof context, and carries a back-reference to that for
+  other tools that might get invoked internally (e.g.\ simplification
+  procedures \secref{sec:simproc}).  A mismatch of the context of the
+  simpset and the context of the problem being simplified may lead to
+  unexpected results.
+
+  \end{description}
+
+  The implicit simpset of the theory context is propagated
+  monotonically through the theory hierarchy: forming a new theory,
+  the union of the simpsets of its imports are taken as starting
+  point.  Also note that definitional packages like @{command
+  "datatype"}, @{command "primrec"}, @{command "fun"} routinely
+  declare Simplifier rules to the target context, while plain
+  @{command "definition"} is an exception in \emph{not} declaring
+  anything.
+
+  \medskip It is up to the user to manipulate the current simpset further
+  by explicitly adding or deleting theorems as simplification rules,
+  or installing other tools via simplification procedures
+  (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
+  that obviously simplify, like @{text "?n + 0 \<equiv> ?n"} are good
+  candidates for the implicit simpset, unless a special
+  non-normalizing behavior of certain operations is intended.  More
+  specific rules (such as distributive laws, which duplicate subterms)
+  should be added only for specific proof steps.  Conversely,
+  sometimes a rule needs to be deleted just for some part of a proof.
+  The need of frequent additions or deletions may indicate a poorly
+  designed simpset.
+
+  \begin{warn}
+  The union of simpsets from theory imports (as described above) is
+  not always a good starting point for the new theory.  If some
+  ancestors have deleted simplification rules because they are no
+  longer wanted, while others have left those rules in, then the union
+  will contain the unwanted rules, and thus have to be deleted again
+  in the theory body.
+  \end{warn}
+*}
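+
+text {* For example (a small sketch), a rewrite rule may be declared
+  locally as part of an assumption and is then used by the Simplifier
+  like any other simp rule: *}
+
+notepad
+begin
+  fix f :: "nat \<Rightarrow> nat" and x :: nat
+  assume [simp]: "f 0 = 0"
+  have "f 0 + x = x" by simp
+end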
+
+
+subsection {* Ordered rewriting with permutative rules *}
+
+text {* A rewrite rule is \emph{permutative} if the left-hand side and
+  right-hand side are equal up to renaming of variables.  The most
+  common permutative rule is commutativity: @{text "?x + ?y = ?y +
+  ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
+  ?y"} in arithmetic and @{text "insert ?x (insert ?y ?A) = insert ?y
+  (insert ?x ?A)"} for sets.  Such rules are common enough to merit
+  special attention.
+
+  Because ordinary rewriting loops given such rules, the Simplifier
+  employs a special strategy, called \emph{ordered rewriting}.
+  Permutative rules are detected and only applied if the rewriting
+  step decreases the redex wrt.\ a given term ordering.  For example,
+  commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then
+  stops, because the redex cannot be decreased further in the sense of
+  the term ordering.
+
+  The default is lexicographic ordering of term structure, but this
+  could be also changed locally for special applications via
+  @{index_ML Simplifier.set_termless} in Isabelle/ML.
+
+  \medskip Permutative rewrite rules are declared to the Simplifier
+  just like other rewrite rules.  Their special status is recognized
+  automatically, and their application is guarded by the term ordering
+  accordingly. *}
+
+
+subsubsection {* Rewriting with AC operators *}
+
+text {* Ordered rewriting is particularly effective in the case of
+  associative-commutative operators.  (Associativity by itself is not
+  permutative.)  When dealing with an AC-operator @{text "f"}, keep
+  the following points in mind:
+
+  \begin{itemize}
+
+  \item The associative law must always be oriented from left to
+  right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
+  orientation, if used with commutativity, leads to looping in
+  conjunction with the standard term order.
+
+  \item To complete your set of rewrite rules, you must add not just
+  associativity (A) and commutativity (C) but also a derived rule
+  \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.
+
+  \end{itemize}
+
+  Ordered rewriting with the combination of A, C, and LC sorts a term
+  lexicographically --- the rewriting engine imitates bubble-sort.
+*}
+
+locale AC_example =
+  fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
+  assumes assoc: "(x \<bullet> y) \<bullet> z = x \<bullet> (y \<bullet> z)"
+  assumes commute: "x \<bullet> y = y \<bullet> x"
+begin
+
+lemma left_commute: "x \<bullet> (y \<bullet> z) = y \<bullet> (x \<bullet> z)"
+proof -
+  have "(x \<bullet> y) \<bullet> z = (y \<bullet> x) \<bullet> z" by (simp only: commute)
+  then show ?thesis by (simp only: assoc)
+qed
+
+lemmas AC_rules = assoc commute left_commute
+
+text {* Thus the Simplifier is able to establish equalities with
+  arbitrary permutations of subterms, by normalizing to a common
+  standard form.  For example: *}
+
+lemma "(b \<bullet> c) \<bullet> a = xxx"
+  apply (simp only: AC_rules)
+  txt {* @{subgoals} *}
+  oops
+
+lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
+lemma "(b \<bullet> c) \<bullet> a = c \<bullet> (b \<bullet> a)" by (simp only: AC_rules)
+lemma "(b \<bullet> c) \<bullet> a = (c \<bullet> b) \<bullet> a" by (simp only: AC_rules)
+
+end
+
+text {* Martin and Nipkow \cite{martin-nipkow} discuss the theory and
+  give many examples; other algebraic structures are amenable to
+  ordered rewriting, such as boolean rings.  The Boyer-Moore theorem
+  prover \cite{bm88book} also employs ordered rewriting.
+*}
+
+
+subsubsection {* Re-orienting equalities *}
+
+text {* Another application of ordered rewriting uses the derived rule
+  @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
+  reverse equations.
+
+  This is occasionally useful to re-orient local assumptions according
+  to the term ordering, when other built-in mechanisms of
+  reorientation and mutual simplification fail to apply.  *}
+
+
+subsection {* Configuration options \label{sec:simp-config} *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
+    @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
+    @{attribute_def simp_debug} & : & @{text attribute} & default @{text false} \\
+  \end{tabular}
+  \medskip
+
+  These configuration options control further aspects of the Simplifier.
+  See also \secref{sec:config}.
+
+  \begin{description}
+
+  \item @{attribute simp_depth_limit} limits the number of recursive
+  invocations of the Simplifier during conditional rewriting.
+
+  \item @{attribute simp_trace} makes the Simplifier output internal
+  operations.  This includes rewrite steps, but also bookkeeping like
+  modifications of the simpset.
+
+  \item @{attribute simp_trace_depth_limit} limits the effect of
+  @{attribute simp_trace} to the given depth of recursive Simplifier
+  invocations (when solving conditions of rewrite rules).
+
+  \item @{attribute simp_debug} makes the Simplifier output some extra
+  information about internal operations.  This includes any attempted
+  invocation of simplification procedures.
+
+  \end{description}
+*}
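+
+text {* For example, Simplifier tracing may be enabled locally for a
+  single proof step like this (a small illustration): *}
+
+lemma "(0::nat) + x = x"
+  using [[simp_trace = true]]
+  by simp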
+
+
+subsection {* Simplification procedures \label{sec:simproc} *}
+
+text {* Simplification procedures are ML functions that produce proven
+  rewrite rules on demand.  They are associated with higher-order
+  patterns that approximate the left-hand sides of equations.  The
+  Simplifier first matches the current redex against one of the LHS
+  patterns; if this succeeds, the corresponding ML function is
+  invoked, passing the Simplifier context and redex term.  Thus rules
+  may be specifically fashioned for particular situations, resulting
+  in a more powerful mechanism than term rewriting by a fixed set of
+  rules.
+
+  Any successful result needs to be a (possibly conditional) rewrite
+  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
+  rule will be applied just as any ordinary rewrite rule.  It is
+  expected to be already in \emph{internal form}, bypassing the
+  automatic preprocessing of object-level equivalences.
+
+  \begin{matharray}{rcl}
+    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    simproc & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
+      @{syntax text} \<newline> (@'identifier' (@{syntax nameref}+))?
+    ;
+
+    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "simproc_setup"} defines a named simplification
+  procedure that is invoked by the Simplifier whenever any of the
+  given term patterns match the current redex.  The implementation,
+  which is provided as ML source text, needs to be of type @{ML_type
+  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
+  cterm} represents the current redex @{text r} and the result is
+  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
+  generalized version), or @{ML NONE} to indicate failure.  The
+  @{ML_type simpset} argument holds the full context of the current
+  Simplifier invocation, including the actual Isar proof context.  The
+  @{ML_type morphism} informs about the difference of the original
+  compilation context wrt.\ the one of the actual application later
+  on.  The optional @{keyword "identifier"} specifies theorems that
+  represent the logical content of the abstract theory of this
+  simproc.
+
+  Morphisms and identifiers are only relevant for simprocs that are
+  defined within a local target context, e.g.\ in a locale.
+
+  \item @{text "simproc add: name"} and @{text "simproc del: name"}
+  add or delete named simprocs to the current Simplifier context.  The
+  default is to add a simproc.  Note that @{command "simproc_setup"}
+  already adds the new simproc to the subsequent context.
+
+  \end{description}
+*}
+
+
+subsubsection {* Example *}
+
+text {* The following simplification procedure for @{thm
+  [source=false, show_types] unit_eq} in HOL performs fine-grained
+  control over rule application, beyond higher-order pattern matching.
+  Declaring @{thm unit_eq} as @{attribute simp} directly would make
+  the simplifier loop!  Note that a version of this simplification
+  procedure is already active in Isabelle/HOL.  *}
+
+simproc_setup unit ("x::unit") = {*
+  fn _ => fn _ => fn ct =>
+    if HOLogic.is_unit (term_of ct) then NONE
+    else SOME (mk_meta_eq @{thm unit_eq})
+*}
+
+text {* Since the Simplifier applies simplification procedures
+  frequently, it is important to make the failure check in ML
+  reasonably fast. *}
+
+
+subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
+
+text {* The core term-rewriting engine of the Simplifier is normally
+  used in combination with some add-on components that modify the
+  strategy and allow the integration of other non-Simplifier proof tools.
+  These may be reconfigured in ML as explained below.  Even if the
+  default strategies of object-logics like Isabelle/HOL are used
+  unchanged, it helps to understand how the standard Simplifier
+  strategies work. *}
+
+
+subsubsection {* The subgoaler *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
+  Proof.context -> Proof.context"} \\
+  @{index_ML Simplifier.prems_of: "Proof.context -> thm list"} \\
+  \end{mldecls}
+
+  The subgoaler is the tactic used to solve subgoals arising out of
+  conditional rewrite rules or congruence rules.  The default should
+  be simplification itself.  In rare situations, this strategy may
+  need to be changed.  For example, if the premise of a conditional
+  rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
+  ?m < ?n"}, the default strategy could loop.  % FIXME !??
+
+  \begin{description}
+
+  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
+  subgoaler of the context to @{text "tac"}.  The tactic will
+  be applied to the context of the running Simplifier instance.
+
+  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
+  set of premises from the context.  This may be non-empty only if
+  the Simplifier has been told to utilize local assumptions in the
+  first place (cf.\ the options in \secref{sec:simp-meth}).
+
+  \end{description}
+
+  As an example, consider the following alternative subgoaler:
+*}
+
+ML {*
+  fun subgoaler_tac ctxt =
+    assume_tac ORELSE'
+    resolve_tac (Simplifier.prems_of ctxt) ORELSE'
+    asm_simp_tac ctxt
+*}
+
+text {* This tactic first tries to solve the subgoal by assumption or
+  by resolving with one of the premises, calling simplification
+  only if that fails. *}
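+
+text {* A hypothetical context transformation that installs this
+  subgoaler (the function name is invented for this sketch) uses
+  @{ML Simplifier.set_subgoaler} directly: *}
+
+ML {*
+  (*illustrative only: activate the alternative subgoaler*)
+  fun with_subgoaler ctxt = Simplifier.set_subgoaler subgoaler_tac ctxt
+*}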
+
+
+subsubsection {* The solver *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML_type solver} \\
+  @{index_ML Simplifier.mk_solver: "string ->
+  (Proof.context -> int -> tactic) -> solver"} \\
+  @{index_ML_op setSolver: "Proof.context * solver -> Proof.context"} \\
+  @{index_ML_op addSolver: "Proof.context * solver -> Proof.context"} \\
+  @{index_ML_op setSSolver: "Proof.context * solver -> Proof.context"} \\
+  @{index_ML_op addSSolver: "Proof.context * solver -> Proof.context"} \\
+  \end{mldecls}
+
+  A solver is a tactic that attempts to solve a subgoal after
+  simplification.  Its core functionality is to prove trivial subgoals
+  such as @{prop "True"} and @{text "t = t"}, but object-logics might
+  be more ambitious.  For example, Isabelle/HOL performs a restricted
+  version of linear arithmetic here.
+
+  Solvers are packaged up in abstract type @{ML_type solver}, with
+  @{ML Simplifier.mk_solver} as the only operation to create a solver.
+
+  \medskip Rewriting does not instantiate unknowns.  For example,
+  rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
+  instantiating @{text "?A"}.  The solver, however, is an arbitrary
+  tactic and may instantiate unknowns as it pleases.  This is the only
+  way the Simplifier can handle a conditional rewrite rule whose
+  condition contains extra variables.  When a simplification tactic is
+  to be combined with other provers, especially with the Classical
+  Reasoner, it is important whether it can be considered safe or not.
+  For this reason a simpset contains two solvers: safe and unsafe.
+
+  The standard simplification strategy solely uses the unsafe solver,
+  which is appropriate in most cases.  For special applications where
+  the simplification process is not allowed to instantiate unknowns
+  within the goal, simplification starts with the safe solver, but may
+  still apply the ordinary unsafe one in nested simplifications for
+  conditional rules or congruences. Note that in this way the overall
+  tactic is not totally safe: it may instantiate unknowns that appear
+  also in other subgoals.
+
+  \begin{description}
+
+  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
+  "tac"} into a solver; the @{text "name"} is only attached as a
+  comment and has no further significance.
+
+  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
+  the safe solver of @{text "ctxt"}.
+
+  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
+  additional safe solver; it will be tried after the solvers which had
+  already been present in @{text "ctxt"}.
+
+  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
+  unsafe solver of @{text "ctxt"}.
+
+  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
+  additional unsafe solver; it will be tried after the solvers which
+  had already been present in @{text "ctxt"}.
+
+  \end{description}
+
+  \medskip The solver tactic is invoked with the context of the
+  running Simplifier.  Further operations
+  may be used to retrieve relevant information, such as the list of
+  local Simplifier premises via @{ML Simplifier.prems_of} --- this
+  list may be non-empty only if the Simplifier runs in a mode that
+  utilizes local assumptions (see also \secref{sec:simp-meth}).  The
+  solver is also presented the full goal including its assumptions in
+  any case.  Thus it can use these (e.g.\ by calling @{ML
+  assume_tac}), even if the Simplifier proper happens to ignore local
+  premises at the moment.
+
+  \medskip As explained before, the subgoaler is also used to solve
+  the premises of congruence rules.  These are usually of the form
+  @{text "s = ?x"}, where @{text "s"} needs to be simplified and
+  @{text "?x"} needs to be instantiated with the result.  Typically,
+  the subgoaler will invoke the Simplifier at some point, which will
+  eventually call the solver.  For this reason, solver tactics must be
+  prepared to solve goals of the form @{text "t = ?x"}, usually by
+  reflexivity.  In particular, reflexivity should be tried before any
+  of the fancy automated proof tools.
+
+  It may even happen that due to simplification the subgoal is no
+  longer an equality.  For example, @{text "False \<longleftrightarrow> ?Q"} could be
+  rewritten to @{text "\<not> ?Q"}.  To cover this case, the solver could
+  try resolving with the theorem @{text "\<not> False"} of the
+  object-logic.
+
+  \medskip
+
+  \begin{warn}
+  If a premise of a congruence rule cannot be proved, then the
+  congruence is ignored.  This should only happen if the rule is
+  \emph{conditional} --- that is, contains premises not of the form
+  @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
+  or possibly the subgoaler or solver, is faulty.
+  \end{warn}
+*}
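+
+text {* The following sketch (names invented for illustration) packages
+  a tactic that merely tries @{thm [source] refl}, @{thm [source] TrueI}
+  and proof by assumption into a solver, and adds it as an additional
+  unsafe solver: *}
+
+ML {*
+  val trivial_solver =
+    Simplifier.mk_solver "trivial" (fn _ =>
+      resolve_tac [@{thm refl}, @{thm TrueI}] ORELSE' assume_tac);
+
+  fun add_trivial_solver ctxt = ctxt addSolver trivial_solver;
+*}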
+
+
+subsubsection {* The looper *}
+
+text {*
+  \begin{mldecls}
+  @{index_ML_op setloop: "Proof.context *
+  (Proof.context -> int -> tactic) -> Proof.context"} \\
+  @{index_ML_op addloop: "Proof.context *
+  (string * (Proof.context -> int -> tactic))
+  -> Proof.context"} \\
+  @{index_ML_op delloop: "Proof.context * string -> Proof.context"} \\
+  @{index_ML Splitter.add_split: "thm -> Proof.context -> Proof.context"} \\
+  @{index_ML Splitter.del_split: "thm -> Proof.context -> Proof.context"} \\
+  \end{mldecls}
+
+  The looper is a list of tactics that are applied after
+  simplification, in case the solver failed to solve the simplified
+  goal.  If the looper succeeds, the simplification process is started
+  all over again.  Each of the subgoals generated by the looper is
+  attacked in turn, in reverse order.
+
+  A typical looper is \emph{case splitting}: the expansion of a
+  conditional.  Another possibility is to apply an elimination rule on
+  the assumptions.  More adventurous loopers could start an induction.
+
+  \begin{description}
+
+  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
+  looper tactic of @{text "ctxt"}.
+
+  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
+  additional looper tactic with name @{text "name"}, which is
+  significant for managing the collection of loopers.  The tactic will
+  be tried after the looper tactics that had already been present in
+  @{text "ctxt"}.
+
+  \item @{text "ctxt delloop name"} deletes the looper tactic that was
+  associated with @{text "name"} from @{text "ctxt"}.
+
+  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
+  for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
+
+  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
+  tactic corresponding to @{text thm} from the looper tactics of
+  @{text "ctxt"}.
+
+  \end{description}
+
+  The splitter replaces applications of a given function; the
+  right-hand side of the replacement can be anything.  For example,
+  here is a splitting rule for conditional expressions:
+
+  @{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
+
+  Another example is the elimination operator for Cartesian products
+  (which happens to be called @{text split} in Isabelle/HOL):
+
+  @{text [display] "?P (split ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (?f a b))"}
+
+  For technical reasons, there is a distinction between case splitting
+  in the conclusion and in the premises of a subgoal.  The former is
+  done by @{ML Splitter.split_tac} with rules like @{thm [source]
+  split_if} or @{thm [source] option.split}, which do not split the
+  subgoal, while the latter is done by @{ML Splitter.split_asm_tac}
+  with rules like @{thm [source] split_if_asm} or @{thm [source]
+  option.split_asm}, which split the subgoal.  The function @{ML
+  Splitter.add_split} automatically takes care of which tactic to
+  call, analyzing the form of the rules given as argument; it is the
+  same operation behind the @{text "split"} attribute or method modifier
+  syntax in the Isar source language.
+
+  Case splits should be allowed only when necessary; they are
+  expensive and hard to control.  Case-splitting on if-expressions in
+  the conclusion is usually beneficial, so it is enabled by default in
+  Isabelle/HOL and Isabelle/FOL/ZF.
+
+  \begin{warn}
+  With @{ML Splitter.split_asm_tac} as looper component, the
+  Simplifier may split subgoals!  This might cause unexpected problems
+  in tactic expressions that silently assume 0 or 1 subgoals after
+  simplification.
+  \end{warn}
+*}
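+
+text {* For illustration, the split rule for conditional expressions
+  could be removed from a context and added back again in ML as follows
+  (a plain sketch in terms of the operations above): *}
+
+ML {*
+  fun without_if_split ctxt = Splitter.del_split @{thm split_if} ctxt;
+  fun with_if_split ctxt = Splitter.add_split @{thm split_if} ctxt;
+*}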
+
+
+subsection {* Forward simplification \label{sec:simp-forward} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def simplified} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{attribute simplified} opt? @{syntax thmrefs}?
+    ;
+
+    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
+  \<close>}
+
+  \begin{description}
+  
+  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
+  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
+  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
+  The result is fully simplified by default, including assumptions and
+  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
+  the same way as for the @{text simp} method.
+
+  Note that forward simplification restricts the simplifier to its
+  most basic operation of term rewriting; solver and looper tactics
+  (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
+  @{attribute simplified} attribute should only rarely be required
+  under normal circumstances.
+
+  \end{description}
+*}
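+
+text {* For example, the contrived fact below mentions @{text "set [a, b]"},
+  which @{attribute simplified} rewrites to an explicit disjunction
+  (the names are arbitrary): *}
+
+lemma mem_cases: "x \<in> set [a, b] \<Longrightarrow> P x \<Longrightarrow> P a \<or> P b"
+  by auto
+
+lemmas mem_cases' = mem_cases [simplified]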
+
+
+section {* The Classical Reasoner \label{sec:classical} *}
+
+subsection {* Basic concepts *}
+
+text {* Although Isabelle is generic, many users will be working in
+  some extension of classical first-order logic.  Isabelle/ZF is built
+  upon theory FOL, while Isabelle/HOL conceptually contains
+  first-order logic as a fragment.  Theorem-proving in predicate logic
+  is undecidable, but many automated strategies have been developed to
+  assist in this task.
+
+  Isabelle's classical reasoner is a generic package that accepts
+  certain information about a logic and delivers a suite of automatic
+  proof tools, based on rules that are classified and declared in the
+  context.  These proof procedures are slow and simplistic compared
+  with high-end automated theorem provers, but they can save
+  considerable time and effort in practice.  They can prove theorems
+  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
+  milliseconds (including full proof reconstruction): *}
+
+lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
+  by blast
+
+lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
+  by blast
+
+text {* The proof tools are generic.  They are not restricted to
+  first-order logic, and have been heavily used in the development of
+  the Isabelle/HOL library and applications.  The tactics can be
+  traced, and their components can be called directly; in this manner,
+  any proof can be viewed interactively.  *}
+
+
+subsubsection {* The sequent calculus *}
+
+text {* Isabelle supports natural deduction, which is easy to use for
+  interactive proof.  But natural deduction does not easily lend
+  itself to automation, and has a bias towards intuitionism.  For
+  certain proofs in classical logic, it cannot be called natural.
+  The \emph{sequent calculus}, a generalization of natural deduction,
+  is easier to automate.
+
+  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
+  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
+  logic, sequents can equivalently be made from lists or multisets of
+  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
+  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
+  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
+  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
+  sequent is \textbf{basic} if its left and right sides have a common
+  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
+  valid.
+
+  Sequent rules are classified as \textbf{right} or \textbf{left},
+  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
+  Rules that operate on the right side are analogous to natural
+  deduction's introduction rules, and left rules are analogous to
+  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
+  is the rule
+  \[
+  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
+  \]
+  Applying the rule backwards, this breaks down some implication on
+  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
+  the sets of formulae that are unaffected by the inference.  The
+  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
+  single rule
+  \[
+  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
+  \]
+  This breaks down some disjunction on the right side, replacing it by
+  both disjuncts.  Thus, the sequent calculus is a kind of
+  multiple-conclusion logic.
+
+  To illustrate the use of multiple formulae on the right, let us
+  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
+  backwards, we reduce this formula to a basic sequent:
+  \[
+  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
+    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
+      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
+        {@{text "P, Q \<turnstile> Q, P"}}}}
+  \]
+
+  This example is typical of the sequent calculus: start with the
+  desired theorem and apply rules backwards in a fairly arbitrary
+  manner.  This yields a surprisingly effective proof procedure.
+  Quantifiers add only a few complications, since Isabelle handles
+  parameters and schematic variables.  See \cite[Chapter
+  10]{paulson-ml2} for further discussion.  *}
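+
+text {* For reference, the classical theorem derived above is proved
+  automatically by the tools described later in this chapter: *}
+
+lemma "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"
+  by blast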
+
+
+subsubsection {* Simulating sequents by natural deduction *}
+
+text {* Isabelle can represent sequents directly, as in the
+  object-logic LK.  But natural deduction is easier to work with, and
+  most object-logics employ it.  Fortunately, we can simulate the
+  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
+  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
+  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
+  Elim-resolution plays a key role in simulating sequent proofs.
+
+  We can easily handle reasoning on the left.  Elim-resolution with
+  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
+  a similar effect as the corresponding sequent rules.  For the other
+  connectives, we use sequent-style elimination rules instead of
+  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
+  But note that the rule @{text "(\<not>L)"} has no effect under our
+  representation of sequents!
+  \[
+  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
+  \]
+
+  What about reasoning on the right?  Introduction rules can only
+  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
+  other right-side formulae are represented as negated assumptions,
+  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
+  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
+  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
+
+  To ensure that swaps occur only when necessary, each introduction
+  rule is converted into a swapped form: it is resolved with the
+  second premise of @{text "(swap)"}.  The swapped form of @{text
+  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
+  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
+
+  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
+  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
+
+  Swapped introduction rules are applied using elim-resolution, which
+  deletes the negated formula.  Our representation of sequents also
+  requires the use of ordinary introduction rules.  If we had no
+  regard for readability of intermediate goal states, we could treat
+  the right side more uniformly by representing sequents as @{text
+  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
+*}
+
+
+subsubsection {* Extra rules for the sequent calculus *}
+
+text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
+  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
+  In addition, we need rules to embody the classical equivalence
+  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
+  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
+  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
+
+  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
+  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
+
+  Quantifier replication also requires special rules.  In classical
+  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
+  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
+  \[
+  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
+  \qquad
+  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
+  \]
+  Thus both kinds of quantifier may be replicated.  Theorems requiring
+  multiple uses of a universal formula are easy to invent; consider
+  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
+  @{text "n > 1"}.  Natural examples of the multiple use of an
+  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
+  \<longrightarrow> P y"}.
+
+  Forgoing quantifier replication loses completeness, but gains
+  decidability, since the search space becomes finite.  Many useful
+  theorems can be proved without replication, and the search generally
+  delivers its verdict in a reasonable time.  To adopt this approach,
+  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
+  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
+  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
+  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
+
+  Elim-resolution with this rule will delete the universal formula
+  after a single use.  To replicate universal quantifiers, replace the
+  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
+
+  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
+  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
+
+  All introduction rules mentioned above are also useful in swapped
+  form.
+
+  Replication makes the search space infinite; we must apply the rules
+  with care.  The classical reasoner distinguishes between safe and
+  unsafe rules, applying the latter only when there is no alternative.
+  Depth-first search may well go down a blind alley; best-first search
+  is better behaved in an infinite search space.  However, quantifier
+  replication is too expensive to prove any but the simplest theorems.
+*}
+
+
+subsection {* Rule declarations *}
+
+text {* The proof tools of the Classical Reasoner depend on
+  collections of rules declared in the context, which are classified
+  as introduction, elimination or destruction and as \emph{safe} or
+  \emph{unsafe}.  In general, safe rules can be attempted blindly,
+  while unsafe rules must be used with care.  A safe rule must never
+  reduce a provable goal to an unprovable set of subgoals.
+
+  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
+  \<or> Q"} to @{text "P"}, which might turn out to be a premature choice of an
+  unprovable subgoal.  Any rule is unsafe whose premises contain new
+  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
+  unsafe, since it is applied via elim-resolution, which discards the
+  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
+  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
+  unsafe for similar reasons.  The quantifier duplication rule @{text
+  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
+  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
+  looping.  In classical first-order logic, all rules are safe except
+  those mentioned above.
+
+  The safe~/ unsafe distinction is vague, and may be regarded merely
+  as a way of giving some rules priority over others.  One could argue
+  that @{text "(\<or>E)"} is unsafe, because repeated application of it
+  could generate exponentially many subgoals.  Induction rules are
+  unsafe because inductive proofs are difficult to set up
+  automatically.  Any inference is unsafe that instantiates an unknown
+  in the proof state --- thus matching must be used, rather than
+  unification.  Even proof by assumption is unsafe if it instantiates
+  unknowns shared with other subgoals.
+
+  \begin{matharray}{rcl}
+    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def intro} & : & @{text attribute} \\
+    @{attribute_def elim} & : & @{text attribute} \\
+    @{attribute_def dest} & : & @{text attribute} \\
+    @{attribute_def rule} & : & @{text attribute} \\
+    @{attribute_def iff} & : & @{text attribute} \\
+    @{attribute_def swapped} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
+    ;
+    @@{attribute rule} 'del'
+    ;
+    @@{attribute iff} (((() | 'add') '?'?) | 'del')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "print_claset"} prints the collection of rules
+  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
+  within the context.
+
+  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
+  declare introduction, elimination, and destruction rules,
+  respectively.  By default, rules are considered as \emph{unsafe}
+  (i.e.\ not applied blindly without backtracking), while ``@{text
+  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
+  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
+  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
+  of the @{method rule} method).  The optional natural number
+  specifies an explicit weight argument, which is ignored by the
+  automated reasoning tools, but determines the search order of single
+  rule steps.
+
+  Introduction rules are those that can be applied using ordinary
+  resolution.  Their swapped forms are generated internally, which
+  will be applied using elim-resolution.  Elimination rules are
+  applied using elim-resolution.  Rules are sorted by the number of
+  new subgoals they will yield; rules that generate the fewest
+  subgoals will be tried first.  Otherwise, later declarations take
+  precedence over earlier ones.
+
+  Rules already present in the context with the same classification
+  are ignored.  A warning is printed if the rule has already been
+  added with some other classification, but the rule is added anyway
+  as requested.
+
+  \item @{attribute rule}~@{text del} deletes all occurrences of a
+  rule from the classical context, regardless of its classification as
+  introduction~/ elimination~/ destruction and safe~/ unsafe.
+
+  \item @{attribute iff} declares logical equivalences to the
+  Simplifier and the Classical reasoner at the same time.
+  Non-conditional rules result in a safe introduction and elimination
+  pair; conditional ones are considered unsafe.  Rules with negative
+  conclusion are automatically inverted (using @{text "\<not>"}-elimination
+  internally).
+
+  The ``@{text "?"}'' version of @{attribute iff} declares rules to
+  the Isabelle/Pure context only, and omits the Simplifier
+  declaration.
+
+  \item @{attribute swapped} turns an introduction rule into an
+  elimination, by resolving with the classical swap principle @{text
+  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
+  illustrative purposes: the Classical Reasoner already swaps rules
+  internally as explained above.
+
+  \end{description}
+*}
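+
+text {* For example, an alternative (and logically redundant) elimination
+  rule for conjunction can be proved and declared as unsafe directly
+  within the @{command lemma} statement; it serves purely as an
+  illustration of the attribute syntax: *}
+
+lemma conj_commute_elim [elim]: "P \<and> Q \<Longrightarrow> (Q \<Longrightarrow> P \<Longrightarrow> R) \<Longrightarrow> R"
+  by blast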
+
+
+subsection {* Structured methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def rule} & : & @{text method} \\
+    @{method_def contradiction} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method rule} @{syntax thmrefs}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{method rule} as offered by the Classical Reasoner is a
+  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
+  versions work the same, but the classical version observes the
+  classical rule context in addition to that of Isabelle/Pure.
+
+  Common object logics (HOL, ZF, etc.) declare a rich collection of
+  classical rules (even if these would qualify as intuitionistic
+  ones), but make only a few declarations to the rule context of
+  Isabelle/Pure (\secref{sec:pure-meth-att}).
+
+  \item @{method contradiction} solves some goal by contradiction,
+  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
+  facts, which are guaranteed to participate, may appear in either
+  order.
+
+  \end{description}
+*}
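+
+text {* A minimal example of @{method contradiction} with chained facts: *}
+
+lemma
+  assumes "\<not> A" and "A"
+  shows "B"
+  using assms by contradiction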
+
+
+subsection {* Fully automated methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def blast} & : & @{text method} \\
+    @{method_def auto} & : & @{text method} \\
+    @{method_def force} & : & @{text method} \\
+    @{method_def fast} & : & @{text method} \\
+    @{method_def slow} & : & @{text method} \\
+    @{method_def best} & : & @{text method} \\
+    @{method_def fastforce} & : & @{text method} \\
+    @{method_def slowsimp} & : & @{text method} \\
+    @{method_def bestsimp} & : & @{text method} \\
+    @{method_def deepen} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
+    ;
+    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
+    ;
+    @@{method force} (@{syntax clasimpmod} * )
+    ;
+    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
+    ;
+    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
+      (@{syntax clasimpmod} * )
+    ;
+    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
+    ;
+    @{syntax_def clamod}:
+      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
+    ;
+    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
+      ('cong' | 'split') (() | 'add' | 'del') |
+      'iff' (((() | 'add') '?'?) | 'del') |
+      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
+  \<close>}
+
+  \begin{description}
+
+  \item @{method blast} is a separate classical tableau prover that
+  uses the same classical rule declarations as explained before.
+
+  Proof search is coded directly in ML using special data structures.
+  A successful proof is then reconstructed using regular Isabelle
+  inferences.  It is faster and more powerful than the other classical
+  reasoning tools, but has major limitations too.
+
+  \begin{itemize}
+
+  \item It does not use the classical wrapper tacticals, such as the
+  integration with the Simplifier of @{method fastforce}.
+
+  \item It does not perform higher-order unification, as needed by the
+  rule @{thm [source=false] rangeI} in HOL.  There are often
+  alternatives to such rules, for example @{thm [source=false]
+  range_eqI}.
+
+  \item Function variables may only be applied to parameters of the
+  subgoal.  (This restriction arises because the prover does not use
+  higher-order unification.)  If other function variables are present
+  then the prover will fail with the message \texttt{Function Var's
+  argument not a bound variable}.
+
+  \item Its proof strategy is more general than @{method fast} but can
+  be slower.  If @{method blast} fails or seems to be running forever,
+  try @{method fast} and the other proof tools described below.
+
+  \end{itemize}
+
+  The optional integer argument specifies a bound for the number of
+  unsafe steps used in a proof.  By default, @{method blast} starts
+  with a bound of 0 and increases it successively to 20.  In contrast,
+  @{text "(blast lim)"} tries to prove the goal using a search bound
+  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
+  be made much faster by supplying the successful search bound to this
+  proof method instead.
+
+  \item @{method auto} combines classical reasoning with
+  simplification.  It is intended for situations where there are a lot
+  of mostly trivial subgoals; it proves all the easy ones, leaving the
+  ones it cannot prove.  Occasionally, attempting to prove the hard
+  ones may take a long time.
+
+  The optional depth arguments in @{text "(auto m n)"} refer to its
+  builtin classical reasoning procedures: @{text m} (default 4) is for
+  @{method blast}, which is tried first, and @{text n} (default 2) is
+  for a slower but more general alternative that also takes wrappers
+  into account.
+
+  \item @{method force} is intended to prove the first subgoal
+  completely, using many fancy proof tools and performing a rather
+  exhaustive search.  As a result, proof attempts may take rather long
+  or diverge easily.
+
+  \item @{method fast}, @{method best}, @{method slow} attempt to
+  prove the first subgoal using sequent-style reasoning as explained
+  before.  Unlike @{method blast}, they construct proofs directly in
+  Isabelle.
+
+  There is a difference in search strategy and back-tracking: @{method
+  fast} uses depth-first search and @{method best} uses best-first
+  search (guided by a heuristic function: normally the total size of
+  the proof state).
+
+  Method @{method slow} is like @{method fast}, but conducts a broader
+  search: it may, when backtracking from a failed proof attempt, undo
+  even the step of proving a subgoal by assumption.
+
+  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
+  are like @{method fast}, @{method slow}, @{method best},
+  respectively, but use the Simplifier as an additional wrapper. The name
+  @{method fastforce} reflects the behaviour of this popular method
+  better without requiring an understanding of its implementation.
+
+  \item @{method deepen} works by exhaustive search up to a certain
+  depth.  The start depth is 4 (unless specified explicitly), and the
+  depth is increased iteratively up to 10.  Unsafe rules are modified
+  to preserve the formula they act on, so that it can be used repeatedly.
+  This method can prove more goals than @{method fast}, but is much
+  slower, for example if the assumptions have many universal
+  quantifiers.
+
+  \end{description}
+
+  Any of the above methods support additional modifiers of the context
+  of classical (and simplifier) rules, but the ones related to the
+  Simplifier are explicitly prefixed by @{text simp} here.  The
+  semantics of these ad-hoc rule declarations is analogous to the
+  attributes given before.  Facts provided by forward chaining are
+  inserted into the goal before commencing proof search.
+*}
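+
+text {* A small example of the modifier syntax: the additional simp rule
+  @{thm [source] neq_Nil_conv} turns the premise @{text "xs \<noteq> []"} into
+  an explicit decomposition of the list (this particular goal could
+  certainly be solved in other ways as well): *}
+
+lemma "xs \<noteq> [] \<Longrightarrow> \<forall>x \<in> set xs. P x \<Longrightarrow> P (hd xs)"
+  by (auto simp add: neq_Nil_conv)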
+
+
+subsection {* Partially automated methods *}
+
+text {* These proof methods may help in situations when the
+  fully-automated tools fail.  The result is a simpler subgoal that
+  can be tackled by other means, such as by manual instantiation of
+  quantifiers.
+
+  \begin{matharray}{rcl}
+    @{method_def safe} & : & @{text method} \\
+    @{method_def clarify} & : & @{text method} \\
+    @{method_def clarsimp} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
+    ;
+    @@{method clarsimp} (@{syntax clasimpmod} * )
+  \<close>}
+
+  \begin{description}
+
+  \item @{method safe} repeatedly performs safe steps on all subgoals.
+  It is deterministic, with at most one outcome.
+
+  \item @{method clarify} performs a series of safe steps without
+  splitting subgoals; see also @{method clarify_step}.
+
+  \item @{method clarsimp} acts like @{method clarify}, but also does
+  simplification.  Note that if the Simplifier context includes a
+  splitter for the premises, the subgoal may still be split.
+
+  \end{description}
+*}
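+
+text {* For example, @{method clarify} reduces the following goal to
+  @{text "\<forall>x. P x \<longrightarrow> Q x \<Longrightarrow> P a \<Longrightarrow> Q a"}, without touching the
+  universal quantifier in the premise; the rest is finished manually
+  (one of many possible continuations): *}
+
+lemma "(\<forall>x. P x \<longrightarrow> Q x) \<and> P a \<longrightarrow> Q a"
+  apply clarify
+  apply (drule spec [of _ a])
+  apply (erule mp)
+  apply assumption
+  done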
+
+
+subsection {* Single-step tactics *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def safe_step} & : & @{text method} \\
+    @{method_def inst_step} & : & @{text method} \\
+    @{method_def step} & : & @{text method} \\
+    @{method_def slow_step} & : & @{text method} \\
+    @{method_def clarify_step} & : &  @{text method} \\
+  \end{matharray}
+
+  These are the primitive tactics behind the automated proof methods
+  of the Classical Reasoner.  By calling them yourself, you can
+  execute these procedures one step at a time.
+
+  \begin{description}
+
+  \item @{method safe_step} performs a safe step on the first subgoal.
+  The safe wrapper tacticals are applied to a tactic that may include
+  proof by assumption or Modus Ponens (taking care not to instantiate
+  unknowns), or substitution.
+
+  \item @{method inst_step} is like @{method safe_step}, but allows
+  unknowns to be instantiated.
+
+  \item @{method step} is the basic step of the proof procedure, it
+  operates on the first subgoal.  The unsafe wrapper tacticals are
+  applied to a tactic that tries @{method safe}, @{method inst_step},
+  or applies an unsafe rule from the context.
+
+  \item @{method slow_step} resembles @{method step}, but allows
+  backtracking between using safe rules with instantiation (@{method
+  inst_step}) and using unsafe rules.  The resulting search space is
+  larger.
+
+  \item @{method clarify_step} performs a safe step on the first
+  subgoal; no splitting step is applied.  For example, the subgoal
+  @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
+  Modus Ponens, etc., may be performed provided they do not
+  instantiate unknowns.  Assumptions of the form @{text "x = t"} may
+  be eliminated.  The safe wrapper tactical is applied.
+
+  \end{description}
+*}
+
+
+subsection {* Modifying the search step *}
+
+text {*
+  \begin{mldecls}
+    @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
+    @{index_ML_op addSWrapper: "Proof.context *
+  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
+    @{index_ML_op addSbefore: "Proof.context *
+  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
+    @{index_ML_op addSafter: "Proof.context *
+  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
+    @{index_ML_op delSWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
+    @{index_ML_op addWrapper: "Proof.context *
+  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
+    @{index_ML_op addbefore: "Proof.context *
+  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
+    @{index_ML_op addafter: "Proof.context *
+  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
+    @{index_ML_op delWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
+    @{index_ML addSss: "Proof.context -> Proof.context"} \\
+    @{index_ML addss: "Proof.context -> Proof.context"} \\
+  \end{mldecls}
+
+  The proof strategy of the Classical Reasoner is simple.  Perform as
+  many safe inferences as possible; or else, apply certain safe rules,
+  allowing instantiation of unknowns; or else, apply an unsafe rule.
+  The tactics also eliminate assumptions of the form @{text "x = t"}
+  by substitution if they have been set up to do so.  They may perform
+  a form of Modus Ponens: if there are assumptions @{text "P \<longrightarrow> Q"} and
+  @{text "P"}, then replace @{text "P \<longrightarrow> Q"} by @{text "Q"}.
+
+  The classical reasoning tools --- except @{method blast} --- allow
+  this basic proof strategy to be modified by applying two lists of
+  arbitrary \emph{wrapper tacticals} to it.  The first wrapper list,
+  which is considered to contain safe wrappers only, affects @{method
+  safe_step} and all the tactics that call it.  The second one, which
+  may contain unsafe wrappers, affects the unsafe parts of @{method
+  step}, @{method slow_step}, and the tactics that call them.  A
+  wrapper transforms each step of the search, for example by
+  attempting other tactics before or after the original step tactic.
+  All members of a wrapper list are applied in turn to the respective
+  step tactic.
+
+  Initially the two wrapper lists are empty, which means no
+  modification of the step tactics. Safe and unsafe wrappers are added
+  to a claset with the functions given below, supplying them with
+  wrapper names.  These names may be used to selectively delete
+  wrappers.
+
+  \begin{description}
+
+  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
+  which should yield a safe tactic, to modify the existing safe step
+  tactic.
+
+  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
+  safe wrapper, such that it is tried \emph{before} each safe step of
+  the search.
+
+  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
+  safe wrapper, such that it is tried when a safe step of the search
+  would fail.
+
+  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
+  the given name.
+
+  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
+  modify the existing (unsafe) step tactic.
+
+  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
+  unsafe wrapper, such that its result is concatenated
+  \emph{before} the result of each unsafe step.
+
+  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
+  unsafe wrapper, such that its result is concatenated \emph{after}
+  the result of each unsafe step.
+
+  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
+  the given name.
+
+  \item @{text "addSss"} adds the simpset of the context to its
+  classical set. The assumptions and goal will be simplified, in a
+  rather safe way, after each safe step of the search.
+
+  \item @{text "addss"} adds the simpset of the context to its
+  classical set. The assumptions and goal will be simplified before
+  each unsafe step of the search.
+
+  \end{description}
+*}
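+
+text {* As a purely illustrative sketch (the wrapper name is invented
+  for this example), a tactic may be attached before each safe step and
+  removed again by name: *}
+
+ML {*
+  fun add_eq_assume_first ctxt =
+    ctxt addSbefore ("eq_assume_first", fn _ => eq_assume_tac);
+
+  fun del_eq_assume_first ctxt = ctxt delSWrapper "eq_assume_first";
+*}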
+
+
+section {* Object-logic setup \label{sec:object-logic} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{method_def atomize} & : & @{text method} \\
+    @{attribute_def atomize} & : & @{text attribute} \\
+    @{attribute_def rule_format} & : & @{text attribute} \\
+    @{attribute_def rulify} & : & @{text attribute} \\
+  \end{matharray}
+
+  The very starting point for any Isabelle object-logic is a ``truth
+  judgment'' that links object-level statements to the meta-logic
+  (with its minimal language of @{text prop} that covers universal
+  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).
+
+  Common object-logics are sufficiently expressive to internalize rule
+  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
+  language.  This is useful in certain situations where a rule needs
+  to be viewed as an atomic statement from the meta-level perspective,
+  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.
+
+  From the following language elements, only the @{method atomize}
+  method and @{attribute rule_format} attribute are occasionally
+  required by end-users, the rest is for those who need to setup their
+  own object-logic.  In the latter case existing formulations of
+  Isabelle/FOL or Isabelle/HOL may be taken as realistic examples.
+
+  Generic tools may refer to the information provided by object-logic
+  declarations internally.
+
+  @{rail \<open>
+    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
+    ;
+    @@{attribute atomize} ('(' 'full' ')')?
+    ;
+    @@{attribute rule_format} ('(' 'noasm' ')')?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
+  @{text c} as the truth judgment of the current object-logic.  Its
+  type @{text \<sigma>} should specify a coercion of the category of
+  object-level propositions to @{text prop} of the Pure meta-logic;
+  the mixfix annotation @{text "(mx)"} would typically just link the
+  object language (internally of syntactic category @{text logic})
+  with that of @{text prop}.  Only one @{command "judgment"}
+  declaration may be given in any theory development.
+  
+  \item @{method atomize} (as a method) rewrites any non-atomic
+  premises of a sub-goal, using the meta-level equations declared via
+  @{attribute atomize} (as an attribute) beforehand.  As a result,
+  heavily nested goals become amenable to fundamental operations such
+  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
+  "(full)"}'' option here means to turn the whole subgoal into an
+  object-statement (if possible), including the outermost parameters
+  and assumptions as well.
+
+  A typical collection of @{attribute atomize} rules for a particular
+  object-logic would provide an internalization for each of the
+  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
+  Meta-level conjunction should be covered as well (this is
+  particularly important for locales, see \secref{sec:locale}).
+
+  \item @{attribute rule_format} rewrites a theorem by the equalities
+  declared as @{attribute rulify} rules in the current object-logic.
+  By default, the result is fully normalized, including assumptions
+  and conclusions at any depth.  The @{text "(noasm)"} option
+  restricts the transformation to the conclusion of a rule.
+
+  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
+  rule_format} is to replace (bounded) universal quantification
+  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
+  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
+
+  \end{description}
+*}
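+
+text {* For example, a fact stated with object-level connectives can be
+  turned into a proper rule at declaration time; the lemma and its name
+  below are merely illustrative.  The stored result will be the rule
+  @{text "n < m \<Longrightarrow> n < Suc m"}. *}
+
+lemma less_Suc_rule [rule_format]: "\<forall>n. n < m \<longrightarrow> n < Suc m"
+  by (auto intro: less_SucI)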
+
+
+section {* Tracing higher-order unification *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
+    @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
+    @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
+    @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
+  \end{tabular}
+  \medskip
+
+  Higher-order unification works well in most practical situations,
+  but sometimes needs extra care to identify problems.  These tracing
+  options may help.
+
+  \begin{description}
+
+  \item @{attribute unify_trace_simp} controls tracing of the
+  simplification phase of higher-order unification.
+
+  \item @{attribute unify_trace_types} controls warnings of
+  incompleteness, when unification is not considering all possible
+  instantiations of schematic type variables.
+
+  \item @{attribute unify_trace_bound} determines the depth at which
+  unification starts to print tracing information; a value of 0 means
+  full tracing.  At the default value, tracing information is almost
+  never printed in practice.
+
+  \item @{attribute unify_search_bound} prevents unification from
+  searching past the given depth.  Because of this bound, higher-order
+  unification cannot return an infinite sequence, though it can return
+  an exponentially long one.  The search rarely approaches the default
+  value in practice.  If the search is cut off, unification prints a
+  warning ``Unification bound exceeded''.
+
+  \end{description}
+
+  \begin{warn}
+  Options for unification cannot be modified in a local context.  Only
+  the global theory content is taken into account.
+  \end{warn}
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/HOL_Specific.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,2677 @@
+theory HOL_Specific
+imports Base Main "~~/src/HOL/Library/Old_Recdef" "~~/src/Tools/Adhoc_Overloading"
+begin
+
+chapter {* Higher-Order Logic *}
+
+text {* Isabelle/HOL is based on Higher-Order Logic, a polymorphic
+  version of Church's Simple Theory of Types.  HOL can be best
+  understood as a simply-typed version of classical set theory.  The
+  logic was first implemented in Gordon's HOL system
+  \cite{mgordon-hol}.  It extends Church's original logic
+  \cite{church40} by explicit type variables (naive polymorphism) and
+  a sound axiomatization scheme for new types based on subsets of
+  existing types.
+
+  Andrews's book \cite{andrews86} is a full description of the
+  original Church-style higher-order logic, with proofs of correctness
+  and completeness wrt.\ certain set-theoretic interpretations.  The
+  particular extensions of Gordon-style HOL are explained semantically
+  in two chapters of the 1993 HOL book \cite{pitts93}.
+
+  Experience with HOL over decades has demonstrated that higher-order
+  logic is widely applicable in many areas of mathematics and computer
+  science.  In a sense, Higher-Order Logic is simpler than First-Order
+  Logic, because there are fewer restrictions and special cases.  Note
+  that HOL is \emph{weaker} than FOL with axioms for ZF set theory,
+  which is traditionally considered the standard foundation of regular
+  mathematics, but for most applications this does not matter.  If you
+  prefer ML to Lisp, you will probably prefer HOL to ZF.
+
+  \medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
+  functional programming.  Function application is curried.  To apply
+  the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
+  arguments @{text a} and @{text b} in HOL, you simply write @{text "f
+  a b"} (as in ML or Haskell).  There is no ``apply'' operator; the
+  existing application of the Pure @{text "\<lambda>"}-calculus is re-used.
+  Note that in HOL @{text "f (a, b)"} means ``@{text "f"} applied to
+  the pair @{text "(a, b)"}'' (which is notation for @{text "Pair a
+  b"}).  The latter typically introduces extra formal effort that can
+  be avoided by currying functions by default.  Explicit tuples are as
+  infrequent in HOL formalizations as in good ML or Haskell programs.
+
+  \medskip Isabelle/HOL has a distinct feel, compared to other
+  object-logics like Isabelle/ZF.  It identifies object-level types
+  with meta-level types, taking advantage of the default
+  type-inference mechanism of Isabelle/Pure.  HOL fully identifies
+  object-level functions with meta-level functions, with native
+  abstraction and application.
+
+  These identifications allow Isabelle to support HOL particularly
+  nicely, but they also mean that HOL requires some sophistication
+  from the user.  In particular, it demands an understanding of
+  Hindley-Milner type-inference with type-classes, both of which are
+  used extensively in the standard libraries and applications.
+  Beginners can set
+  @{attribute show_types} or even @{attribute show_sorts} to get more
+  explicit information about the result of type-inference.  *}
+
+
+chapter {* Derived specification elements *}
+
+section {* Inductive and coinductive definitions \label{sec:hol-inductive} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "coinductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "coinductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "print_inductives"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def (HOL) mono} & : & @{text attribute} \\
+  \end{matharray}
+
+  An \emph{inductive definition} specifies the least predicate or set
+  @{text R} closed under given rules: applying a rule to elements of
+  @{text R} yields a result within @{text R}.  For example, a
+  structural operational semantics is an inductive definition of an
+  evaluation relation.
+
+  Dually, a \emph{coinductive definition} specifies the greatest
+  predicate or set @{text R} that is consistent with given rules:
+  every element of @{text R} can be seen as arising by applying a rule
+  to elements of @{text R}.  An important example is using
+  bisimulation relations to formalise equivalence of processes and
+  infinite data structures.
+
+  Both inductive and coinductive definitions are based on the
+  Knaster-Tarski fixed-point theorem for complete lattices.  The
+  collection of introduction rules given by the user determines a
+  functor on subsets of set-theoretic relations.  The required
+  monotonicity of the recursion scheme is proven as a prerequisite to
+  the fixed-point definition and the resulting consequences.  This
+  works by pushing inclusion through logical connectives and any other
+  operator that might be wrapped around recursive occurrences of the
+  defined relation: there must be a monotonicity theorem of the form
+  @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for each premise @{text "\<M> R t"} in an
+  introduction rule.  The default rule declarations of Isabelle/HOL
+  already take care of most common situations.
+
+  @{rail \<open>
+    (@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
+      @@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
+    @{syntax target}? \<newline>
+    @{syntax "fixes"} (@'for' @{syntax "fixes"})? (@'where' clauses)? \<newline>
+    (@'monos' @{syntax thmrefs})?
+    ;
+    clauses: (@{syntax thmdecl}? @{syntax prop} + '|')
+    ;
+    @@{attribute (HOL) mono} (() | 'add' | 'del')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "inductive"} and @{command (HOL)
+  "coinductive"} define (co)inductive predicates from the introduction
+  rules.
+
+  The propositions given as @{text "clauses"} in the @{keyword
+  "where"} part are either rules of the usual @{text "\<And>/\<Longrightarrow>"} format
+  (with arbitrary nesting), or equalities using @{text "\<equiv>"}.  The
+  latter specifies extra-logical abbreviations in the sense of
+  @{command_ref abbreviation}.  Introducing abstract syntax
+  simultaneously with the actual introduction rules is occasionally
+  useful for complex specifications.
+
+  The optional @{keyword "for"} part contains a list of parameters of
+  the (co)inductive predicates that remain fixed throughout the
+  definition, in contrast to arguments of the relation that may vary
+  in each occurrence within the given @{text "clauses"}.
+
+  The optional @{keyword "monos"} declaration contains additional
+  \emph{monotonicity theorems}, which are required for each operator
+  applied to a recursive set in the introduction rules.
+
+  \item @{command (HOL) "inductive_set"} and @{command (HOL)
+  "coinductive_set"} are wrappers for the previous commands for
+  native HOL predicates.  This allows (co)inductive sets to be
+  defined, where multiple arguments are simulated via tuples.
+
+  \item @{command "print_inductives"} prints (co)inductive definitions and
+  monotonicity rules.
+
+  \item @{attribute (HOL) mono} declares monotonicity rules in the
+  context.  These rules are involved in the automated monotonicity
+  proof of the above inductive and coinductive definitions.
+
+  \end{description}
+*}
+
+
+subsection {* Derived rules *}
+
+text {* A (co)inductive definition of @{text R} provides the following
+  main theorems:
+
+  \begin{description}
+
+  \item @{text R.intros} is the list of introduction rules as proven
+  theorems, for the recursive predicates (or sets).  The rules are
+  also available individually, using the names given them in the
+  theory file;
+
+  \item @{text R.cases} is the case analysis (or elimination) rule;
+
+  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
+  rule;
+
+  \item @{text R.simps} is the equation unrolling the fixpoint of the
+  predicate one step.
+
+  \end{description}
+
+  When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are
+  defined simultaneously, the list of introduction rules is called
+  @{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the case analysis rules are
+  called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
+  of mutual induction rules is called @{text
+  "R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
+*}
+
+
+subsection {* Monotonicity theorems *}
+
+text {* The context maintains a default set of theorems that are used
+  in monotonicity proofs.  New rules can be declared via the
+  @{attribute (HOL) mono} attribute.  See the main Isabelle/HOL
+  sources for some examples.  The general format of such monotonicity
+  theorems is as follows:
+
+  \begin{itemize}
+
+  \item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
+  monotonicity of inductive definitions whose introduction rules have
+  premises involving terms such as @{text "\<M> R t"}.
+
+  \item Monotonicity theorems for logical operators, which are of the
+  general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}.  For example, in
+  the case of the operator @{text "\<or>"}, the corresponding theorem is
+  \[
+  \infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
+  \]
+
+  \item De Morgan style equations for reasoning about the ``polarity''
+  of expressions, e.g.
+  \[
+  @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
+  @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
+  \]
+
+  \item Equations for reducing complex operators to more primitive
+  ones whose monotonicity can easily be proved, e.g.
+  \[
+  @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
+  @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
+  \]
+
+  \end{itemize}
+*}
+
+subsubsection {* Examples *}
+
+text {* The finite powerset operator can be defined inductively like this: *}
+
+inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
+where
+  empty: "{} \<in> Fin A"
+| insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
+
+text {* The accessible part of a relation is defined as follows: *}
+
+inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
+  for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infix "\<prec>" 50)
+where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
+
+text {* Common logical connectives can be easily characterized as
+non-recursive inductive definitions with parameters, but without
+arguments. *}
+
+inductive AND for A B :: bool
+where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
+
+inductive OR for A B :: bool
+where "A \<Longrightarrow> OR A B"
+  | "B \<Longrightarrow> OR A B"
+
+inductive EXISTS for B :: "'a \<Rightarrow> bool"
+where "B a \<Longrightarrow> EXISTS B"
+
+text {* Here the @{text "cases"} or @{text "induct"} rules produced by
+  the @{command inductive} package coincide with the expected
+  elimination rules for Natural Deduction.  Already in the original
+  article by Gerhard Gentzen \cite{Gentzen:1935} there is a hint that
+  each connective can be characterized by its introductions, and the
+  elimination can be constructed systematically. *}
+
+
+section {* Recursive functions \label{sec:recursion} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "function"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def (HOL) "termination"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def (HOL) "fun_cases"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) primrec} @{syntax target}? @{syntax "fixes"} @'where' equations
+    ;
+    (@@{command (HOL) fun} | @@{command (HOL) function}) @{syntax target}? functionopts?
+      @{syntax "fixes"} \<newline> @'where' equations
+    ;
+
+    equations: (@{syntax thmdecl}? @{syntax prop} + '|')
+    ;
+    functionopts: '(' (('sequential' | 'domintros') + ',') ')'
+    ;
+    @@{command (HOL) termination} @{syntax term}?
+    ;
+    @@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "primrec"} defines primitive recursive
+  functions over datatypes (see also @{command_ref (HOL) datatype} and
+  @{command_ref (HOL) rep_datatype}).  The given @{text equations}
+  specify reduction rules that are produced by instantiating the
+  generic combinator for primitive recursion that is available for
+  each datatype.
+
+  Each equation needs to be of the form:
+
+  @{text [display] "f x\<^sub>1 \<dots> x\<^sub>m (C y\<^sub>1 \<dots> y\<^sub>k) z\<^sub>1 \<dots> z\<^sub>n = rhs"}
+
+  such that @{text C} is a datatype constructor, @{text rhs} contains
+  only the free variables on the left-hand side (or from the context),
+  and all recursive occurrences of @{text "f"} in @{text "rhs"} are of
+  the form @{text "f \<dots> y\<^sub>i \<dots>"} for some @{text i}.  At most one
+  reduction rule for each constructor can be given.  The order does
+  not matter.  For missing constructors, the function is defined to
+  return a default value, but this equation is made difficult to
+  access for users.
+
+  The reduction rules are declared as @{attribute simp} by default,
+  which enables standard proof methods like @{method simp} and
+  @{method auto} to normalize expressions of @{text "f"} applied to
+  datatype constructions, by simulating symbolic computation via
+  rewriting.
+
+  \item @{command (HOL) "function"} defines functions by general
+  wellfounded recursion. A detailed description with examples can be
+  found in \cite{isabelle-function}. The function is specified by a
+  set of (possibly conditional) recursive equations with arbitrary
+  pattern matching. The command generates proof obligations for the
+  completeness and the compatibility of patterns.
+
+  The defined function is considered partial, and the resulting
+  simplification rules (named @{text "f.psimps"}) and induction rule
+  (named @{text "f.pinduct"}) are guarded by a generated domain
+  predicate @{text "f_dom"}. The @{command (HOL) "termination"}
+  command can then be used to establish that the function is total.
+
+  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command
+  (HOL) "function"}~@{text "(sequential)"}, followed by automated
+  proof attempts regarding pattern matching and termination.  See
+  \cite{isabelle-function} for further details.
+
+  \item @{command (HOL) "termination"}~@{text f} commences a
+  termination proof for the previously defined function @{text f}.  If
+  this is omitted, the command refers to the most recent function
+  definition.  After the proof is closed, the recursive equations and
+  the induction principle are established.
+
+  \item @{command (HOL) "fun_cases"} generates specialized elimination
+  rules for function equations. It expects one or more function equations
+  and produces rules that eliminate the given equalities, following the cases
+  given in the function definition.
+  \end{description}
+
+  Recursive definitions introduced by the @{command (HOL) "function"}
+  command accommodate reasoning by induction (cf.\ @{method induct}):
+  rule @{text "f.induct"} refers to a specific induction rule, with
+  parameters named according to the user-specified equations. Cases
+  are numbered starting from 1.  For @{command (HOL) "primrec"}, the
+  induction principle coincides with structural recursion on the
+  datatype where the recursion is carried out.
+
+  The equations provided by these packages may later be referred to as the
+  theorem list @{text "f.simps"}, where @{text f} is the (collective)
+  name of the functions defined.  Individual equations may be named
+  explicitly as well.
+
+  The @{command (HOL) "function"} command accepts the following
+  options.
+
+  \begin{description}
+
+  \item @{text sequential} enables a preprocessor which disambiguates
+  overlapping patterns by making them mutually disjoint.  Earlier
+  equations take precedence over later ones.  This allows one to give the
+  specification in a format very similar to functional programming.
+  Note that the resulting simplification and induction rules
+  correspond to the transformed specification, not the one given
+  originally. This usually means that each equation given by the user
+  may result in several theorems.  Also note that this automatic
+  transformation only works for ML-style datatype patterns.
+
+  \item @{text domintros} enables the automated generation of
+  introduction rules for the domain predicate. While mostly not
+  needed, they can be helpful in some proofs about partial functions.
+
+  \end{description}
+*}
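+
+text {* \medskip As a small additional sketch (the constant name
+  @{text fib} is ad-hoc), the Fibonacci function uses nested
+  constructor patterns and two recursive calls per equation.  This is
+  beyond the format of @{command (HOL) "primrec"}, but @{command (HOL)
+  "fun"} accepts the specification directly and discharges the pattern
+  and termination proofs automatically: *}
+
+fun fib :: "nat \<Rightarrow> nat"
+where
+  "fib 0 = 0"
+| "fib (Suc 0) = 1"
+| "fib (Suc (Suc n)) = fib n + fib (Suc n)"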
+
+subsubsection {* Example: evaluation of expressions *}
+
+text {* Subsequently, we define mutual datatypes for arithmetic and
+  boolean expressions, and use @{command primrec} for evaluation
+  functions that follow the same recursive structure. *}
+
+datatype 'a aexp =
+    IF "'a bexp"  "'a aexp"  "'a aexp"
+  | Sum "'a aexp"  "'a aexp"
+  | Diff "'a aexp"  "'a aexp"
+  | Var 'a
+  | Num nat
+and 'a bexp =
+    Less "'a aexp"  "'a aexp"
+  | And "'a bexp"  "'a bexp"
+  | Neg "'a bexp"
+
+
+text {* \medskip Evaluation of arithmetic and boolean expressions *}
+
+primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
+  and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
+where
+  "evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)"
+| "evala env (Sum a1 a2) = evala env a1 + evala env a2"
+| "evala env (Diff a1 a2) = evala env a1 - evala env a2"
+| "evala env (Var v) = env v"
+| "evala env (Num n) = n"
+| "evalb env (Less a1 a2) = (evala env a1 < evala env a2)"
+| "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
+| "evalb env (Neg b) = (\<not> evalb env b)"
+
+text {* Since the value of an expression depends on the value of its
+  variables, the functions @{const evala} and @{const evalb} take an
+  additional parameter, an \emph{environment} that maps variables to
+  their values.
+
+  \medskip Substitution on expressions can be defined similarly.  The
+  mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
+  parameter is lifted canonically on the types @{typ "'a aexp"} and
+  @{typ "'a bexp"}, respectively.
+*}
+
+primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
+  and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
+where
+  "substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)"
+| "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)"
+| "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)"
+| "substa f (Var v) = f v"
+| "substa f (Num n) = Num n"
+| "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)"
+| "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
+| "substb f (Neg b) = Neg (substb f b)"
+
+text {* In textbooks about semantics one often finds substitution
+  theorems, which express the relationship between substitution and
+  evaluation.  For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
+  such a theorem by mutual induction, followed by simplification.
+*}
+
+lemma subst_one:
+  "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
+  "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
+  by (induct a and b) simp_all
+
+lemma subst_all:
+  "evala env (substa s a) = evala (\<lambda>x. evala env (s x)) a"
+  "evalb env (substb s b) = evalb (\<lambda>x. evala env (s x)) b"
+  by (induct a and b) simp_all
+
+
+subsubsection {* Example: a substitution function for terms *}
+
+text {* Functions on datatypes with nested recursion are also defined
+  by mutual primitive recursion. *}
+
+datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
+
+text {* A substitution function on type @{typ "('a, 'b) term"} can be
+  defined as follows, by working simultaneously on @{typ "('a, 'b)
+  term list"}: *}
+
+primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
+  subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
+where
+  "subst_term f (Var a) = f a"
+| "subst_term f (App b ts) = App b (subst_term_list f ts)"
+| "subst_term_list f [] = []"
+| "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
+
+text {* The recursion scheme follows the structure of the unfolded
+  definition of type @{typ "('a, 'b) term"}.  To prove properties of this
+  substitution function, mutual induction is needed:
+*}
+
+lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
+  "subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
+  by (induct t and ts) simp_all
+
+
+subsubsection {* Example: a map function for infinitely branching trees *}
+
+text {* Defining functions on infinitely branching datatypes by
+  primitive recursion is just as easy.
+*}
+
+datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
+
+primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
+where
+  "map_tree f (Atom a) = Atom (f a)"
+| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
+
+text {* Note that all occurrences of functions such as @{text ts}
+  above must be applied to an argument.  In particular, @{term
+  "map_tree f \<circ> ts"} is not allowed here. *}
+
+text {* Here is a simple composition lemma for @{term map_tree}: *}
+
+lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
+  by (induct t) simp_all
+
+
+subsection {* Proof methods related to recursive definitions *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) pat_completeness} & : & @{text method} \\
+    @{method_def (HOL) relation} & : & @{text method} \\
+    @{method_def (HOL) lexicographic_order} & : & @{text method} \\
+    @{method_def (HOL) size_change} & : & @{text method} \\
+    @{method_def (HOL) induction_schema} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method (HOL) relation} @{syntax term}
+    ;
+    @@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
+    ;
+    @@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) )
+    ;
+    @@{method (HOL) induction_schema}
+    ;
+    orders: ( 'max' | 'min' | 'ms' ) *
+  \<close>}
+
+  \begin{description}
+
+  \item @{method (HOL) pat_completeness} is a specialized method to
+  solve goals regarding the completeness of pattern matching, as
+  required by the @{command (HOL) "function"} package (cf.\
+  \cite{isabelle-function}).
+
+  \item @{method (HOL) relation}~@{text R} introduces a termination
+  proof using the relation @{text R}.  The resulting proof state will
+  contain goals expressing that @{text R} is wellfounded, and that the
+  arguments of recursive calls decrease with respect to @{text R}.
+  Usually, this method is used as the initial proof step of manual
+  termination proofs.
+
+  \item @{method (HOL) "lexicographic_order"} attempts a fully
+  automated termination proof by searching for a lexicographic
+  combination of size measures on the arguments of the function. The
+  method accepts the same arguments as the @{method auto} method,
+  which it uses internally to prove local descents.  The @{syntax
+  clasimpmod} modifiers are accepted (as for @{method auto}).
+
+  In case of failure, extensive information is printed, which can help
+  to analyse the situation (cf.\ \cite{isabelle-function}).
+
+  \item @{method (HOL) "size_change"} also works on termination goals,
+  using a variation of the size-change principle, together with a
+  graph decomposition technique (see \cite{krauss_phd} for details).
+  Three kinds of orders are used internally: @{text max}, @{text min},
+  and @{text ms} (multiset), which is only available when the theory
+  @{text Multiset} is loaded. When no order kinds are given, they are
+  tried in order. The search for a termination proof uses SAT solving
+  internally.
+
+  For local descent proofs, the @{syntax clasimpmod} modifiers are
+  accepted (as for @{method auto}).
+
+  \item @{method (HOL) induction_schema} derives user-specified
+  induction rules from well-founded induction and completeness of
+  patterns. This factors out some operations that are done internally
+  by the function package and makes them available separately. See
+  @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
+
+  \end{description}
+*}
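+
+text {* \medskip The following sketch (with the ad-hoc constant name
+  @{text sum_upto}) illustrates a manual termination proof in the
+  style of \cite{isabelle-function}: @{method (HOL) pat_completeness}
+  discharges the pattern-matching obligations of @{command (HOL)
+  "function"}, and @{method (HOL) relation} with one possible measure
+  starts the termination proof, leaving goals that @{method auto} can
+  solve. *}
+
+function sum_upto :: "nat \<Rightarrow> nat \<Rightarrow> nat"
+where
+  "sum_upto i N = (if i > N then 0 else i + sum_upto (Suc i) N)"
+  by pat_completeness auto
+
+termination
+  by (relation "measure (\<lambda>(i, N). N + 1 - i)") auto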
+
+
+subsection {* Functions with explicit partiality *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) partial_function} @{syntax target}?
+      '(' @{syntax nameref} ')' @{syntax "fixes"} \<newline>
+      @'where' @{syntax thmdecl}? @{syntax prop}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
+  recursive functions based on fixpoints in complete partial
+  orders. No termination proof is required from the user or
+  constructed internally. Instead, the possibility of non-termination
+  is modelled explicitly in the result type, which contains an
+  explicit bottom element.
+
+  Pattern matching and mutual recursion are currently not supported.
+  Thus, the specification consists of a single function described by a
+  single recursive equation.
+
+  There are no fixed syntactic restrictions on the body of the
+  function, but the induced functional must be provably monotonic
+  wrt.\ the underlying order.  The monotonicity proof is performed
+  internally, and the definition is rejected when it fails. The proof
+  can be influenced by declaring hints using the
+  @{attribute (HOL) partial_function_mono} attribute.
+
+  The mandatory @{text mode} argument specifies the mode of operation
+  of the command, which directly corresponds to a complete partial
+  order on the result type. By default, the following modes are
+  defined:
+
+  \begin{description}
+
+  \item @{text option} defines functions that map into the @{type
+  option} type. Here, the value @{term None} is used to model a
+  non-terminating computation. Monotonicity requires that if @{term
+  None} is returned by a recursive call, then the overall result must
+  also be @{term None}. This is best achieved through the use of the
+  monadic operator @{const "Option.bind"}.
+
+  \item @{text tailrec} defines functions with an arbitrary result
+  type and uses the slightly degenerate partial order where @{term
+  "undefined"} is the bottom element.  Now, monotonicity requires that
+  if @{term undefined} is returned by a recursive call, then the
+  overall result must also be @{term undefined}. In practice, this is
+  only satisfied when each recursive call is a tail call, whose result
+  is directly returned. Thus, this mode of operation allows the
+  definition of arbitrary tail-recursive functions.
+
+  \end{description}
+
+  Experienced users may define new modes by instantiating the locale
+  @{const "partial_function_definitions"} appropriately.
+
+  \item @{attribute (HOL) partial_function_mono} declares rules for
+  use in the internal monotonicity proofs of partial function
+  definitions.
+
+  \end{description}
+
+*}
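+
+text {* \medskip A small sketch in @{text option} mode (the constant
+  name @{text collatz_steps} is ad-hoc): every recursive call occurs
+  directly in a branch of @{text "if-then-else"}, so the required
+  monotonicity proof succeeds with the default hints. *}
+
+partial_function (option) collatz_steps :: "nat \<Rightarrow> nat \<Rightarrow> nat option"
+where
+  "collatz_steps k n =
+    (if n \<le> 1 then Some k
+     else if even n then collatz_steps (Suc k) (n div 2)
+     else collatz_steps (Suc k) (3 * n + 1))"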
+
+
+subsection {* Old-style recursive function definitions (TFL) *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory)"} \\
+    @{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  The old TFL commands @{command (HOL) "recdef"} and @{command (HOL)
+  "recdef_tc"} for defining recursive are mostly obsolete; @{command
+  (HOL) "function"} or @{command (HOL) "fun"} should be used instead.
+
+  @{rail \<open>
+    @@{command (HOL) recdef} ('(' @'permissive' ')')? \<newline>
+      @{syntax name} @{syntax term} (@{syntax prop} +) hints?
+    ;
+    recdeftc @{syntax thmdecl}? tc
+    ;
+    hints: '(' @'hints' ( recdefmod * ) ')'
+    ;
+    recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
+      (() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
+    ;
+    tc: @{syntax nameref} ('(' @{syntax nat} ')')?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "recdef"} defines general well-founded
+  recursive functions (using the TFL package), see also
+  \cite{isabelle-HOL}.  The ``@{text "(permissive)"}'' option tells
+  TFL to recover from failed proof attempts, returning unfinished
+  results.  The @{text recdef_simp}, @{text recdef_cong}, and @{text
+  recdef_wf} hints refer to auxiliary rules to be used in the internal
+  automated proof process of TFL.  Additional @{syntax clasimpmod}
+  declarations may be given to tune the context of the Simplifier
+  (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
+  \secref{sec:classical}).
+
+  \item @{command (HOL) "recdef_tc"}~@{text "c (i)"} recommences the
+  proof for leftover termination condition number @{text i} (default
+  1) as generated by a @{command (HOL) "recdef"} definition of
+  constant @{text c}.
+
+  Note that in most cases, @{command (HOL) "recdef"} is able to finish
+  its internal proofs without manual intervention.
+
+  \end{description}
+
+  \medskip Hints for @{command (HOL) "recdef"} may also be declared
+  globally, using the following attributes.
+
+  \begin{matharray}{rcl}
+    @{attribute_def (HOL) recdef_simp} & : & @{text attribute} \\
+    @{attribute_def (HOL) recdef_cong} & : & @{text attribute} \\
+    @{attribute_def (HOL) recdef_wf} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
+      @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
+  \<close>}
+*}
+
+
+section {* Datatypes \label{sec:hol-datatype} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) datatype} (spec + @'and')
+    ;
+    @@{command (HOL) rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
+    ;
+
+    spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
+    ;
+    cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "datatype"} defines inductive datatypes in
+  HOL.
+
+  \item @{command (HOL) "rep_datatype"} represents existing types as
+  datatypes.
+
+  For foundational reasons, some basic types such as @{typ nat}, @{typ
+  "'a \<times> 'b"}, @{typ "'a + 'b"}, @{typ bool} and @{typ unit} are
+  introduced by more primitive means using @{command_ref typedef}.  To
+  recover the rich infrastructure of @{command datatype} (e.g.\ rules
+  for @{method cases} and @{method induct} and the primitive recursion
+  combinators), such types may be represented as actual datatypes
+  later.  This is done by specifying the constructors of the desired
+  type, and giving a proof of the induction rule, distinctness and
+  injectivity of constructors.
+
+  For example, see @{file "~~/src/HOL/Sum_Type.thy"} for the
+  representation of the primitive sum type as a fully-featured datatype.
+
+  \end{description}
+
+  The generated rules for @{method induct} and @{method cases} provide
+  case names according to the given constructors, while parameters are
+  named after the types (see also \secref{sec:cases-induct}).
+
+  See \cite{isabelle-HOL} for more details on datatypes, but beware of
+  the old-style theory syntax being used there!  Apart from proper
+  proof methods for case-analysis and induction, there are also
+  emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL)
+  induct_tac} available, see \secref{sec:hol-induct-tac}; these admit
+  to refer directly to the internal structure of subgoals (including
+  internally bound parameters).
+*}
+
+
+subsubsection {* Examples *}
+
+text {* We define a type of finite sequences, with slightly different
+  names than the existing @{typ "'a list"} that is already in @{theory
+  Main}: *}
+
+datatype 'a seq = Empty | Seq 'a "'a seq"
+
+text {* We can now prove a simple lemma by structural induction: *}
+
+lemma "Seq x xs \<noteq> xs"
+proof (induct xs arbitrary: x)
+  case Empty
+  txt {* This case can be proved using the simplifier: the freeness
+    properties of the datatype are already declared as @{attribute
+    simp} rules. *}
+  show "Seq x Empty \<noteq> Empty"
+    by simp
+next
+  case (Seq y ys)
+  txt {* The step case is proved similarly. *}
+  show "Seq x (Seq y ys) \<noteq> Seq y ys"
+    using `Seq y ys \<noteq> ys` by simp
+qed
+
+text {* Here is a more succinct version of the same proof: *}
+
+lemma "Seq x xs \<noteq> xs"
+  by (induct xs arbitrary: x) simp_all
+
+
+section {* Records \label{sec:hol-record} *}
+
+text {*
+  In principle, records merely generalize the concept of tuples, where
+  components may be addressed by labels instead of just position.  The
+  logical infrastructure of records in Isabelle/HOL is slightly more
+  advanced, though, supporting truly extensible record schemes.  This
+  admits operations that are polymorphic with respect to record
+  extension, yielding ``object-oriented'' effects like (single)
+  inheritance.  See also \cite{NaraschewskiW-TPHOLs98} for more
+  details on object-oriented verification and record subtyping in HOL.
+*}
+
+
+subsection {* Basic concepts *}
+
+text {*
+  Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
+  at the level of terms and types.  The notation is as follows:
+
+  \begin{center}
+  \begin{tabular}{l|l|l}
+    & record terms & record types \\ \hline
+    fixed & @{text "\<lparr>x = a, y = b\<rparr>"} & @{text "\<lparr>x :: A, y :: B\<rparr>"} \\
+    schematic & @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} &
+      @{text "\<lparr>x :: A, y :: B, \<dots> :: M\<rparr>"} \\
+  \end{tabular}
+  \end{center}
+
+  \noindent The ASCII representation of @{text "\<lparr>x = a\<rparr>"} is @{text
+  "(| x = a |)"}.
+
+  A fixed record @{text "\<lparr>x = a, y = b\<rparr>"} has field @{text x} of value
+  @{text a} and field @{text y} of value @{text b}.  The corresponding
+  type is @{text "\<lparr>x :: A, y :: B\<rparr>"}, assuming that @{text "a :: A"}
+  and @{text "b :: B"}.
+
+  A record scheme like @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} contains fields
+  @{text x} and @{text y} as before, but also possibly further fields
+  as indicated by the ``@{text "\<dots>"}'' notation (which is actually part
+  of the syntax).  The improper field ``@{text "\<dots>"}'' of a record
+  scheme is called the \emph{more part}.  Logically it is just a free
+  variable, which is occasionally referred to as ``row variable'' in
+  the literature.  The more part of a record scheme may be
+  instantiated by zero or more further components.  For example, the
+  previous scheme may get instantiated to @{text "\<lparr>x = a, y = b, z =
+  c, \<dots> = m'\<rparr>"}, where @{text m'} refers to a different more part.
+  Fixed records are special instances of record schemes, where
+  ``@{text "\<dots>"}'' is properly terminated by the @{text "() :: unit"}
+  element.  In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
+  for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
+
+  \medskip Two key observations make extensible records in a simply
+  typed language like HOL work out:
+
+  \begin{enumerate}
+
+  \item the more part is internalized, as a free term or type
+  variable,
+
+  \item field names are externalized; they cannot be accessed within
+  the logic as first-class values.
+
+  \end{enumerate}
+
+  \medskip In Isabelle/HOL record types have to be defined explicitly,
+  fixing their field names and types, and their (optional) parent
+  record.  Afterwards, records may be formed using the above syntax, while
+  obeying the canonical order of fields as given by their declaration.
+  The record package provides several standard operations like
+  selectors and updates.  The common setup for various generic proof
+  tools enables succinct reasoning patterns.  See also the Isabelle/HOL
+  tutorial \cite{isabelle-hol-book} for further instructions on using
+  records in practice.
+*}
+
+
+subsection {* Record specifications *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) record} @{syntax typespec_sorts} '=' \<newline>
+      (@{syntax type} '+')? (constdecl +)
+    ;
+    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
+  \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
+  derived from the optional parent record @{text "\<tau>"} by adding new
+  field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
+
+  The type variables of @{text "\<tau>"} and @{text "\<sigma>\<^sub>i"} need to be
+  covered by the (distinct) parameters @{text "\<alpha>\<^sub>1, \<dots>,
+  \<alpha>\<^sub>m"}.  Type constructor @{text t} has to be new, while @{text
+  \<tau>} needs to specify an instance of an existing record type.  At
+  least one new field @{text "c\<^sub>i"} has to be specified.
+  Basically, field names need to belong to a unique record.  This is
+  not a real restriction in practice, since fields are qualified by
+  the record name internally.
+
+  The parent record specification @{text \<tau>} is optional; if omitted
+  @{text t} becomes a root record.  The hierarchy of all records
+  declared within a theory context forms a forest structure, i.e.\ a
+  set of trees starting with a root record each.  There is no way to
+  merge multiple parent records!
+
+  For convenience, @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is made a
+  type abbreviation for the fixed record type @{text "\<lparr>c\<^sub>1 ::
+  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n\<rparr>"}, likewise is @{text
+  "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
+  @{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
+  \<zeta>\<rparr>"}.
+
+  \end{description}
+*}
+
+
+subsection {* Record operations *}
+
+text {*
+  Any record definition of the form presented above produces certain
+  standard operations.  Selectors and updates are provided for any
+  field, including the improper one ``@{text more}''.  There are also
+  cumulative record constructor functions.  To simplify the
+  presentation below, we assume for now that @{text "(\<alpha>\<^sub>1, \<dots>,
+  \<alpha>\<^sub>m) t"} is a root record with fields @{text "c\<^sub>1 ::
+  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
+
+  \medskip \textbf{Selectors} and \textbf{updates} are available for
+  any field (including ``@{text more}''):
+
+  \begin{matharray}{lll}
+    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
+    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
+  \end{matharray}
+
+  There is special syntax for application of updates: @{text "r\<lparr>x :=
+  a\<rparr>"} abbreviates term @{text "x_update a r"}.  Further notation for
+  repeated updates is also available: @{text "r\<lparr>x := a\<rparr>\<lparr>y := b\<rparr>\<lparr>z :=
+  c\<rparr>"} may be written @{text "r\<lparr>x := a, y := b, z := c\<rparr>"}.  Note that
+  because of postfix notation the order of fields shown here is the
+  reverse of that in the actual term.  Since repeated updates are just
+  function applications, fields may be freely permuted in @{text "\<lparr>x
+  := a, y := b, z := c\<rparr>"}, as far as logical equality is concerned.
+  Thus commutativity of independent updates can be proven within the
+  logic for any two fields, but not as a general theorem.
+
+  \medskip The \textbf{make} operation provides a cumulative record
+  constructor function:
+
+  \begin{matharray}{lll}
+    @{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+  \end{matharray}
+
+  \medskip We now reconsider the case of non-root records, which are
+  derived from some parent.  In general, the latter may depend on
+  another parent as well, resulting in a list of \emph{ancestor
+  records}.  Appending the lists of fields of all ancestors results in
+  a certain field prefix.  The record package automatically takes care
+  of this by lifting operations over this context of ancestor fields.
+  Assuming that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} has ancestor
+  fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"},
+  the above record operations will get the following types:
+
+  \medskip
+  \begin{tabular}{lll}
+    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
+    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
+      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow>
+      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
+    @{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
+      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+  \end{tabular}
+  \medskip
+
+  \noindent Some further operations address the extension aspect of a
+  derived record scheme specifically: @{text "t.fields"} produces a
+  record fragment consisting of exactly the new fields introduced here
+  (the result may serve as a more part elsewhere); @{text "t.extend"}
+  takes a fixed record and adds a given more part; @{text
+  "t.truncate"} restricts a record scheme to a fixed record.
+
+  \medskip
+  \begin{tabular}{lll}
+    @{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+    @{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
+      \<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
+    @{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+  \end{tabular}
+  \medskip
+
+  \noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
+  for root records.
+*}
+
+
+subsection {* Derived rules and proof tools *}
+
+text {*
+  The record package proves several results internally, declaring
+  these facts to appropriate proof tools.  This enables users to
+  reason about record structures quite conveniently.  Assume that
+  @{text t} is a record type as specified above.
+
+  \begin{enumerate}
+
+  \item Standard conversions for selectors or updates applied to
+  record constructor terms are made part of the default Simplifier
+  context; thus proofs by reduction of basic operations merely require
+  the @{method simp} method without further arguments.  These rules
+  are available as @{text "t.simps"}, too.
+
+  \item Selectors applied to updated records are automatically reduced
+  by an internal simplification procedure, which is also part of the
+  standard Simplifier setup.
+
+  \item Inject equations of a form analogous to @{prop "(x, y) = (x',
+  y') \<equiv> x = x' \<and> y = y'"} are declared to the Simplifier and Classical
+  Reasoner as @{attribute iff} rules.  These rules are available as
+  @{text "t.iffs"}.
+
+  \item The introduction rule for record equality analogous to @{text
+  "x r = x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier,
+  and to the basic rule context as ``@{attribute intro}@{text "?"}''.
+  The rule is called @{text "t.equality"}.
+
+  \item Representations of arbitrary record expressions as canonical
+  constructor terms are provided both in @{method cases} and @{method
+  induct} format (cf.\ the generic proof methods of the same name,
+  \secref{sec:cases-induct}).  Several variations are available, for
+  fixed records, record schemes, more parts etc.
+
+  The generic proof methods are sufficiently smart to pick the most
+  sensible rule according to the type of the indicated record
+  expression: users just need to apply something like ``@{text "(cases
+  r)"}'' to a certain proof problem.
+
+  \item The derived record operations @{text "t.make"}, @{text
+  "t.fields"}, @{text "t.extend"}, @{text "t.truncate"} are \emph{not}
+  treated automatically, but usually need to be expanded by hand,
+  using the collective fact @{text "t.defs"}.
+
+  \end{enumerate}
+*}
+
+
+subsubsection {* Examples *}
+
+text {* See @{file "~~/src/HOL/ex/Records.thy"}, for example. *}
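+
+text {* As a minimal sketch (all names are ad-hoc), the following
+  declares a root record and an extension, and proves a trivial fact
+  about selectors and updates by the standard simplifications
+  described above: *}
+
+record point =
+  xpos :: nat
+  ypos :: nat
+
+record cpoint = point +
+  colour :: nat
+
+lemma "\<lparr>xpos = a, ypos = b\<rparr>\<lparr>xpos := 0\<rparr> = \<lparr>xpos = 0, ypos = b\<rparr>"
+  by simp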
+
+section {* Typedef axiomatization \label{sec:hol-typedef} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  A Gordon/HOL-style type definition is a certain axiom scheme that
+  identifies a new type with a subset of an existing type.  More
+  precisely, the new type is defined by exhibiting an existing type
+  @{text \<tau>}, a set @{text "A :: \<tau> set"}, and a theorem that proves
+  @{prop "\<exists>x. x \<in> A"}.  Thus @{text A} is a non-empty subset of @{text
+  \<tau>}, and the new type denotes this subset.  New functions are
+  postulated that establish an isomorphism between the new type and
+  the subset.  In general, the type @{text \<tau>} may involve type
+  variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} which means that the type definition
+  produces a type constructor @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} depending on
+  those type arguments.
+
+  The axiomatization can be considered a ``definition'' in the sense
+  of the particular set-theoretic interpretation of HOL
+  \cite{pitts93}, where the universe of types is required to be
+  downwards-closed wrt.\ arbitrary non-empty subsets.  Thus genuinely
+  new types introduced by @{command "typedef"} stay within the range
+  of HOL models by construction.  Note that @{command_ref
+  type_synonym} from Isabelle/Pure merely introduces syntactic
+  abbreviations, without any logical significance.
+
+  @{rail \<open>
+    @@{command (HOL) typedef} abs_type '=' rep_set
+    ;
+    abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
+    ;
+    rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"}
+  axiomatizes a type definition in the background theory of the
+  current context, depending on a non-emptiness result of the set
+  @{text A} that needs to be proven here.  The set @{text A} may
+  contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} as specified on the LHS,
+  but no term variables.
+
+  Even though a local theory specification, the newly introduced type
+  constructor cannot depend on parameters or assumptions of the
+  context: this is structurally impossible in HOL.  In contrast, the
+  non-emptiness proof may use local assumptions in unusual situations,
+  which could result in different interpretations in target contexts:
+  the meaning of the bijection between the representing set @{text A}
+  and the new type @{text t} may then change in different application
+  contexts.
+
+  For @{command (HOL) "typedef"}~@{text "t = A"} the newly introduced
+  type @{text t} is accompanied by a pair of morphisms to relate it to
+  the representing set over the old type.  By default, the injection
+  from type to set is called @{text Rep_t} and its inverse @{text
+  Abs_t}.  An explicit @{keyword (HOL) "morphisms"} specification
+  allows alternative names to be provided.
+
+  The core axiomatization uses the locale predicate @{const
+  type_definition} as defined in Isabelle/HOL.  Various basic
+  consequences of that are instantiated accordingly, re-using the
+  locale facts with names derived from the new type constructor.  Thus
+  the generic @{thm type_definition.Rep} is turned into the specific
+  @{text "Rep_t"}, for example.
+
+  Theorems @{thm type_definition.Rep}, @{thm
+  type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse}
+  provide the most basic characterization as a corresponding
+  injection/surjection pair (in both directions).  The derived rules
+  @{thm type_definition.Rep_inject} and @{thm
+  type_definition.Abs_inject} provide a more convenient version of
+  injectivity, suitable for automated proof tools (e.g.\ in
+  declarations involving @{attribute simp} or @{attribute iff}).
+  Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm
+  type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/
+  @{thm type_definition.Abs_induct} provide alternative views on
+  surjectivity.  These rules are already declared as set or type rules
+  for the generic @{method cases} and @{method induct} methods,
+  respectively.
+
+  \end{description}
+
+  \begin{warn}
+  If you introduce a new type axiomatically, i.e.\ via @{command_ref
+  typedecl} and @{command_ref axiomatization}, the minimum requirement
+  is that it has a non-empty model, to avoid immediate collapse of the
+  HOL logic.  Moreover, one needs to demonstrate that the
+  interpretation of such free-form axiomatizations can coexist with
+  that of the regular @{command_def typedef} scheme, and any extension
+  that other people might have introduced elsewhere.
+  \end{warn}
+*}
+
+subsubsection {* Examples *}
+
+text {* Type definitions permit the introduction of abstract data
+  types in a safe way, namely by providing models based on already
+  existing types.  Given some abstract axiomatic description @{text P}
+  of a type, this involves two steps:
+
+  \begin{enumerate}
+
+  \item Find an appropriate type @{text \<tau>} and subset @{text A} which
+  has the desired properties @{text P}, and make a type definition
+  based on this representation.
+
+  \item Prove that @{text P} holds for @{text \<tau>} by lifting @{text P}
+  from the representation.
+
+  \end{enumerate}
+
+  You can later forget about the representation and work solely in
+  terms of the abstract properties @{text P}.
+
+  \medskip The following trivial example pulls a three-element type
+  into existence within the formal logical environment of HOL. *}
+
+typedef three = "{(True, True), (True, False), (False, True)}"
+  by blast
+
+definition "One = Abs_three (True, True)"
+definition "Two = Abs_three (True, False)"
+definition "Three = Abs_three (False, True)"
+
+lemma three_distinct: "One \<noteq> Two"  "One \<noteq> Three"  "Two \<noteq> Three"
+  by (simp_all add: One_def Two_def Three_def Abs_three_inject)
+
+lemma three_cases:
+  fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
+  by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject)
+
+text {* Note that such trivial constructions are better done with
+  derived specification mechanisms such as @{command datatype}: *}
+
+datatype three' = One' | Two' | Three'
+
+text {* This avoids re-doing basic definitions and proofs from the
+  primitive @{command typedef} above. *}
+
+
+
+section {* Functorial structure of types *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "functor"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "functor"}~@{text "prefix: m"} allows to
+  prove and register properties about the functorial structure of type
+  constructors.  These properties then can be used by other packages
+  to deal with those type constructors in certain type constructions.
+  Characteristic theorems are noted in the current local theory.  By
+  default, they are prefixed with the base name of the type
+  constructor, an explicit prefix can be given alternatively.
+
+  The given term @{text "m"} is considered as \emph{mapper} for the
+  corresponding type constructor and must conform to the following
+  type pattern:
+
+  \begin{matharray}{lll}
+    @{text "m"} & @{text "::"} &
+      @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>k \<Rightarrow> (\<^vec>\<alpha>\<^sub>n) t \<Rightarrow> (\<^vec>\<beta>\<^sub>n) t"} \\
+  \end{matharray}
+
+  \noindent where @{text t} is the type constructor, @{text
+  "\<^vec>\<alpha>\<^sub>n"} and @{text "\<^vec>\<beta>\<^sub>n"} are distinct
+  type variables free in the local theory and @{text "\<sigma>\<^sub>1"},
+  \ldots, @{text "\<sigma>\<^sub>k"} is a subsequence of @{text "\<alpha>\<^sub>1 \<Rightarrow>
+  \<beta>\<^sub>1"}, @{text "\<beta>\<^sub>1 \<Rightarrow> \<alpha>\<^sub>1"}, \ldots,
+  @{text "\<alpha>\<^sub>n \<Rightarrow> \<beta>\<^sub>n"}, @{text "\<beta>\<^sub>n \<Rightarrow>
+  \<alpha>\<^sub>n"}.
+
+  \end{description}
+*}
+
+
+section {* Quotient types *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
+    @{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
+    @{command_def (HOL) "print_quotmapsQ3"} & : & @{text "context \<rightarrow>"}\\
+    @{command_def (HOL) "print_quotientsQ3"} & : & @{text "context \<rightarrow>"}\\
+    @{command_def (HOL) "print_quotconsts"} & : & @{text "context \<rightarrow>"}\\
+    @{method_def (HOL) "lifting"} & : & @{text method} \\
+    @{method_def (HOL) "lifting_setup"} & : & @{text method} \\
+    @{method_def (HOL) "descending"} & : & @{text method} \\
+    @{method_def (HOL) "descending_setup"} & : & @{text method} \\
+    @{method_def (HOL) "partiality_descending"} & : & @{text method} \\
+    @{method_def (HOL) "partiality_descending_setup"} & : & @{text method} \\
+    @{method_def (HOL) "regularize"} & : & @{text method} \\
+    @{method_def (HOL) "injection"} & : & @{text method} \\
+    @{method_def (HOL) "cleaning"} & : & @{text method} \\
+    @{attribute_def (HOL) "quot_thm"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_lifted"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_respect"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_preserve"} & : & @{text attribute} \\
+  \end{matharray}
+
+  The quotient package defines a new quotient type given a raw type
+  and a partial equivalence relation.  The package historically also
+  includes automation for transporting definitions and theorems, but
+  most of this automation has been superseded by the Lifting and
+  Transfer packages.  Users should consider using these two packages
+  for lifting definitions and transporting theorems.
+
+  @{rail \<open>
+    @@{command (HOL) quotient_type} (spec)
+    ;
+    spec: @{syntax typespec} @{syntax mixfix}? '=' \<newline>
+     @{syntax type} '/' ('partial' ':')? @{syntax term} \<newline>
+     (@'morphisms' @{syntax name} @{syntax name})? (@'parametric' @{syntax thmref})?
+  \<close>}
+
+  @{rail \<open>
+    @@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \<newline>
+    @{syntax term} 'is' @{syntax term}
+    ;
+    constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
+  \<close>}
+
+  @{rail \<open>
+    @@{method (HOL) lifting} @{syntax thmrefs}?
+    ;
+    @@{method (HOL) lifting_setup} @{syntax thmrefs}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "quotient_type"} defines a new quotient type @{text \<tau>}. The
+  injection from a quotient type to a raw type is called @{text
+  rep_\<tau>} and its inverse @{text abs_\<tau>}, unless an explicit @{keyword (HOL)
+  "morphisms"} specification provides alternative names. @{command
+  (HOL) "quotient_type"} requires the user to prove that the relation
+  is an equivalence relation (predicate @{text equivp}), unless the
+  user explicitly specifies @{text partial}, in which case the
+  obligation is @{text part_equivp}.  A quotient defined with @{text
+  partial} is weaker in the sense that fewer things can be proved
+  automatically.
+
+  The command internally proves a Quotient theorem and sets up the
+  Lifting package by the command @{command (HOL) setup_lifting}.  Thus
+  the Lifting and Transfer packages can also be used with quotient
+  types defined by @{command (HOL) "quotient_type"} without any extra
+  set-up.  The parametricity theorem for the equivalence relation
+  @{text R} can be provided as an extra argument of the command and is
+  passed to the corresponding internal call of @{command (HOL)
+  setup_lifting}.  This theorem allows the Lifting package to generate
+  a stronger transfer rule for equality.
+  
+  \end{description}
+
+  Most of the rest of the package has been superseded by the Lifting
+  and Transfer packages.  Users should consider using these two
+  packages for lifting definitions and transporting theorems.
+
+  \begin{description}  
+
+  \item @{command (HOL) "quotient_definition"} defines a constant on
+  the quotient type.
+
+  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map
+  functions.
+
+  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
+
+  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
+
+  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
+    methods match the current goal with the given raw theorem to be
+    lifted producing three new subgoals: regularization, injection and
+    cleaning subgoals. @{method (HOL) "lifting"} tries to apply the
+    heuristics for automatically solving these three subgoals and
+    leaves only the subgoals unsolved by the heuristics to the user as
+    opposed to @{method (HOL) "lifting_setup"} which leaves the three
+    subgoals unsolved.
+
+  \item @{method (HOL) "descending"} and @{method (HOL)
+    "descending_setup"} try to guess a raw statement that would lift
+    to the current subgoal. Such statement is assumed as a new subgoal
+    and @{method (HOL) "descending"} continues in the same way as
+    @{method (HOL) "lifting"} does. @{method (HOL) "descending"} tries
+    to solve the arising regularization, injection and cleaning
+    subgoals with the analogous method @{method (HOL)
+    "descending_setup"} which leaves the four unsolved subgoals.
+
+  \item @{method (HOL) "partiality_descending"} finds the regularized
+    theorem that would lift to the current subgoal, lifts it and
+    leaves it as a subgoal. This method can be used with partial
+    equivalence quotients where the non-regularized statements would
+    not be true. @{method (HOL) "partiality_descending_setup"} leaves
+    the injection and cleaning subgoals unchanged.
+
+  \item @{method (HOL) "regularize"} applies the regularization
+    heuristics to the current subgoal.
+
+  \item @{method (HOL) "injection"} applies the injection heuristics
+    to the current goal using the stored quotient respectfulness
+    theorems.
+
+  \item @{method (HOL) "cleaning"} applies the injection cleaning
+    heuristics to the current subgoal using the stored quotient
+    preservation theorems.
+
+  \item The @{attribute (HOL) quot_lifted} attribute tries to
+    transport the theorem to the quotient type automatically.
+    The attribute uses all the defined quotient types and quotient
+    constants, often producing undesired results or theorems that
+    cannot be lifted.
+
+  \item The @{attribute (HOL) quot_respect} and @{attribute (HOL)
+    quot_preserve} attributes declare a theorem as a respectfulness
+    or preservation theorem, respectively.  These are stored in the
+    local theory store and used by the @{method (HOL) "injection"}
+    and @{method (HOL) "cleaning"} methods, respectively.
+
+  \item @{attribute (HOL) quot_thm} declares that a certain theorem
+    is a quotient extension theorem. Quotient extension theorems
+    allow for quotienting inside container types. Given a polymorphic
+    type that serves as a container, a map function defined for this
+    container using @{command (HOL) "functor"} and a relation
+    map defined for the container type, the quotient extension
+    theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3
+    (rel_map R) (map Abs) (map Rep)"}. Quotient extension theorems
+    are stored in a database and are used in all the steps of lifting
+    theorems.
+
+  \end{description}
+*}
+
+
+section {* Definition by specification \label{sec:hol-specification} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) specification} '(' (decl +) ')' \<newline>
+      (@{syntax thmdecl}? @{syntax prop} +)
+    ;
+    decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
+  goal stating the existence of terms with the properties specified to
+  hold for the constants given in @{text decls}.  After finishing the
+  proof, the theory will be augmented with definitions for the given
+  constants, as well as with theorems stating the properties for these
+  constants.
+
+  @{text decl} declares a constant to be defined by the
+  specification given.  The definition for the constant @{text c} is
+  bound to the name @{text c_def} unless a theorem name is given in
+  the declaration.  Overloaded constants should be declared as such.
+
+  \end{description}
+*}
+
+
+section {* Adhoc overloading of constants *}
+
+text {*
+  \begin{tabular}{rcll}
+  @{command_def "adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  @{command_def "no_adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  @{attribute_def "show_variants"} & : & @{text "attribute"} & default @{text false} \\
+  \end{tabular}
+
+  \medskip
+
+  Adhoc overloading allows one to overload a constant depending on
+  its type. Typically this involves the introduction of an
+  uninterpreted constant (used for input and output) and the addition
+  of some variants (used internally). For examples see
+  @{file "~~/src/HOL/ex/Adhoc_Overloading_Examples.thy"} and
+  @{file "~~/src/HOL/Library/Monad_Syntax.thy"}.
+
+  @{rail \<open>
+    (@@{command adhoc_overloading} | @@{command no_adhoc_overloading})
+      (@{syntax nameref} (@{syntax term} + ) + @'and')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
+  associates variants with an existing constant.
+
+  \item @{command "no_adhoc_overloading"} is similar to
+  @{command "adhoc_overloading"}, but removes the specified variants
+  from the present context.
+  
+  \item @{attribute "show_variants"} controls printing of variants
+  of overloaded constants. If enabled, the internally used variants
+  are printed instead of their respective overloaded constants. This
+  is occasionally useful to check whether the system agrees with a
+  user's expectations about derived variants.
+
+  \end{description}
+*}
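+
+text {* A minimal sketch of the mechanism (the constant @{text SIZE} and
+  its variants are arbitrary illustrations; the adhoc overloading
+  machinery is assumed to be available in the current theory context):  *}
+
+consts SIZE :: "'a \<Rightarrow> nat"
+
+adhoc_overloading SIZE length card
+
+term "SIZE [x, y, z]"  -- "the variant @{const length} is used"
+term "SIZE {x, y, z}"  -- "the variant @{const card} is used"
+
+no_adhoc_overloading SIZE length card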
+
+chapter {* Proof tools *}
+
+section {* Adhoc tuples *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{attribute (HOL) split_format} ('(' 'complete' ')')?
+  \<close>}
+
+  \begin{description}
+
+  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
+  arguments in function applications to be represented canonically
+  according to their tuple type structure.
+
+  Note that this operation tends to invent funny names for new local
+  parameters introduced.
+
+  \end{description}
+*}
+
+
+section {* Transfer package *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "transfer"} & : & @{text method} \\
+    @{method_def (HOL) "transfer'"} & : & @{text method} \\
+    @{method_def (HOL) "transfer_prover"} & : & @{text method} \\
+    @{attribute_def (HOL) "Transfer.transferred"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "untransferred"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "transfer_rule"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "transfer_domain_rule"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "relator_eq"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "relator_domain"} & : & @{text attribute} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{method (HOL) "transfer"} method replaces the current subgoal
+    with a logically equivalent one that uses different types and
+    constants. The replacement of types and constants is guided by the
+    database of transfer rules. Goals are generalized over all free
+    variables by default; this is necessary for variables whose types
+    change, but can be overridden for specific variables with e.g.
+    @{text "transfer fixing: x y z"}.
+
+  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL)
+    transfer} that allows replacing a subgoal with one that is
+    logically stronger (rather than equivalent). For example, a
+    subgoal involving equality on a quotient type could be replaced
+    with a subgoal involving equality (instead of the corresponding
+    equivalence relation) on the underlying raw type.
+
+  \item @{method (HOL) "transfer_prover"} method assists with proving
+    a transfer rule for a new constant, provided the constant is
+    defined in terms of other constants that already have transfer
+    rules. It should be applied after unfolding the constant
+    definitions.
+
+  \item @{attribute (HOL) "untransferred"} proves the same equivalent theorem
+     as @{method (HOL) "transfer"} internally does.
+
+  \item @{attribute (HOL) Transfer.transferred} works in the opposite
+    direction than @{method (HOL) "transfer'"}. E.g., given the transfer
+    relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding transfer rules and the theorem
+    @{text "\<forall>x::int \<in> {0..}. x < x + 1"}, the attribute would prove 
+    @{text "\<forall>n::nat. n < n + 1"}. The attribute is still in experimental
+    phase of development.
+
+  \item @{attribute (HOL) "transfer_rule"} attribute maintains a
+    collection of transfer rules, which relate constants at two
+    different types. Typical transfer rules may relate different type
+    instances of the same polymorphic constant, or they may relate an
+    operation on a raw type to a corresponding operation on an
+    abstract type (quotient or subtype). For example:
+
+    @{text "((A ===> B) ===> list_all2 A ===> list_all2 B) map map"}\\
+    @{text "(cr_int ===> cr_int ===> cr_int) (\<lambda>(x,y) (u,v). (x+u, y+v)) plus"}
+
+    Lemmas involving predicates on relations can also be registered
+    using the same attribute. For example:
+
+    @{text "bi_unique A \<Longrightarrow> (list_all2 A ===> op =) distinct distinct"}\\
+    @{text "\<lbrakk>bi_unique A; bi_unique B\<rbrakk> \<Longrightarrow> bi_unique (rel_prod A B)"}
+
+  \item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a collection
+    of rules, which specify a domain of a transfer relation by a predicate.
+    E.g., given the transfer relation @{text "ZN x n \<equiv> (x = int n)"}, 
+    one can register the following transfer domain rule: 
+    @{text "Domainp ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce
+    more readable transferred goals, e.g., when quantifiers are transferred.
+
+  \item @{attribute (HOL) relator_eq} collects identity laws
+    for relators of various type constructors, e.g. @{text "list_all2
+    (op =) = (op =)"}. The @{method (HOL) transfer} method uses these
+    lemmas to infer transfer rules for non-polymorphic constants on
+    the fly.
+
+  \item @{attribute (HOL) "relator_domain"} collects rules
+    describing domains of relators by predicators. E.g., @{text "Domainp A = P \<Longrightarrow>
+    Domainp (list_all2 A) = (list_all P)"}. This allows the package to lift transfer
+    domain rules through type constructors.
+
+  \end{description}
+
+  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
+*}
+
+
+section {* Lifting package *}
+
+text {*
+  The Lifting package allows users to lift terms of the raw type to the abstract type, which is 
+  a necessary step in building a library for an abstract type. Lifting defines a new constant 
+  by combining coercion functions (Abs and Rep) with the raw term. It also proves an appropriate 
+  transfer rule for the Transfer package and, if possible, an equation for the code generator.
+
+  The Lifting package provides two main commands: @{command (HOL) "setup_lifting"} for initializing 
+  the package to work with a new type, and @{command (HOL) "lift_definition"} for lifting constants. 
+  The Lifting package works with all four kinds of type abstraction: type copies, subtypes, 
+  total quotients and partial quotients.
+
+  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "setup_lifting"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
+    @{command_def (HOL) "lift_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
+    @{command_def (HOL) "lifting_forget"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
+    @{command_def (HOL) "lifting_update"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
+    @{command_def (HOL) "print_quot_maps"} & : & @{text "context \<rightarrow>"}\\
+    @{command_def (HOL) "print_quotients"} & : & @{text "context \<rightarrow>"}\\
+    @{attribute_def (HOL) "quot_map"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "invariant_commute"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "reflexivity_rule"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "relator_mono"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "relator_distr"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_del"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "lifting_restore"} & : & @{text attribute} \\   
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) setup_lifting} ('(' 'no_code' ')')? \<newline>
+      @{syntax thmref} @{syntax thmref}? (@'parametric' @{syntax thmref})?;
+  \<close>}
+
+  @{rail \<open>
+    @@{command (HOL) lift_definition} @{syntax name} '::' @{syntax type}  @{syntax mixfix}? \<newline>
+      'is' @{syntax term} (@'parametric' @{syntax thmref})?;
+  \<close>}
+
+  @{rail \<open>
+    @@{command (HOL) lifting_forget} @{syntax nameref};
+  \<close>}
+
+  @{rail \<open>
+    @@{command (HOL) lifting_update} @{syntax nameref};
+  \<close>}
+
+  @{rail \<open>
+    @@{attribute (HOL) lifting_restore} @{syntax thmref} (@{syntax thmref} @{syntax thmref})?;
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package
+    to work with a user-defined type. 
+    The command supports two modes. The first one is a low-level mode when 
+    the user must provide as a first
+    argument of @{command (HOL) "setup_lifting"} a
+    quotient theorem @{text "Quotient R Abs Rep T"}. The
+    package configures a transfer rule for equality, a domain transfer
+    rules and sets up the @{command_def (HOL) "lift_definition"}
+    command to work with the abstract type. An optional theorem @{text "reflp R"}, which certifies that 
+    the equivalence relation R is total,
+    can be provided as a second argument. This allows the package to generate stronger transfer
+    rules. And finally, the parametricity theorem for R can be provided as a third argument.
+    This allows the package to generate a stronger transfer rule for equality.
+
+    Users generally will not prove the @{text Quotient} theorem manually for 
+    new types, as special commands exist to automate the process.
+    
+    When a new subtype is defined by @{command (HOL) typedef}, @{command (HOL) "setup_lifting"}
+    can be used in its
+    second mode, where only the type_definition theorem @{text "type_definition Rep Abs A"}
+    is used as an argument of the command. The command internally proves the corresponding
+    Quotient theorem and then proceeds as in its first mode.
+
+    For quotients, the command @{command (HOL) quotient_type} can be used. The command defines 
+    a new quotient type and similarly to the previous case, the corresponding Quotient theorem is proved 
+    and registered by @{command (HOL) setup_lifting}.
+    
+    The command @{command (HOL) "setup_lifting"} also sets up the code generator
+    for the new type. Later on, when a new constant is defined by @{command (HOL) "lift_definition"},
+    the Lifting package proves and registers a code equation (if there is one) for the new constant.
+    If the option @{text "no_code"} is specified, the Lifting package does not set up the code
+    generator and as a consequence no code equations involving an abstract type are registered
+    by @{command (HOL) "lift_definition"}.
+
+  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL) "is"} @{text t}
+    Defines a new function @{text f} with an abstract type @{text \<tau>}
+    in terms of a corresponding operation @{text t} on a
+    representation type. More formally, if @{text "t :: \<sigma>"}, then
+    the command builds a term @{text "F"} as a corresponding combination of abstraction 
+    and representation functions such that @{text "F :: \<sigma> \<Rightarrow> \<tau>" } and 
+    defines @{text f} is as @{text "f \<equiv> F t"}.
+    The term @{text t} does not have to be necessarily a constant but it can be any term.
+
+    The command opens a proof environment and the user must discharge 
+    a respectfulness proof obligation. For a type copy, i.e., a typedef with @{text
+    UNIV}, the obligation is discharged automatically. The proof goal is
+    presented in a user-friendly, readable form. A respectfulness
+    theorem in the standard format @{text f.rsp} and a transfer rule
+    @{text f.transfer} for the Transfer package are generated by the
+    package.
+
+    The user can specify a parametricity theorem for @{text t} after the keyword 
+    @{keyword "parametric"}, which allows the command
+    to generate a parametric transfer rule for @{text f}.
+
+    For each constant defined through trivial quotients (type copies or
+    subtypes) @{text f.rep_eq} is generated. The equation is a code certificate
+    that defines @{text f} using the representation function.
+
+    For each constant @{text f.abs_eq} is generated. The equation is unconditional
+    for total quotients. The equation defines @{text f} using
+    the abstraction function.
+
+    Integration with [@{attribute code} abstract]: For subtypes (e.g.,
+    corresponding to a datatype invariant, such as dlist), @{command
+    (HOL) "lift_definition"} uses a code certificate theorem
+    @{text f.rep_eq} as a code equation.
+
+    Integration with [@{attribute code} equation]: For total quotients, @{command
+    (HOL) "lift_definition"} uses @{text f.abs_eq} as a code equation.
+
+  \item @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
+    serve for storing and deleting the set-up of
+    the Lifting package and the corresponding transfer rules defined by this package.
+    This is useful for hiding type construction details of an abstract type
+    when the construction is finished, while still allowing additions to this construction
+    later on when necessary.
+
+    Whenever the Lifting package is set up with a new abstract type @{text "\<tau>"} by
+    @{command (HOL) "setup_lifting"}, the package defines a new bundle
+    that is called @{text "\<tau>.lifting"}. This bundle already includes set-up for the Lifting package. 
+    The new transfer rules
+    introduced by @{command (HOL) "lift_definition"} can be stored in the bundle by
+    the command @{command (HOL) "lifting_update"} @{text "\<tau>.lifting"}.
+
+    The command @{command (HOL) "lifting_forget"} @{text "\<tau>.lifting"} deletes set-up of the Lifting 
+    package
+    for @{text \<tau>} and deletes all the transfer rules that were introduced
+    by @{command (HOL) "lift_definition"} using @{text \<tau>} as an abstract type.
+
+    The stored set-up in a bundle can be reintroduced by the Isar commands for including a bundle
+    (@{command "include"}, @{keyword "includes"} and @{command "including"}).
+
+  \item @{command (HOL) "print_quot_maps"} prints stored quotient map
+    theorems.
+
+  \item @{command (HOL) "print_quotients"} prints stored quotient
+    theorems.
+
+  \item @{attribute (HOL) quot_map} registers a quotient map
+    theorem. E.g., @{text "Quotient R Abs Rep T \<Longrightarrow> 
+    Quotient (list_all2 R) (map Abs) (map Rep) (list_all2 T)"}. 
+    For examples see @{file
+    "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files
+    in the same directory.
+
+  \item @{attribute (HOL) invariant_commute} registers a theorem that
+    shows a relationship between the constant @{text
+    Lifting.invariant} (used for internal encoding of proper subtypes)
+    and a relator.  Such theorems allow the package to hide @{text
+    Lifting.invariant} from the user in the user-readable form of a
+    respectfulness theorem. For examples see @{file
+    "~~/src/HOL/List.thy"} or Lifting_*.thy files in the same directory.
+
+  \item @{attribute (HOL) reflexivity_rule} registers a theorem that shows
+    that a relator respects left-totality and left-uniqueness. For examples 
+    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files 
+    in the same directory.
+    The property is used in a reflexivity prover, which is used for discharging respectfulness
+    theorems for type copies and also for discharging assumptions of abstraction function equations.
+
+  \item @{attribute (HOL) "relator_mono"} registers a property describing a monotonicity of a relator.
+    E.g., @{text "A \<le> B \<Longrightarrow> list_all2 A \<le> list_all2 B"}. For examples 
+    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
+    or Lifting_*.thy files in the same directory.
+    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
+    when a parametricity theorem for the raw term is specified.
+
+  \item @{attribute (HOL) "relator_distr"} registers a property describing a distributivity
+    of the relation composition and a relator. E.g., 
+    @{text "list_all2 R \<circ>\<circ> list_all2 S = list_all2 (R \<circ>\<circ> S)"}. 
+    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
+    when a parametricity theorem for the raw term is specified.
+    When this equality does not hold unconditionally (e.g., for the function type), the user can specified
+    each direction separately and also register multiple theorems with different set of assumptions.
+    This attribute can be used only after the monotonicity property was already registered by
+    @{attribute (HOL) "relator_mono"}. For examples 
+    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
+    or Lifting_*.thy files in the same directory.
+
+  \item @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
+    from the Lifting infrastructure and thus de-registers the corresponding quotient. 
+    This effectively causes @{command (HOL) lift_definition} not to
+    do any lifting for the corresponding type. This attribute is intended for low-level
+    manipulation of the set-up of the Lifting package; @{command (HOL) lifting_forget} is
+    preferred for normal usage.
+
+  \item @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def pcr_cr_eq_thm"} 
+    registers the Quotient theorem @{text Quotient_thm} in the Lifting infrastructure 
+    and thus sets up lifting for an abstract type @{text \<tau>} (that is defined by @{text Quotient_thm}).
+    Optional theorems @{text pcr_def} and @{text pcr_cr_eq_thm} can be specified to register 
+    the parametrized
+    correspondence relation for @{text \<tau>}. E.g., for @{text "'a dlist"}, @{text pcr_def} is
+    @{text "pcr_dlist A \<equiv> list_all2 A \<circ>\<circ> cr_dlist"} and @{text pcr_cr_eq_thm} is 
+    @{text "pcr_dlist op= = op="}.
+    This attribute is intended for low-level
+    manipulation of the set-up of the Lifting package; using the bundle @{text \<tau>.lifting} 
+    together with the commands @{command (HOL) lifting_forget} and @{command (HOL) lifting_update} is
+    preferred for normal usage.
+
+  \end{description}
+*}
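+
+text {* The following sketch illustrates the typical workflow on a small
+  subtype of @{typ int} (all names are arbitrary illustrations and the
+  proofs merely show one possible way of discharging the emerging goals):  *}
+
+typedef nonneg = "{x::int. 0 \<le> x}"
+  by (rule exI [of _ 0]) simp
+
+setup_lifting type_definition_nonneg
+
+lift_definition zero_nonneg :: nonneg  is "0 :: int" by simp
+
+lift_definition plus_nonneg :: "nonneg \<Rightarrow> nonneg \<Rightarrow> nonneg"  is "op +"
+  by (auto intro: add_nonneg_nonneg)
+
+text {* The generated transfer rules @{text zero_nonneg.transfer} and
+  @{text plus_nonneg.transfer} allow the @{method (HOL) transfer} method
+  to reduce goals about the new type to goals about @{typ int}:  *}
+
+lemma "plus_nonneg n zero_nonneg = n"
+  by transfer simp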
+
+
+section {* Coercive subtyping *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def (HOL) coercion} & : & @{text attribute} \\
+    @{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
+    @{attribute_def (HOL) coercion_map} & : & @{text attribute} \\
+  \end{matharray}
+
+  Coercive subtyping allows the user to omit explicit type
+  conversions, also called \emph{coercions}.  Type inference will add
+  them as necessary when parsing a term. See
+  \cite{traytel-berghofer-nipkow-2011} for details.
+
+  @{rail \<open>
+    @@{attribute (HOL) coercion} (@{syntax term})?
+    ;
+    @@{attribute (HOL) coercion_map} (@{syntax term})?
+  \<close>}
+
+  \begin{description}
+
+  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
+  coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
+  @{text "\<sigma>\<^sub>2"} are type constructors without arguments.  Coercions are
+  composed by the inference algorithm if needed.  Note that the type
+  inference algorithm is complete only if the registered coercions
+  form a lattice.
+
+  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
+  new map function to lift coercions through type constructors. The
+  function @{text "map"} must conform to the following type pattern
+
+  \begin{matharray}{lll}
+    @{text "map"} & @{text "::"} &
+      @{text "f\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> f\<^sub>n \<Rightarrow> (\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t \<Rightarrow> (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t"} \\
+  \end{matharray}
+
+  where @{text "t"} is a type constructor and @{text "f\<^sub>i"} is of type
+  @{text "\<alpha>\<^sub>i \<Rightarrow> \<beta>\<^sub>i"} or @{text "\<beta>\<^sub>i \<Rightarrow> \<alpha>\<^sub>i"}.  Registering a map function
+  overwrites any existing map function for this particular type
+  constructor.
+
+  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
+  inference algorithm.
+
+  \end{description}
+*}
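+
+text {* A small illustration (the declarations below are arbitrary
+  examples and persist in the theory in which they are made):  *}
+
+declare [[coercion_enabled]]
+declare [[coercion "of_nat :: nat \<Rightarrow> int"]]
+
+term "(n::nat) < (i::int)"  -- "the coercion @{const of_nat} is inserted around @{text n}"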
+
+
+section {* Arithmetic proof support *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) arith} & : & @{text method} \\
+    @{attribute_def (HOL) arith} & : & @{text attribute} \\
+    @{attribute_def (HOL) arith_split} & : & @{text attribute} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{method (HOL) arith} decides linear arithmetic problems (on
+  types @{text nat}, @{text int}, @{text real}).  Any current facts
+  are inserted into the goal before running the procedure.
+
+  \item @{attribute (HOL) arith} declares facts that are supplied to
+  the arithmetic provers implicitly.
+
+  \item @{attribute (HOL) arith_split} declares case split
+  rules to be expanded before @{method (HOL) arith} is invoked.
+
+  \end{description}
+
+  Note that a simpler (but faster) arithmetic prover is already
+  invoked by the Simplifier.
+*}
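+
+text {* For example (an illustrative sketch):  *}
+
+lemma "\<not> m < n \<and> m < n + (1::nat) \<Longrightarrow> m = n"
+  by arith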
+
+
+section {* Intuitionistic proof search *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) iprover} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method (HOL) iprover} (@{syntax rulemod} *)
+  \<close>}
+
+  \begin{description}
+
+  \item @{method (HOL) iprover} performs intuitionistic proof search,
+  depending on specifically declared rules from the context, or given
+  as explicit arguments.  Chained facts are inserted into the goal
+  before commencing proof search.
+
+  Rules need to be classified as @{attribute (Pure) intro},
+  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
+  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
+  applied aggressively (without considering back-tracking later).
+  Rules declared with ``@{text "?"}'' are ignored in proof search (the
+  single-step @{method (Pure) rule} method still observes these).  An
+  explicit weight annotation may be given as well; otherwise the
+  number of rule premises will be taken into account here.
+
+  \end{description}
+*}
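+
+text {* A small intuitionistic example, with the required rules supplied
+  explicitly (an illustrative sketch):  *}
+
+lemma "(\<forall>x. P x \<longrightarrow> Q x) \<longrightarrow> (\<forall>x. P x) \<longrightarrow> (\<forall>x. Q x)"
+  by (iprover intro: impI allI dest: spec mp)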
+
+
+section {* Model Elimination and Resolution *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "meson"} & : & @{text method} \\
+    @{method_def (HOL) "metis"} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method (HOL) meson} @{syntax thmrefs}?
+    ;
+    @@{method (HOL) metis}
+      ('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
+      @{syntax thmrefs}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{method (HOL) meson} implements Loveland's model elimination
+  procedure \cite{loveland-78}.  See @{file
+  "~~/src/HOL/ex/Meson_Test.thy"} for examples.
+
+  \item @{method (HOL) metis} combines ordered resolution and ordered
+  paramodulation to find first-order (or mildly higher-order) proofs.
+  The first optional argument specifies a type encoding; see the
+  Sledgehammer manual \cite{isabelle-sledgehammer} for details.  The
+  directory @{file "~~/src/HOL/Metis_Examples"} contains several small
+  theories developed to a large extent using @{method (HOL) metis}.
+
+  \end{description}
+*}
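+
+text {* Two small examples (illustrative sketches):  *}
+
+lemma "(\<exists>x. \<forall>y. P x y) \<longrightarrow> (\<forall>y. \<exists>x. P x y)"
+  by meson
+
+lemma "inj f \<Longrightarrow> f x = f y \<Longrightarrow> x = y"
+  by (metis injD)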
+
+
+section {* Algebraic reasoning via Gr\"obner bases *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "algebra"} & : & @{text method} \\
+    @{attribute_def (HOL) algebra} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method (HOL) algebra}
+      ('add' ':' @{syntax thmrefs})?
+      ('del' ':' @{syntax thmrefs})?
+    ;
+    @@{attribute (HOL) algebra} (() | 'add' | 'del')
+  \<close>}
+
+  \begin{description}
+
+  \item @{method (HOL) algebra} performs algebraic reasoning via
+  Gr\"obner bases, see also \cite{Chaieb-Wenzel:2007} and
+  \cite[\S3.2]{Chaieb-thesis}. The method deals with two main
+  classes of problems:
+
+  \begin{enumerate}
+
+  \item Universal problems over multivariate polynomials in a
+  (semi)-ring/field/idom; the capabilities of the method are augmented
+  according to properties of these structures. For this problem class
+  the method is only complete for algebraically closed fields, since
+  the underlying method is based on Hilbert's Nullstellensatz, where
+  the equivalence only holds for algebraically closed fields.
+
+  The problems can contain equations @{text "p = 0"} or inequations
+  @{text "q \<noteq> 0"} anywhere within a universal problem statement.
+
+  \item All-exists problems of the following restricted (but useful)
+  form:
+
+  @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
+    e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
+    (\<exists>y\<^sub>1 \<dots> y\<^sub>k.
+      p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
+      \<dots> \<and>
+      p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
+
+  Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
+  polynomials only in the variables mentioned as arguments.
+
+  \end{enumerate}
+
+  The proof method is preceded by a simplification step, which may be
+  modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}.
+  This acts like declarations for the Simplifier
+  (\secref{sec:simplifier}) on a private simpset for this tool.
+
+  \item @{attribute algebra} (as attribute) manages the default
+  collection of pre-simplification rules of the above proof method.
+
+  \end{description}
+*}
+
+
+subsubsection {* Example *}
+
+text {* The subsequent example is from geometry: collinearity is
+  invariant under rotation.  *}
+
+type_synonym point = "int \<times> int"
+
+fun collinear :: "point \<Rightarrow> point \<Rightarrow> point \<Rightarrow> bool" where
+  "collinear (Ax, Ay) (Bx, By) (Cx, Cy) \<longleftrightarrow>
+    (Ax - Bx) * (By - Cy) = (Ay - By) * (Bx - Cx)"
+
+lemma collinear_inv_rotation:
+  assumes "collinear (Ax, Ay) (Bx, By) (Cx, Cy)" and "c\<^sup>2 + s\<^sup>2 = 1"
+  shows "collinear (Ax * c - Ay * s, Ay * c + Ax * s)
+    (Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)"
+  using assms by (algebra add: collinear.simps)
+
+text {*
+ See also @{file "~~/src/HOL/ex/Groebner_Examples.thy"}.
+*}
+
+
+section {* Coherent Logic *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "coherent"} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method (HOL) coherent} @{syntax thmrefs}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{method (HOL) coherent} solves problems of \emph{Coherent
+  Logic} \cite{Bezem-Coquand:2005}, which covers applications in
+  confluence theory, lattice theory and projective geometry.  See
+  @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
+
+  \end{description}
+*}
+
+
+section {* Proving propositions *}
+
+text {*
+  In addition to the standard proof methods, a number of diagnostic
+  tools search for proofs and provide an Isar proof snippet on success.
+  These tools are available via the following commands.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "solve_direct"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "try"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "try0"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "sledgehammer_params"} & : & @{text "theory \<rightarrow> theory"}
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) try}
+    ;
+
+    @@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thmrefs} ) + ) ?
+      @{syntax nat}?
+    ;
+
+    @@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}?
+    ;
+
+    @@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? )
+    ;
+    args: ( @{syntax name} '=' value + ',' )
+    ;
+    facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
+  \<close>} % FIXME check args "value"
+
+  \begin{description}
+
+  \item @{command (HOL) "solve_direct"} checks whether the current
+  subgoals can be solved directly by an existing theorem. Duplicate
+  lemmas can be detected in this way.
+
+  \item @{command (HOL) "try0"} attempts to prove a subgoal
+  using a combination of standard proof methods (@{method auto},
+  @{method simp}, @{method blast}, etc.).  Additional facts supplied
+  via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
+  "dest:"} are passed to the appropriate proof methods.
+
+  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
+  using a combination of provers and disprovers (@{command (HOL)
+  "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
+  "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
+  "nitpick"}).
+
+  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
+  using external automatic provers (resolution provers and SMT
+  solvers). See the Sledgehammer manual \cite{isabelle-sledgehammer}
+  for details.
+
+  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
+  "sledgehammer"} configuration options persistently.
+
+  \end{description}
+*}
+
+
+section {* Checking and refuting propositions *}
+
+text {*
+  Identifying incorrect propositions usually involves evaluation of
+  particular assignments and systematic counterexample search.  This
+  is supported by the following commands.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "value"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "values"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "quickcheck"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "nitpick"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "quickcheck_params"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "nitpick_params"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "quickcheck_generator"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "find_unused_assms"} & : & @{text "context \<rightarrow>"}
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
+    ;
+
+    @@{command (HOL) values} modes? @{syntax nat}? @{syntax term}
+    ;
+
+    (@@{command (HOL) quickcheck} | @@{command (HOL) nitpick})
+      ( '[' args ']' )? @{syntax nat}?
+    ;
+
+    (@@{command (HOL) quickcheck_params} |
+      @@{command (HOL) nitpick_params}) ( '[' args ']' )?
+    ;
+
+    @@{command (HOL) quickcheck_generator} @{syntax nameref} \<newline>
+      'operations:' ( @{syntax term} +)
+    ;
+
+    @@{command (HOL) find_unused_assms} @{syntax name}?
+    ;
+    modes: '(' (@{syntax name} +) ')'
+    ;
+    args: ( @{syntax name} '=' value + ',' )
+  \<close>} % FIXME check "value"
+
+  \begin{description}
+
+  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
+  term; optionally @{text modes} can be specified, which are appended
+  to the current print mode; see \secref{sec:print-modes}.
+  Internally, the evaluation is performed by registered evaluators,
+  which are invoked sequentially until a result is returned.
+  Alternatively a specific evaluator can be selected using square
+  brackets; typical evaluators use the current set of code equations
+  to normalize and include @{text simp} for fully symbolic evaluation
+  using the simplifier, @{text nbe} for \emph{normalization by
+  evaluation} and @{text code} for code generation in SML.
+
+  \item @{command (HOL) "values"}~@{text t} enumerates a set
+  comprehension by evaluation and prints its values up to the given
+  number of solutions; optionally @{text modes} can be specified,
+  which are appended to the current print mode; see
+  \secref{sec:print-modes}.
+
+  \item @{command (HOL) "quickcheck"} tests the current goal for
+  counterexamples using a series of assignments for its free
+  variables; by default the first subgoal is tested, another can be
+  selected explicitly using an optional goal index.  Assignments can
+  be chosen by exhausting the search space up to a given size, or using a
+  fixed number of random assignments in the search space, or exploring
+  the search space symbolically using narrowing.  By default,
+  quickcheck uses exhaustive testing.  A number of configuration
+  options are supported for @{command (HOL) "quickcheck"}, notably:
+
+    \begin{description}
+
+    \item[@{text tester}] specifies which testing approach to apply.
+    There are three testers, @{text exhaustive}, @{text random}, and
+    @{text narrowing}.  An unknown configuration option is treated as
+    an argument to tester, making @{text "tester ="} optional.  When
+    multiple testers are given, these are applied in parallel.  If no
+    tester is specified, quickcheck uses the testers that are set
+    active, i.e., those for which the configuration options @{attribute
+    quickcheck_exhaustive_active}, @{attribute
+    quickcheck_random_active}, @{attribute
+    quickcheck_narrowing_active} are set to true.
+
+    \item[@{text size}] specifies the maximum size of the search space
+    for assignment values.
+
+    \item[@{text genuine_only}] sets quickcheck only to return genuine
+    counterexamples, but not potentially spurious counterexamples due
+    to underspecified functions.
+
+    \item[@{text abort_potential}] sets quickcheck to abort once it
+    finds a potentially spurious counterexample and not to continue
+    searching for a further genuine counterexample.
+    For this option to be effective, the @{text genuine_only} option
+    must be set to false.
+
+    \item[@{text eval}] takes a term or a list of terms and evaluates
+    these terms under the variable assignment found by quickcheck.
+    This option is currently only supported by the default
+    (exhaustive) tester.
+
+    \item[@{text iterations}] sets how many sets of assignments are
+    generated for each particular size.
+
+    \item[@{text no_assms}] specifies whether assumptions in
+    structured proofs should be ignored.
+
+    \item[@{text locale}] specifies how to process conjectures in
+    a locale context, i.e., they can be interpreted or expanded.
+    The option is a whitespace-separated list of the two words
+    @{text interpret} and @{text expand}. The list determines the
+    order in which they are employed. The default setting is to first use
+    interpretations and then test the expanded conjecture.
+    The option is only provided as an attribute declaration, but not
+    as a parameter to the command.
+
+    \item[@{text timeout}] sets the time limit in seconds.
+
+    \item[@{text default_type}] sets the type(s) generally used to
+    instantiate type variables.
+
+    \item[@{text report}] if set, quickcheck reports how many tests
+    fulfilled the preconditions.
+
+    \item[@{text use_subtype}] if set, quickcheck automatically lifts
+    conjectures to registered subtypes if possible, and tests the
+    lifted conjecture.
+
+    \item[@{text quiet}] if set, quickcheck does not output anything
+    while testing.
+
+    \item[@{text verbose}] if set, quickcheck informs about the current
+    size and cardinality while testing.
+
+    \item[@{text expect}] can be used to check if the user's
+    expectation was met (@{text no_expectation}, @{text
+    no_counterexample}, or @{text counterexample}).
+
+    \end{description}
+
+  These options can be given within square brackets.
+
+  Using the following type classes, the testers generate values and convert
+  them back into Isabelle terms for displaying counterexamples.
+    \begin{description}
+    \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
+      and @{class full_exhaustive} implement the testing. They take a 
+      testing function as a parameter, which takes a value of type @{typ "'a"}
+      and optionally produces a counterexample, and a size parameter for the test values.
+      In @{class full_exhaustive}, the testing function parameter additionally 
+      expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
+      of the tested value.
+
+      The canonical implementation for @{text exhaustive} testers calls the given
+      testing function on all values up to the given size and stops as soon
+      as a counterexample is found.
+
+    \item[@{text random}] The operation @{const Quickcheck_Random.random}
+      of the type class @{class random} generates a pseudo-random
+      value of the given size and a lazy term reconstruction of the value
+      in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
+      is defined in theory @{theory Random}.
+      
+    \item[@{text narrowing}] implements Haskell's Lazy Smallcheck~\cite{runciman-naylor-lindblad}
+      using the type classes @{class narrowing} and @{class partial_term_of}.
+      Variables in the current goal are initially represented as symbolic variables.
+      If the execution of the goal tries to evaluate one of them, the test engine
+      replaces it with refinements provided by @{const narrowing}.
+      Narrowing views every value as a sum-of-products which is expressed using the operations
+      @{const Quickcheck_Narrowing.cons} (embedding a value),
+      @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
+      The refinement should enable further evaluation of the goal.
+
+      For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
+      can be recursively defined as
+      @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
+                (Quickcheck_Narrowing.apply
+                  (Quickcheck_Narrowing.apply
+                    (Quickcheck_Narrowing.cons (op #))
+                    narrowing)
+                  narrowing)"}.
+      If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty
+      list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively
+      refined if needed.
+
+      To reconstruct counterexamples, the operation @{const partial_term_of} transforms
+      @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
+      The deep representation models symbolic variables as
+      @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
+      @{const Code_Evaluation.Free}, and refined values as
+      @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
+      denotes the index in the sum of refinements. In the above example for lists,
+      @{term "0"} corresponds to @{term "[]"} and @{term "1"}
+      to @{term "op #"}.
+
+      The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
+      such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
+      but it does not ensure consistency with @{const narrowing}.
+    \end{description}
+
+  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
+  "quickcheck"} configuration options persistently.
+
+  \item @{command (HOL) "quickcheck_generator"} creates random and
+  exhaustive value generators for a given type and operations.  It
+  generates values by using the operations as if they were
+  constructors of that type.
+
+  \item @{command (HOL) "nitpick"} tests the current goal for
+  counterexamples using a reduction to first-order relational
+  logic. See the Nitpick manual \cite{isabelle-nitpick} for details.
+
+  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
+  "nitpick"} configuration options persistently.
+
+  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
+  assumptions in theorems using quickcheck.
+  It takes the theory name to be checked for superfluous assumptions as
+  optional argument. If not provided, it checks the current theory.
+  Options to the internal quickcheck invocations can be changed with
+  common configuration declarations.
+
+  \end{description}
+*}
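+
+text {* Small illustrations (sketches; the conjecture below is
+  deliberately wrong, so quickcheck is expected to find a counterexample):  *}
+
+value "rev [1::nat, 2, 3]"  -- "yields the reversed list"
+
+value [nbe] "rev [a, b, c]"  -- "normalization by evaluation, also for open terms"
+
+lemma "rev (xs @ ys) = rev xs @ rev ys"
+  quickcheck [expect = counterexample]
+  oops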
+
+
+section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *}
+
+text {*
+  The following tools of Isabelle/HOL support case analysis and
+  induction in unstructured tactic scripts; see also
+  \secref{sec:cases-induct} for proper Isar versions of similar ideas.
+
+  \begin{matharray}{rcl}
+    @{method_def (HOL) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def (HOL) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def (HOL) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
+    @{command_def (HOL) "inductive_cases"}@{text "\<^sup>*"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
+    ;
+    @@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
+    ;
+    @@{method (HOL) ind_cases} (@{syntax prop}+) (@'for' (@{syntax name}+))?
+    ;
+    @@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
+    ;
+    rule: 'rule' ':' @{syntax thmref}
+  \<close>}
+
+  \begin{description}
+
+  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac} support
+  reasoning about inductive types.  Rules are selected according to
+  the declarations by the @{attribute cases} and @{attribute induct}
+  attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
+  datatype} package already takes care of this.
+
+  These unstructured tactics feature both goal addressing and dynamic
+  instantiation.  Note that named rule cases are \emph{not} provided
+  as would be by the proper @{method cases} and @{method induct} proof
+  methods (see \secref{sec:cases-induct}).  Unlike the @{method
+  induct} method, @{method induct_tac} does not handle structured rule
+  statements, only the compact object-logic conclusion of the subgoal
+  being addressed.
+
+  \item @{method (HOL) ind_cases} and @{command (HOL)
+  "inductive_cases"} provide an interface to the internal @{ML_text
+  mk_cases} operation.  Rules are simplified in an unrestricted
+  forward manner.
+
+  While @{method (HOL) ind_cases} is a proof method to apply the
+  result immediately as elimination rules, @{command (HOL)
+  "inductive_cases"} provides case split theorems at the theory level
+  for later use.  The @{keyword "for"} argument of the @{method (HOL)
+  ind_cases} method allows one to specify a list of variables that should
+  be generalized before applying the resulting rule.
+
+  \end{description}
+*}
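+
+text {* For example (illustrative tactic scripts):  *}
+
+lemma "xs \<noteq> [] \<Longrightarrow> hd xs # tl xs = xs"
+  apply (case_tac xs)
+   apply simp
+  apply simp
+  done
+
+lemma "length (xs @ ys) = length xs + length ys"
+  apply (induct_tac xs)
+   apply simp
+  apply simp
+  done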
+
+
+chapter {* Executable code *}
+
+text {* For validation purposes, it is often useful to \emph{execute}
+  specifications.  In principle, execution could be simulated by
+  Isabelle's inference kernel, i.e. by a combination of resolution and
+  simplification.  Unfortunately, this approach is rather inefficient.
+  A more efficient way of executing specifications is to translate
+  them into a functional programming language such as ML.
+
+  Isabelle provides a generic framework to support code generation
+  from executable specifications.  Isabelle/HOL instantiates these
+  mechanisms in a way that is amenable to end-user applications.  Code
+  can be generated for functional programs (including overloading
+  using type classes) targeting SML \cite{SML}, OCaml \cite{OCaml},
+  Haskell \cite{haskell-revised-report} and Scala
+  \cite{scala-overview-tech-report}.  Conceptually, code generation is
+  split up into three steps: \emph{selection} of code theorems,
+  \emph{translation} into an abstract executable view and
+  \emph{serialization} to a specific \emph{target language}.
+  Inductive specifications can be executed using the predicate
+  compiler which operates within HOL.  See \cite{isabelle-codegen} for
+  an introduction.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "export_code"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def (HOL) code} & : & @{text attribute} \\
+    @{command_def (HOL) "code_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "print_codesetup"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def (HOL) code_unfold} & : & @{text attribute} \\
+    @{attribute_def (HOL) code_post} & : & @{text attribute} \\
+    @{attribute_def (HOL) code_abbrev} & : & @{text attribute} \\
+    @{command_def (HOL) "print_codeproc"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "code_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "code_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "code_reserved"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_printing"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_identifier"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_monad"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_reflect"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_pred"} & : & @{text "theory \<rightarrow> proof(prove)"}
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (HOL) export_code} ( @'open' ) ? ( constexpr + ) \<newline>
+       ( ( @'in' target ( @'module_name' @{syntax string} ) ? \<newline>
+        ( @'file' @{syntax string} ) ? ( '(' args ')' ) ?) + ) ?
+    ;
+
+    const: @{syntax term}
+    ;
+
+    constexpr: ( const | 'name._' | '_' )
+    ;
+
+    typeconstructor: @{syntax nameref}
+    ;
+
+    class: @{syntax nameref}
+    ;
+
+    target: 'SML' | 'OCaml' | 'Haskell' | 'Scala' | 'Eval'
+    ;
+
+    @@{attribute (HOL) code} ( 'del' | 'equation' | 'abstype' | 'abstract'
+      | 'drop:' ( const + ) | 'abort:' ( const + ) )?
+    ;
+
+    @@{command (HOL) code_datatype} ( const + )
+    ;
+
+    @@{attribute (HOL) code_unfold} ( 'del' ) ?
+    ;
+
+    @@{attribute (HOL) code_post} ( 'del' ) ?
+    ;
+
+    @@{attribute (HOL) code_abbrev}
+    ;
+
+    @@{command (HOL) code_thms} ( constexpr + ) ?
+    ;
+
+    @@{command (HOL) code_deps} ( constexpr + ) ?
+    ;
+
+    @@{command (HOL) code_reserved} target ( @{syntax string} + )
+    ;
+
+    symbol_const: ( @'constant' const )
+    ;
+
+    symbol_typeconstructor: ( @'type_constructor' typeconstructor )
+    ;
+
+    symbol_class: ( @'type_class' class )
+    ;
+
+    symbol_class_relation: ( @'class_relation' class ( '<' | '\<subseteq>' ) class )
+    ;
+
+    symbol_class_instance: ( @'class_instance' typeconstructor @'::' class )
+    ;
+
+    symbol_module: ( @'code_module' name )
+    ;
+
+    syntax: @{syntax string} | ( @'infix' | @'infixl' | @'infixr' ) @{syntax nat} @{syntax string}
+    ;
+
+    printing_const: symbol_const ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' syntax ? + @'and' )
+    ;
+
+    printing_typeconstructor: symbol_typeconstructor ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' syntax ? + @'and' )
+    ;
+
+    printing_class: symbol_class ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' @{syntax string} ? + @'and' )
+    ;
+
+    printing_class_relation: symbol_class_relation ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' @{syntax string} ? + @'and' )
+    ;
+
+    printing_class_instance: symbol_class_instance ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' '-' ? + @'and' )
+    ;
+
+    printing_module: symbol_module ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' ( @{syntax string} ( @'attach' ( const + ) ) ? ) ? + @'and' )
+    ;
+
+    @@{command (HOL) code_printing} ( ( printing_const | printing_typeconstructor
+      | printing_class | printing_class_relation | printing_class_instance
+      | printing_module ) + '|' )
+    ;
+
+    @@{command (HOL) code_identifier} ( ( symbol_const | symbol_typeconstructor
+      | symbol_class | symbol_class_relation | symbol_class_instance
+      | symbol_module ) ( '\<rightharpoonup>' | '=>' ) \<newline>
+      ( '(' target ')' @{syntax string} ? + @'and' ) + '|' )
+    ;
+
+    @@{command (HOL) code_monad} const const target
+    ;
+
+    @@{command (HOL) code_reflect} @{syntax string} \<newline>
+      ( @'datatypes' ( @{syntax string} '=' ( '_' | ( @{syntax string} + '|' ) + @'and' ) ) ) ? \<newline>
+      ( @'functions' ( @{syntax string} + ) ) ? ( @'file' @{syntax string} ) ?
+    ;
+
+    @@{command (HOL) code_pred} \<newline> ('(' @'modes' ':' modedecl ')')? \<newline> const
+    ;
+
+    modedecl: (modes | ((const ':' modes) \<newline>
+        (@'and' ((const ':' modes @'and') +))?))
+    ;
+
+    modes: mode @'as' const
+  \<close>}
+
+  \begin{description}
+
+  \item @{command (HOL) "export_code"} generates code for a given list
+  of constants in the specified target language(s).  If no
+  serialization instruction is given, only abstract code is generated
+  internally.
+
+  Constants may be specified by giving them literally, referring to
+  all executable constants within a certain theory by giving @{text
+  "name._"}, or referring to \emph{all} executable constants currently
+  available by giving @{text "_"}.
+
+  By default, exported identifiers are minimized per module.  This
+  can be suppressed by prepending @{keyword "open"} before the list
+  of constants.
+
+  By default, for each involved theory one corresponding name space
+  module is generated.  Alternatively, a module name may be specified
+  after the @{keyword "module_name"} keyword; then \emph{all} code is
+  placed in this module.
+
+  For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification
+  refers to a single file; for \emph{Haskell}, it refers to a whole
+  directory, where code is generated in multiple files reflecting the
+  module hierarchy.  Omitting the file specification denotes standard
+  output.
+
+  Serializers take an optional list of arguments in parentheses.
+  For \emph{Haskell} a module name prefix may be given using the
+  ``@{text "root:"}'' argument; ``@{text string_classes}'' adds a
+  ``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate
+  datatype declaration.
+
+  \item @{attribute (HOL) code} declares code equations for code
+  generation.  Variant @{text "code equation"} declares a conventional
+  equation as code equation.  Variants @{text "code abstype"} and
+  @{text "code abstract"} declare abstract datatype certificates or
+  code equations on abstract datatype representations respectively.
+  Vanilla @{text "code"} falls back to @{text "code equation"}
+  or @{text "code abstype"} depending on the syntactic shape
+  of the underlying equation.  Variant @{text "code del"}
+  deselects a code equation for code generation.
+
+  Variants @{text "code drop:"} and @{text "code abort:"} take
+  a list of constants as arguments and drop all code equations declared
+  for them.  In the case of @{text abort}, these constants are then
+  not required to have a definition by means of code equations;
+  if needed these are implemented by program abort (exception) instead.
+
+  Usually packages introducing code equations provide a reasonable
+  default setup for selection.  
+
+  \item @{command (HOL) "code_datatype"} specifies a constructor set
+  for a logical type.
+
+  \item @{command (HOL) "print_codesetup"} gives an overview on
+  selected code equations and code generator datatypes.
+
+  \item @{attribute (HOL) code_unfold} declares (or with option
+  ``@{text "del"}'' removes) theorems which during preprocessing
+  are applied as rewrite rules to any code equation or evaluation
+  input.
+
+  \item @{attribute (HOL) code_post} declares (or with option ``@{text
+  "del"}'' removes) theorems which are applied as rewrite rules to any
+  result of an evaluation.
+
+  \item @{attribute (HOL) code_abbrev} declares equations which are
+  applied as rewrite rules to any result of an evaluation and
+  symmetrically during preprocessing to any code equation or evaluation
+  input.
+
+  \item @{command (HOL) "print_codeproc"} prints the setup of the code
+  generator preprocessor.
+
+  \item @{command (HOL) "code_thms"} prints a list of theorems
+  representing the corresponding program containing all given
+  constants after preprocessing.
+
+  \item @{command (HOL) "code_deps"} visualizes dependencies of
+  theorems representing the corresponding program containing all given
+  constants after preprocessing.
+
+  \item @{command (HOL) "code_reserved"} declares a list of names as
+  reserved for a given target, preventing them from being shadowed by any
+  generated code.
+
+  \item @{command (HOL) "code_printing"} associates a series of symbols
+  (constants, type constructors, classes, class relations, instances,
+  module names) with target-specific serializations; omitting a serialization
+  deletes an existing serialization.
+
+  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism
+  to generate monadic code for Haskell.
+
+  \item @{command (HOL) "code_identifier"} associates a a series of symbols
+  (constants, type constructors, classes, class relations, instances,
+  module names) with target-specific hints how these symbols shall be named.
+  These hints gain precedence over names for symbols with no hints at all.
+  Conflicting hints are subject to name disambiguation.
+  \emph{Warning:} It is at the discretion
+  of the user to ensure that name prefixes of identifiers in compound
+  statements like type classes or datatypes are still the same.
+
+  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
+  argument compiles code into the system runtime environment and
+  modifies the code generator setup such that future invocations of system
+  runtime code generation referring to one of the ``@{text
+  "datatypes"}'' or ``@{text "functions"}'' entities use these
+  precompiled entities.  With a ``@{text "file"}'' argument, the
+  corresponding code is generated into that specified file without
+  modifying the code generator setup.
+
+  \item @{command (HOL) "code_pred"} creates code equations for a
+    predicate given a set of introduction rules. Optional mode
+    annotations determine which arguments are supposed to be input or
+    output. If alternative introduction rules are declared, one must
+    prove a corresponding elimination rule.
+
+  \end{description}
+*}
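+
+text {* A minimal example (an illustrative sketch; the module name is
+  arbitrary and, since no file is given, the generated code is written
+  to standard output):  *}
+
+definition double :: "nat \<Rightarrow> nat" where
+  "double n = n + n"
+
+code_thms double
+
+export_code double in SML module_name "Example"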
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Inner_Syntax.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1709 @@
+theory Inner_Syntax
+imports Base Main
+begin
+
+chapter {* Inner syntax --- the term language \label{ch:inner-syntax} *}
+
+text {* The inner syntax of Isabelle provides concrete notation for
+  the main entities of the logical framework, notably @{text
+  "\<lambda>"}-terms with types and type classes.  Applications may either
+  extend existing syntactic categories by additional notation, or
+  define new sub-languages that are linked to the standard term
+  language via some explicit markers.  For example @{verbatim
+  FOO}~@{text "foo"} could embed the syntax corresponding for some
+  user-defined nonterminal @{text "foo"} --- within the bounds of the
+  given lexical syntax of Isabelle/Pure.
+
+  The most basic way to specify concrete syntax for logical entities
+  works via mixfix annotations (\secref{sec:mixfix}), which may be
+  usually given as part of the original declaration or via explicit
+  notation commands later on (\secref{sec:notation}).  This already
+  covers many needs of concrete syntax without having to understand
+  the full complexity of inner syntax layers.
+
+  Further details of the syntax engine involve the classical
+  distinction of lexical language versus context-free grammar (see
+  \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax
+  transformations} (see \secref{sec:syntax-transformations}).
+*}
+
+
+section {* Printing logical entities *}
+
+subsection {* Diagnostic commands \label{sec:print-diag} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "typ"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "term"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "prop"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "thm"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "full_prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_state"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  These diagnostic commands assist interactive development by printing
+  internal logical entities in a human-readable fashion.
+
+  @{rail \<open>
+    @@{command typ} @{syntax modes}? @{syntax type} ('::' @{syntax sort})?
+    ;
+    @@{command term} @{syntax modes}? @{syntax term}
+    ;
+    @@{command prop} @{syntax modes}? @{syntax prop}
+    ;
+    @@{command thm} @{syntax modes}? @{syntax thmrefs}
+    ;
+    ( @@{command prf} | @@{command full_prf} ) @{syntax modes}? @{syntax thmrefs}?
+    ;
+    @@{command print_state} @{syntax modes}?
+    ;
+    @{syntax_def modes}: '(' (@{syntax name} + ) ')'
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
+  according to the current context.
+
+  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
+  determine the most general way to make @{text "\<tau>"} conform to sort
+  @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
+  belongs to that sort.  Dummy type parameters ``@{text "_"}''
+  (underscore) are assigned to fresh type variables with most general
+  sorts, according to the principles of type-inference.
+
+  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
+  read, type-check and print terms or propositions according to the
+  current theory or proof context; the inferred type of @{text t} is
+  output as well.  Note that these commands are also useful in
+  inspecting the current environment of term abbreviations.
+
+  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
+  theorems from the current theory or proof context.  Note that any
+  attributes included in the theorem specifications are applied to a
+  temporary context derived from the current theory or proof; the
+  result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
+  \<dots>, a\<^sub>n"} do not have any permanent effect.
+
+  \item @{command "prf"} displays the (compact) proof term of the
+  current proof state (if present), or of the given theorems. Note
+  that this requires proof terms to be switched on for the current
+  object logic (see the ``Proof terms'' section of the Isabelle
+  reference manual for information on how to do this).
+
+  \item @{command "full_prf"} is like @{command "prf"}, but displays
+  the full proof term, i.e.\ also displays information omitted in the
+  compact proof term, which is denoted by ``@{text _}'' placeholders
+  there.
+
+  \item @{command "print_state"} prints the current proof state (if
+  present), including current facts and goals.
+
+  \end{description}
+
+  All of the diagnostic commands above admit a list of @{text modes}
+  to be specified, which is appended to the current print mode; see
+  also \secref{sec:print-modes}.  Thus the output behavior may be
+  modified according to particular print mode features.  For example,
+  @{command "print_state"}~@{text "(latex xsymbols)"} prints the
+  current proof state with mathematical symbols and special characters
+  represented in {\LaTeX} source, according to the Isabelle style
+  \cite{isabelle-sys}.
+
+  Note that antiquotations (cf.\ \secref{sec:antiq}) provide a more
+  systematic way to include formal items into the printed text
+  document.
+*}
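+
+text {* For illustration only, the following hypothetical command
+  sequence indicates typical uses of these diagnostic facilities,
+  assuming an object-logic like Isabelle/HOL where the fact @{text
+  "conjI"} is available:
+
+  \begin{ttbox}
+  typ "'a => 'b"            (*read and print a type expression*)
+  term "%x. x"              (*type-check and print a term*)
+  prop "x == x"             (*check and print a meta-level proposition*)
+  thm conjI                 (*retrieve and print a theorem*)
+  print_state (xsymbols)    (*print the goal state with proper symbols*)
+  \end{ttbox}
+*}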
+
+
+subsection {* Details of printed content *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{attribute_def show_markup} & : & @{text attribute} \\
+    @{attribute_def show_types} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_sorts} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_consts} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_abbrevs} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def show_brackets} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def names_long} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def names_short} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def names_unique} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def eta_contract} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def goals_limit} & : & @{text attribute} & default @{text 10} \\
+    @{attribute_def show_main_goal} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_hyps} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_tags} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_question_marks} & : & @{text attribute} & default @{text true} \\
+  \end{tabular}
+  \medskip
+
+  These configuration options control the detail of information that
+  is displayed for types, terms, theorems, goals etc.  See also
+  \secref{sec:config}.
+
+  \begin{description}
+
+  \item @{attribute show_markup} controls direct inlining of markup
+  into the printed representation of formal entities --- notably type
+  and sort constraints.  This enables Prover IDE users to retrieve
+  that information via tooltips or popups while hovering with the
+  mouse over the output window, for example.  Consequently, this
+  option is enabled by default for Isabelle/jEdit, but disabled for
+  TTY and Proof~General~/Emacs where document markup would not work.
+
+  \item @{attribute show_types} and @{attribute show_sorts} control
+  printing of type constraints for term variables, and sort
+  constraints for type variables.  By default, neither of these are
+  shown in output.  If @{attribute show_sorts} is enabled, types are
+  always shown as well.  In Isabelle/jEdit, manual setting of these
+  options is normally not required thanks to @{attribute show_markup}
+  above.
+
+  Note that displaying types and sorts may explain why a polymorphic
+  inference rule fails to resolve with some goal, or why a rewrite
+  rule does not apply as expected.
+
+  \item @{attribute show_consts} controls printing of types of
+  constants when displaying a goal state.
+
+  Note that the output can be enormous, because polymorphic constants
+  often occur at several different type instances.
+
+  \item @{attribute show_abbrevs} controls folding of constant
+  abbreviations.
+
+  \item @{attribute show_brackets} controls bracketing in pretty
+  printed output.  If enabled, all sub-expressions of the pretty
+  printing tree will be parenthesized, even if this produces malformed
+  term syntax!  This crude way of showing the internal structure of
+  pretty printed entities may occasionally help to diagnose problems
+  with operator priorities, for example.
+
+  \item @{attribute names_long}, @{attribute names_short}, and
+  @{attribute names_unique} control the way of printing fully
+  qualified internal names in external form.  See also
+  \secref{sec:antiq} for the document antiquotation options of the
+  same names.
+
+  \item @{attribute eta_contract} controls @{text "\<eta>"}-contracted
+  printing of terms.
+
+  The @{text \<eta>}-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"},
+  provided @{text x} is not free in @{text f}.  It asserts
+  \emph{extensionality} of functions: @{prop "f \<equiv> g"} if @{prop "f x \<equiv>
+  g x"} for all @{text x}.  Higher-order unification frequently puts
+  terms into a fully @{text \<eta>}-expanded form.  For example, if @{text
+  F} has type @{text "(\<tau> \<Rightarrow> \<tau>) \<Rightarrow> \<tau>"} then its expanded form is @{term
+  "\<lambda>h. F (\<lambda>x. h x)"}.
+
+  Enabling @{attribute eta_contract} makes Isabelle perform @{text
+  \<eta>}-contractions before printing, so that @{term "\<lambda>h. F (\<lambda>x. h x)"}
+  appears simply as @{text F}.
+
+  Note that the distinction between a term and its @{text \<eta>}-expanded
+  form occasionally matters.  While higher-order resolution and
+  rewriting operate modulo @{text "\<alpha>\<beta>\<eta>"}-conversion, some other tools
+  might look at terms more discretely.
+
+  \item @{attribute goals_limit} controls the maximum number of
+  subgoals to be printed.
+
+  \item @{attribute show_main_goal} controls whether the main result
+  to be proven should be displayed.  This information might be
+  relevant for schematic goals, to inspect the current claim that has
+  been synthesized so far.
+
+  \item @{attribute show_hyps} controls printing of implicit
+  hypotheses of local facts.  Normally, only those hypotheses are
+  displayed that are \emph{not} covered by the assumptions of the
+  current context: this situation indicates a fault in some tool being
+  used.
+
+  By enabling @{attribute show_hyps}, output of \emph{all} hypotheses
+  can be enforced, which is occasionally useful for diagnostic
+  purposes.
+
+  \item @{attribute show_tags} controls printing of extra annotations
+  within theorems, such as internal position information, or the case
+  names being attached by the attribute @{attribute case_names}.
+
+  Note that the @{attribute tagged} and @{attribute untagged}
+  attributes provide low-level access to the collection of tags
+  associated with a theorem.
+
+  \item @{attribute show_question_marks} controls printing of question
+  marks for schematic variables, such as @{text ?x}.  Only the leading
+  question mark is affected, the remaining text is unchanged
+  (including proper markup for schematic variables that might be
+  relevant for user interfaces).
+
+  \end{description}
+*}
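+
+text {* Each of the above is a regular configuration option
+  (\secref{sec:config}) and may be changed accordingly, e.g.\ globally
+  within a theory context as follows (the particular values are
+  arbitrary):
+
+  \begin{ttbox}
+  declare [[show_types = true, show_sorts = false]]
+  \end{ttbox}
+
+  Within a proof body, the same attributes may be activated locally,
+  e.g.\ via ``@{text "using [[show_types]]"}'' before a terminal proof
+  method.
+*}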
+
+
+subsection {* Alternative print modes \label{sec:print-modes} *}
+
+text {*
+  \begin{mldecls}
+    @{index_ML print_mode_value: "unit -> string list"} \\
+    @{index_ML Print_Mode.with_modes: "string list -> ('a -> 'b) -> 'a -> 'b"} \\
+  \end{mldecls}
+
+  The \emph{print mode} facility allows to modify various operations
+  for printing.  Commands like @{command typ}, @{command term},
+  @{command thm} (see \secref{sec:print-diag}) take additional print
+  modes as optional argument.  The underlying ML operations are as
+  follows.
+
+  \begin{description}
+
+  \item @{ML "print_mode_value ()"} yields the list of currently
+  active print mode names.  This should be understood as symbolic
+  representation of certain individual features for printing (with
+  precedence from left to right).
+
+  \item @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
+  @{text "f x"} in an execution context where the print mode is
+  prepended by the given @{text "modes"}.  This provides a thread-safe
+  way to augment print modes.  It is also monotonic in the set of mode
+  names: it retains the default print mode that certain
+  user-interfaces might have installed for their proper functioning!
+
+  \end{description}
+
+  \begin{warn}
+  The old global reference @{ML print_mode} should never be used
+  directly in applications.  Its main reason for being publicly
+  accessible is to support historic versions of Proof~General.
+  \end{warn}
+
+  \medskip The pretty printer for inner syntax maintains alternative
+  mixfix productions for any print mode name invented by the user, say
+  in commands like @{command notation} or @{command abbreviation}.
+  Mode names can be arbitrary, but the following ones have a specific
+  meaning by convention:
+
+  \begin{itemize}
+
+  \item @{verbatim "\"\""} (the empty string): default mode;
+  implicitly active as last element in the list of modes.
+
+  \item @{verbatim input}: dummy print mode that is never active; may
+  be used to specify notation that is only available for input.
+
+  \item @{verbatim internal}: dummy print mode that is never active;
+  used internally in Isabelle/Pure.
+
+  \item @{verbatim xsymbols}: enable proper mathematical symbols
+  instead of ASCII art.\footnote{This traditional mode name stems from
+  the ``X-Symbol'' package for old versions of Proof~General with XEmacs,
+  although that package has been superseded by Unicode in recent
+  years.}
+
+  \item @{verbatim HTML}: additional mode that is active in HTML
+  presentation of Isabelle theory sources; allows to provide
+  alternative output notation.
+
+  \item @{verbatim latex}: additional mode that is active in {\LaTeX}
+  document preparation of Isabelle theory sources; allows to provide
+  alternative output notation.
+
+  \end{itemize}
+*}
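+
+text {* As a minimal sketch of the ML interface, the following
+  expression prints a term with the @{verbatim xsymbols} mode
+  prepended to the current print mode; a suitable proof context
+  @{text "ctxt"} and term @{text "t"} are assumed to be available in
+  ML:
+
+  \begin{ttbox}
+  Print_Mode.with_modes ["xsymbols"]
+    (fn t => writeln (Syntax.string_of_term ctxt t)) t
+  \end{ttbox}
+*}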
+
+
+subsection {* Printing limits *}
+
+text {*
+  \begin{mldecls}
+    @{index_ML Pretty.margin_default: "int Unsynchronized.ref"} \\
+  \end{mldecls}
+
+  \begin{tabular}{rcll}
+    @{attribute_def ML_print_depth} & : & @{text attribute} & default 10 \\ %FIXME move?
+  \end{tabular}
+
+  \begin{description}
+
+  \item @{ML Pretty.margin_default} indicates the global default for
+  the right margin of the built-in pretty printer, with initial value
+  76.  Note that user-interfaces typically control margins
+  automatically when resizing windows, or even bypass the formatting
+  engine of Isabelle/ML altogether and do it within the front end via
+  Isabelle/Scala.
+
+  \item @{attribute ML_print_depth} limits the printing depth of the
+  ML toplevel pretty printer; the precise effect depends on the ML
+  compiler and run-time system.  Typically the limit should be less
+  than 10.  Bigger values such as 100--1000 are useful for debugging.
+
+  \end{description}
+*}
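+
+text {* Both limits may be adjusted manually if required, e.g.\ the
+  configuration option via @{command "declare"} (the value is
+  arbitrary):
+
+  \begin{ttbox}
+  declare [[ML_print_depth = 100]]
+  \end{ttbox}
+
+  and the margin by evaluating @{ML "Pretty.margin_default := 100"} in
+  ML --- although user-interfaces normally manage the margin
+  automatically.
+*}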
+
+
+section {* Mixfix annotations \label{sec:mixfix} *}
+
+text {* Mixfix annotations specify concrete \emph{inner syntax} of
+  Isabelle types and terms.  Locally fixed parameters in toplevel
+  theorem statements, locale and class specifications also admit
+  mixfix annotations in a fairly uniform manner.  A mixfix annotation
+  describes the concrete syntax, the translation to abstract
+  syntax, and the pretty printing.  Special case annotations provide a
+  simple means of specifying infix operators and binders.
+
+  Isabelle mixfix syntax is inspired by {\OBJ} \cite{OBJ}.  It allows
+  to specify any context-free priority grammar, which is more general
+  than the fixity declarations of ML and Prolog.
+
+  @{rail \<open>
+    @{syntax_def mixfix}: '('
+      @{syntax template} prios? @{syntax nat}? |
+      (@'infix' | @'infixl' | @'infixr') @{syntax template} @{syntax nat} |
+      @'binder' @{syntax template} prios? @{syntax nat} |
+      @'structure'
+    ')'
+    ;
+    template: string
+    ;
+    prios: '[' (@{syntax nat} + ',') ']'
+  \<close>}
+
+  The string given as @{text template} may include literal text,
+  spacing, blocks, and arguments (denoted by ``@{text _}''); the
+  special symbol ``@{verbatim "\<index>"}'' (printed as ``@{text "\<index>"}'')
+  represents an index argument that specifies an implicit @{keyword
+  "structure"} reference (see also \secref{sec:locale}).  Only locally
+  fixed variables may be declared as @{keyword "structure"}.
+
+  Infix and binder declarations provide common abbreviations for
+  particular mixfix declarations.  So in practice, mixfix templates
+  mostly degenerate to literal text for concrete syntax, such as
+  ``@{verbatim "++"}'' for an infix symbol.  *}
+
+
+subsection {* The general mixfix form *}
+
+text {* In full generality, mixfix declarations work as follows.
+  Suppose a constant @{text "c :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} is annotated by
+  @{text "(mixfix [p\<^sub>1, \<dots>, p\<^sub>n] p)"}, where @{text "mixfix"} is a string
+  @{text "d\<^sub>0 _ d\<^sub>1 _ \<dots> _ d\<^sub>n"} consisting of delimiters that surround
+  argument positions as indicated by underscores.
+
+  Altogether this determines a production for a context-free priority
+  grammar, where for each argument @{text "i"} the syntactic category
+  is determined by @{text "\<tau>\<^sub>i"} (with priority @{text "p\<^sub>i"}), and the
+  result category is determined from @{text "\<tau>"} (with priority @{text
+  "p"}).  Priority specifications are optional, with default 0 for
+  arguments and 1000 for the result.\footnote{Omitting priorities is
+  prone to syntactic ambiguities unless the delimiter tokens determine
+  fully bracketed notation, as in @{text "if _ then _ else _ fi"}.}
+
+  Since @{text "\<tau>"} may be again a function type, the constant
+  type scheme may have more argument positions than the mixfix
+  pattern.  Printing a nested application @{text "c t\<^sub>1 \<dots> t\<^sub>m"} for
+  @{text "m > n"} works by attaching concrete notation only to the
+  innermost part, essentially by printing @{text "(c t\<^sub>1 \<dots> t\<^sub>n) \<dots> t\<^sub>m"}
+  instead.  If a term has fewer arguments than specified in the mixfix
+  template, the concrete syntax is ignored.
+
+  \medskip A mixfix template may also contain additional directives
+  for pretty printing, notably spaces, blocks, and breaks.  The
+  general template format is a sequence over any of the following
+  entities.
+
+  \begin{description}
+
+  \item @{text "d"} is a delimiter, namely a non-empty sequence of
+  characters other than the following special characters:
+
+  \smallskip
+  \begin{tabular}{ll}
+    @{verbatim "'"} & single quote \\
+    @{verbatim "_"} & underscore \\
+    @{text "\<index>"} & index symbol \\
+    @{verbatim "("} & open parenthesis \\
+    @{verbatim ")"} & close parenthesis \\
+    @{verbatim "/"} & slash \\
+  \end{tabular}
+  \medskip
+
+  \item @{verbatim "'"} escapes the special meaning of these
+  meta-characters, producing a literal version of the following
+  character, unless that is a blank.
+
+  A single quote followed by a blank separates delimiters, without
+  affecting printing, but input tokens may have additional white space
+  here.
+
+  \item @{verbatim "_"} is an argument position, which stands for a
+  certain syntactic category in the underlying grammar.
+
+  \item @{text "\<index>"} is an indexed argument position; this is the place
+  where implicit structure arguments can be attached.
+
+  \item @{text "s"} is a non-empty sequence of spaces for printing.
+  This and the following specifications do not affect parsing at all.
+
+  \item @{verbatim "("}@{text n} opens a pretty printing block.  The
+  optional number specifies how much indentation to add when a line
+  break occurs within the block.  If the parenthesis is not followed
+  by digits, the indentation defaults to 0.  A block specified via
+  @{verbatim "(00"} is unbreakable.
+
+  \item @{verbatim ")"} closes a pretty printing block.
+
+  \item @{verbatim "//"} forces a line break.
+
+  \item @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
+  stands for the string of spaces (zero or more) right after the
+  slash.  These spaces are printed if the break is \emph{not} taken.
+
+  \end{description}
+
+  The general idea of pretty printing with blocks and breaks is also
+  described in \cite{paulson-ml2}; it goes back to \cite{Oppen:1980}.
+*}
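+
+text {* As a hypothetical illustration (assuming Isabelle/HOL; the
+  constant name and concrete syntax are arbitrary), a conditional
+  operator with fully bracketed notation could be introduced as
+  follows:
+
+  \begin{ttbox}
+  definition cond :: "bool => 'a => 'a => 'a"  ("IF _ THEN _ ELSE _ FI")
+    where "cond b x y = (if b then x else y)"
+  \end{ttbox}
+
+  Here the template consists of the delimiters @{verbatim IF},
+  @{verbatim THEN}, @{verbatim ELSE}, @{verbatim FI} surrounding three
+  argument positions; since the notation is fully bracketed, all
+  priorities may be omitted.
+*}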
+
+
+subsection {* Infixes *}
+
+text {* Infix operators are specified by convenient short forms that
+  abbreviate general mixfix annotations as follows:
+
+  \begin{center}
+  \begin{tabular}{lll}
+
+  @{verbatim "("}@{keyword_def "infix"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
+  & @{text "\<mapsto>"} &
+  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
+  @{verbatim "("}@{keyword_def "infixl"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
+  & @{text "\<mapsto>"} &
+  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
+  @{verbatim "("}@{keyword_def "infixr"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
+  & @{text "\<mapsto>"} &
+  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
+
+  \end{tabular}
+  \end{center}
+
+  The mixfix template @{verbatim "\"(_ "}@{text sy}@{verbatim "/ _)\""}
+  specifies two argument positions; the delimiter is preceded by a
+  space and followed by a space or line break; the entire phrase is a
+  pretty printing block.
+
+  The alternative notation @{verbatim "op"}~@{text sy} is introduced
+  in addition.  Thus any infix operator may be written in prefix form
+  (as in ML), independently of the number of arguments in the term.
+*}
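+
+text {* For example (assuming Isabelle/HOL; the name and concrete
+  syntax are arbitrary), an exclusive-or operation might be introduced
+  with infix notation as follows:
+
+  \begin{ttbox}
+  definition xor :: "bool => bool => bool"  (infixl "[+]" 60)
+    where "x [+] y = (x & ~ y | ~ x & y)"
+  \end{ttbox}
+
+  Afterwards the operation may be written infix as @{text "x [+] y"},
+  or in prefix form as @{verbatim "op [+]"}.
+*}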
+
+
+subsection {* Binders *}
+
+text {* A \emph{binder} is a variable-binding construct such as a
+  quantifier.  The idea to formalize @{text "\<forall>x. b"} as @{text "All
+  (\<lambda>x. b)"} for @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow> bool"} already goes back
+  to \cite{church40}.  Isabelle declarations of certain higher-order
+  operators may be annotated with @{keyword_def "binder"} annotations
+  as follows:
+
+  \begin{center}
+  @{text "c :: "}@{verbatim "\""}@{text "(\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2) \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  ("}@{keyword "binder"}@{verbatim " \""}@{text "sy"}@{verbatim "\" ["}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
+  \end{center}
+
+  This introduces concrete binder syntax @{text "sy x. b"}, where
+  @{text x} is a bound variable of type @{text "\<tau>\<^sub>1"}, the body @{text
+  b} has type @{text "\<tau>\<^sub>2"} and the whole term has type @{text "\<tau>\<^sub>3"}.
+  The optional integer @{text p} specifies the syntactic priority of
+  the body; the default is @{text "q"}, which is also the priority of
+  the whole construct.
+
+  Internally, the binder syntax is expanded to something like this:
+  \begin{center}
+  @{text "c_binder :: "}@{verbatim "\""}@{text "idts \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  (\"(3"}@{text sy}@{verbatim "_./ _)\" [0, "}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
+  \end{center}
+
+  Here @{syntax (inner) idts} is the nonterminal symbol for a list of
+  identifiers with optional type constraints (see also
+  \secref{sec:pure-grammar}).  The mixfix template @{verbatim
+  "\"(3"}@{text sy}@{verbatim "_./ _)\""} defines argument positions
+  for the bound identifiers and the body, separated by a dot with
+  optional line break; the entire phrase is a pretty printing block of
+  indentation level 3.  Note that there is no extra space after @{text
+  "sy"}, so it needs to be included user specification if the binder
+  syntax ends with a token that may be continued by an identifier
+  token at the start of @{syntax (inner) idts}.
+
+  Furthermore, a syntax translation transforms @{text "c_binder x\<^sub>1
+  \<dots> x\<^sub>n b"} into iterated application @{text "c (\<lambda>x\<^sub>1. \<dots> c (\<lambda>x\<^sub>n. b)\<dots>)"}.
+  This works in both directions, for parsing and printing.  *}
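+
+text {* For example (assuming Isabelle/HOL; the name and concrete
+  syntax are arbitrary), a derived universal quantifier could be
+  declared with binder notation as follows:
+
+  \begin{ttbox}
+  definition Forall :: "('a => bool) => bool"  (binder "FORALL " 10)
+    where "Forall P = (ALL x. P x)"
+  \end{ttbox}
+
+  This provides concrete syntax @{text "FORALL x. b"} for @{text
+  "Forall (\<lambda>x. b)"}.  Note the trailing space after @{verbatim FORALL}
+  in the template: it is required here, because the binder syntax ends
+  with a token that may be continued by an identifier.
+*}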
+
+
+section {* Explicit notation \label{sec:notation} *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "no_type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "no_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "write"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  Commands that introduce new logical entities (terms or types)
+  usually allow to provide mixfix annotations on the spot, which is
+  convenient for default notation.  Nonetheless, the syntax may be
+  modified later on by declarations for explicit notation.  This
+  allows to add or delete mixfix annotations for existing logical
+  entities within the current context.
+
+  @{rail \<open>
+    (@@{command type_notation} | @@{command no_type_notation}) @{syntax target}?
+      @{syntax mode}? \<newline> (@{syntax nameref} @{syntax mixfix} + @'and')
+    ;
+    (@@{command notation} | @@{command no_notation}) @{syntax target}? @{syntax mode}? \<newline>
+      (@{syntax nameref} @{syntax mixfix} + @'and')
+    ;
+    @@{command write} @{syntax mode}? (@{syntax nameref} @{syntax mixfix} + @'and')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "type_notation"}~@{text "c (mx)"} associates mixfix
+  syntax with an existing type constructor.  The arity of the
+  constructor is retrieved from the context.
+
+  \item @{command "no_type_notation"} is similar to @{command
+  "type_notation"}, but removes the specified syntax annotation from
+  the present context.
+
+  \item @{command "notation"}~@{text "c (mx)"} associates mixfix
+  syntax with an existing constant or fixed variable.  The type
+  declaration of the given entity is retrieved from the context.
+
+  \item @{command "no_notation"} is similar to @{command "notation"},
+  but removes the specified syntax annotation from the present
+  context.
+
+  \item @{command "write"} is similar to @{command "notation"}, but
+  works within an Isar proof body.
+
+  \end{description}
+*}
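+
+text {* For example, assuming a constant @{text "nand"} of type
+  @{text "bool \<Rightarrow> bool \<Rightarrow> bool"} that has been declared without any
+  mixfix annotation, infix notation could be added and removed later
+  on like this (the concrete syntax is arbitrary):
+
+  \begin{ttbox}
+  notation nand  (infixl "[&]" 35)
+    ...
+  no_notation nand  (infixl "[&]" 35)
+  \end{ttbox}
+*}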
+
+
+section {* The Pure syntax \label{sec:pure-syntax} *}
+
+subsection {* Lexical matters \label{sec:inner-lex} *}
+
+text {* The inner lexical syntax vaguely resembles the outer one
+  (\secref{sec:outer-lex}), but some details are different.  There are
+  two main categories of inner syntax tokens:
+
+  \begin{enumerate}
+
+  \item \emph{delimiters} --- the literal tokens occurring in
+  productions of the given priority grammar (cf.\
+  \secref{sec:priority-grammar});
+
+  \item \emph{named tokens} --- various categories of identifiers etc.
+
+  \end{enumerate}
+
+  Delimiters override named tokens and may thus render certain
+  identifiers inaccessible.  Sometimes the logical context admits
+  alternative ways to refer to the same entity, potentially via
+  qualified names.
+
+  \medskip The categories for named tokens are defined once and for
+  all as follows, reusing some categories of the outer token syntax
+  (\secref{sec:outer-lex}).
+
+  \begin{center}
+  \begin{supertabular}{rcl}
+    @{syntax_def (inner) id} & = & @{syntax_ref ident} \\
+    @{syntax_def (inner) longid} & = & @{syntax_ref longident} \\
+    @{syntax_def (inner) var} & = & @{syntax_ref var} \\
+    @{syntax_def (inner) tid} & = & @{syntax_ref typefree} \\
+    @{syntax_def (inner) tvar} & = & @{syntax_ref typevar} \\
+    @{syntax_def (inner) num_token} & = & @{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat} \\
+    @{syntax_def (inner) float_token} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
+    @{syntax_def (inner) xnum_token} & = & @{verbatim "#"}@{syntax_ref nat}@{text "  |  "}@{verbatim "#-"}@{syntax_ref nat} \\
+    @{syntax_def (inner) str_token} & = & @{verbatim "''"} @{text "\<dots>"} @{verbatim "''"} \\
+    @{syntax_def (inner) string_token} & = & @{verbatim "\""} @{text "\<dots>"} @{verbatim "\""} \\
+    @{syntax_def (inner) cartouche} & = & @{verbatim "\<open>"} @{text "\<dots>"} @{verbatim "\<close>"} \\
+  \end{supertabular}
+  \end{center}
+
+  The token categories @{syntax (inner) num_token}, @{syntax (inner)
+  float_token}, @{syntax (inner) xnum_token}, @{syntax (inner)
+  str_token}, @{syntax (inner) string_token}, and @{syntax (inner)
+  cartouche} are not used in Pure. Object-logics may implement
+  numerals and string literals by adding appropriate syntax
+  declarations, together with some translation functions (e.g.\ see
+  @{file "~~/src/HOL/Tools/string_syntax.ML"}).
+
+  The derived categories @{syntax_def (inner) num_const}, @{syntax_def
+  (inner) float_const}, and @{syntax_def (inner) xnum_const} provide
+  robust access to the respective tokens: the syntax tree holds a
+  syntactic constant instead of a free variable.
+*}
+
+
+subsection {* Priority grammars \label{sec:priority-grammar} *}
+
+text {* A context-free grammar consists of a set of \emph{terminal
+  symbols}, a set of \emph{nonterminal symbols} and a set of
+  \emph{productions}.  Productions have the form @{text "A = \<gamma>"},
+  where @{text A} is a nonterminal and @{text \<gamma>} is a string of
+  terminals and nonterminals.  One designated nonterminal is called
+  the \emph{root symbol}.  The language defined by the grammar
+  consists of all strings of terminals that can be derived from the
+  root symbol by applying productions as rewrite rules.
+
+  The standard Isabelle parser for inner syntax uses a \emph{priority
+  grammar}.  Each nonterminal is decorated by an integer priority:
+  @{text "A\<^sup>(\<^sup>p\<^sup>)"}.  In a derivation, @{text "A\<^sup>(\<^sup>p\<^sup>)"} may be rewritten
+  using a production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} only if @{text "p \<le> q"}.  Any
+  priority grammar can be translated into a normal context-free
+  grammar by introducing new nonterminals and productions.
+
+  \medskip Formally, a set of context free productions @{text G}
+  induces a derivation relation @{text "\<longrightarrow>\<^sub>G"} as follows.  Let @{text
+  \<alpha>} and @{text \<beta>} denote strings of terminal or nonterminal symbols.
+  Then @{text "\<alpha> A\<^sup>(\<^sup>p\<^sup>) \<beta> \<longrightarrow>\<^sub>G \<alpha> \<gamma> \<beta>"} holds if and only if @{text G}
+  contains some production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} for @{text "p \<le> q"}.
+
+  \medskip The following grammar for arithmetic expressions
+  demonstrates how binding power and associativity of operators can be
+  enforced by priorities.
+
+  \begin{center}
+  \begin{tabular}{rclr}
+  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim "("} @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim ")"} \\
+  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim 0} \\
+  @{text "A\<^sup>(\<^sup>0\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} \\
+  @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} \\
+  @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "="} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} \\
+  \end{tabular}
+  \end{center}
+  The choice of priorities determines that @{verbatim "-"} binds
+  tighter than @{verbatim "*"}, which binds tighter than @{verbatim
+  "+"}.  Furthermore @{verbatim "+"} associates to the left and
+  @{verbatim "*"} to the right.
+
+  \medskip For clarity, grammars obey these conventions:
+  \begin{itemize}
+
+  \item All priorities must lie between 0 and 1000.
+
+  \item Priority 0 on the right-hand side and priority 1000 on the
+  left-hand side may be omitted.
+
+  \item The production @{text "A\<^sup>(\<^sup>p\<^sup>) = \<alpha>"} is written as @{text "A = \<alpha>
+  (p)"}, i.e.\ the priority of the left-hand side actually appears in
+  a column on the far right.
+
+  \item Alternatives are separated by @{text "|"}.
+
+  \item Repetition is indicated by dots @{text "(\<dots>)"} in an informal
+  but obvious way.
+
+  \end{itemize}
+
+  Using these conventions, the example grammar specification above
+  takes the form:
+  \begin{center}
+  \begin{tabular}{rclc}
+    @{text A} & @{text "="} & @{verbatim "("} @{text A} @{verbatim ")"} \\
+              & @{text "|"} & @{verbatim 0} & \qquad\qquad \\
+              & @{text "|"} & @{text A} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} & @{text "(0)"} \\
+              & @{text "|"} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
+              & @{text "|"} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
+  \end{tabular}
+  \end{center}
+*}
+
+
+subsection {* The Pure grammar \label{sec:pure-grammar} *}
+
+text {* The priority grammar of the @{text "Pure"} theory is defined
+  approximately like this:
+
+  \begin{center}
+  \begin{supertabular}{rclr}
+
+  @{syntax_def (inner) any} & = & @{text "prop  |  logic"} \\\\
+
+  @{syntax_def (inner) prop} & = & @{verbatim "("} @{text prop} @{verbatim ")"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
+    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "=="} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(2)"} \\
+    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{text "\<equiv>"} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(2)"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "&&&"} @{text "prop\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{verbatim "[|"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{verbatim "|]"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{text "\<lbrakk>"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{text "\<rbrakk>"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{verbatim "!!"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
+    & @{text "|"} & @{text "\<And>"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim OFCLASS} @{verbatim "("} @{text type} @{verbatim ","} @{text logic} @{verbatim ")"} \\
+    & @{text "|"} & @{verbatim SORT_CONSTRAINT} @{verbatim "("} @{text type} @{verbatim ")"} \\
+    & @{text "|"} & @{verbatim TERM} @{text logic} \\
+    & @{text "|"} & @{verbatim PROP} @{text aprop} \\\\
+
+  @{syntax_def (inner) aprop} & = & @{verbatim "("} @{text aprop} @{verbatim ")"} \\
+    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
+    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
+    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
+    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\\\
+
+  @{syntax_def (inner) logic} & = & @{verbatim "("} @{text logic} @{verbatim ")"} \\
+    & @{text "|"} & @{text "logic\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
+    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
+    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
+    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
+    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\
+    & @{text "|"} & @{text "\<struct> index\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} \\
+    & @{text "|"} & @{verbatim "%"} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
+    & @{text "|"} & @{text \<lambda>} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
+    & @{text "|"} & @{verbatim op} @{verbatim "=="}@{text "  |  "}@{verbatim op} @{text "\<equiv>"}@{text "  |  "}@{verbatim op} @{verbatim "&&&"} \\
+    & @{text "|"} & @{verbatim op} @{verbatim "==>"}@{text "  |  "}@{verbatim op} @{text "\<Longrightarrow>"} \\
+    & @{text "|"} & @{verbatim TYPE} @{verbatim "("} @{text type} @{verbatim ")"} \\\\
+
+  @{syntax_def (inner) idt} & = & @{verbatim "("} @{text idt} @{verbatim ")"}@{text "  |  id  |  "}@{verbatim "_"} \\
+    & @{text "|"} & @{text id} @{verbatim "::"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim "_"} @{verbatim "::"} @{text type} & @{text "(0)"} \\\\
+
+  @{syntax_def (inner) index} & = & @{verbatim "\<^bsub>"} @{text "logic\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "\<^esub>"}@{text "  |  |  \<index>"} \\\\
+
+  @{syntax_def (inner) idts} & = & @{text "idt  |  idt\<^sup>(\<^sup>1\<^sup>) idts"} & @{text "(0)"} \\\\
+
+  @{syntax_def (inner) pttrn} & = & @{text idt} \\\\
+
+  @{syntax_def (inner) pttrns} & = & @{text "pttrn  |  pttrn\<^sup>(\<^sup>1\<^sup>) pttrns"} & @{text "(0)"} \\\\
+
+  @{syntax_def (inner) type} & = & @{verbatim "("} @{text type} @{verbatim ")"} \\
+    & @{text "|"} & @{text "tid  |  tvar  |  "}@{verbatim "_"} \\
+    & @{text "|"} & @{text "tid"} @{verbatim "::"} @{text "sort  |  tvar  "}@{verbatim "::"} @{text "sort  |  "}@{verbatim "_"} @{verbatim "::"} @{text "sort"} \\
+    & @{text "|"} & @{text "type_name  |  type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) type_name"} \\
+    & @{text "|"} & @{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text type_name} \\
+    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
+  @{syntax_def (inner) type_name} & = & @{text "id  |  longid"} \\\\
+
+  @{syntax_def (inner) sort} & = & @{syntax class_name}~@{text "  |  "}@{verbatim "{}"} \\
+    & @{text "|"} & @{verbatim "{"} @{syntax class_name} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{syntax class_name} @{verbatim "}"} \\
+  @{syntax_def (inner) class_name} & = & @{text "id  |  longid"} \\
+  \end{supertabular}
+  \end{center}
+
+  \medskip Here literal terminals are printed @{verbatim "verbatim"};
+  see also \secref{sec:inner-lex} for further token categories of the
+  inner syntax.  The meaning of the nonterminals defined by the above
+  grammar is as follows:
+
+  \begin{description}
+
+  \item @{syntax_ref (inner) any} denotes any term.
+
+  \item @{syntax_ref (inner) prop} denotes meta-level propositions,
+  which are terms of type @{typ prop}.  The syntax of such formulae of
+  the meta-logic is carefully distinguished from usual conventions for
+  object-logics.  In particular, plain @{text "\<lambda>"}-term notation is
+  \emph{not} recognized as @{syntax (inner) prop}.
+
+  \item @{syntax_ref (inner) aprop} denotes atomic propositions, which
+  are embedded into regular @{syntax (inner) prop} by means of an
+  explicit @{verbatim PROP} token.
+
+  Terms of type @{typ prop} with non-constant head, e.g.\ a plain
+  variable, are printed in this form.  Constants that yield type @{typ
+  prop} are expected to provide their own concrete syntax; otherwise
+  the printed version will appear like @{syntax (inner) logic} and
+  cannot be parsed again as @{syntax (inner) prop}.
+
+  \item @{syntax_ref (inner) logic} denotes arbitrary terms of a
+  logical type, excluding type @{typ prop}.  This is the main
+  syntactic category of object-logic entities, covering plain @{text
+  \<lambda>}-term notation (variables, abstraction, application), plus
+  anything defined by the user.
+
+  When specifying notation for logical entities, all logical types
+  (excluding @{typ prop}) are \emph{collapsed} to this single category
+  of @{syntax (inner) logic}.
+
+  \item @{syntax_ref (inner) index} denotes an optional index term for
+  indexed syntax.  If omitted, it refers to the first @{keyword_ref
+  "structure"} variable in the context.  The special dummy ``@{text
+  "\<index>"}'' serves as pattern variable in mixfix annotations that
+  introduce indexed notation.
+
+  \item @{syntax_ref (inner) idt} denotes identifiers, possibly
+  constrained by types.
+
+  \item @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
+  (inner) idt}.  This is the most basic category for variables in
+  iterated binders, such as @{text "\<lambda>"} or @{text "\<And>"}.
+
+  \item @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
+  denote patterns for abstraction, cases bindings etc.  In Pure, these
+  categories start as a mere copy of @{syntax (inner) idt} and
+  @{syntax (inner) idts}, respectively.  Object-logics may add
+  additional productions for binding forms.
+
+  \item @{syntax_ref (inner) type} denotes types of the meta-logic.
+
+  \item @{syntax_ref (inner) sort} denotes meta-level sorts.
+
+  \end{description}
+
+  Here are some further explanations of certain syntax features.
+
+  \begin{itemize}
+
+  \item In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
+  parsed as @{text "x :: (nat y)"}, treating @{text y} like a type
+  constructor applied to @{text nat}.  To avoid this interpretation,
+  write @{text "(x :: nat) y"} with explicit parentheses.
+
+  \item Similarly, @{text "x :: nat y :: nat"} is parsed as @{text "x ::
+  (nat y :: nat)"}.  The correct form is @{text "(x :: nat) (y ::
+  nat)"}, or @{text "(x :: nat) y :: nat"} if @{text y} is last in the
+  sequence of identifiers.
+
+  \item Type constraints for terms bind very weakly.  For example,
+  @{text "x < y :: nat"} is normally parsed as @{text "(x < y) ::
+  nat"}, unless @{text "<"} has a very low priority, in which case the
+  input is likely to be ambiguous.  The correct form is @{text "x < (y
+  :: nat)"}.
+
+  \item Constraints may be either written with two literal colons
+  ``@{verbatim "::"}'' or the double-colon symbol @{verbatim "\<Colon>"},
+  which actually looks exactly the same in some {\LaTeX} styles.
+
+  \item Dummy variables (written as underscore) may occur in different
+  roles.
+
+  \begin{description}
+
+  \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
+  anonymous inference parameter, which is filled-in according to the
+  most general type produced by the type-checking phase.
+
+  \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where
+  the body does not refer to the binding introduced here.  As in the
+  term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
+  "\<lambda>x y. x"}.
+
+  \item A free ``@{text "_"}'' refers to an implicit outer binding.
+  Higher-level definitional packages usually allow forms like @{text "f x _
+  = x"}.
+
+  \item A schematic ``@{text "_"}'' (within a term pattern, see
+  \secref{sec:term-decls}) refers to an anonymous variable that is
+  implicitly abstracted over its context of locally bound variables.
+  For example, this allows pattern matching of @{text "{x. f x = g
+  x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by
+  using both bound and schematic dummies.
+
+  \end{description}
+
+  \item The three literal dots ``@{verbatim "..."}'' may be also
+  written as ellipsis symbol @{verbatim "\<dots>"}.  In both cases this
+  refers to a special schematic variable, which is bound in the
+  context.  This special term abbreviation works nicely with
+  calculational reasoning (\secref{sec:calculation}).
+
+  \item @{verbatim CONST} ensures that the given identifier is treated
+  as constant term, and passed through the parse tree in fully
+  internalized form.  This is particularly relevant for translation
+  rules (\secref{sec:syn-trans}), notably on the RHS.
+
+  \item @{verbatim XCONST} is similar to @{verbatim CONST}, but
+  retains the constant name as given.  This is only relevant to
+  translation rules (\secref{sec:syn-trans}), notably on the LHS.
+
+  \end{itemize}
+*}
+
+
+subsection {* Inspecting the syntax *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_syntax"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{command "print_syntax"} prints the inner syntax of the
+  current context.  The output can be quite large; the most important
+  sections are explained below.
+
+  \begin{description}
+
+  \item @{text "lexicon"} lists the delimiters of the inner token
+  language; see \secref{sec:inner-lex}.
+
+  \item @{text "prods"} lists the productions of the underlying
+  priority grammar; see \secref{sec:priority-grammar}.
+
+  The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
+  "A[p]"}; delimiters are quoted.  Many productions have an extra
+  @{text "\<dots> => name"}.  These names later become the heads of parse
+  trees; they also guide the pretty printer.
+
+  Productions without such parse tree names are called \emph{copy
+  productions}.  Their right-hand side must have exactly one
+  nonterminal symbol (or named token).  The parser does not create a
+  new parse tree node for copy productions, but simply returns the
+  parse tree of the right-hand symbol.
+
+  If the right-hand side of a copy production consists of a single
+  nonterminal without any delimiters, then it is called a \emph{chain
+  production}.  Chain productions act as abbreviations: conceptually,
+  they are removed from the grammar by adding new productions.
+  Priority information attached to chain productions is ignored; only
+  the dummy value @{text "-1"} is displayed.
+
+  \item @{text "print modes"} lists the alternative print modes
+  provided by this grammar; see \secref{sec:print-modes}.
+
+  \item @{text "parse_rules"} and @{text "print_rules"} relate to
+  syntax translations (macros); see \secref{sec:syn-trans}.
+
+  \item @{text "parse_ast_translation"} and @{text
+  "print_ast_translation"} list sets of constants that invoke
+  translation functions for abstract syntax trees, which are only
+  required in very special situations; see \secref{sec:tr-funs}.
+
+  \item @{text "parse_translation"} and @{text "print_translation"}
+  list the sets of constants that invoke regular translation
+  functions; see \secref{sec:tr-funs}.
+
+  \end{description}
+
+  \end{description}
+*}
+
+
+subsection {* Ambiguity of parsed expressions *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{attribute_def syntax_ambiguity_warning} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def syntax_ambiguity_limit} & : & @{text attribute} & default @{text 10} \\
+  \end{tabular}
+
+  Depending on the grammar and the given input, parsing may be
+  ambiguous.  Isabelle lets the Earley parser enumerate all possible
+  parse trees, and then tries to make the best out of the situation.
+  Terms that cannot be type-checked are filtered out, which often
+  leads to a unique result in the end.  Unlike regular type
+  reconstruction, which is applied to the whole collection of input
+  terms simultaneously, the filtering stage only treats each given
+  term in isolation.  Filtering is also not attempted for individual
+  types or raw ASTs (as required for @{command translations}).
+
+  Certain warning or error messages are printed, depending on the
+  situation and the given configuration options.  Parsing ultimately
+  fails, if multiple results remain after the filtering phase.
+
+  \begin{description}
+
+  \item @{attribute syntax_ambiguity_warning} controls output of
+  explicit warning messages about syntax ambiguity.
+
+  \item @{attribute syntax_ambiguity_limit} determines the number of
+  resulting parse trees that are shown as part of the printed message
+  in case of an ambiguity.
+
+  \end{description}
+*}
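+
+text {* Both options may be changed like any other configuration
+  option (\secref{sec:config}), e.g.\ as follows (the values are
+  arbitrary):
+
+  \begin{ttbox}
+  declare [[syntax_ambiguity_warning = false, syntax_ambiguity_limit = 2]]
+  \end{ttbox}
+*}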
+
+
+section {* Syntax transformations \label{sec:syntax-transformations} *}
+
+text {* The inner syntax engine of Isabelle provides separate
+  mechanisms to transform parse trees either via rewrite systems on
+  first-order ASTs (\secref{sec:syn-trans}), or ML functions on ASTs
+  or syntactic @{text "\<lambda>"}-terms (\secref{sec:tr-funs}).  This works
+  both for parsing and printing, as outlined in
+  \figref{fig:parse-print}.
+
+  \begin{figure}[htbp]
+  \begin{center}
+  \begin{tabular}{cl}
+  string          & \\
+  @{text "\<down>"}     & lexer + parser \\
+  parse tree      & \\
+  @{text "\<down>"}     & parse AST translation \\
+  AST             & \\
+  @{text "\<down>"}     & AST rewriting (macros) \\
+  AST             & \\
+  @{text "\<down>"}     & parse translation \\
+  --- pre-term ---    & \\
+  @{text "\<down>"}     & print translation \\
+  AST             & \\
+  @{text "\<down>"}     & AST rewriting (macros) \\
+  AST             & \\
+  @{text "\<down>"}     & print AST translation \\
+  string          &
+  \end{tabular}
+  \end{center}
+  \caption{Parsing and printing with translations}\label{fig:parse-print}
+  \end{figure}
+
+  These intermediate syntax tree formats eventually lead to a pre-term
+  with all names and binding scopes resolved, but most type
+  information still missing.  Explicit type constraints might be given by
+  the user, or implicit position information by the system --- both
+  need to be passed-through carefully by syntax transformations.
+
+  Pre-terms are further processed by the so-called \emph{check} and
+  \emph{uncheck} phases that are intertwined with type-inference (see
+  also \cite{isabelle-implementation}).  The latter allows to operate
+  on higher-order abstract syntax with proper binding and type
+  information already available.
+
+  As a rule of thumb, anything that manipulates bindings of variables
+  or constants needs to be implemented as syntax transformation (see
+  below).  Anything else is better done via check/uncheck: a prominent
+  example application is the @{command abbreviation} concept of
+  Isabelle/Pure. *}
+
+
+subsection {* Abstract syntax trees \label{sec:ast} *}
+
+text {* The ML datatype @{ML_type Ast.ast} explicitly represents the
+  intermediate AST format that is used for syntax rewriting
+  (\secref{sec:syn-trans}).  It is defined in ML as follows:
+  \begin{ttbox}
+  datatype ast =
+    Constant of string |
+    Variable of string |
+    Appl of ast list
+  \end{ttbox}
+
+  An AST is either an atom (constant or variable) or a list of (at
+  least two) subtrees.  Occasional diagnostic output of ASTs uses
+  notation that resembles the S-expressions of LISP.  Constant atoms are
+  shown as quoted strings, variable atoms as non-quoted strings and
+  applications as a parenthesized list of subtrees.  For example, the
+  AST
+  @{ML [display] "Ast.Appl
+  [Ast.Constant \"_abs\", Ast.Variable \"x\", Ast.Variable \"t\"]"}
+  is pretty-printed as @{verbatim "(\"_abs\" x t)"}.  Note that
+  @{verbatim "()"} and @{verbatim "(x)"} are excluded as ASTs, because
+  they have too few subtrees.
+
+  \medskip AST application is merely a pro-forma mechanism to indicate
+  certain syntactic structures.  Thus @{verbatim "(c a b)"} could mean
+  either term application or type application, depending on the
+  syntactic context.
+
+  Nested application like @{verbatim "((\"_abs\" x t) u)"} is also
+  possible, but ASTs are definitely first-order: the syntax constant
+  @{verbatim "\"_abs\""} does not bind the @{verbatim x} in any way.
+  Proper bindings are introduced in later stages of the term syntax,
+  where @{verbatim "(\"_abs\" x t)"} becomes an @{ML Abs} node and
+  occurrences of @{verbatim x} in @{verbatim t} are replaced by bound
+  variables (represented as de-Bruijn indices).
+*}
+
+
+subsubsection {* AST constants versus variables *}
+
+text {* Depending on the situation --- input syntax, output syntax,
+  translation patterns --- the distinction of atomic asts as @{ML
+  Ast.Constant} versus @{ML Ast.Variable} serves slightly different
+  purposes.
+
+  Input syntax of a term such as @{text "f a b = c"} does not yet
+  indicate the scopes of atomic entities @{text "f, a, b, c"}: they
+  could be global constants or local variables, even bound ones
+  depending on the context of the term.  @{ML Ast.Variable} leaves
+  this choice still open: later syntax layers (or translation
+  functions) may capture such a variable to determine its role
+  specifically, to make it a constant, bound variable, free variable
+  etc.  In contrast, syntax translations that introduce already known
+  constants would rather do it via @{ML Ast.Constant} to prevent
+  accidental re-interpretation later on.
+
+  Output syntax turns term constants into @{ML Ast.Constant} and
+  variables (free or schematic) into @{ML Ast.Variable}.  This
+  information is precise when printing fully formal @{text "\<lambda>"}-terms.
+
+  \medskip AST translation patterns (\secref{sec:syn-trans}) that
+  represent terms cannot distinguish constants and variables
+  syntactically.  Explicit indication of @{text "CONST c"} inside the
+  term language is required, unless @{text "c"} is known as special
+  \emph{syntax constant} (see also @{command syntax}).  It is also
+  possible to use @{command syntax} declarations (without mixfix
+  annotation) to enforce that certain unqualified names are always
+  treated as constant within the syntax machinery.
+
+  The situation is simpler for ASTs that represent types or sorts,
+  since the concrete syntax already distinguishes type variables from
+  type constants (constructors).  So @{text "('a, 'b) foo"}
+  corresponds to an AST application of some constant for @{text foo}
+  and variable arguments for @{text "'a"} and @{text "'b"}.  Note that
+  the postfix application is merely a feature of the concrete syntax,
+  while in the AST the constructor occurs in head position.  *}
+
+
+subsubsection {* Authentic syntax names *}
+
+text {* Naming constant entities within ASTs is another delicate
+  issue.  Unqualified names are resolved in the name space tables in
+  the last stage of parsing, after all translations have been applied.
+  Since syntax transformations do not know about this later name
+  resolution, there can be surprises in boundary cases.
+
+  \emph{Authentic syntax names} for @{ML Ast.Constant} avoid this
+  problem: the fully-qualified constant name with a special prefix for
+  its formal category (@{text "class"}, @{text "type"}, @{text
+  "const"}, @{text "fixed"}) represents the information faithfully
+  within the untyped AST format.  Accidental overlap with free or
+  bound variables is excluded as well.  Authentic syntax names work
+  implicitly in the following situations:
+
+  \begin{itemize}
+
+  \item Input of term constants (or fixed variables) that are
+  introduced by concrete syntax via @{command notation}: the
+  correspondence of a particular grammar production to some known term
+  entity is preserved.
+
+  \item Input of type constants (constructors) and type classes ---
+  thanks to explicit syntactic distinction independently on the
+  context.
+
+  \item Output of term constants, type constants, type classes ---
+  this information is already available from the internal term to be
+  printed.
+
+  \end{itemize}
+
+  In other words, syntax transformations that operate on input terms
+  written as prefix applications are difficult to make robust.
+  Luckily, this case rarely occurs in practice, because syntax forms
+  to be translated usually correspond to some concrete notation. *}
+
+
+subsection {* Raw syntax and translations \label{sec:syn-trans} *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{command_def "nonterminal"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "syntax"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "no_syntax"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "translations"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "no_translations"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{attribute_def syntax_ast_trace} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def syntax_ast_stats} & : & @{text attribute} & default @{text false} \\
+  \end{tabular}
+
+  Unlike mixfix notation for existing formal entities
+  (\secref{sec:notation}), raw syntax declarations provide full access
+  to the priority grammar of the inner syntax, without any sanity
+  checks.  This includes additional syntactic categories (via
+  @{command nonterminal}) and free-form grammar productions (via
+  @{command syntax}).  Additional syntax translations (or macros, via
+  @{command translations}) are required to turn resulting parse trees
+  into proper representations of formal entities again.
+
+  @{rail \<open>
+    @@{command nonterminal} (@{syntax name} + @'and')
+    ;
+    (@@{command syntax} | @@{command no_syntax}) @{syntax mode}? (constdecl +)
+    ;
+    (@@{command translations} | @@{command no_translations})
+      (transpat ('==' | '=>' | '<=' | '\<rightleftharpoons>' | '\<rightharpoonup>' | '\<leftharpoondown>') transpat +)
+    ;
+
+    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
+    ;
+    mode: ('(' ( @{syntax name} | @'output' | @{syntax name} @'output' ) ')')
+    ;
+    transpat: ('(' @{syntax nameref} ')')? @{syntax string}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "nonterminal"}~@{text c} declares a type
+  constructor @{text c} (without arguments) to act as a purely syntactic
+  type: a nonterminal symbol of the inner syntax.
+
+  \item @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
+  priority grammar and the pretty printer table for the given print
+  mode (default @{verbatim "\"\""}). An optional keyword @{keyword_ref
+  "output"} means that only the pretty printer table is affected.
+
+  Following \secref{sec:mixfix}, the mixfix annotation @{text "mx =
+  template ps q"} together with type @{text "\<sigma> = \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} and
+  specify a grammar production.  The @{text template} contains
+  delimiter tokens that surround @{text "n"} argument positions
+  (@{verbatim "_"}).  The latter correspond to nonterminal symbols
+  @{text "A\<^sub>i"} derived from the argument types @{text "\<tau>\<^sub>i"} as
+  follows:
+  \begin{itemize}
+
+  \item @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
+
+  \item @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
+  constructor @{text "\<kappa> \<noteq> prop"}
+
+  \item @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
+
+  \item @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
+  (syntactic type constructor)
+
+  \end{itemize}
+
+  Each @{text "A\<^sub>i"} is decorated by priority @{text "p\<^sub>i"} from the
+  given list @{text "ps"}; misssing priorities default to 0.
+
+  The resulting nonterminal of the production is determined similarly
+  from type @{text "\<tau>"}, with priority @{text "q"} and default 1000.
+
+  \medskip Parsing via this production produces parse trees @{text
+  "t\<^sub>1, \<dots>, t\<^sub>n"} for the argument slots.  The resulting parse tree is
+  composed as @{text "c t\<^sub>1 \<dots> t\<^sub>n"}, by using the syntax constant @{text
+  "c"} of the syntax declaration.
+
+  Such syntactic constants are invented on the spot, without formal
+  check wrt.\ existing declarations.  It is conventional to use plain
+  identifiers prefixed by a single underscore (e.g.\ @{text
+  "_foobar"}).  Names should be chosen with care, to avoid clashes
+  with other syntax declarations.
+
+  \medskip The special case of a copy production is specified by @{text
+  "c = "}@{verbatim "\"\""} (empty string).  It means that the
+  resulting parse tree @{text "t"} is copied directly, without any
+  further decoration.
+
+  \item @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
+  declarations (and translations) resulting from @{text decls}, which
+  are interpreted in the same manner as for @{command "syntax"} above.
+
+  \item @{command "translations"}~@{text rules} specifies syntactic
+  translation rules (i.e.\ macros) as first-order rewrite rules on
+  ASTs (\secref{sec:ast}).  The theory context maintains two
+  independent lists of translation rules: parse rules (@{verbatim "=>"}
+  or @{text "\<rightharpoonup>"}) and print rules (@{verbatim "<="} or @{text "\<leftharpoondown>"}).
+  For convenience, both can be specified simultaneously as parse~/
+  print rules (@{verbatim "=="} or @{text "\<rightleftharpoons>"}).
+
+  Translation patterns may be prefixed by the syntactic category to be
+  used for parsing; the default is @{text logic} which means that
+  regular term syntax is used.  Both sides of the syntax translation
+  rule undergo parsing and parse AST translations
+  \secref{sec:tr-funs}, in order to perform some fundamental
+  normalization like @{text "\<lambda>x y. b \<leadsto> \<lambda>x. \<lambda>y. b"}, but other AST
+  translation rules are \emph{not} applied recursively here.
+
+  When processing AST patterns, the inner syntax lexer runs in a
+  different mode that allows identifiers to start with underscore.
+  This accommodates the usual naming convention for auxiliary syntax
+  constants --- those that do not have a logical counterpart --- by
+  allowing arbitrary AST applications to be specified within the term
+  syntax, independently of the corresponding concrete syntax.
+
+  Atomic ASTs are distinguished as @{ML Ast.Constant} versus @{ML
+  Ast.Variable} as follows: a qualified name or syntax constant
+  declared via @{command syntax}, or parse tree head of concrete
+  notation becomes @{ML Ast.Constant}, anything else @{ML
+  Ast.Variable}.  Note that @{text CONST} and @{text XCONST} within
+  the term language (\secref{sec:pure-grammar}) allow enforcing
+  treatment as constants.
+
+  AST rewrite rules @{text "(lhs, rhs)"} need to obey the following
+  side-conditions:
+
+  \begin{itemize}
+
+  \item Rules must be left linear: @{text "lhs"} must not contain
+  repeated variables.\footnote{The deeper reason for this is that AST
+  equality is not well-defined: different occurrences of the ``same''
+  AST could be decorated differently by accidental type-constraints or
+  source position information, for example.}
+
+  \item Every variable in @{text "rhs"} must also occur in @{text
+  "lhs"}.
+
+  \end{itemize}
+
+  \item @{command "no_translations"}~@{text rules} removes syntactic
+  translation rules, which are interpreted in the same manner as for
+  @{command "translations"} above.
+
+  \item @{attribute syntax_ast_trace} and @{attribute
+  syntax_ast_stats} control diagnostic output in the AST normalization
+  process, when translation rules are applied to concrete input or
+  output.
+
+  \end{description}
+
+  Raw syntax and translations provide slightly lower-level access to
+  the grammar and the form of resulting parse trees.  It is often
+  possible to avoid this untyped macro mechanism, and use type-safe
+  @{command abbreviation} or @{command notation} instead.  Some
+  important situations where @{command syntax} and @{command
+  translations} are really needed are as follows (see also the sketch
+  below):
+
+  \begin{itemize}
+
+  \item Iterated replacement via recursive @{command translations}.
+  For example, consider list enumeration @{term "[a, b, c, d]"} as
+  defined in theory @{theory List} in Isabelle/HOL.
+
+  \item Change of binding status of variables: anything beyond the
+  built-in @{keyword "binder"} mixfix annotation requires explicit
+  syntax translations.  For example, consider list filter
+  comprehension @{term "[x \<leftarrow> xs . P]"} as defined in theory @{theory
+  List} in Isabelle/HOL.
+
+  \end{itemize}
+*}
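+
+text {* The following declarations give a minimal sketch of the first
+  situation (the bracket notation and the auxiliary syntax constant @{text
+  "_MySet"} are invented here purely for illustration): a raw grammar
+  production for set enumerations, together with recursive @{command
+  translations} rules that expand them into iterated applications of the
+  existing constant @{const insert}. *}
+
+syntax
+  "_MySet" :: "args \<Rightarrow> 'a set"    ("{|_|}")
+
+translations
+  "{|x, xs|}" \<rightleftharpoons> "CONST insert x {|xs|}"
+  "{|x|}" \<rightleftharpoons> "CONST insert x {}"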
+
+subsubsection {* Applying translation rules *}
+
+text {* As a term is being parsed or printed, an AST is generated as
+  an intermediate form according to \figref{fig:parse-print}.  The AST
+  is normalized by applying translation rules in the manner of a
+  first-order term rewriting system.  We first examine how a single
+  rule is applied.
+
+  Let @{text "t"} be the abstract syntax tree to be normalized and
+  @{text "(lhs, rhs)"} some translation rule.  A subtree @{text "u"}
+  of @{text "t"} is called \emph{redex} if it is an instance of @{text
+  "lhs"}; in this case the pattern @{text "lhs"} is said to match the
+  object @{text "u"}.  A redex matched by @{text "lhs"} may be
+  replaced by the corresponding instance of @{text "rhs"}, thus
+  \emph{rewriting} the AST @{text "t"}.  Matching requires some notion
+  of \emph{place-holders} in rule patterns: @{ML Ast.Variable} serves
+  this purpose.
+
+  More precisely, the matching of the object @{text "u"} against the
+  pattern @{text "lhs"} is performed as follows:
+
+  \begin{itemize}
+
+  \item Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
+  Ast.Constant}~@{text "x"} are matched by pattern @{ML
+  Ast.Constant}~@{text "x"}.  Thus all atomic ASTs in the object are
+  treated as (potential) constants, and a successful match makes them
+  actual constants even before name space resolution (see also
+  \secref{sec:ast}).
+
+  \item Object @{text "u"} is matched by pattern @{ML
+  Ast.Variable}~@{text "x"}, binding @{text "x"} to @{text "u"}.
+
+  \item Object @{ML Ast.Appl}~@{text "us"} is matched by @{ML
+  Ast.Appl}~@{text "ts"} if @{text "us"} and @{text "ts"} have the
+  same length and each corresponding subtree matches.
+
+  \item In every other case, matching fails.
+
+  \end{itemize}
+
+  A successful match yields a substitution that is applied to @{text
+  "rhs"}, generating the instance that replaces @{text "u"}.
+
+  Normalizing an AST involves repeatedly applying translation rules
+  until none are applicable.  This works yoyo-like: top-down,
+  bottom-up, top-down, etc.  At each subtree position, rules are
+  chosen in order of appearance in the theory definitions.
+
+  The configuration options @{attribute syntax_ast_trace} and
+  @{attribute syntax_ast_stats} might help to understand this process
+  and diagnose problems; see also the sketch below.
+
+  \begin{warn}
+  If syntax translation rules work incorrectly, the output of
+  @{command_ref print_syntax} with its \emph{rules} sections reveals the
+  actual internal forms of AST patterns, without potentially confusing
+  concrete syntax.  Recall that AST constants appear as quoted strings
+  and variables without quotes.
+  \end{warn}
+
+  \begin{warn}
+  If @{attribute_ref eta_contract} is set to @{text "true"}, terms
+  will be @{text "\<eta>"}-contracted \emph{before} the AST rewriter sees
+  them.  Thus some abstraction nodes needed for print rules to match
+  may vanish.  For example, @{text "Ball A (\<lambda>x. P x)"} would contract
+  to @{text "Ball A P"} and the standard print rule would fail to
+  apply.  This problem can be avoided by hand-written ML translation
+  functions (see also \secref{sec:tr-funs}), which is in fact the same
+  mechanism used in built-in @{keyword "binder"} declarations.
+  \end{warn}
+*}
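+
+text {* The following sketch shows how these diagnostic options may be
+  enabled temporarily at the theory level, in order to trace the
+  normalization of a small example term (any other term input would do just
+  as well): *}
+
+declare [[syntax_ast_trace = true, syntax_ast_stats = true]]
+
+term "A \<and> B \<longrightarrow> B"
+
+declare [[syntax_ast_trace = false, syntax_ast_stats = false]]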
+
+
+subsection {* Syntax translation functions \label{sec:tr-funs} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "parse_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "parse_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "print_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "typed_print_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "print_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{ML_antiquotation_def "class_syntax"} & : & @{text "ML antiquotation"} \\
+    @{ML_antiquotation_def "type_syntax"} & : & @{text "ML antiquotation"} \\
+    @{ML_antiquotation_def "const_syntax"} & : & @{text "ML antiquotation"} \\
+    @{ML_antiquotation_def "syntax_const"} & : & @{text "ML antiquotation"} \\
+  \end{matharray}
+
+  Syntax translation functions written in ML admit almost arbitrary
+  manipulations of inner syntax, at the expense of some complexity and
+  obscurity in the implementation.
+
+  @{rail \<open>
+  ( @@{command parse_ast_translation} | @@{command parse_translation} |
+    @@{command print_translation} | @@{command typed_print_translation} |
+    @@{command print_ast_translation}) @{syntax text}
+  ;
+  (@@{ML_antiquotation class_syntax} |
+   @@{ML_antiquotation type_syntax} |
+   @@{ML_antiquotation const_syntax} |
+   @@{ML_antiquotation syntax_const}) name
+  \<close>}
+
+  \begin{description}
+
+  \item @{command parse_translation} etc. declare syntax translation
+  functions to the theory.  Each of these commands has a single
+  @{syntax text} argument that refers to an ML expression of
+  appropriate type as follows:
+
+  \medskip
+  {\footnotesize
+  \begin{tabular}{l}
+  @{command parse_ast_translation} : \\
+  \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
+  @{command parse_translation} : \\
+  \quad @{ML_type "(string * (Proof.context -> term list -> term)) list"} \\
+  @{command print_translation} : \\
+  \quad @{ML_type "(string * (Proof.context -> term list -> term)) list"} \\
+  @{command typed_print_translation} : \\
+  \quad @{ML_type "(string * (Proof.context -> typ -> term list -> term)) list"} \\
+  @{command print_ast_translation} : \\
+  \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
+  \end{tabular}}
+  \medskip
+
+  The argument list consists of @{text "(c, tr)"} pairs, where @{text
+  "c"} is the syntax name of the formal entity involved, and @{text
+  "tr"} a function that translates a syntax form @{text "c args"} into
+  @{text "tr ctxt args"} (depending on the context).  The Isabelle/ML
+  naming convention for parse translations is @{text "c_tr"} and for
+  print translations @{text "c_tr'"}.
+
+  The @{command_ref print_syntax} command displays the sets of names
+  associated with the translation functions of a theory under @{text
+  "parse_ast_translation"} etc.
+
+  \item @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
+  @{text "@{const_syntax c}"} inline the authentic syntax name of the
+  given formal entities into the ML source.  This is the
+  fully-qualified logical name prefixed by a special marker to
+  indicate its kind: thus different logical name spaces are properly
+  distinguished within parse trees.
+
+  \item @{text "@{const_syntax c}"} inlines the name @{text "c"} of
+  the given syntax constant, having checked that it has been declared
+  via some @{command syntax} commands within the theory context.  Note
+  that the usual naming convention makes syntax constants start with
+  underscore, to reduce the chance of accidental clashes with other
+  names occurring in parse trees (unqualified constants etc.).
+
+  \end{description}
+*}
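+
+text {* The following sketch (the syntax constant @{text "_my_id"} and its
+  notation are invented purely for illustration) declares input syntax
+  @{verbatim "ID(t)"} for an arbitrary term @{text t}, together with a parse
+  translation that merely returns its single argument; the concluding
+  @{command term} command demonstrates that the input collapses to the bare
+  argument: *}
+
+syntax "_my_id" :: "'a \<Rightarrow> 'a"    ("ID'(_')")
+
+parse_translation {*
+  [(@{syntax_const "_my_id"},
+    fn _ => fn args =>
+      (case args of [t] => t | _ => raise TERM ("_my_id", args)))]
+*}
+
+term "ID(x)"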
+
+
+subsubsection {* The translation strategy *}
+
+text {* The different kinds of translation functions are invoked during
+  the transformations between parse trees, ASTs and syntactic terms
+  (cf.\ \figref{fig:parse-print}).  Whenever a combination of the form
+  @{text "c x\<^sub>1 \<dots> x\<^sub>n"} is encountered, and a translation function
+  @{text "f"} of appropriate kind is declared for @{text "c"}, the
+  result is produced by evaluation of @{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} in ML.
+
+  For AST translations, the arguments @{text "x\<^sub>1, \<dots>, x\<^sub>n"} are ASTs.  A
+  combination has the form @{ML "Ast.Constant"}~@{text "c"} or @{ML
+  "Ast.Appl"}~@{text "["}@{ML Ast.Constant}~@{text "c, x\<^sub>1, \<dots>, x\<^sub>n]"}.
+  For term translations, the arguments are terms and a combination has
+  the form @{ML Const}~@{text "(c, \<tau>)"} or @{ML Const}~@{text "(c, \<tau>)
+  $ x\<^sub>1 $ \<dots> $ x\<^sub>n"}.  Terms allow more sophisticated transformations
+  than ASTs do, typically involving abstractions and bound
+  variables. \emph{Typed} print translations may even peek at the type
+  @{text "\<tau>"} of the constant they are invoked on, although some
+  information might have been suppressed for term output already.
+
+  Regardless of whether they act on ASTs or terms, translation
+  functions called during the parsing process differ from those for
+  printing in their overall behaviour:
+
+  \begin{description}
+
+  \item [Parse translations] are applied bottom-up.  The arguments are
+  already in translated form.  The translations must not fail;
+  exceptions trigger an error message.  There may be at most one
+  function associated with any syntactic name.
+
+  \item [Print translations] are applied top-down.  They are supplied
+  with arguments that are partly still in internal form.  The result
+  again undergoes translation; therefore a print translation should
+  not introduce as head the very constant that invoked it.  The
+  function may raise exception @{ML Match} to indicate failure; in
+  this event it has no effect.  Multiple functions associated with
+  some syntactic name are tried in the order of declaration in the
+  theory.
+
+  \end{description}
+
+  Only constant atoms --- constructor @{ML Ast.Constant} for ASTs and
+  @{ML Const} for terms --- can invoke translation functions.  This
+  means that parse translations can only be associated with parse tree
+  heads of concrete syntax, or syntactic constants introduced via
+  other translations.  For plain identifiers within the term language,
+  the status of constant versus variable is not yet known during
+  parsing.  This is in contrast to print translations, where constants
+  are explicitly known from the given term in its fully internal form.
+*}
+
+
+subsection {* Built-in syntax transformations *}
+
+text {*
+  Here are some further details of the main syntax transformation
+  phases of \figref{fig:parse-print}.
+*}
+
+
+subsubsection {* Transforming parse trees to ASTs *}
+
+text {* The parse tree is the raw output of the parser.  It is
+  transformed into an AST according to some basic scheme that may be
+  augmented by AST translation functions as explained in
+  \secref{sec:tr-funs}.
+
+  The parse tree is constructed by nesting the right-hand sides of the
+  productions used to recognize the input.  Such parse trees are
+  simply lists of tokens and constituent parse trees, the latter
+  representing the nonterminals of the productions.  Ignoring AST
+  translation functions, parse trees are transformed to ASTs by
+  stripping out delimiters and copy productions, while retaining some
+  source position information from input tokens.
+
+  The Pure syntax provides predefined AST translations to make the
+  basic @{text "\<lambda>"}-term structure more apparent within the
+  (first-order) AST representation, and thus facilitate the use of
+  @{command translations} (see also \secref{sec:syn-trans}).  This
+  covers ordinary term application, type application, nested
+  abstraction, iterated meta implications and function types.  The
+  effect on some representative input strings is illustrated as
+  follows:
+
+  \begin{center}
+  \begin{tabular}{ll}
+  input source & AST \\
+  \hline
+  @{text "f x y z"} & @{verbatim "(f x y z)"} \\
+  @{text "'a ty"} & @{verbatim "(ty 'a)"} \\
+  @{text "('a, 'b)ty"} & @{verbatim "(ty 'a 'b)"} \\
+  @{text "\<lambda>x y z. t"} & @{verbatim "(\"_abs\" x (\"_abs\" y (\"_abs\" z t)))"} \\
+  @{text "\<lambda>x :: 'a. t"} & @{verbatim "(\"_abs\" (\"_constrain\" x 'a) t)"} \\
+  @{text "\<lbrakk>P; Q; R\<rbrakk> \<Longrightarrow> S"} & @{verbatim "(\"==>\" P (\"==>\" Q (\"==>\" R S)))"} \\
+   @{text "['a, 'b, 'c] \<Rightarrow> 'd"} & @{verbatim "(\"fun\" 'a (\"fun\" 'b (\"fun\" 'c 'd)))"} \\
+  \end{tabular}
+  \end{center}
+
+  Note that type and sort constraints may occur in further places ---
+  translations need to be ready to cope with them.  The built-in
+  syntax transformation from parse trees to ASTs inserts additional
+  constraints that represent source positions.
+*}
+
+
+subsubsection {* Transforming ASTs to terms *}
+
+text {* After application of macros (\secref{sec:syn-trans}), the AST
+  is transformed into a term.  This term still lacks proper type
+  information, but it might contain some constraints consisting of
+  applications with head @{verbatim "_constrain"}, where the second
+  argument is a type encoded as a pre-term within the syntax.  Type
+  inference later introduces correct types, or indicates type errors
+  in the input.
+
+  Ignoring parse translations, ASTs are transformed to terms by
+  mapping AST constants to term constants, AST variables to term
+  variables or constants (according to the name space), and AST
+  applications to iterated term applications.
+
+  The outcome is still a first-order term.  Proper abstractions and
+  bound variables are introduced by parse translations associated with
+  certain syntax constants.  Thus @{verbatim "(_abs x x)"} eventually
+  becomes a de-Bruijn term @{verbatim "Abs (\"x\", _, Bound 0)"}.
+*}
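+
+text {* This effect can be observed directly in ML, e.g.\ by inspecting the
+  internal form of a small example term (a quick sketch): *}
+
+ML_val {* @{term "\<lambda>x. x"} *}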
+
+
+subsubsection {* Printing of terms *}
+
+text {* The output phase is essentially the inverse of the input
+  phase.  Terms are translated via abstract syntax trees into
+  pretty-printed text.
+
+  Ignoring print translations, the transformation maps term constants,
+  variables and applications to the corresponding constructs on ASTs.
+  Abstractions are mapped to applications of the special constant
+  @{verbatim "_abs"} as seen before.  Type constraints are represented
+  via special @{verbatim "_constrain"} forms, according to various
+  policies of type annotation determined elsewhere.  Sort constraints
+  of type variables are handled in a similar fashion.
+
+  After application of macros (\secref{sec:syn-trans}), the AST is
+  finally pretty-printed.  The built-in print AST translations reverse
+  the corresponding parse AST translations.
+
+  \medskip For the actual printing process, the priority grammar
+  (\secref{sec:priority-grammar}) plays a vital role: productions are
+  used as templates for pretty printing, with argument slots stemming
+  from nonterminals, and syntactic sugar stemming from literal tokens.
+
+  Each AST application with constant head @{text "c"} and arguments
+  @{text "t\<^sub>1"}, \dots, @{text "t\<^sub>n"} (for @{text "n = 0"} the AST is
+  just the constant @{text "c"} itself) is printed according to the
+  first grammar production of result name @{text "c"}.  The required
+  syntax priority of the argument slot is given by its nonterminal
+  @{text "A\<^sup>(\<^sup>p\<^sup>)"}.  The argument @{text "t\<^sub>i"} that corresponds to the
+  position of @{text "A\<^sup>(\<^sup>p\<^sup>)"} is printed recursively, and then put in
+  parentheses \emph{if} its priority @{text "p"} requires this.  The
+  resulting output is concatenated with the syntactic sugar according
+  to the grammar production.
+
+  If an AST application @{text "(c x\<^sub>1 \<dots> x\<^sub>m)"} has more arguments than
+  the corresponding production, it is first split into @{text "((c x\<^sub>1
+  \<dots> x\<^sub>n) x\<^sub>n\<^sub>+\<^sub>1 \<dots> x\<^sub>m)"} and then printed recursively as above.
+
+  Applications with too few arguments or with non-constant head or
+  without a corresponding production are printed in prefix-form like
+  @{text "f t\<^sub>1 \<dots> t\<^sub>n"} for terms.
+
+  Multiple productions associated with some name @{text "c"} are tried
+  in order of appearance within the grammar.  An occurrence of some
+  AST variable @{text "x"} is printed as @{text "x"} outright.
+
+  \medskip White space is \emph{not} inserted automatically.  If
+  blanks (or breaks) are required to separate tokens, they need to be
+  specified in the mixfix declaration (\secref{sec:mixfix}).
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/ML_Tactic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,177 @@
+theory ML_Tactic
+imports Base Main
+begin
+
+chapter {* ML tactic expressions *}
+
+text {*
+  Isar proof methods closely resemble traditional tactics, when used
+  in unstructured sequences of @{command "apply"} commands.
+  Isabelle/Isar provides emulations for all major ML tactics of
+  classic Isabelle --- mostly for the sake of easy porting of existing
+  developments, as actual Isar proof texts would demand much less
+  diversity of proof methods.
+
+  Unlike tactic expressions in ML, Isar proof methods provide proper
+  concrete syntax for additional arguments, options, modifiers etc.
+  Thus a typical method text is usually more concise than the
+  corresponding ML tactic.  Furthermore, the Isar versions of classic
+  Isabelle tactics often cover several variant forms by a single
+  method with separate options to tune the behavior.  For example,
+  method @{method simp} replaces all of @{ML simp_tac}~/ @{ML
+  asm_simp_tac}~/ @{ML full_simp_tac}~/ @{ML asm_full_simp_tac}; there
+  is also concrete syntax for augmenting the Simplifier context (the
+  current ``simpset'') in a convenient way.
+*}
+
+
+section {* Resolution tactics *}
+
+text {*
+  Classic Isabelle provides several variant forms of tactics for
+  single-step rule applications (based on higher-order resolution).
+  The space of resolution tactics has the following main dimensions.
+
+  \begin{enumerate}
+
+  \item The ``mode'' of resolution: intro, elim, destruct, or forward
+  (e.g.\ @{ML resolve_tac}, @{ML eresolve_tac}, @{ML dresolve_tac},
+  @{ML forward_tac}).
+
+  \item Optional explicit instantiation (e.g.\ @{ML resolve_tac} vs.\
+  @{ML res_inst_tac}).
+
+  \item Abbreviations for singleton arguments (e.g.\ @{ML resolve_tac}
+  vs.\ @{ML rtac}).
+
+  \end{enumerate}
+
+  Basically, the set of Isar tactic emulations @{method rule_tac},
+  @{method erule_tac}, @{method drule_tac}, @{method frule_tac} (see
+  \secref{sec:tactics}) would be sufficient to cover the four modes,
+  either with or without instantiation, and either with single or
+  multiple arguments.  In most cases, however, it is more convenient
+  to use the plain @{method_ref (Pure) rule} method, or any of its
+  ``improper'' variants @{method erule}, @{method drule}, @{method
+  frule}.  Note that explicit goal addressing is only supported by the
+  actual @{method rule_tac} version.
+
+  With this in mind, plain resolution tactics correspond to Isar
+  methods as follows.
+
+  \medskip
+  \begin{tabular}{lll}
+    @{ML rtac}~@{text "a 1"} & & @{text "rule a"} \\
+    @{ML resolve_tac}~@{text "[a\<^sub>1, \<dots>] 1"} & & @{text "rule a\<^sub>1 \<dots>"} \\
+    @{ML res_inst_tac}~@{text "ctxt [(x\<^sub>1, t\<^sub>1), \<dots>] a 1"} & &
+    @{text "rule_tac x\<^sub>1 = t\<^sub>1 \<AND> \<dots> \<IN> a"} \\[0.5ex]
+    @{ML rtac}~@{text "a i"} & & @{text "rule_tac [i] a"} \\
+    @{ML resolve_tac}~@{text "[a\<^sub>1, \<dots>] i"} & & @{text "rule_tac [i] a\<^sub>1 \<dots>"} \\
+    @{ML res_inst_tac}~@{text "ctxt [(x\<^sub>1, t\<^sub>1), \<dots>] a i"} & &
+    @{text "rule_tac [i] x\<^sub>1 = t\<^sub>1 \<AND> \<dots> \<IN> a"} \\
+  \end{tabular}
+  \medskip
+
+  Note that explicit goal addressing may usually be avoided by
+  changing the order of subgoals with @{command "defer"} or @{command
+  "prefer"} (see \secref{sec:tactic-commands}).
+*}
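+
+text {* For example, the ML tactic script fragment @{ML rtac}~@{text "impI
+  1"} followed by @{ML assume_tac}~@{text 1} corresponds to the following
+  (deliberately trivial) Isar sketch: *}
+
+lemma "A \<longrightarrow> A"
+  apply (rule impI)
+  apply assumption
+  done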
+
+
+section {* Simplifier tactics *}
+
+text {* The main Simplifier tactics @{ML simp_tac} and variants are
+  all covered by the @{method simp} and @{method simp_all} methods
+  (see \secref{sec:simplifier}).  Note that there is no individual
+  goal addressing available, simplification acts either on the first
+  goal (@{method simp}) or all goals (@{method simp_all}).
+
+  \medskip
+  \begin{tabular}{lll}
+    @{ML asm_full_simp_tac}~@{text "@{context} 1"} & & @{method simp} \\
+    @{ML ALLGOALS}~(@{ML asm_full_simp_tac}~@{text "@{context}"}) & & @{method simp_all} \\[0.5ex]
+    @{ML simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(no_asm)"} \\
+    @{ML asm_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(no_asm_simp)"} \\
+    @{ML full_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(no_asm_use)"} \\
+    @{ML asm_lr_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(asm_lr)"} \\
+  \end{tabular}
+  \medskip
+*}
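+
+text {* For example (a trivial sketch), the following two proofs establish
+  the same goal; the first invokes the ML tactic directly via the @{method
+  tactic} method, the second uses the corresponding Isar method: *}
+
+lemma "0 + n = (n::nat)"
+  by (tactic {* asm_full_simp_tac @{context} 1 *})
+
+lemma "0 + n = (n::nat)"
+  by simp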
+
+
+section {* Classical Reasoner tactics *}
+
+text {* The Classical Reasoner provides a rather large number of
+  variations of automated tactics, such as @{ML blast_tac}, @{ML
+  fast_tac}, @{ML clarify_tac} etc.  The corresponding Isar methods
+  usually share the same base name, such as @{method blast}, @{method
+  fast}, @{method clarify} etc.\ (see \secref{sec:classical}).  *}
+
+
+section {* Miscellaneous tactics *}
+
+text {*
+  There are a few additional tactics defined in various theories of
+  Isabelle/HOL, some of these also in Isabelle/FOL or Isabelle/ZF.
+  The most common ones of these may be ported to Isar as follows.
+
+  \medskip
+  \begin{tabular}{lll}
+    @{ML stac}~@{text "a 1"} & & @{text "subst a"} \\
+    @{ML hyp_subst_tac}~@{text 1} & & @{text hypsubst} \\
+    @{ML split_all_tac}~@{text 1} & & @{text "simp (no_asm_simp) only: split_tupled_all"} \\
+      & @{text "\<approx>"} & @{text "simp only: split_tupled_all"} \\
+      & @{text "\<lless>"} & @{text "clarify"} \\
+  \end{tabular}
+*}
+
+
+section {* Tacticals *}
+
+text {*
+  Classic Isabelle provides a huge amount of tacticals for combination
+  and modification of existing tactics.  This has been greatly reduced
+  in Isar, providing the bare minimum of combinators only: ``@{text
+  ","}'' (sequential composition), ``@{text "|"}'' (alternative
+  choices), ``@{text "?"}'' (try), ``@{text "+"}'' (repeat at least
+  once).  These are usually sufficient in practice; if all fails,
+  arbitrary ML tactic code may be invoked via the @{method tactic}
+  method (see \secref{sec:tactics}).
+
+  \medskip Common ML tacticals may be expressed directly in Isar as
+  follows:
+
+  \medskip
+  \begin{tabular}{lll}
+    @{text "tac\<^sub>1"}~@{ML_text THEN}~@{text "tac\<^sub>2"} & & @{text "meth\<^sub>1, meth\<^sub>2"} \\
+    @{text "tac\<^sub>1"}~@{ML_text ORELSE}~@{text "tac\<^sub>2"} & & @{text "meth\<^sub>1 | meth\<^sub>2"} \\
+    @{ML TRY}~@{text tac} & & @{text "meth?"} \\
+    @{ML REPEAT1}~@{text tac} & & @{text "meth+"} \\
+    @{ML REPEAT}~@{text tac} & & @{text "(meth+)?"} \\
+    @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>]"} & & @{text "meth\<^sub>1, \<dots>"} \\
+    @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>]"} & & @{text "meth\<^sub>1 | \<dots>"} \\
+  \end{tabular}
+  \medskip
+
+  \medskip @{ML CHANGED} (see \cite{isabelle-implementation}) is
+  usually not required in Isar, since most basic proof methods already
+  fail unless there is an actual change in the goal state.
+  Nevertheless, ``@{text "?"}''  (try) may be used to accept
+  \emph{unchanged} results as well.
+
+  \medskip @{ML ALLGOALS}, @{ML SOMEGOAL} etc.\ (see
+  \cite{isabelle-implementation}) are not available in Isar, since
+  there is no direct goal addressing.  Nevertheless, some basic
+  methods address all goals internally, notably @{method simp_all}
+  (see \secref{sec:simplifier}).  Also note that @{ML ALLGOALS} can
+  often be replaced by ``@{text "+"}'' (repeat at least once), although
+  this usually has a different operational behavior: subgoals are
+  solved in a different order.
+
+  \medskip Iterated resolution, such as
+  @{ML_text "REPEAT (FIRSTGOAL (resolve_tac ...))"}, is usually better
+  expressed using the @{method intro} and @{method elim} methods of
+  Isar (see \secref{sec:classical}).
+*}
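+
+text {* For example (a trivial sketch), the combinators above permit a
+  compact proof corresponding to the ML tactical expression @{ML_text
+  "REPEAT1 (rtac impI 1) THEN atac 1"}: *}
+
+lemma "A \<longrightarrow> B \<longrightarrow> A"
+  by ((rule impI)+, assumption)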
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Misc.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,150 @@
+theory Misc
+imports Base Main
+begin
+
+chapter {* Other commands *}
+
+section {* Inspecting the context *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_theory"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_methods"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_attributes"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_theorems"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "find_theorems"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "find_consts"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "thm_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "unused_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_facts"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_binds"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{command print_theory} | @@{command print_theorems} | @@{command print_facts}) ('!'?)
+    ;
+
+    @@{command find_theorems} ('(' @{syntax nat}? 'with_dups'? ')')? \<newline> (thmcriterion * )
+    ;
+    thmcriterion: ('-'?) ('name' ':' @{syntax nameref} | 'intro' | 'elim' | 'dest' |
+      'solves' | 'simp' ':' @{syntax term} | @{syntax term})
+    ;
+    @@{command find_consts} (constcriterion * )
+    ;
+    constcriterion: ('-'?)
+      ('name' ':' @{syntax nameref} | 'strict' ':' @{syntax type} | @{syntax type})
+    ;
+    @@{command thm_deps} @{syntax thmrefs}
+    ;
+    @@{command unused_thms} ((@{syntax name} +) '-' (@{syntax name} * ))?
+  \<close>}
+
+  These commands print certain parts of the theory and proof context.
+  Note that some further commands of this kind are available, such as
+  for the set of rules declared for simplification.
+
+  \begin{description}
+  
+  \item @{command "print_theory"} prints the main logical content of
+  the background theory; the ``@{text "!"}'' option indicates extra
+  verbosity.
+
+  \item @{command "print_methods"} prints all proof methods
+  available in the current theory context.
+  
+  \item @{command "print_attributes"} prints all attributes
+  available in the current theory context.
+  
+  \item @{command "print_theorems"} prints theorems of the background
+  theory resulting from the last command; the ``@{text "!"}'' option
+  indicates extra verbosity.
+  
+  \item @{command "print_facts"} prints all local facts of the
+  current context, both named and unnamed ones; the ``@{text "!"}''
+  option indicates extra verbosity.
+  
+  \item @{command "print_binds"} prints all term abbreviations
+  present in the context.
+
+  \item @{command "find_theorems"}~@{text criteria} retrieves facts
+  from the theory or proof context matching all of given search
+  criteria.  The criterion @{text "name: p"} selects all theorems
+  whose fully qualified name matches pattern @{text p}, which may
+  contain ``@{text "*"}'' wildcards.  The criteria @{text intro},
+  @{text elim}, and @{text dest} select theorems that match the
+  current goal as introduction, elimination or destruction rules,
+  respectively.  The criterion @{text "solves"} returns all rules
+  that would directly solve the current goal.  The criterion
+  @{text "simp: t"} selects all rewrite rules whose left-hand side
+  matches the given term.  The criterion term @{text t} selects all
+  theorems that contain the pattern @{text t} -- as usual, patterns
+  may contain occurrences of the dummy ``@{text _}'', schematic
+  variables, and type constraints.
+  
+  Criteria can be preceded by ``@{text "-"}'' to select theorems that
+  do \emph{not} match. Note that giving the empty list of criteria
+  yields \emph{all} currently known facts.  An optional limit for the
+  number of printed facts may be given; the default is 40.  By
+  default, duplicates are removed from the search result. Use
+  @{text with_dups} to display duplicates.
+
+  \item @{command "find_consts"}~@{text criteria} prints all constants
+  whose type meets all of the given criteria. The criterion @{text
+  "strict: ty"} is met by any type that matches the type pattern
+  @{text ty}.  Patterns may contain both the dummy type ``@{text _}''
+  and sort constraints. The criterion @{text ty} is similar, but it
+  also matches against subtypes. The criterion @{text "name: p"} and
+  the prefix ``@{text "-"}'' function as described for @{command
+  "find_theorems"}.
+
+  \item @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
+  visualizes dependencies of facts, using Isabelle's graph browser
+  tool (see also \cite{isabelle-sys}).
+
+  \item @{command "unused_thms"}~@{text "A\<^sub>1 \<dots> A\<^sub>m - B\<^sub>1 \<dots> B\<^sub>n"}
+  displays all theorems that are proved in theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}
+  or their parents but not in @{text "A\<^sub>1 \<dots> A\<^sub>m"} or their parents and
+  that are never used.
+  If @{text n} is @{text 0}, the end of the range of theories
+  defaults to the current theory. If no range is specified,
+  only the unused theorems in the current theory are displayed.
+  
+  \end{description}
+*}
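+
+text {* For example, the following queries (purely illustrative) may be run
+  against the Isabelle/HOL context of theory @{theory Main}: *}
+
+find_theorems (10) "_ @ []"
+find_consts strict: "nat \<Rightarrow> bool"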
+
+
+section {* System commands *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "cd"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "pwd"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "use_thy"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{command cd} | @@{command use_thy}) @{syntax name}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "cd"}~@{text path} changes the current directory
+  of the Isabelle process.
+
+  \item @{command "pwd"} prints the current working directory.
+
+  \item @{command "use_thy"}~@{text A} preload theory @{text A}.
+  These system commands are scarcely used when working interactively,
+  since loading of theories is done automatically as required.
+
+  \end{description}
+
+  %FIXME proper place (!?)
+  Isabelle file specifications may contain path variables (e.g.\
+  @{verbatim "$ISABELLE_HOME"}) that are expanded accordingly.  Note
+  that @{verbatim "~"} abbreviates @{verbatim "$USER_HOME"}, and
+  @{verbatim "~~"} abbreviates @{verbatim "$ISABELLE_HOME"}.  The
+  general syntax for path specifications follows POSIX conventions.
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Outer_Syntax.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,466 @@
+theory Outer_Syntax
+imports Base Main
+begin
+
+chapter {* Outer syntax --- the theory language \label{ch:outer-syntax} *}
+
+text {*
+  The rather generic framework of Isabelle/Isar syntax emerges from
+  three main syntactic categories: \emph{commands} of the top-level
+  Isar engine (covering theory and proof elements), \emph{methods} for
+  general goal refinements (analogous to traditional ``tactics''), and
+  \emph{attributes} for operations on facts (within a certain
+  context).  Subsequently we give a reference of basic syntactic
+  entities underlying Isabelle/Isar syntax in a bottom-up manner.
+  Concrete theory and proof language elements will be introduced later
+  on.
+
+  \medskip In order to get started with writing well-formed
+  Isabelle/Isar documents, the most important aspect to be noted is
+  the difference of \emph{inner} versus \emph{outer} syntax.  Inner
+  syntax is that of Isabelle types and terms of the logic, while outer
+  syntax is that of Isabelle/Isar theory sources (specifications and
+  proofs).  As a general rule, inner syntax entities may occur only as
+  \emph{atomic entities} within outer syntax.  For example, the string
+  @{verbatim "\"x + y\""} and identifier @{verbatim z} are legal term
+  specifications within a theory, while @{verbatim "x + y"} without
+  quotes is not.
+
+  Printed theory documents usually omit quotes to gain readability
+  (this is a matter of {\LaTeX} macro setup, say via @{verbatim
+  "\\isabellestyle"}, see also \cite{isabelle-sys}).  Experienced
+  users of Isabelle/Isar may easily reconstruct the lost technical
+  information, while mere readers need not care about quotes at all.
+
+  \medskip Isabelle/Isar input may contain any number of input
+  termination characters ``@{verbatim ";"}'' (semicolon) to separate
+  commands explicitly.  This is particularly useful in interactive
+  shell sessions to make clear where the current command is intended
+  to end.  Otherwise, the interpreter loop will continue to issue a
+  secondary prompt ``@{verbatim "#"}'' until an end-of-command is
+  clearly recognized from the input syntax, e.g.\ upon encountering the
+  next command keyword.
+
+  More advanced interfaces such as Isabelle/jEdit \cite{Wenzel:2012}
+  and Proof~General \cite{proofgeneral} do not require explicit
+  semicolons: command spans are determined by inspecting the content
+  of the editor buffer.  In the printed presentation of Isabelle/Isar
+  documents semicolons are omitted altogether for readability.
+
+  \begin{warn}
+    Proof~General requires certain syntax classification tables in
+    order to achieve properly synchronized interaction with the
+    Isabelle/Isar process.  These tables need to be consistent with
+    the Isabelle version and particular logic image to be used in a
+    running session (common object-logics may well change the outer
+    syntax).  The standard setup should work correctly with any of the
+    ``official'' logic images derived from Isabelle/HOL (including
+    HOLCF etc.).  Users of alternative logics may need to tell
+    Proof~General explicitly, e.g.\ by giving an option @{verbatim "-k ZF"}
+    (in conjunction with @{verbatim "-l ZF"}, to specify the default
+    logic image).  Note that option @{verbatim "-L"} does both
+    of these at the same time.
+  \end{warn}
+*}
+
+
+section {* Commands *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_commands"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "help"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command help} (@{syntax name} * )
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "print_commands"} prints all outer syntax keywords
+  and commands.
+
+  \item @{command "help"}~@{text "pats"} retrieves outer syntax
+  commands according to the specified name patterns.
+
+  \end{description}
+*}
+
+
+subsubsection {* Examples *}
+
+text {* Some common diagnostic commands are retrieved like this
+  (according to usual naming conventions): *}
+
+help "print"
+help "find"
+
+
+section {* Lexical matters \label{sec:outer-lex} *}
+
+text {* The outer lexical syntax consists of three main categories of
+  syntax tokens:
+
+  \begin{enumerate}
+
+  \item \emph{major keywords} --- the command names that are available
+  in the present logic session;
+
+  \item \emph{minor keywords} --- additional literal tokens required
+  by the syntax of commands;
+
+  \item \emph{named tokens} --- various categories of identifiers etc.
+
+  \end{enumerate}
+
+  Major keywords and minor keywords are guaranteed to be disjoint.
+  This helps user-interfaces to determine the overall structure of a
+  theory text, without knowing the full details of command syntax.
+  Internally, there is some additional information about the kind of
+  major keywords, which approximates the command type (theory command,
+  proof command etc.).
+
+  Keywords override named tokens.  For example, the presence of a
+  command called @{verbatim term} inhibits the identifier @{verbatim
+  term}, but the string @{verbatim "\"term\""} can be used instead.
+  By convention, the outer syntax always allows quoted strings in
+  addition to identifiers, wherever a named entity is expected.
+
+  When tokenizing a given input sequence, the lexer repeatedly takes
+  the longest prefix of the input that forms a valid token.  Spaces,
+  tabs, newlines and formfeeds between tokens serve as explicit
+  separators.
+
+  \medskip The categories for named tokens are defined once and for
+  all as follows.
+
+  \begin{center}
+  \begin{supertabular}{rcl}
+    @{syntax_def ident} & = & @{text "letter (subscript\<^sup>? quasiletter)\<^sup>*"} \\
+    @{syntax_def longident} & = & @{text "ident("}@{verbatim "."}@{text "ident)\<^sup>+"} \\
+    @{syntax_def symident} & = & @{text "sym\<^sup>+  |  "}@{verbatim "\\"}@{verbatim "<"}@{text ident}@{verbatim ">"} \\
+    @{syntax_def nat} & = & @{text "digit\<^sup>+"} \\
+    @{syntax_def float} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
+    @{syntax_def var} & = & @{verbatim "?"}@{text "ident  |  "}@{verbatim "?"}@{text ident}@{verbatim "."}@{text nat} \\
+    @{syntax_def typefree} & = & @{verbatim "'"}@{text ident} \\
+    @{syntax_def typevar} & = & @{verbatim "?"}@{text "typefree  |  "}@{verbatim "?"}@{text typefree}@{verbatim "."}@{text nat} \\
+    @{syntax_def string} & = & @{verbatim "\""} @{text "\<dots>"} @{verbatim "\""} \\
+    @{syntax_def altstring} & = & @{verbatim "`"} @{text "\<dots>"} @{verbatim "`"} \\
+    @{syntax_def cartouche} & = & @{verbatim "\<open>"} @{text "\<dots>"} @{verbatim "\<close>"} \\
+    @{syntax_def verbatim} & = & @{verbatim "{*"} @{text "\<dots>"} @{verbatim "*"}@{verbatim "}"} \\[1ex]
+
+    @{text letter} & = & @{text "latin  |  "}@{verbatim "\\"}@{verbatim "<"}@{text ident}@{verbatim ">"}@{text "  |  "}@{verbatim "\\"}@{verbatim "<"}@{text "latin latin"}@{verbatim ">"}@{text "  |  greek"} \\
+    @{text subscript} & = & @{verbatim "\<^sub>"} \\
+    @{text quasiletter} & = & @{text "letter  |  digit  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "'"} \\
+    @{text latin} & = & @{verbatim a}@{text "  | \<dots> |  "}@{verbatim z}@{text "  |  "}@{verbatim A}@{text "  |  \<dots> |  "}@{verbatim Z} \\
+    @{text digit} & = & @{verbatim "0"}@{text "  |  \<dots> |  "}@{verbatim "9"} \\
+    @{text sym} & = & @{verbatim "!"}@{text "  |  "}@{verbatim "#"}@{text "  |  "}@{verbatim "$"}@{text "  |  "}@{verbatim "%"}@{text "  |  "}@{verbatim "&"}@{text "  |  "}@{verbatim "*"}@{text "  |  "}@{verbatim "+"}@{text "  |  "}@{verbatim "-"}@{text "  |  "}@{verbatim "/"}@{text "  |"} \\
+    & & @{verbatim "<"}@{text "  |  "}@{verbatim "="}@{text "  |  "}@{verbatim ">"}@{text "  |  "}@{verbatim "?"}@{text "  |  "}@{verbatim "@"}@{text "  |  "}@{verbatim "^"}@{text "  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "|"}@{text "  |  "}@{verbatim "~"} \\
+    @{text greek} & = & @{verbatim "\<alpha>"}@{text "  |  "}@{verbatim "\<beta>"}@{text "  |  "}@{verbatim "\<gamma>"}@{text "  |  "}@{verbatim "\<delta>"}@{text "  |"} \\
+          &   & @{verbatim "\<epsilon>"}@{text "  |  "}@{verbatim "\<zeta>"}@{text "  |  "}@{verbatim "\<eta>"}@{text "  |  "}@{verbatim "\<theta>"}@{text "  |"} \\
+          &   & @{verbatim "\<iota>"}@{text "  |  "}@{verbatim "\<kappa>"}@{text "  |  "}@{verbatim "\<mu>"}@{text "  |  "}@{verbatim "\<nu>"}@{text "  |"} \\
+          &   & @{verbatim "\<xi>"}@{text "  |  "}@{verbatim "\<pi>"}@{text "  |  "}@{verbatim "\<rho>"}@{text "  |  "}@{verbatim "\<sigma>"}@{text "  |  "}@{verbatim "\<tau>"}@{text "  |"} \\
+          &   & @{verbatim "\<upsilon>"}@{text "  |  "}@{verbatim "\<phi>"}@{text "  |  "}@{verbatim "\<chi>"}@{text "  |  "}@{verbatim "\<psi>"}@{text "  |"} \\
+          &   & @{verbatim "\<omega>"}@{text "  |  "}@{verbatim "\<Gamma>"}@{text "  |  "}@{verbatim "\<Delta>"}@{text "  |  "}@{verbatim "\<Theta>"}@{text "  |"} \\
+          &   & @{verbatim "\<Lambda>"}@{text "  |  "}@{verbatim "\<Xi>"}@{text "  |  "}@{verbatim "\<Pi>"}@{text "  |  "}@{verbatim "\<Sigma>"}@{text "  |"} \\
+          &   & @{verbatim "\<Upsilon>"}@{text "  |  "}@{verbatim "\<Phi>"}@{text "  |  "}@{verbatim "\<Psi>"}@{text "  |  "}@{verbatim "\<Omega>"} \\
+  \end{supertabular}
+  \end{center}
+
+  A @{syntax_ref var} or @{syntax_ref typevar} describes an unknown,
+  which is internally a pair of base name and index (ML type @{ML_type
+  indexname}).  These components are either separated by a dot as in
+  @{text "?x.1"} or @{text "?x7.3"} or run together as in @{text
+  "?x1"}.  The latter form is possible if the base name does not end
+  with digits.  If the index is 0, it may be dropped altogether:
+  @{text "?x"} and @{text "?x0"} and @{text "?x.0"} all refer to the
+  same unknown, with basename @{text "x"} and index 0.
+
+  The syntax of @{syntax_ref string} admits any characters, including
+  newlines; ``@{verbatim "\""}'' (double-quote) and ``@{verbatim
+  "\\"}'' (backslash) need to be escaped by a backslash; arbitrary
+  character codes may be specified as ``@{verbatim "\\"}@{text ddd}'',
+  with three decimal digits.  Alternative strings according to
+  @{syntax_ref altstring} are analogous, using single back-quotes
+  instead.
+
+  The body of @{syntax_ref verbatim} may consist of any text not
+  containing ``@{verbatim "*"}@{verbatim "}"}''; this allows
+  convenient inclusion of quotes without further escapes.  There is no
+  way to escape ``@{verbatim "*"}@{verbatim "}"}''.  If the quoted
+  text is {\LaTeX} source, one may usually add some blank or comment
+  to avoid the critical character sequence.
+
+  A @{syntax_ref cartouche} consists of arbitrary text, with properly
+  balanced blocks of ``@{verbatim "\<open>"}~@{text "\<dots>"}~@{verbatim
+  "\<close>"}''.  Note that the rendering of cartouche delimiters is
+  usually like this: ``@{text "\<open> \<dots> \<close>"}''.
+
+  Source comments take the form @{verbatim "(*"}~@{text
+  "\<dots>"}~@{verbatim "*)"} and may be nested, although the user-interface
+  might prevent this.  Note that this form indicates source comments
+  only, which are stripped after lexical analysis of the input.  The
+  Isar syntax also provides proper \emph{document comments} that are
+  considered as part of the text (see \secref{sec:comments}).
+
+  Common mathematical symbols such as @{text \<forall>} are represented in
+  Isabelle as @{verbatim \<forall>}.  There are infinitely many Isabelle
+  symbols like this, although proper presentation is left to front-end
+  tools such as {\LaTeX}, Proof~General, or Isabelle/jEdit.  A list of
+  predefined Isabelle symbols that work well with these tools is given
+  in \appref{app:symbols}.  Note that @{verbatim "\<lambda>"} does not belong
+  to the @{text letter} category, since it is already used differently
+  in the Pure term language.  *}
+
+
+section {* Common syntax entities *}
+
+text {*
+  We now introduce several basic syntactic entities, such as names,
+  terms, and theorem specifications, which are factored out of the
+  actual Isar language elements to be described later.
+*}
+
+
+subsection {* Names *}
+
+text {* Entity @{syntax name} usually refers to any name of types,
+  constants, theorems etc.\ that are to be \emph{declared} or
+  \emph{defined} (so qualified identifiers are excluded here).  Quoted
+  strings provide an escape for non-identifier names or those ruled
+  out by outer syntax keywords (e.g.\ quoted @{verbatim "\"let\""}).
+  Already existing objects are usually referenced by @{syntax
+  nameref}.
+
+  @{rail \<open>
+    @{syntax_def name}: @{syntax ident} | @{syntax symident} |
+      @{syntax string} | @{syntax nat}
+    ;
+    @{syntax_def parname}: '(' @{syntax name} ')'
+    ;
+    @{syntax_def nameref}: @{syntax name} | @{syntax longident}
+  \<close>}
+*}
+
+
+subsection {* Numbers *}
+
+text {* The outer lexical syntax (\secref{sec:outer-lex}) admits
+  natural numbers and floating point numbers.  These are combined as
+  @{syntax int} and @{syntax real} as follows.
+
+  @{rail \<open>
+    @{syntax_def int}: @{syntax nat} | '-' @{syntax nat}
+    ;
+    @{syntax_def real}: @{syntax float} | @{syntax int}
+  \<close>}
+
+  Note that there is an overlap with the category @{syntax name},
+  which also includes @{syntax nat}.
+*}
+
+
+subsection {* Comments \label{sec:comments} *}
+
+text {* Large chunks of plain @{syntax text} are usually given
+  @{syntax verbatim}, i.e.\ enclosed in @{verbatim "{"}@{verbatim
+  "*"}~@{text "\<dots>"}~@{verbatim "*"}@{verbatim "}"}.  For convenience,
+  any of the smaller text units conforming to @{syntax nameref} are
+  admitted as well.  A marginal @{syntax comment} is of the form
+  @{verbatim "--"}~@{syntax text}.  Any number of these may occur
+  within Isabelle/Isar commands.
+
+  @{rail \<open>
+    @{syntax_def text}: @{syntax verbatim} | @{syntax nameref}
+    ;
+    @{syntax_def comment}: '--' @{syntax text}
+  \<close>}
+*}
+
+
+subsection {* Type classes, sorts and arities *}
+
+text {*
+  Classes are specified by plain names.  Sorts have a very simple
+  inner syntax, which is either a single class name @{text c} or a
+  list @{text "{c\<^sub>1, \<dots>, c\<^sub>n}"} referring to the
+  intersection of these classes.  The syntax of type arities is given
+  directly at the outer level.
+
+  @{rail \<open>
+    @{syntax_def classdecl}: @{syntax name} (('<' | '\<subseteq>') (@{syntax nameref} + ','))?
+    ;
+    @{syntax_def sort}: @{syntax nameref}
+    ;
+    @{syntax_def arity}: ('(' (@{syntax sort} + ',') ')')? @{syntax sort}
+  \<close>}
+*}
+
+
+subsection {* Types and terms \label{sec:types-terms} *}
+
+text {*
+  The actual inner Isabelle syntax, that of types and terms of the
+  logic, is far too sophisticated in order to be modelled explicitly
+  at the outer theory level.  Basically, any such entity has to be
+  quoted to turn it into a single token (the parsing and type-checking
+  is performed internally later).  For convenience, a slightly more
+  liberal convention is adopted: quotes may be omitted for any type or
+  term that is already atomic at the outer level.  For example, one
+  may just write @{verbatim x} instead of quoted @{verbatim "\"x\""}.
+  Note that symbolic identifiers (e.g.\ @{verbatim "++"} or @{text
+  "\<forall>"}) are available as well, provided these have not been superseded
+  by commands or other keywords already (such as @{verbatim "="} or
+  @{verbatim "+"}).
+
+  @{rail \<open>
+    @{syntax_def type}: @{syntax nameref} | @{syntax typefree} |
+      @{syntax typevar}
+    ;
+    @{syntax_def term}: @{syntax nameref} | @{syntax var}
+    ;
+    @{syntax_def prop}: @{syntax term}
+  \<close>}
+
+  Positional instantiations are indicated by giving a sequence of
+  terms, or the placeholder ``@{text _}'' (underscore), which means to
+  skip a position.
+
+  @{rail \<open>
+    @{syntax_def inst}: '_' | @{syntax term}
+    ;
+    @{syntax_def insts}: (@{syntax inst} *)
+  \<close>}
+
+  Type declarations and definitions usually refer to @{syntax
+  typespec} on the left-hand side.  This models basic type constructor
+  application at the outer syntax level.  Note that only plain postfix
+  notation is available here, but no infixes.
+
+  @{rail \<open>
+    @{syntax_def typespec}:
+      (() | @{syntax typefree} | '(' ( @{syntax typefree} + ',' ) ')') @{syntax name}
+    ;
+    @{syntax_def typespec_sorts}:
+      (() | (@{syntax typefree} ('::' @{syntax sort})?) |
+        '(' ( (@{syntax typefree} ('::' @{syntax sort})?) + ',' ) ')') @{syntax name}
+  \<close>}
+*}
+
+
+subsection {* Term patterns and declarations \label{sec:term-decls} *}
+
+text {* Wherever explicit propositions (or term fragments) occur in a
+  proof text, casual binding of schematic term variables may be
+  specified via patterns of the form ``@{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"}''.
+  This works both for @{syntax term} and @{syntax prop}.
+
+  @{rail \<open>
+    @{syntax_def term_pat}: '(' (@'is' @{syntax term} +) ')'
+    ;
+    @{syntax_def prop_pat}: '(' (@'is' @{syntax prop} +) ')'
+  \<close>}
+
+  \medskip Declarations of local variables @{text "x :: \<tau>"} and
+  logical propositions @{text "a : \<phi>"} represent different views on
+  the same principle of introducing a local scope.  In practice, one
+  may usually omit the typing of @{syntax vars} (due to
+  type-inference), and the naming of propositions (due to implicit
+  references of current facts).  In any case, Isar proof elements
+  usually allow multiple such items to be introduced simultaneously.
+
+  @{rail \<open>
+    @{syntax_def vars}: (@{syntax name} +) ('::' @{syntax type})?
+    ;
+    @{syntax_def props}: @{syntax thmdecl}? (@{syntax prop} @{syntax prop_pat}? +)
+  \<close>}
+
+  The treatment of multiple declarations corresponds to the
+  complementary focus of @{syntax vars} versus @{syntax props}.  In
+  ``@{text "x\<^sub>1 \<dots> x\<^sub>n :: \<tau>"}'' the typing refers to all variables, while
+  in @{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} the naming refers to all propositions
+  collectively.  Isar language elements that refer to @{syntax vars}
+  or @{syntax props} typically admit separate typings or namings via
+  another level of iteration, with explicit @{keyword_ref "and"}
+  separators; e.g.\ see @{command "fix"} and @{command "assume"} in
+  \secref{sec:proof-context}.
+*}
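+
+text {* For example (a small sketch), the bindings introduced by a @{syntax
+  prop_pat} may be re-used in the subsequent proof text: *}
+
+lemma "A \<and> B \<longrightarrow> B \<and> A" (is "?P \<longrightarrow> ?Q")
+proof
+  assume ?P
+  then show ?Q by simp
+qed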
+
+
+subsection {* Attributes and theorems \label{sec:syn-att} *}
+
+text {* Attributes have their own ``semi-inner'' syntax, in the sense
+  that input conforming to @{syntax args} below is parsed by the
+  attribute a second time.  The attribute argument specifications may
+  be any sequence of atomic entities (identifiers, strings etc.), or
+  properly bracketed argument lists.  Below @{syntax atom} refers to
+  any atomic entity, including any @{syntax keyword} conforming to
+  @{syntax symident}.
+
+  @{rail \<open>
+    @{syntax_def atom}: @{syntax nameref} | @{syntax typefree} |
+      @{syntax typevar} | @{syntax var} | @{syntax nat} | @{syntax float} |
+      @{syntax keyword} | @{syntax cartouche}
+    ;
+    arg: @{syntax atom} | '(' @{syntax args} ')' | '[' @{syntax args} ']'
+    ;
+    @{syntax_def args}: arg *
+    ;
+    @{syntax_def attributes}: '[' (@{syntax nameref} @{syntax args} * ',') ']'
+  \<close>}
+
+  Theorem specifications come in several flavors: @{syntax axmdecl}
+  and @{syntax thmdecl} usually refer to axioms, assumptions or
+  results of goal statements, while @{syntax thmdef} collects lists of
+  existing theorems.  Existing theorems are given by @{syntax thmref}
+  and @{syntax thmrefs}; the former requires an actual singleton
+  result.
+
+  There are three forms of theorem references:
+  \begin{enumerate}
+  
+  \item named facts @{text "a"},
+
+  \item selections from named facts @{text "a(i)"} or @{text "a(j - k)"},
+
+  \item literal fact propositions using @{syntax_ref altstring} syntax
+  @{verbatim "`"}@{text "\<phi>"}@{verbatim "`"} (see also method
+  @{method_ref fact}).
+
+  \end{enumerate}
+
+  Any kind of theorem specification may include lists of attributes
+  both on the left and right hand sides; attributes are applied to any
+  immediately preceding fact.  If names are omitted, the theorems are
+  not stored within the theorem database of the theory or proof
+  context, but any given attributes are applied nonetheless.
+
+  An extra pair of brackets around attributes (like ``@{text
+  "[[simproc a]]"}'') abbreviates a theorem reference involving an
+  internal dummy fact, which will be ignored later on.  So only the
+  effect of the attribute on the background context will persist.
+  This form of in-place declarations is particularly useful with
+  commands like @{command "declare"} and @{command "using"}.
+
+  @{rail \<open>
+    @{syntax_def axmdecl}: @{syntax name} @{syntax attributes}? ':'
+    ;
+    @{syntax_def thmdecl}: thmbind ':'
+    ;
+    @{syntax_def thmdef}: thmbind '='
+    ;
+    @{syntax_def thmref}:
+      (@{syntax nameref} selection? | @{syntax altstring}) @{syntax attributes}? |
+      '[' @{syntax attributes} ']'
+    ;
+    @{syntax_def thmrefs}: @{syntax thmref} +
+    ;
+
+    thmbind: @{syntax name} @{syntax attributes} | @{syntax name} | @{syntax attributes}
+    ;
+    selection: '(' ((@{syntax nat} | @{syntax nat} '-' @{syntax nat}?) + ',') ')'
+  \<close>}
+*}
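+
+text {* For illustration, a small hypothetical example of theorem
+  specifications within a proof body: a @{syntax thmdef} binding, a
+  selection from the resulting fact list, and a literal fact reference in
+  @{syntax altstring} syntax (all names are arbitrary). *}
+
+  notepad
+  begin
+    fix A B :: bool
+    assume a: "A" and b: "B"
+    note ab = a b      (* collect existing facts under a new name *)
+    note b' = ab(2)    (* selection from a named fact list *)
+    note `A`           (* literal fact reference *)
+  end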
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Preface.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,74 @@
+theory Preface
+imports Base Main
+begin
+
+chapter {* Preface *}
+
+text {*
+  The \emph{Isabelle} system essentially provides a generic
+  infrastructure for building deductive systems (programmed in
+  Standard ML), with a special focus on interactive theorem proving in
+  higher-order logics.  Many years ago, even end-users would refer to
+  certain ML functions (goal commands, tactics, tacticals etc.) to
+  pursue their everyday theorem proving tasks.
+  
+  In contrast \emph{Isar} provides an interpreted language environment
+  of its own, which has been specifically tailored for the needs of
+  theory and proof development.  Compared to raw ML, the Isabelle/Isar
+  top-level provides a more robust and comfortable development
+  platform, with proper support for theory development graphs, managed
+  transactions with unlimited undo etc.
+
+  In its pioneering times, the Isabelle/Isar version of the
+  \emph{Proof~General} user interface
+  \cite{proofgeneral,Aspinall:TACAS:2000} has contributed to the
+  success of for interactive theory and proof development in this
+  advanced theorem proving environment, even though it was somewhat
+  biased towards old-style proof scripts.  The more recent
+  Isabelle/jEdit Prover IDE \cite{Wenzel:2012} emphasizes the
+  document-oriented approach of Isabelle/Isar again more explicitly.
+
+  \medskip Apart from the technical advances over bare-bones ML
+  programming, the main purpose of the Isar language is to provide a
+  conceptually different view on machine-checked proofs
+  \cite{Wenzel:1999:TPHOL,Wenzel-PhD}.  \emph{Isar} stands for
+  \emph{Intelligible semi-automated reasoning}.  Drawing from both the
+  traditions of informal mathematical proof texts and high-level
+  programming languages, Isar offers a versatile environment for
+  structured formal proof documents.  Thus properly written Isar
+  proofs become accessible to a broader audience than unstructured
+  tactic scripts (which typically only provide operational information
+  for the machine).  Writing human-readable proof texts certainly
+  requires some additional efforts by the writer to achieve a good
+  presentation, both of formal and informal parts of the text.  On the
+  other hand, human-readable formal texts gain some value in their own
+  right, independently of the mechanical proof-checking process.
+
+  Despite its grand design of structured proof texts, Isar is able to
+  assimilate the old tactical style as an ``improper'' sub-language.
+  This provides an easy upgrade path for existing tactic scripts, as
+  well as some means for interactive experimentation and debugging of
+  structured proofs.  Isabelle/Isar supports a broad range of proof
+  styles, both readable and unreadable ones.
+
+  \medskip The generic Isabelle/Isar framework (see
+  \chref{ch:isar-framework}) works reasonably well for any Isabelle
+  object-logic that conforms to the natural deduction view of the
+  Isabelle/Pure framework.  Specific language elements introduced by
+  Isabelle/HOL are described in \partref{part:hol}.  Although the main
+  language elements are already provided by the Isabelle/Pure
+  framework, examples given in the generic parts will usually refer to
+  Isabelle/HOL.
+
+  \medskip Isar commands may be either \emph{proper} document
+  constructors, or \emph{improper commands}.  Some proof methods and
+  attributes introduced later are classified as improper as well.
+  Improper Isar language elements are marked by ``@{text
+  "\<^sup>*"}'' in the subsequent chapters; they are often helpful
+  when developing proof documents, but their use is discouraged for
+  the final human-readable outcome.  Typical examples are diagnostic
+  commands that print terms or theorems according to the current
+  context; other commands emulate old-style tactical theorem proving.
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Proof.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1551 @@
+theory Proof
+imports Base Main
+begin
+
+chapter {* Proofs \label{ch:proofs} *}
+
+text {*
+  Proof commands perform transitions of Isar/VM machine
+  configurations, which are block-structured, consisting of a stack of
+  nodes with three main components: logical proof context, current
+  facts, and open goals.  Isar/VM transitions are typed according to
+  the following three different modes of operation:
+
+  \begin{description}
+
+  \item @{text "proof(prove)"} means that a new goal has just been
+  stated that is now to be \emph{proven}; the next command may refine
+  it by some proof method, and enter a sub-proof to establish the
+  actual result.
+
+  \item @{text "proof(state)"} is like a nested theory mode: the
+  context may be augmented by \emph{stating} additional assumptions,
+  intermediate results etc.
+
+  \item @{text "proof(chain)"} is intermediate between @{text
+  "proof(state)"} and @{text "proof(prove)"}: existing facts (i.e.\
+  the contents of the special ``@{fact_ref this}'' register) have been
+  just picked up in order to be used when refining the goal claimed
+  next.
+
+  \end{description}
+
+  The proof mode indicator may be understood as an instruction to the
+  writer, telling what kind of operation may be performed next.  The
+  corresponding typings of proof commands restrict the shape of
+  well-formed proof texts to particular command sequences.  So dynamic
+  arrangements of commands eventually turn out as static texts of a
+  certain structure.
+
+  \Appref{ap:refcard} gives a simplified grammar of the (extensible)
+  language emerging that way from the different types of proof
+  commands.  The main ideas of the overall Isar framework are
+  explained in \chref{ch:isar-framework}.
+*}
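+
+text {* For illustration, the following hypothetical proof snippet passes
+  through the three modes; the comments merely indicate the mode after each
+  command. *}
+
+  notepad
+  begin
+    have "True \<longrightarrow> True"    (* proof(prove) *)
+    proof                     (* proof(state) *)
+      assume "True"           (* proof(state) *)
+      then show "True" .      (* proof(chain) \<rightarrow> proof(prove), then closed *)
+    qed
+  end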
+
+
+section {* Proof structure *}
+
+subsection {* Formal notepad *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "notepad"} & : & @{text "local_theory \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command notepad} @'begin'
+    ;
+    @@{command end}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "notepad"}~@{keyword "begin"} opens a proof state
+  without any goal statement.  This allows experimenting with Isar,
+  without producing any persistent result.
+
+  The notepad can be closed by @{command "end"} or discontinued by
+  @{command "oops"}.
+
+  \end{description}
+*}
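+
+text {* For example, the following hypothetical notepad establishes two
+  throw-away facts and is then closed without leaving any trace in the
+  theory: *}
+
+  notepad
+  begin
+    have "True" by (rule TrueI)
+    have "1 + 1 = (2::nat)" by simp
+  end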
+
+
+subsection {* Blocks *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "next"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "{"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "}"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  While Isar is inherently block-structured, opening and closing
+  blocks is mostly handled rather casually, with little explicit
+  user-intervention.  Any local goal statement automatically opens
+  \emph{two} internal blocks, which are closed again when concluding
+  the sub-proof (by @{command "qed"} etc.).  Sections of different
+  context within a sub-proof may be switched via @{command "next"},
+  which is just a single block-close followed by block-open again.
+  The effect of @{command "next"} is to reset the local proof context;
+  there is no goal focus involved here!
+
+  For slightly more advanced applications, there are explicit block
+  parentheses as well.  These typically achieve a stronger forward
+  style of reasoning.
+
+  \begin{description}
+
+  \item @{command "next"} switches to a fresh block within a
+  sub-proof, resetting the local context to the initial one.
+
+  \item @{command "{"} and @{command "}"} explicitly open and close
+  blocks.  Any current facts pass through ``@{command "{"}''
+  unchanged, while ``@{command "}"}'' causes any result to be
+  \emph{exported} into the enclosing context.  Thus fixed variables
+  are generalized, assumptions discharged, and local definitions
+  unfolded (cf.\ \secref{sec:proof-context}).  There is no difference
+  of @{command "assume"} and @{command "presume"} in this mode of
+  forward reasoning --- in contrast to plain backward reasoning with
+  the result exported at @{command "show"} time.
+
+  \end{description}
+*}
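+
+text {* For illustration, a hypothetical explicit block: the result proven
+  inside is exported at the closing brace, with the fixed variables
+  generalized and the assumptions discharged. *}
+
+  notepad
+  begin
+    {
+      fix A B :: bool
+      assume a: "A" and b: "B"
+      from a b have "A \<and> B" by (rule conjI)
+    }
+    note exported = this
+      (* roughly "A \<Longrightarrow> B \<Longrightarrow> A \<and> B", generalized over A and B *)
+  end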
+
+
+subsection {* Omitting proofs *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "oops"} & : & @{text "proof \<rightarrow> local_theory | theory"} \\
+  \end{matharray}
+
+  The @{command "oops"} command discontinues the current proof
+  attempt, while considering the partial proof text as properly
+  processed.  This is conceptually quite different from ``faking''
+  actual proofs via @{command_ref "sorry"} (see
+  \secref{sec:proof-steps}): @{command "oops"} does not observe the
+  proof structure at all, but goes back right to the theory level.
+  Furthermore, @{command "oops"} does not produce any result theorem
+  --- there is no intended claim to be able to complete the proof
+  in any way.
+
+  A typical application of @{command "oops"} is to explain Isar proofs
+  \emph{within} the system itself, in conjunction with the document
+  preparation tools of Isabelle described in \chref{ch:document-prep}.
+  Thus partial or even wrong proof attempts can be discussed in a
+  logically sound manner.  Note that the Isabelle {\LaTeX} macros can
+  be easily adapted to print something like ``@{text "\<dots>"}'' instead of
+  the keyword ``@{command "oops"}''.
+*}
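+
+text {* For example, a deliberately unprovable claim may be stated and then
+  abandoned as follows, purely for purposes of discussion in a document: *}
+
+  lemma "False"
+    oops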
+
+
+section {* Statements *}
+
+subsection {* Context elements \label{sec:proof-context} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "fix"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "assume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "presume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "def"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  The logical proof context consists of fixed variables and
+  assumptions.  The former closely correspond to Skolem constants, or
+  meta-level universal quantification as provided by the Isabelle/Pure
+  logical framework.  Introducing some \emph{arbitrary, but fixed}
+  variable via ``@{command "fix"}~@{text x}'' results in a local value
+  that may be used in the subsequent proof as any other variable or
+  constant.  Furthermore, any result @{text "\<turnstile> \<phi>[x]"} exported from
+  the context will be universally closed wrt.\ @{text x} at the
+  outermost level: @{text "\<turnstile> \<And>x. \<phi>[x]"} (this is expressed in normal
+  form using Isabelle's meta-variables).
+
+  Similarly, introducing some assumption @{text \<chi>} has two effects.
+  On the one hand, a local theorem is created that may be used as a
+  fact in subsequent proof steps.  On the other hand, any result
+  @{text "\<chi> \<turnstile> \<phi>"} exported from the context becomes conditional wrt.\
+  the assumption: @{text "\<turnstile> \<chi> \<Longrightarrow> \<phi>"}.  Thus, solving an enclosing goal
+  using such a result would basically introduce a new subgoal stemming
+  from the assumption.  How this situation is handled depends on the
+  version of assumption command used: while @{command "assume"}
+  insists on solving the subgoal by unification with some premise of
+  the goal, @{command "presume"} leaves the subgoal unchanged in order
+  to be proved later by the user.
+
+  Local definitions, introduced by ``@{command "def"}~@{text "x \<equiv>
+  t"}'', are achieved by combining ``@{command "fix"}~@{text x}'' with
+  another version of assumption that causes any hypothetical equation
+  @{text "x \<equiv> t"} to be eliminated by the reflexivity rule.  Thus,
+  exporting some result @{text "x \<equiv> t \<turnstile> \<phi>[x]"} yields @{text "\<turnstile>
+  \<phi>[t]"}.
+
+  @{rail \<open>
+    @@{command fix} (@{syntax vars} + @'and')
+    ;
+    (@@{command assume} | @@{command presume}) (@{syntax props} + @'and')
+    ;
+    @@{command def} (def + @'and')
+    ;
+    def: @{syntax thmdecl}? \<newline>
+      @{syntax name} ('==' | '\<equiv>') @{syntax term} @{syntax term_pat}?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "fix"}~@{text x} introduces a local variable @{text
+  x} that is \emph{arbitrary, but fixed.}
+  
+  \item @{command "assume"}~@{text "a: \<phi>"} and @{command
+  "presume"}~@{text "a: \<phi>"} introduce a local fact @{text "\<phi> \<turnstile> \<phi>"} by
+  assumption.  Subsequent results applied to an enclosing goal (e.g.\
+  by @{command_ref "show"}) are handled as follows: @{command
+  "assume"} expects to be able to unify with existing premises in the
+  goal, while @{command "presume"} leaves @{text \<phi>} as new subgoals.
+  
+  Several lists of assumptions may be given (separated by
+  @{keyword_ref "and"}; the resulting list of current facts consists
+  of all of these concatenated.
+  
+  \item @{command "def"}~@{text "x \<equiv> t"} introduces a local
+  (non-polymorphic) definition.  In results exported from the context,
+  @{text x} is replaced by @{text t}.  Basically, ``@{command
+  "def"}~@{text "x \<equiv> t"}'' abbreviates ``@{command "fix"}~@{text
+  x}~@{command "assume"}~@{text "x \<equiv> t"}'', with the resulting
+  hypothetical equation solved by reflexivity.
+  
+  The default name for the definitional equation is @{text x_def}.
+  Several simultaneous definitions may be given at the same time.
+
+  \end{description}
+
+  The special name @{fact_ref prems} refers to all assumptions of the
+  current context as a list of theorems.  This feature should be used
+  with great care!  It is better avoided in final proof texts.
+*}
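+
+text {* For illustration, a small hypothetical combination of @{command
+  "fix"}, @{command "assume"}, and @{command "def"}: *}
+
+  notepad
+  begin
+    fix x :: nat
+    assume a: "x = 0"
+    def y \<equiv> "Suc x"
+    have "y = Suc 0" unfolding y_def by (simp add: a)
+  end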
+
+
+subsection {* Term abbreviations \label{sec:term-abbrev} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "let"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{keyword_def "is"} & : & syntax \\
+  \end{matharray}
+
+  Abbreviations may be either bound by explicit @{command
+  "let"}~@{text "p \<equiv> t"} statements, or by annotating assumptions or
+  goal statements with a list of patterns ``@{text "(\<IS> p\<^sub>1 \<dots>
+  p\<^sub>n)"}''.  In both cases, higher-order matching is invoked to
+  bind extra-logical term variables, which may be either named
+  schematic variables of the form @{text ?x}, or nameless dummies
+  ``@{variable _}'' (underscore). Note that in the @{command "let"}
+  form the patterns occur on the left-hand side, while the @{keyword
+  "is"} patterns are in postfix position.
+
+  Polymorphism of term bindings is handled in Hindley-Milner style,
+  similar to ML.  Type variables referring to local assumptions or
+  open goal statements are \emph{fixed}, while those of finished
+  results or bound by @{command "let"} may occur in \emph{arbitrary}
+  instances later.  Even though actual polymorphism should be rarely
+  used in practice, this mechanism is essential to achieve proper
+  incremental type-inference, as the user proceeds to build up the
+  Isar proof text from left to right.
+
+  \medskip Term abbreviations are quite different from local
+  definitions as introduced via @{command "def"} (see
+  \secref{sec:proof-context}).  The latter are visible within the
+  logic as actual equations, while abbreviations disappear during the
+  input process just after type checking.  Also note that @{command
+  "def"} does not support polymorphism.
+
+  @{rail \<open>
+    @@{command let} ((@{syntax term} + @'and') '=' @{syntax term} + @'and')
+  \<close>}
+
+  The syntax of @{keyword "is"} patterns follows @{syntax term_pat} or
+  @{syntax prop_pat} (see \secref{sec:term-decls}).
+
+  \begin{description}
+
+  \item @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
+  text variables in patterns @{text "p\<^sub>1, \<dots>, p\<^sub>n"} by simultaneous
+  higher-order matching against terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"}.
+
+  \item @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
+  matches @{text "p\<^sub>1, \<dots>, p\<^sub>n"} against the preceding statement.  Also
+  note that @{keyword "is"} is not a separate command, but part of
+  others (such as @{command "assume"}, @{command "have"} etc.).
+
+  \end{description}
+
+  Some \emph{implicit} term abbreviations\index{term abbreviations}
+  for goals and facts are available as well.  For any open goal,
+  @{variable_ref thesis} refers to its object-level statement,
+  abstracted over any meta-level parameters (if present).  Likewise,
+  @{variable_ref this} is bound for fact statements resulting from
+  assumptions or finished goals.  In case @{variable this} refers to
+  an object-logic statement that is an application @{text "f t"}, then
+  @{text t} is bound to the special text variable ``@{variable "\<dots>"}''
+  (three dots).  The canonical application of this convenience is in
+  calculational proofs (see \secref{sec:calculation}).
+*}
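+
+text {* For illustration, a hypothetical combination of @{command "let"}
+  and an @{keyword "is"} pattern; the abbreviations @{text "?P"},
+  @{text "?l"}, @{text "?r"} are purely textual and disappear after type
+  checking. *}
+
+  notepad
+  begin
+    fix A B :: bool
+    let ?P = "A \<and> B"
+    assume "?P" (is "?l \<and> ?r")
+    then have "?r \<and> ?l" by blast
+  end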
+
+
+subsection {* Facts and forward chaining \label{sec:proof-facts} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "note"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "then"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "from"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "with"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "using"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
+    @{command_def "unfolding"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  New facts are established either by assumption or proof of local
+  statements.  Any fact will usually be involved in further proofs,
+  either as explicit arguments of proof methods, or when forward
+  chaining towards the next goal via @{command "then"} (and variants);
+  @{command "from"} and @{command "with"} are composite forms
+  involving @{command "note"}.  The @{command "using"} elements
+  augments the collection of used facts \emph{after} a goal has been
+  stated.  Note that the special theorem name @{fact_ref this} refers
+  to the most recently established facts, but only \emph{before}
+  issuing a follow-up claim.
+
+  @{rail \<open>
+    @@{command note} (@{syntax thmdef}? @{syntax thmrefs} + @'and')
+    ;
+    (@@{command from} | @@{command with} | @@{command using} | @@{command unfolding})
+      (@{syntax thmrefs} + @'and')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "note"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"} recalls existing facts
+  @{text "b\<^sub>1, \<dots>, b\<^sub>n"}, binding the result as @{text a}.  Note that
+  attributes may be involved as well, both on the left and right hand
+  sides.
+
+  \item @{command "then"} indicates forward chaining by the current
+  facts in order to establish the goal to be claimed next.  The
+  initial proof method invoked to refine that will be offered the
+  facts to do ``anything appropriate'' (see also
+  \secref{sec:proof-steps}).  For example, method @{method (Pure) rule}
+  (see \secref{sec:pure-meth-att}) would typically do an elimination
+  rather than an introduction.  Automatic methods usually insert the
+  facts into the goal state before operation.  This provides a simple
+  scheme to control relevance of facts in automated proof search.
+  
+  \item @{command "from"}~@{text b} abbreviates ``@{command
+  "note"}~@{text b}~@{command "then"}''; thus @{command "then"} is
+  equivalent to ``@{command "from"}~@{text this}''.
+  
+  \item @{command "with"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} abbreviates ``@{command
+  "from"}~@{text "b\<^sub>1 \<dots> b\<^sub>n \<AND> this"}''; thus the forward chaining
+  is from earlier facts together with the current ones.
+  
+  \item @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} augments the facts being
+  currently indicated for use by a subsequent refinement step (such as
+  @{command_ref "apply"} or @{command_ref "proof"}).
+  
+  \item @{command "unfolding"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is structurally
+  similar to @{command "using"}, but unfolds definitional equations
+  @{text "b\<^sub>1, \<dots> b\<^sub>n"} throughout the goal state and facts.
+
+  \end{description}
+
+  Forward chaining with an empty list of theorems is the same as not
+  chaining at all.  Thus ``@{command "from"}~@{text nothing}'' has no
+  effect apart from entering @{text "prove(chain)"} mode, since
+  @{fact_ref nothing} is bound to the empty list of theorems.
+
+  Basic proof methods (such as @{method_ref (Pure) rule}) expect multiple
+  facts to be given in their proper order, corresponding to a prefix
+  of the premises of the rule involved.  Note that positions may be
+  easily skipped using something like @{command "from"}~@{text "_
+  \<AND> a \<AND> b"}, for example.  This involves the trivial rule
+  @{text "PROP \<psi> \<Longrightarrow> PROP \<psi>"}, which is bound in Isabelle/Pure as
+  ``@{fact_ref "_"}'' (underscore).
+
+  Automated methods (such as @{method simp} or @{method auto}) just
+  insert any given facts before their usual operation.  Depending on
+  the kind of procedure involved, the order of facts is less
+  significant here.
+*}
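+
+text {* For illustration, a hypothetical chain of @{command "note"},
+  @{command "from"}, and @{command "with"}: *}
+
+  notepad
+  begin
+    fix A B :: bool
+    assume a: "A" and b: "B"
+    note ab = a b
+    from ab have "A \<and> B" by (rule conjI)
+    with a have "A \<and> (A \<and> B)" by (rule conjI)
+  end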
+
+
+subsection {* Goals \label{sec:goals} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "lemma"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "theorem"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "corollary"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "schematic_lemma"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "schematic_theorem"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "schematic_corollary"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "have"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
+    @{command_def "show"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
+    @{command_def "hence"} & : & @{text "proof(state) \<rightarrow> proof(prove)"} \\
+    @{command_def "thus"} & : & @{text "proof(state) \<rightarrow> proof(prove)"} \\
+    @{command_def "print_statement"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  From a theory context, proof mode is entered by an initial goal
+  command such as @{command "lemma"}, @{command "theorem"}, or
+  @{command "corollary"}.  Within a proof, new claims may be
+  introduced locally as well; four variants are available here to
+  indicate whether forward chaining of facts should be performed
+  initially (via @{command_ref "then"}), and whether the final result
+  is meant to solve some pending goal.
+
+  Goals may consist of multiple statements, resulting in a list of
+  facts eventually.  A pending multi-goal is internally represented as
+  a meta-level conjunction (@{text "&&&"}), which is usually
+  split into the corresponding number of sub-goals prior to an initial
+  method application, via @{command_ref "proof"}
+  (\secref{sec:proof-steps}) or @{command_ref "apply"}
+  (\secref{sec:tactic-commands}).  The @{method_ref induct} method
+  covered in \secref{sec:cases-induct} acts on multiple claims
+  simultaneously.
+
+  Claims at the theory level may be either in short or long form.  A
+  short goal merely consists of several simultaneous propositions
+  (often just one).  A long goal includes an explicit context
+  specification for the subsequent conclusion, involving local
+  parameters and assumptions.  Here the role of each part of the
+  statement is explicitly marked by separate keywords (see also
+  \secref{sec:locale}); the local assumptions being introduced here
+  are available as @{fact_ref assms} in the proof.  Moreover, there
+  are two kinds of conclusions: @{element_def "shows"} states several
+  simultaneous propositions (essentially a big conjunction), while
+  @{element_def "obtains"} claims several simultaneous simultaneous
+  contexts of (essentially a big disjunction of eliminated parameters
+  and assumptions, cf.\ \secref{sec:obtain}).
+
+  @{rail \<open>
+    (@@{command lemma} | @@{command theorem} | @@{command corollary} |
+     @@{command schematic_lemma} | @@{command schematic_theorem} |
+     @@{command schematic_corollary}) @{syntax target}? (goal | longgoal)
+    ;
+    (@@{command have} | @@{command show} | @@{command hence} | @@{command thus}) goal
+    ;
+    @@{command print_statement} @{syntax modes}? @{syntax thmrefs}
+    ;
+  
+    goal: (@{syntax props} + @'and')
+    ;
+    longgoal: @{syntax thmdecl}? (@{syntax_ref "includes"}?) (@{syntax context_elem} * ) conclusion
+    ;
+    conclusion: @'shows' goal | @'obtains' (@{syntax parname}? case + '|')
+    ;
+    case: (@{syntax vars} + @'and') @'where' (@{syntax props} + @'and')
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "lemma"}~@{text "a: \<phi>"} enters proof mode with
+  @{text \<phi>} as main goal, eventually resulting in some fact @{text "\<turnstile>
+  \<phi>"} to be put back into the target context.  An additional @{syntax
+  context} specification may build up an initial proof context for the
+  subsequent claim; this includes local definitions and syntax as
+  well, see also @{syntax "includes"} in \secref{sec:bundle} and
+  @{syntax context_elem} in \secref{sec:locale}.
+  
+  \item @{command "theorem"}~@{text "a: \<phi>"} and @{command
+  "corollary"}~@{text "a: \<phi>"} are essentially the same as @{command
+  "lemma"}~@{text "a: \<phi>"}, but the facts are internally marked as
+  being of a different kind.  This discrimination acts like a formal
+  comment.
+
+  \item @{command "schematic_lemma"}, @{command "schematic_theorem"},
+  @{command "schematic_corollary"} are similar to @{command "lemma"},
+  @{command "theorem"}, @{command "corollary"}, respectively but allow
+  the statement to contain unbound schematic variables.
+
+  Under normal circumstances, an Isar proof text needs to specify
+  claims explicitly.  Schematic goals are more like goals in Prolog,
+  where certain results are synthesized in the course of reasoning.
+  With schematic statements, the inherent compositionality of Isar
+  proofs is lost, which also impacts performance, because proof
+  checking is forced into sequential mode.
+  
+  \item @{command "have"}~@{text "a: \<phi>"} claims a local goal,
+  eventually resulting in a fact within the current logical context.
+  This operation is completely independent of any pending sub-goals of
+  an enclosing goal statement, so @{command "have"} may be freely
+  used for experimental exploration of potential results within a
+  proof body.
+  
+  \item @{command "show"}~@{text "a: \<phi>"} is like @{command
+  "have"}~@{text "a: \<phi>"} plus a second stage to refine some pending
+  sub-goal for each one of the finished results, after having been
+  exported into the corresponding context (at the head of the
+  sub-proof of this @{command "show"} command).
+  
+  To accommodate interactive debugging, resulting rules are printed
+  before being applied internally.  Even more, interactive execution
+  of @{command "show"} predicts potential failure and displays the
+  resulting error as a warning beforehand.  Watch out for the
+  following message:
+
+  %FIXME proper antiquotation
+  \begin{ttbox}
+  Problem! Local statement will fail to solve any pending goal
+  \end{ttbox}
+  
+  \item @{command "hence"} abbreviates ``@{command "then"}~@{command
+  "have"}'', i.e.\ claims a local goal to be proven by forward
+  chaining the current facts.  Note that @{command "hence"} is also
+  equivalent to ``@{command "from"}~@{text this}~@{command "have"}''.
+  
+  \item @{command "thus"} abbreviates ``@{command "then"}~@{command
+  "show"}''.  Note that @{command "thus"} is also equivalent to
+  ``@{command "from"}~@{text this}~@{command "show"}''.
+  
+  \item @{command "print_statement"}~@{text a} prints facts from the
+  current theory or proof context in long statement form, according to
+  the syntax for @{command "lemma"} given above.
+
+  \end{description}
+
+  Any goal statement causes some term abbreviations (such as
+  @{variable_ref "?thesis"}) to be bound automatically, see also
+  \secref{sec:term-abbrev}.
+
+  The optional case names of @{element_ref "obtains"} have a twofold
+  meaning: (1) during the proof of this claim they refer to the local
+  context introductions, (2) the resulting rule is annotated
+  accordingly to support symbolic case splits when used with the
+  @{method_ref cases} method (cf.\ \secref{sec:cases-induct}).
+*}
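+
+text {* For illustration, a hypothetical long goal statement with explicit
+  @{text "fixes"}/@{text "assumes"}/@{text "shows"} parts; the local
+  assumptions are available as @{text assms} in the proof body. *}
+
+  lemma hypothetical_example:
+    fixes n :: nat
+    assumes a: "n = 0"
+    shows "n + n = 0" and "n * n = 0"
+  proof -
+    from a show "n + n = 0" by simp
+    from a show "n * n = 0" by simp
+  qed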
+
+
+section {* Refinement steps *}
+
+subsection {* Proof method expressions \label{sec:proof-meth} *}
+
+text {* Proof methods are either basic ones, or expressions composed
+  of methods via ``@{verbatim ","}'' (sequential composition),
+  ``@{verbatim "|"}'' (alternative choices), ``@{verbatim "?"}''
+  (try), ``@{verbatim "+"}'' (repeat at least once), ``@{verbatim
+  "["}@{text n}@{verbatim "]"}'' (restriction to first @{text n}
+  sub-goals, with default @{text "n = 1"}).  In practice, proof
+  methods are usually just a comma separated list of @{syntax
+  nameref}~@{syntax args} specifications.  Note that parentheses may
+  be dropped for single method specifications (with no arguments).
+
+  @{rail \<open>
+    @{syntax_def method}:
+      (@{syntax nameref} | '(' methods ')') (() | '?' | '+' | '[' @{syntax nat}? ']')
+    ;
+    methods: (@{syntax nameref} @{syntax args} | @{syntax method}) + (',' | '|')
+  \<close>}
+
+  Proper Isar proof methods do \emph{not} admit arbitrary goal
+  addressing, but refer either to the first sub-goal or all sub-goals
+  uniformly.  The goal restriction operator ``@{text "[n]"}''
+  evaluates a method expression within a sandbox consisting of the
+  first @{text n} sub-goals (which need to exist).  For example, the
+  method ``@{text "simp_all[3]"}'' simplifies the first three
+  sub-goals, while ``@{text "(rule foo, simp_all)[]"}'' simplifies all
+  new goals that emerge from applying rule @{text "foo"} to the
+  originally first one.
+
+  Improper methods, notably tactic emulations, offer a separate
+  low-level goal addressing scheme as explicit argument to the
+  individual tactic being involved.  Here ``@{text "[!]"}'' refers to
+  all goals, and ``@{text "[n-]"}'' to all goals starting from @{text
+  "n"}.
+
+  @{rail \<open>
+    @{syntax_def goal_spec}:
+      '[' (@{syntax nat} '-' @{syntax nat} | @{syntax nat} '-' | @{syntax nat} | '!' ) ']'
+  \<close>}
+*}
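+
+text {* For illustration, a hypothetical composed method expression using
+  sequential composition ``@{verbatim ","}'' and repetition
+  ``@{verbatim "+"}'': *}
+
+  lemma "(x::nat) = x \<and> y = y"
+    by (rule conjI, (rule refl)+)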
+
+
+subsection {* Initial and terminal proof steps \label{sec:proof-steps} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "proof"} & : & @{text "proof(prove) \<rightarrow> proof(state)"} \\
+    @{command_def "qed"} & : & @{text "proof(state) \<rightarrow> proof(state) | local_theory | theory"} \\
+    @{command_def "by"} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
+    @{command_def ".."} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
+    @{command_def "."} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
+    @{command_def "sorry"} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
+  \end{matharray}
+
+  Arbitrary goal refinement via tactics is considered harmful.
+  Structured proof composition in Isar admits proof methods to be
+  invoked in two places only.
+
+  \begin{enumerate}
+
+  \item An \emph{initial} refinement step @{command_ref
+  "proof"}~@{text "m\<^sub>1"} reduces a newly stated goal to a number
+  of sub-goals that are to be solved later.  Facts are passed to
+  @{text "m\<^sub>1"} for forward chaining, if so indicated by @{text
+  "proof(chain)"} mode.
+  
+  \item A \emph{terminal} conclusion step @{command_ref "qed"}~@{text
+  "m\<^sub>2"} is intended to solve remaining goals.  No facts are
+  passed to @{text "m\<^sub>2"}.
+
+  \end{enumerate}
+
+  The only other (proper) way to affect pending goals in a proof body
+  is by @{command_ref "show"}, which involves an explicit statement of
+  what is to be solved eventually.  Thus we avoid the fundamental
+  problem of unstructured tactic scripts that consist of numerous
+  consecutive goal transformations, with invisible effects.
+
+  \medskip As a general rule of thumb for good proof style, initial
+  proof methods should either solve the goal completely, or constitute
+  some well-understood reduction to new sub-goals.  Arbitrary
+  automatic proof tools that are prone to leave a large number of badly
+  structured sub-goals are no help in continuing the proof document in
+  an intelligible manner.
+
+  Unless given explicitly by the user, the default initial method is
+  @{method_ref (Pure) rule} (or its classical variant @{method_ref
+  rule}), which applies a single standard elimination or introduction
+  rule according to the topmost symbol involved.  There is no separate
+  default terminal method.  Any remaining goals are always solved by
+  assumption in the very last step.
+
+  @{rail \<open>
+    @@{command proof} method?
+    ;
+    @@{command qed} method?
+    ;
+    @@{command "by"} method method?
+    ;
+    (@@{command "."} | @@{command ".."} | @@{command sorry})
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "proof"}~@{text "m\<^sub>1"} refines the goal by proof
+  method @{text "m\<^sub>1"}; facts for forward chaining are passed if so
+  indicated by @{text "proof(chain)"} mode.
+  
+  \item @{command "qed"}~@{text "m\<^sub>2"} refines any remaining goals by
+  proof method @{text "m\<^sub>2"} and concludes the sub-proof by assumption.
+  If the goal had been @{text "show"} (or @{text "thus"}), some
+  pending sub-goal is solved as well by the rule resulting from the
+  result \emph{exported} into the enclosing goal context.  Thus @{text
+  "qed"} may fail for two reasons: either @{text "m\<^sub>2"} fails, or the
+  resulting rule does not fit any pending goal\footnote{This
+  includes any additional ``strong'' assumptions as introduced by
+  @{command "assume"}.} of the enclosing context.  Debugging such a
+  situation might involve temporarily changing @{command "show"} into
+  @{command "have"}, or weakening the local context by replacing
+  occurrences of @{command "assume"} by @{command "presume"}.
+  
+  \item @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal
+  proof}\index{proof!terminal}; it abbreviates @{command
+  "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"}, but with
+  backtracking across both methods.  Debugging an unsuccessful
+  @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} command can be done by expanding its
+  definition; in many cases @{command "proof"}~@{text "m\<^sub>1"} (or even
+  @{text "apply"}~@{text "m\<^sub>1"}) is already sufficient to see the
+  problem.
+
+  \item ``@{command ".."}'' is a \emph{default
+  proof}\index{proof!default}; it abbreviates @{command "by"}~@{text
+  "rule"}.
+
+  \item ``@{command "."}'' is a \emph{trivial
+  proof}\index{proof!trivial}; it abbreviates @{command "by"}~@{text
+  "this"}.
+  
+  \item @{command "sorry"} is a \emph{fake proof}\index{proof!fake}
+  pretending to solve the pending claim without further ado.  This
+  only works in interactive development, or if the @{attribute
+  quick_and_dirty} option is enabled.  Facts emerging from fake
+  proofs are not the real thing.  Internally, the derivation object is
+  tainted by an oracle invocation, which may be inspected via the
+  theorem status \cite{isabelle-implementation}.
+  
+  The most important application of @{command "sorry"} is to support
+  experimentation and top-down proof development.
+
+  \end{description}
+*}
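+
+text {* For illustration, two hypothetical claims: one proven with explicit
+  initial (@{command "proof"}) and terminal (@{command "qed"}) steps, the
+  other by the default proof ``@{command ".."}'': *}
+
+  notepad
+  begin
+    fix A B :: bool
+    assume a: "A" and b: "B"
+
+    have "A \<and> B"
+    proof (rule conjI)
+      show "A" by (fact a)
+      show "B" by (fact b)
+    qed
+
+    have "B \<and> A" using b a ..
+  end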
+
+
+subsection {* Fundamental methods and attributes \label{sec:pure-meth-att} *}
+
+text {*
+  The following proof methods and attributes refer to basic logical
+  operations of Isar.  Further methods and attributes are provided by
+  several generic and object-logic specific tools and packages (see
+  \chref{ch:gen-tools} and \partref{part:hol}).
+
+  \begin{matharray}{rcl}
+    @{command_def "print_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\[0.5ex]
+    @{method_def "-"} & : & @{text method} \\
+    @{method_def "fact"} & : & @{text method} \\
+    @{method_def "assumption"} & : & @{text method} \\
+    @{method_def "this"} & : & @{text method} \\
+    @{method_def (Pure) "rule"} & : & @{text method} \\
+    @{attribute_def (Pure) "intro"} & : & @{text attribute} \\
+    @{attribute_def (Pure) "elim"} & : & @{text attribute} \\
+    @{attribute_def (Pure) "dest"} & : & @{text attribute} \\
+    @{attribute_def (Pure) "rule"} & : & @{text attribute} \\[0.5ex]
+    @{attribute_def "OF"} & : & @{text attribute} \\
+    @{attribute_def "of"} & : & @{text attribute} \\
+    @{attribute_def "where"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{method fact} @{syntax thmrefs}?
+    ;
+    @@{method (Pure) rule} @{syntax thmrefs}?
+    ;
+    rulemod: ('intro' | 'elim' | 'dest')
+      ((('!' | () | '?') @{syntax nat}?) | 'del') ':' @{syntax thmrefs}
+    ;
+    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest})
+      ('!' | () | '?') @{syntax nat}?
+    ;
+    @@{attribute (Pure) rule} 'del'
+    ;
+    @@{attribute OF} @{syntax thmrefs}
+    ;
+    @@{attribute of} @{syntax insts} ('concl' ':' @{syntax insts})? \<newline>
+      (@'for' (@{syntax vars} + @'and'))?
+    ;
+    @@{attribute "where"}
+      ((@{syntax name} | @{syntax var} | @{syntax typefree} | @{syntax typevar}) '='
+      (@{syntax type} | @{syntax term}) * @'and') \<newline>
+      (@'for' (@{syntax vars} + @'and'))?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "print_rules"} prints rules declared via attributes
+  @{attribute (Pure) intro}, @{attribute (Pure) elim}, @{attribute
+  (Pure) dest} of Isabelle/Pure.
+
+  See also the analogous @{command "print_claset"} command for similar
+  rule declarations of the classical reasoner
+  (\secref{sec:classical}).
+
+  \item ``@{method "-"}'' (minus) does nothing but insert the forward
+  chaining facts as premises into the goal.  Note that command
+  @{command_ref "proof"} without any method actually performs a single
+  reduction step using the @{method_ref (Pure) rule} method; thus a plain
+  \emph{do-nothing} proof step would be ``@{command "proof"}~@{text
+  "-"}'' rather than @{command "proof"} alone.
+  
+  \item @{method "fact"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} composes some fact from
+  @{text "a\<^sub>1, \<dots>, a\<^sub>n"} (or implicitly from the current proof context)
+  modulo unification of schematic type and term variables.  The rule
+  structure is not taken into account, i.e.\ meta-level implication is
+  considered atomic.  This is the same principle underlying literal
+  facts (cf.\ \secref{sec:syn-att}): ``@{command "have"}~@{text
+  "\<phi>"}~@{command "by"}~@{text fact}'' is equivalent to ``@{command
+  "note"}~@{verbatim "`"}@{text \<phi>}@{verbatim "`"}'' provided that
+  @{text "\<turnstile> \<phi>"} is an instance of some known @{text "\<turnstile> \<phi>"} in the
+  proof context.
+  
+  \item @{method assumption} solves some goal by a single assumption
+  step.  All given facts are guaranteed to participate in the
+  refinement; this means there may be only 0 or 1 in the first place.
+  Recall that @{command "qed"} (\secref{sec:proof-steps}) already
+  concludes any remaining sub-goals by assumption, so structured
+  proofs usually need not quote the @{method assumption} method at
+  all.
+  
+  \item @{method this} applies all of the current facts directly as
+  rules.  Recall that ``@{command "."}'' (dot) abbreviates ``@{command
+  "by"}~@{text this}''.
+  
+  \item @{method (Pure) rule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some rule given as
+  argument in backward manner; facts are used to reduce the rule
+  before applying it to the goal.  Thus @{method (Pure) rule} without facts
+  is plain introduction, while with facts it becomes elimination.
+  
+  When no arguments are given, the @{method (Pure) rule} method tries to pick
+  appropriate rules automatically, as declared in the current context
+  using the @{attribute (Pure) intro}, @{attribute (Pure) elim},
+  @{attribute (Pure) dest} attributes (see below).  This is the
+  default behavior of @{command "proof"} and ``@{command ".."}'' 
+  (double-dot) steps (see \secref{sec:proof-steps}).
+  
+  \item @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
+  @{attribute (Pure) dest} declare introduction, elimination, and
+  destruct rules, to be used with method @{method (Pure) rule}, and similar
+  tools.  Note that the latter will ignore rules declared with
+  ``@{text "?"}'', while ``@{text "!"}''  are used most aggressively.
+  
+  The classical reasoner (see \secref{sec:classical}) introduces its
+  own variants of these attributes; use qualified names to access the
+  present versions of Isabelle/Pure, i.e.\ @{attribute (Pure)
+  "Pure.intro"}.
+  
+  \item @{attribute (Pure) rule}~@{text del} undeclares introduction,
+  elimination, or destruct rules.
+
+  \item @{attribute OF}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some theorem to all
+  of the given rules @{text "a\<^sub>1, \<dots>, a\<^sub>n"} in canonical right-to-left
+  order, which means that premises stemming from the @{text "a\<^sub>i"}
+  emerge in parallel in the result, without interfering with each
+  other.  In many practical situations, the @{text "a\<^sub>i"} do not have
+  premises themselves, so @{text "rule [OF a\<^sub>1 \<dots> a\<^sub>n]"} can be actually
+  read as functional application (modulo unification).
+
+  Argument positions may be effectively skipped by using ``@{text _}''
+  (underscore), which refers to the propositional identity rule in the
+  Pure theory.
+  
+  \item @{attribute of}~@{text "t\<^sub>1 \<dots> t\<^sub>n"} performs positional
+  instantiation of term variables.  The terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"} are
+  substituted for any schematic variables occurring in a theorem from
+  left to right; ``@{text _}'' (underscore) indicates to skip a
+  position.  Arguments following a ``@{text "concl:"}'' specification
+  refer to positions of the conclusion of a rule.
+
+  An optional context of local variables @{text "\<FOR> x\<^sub>1 \<dots> x\<^sub>m"} may
+  be specified: the instantiated theorem is exported, and these
+  variables become schematic (usually with some shifting of indices).
+  
+  \item @{attribute "where"}~@{text "x\<^sub>1 = t\<^sub>1 \<AND> \<dots> x\<^sub>n = t\<^sub>n"}
+  performs named instantiation of schematic type and term variables
+  occurring in a theorem.  Schematic variables have to be specified on
+  the left-hand side (e.g.\ @{text "?x1.3"}).  The question mark may
+  be omitted if the variable name is a plain identifier without index.
+  As type instantiations are inferred from term instantiations,
+  explicit type instantiations are seldom necessary.
+
+  An optional context of local variables @{text "\<FOR> x\<^sub>1 \<dots> x\<^sub>m"} may
+  be specified as for @{attribute "of"} above.
+
+  \end{description}
+*}
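+
+text {* For illustration, some hypothetical applications of the
+  @{attribute OF}, @{attribute of}, and @{attribute "where"} attributes to
+  library rules, inspected via the diagnostic @{command "thm"} command: *}
+
+  thm conjI [OF TrueI TrueI]
+  thm conjI [of "True" "False"]
+  thm conjI [where Q = "False"]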
+
+
+subsection {* Emulating tactic scripts \label{sec:tactic-commands} *}
+
+text {*
+  Isar provides separate commands to accommodate tactic-style
+  proof scripts within the same system.  While being outside the
+  orthodox Isar proof language, these might come in handy for
+  interactive exploration and debugging, or even actual tactical proof
+  within new-style theories (to benefit from document preparation, for
+  example).  See also \secref{sec:tactics} for actual tactics, that
+  have been encapsulated as proof methods.  Proper proof methods may
+  be used in scripts, too.
+
+  \begin{matharray}{rcl}
+    @{command_def "apply"}@{text "\<^sup>*"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
+    @{command_def "apply_end"}@{text "\<^sup>*"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "done"}@{text "\<^sup>*"} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
+    @{command_def "defer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "prefer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "back"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow> proof"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    ( @@{command apply} | @@{command apply_end} ) @{syntax method}
+    ;
+    @@{command defer} @{syntax nat}?
+    ;
+    @@{command prefer} @{syntax nat}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "apply"}~@{text m} applies proof method @{text m} in
+  initial position, but unlike @{command "proof"} it retains ``@{text
+  "proof(prove)"}'' mode.  Thus consecutive method applications may be
+  given just as in tactic scripts.
+  
+  Facts are passed to @{text m} as indicated by the goal's
+  forward-chain mode, and are \emph{consumed} afterwards.  Thus any
+  further @{command "apply"} command would always work in a purely
+  backward manner.
+  
+  \item @{command "apply_end"}~@{text "m"} applies proof method @{text
+  m} as if in terminal position.  Basically, this simulates a
+  multi-step tactic script for @{command "qed"}, but may be given
+  anywhere within the proof body.
+  
+  No facts are passed to @{text m} here.  Furthermore, the static
+  context is that of the enclosing goal (as for actual @{command
+  "qed"}).  Thus the proof method may not refer to any assumptions
+  introduced in the current body, for example.
+  
+  \item @{command "done"} completes a proof script, provided that the
+  current goal state is solved completely.  Note that actual
+  structured proof commands (e.g.\ ``@{command "."}'' or @{command
+  "sorry"}) may be used to conclude proof scripts as well.
+
+  \item @{command "defer"}~@{text n} and @{command "prefer"}~@{text n}
+  shuffle the list of pending goals: @{command "defer"} puts off
+  sub-goal @{text n} to the end of the list (@{text "n = 1"} by
+  default), while @{command "prefer"} brings sub-goal @{text n} to the
+  front.
+  
+  \item @{command "back"} does back-tracking over the result sequence
+  of the latest proof command.  Any proof command may return multiple
+  results, and this command explores the possibilities step-by-step.
+  It is mainly useful for experimentation and interactive exploration,
+  and should be avoided in finished proofs.
+  
+  \end{description}
+
+  Any proper Isar proof method may be used with tactic script commands
+  such as @{command "apply"}.  A few additional emulations of actual
+  tactics are provided as well; these would never be used in actual
+  structured proofs, of course.
+*}
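+
+text {* For illustration, a hypothetical tactic-style script using
+  @{command "apply"} and @{command "done"}: *}
+
+  lemma "A \<longrightarrow> A \<and> A"
+    apply (rule impI)
+    apply (rule conjI)
+     apply assumption
+    apply assumption
+    done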
+
+
+subsection {* Defining proof methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "method_setup"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command method_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "method_setup"}~@{text "name = text description"}
+  defines a proof method in the current theory.  The given @{text
+  "text"} has to be an ML expression of type
+  @{ML_type "(Proof.context -> Proof.method) context_parser"}, cf.\
+  basic parsers defined in structure @{ML_structure Args} and @{ML_structure
+  Attrib}.  There are also combinators like @{ML METHOD} and @{ML
+  SIMPLE_METHOD} to turn certain tactic forms into official proof
+  methods; the primed versions refer to tactics with explicit goal
+  addressing.
+
+  Here are some example method definitions:
+
+  \end{description}
+*}
+
+  method_setup my_method1 = {*
+    Scan.succeed (K (SIMPLE_METHOD' (fn i: int => no_tac)))
+  *}  "my first method (without any arguments)"
+
+  method_setup my_method2 = {*
+    Scan.succeed (fn ctxt: Proof.context =>
+      SIMPLE_METHOD' (fn i: int => no_tac))
+  *}  "my second method (with context)"
+
+  method_setup my_method3 = {*
+    Attrib.thms >> (fn thms: thm list => fn ctxt: Proof.context =>
+      SIMPLE_METHOD' (fn i: int => no_tac))
+  *}  "my third method (with theorem arguments and context)"
+
+
+section {* Generalized elimination \label{sec:obtain} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "obtain"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
+    @{command_def "guess"}@{text "\<^sup>*"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  Generalized elimination means that additional elements with certain
+  properties may be introduced in the current context, by virtue of a
+  locally proven ``soundness statement''.  Technically speaking, the
+  @{command "obtain"} language element is like a declaration of
+  @{command "fix"} and @{command "assume"} (see also see
+  \secref{sec:proof-context}), together with a soundness proof of its
+  additional claim.  According to the nature of existential reasoning,
+  assumptions get eliminated from any result exported from the context
+  later, provided that the corresponding parameters do \emph{not}
+  occur in the conclusion.
+
+  @{rail \<open>
+    @@{command obtain} @{syntax parname}? (@{syntax vars} + @'and')
+      @'where' (@{syntax props} + @'and')
+    ;
+    @@{command guess} (@{syntax vars} + @'and')
+  \<close>}
+
+  The derived Isar command @{command "obtain"} is defined as follows
+  (where @{text "b\<^sub>1, \<dots>, b\<^sub>k"} shall refer to (optional)
+  facts indicated for forward chaining).
+  \begin{matharray}{l}
+    @{text "\<langle>using b\<^sub>1 \<dots> b\<^sub>k\<rangle>"}~~@{command "obtain"}~@{text "x\<^sub>1 \<dots> x\<^sub>m \<WHERE> a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n  \<langle>proof\<rangle> \<equiv>"} \\[1ex]
+    \quad @{command "have"}~@{text "\<And>thesis. (\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
+    \quad @{command "proof"}~@{method succeed} \\
+    \qquad @{command "fix"}~@{text thesis} \\
+    \qquad @{command "assume"}~@{text "that [Pure.intro?]: \<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis"} \\
+    \qquad @{command "then"}~@{command "show"}~@{text thesis} \\
+    \quad\qquad @{command "apply"}~@{text -} \\
+    \quad\qquad @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>k  \<langle>proof\<rangle>"} \\
+    \quad @{command "qed"} \\
+    \quad @{command "fix"}~@{text "x\<^sub>1 \<dots> x\<^sub>m"}~@{command "assume"}@{text "\<^sup>* a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} \\
+  \end{matharray}
+
+  Typically, the soundness proof is relatively straightforward, often
+  just by canonical automated tools such as ``@{command "by"}~@{text
+  simp}'' or ``@{command "by"}~@{text blast}''.  Accordingly, the
+  ``@{text that}'' reduction above is declared as simplification and
+  introduction rule.
+
+  In a sense, @{command "obtain"} represents at the level of Isar
+  proofs what would be meta-logical existential quantifiers and
+  conjunctions.  This concept has a broad range of useful
+  applications, ranging from plain elimination (or introduction) of
+  object-level existential and conjunctions, to elimination over
+  results of symbolic evaluation of recursive definitions, for
+  example.  Also note that @{command "obtain"} without parameters acts
+  much like @{command "have"}, where the result is treated as a
+  genuine assumption.
+
+  An alternative name to be used instead of ``@{text that}'' above may
+  be given in parentheses.
+
+  \medskip The improper variant @{command "guess"} is similar to
+  @{command "obtain"}, but derives the obtained statement from the
+  course of reasoning!  The proof starts with a fixed goal @{text
+  thesis}.  The subsequent proof may refine this to anything of the
+  form like @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots>
+  \<phi>\<^sub>n \<Longrightarrow> thesis"}, but must not introduce new subgoals.  The
+  final goal state is then used as reduction rule for the obtain
+  scheme described above.  Obtained parameters @{text "x\<^sub>1, \<dots>,
+  x\<^sub>m"} are marked as internal by default, which prevents the
+  proof context from being polluted by ad-hoc variables.  The variable
+  names and type constraints given as arguments for @{command "guess"}
+  specify a prefix of obtained parameters explicitly in the text.
+
+  It is important to note that the facts introduced by @{command
+  "obtain"} and @{command "guess"} may not be polymorphic: any
+  type-variables occurring here are fixed in the present context!
+*}
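+
+text {* For illustration, a minimal hypothetical use of @{command "obtain"},
+  eliminating an existential assumption by the default proof
+  ``@{command ".."}'': *}
+
+  notepad
+  begin
+    assume "\<exists>x::nat. 0 < x"
+    then obtain n where "0 < n" ..
+  end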
+
+
+section {* Calculational reasoning \label{sec:calculation} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "also"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "finally"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "moreover"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "ultimately"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "print_trans_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute trans} & : & @{text attribute} \\
+    @{attribute sym} & : & @{text attribute} \\
+    @{attribute symmetric} & : & @{text attribute} \\
+  \end{matharray}
+
+  Calculational proof is forward reasoning with implicit application
+  of transitivity rules (such as those of @{text "="}, @{text "\<le>"},
+  @{text "<"}).  Isabelle/Isar maintains an auxiliary fact register
+  @{fact_ref calculation} for accumulating results obtained by
+  transitivity composed with the current result.  Command @{command
+  "also"} updates @{fact calculation} involving @{fact this}, while
+  @{command "finally"} exhibits the final @{fact calculation} by
+  forward chaining towards the next goal statement.  Both commands
+  require valid current facts, i.e.\ may occur only after commands
+  that produce theorems such as @{command "assume"}, @{command
+  "note"}, or some finished proof of @{command "have"}, @{command
+  "show"} etc.  The @{command "moreover"} and @{command "ultimately"}
+  commands are similar to @{command "also"} and @{command "finally"},
+  but only collect further results in @{fact calculation} without
+  applying any rules yet.
+
+  Also note that the implicit term abbreviation ``@{text "\<dots>"}'' has
+  its canonical application with calculational proofs.  It refers to
+  the argument of the preceding statement. (The argument of a curried
+  infix expression happens to be its right-hand side.)
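+
+  For example, a simple transitive chain may be sketched as follows,
+  with @{text a}, @{text b}, @{text c} standing for arbitrary terms
+  and the sub-proofs replaced by @{command "sorry"}:
+
+  \begin{quote}
+  @{command "have"}~@{text "a = b"}~@{command "sorry"} \\
+  @{command "also"}~@{command "have"}~@{text "\<dots> = c"}~@{command "sorry"} \\
+  @{command "finally"}~@{command "have"}~@{text "a = c"}~@{command "."}
+  \end{quote}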
+
+  Isabelle/Isar calculations are implicitly subject to block structure
+  in the sense that new threads of calculational reasoning are
+  commenced for any new block (as opened by a local goal, for
+  example).  This means that, apart from being able to nest
+  calculations, there is no separate \emph{begin-calculation} command
+  required.
+
+  \medskip The Isar calculation proof commands may be defined as
+  follows:\footnote{We suppress internal bookkeeping such as proper
+  handling of block-structure.}
+
+  \begin{matharray}{rcl}
+    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
+    @{command "also"}@{text "\<^sub>n+1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
+    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\[0.5ex]
+    @{command "moreover"} & \equiv & @{command "note"}~@{text "calculation = calculation this"} \\
+    @{command "ultimately"} & \equiv & @{command "moreover"}~@{command "from"}~@{text calculation} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{command also} | @@{command finally}) ('(' @{syntax thmrefs} ')')?
+    ;
+    @@{attribute trans} (() | 'add' | 'del')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "also"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains the auxiliary
+  @{fact calculation} register as follows.  The first occurrence of
+  @{command "also"} in some calculational thread initializes @{fact
+  calculation} by @{fact this}. Any subsequent @{command "also"} on
+  the same level of block-structure updates @{fact calculation} by
+  some transitivity rule applied to @{fact calculation} and @{fact
+  this} (in that order).  Transitivity rules are picked from the
+  current context, unless alternative rules are given as explicit
+  arguments.
+
+  \item @{command "finally"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintaining @{fact
+  calculation} in the same way as @{command "also"}, and concludes the
+  current calculational thread.  The final result is exhibited as fact
+  for forward chaining towards the next goal. Basically, @{command
+  "finally"} just abbreviates @{command "also"}~@{command
+  "from"}~@{fact calculation}.  Typical idioms for concluding
+  calculational proofs are ``@{command "finally"}~@{command
+  "show"}~@{text ?thesis}~@{command "."}'' and ``@{command
+  "finally"}~@{command "have"}~@{text \<phi>}~@{command "."}''.
+
+  \item @{command "moreover"} and @{command "ultimately"} are
+  analogous to @{command "also"} and @{command "finally"}, but collect
+  results only, without applying rules.
+
+  \item @{command "print_trans_rules"} prints the list of transitivity
+  rules (for calculational commands @{command "also"} and @{command
+  "finally"}) and symmetry rules (for the @{attribute symmetric}
+  operation and single-step elimination patterns) of the current
+  context.
+
+  \item @{attribute trans} declares theorems as transitivity rules.
+
+  \item @{attribute sym} declares symmetry rules, as well as
+  @{attribute "Pure.elim"}@{text "?"} rules.
+
+  \item @{attribute symmetric} resolves a theorem with some rule
+  declared as @{attribute sym} in the current context.  For example,
+  ``@{command "assume"}~@{text "[symmetric]: x = y"}'' produces a
+  swapped fact derived from that assumption.
+
+  In structured proof texts it is often more appropriate to use an
+  explicit single-step elimination proof, such as ``@{command
+  "assume"}~@{text "x = y"}~@{command "then"}~@{command "have"}~@{text
+  "y = x"}~@{command ".."}''.
+
+  \end{description}
+*}
+
+
+section {* Proof by cases and induction \label{sec:cases-induct} *}
+
+subsection {* Rule contexts *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "case"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "print_cases"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def case_names} & : & @{text attribute} \\
+    @{attribute_def case_conclusion} & : & @{text attribute} \\
+    @{attribute_def params} & : & @{text attribute} \\
+    @{attribute_def consumes} & : & @{text attribute} \\
+  \end{matharray}
+
+  The puristic way to build up Isar proof contexts is by explicit
+  language elements like @{command "fix"}, @{command "assume"},
+  @{command "let"} (see \secref{sec:proof-context}).  This is adequate
+  for plain natural deduction, but easily becomes unwieldy in concrete
+  verification tasks, which typically involve big induction rules with
+  several cases.
+
+  The @{command "case"} command provides a shorthand to refer to a
+  local context symbolically: certain proof methods provide an
+  environment of named ``cases'' of the form @{text "c: x\<^sub>1, \<dots>,
+  x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>n"}; the effect of ``@{command
+  "case"}~@{text c}'' is then equivalent to ``@{command "fix"}~@{text
+  "x\<^sub>1 \<dots> x\<^sub>m"}~@{command "assume"}~@{text "c: \<phi>\<^sub>1 \<dots>
+  \<phi>\<^sub>n"}''.  Term bindings may be covered as well, notably
+  @{variable ?case} for the main conclusion.
+
+  By default, the ``terminology'' @{text "x\<^sub>1, \<dots>, x\<^sub>m"} of
+  a case value is marked as hidden, i.e.\ there is no way to refer to
+  such parameters in the subsequent proof text.  After all, original
+  rule parameters stem from somewhere outside of the current proof
+  text.  By using the explicit form ``@{command "case"}~@{text "(c
+  y\<^sub>1 \<dots> y\<^sub>m)"}'' instead, the proof author is able to
+  choose local names that fit nicely into the current context.
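+
+  For example, in a structural induction over Isabelle/HOL natural
+  numbers, the step case is typically addressed as ``@{command
+  "case"}~@{text "(Suc n)"}'', which fixes the local parameter @{text
+  n} and assumes the corresponding hypotheses under the name @{text
+  Suc}.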
+
+  \medskip It is important to note that proper use of @{command
+  "case"} does not provide means to peek at the current goal state,
+  which is not directly observable in Isar!  Nonetheless, goal
+  refinement commands do provide named cases @{text "goal\<^sub>i"}
+  for each subgoal @{text "i = 1, \<dots>, n"} of the resulting goal state.
+  Using this extra feature requires great care, because some bits of
+  the internal tactical machinery intrude into the proof text.  In
+  particular, parameter names stemming from the leftovers of automated
+  reasoning tools are usually quite unpredictable.
+
+  Under normal circumstances, the text of cases emerges from standard
+  elimination or induction rules, which in turn are derived from
+  previous theory specifications in a canonical way (say from
+  @{command "inductive"} definitions).
+
+  \medskip Proper cases are only available if both the proof method
+  and the rules involved support this.  By using appropriate
+  attributes, case names, conclusions, and parameters may also be
+  declared by hand.  Thus variant versions of rules that have been
+  derived manually become ready to use in advanced case analysis
+  later.
+
+  @{rail \<open>
+    @@{command case} (caseref | '(' caseref (('_' | @{syntax name}) *) ')')
+    ;
+    caseref: nameref attributes?
+    ;
+
+    @@{attribute case_names} ((@{syntax name} ( '[' (('_' | @{syntax name}) +) ']' ) ? ) +)
+    ;
+    @@{attribute case_conclusion} @{syntax name} (@{syntax name} * )
+    ;
+    @@{attribute params} ((@{syntax name} * ) + @'and')
+    ;
+    @@{attribute consumes} @{syntax int}?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "case"}~@{text "(c x\<^sub>1 \<dots> x\<^sub>m)"} invokes a named local
+  context @{text "c: x\<^sub>1, \<dots>, x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>m"}, as provided by an
+  appropriate proof method (such as @{method_ref cases} and
+  @{method_ref induct}).  The command ``@{command "case"}~@{text "(c
+  x\<^sub>1 \<dots> x\<^sub>m)"}'' abbreviates ``@{command "fix"}~@{text "x\<^sub>1 \<dots>
+  x\<^sub>m"}~@{command "assume"}~@{text "c: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}''.
+
+  \item @{command "print_cases"} prints all local contexts of the
+  current state, using Isar proof language notation.
+  
+  \item @{attribute case_names}~@{text "c\<^sub>1 \<dots> c\<^sub>k"} declares names for
+  the local contexts of premises of a theorem; @{text "c\<^sub>1, \<dots>, c\<^sub>k"}
+  refers to the \emph{prefix} of the list of premises. Each of the
+  cases @{text "c\<^sub>i"} can be of the form @{text "c[h\<^sub>1 \<dots> h\<^sub>n]"} where
+  the @{text "h\<^sub>1 \<dots> h\<^sub>n"} are the names of the hypotheses in case @{text "c\<^sub>i"}
+  from left to right.
+  
+  \item @{attribute case_conclusion}~@{text "c d\<^sub>1 \<dots> d\<^sub>k"} declares
+  names for the conclusions of a named premise @{text c}; here @{text
+  "d\<^sub>1, \<dots>, d\<^sub>k"} refers to the prefix of arguments of a logical formula
+  built by nesting a binary connective (e.g.\ @{text "\<or>"}).
+  
+  Note that proof methods such as @{method induct} and @{method
+  coinduct} already provide a default name for the conclusion as a
+  whole.  The need to name subformulas only arises with cases that
+  split into several sub-cases, as in common co-induction rules.
+
+  \item @{attribute params}~@{text "p\<^sub>1 \<dots> p\<^sub>m \<AND> \<dots> q\<^sub>1 \<dots> q\<^sub>n"} renames
+  the innermost parameters of premises @{text "1, \<dots>, n"} of some
+  theorem.  An empty list of names may be given to skip positions,
+  leaving the present parameters unchanged.
+  
+  Note that the default usage of case rules does \emph{not} directly
+  expose parameters to the proof context.
+  
+  \item @{attribute consumes}~@{text n} declares the number of ``major
+  premises'' of a rule, i.e.\ the number of facts to be consumed when
+  it is applied by an appropriate proof method.  The default value of
+  @{attribute consumes} is @{text "n = 1"}, which is appropriate for
+  the usual kind of cases and induction rules for inductive sets (cf.\
+  \secref{sec:hol-inductive}).  Rules without any @{attribute
+  consumes} declaration given are treated as if @{attribute
+  consumes}~@{text 0} had been specified.
+
+  A negative @{text n} is interpreted relative to the total number
+  of premises of the rule in the target context.  Thus its absolute
+  value specifies the remaining number of premises, after subtracting
+  the prefix of major premises as indicated above. This form of
+  declaration has the technical advantage of being stable under more
+  morphisms, notably those that export the result from a nested
+  @{command_ref context} with additional assumptions.
+
+  Note that explicit @{attribute consumes} declarations are only
+  rarely needed; this is already taken care of automatically by the
+  higher-level @{attribute cases}, @{attribute induct}, and
+  @{attribute coinduct} declarations.
+
+  \end{description}
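+
+  For example, a manually derived elimination rule may be prepared for
+  structured case analysis by combining these attributes, say as
+  @{text "[case_names left right, consumes 1]"} within a @{command
+  "lemmas"} declaration (the rule and the case names are hypothetical
+  here).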
+*}
+
+
+subsection {* Proof methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def cases} & : & @{text method} \\
+    @{method_def induct} & : & @{text method} \\
+    @{method_def induction} & : & @{text method} \\
+    @{method_def coinduct} & : & @{text method} \\
+  \end{matharray}
+
+  The @{method cases}, @{method induct}, @{method induction},
+  and @{method coinduct}
+  methods provide a uniform interface to common proof techniques over
+  datatypes, inductive predicates (or sets), recursive functions etc.
+  The corresponding rules may be specified and instantiated in a
+  casual manner.  Furthermore, these methods provide named local
+  contexts that may be invoked via the @{command "case"} proof command
+  within the subsequent proof text.  This accommodates compact proof
+  texts even when reasoning about large specifications.
+
+  The @{method induct} method also provides some additional
+  infrastructure in order to be applicable to structured statements
+  (either using explicit meta-level connectives, or including facts
+  and parameters separately).  This avoids cumbersome encoding of
+  ``strengthened'' inductive statements within the object-logic.
+
+  Method @{method induction} differs from @{method induct} only in
+  the names of the facts in the local context invoked by the @{command "case"}
+  command.
+
+  @{rail \<open>
+    @@{method cases} ('(' 'no_simp' ')')? \<newline>
+      (@{syntax insts} * @'and') rule?
+    ;
+    (@@{method induct} | @@{method induction})
+      ('(' 'no_simp' ')')? (definsts * @'and') \<newline> arbitrary? taking? rule?
+    ;
+    @@{method coinduct} @{syntax insts} taking rule?
+    ;
+
+    rule: ('type' | 'pred' | 'set') ':' (@{syntax nameref} +) | 'rule' ':' (@{syntax thmref} +)
+    ;
+    definst: @{syntax name} ('==' | '\<equiv>') @{syntax term} | '(' @{syntax term} ')' | @{syntax inst}
+    ;
+    definsts: ( definst * )
+    ;
+    arbitrary: 'arbitrary' ':' ((@{syntax term} * ) @'and' +)
+    ;
+    taking: 'taking' ':' @{syntax insts}
+  \<close>}
+
+  \begin{description}
+
+  \item @{method cases}~@{text "insts R"} applies method @{method
+  rule} with an appropriate case distinction theorem, instantiated to
+  the subjects @{text insts}.  Symbolic case names are bound according
+  to the rule's local contexts.
+
+  The rule is determined as follows, according to the facts and
+  arguments passed to the @{method cases} method:
+
+  \medskip
+  \begin{tabular}{llll}
+    facts           &                 & arguments   & rule \\\hline
+                    & @{method cases} &             & classical case split \\
+                    & @{method cases} & @{text t}   & datatype exhaustion (type of @{text t}) \\
+    @{text "\<turnstile> A t"} & @{method cases} & @{text "\<dots>"} & inductive predicate/set elimination (of @{text A}) \\
+    @{text "\<dots>"}     & @{method cases} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
+  \end{tabular}
+  \medskip
+
+  Several instantiations may be given, referring to the \emph{suffix}
+  of premises of the case rule; within each premise, the \emph{prefix}
+  of variables is instantiated.  In most situations, only a single
+  term needs to be specified; this refers to the first variable of the
+  last premise (it is usually the same for all cases).  The @{text
+  "(no_simp)"} option can be used to disable pre-simplification of
+  cases (see the description of @{method induct} below for details).
+
+  \item @{method induct}~@{text "insts R"} and
+  @{method induction}~@{text "insts R"} are analogous to the
+  @{method cases} method, but refer to induction rules, which are
+  determined as follows:
+
+  \medskip
+  \begin{tabular}{llll}
+    facts           &                  & arguments            & rule \\\hline
+                    & @{method induct} & @{text "P x"}        & datatype induction (type of @{text x}) \\
+    @{text "\<turnstile> A x"} & @{method induct} & @{text "\<dots>"}          & predicate/set induction (of @{text A}) \\
+    @{text "\<dots>"}     & @{method induct} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
+  \end{tabular}
+  \medskip
+  
+  Several instantiations may be given, each referring to some part of
+  a mutual inductive definition or datatype --- only related partial
+  induction rules may be used together, though.  Any of the lists of
+  terms @{text "P, x, \<dots>"} refers to the \emph{suffix} of variables
+  present in the induction rule.  This enables the writer to specify
+  only induction variables, or both predicates and variables, for
+  example.
+
+  Instantiations may be definitional: equations @{text "x \<equiv> t"}
+  introduce local definitions, which are inserted into the claim and
+  discharged after applying the induction rule.  Equalities reappear
+  in the inductive cases, but have been transformed according to the
+  induction principle being involved here.  In order to achieve
+  practically useful induction hypotheses, some variables occurring in
+  @{text t} need to be fixed (see below).  Instantiations of the form
+  @{text t}, where @{text t} is not a variable, are taken as a
+  shorthand for \mbox{@{text "x \<equiv> t"}}, where @{text x} is a fresh
+  variable. If this is not intended, @{text t} has to be enclosed in
+  parentheses.  By default, the equalities generated by definitional
+  instantiations are pre-simplified using a specific set of rules,
+  usually consisting of distinctness and injectivity theorems for
+  datatypes. This pre-simplification may cause some of the parameters
+  of an inductive case to disappear, or may even completely delete
+  some of the inductive cases, if one of the equalities occurring in
+  their premises can be simplified to @{text False}.  The @{text
+  "(no_simp)"} option can be used to disable pre-simplification.
+  Additional rules to be used in pre-simplification can be declared
+  using the @{attribute_def induct_simp} attribute.
+
+  The optional ``@{text "arbitrary: x\<^sub>1 \<dots> x\<^sub>m"}''
+  specification generalizes variables @{text "x\<^sub>1, \<dots>,
+  x\<^sub>m"} of the original goal before applying induction.  One can
+  separate variables by ``@{text "and"}'' to generalize them in other
+  goals than the first.  Thus induction hypotheses may become
+  sufficiently general to get the proof through.  Together with
+  definitional instantiations, one may effectively perform induction
+  over expressions of a certain structure.
+  
+  The optional ``@{text "taking: t\<^sub>1 \<dots> t\<^sub>n"}''
+  specification provides additional instantiations of a prefix of
+  pending variables in the rule.  Such schematic induction rules
+  rarely occur in practice, though.
+
+  \item @{method coinduct}~@{text "inst R"} is analogous to the
+  @{method induct} method, but refers to coinduction rules, which are
+  determined as follows:
+
+  \medskip
+  \begin{tabular}{llll}
+    goal          &                    & arguments & rule \\\hline
+                  & @{method coinduct} & @{text x} & type coinduction (type of @{text x}) \\
+    @{text "A x"} & @{method coinduct} & @{text "\<dots>"} & predicate/set coinduction (of @{text A}) \\
+    @{text "\<dots>"}   & @{method coinduct} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
+  \end{tabular}
+  
+  Coinduction is the dual of induction.  Induction essentially
+  eliminates @{text "A x"} towards a generic result @{text "P x"},
+  while coinduction introduces @{text "A x"} starting with @{text "B
+  x"}, for a suitable ``bisimulation'' @{text B}.  The cases of a
+  coinduct rule are typically named after the predicates or sets being
+  covered, while the conclusions consist of several alternatives being
+  named after the individual destructor patterns.
+  
+  The given instantiation refers to the \emph{suffix} of variables
+  occurring in the rule's major premise, or conclusion if unavailable.
+  An additional ``@{text "taking: t\<^sub>1 \<dots> t\<^sub>n"}''
+  specification may be required in order to specify the bisimulation
+  to be used in the coinduction step.
+
+  \end{description}
+
+  The above methods produce named local contexts, as determined by the
+  instantiated rule as given in the text.  Beyond that, the @{method
+  induct} and @{method coinduct} methods guess further instantiations
+  from the goal specification itself.  Any persisting unresolved
+  schematic variables of the resulting rule will render the
+  corresponding case invalid.  The term binding @{variable ?case} for
+  the conclusion will be provided with each case, provided that term
+  is fully specified.
+
+  The @{command "print_cases"} command prints all named cases present
+  in the current proof state.
+
+  \medskip Despite the additional infrastructure, both @{method cases}
+  and @{method coinduct} merely apply a certain rule, after
+  instantiation, while conforming to the usual way of monotonic
+  natural deduction: the context of a structured statement @{text
+  "\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<dots>"}
+  reappears unchanged after the case split.
+
+  The @{method induct} method is fundamentally different in this
+  respect: the meta-level structure is passed through the
+  ``recursive'' course involved in the induction.  Thus the original
+  statement is basically replaced by separate copies, corresponding to
+  the induction hypotheses and conclusion; the original goal context
+  is no longer available.  Thus local assumptions, fixed parameters
+  and definitions effectively participate in the inductive rephrasing
+  of the original statement.
+
+  In @{method induct} proofs, local assumptions introduced by cases are split
+  into two different kinds: @{text hyps} stemming from the rule and
+  @{text prems} from the goal statement.  This is reflected in the
+  extracted cases accordingly, so invoking ``@{command "case"}~@{text
+  c}'' will provide separate facts @{text c.hyps} and @{text c.prems},
+  as well as fact @{text c} to hold the all-inclusive list.
+
+  In @{method induction} proofs, local assumptions introduced by cases are
+  split into three different kinds: @{text IH}, the induction hypotheses,
+  @{text hyps}, the remaining hypotheses stemming from the rule, and
+  @{text prems}, the assumptions from the goal statement. The names are
+  @{text c.IH}, @{text c.hyps} and @{text c.prems}, as above.
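+
+  For example, in Isabelle/HOL a proof over lists might open with
+  ``@{command "proof"}~@{text "(induct xs arbitrary: n)"}'' and
+  address the step case via ``@{command "case"}~@{text "(Cons x xs)"}'':
+  the facts @{text Cons.hyps} (with the induction hypothesis
+  generalized over @{text n}) and @{text Cons.prems} become available,
+  while method @{method induction} would provide the induction
+  hypothesis as @{text Cons.IH} instead.  (The particular goal and
+  variable names are hypothetical.)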
+
+
+  \medskip Facts presented to either method are consumed according to
+  the number of ``major premises'' of the rule involved, which is
+  usually 0 for plain cases and induction rules of datatypes etc.\ and
+  1 for rules of inductive predicates or sets and the like.  The
+  remaining facts are inserted into the goal verbatim before the
+  actual @{text cases}, @{text induct}, or @{text coinduct} rule is
+  applied.
+*}
+
+
+subsection {* Declaring rules *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_induct_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def cases} & : & @{text attribute} \\
+    @{attribute_def induct} & : & @{text attribute} \\
+    @{attribute_def coinduct} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{attribute cases} spec
+    ;
+    @@{attribute induct} spec
+    ;
+    @@{attribute coinduct} spec
+    ;
+
+    spec: (('type' | 'pred' | 'set') ':' @{syntax nameref}) | 'del'
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "print_induct_rules"} prints cases and induct rules
+  for predicates (or sets) and types of the current context.
+
+  \item @{attribute cases}, @{attribute induct}, and @{attribute
+  coinduct} (as attributes) declare rules for reasoning about
+  (co)inductive predicates (or sets) and types, using the
+  corresponding methods of the same name.  Certain definitional
+  packages of object-logics usually declare emerging cases and
+  induction rules as expected, so users rarely need to intervene.
+
+  Rules may be deleted via the @{text "del"} specification, which
+  covers all of the @{text "type"}/@{text "pred"}/@{text "set"}
+  sub-categories simultaneously.  For example, @{attribute
+  cases}~@{text del} removes any @{attribute cases} rules declared for
+  some type, predicate, or set.
+  
+  Manual rule declarations usually refer to the @{attribute
+  case_names} and @{attribute params} attributes to adjust names of
+  cases and parameters of a rule; the @{attribute consumes}
+  declaration is taken care of automatically: @{attribute
+  consumes}~@{text 0} is specified for ``type'' rules and @{attribute
+  consumes}~@{text 1} for ``predicate'' / ``set'' rules.
+
+  \end{description}
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Quick_Reference.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,231 @@
+theory Quick_Reference
+imports Base Main
+begin
+
+chapter {* Isabelle/Isar quick reference \label{ap:refcard} *}
+
+section {* Proof commands *}
+
+subsection {* Primitives and basic syntax *}
+
+text {*
+  \begin{tabular}{ll}
+    @{command "fix"}~@{text x} & augment context by @{text "\<And>x. \<box>"} \\
+    @{command "assume"}~@{text "a: \<phi>"} & augment context by @{text "\<phi> \<Longrightarrow> \<box>"} \\
+    @{command "then"} & indicate forward chaining of facts \\
+    @{command "have"}~@{text "a: \<phi>"} & prove local result \\
+    @{command "show"}~@{text "a: \<phi>"} & prove local result, refining some goal \\
+    @{command "using"}~@{text a} & indicate use of additional facts \\
+    @{command "unfolding"}~@{text a} & unfold definitional equations \\
+    @{command "proof"}~@{text "m\<^sub>1"}~\dots~@{command "qed"}~@{text "m\<^sub>2"} & indicate proof structure and refinements \\
+    @{command "{"}~@{text "\<dots>"}~@{command "}"} & indicate explicit blocks \\
+    @{command "next"} & switch blocks \\
+    @{command "note"}~@{text "a = b"} & reconsider facts \\
+    @{command "let"}~@{text "p = t"} & abbreviate terms by higher-order matching \\
+    @{command "write"}~@{text "c  (mx)"} & declare local mixfix syntax \\
+  \end{tabular}
+
+  \medskip
+
+  \begin{tabular}{rcl}
+    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\
+    & @{text "|"} & @{text "prfx\<^sup>*"}~@{command "done"} \\
+    @{text prfx} & = & @{command "apply"}~@{text method} \\
+    & @{text "|"} & @{command "using"}~@{text "facts"} \\
+    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
+    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
+    & @{text "|"} & @{command "next"} \\
+    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
+    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
+    & @{text "|"} & @{command "write"}~@{text "name (mixfix)"} \\
+    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
+    & @{text "|"} & @{command "assume"}~@{text "name: props"} \\
+    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
+    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
+    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
+  \end{tabular}
+*}
+
+
+subsection {* Abbreviations and synonyms *}
+
+text {*
+  \begin{tabular}{rcl}
+    @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} & @{text "\<equiv>"} &
+      @{command "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"} \\
+    @{command ".."} & @{text "\<equiv>"} & @{command "by"}~@{text rule} \\
+    @{command "."} & @{text "\<equiv>"} & @{command "by"}~@{text this} \\
+    @{command "hence"} & @{text "\<equiv>"} & @{command "then"}~@{command "have"} \\
+    @{command "thus"} & @{text "\<equiv>"} & @{command "then"}~@{command "show"} \\
+    @{command "from"}~@{text a} & @{text "\<equiv>"} & @{command "note"}~@{text a}~@{command "then"} \\
+    @{command "with"}~@{text a} & @{text "\<equiv>"} & @{command "from"}~@{text "a \<AND> this"} \\
+    @{command "from"}~@{text this} & @{text "\<equiv>"} & @{command "then"} \\
+    @{command "from"}~@{text this}~@{command "have"} & @{text "\<equiv>"} & @{command "hence"} \\
+    @{command "from"}~@{text this}~@{command "show"} & @{text "\<equiv>"} & @{command "thus"} \\
+  \end{tabular}
+*}
+
+
+subsection {* Derived elements *}
+
+text {*
+  \begin{tabular}{rcl}
+    @{command "also"}@{text "\<^sub>0"} & @{text "\<approx>"} &
+      @{command "note"}~@{text "calculation = this"} \\
+    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & @{text "\<approx>"} &
+      @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\
+    @{command "finally"} & @{text "\<approx>"} &
+      @{command "also"}~@{command "from"}~@{text calculation} \\[0.5ex]
+    @{command "moreover"} & @{text "\<approx>"} &
+      @{command "note"}~@{text "calculation = calculation this"} \\
+    @{command "ultimately"} & @{text "\<approx>"} &
+      @{command "moreover"}~@{command "from"}~@{text calculation} \\[0.5ex]
+    @{command "presume"}~@{text "a: \<phi>"} & @{text "\<approx>"} &
+      @{command "assume"}~@{text "a: \<phi>"} \\
+    @{command "def"}~@{text "a: x \<equiv> t"} & @{text "\<approx>"} &
+      @{command "fix"}~@{text x}~@{command "assume"}~@{text "a: x \<equiv> t"} \\
+    @{command "obtain"}~@{text "x \<WHERE> a: \<phi>"} & @{text "\<approx>"} &
+      @{text "\<dots>"}~@{command "fix"}~@{text x}~@{command "assume"}~@{text "a: \<phi>"} \\
+    @{command "case"}~@{text c} & @{text "\<approx>"} &
+      @{command "fix"}~@{text x}~@{command "assume"}~@{text "c: \<phi>"} \\
+    @{command "sorry"} & @{text "\<approx>"} &
+      @{command "by"}~@{text cheating} \\
+  \end{tabular}
+*}
+
+
+subsection {* Diagnostic commands *}
+
+text {*
+  \begin{tabular}{ll}
+    @{command "print_state"} & print current state \\
+    @{command "thm"}~@{text a} & print fact \\
+    @{command "prop"}~@{text \<phi>} & print proposition \\
+    @{command "term"}~@{text t} & print term \\
+    @{command "typ"}~@{text \<tau>} & print type \\
+  \end{tabular}
+*}
+
+
+section {* Proof methods *}
+
+text {*
+  \begin{tabular}{ll}
+    \multicolumn{2}{l}{\textbf{Single steps (forward-chaining facts)}} \\[0.5ex]
+    @{method assumption} & apply some assumption \\
+    @{method this} & apply current facts \\
+    @{method rule}~@{text a} & apply some rule  \\
+    @{method rule} & apply standard rule (default for @{command "proof"}) \\
+    @{method contradiction} & apply @{text "\<not>"} elimination rule (any order) \\
+    @{method cases}~@{text t} & case analysis (provides cases) \\
+    @{method induct}~@{text x} & proof by induction (provides cases) \\[2ex]
+
+    \multicolumn{2}{l}{\textbf{Repeated steps (inserting facts)}} \\[0.5ex]
+    @{method "-"} & no rules \\
+    @{method intro}~@{text a} & introduction rules \\
+    @{method intro_classes} & class introduction rules \\
+    @{method elim}~@{text a} & elimination rules \\
+    @{method unfold}~@{text a} & definitional rewrite rules \\[2ex]
+
+    \multicolumn{2}{l}{\textbf{Automated proof tools (inserting facts)}} \\[0.5ex]
+    @{method iprover} & intuitionistic proof search \\
+    @{method blast}, @{method fast} & Classical Reasoner \\
+    @{method simp}, @{method simp_all} & Simplifier (+ Splitter) \\
+    @{method auto}, @{method force} & Simplifier + Classical Reasoner \\
+    @{method arith} & Arithmetic procedures \\
+  \end{tabular}
+*}
+
+
+section {* Attributes *}
+
+text {*
+  \begin{tabular}{ll}
+    \multicolumn{2}{l}{\textbf{Rules}} \\[0.5ex]
+    @{attribute OF}~@{text a} & rule resolved with facts (skipping ``@{text _}'') \\
+    @{attribute of}~@{text t} & rule instantiated with terms (skipping ``@{text _}'') \\
+    @{attribute "where"}~@{text "x = t"} & rule instantiated with terms, by variable name \\
+    @{attribute symmetric} & resolution with symmetry rule \\
+    @{attribute THEN}~@{text b} & resolution with another rule \\
+    @{attribute rule_format} & result put into standard rule format \\
+    @{attribute elim_format} & destruct rule turned into elimination rule format \\[1ex]
+
+    \multicolumn{2}{l}{\textbf{Declarations}} \\[0.5ex]
+    @{attribute simp} & Simplifier rule \\
+    @{attribute intro}, @{attribute elim}, @{attribute dest} & Pure or Classical Reasoner rule \\
+    @{attribute iff} & Simplifier + Classical Reasoner rule \\
+    @{attribute split} & case split rule \\
+    @{attribute trans} & transitivity rule \\
+    @{attribute sym} & symmetry rule \\
+  \end{tabular}
+*}
+
+
+section {* Rule declarations and methods *}
+
+text {*
+  \begin{tabular}{l|lllll}
+      & @{method rule} & @{method iprover} & @{method blast} & @{method simp} & @{method auto} \\
+      &                &                   & @{method fast} & @{method simp_all} & @{method force} \\
+    \hline
+    @{attribute Pure.elim}@{text "!"} @{attribute Pure.intro}@{text "!"}
+      & @{text "\<times>"}    & @{text "\<times>"} \\
+    @{attribute Pure.elim} @{attribute Pure.intro}
+      & @{text "\<times>"}    & @{text "\<times>"} \\
+    @{attribute elim}@{text "!"} @{attribute intro}@{text "!"}
+      & @{text "\<times>"}    &                    & @{text "\<times>"}          &                     & @{text "\<times>"} \\
+    @{attribute elim} @{attribute intro}
+      & @{text "\<times>"}    &                    & @{text "\<times>"}          &                     & @{text "\<times>"} \\
+    @{attribute iff}
+      & @{text "\<times>"}    &                    & @{text "\<times>"}          & @{text "\<times>"}         & @{text "\<times>"} \\
+    @{attribute iff}@{text "?"}
+      & @{text "\<times>"} \\
+    @{attribute elim}@{text "?"} @{attribute intro}@{text "?"}
+      & @{text "\<times>"} \\
+    @{attribute simp}
+      &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
+    @{attribute cong}
+      &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
+    @{attribute split}
+      &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
+  \end{tabular}
+*}
+
+
+section {* Emulating tactic scripts *}
+
+subsection {* Commands *}
+
+text {*
+  \begin{tabular}{ll}
+    @{command "apply"}~@{text m} & apply proof method at initial position \\
+    @{command "apply_end"}~@{text m} & apply proof method near terminal position \\
+    @{command "done"} & complete proof \\
+    @{command "defer"}~@{text n} & move subgoal to end \\
+    @{command "prefer"}~@{text n} & move subgoal to beginning \\
+    @{command "back"} & backtrack last command \\
+  \end{tabular}
+*}
+
+
+subsection {* Methods *}
+
+text {*
+  \begin{tabular}{ll}
+    @{method rule_tac}~@{text insts} & resolution (with instantiation) \\
+    @{method erule_tac}~@{text insts} & elim-resolution (with instantiation) \\
+    @{method drule_tac}~@{text insts} & destruct-resolution (with instantiation) \\
+    @{method frule_tac}~@{text insts} & forward-resolution (with instantiation) \\
+    @{method cut_tac}~@{text insts} & insert facts (with instantiation) \\
+    @{method thin_tac}~@{text \<phi>} & delete assumptions \\
+    @{method subgoal_tac}~@{text \<phi>} & new claims \\
+    @{method rename_tac}~@{text x} & rename innermost goal parameters \\
+    @{method rotate_tac}~@{text n} & rotate assumptions of goal \\
+    @{method tactic}~@{text "text"} & arbitrary ML tactic \\
+    @{method case_tac}~@{text t} & exhaustion (datatypes) \\
+    @{method induct_tac}~@{text x} & induction (datatypes) \\
+    @{method ind_cases}~@{text t} & exhaustion + simplification (inductive predicates) \\
+  \end{tabular}
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Spec.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1347 @@
+theory Spec
+imports Base Main
+begin
+
+chapter {* Specifications *}
+
+text {*
+  The Isabelle/Isar theory format integrates specifications and
+  proofs, supporting interactive development with unlimited undo
+  operation.  There is an integrated document preparation system (see
+  \chref{ch:document-prep}), for typesetting formal developments
+  together with informal text.  The resulting hyper-linked PDF
+  documents can be used both for WWW presentation and printed copies.
+
+  The Isar proof language (see \chref{ch:proofs}) is embedded into the
+  theory language as a proper sub-language.  Proof mode is entered by
+  stating some @{command theorem} or @{command lemma} at the theory
+  level, and left again with the final conclusion (e.g.\ via @{command
+  qed}).  Some theory specification mechanisms also require a proof,
+  such as @{command typedef} in HOL, which demands non-emptiness of
+  the representing sets.
+*}
+
+
+section {* Defining theories \label{sec:begin-thy} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "theory"} & : & @{text "toplevel \<rightarrow> theory"} \\
+    @{command_def (global) "end"} & : & @{text "theory \<rightarrow> toplevel"} \\
+  \end{matharray}
+
+  Isabelle/Isar theories are defined via theory files, which may
+  contain both specifications and proofs; occasionally definitional
+  mechanisms also require some explicit proof.  The theory body may be
+  sub-structured by means of \emph{local theory targets}, such as
+  @{command "locale"} and @{command "class"}.
+
+  The first proper command of a theory is @{command "theory"}, which
+  indicates imports of previous theories and optional dependencies on
+  other source files (usually in ML).  Just preceding the initial
+  @{command "theory"} command there may be an optional @{command
+  "header"} declaration, which is only relevant to document
+  preparation: see also the other section markup commands in
+  \secref{sec:markup}.
+
+  A theory is concluded by a final @{command (global) "end"} command,
+  one that does not belong to a local theory target.  No further
+  commands may follow such a global @{command (global) "end"},
+  although some user-interfaces might pretend that trailing input is
+  admissible.
+
+  @{rail \<open>
+    @@{command theory} @{syntax name} imports keywords? \<newline> @'begin'
+    ;
+    imports: @'imports' (@{syntax name} +)
+    ;
+    keywords: @'keywords' (keyword_decls + @'and')
+    ;
+    keyword_decls: (@{syntax string} +)
+      ('::' @{syntax name} @{syntax tags})? ('==' @{syntax name})?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "theory"}~@{text "A \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"}
+  starts a new theory @{text A} based on the merge of existing
+  theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}.  Due to the possibility to import more
+  than one ancestor, the resulting theory structure of an Isabelle
+  session forms a directed acyclic graph (DAG).  Isabelle takes care
+  that sources contributing to the development graph are always
+  up-to-date: changed files are automatically rechecked whenever a
+  theory header specification is processed.
+
+  The optional @{keyword_def "keywords"} specification declares outer
+  syntax (\chref{ch:outer-syntax}) that is introduced in this theory
+  later on (rare in end-user applications).  Both minor keywords and
+  major keywords of the Isar command language need to be specified, in
+  order to make parsing of proof documents work properly.  Command
+  keywords need to be classified according to their structural role in
+  the formal text.  Examples may be seen in the Isabelle/HOL sources
+  themselves, such as @{keyword "keywords"}~@{verbatim "\"typedef\""}
+  @{text ":: thy_goal"} or @{keyword "keywords"}~@{verbatim
+  "\"datatype\""} @{text ":: thy_decl"} for theory-level declarations
+  with and without proof, respectively.  Additional @{syntax tags}
+  provide defaults for document preparation (\secref{sec:tags}).
+
+  It is possible to specify an alternative completion via @{text "==
+  text"}, while the default is the corresponding keyword name.
+  
+  \item @{command (global) "end"} concludes the current theory
+  definition.  Note that some other commands, e.g.\ local theory
+  targets @{command locale} or @{command class} may involve a
+  @{keyword "begin"} that needs to be matched by @{command (local)
+  "end"}, according to the usual rules for nested blocks.
+
+  \end{description}
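+
+  For example, a minimal theory file may be sketched as follows
+  (assuming Isabelle/HOL's @{text Main} as the only import, with a
+  hypothetical theory name @{text A}):
+
+  \begin{quote}
+  @{command "theory"}~@{text "A \<IMPORTS> Main \<BEGIN>"} \\
+  \quad @{text "\<dots>"} \\
+  @{command (global) "end"}
+  \end{quote}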
+*}
+
+
+section {* Local theory targets \label{sec:target} *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "context"} & : & @{text "theory \<rightarrow> local_theory"} \\
+    @{command_def (local) "end"} & : & @{text "local_theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  A local theory target is a context managed separately within the
+  enclosing theory.  Contexts may introduce parameters (fixed
+  variables) and assumptions (hypotheses).  Definitions and theorems
+  depending on the context may be added incrementally later on.
+
+  \emph{Named contexts} refer to locales (cf.\ \secref{sec:locale}) or
+  type classes (cf.\ \secref{sec:class}); the name ``@{text "-"}''
+  signifies the global theory context.
+
+  \emph{Unnamed contexts} may introduce additional parameters and
+  assumptions, and results produced in the context are generalized
+  accordingly.  Such auxiliary contexts may be nested within other
+  targets, like @{command "locale"}, @{command "class"}, @{command
+  "instantiation"}, @{command "overloading"}.
+
+  @{rail \<open>
+    @@{command context} @{syntax nameref} @'begin'
+    ;
+    @@{command context} @{syntax_ref "includes"}? (@{syntax context_elem} * ) @'begin'
+    ;
+    @{syntax_def target}: '(' @'in' @{syntax nameref} ')'
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "context"}~@{text "c \<BEGIN>"} opens a named
+  context, by recommencing an existing locale or class @{text c}.
+  Note that locale and class definitions also allow the @{keyword
+  "begin"} keyword to be included, in order to continue the local
+  theory immediately after the initial specification.
+
+  \item @{command "context"}~@{text "bundles elements \<BEGIN>"} opens
+  an unnamed context, by extending the enclosing global or local
+  theory target by the given declaration bundles (\secref{sec:bundle})
+  and context elements (@{text "\<FIXES>"}, @{text "\<ASSUMES>"}
+  etc.).  This means any results stemming from definitions and proofs
+  in the extended context will be exported into the enclosing target
+  by lifting over extra parameters and premises.
+  
+  \item @{command (local) "end"} concludes the current local theory,
+  according to the nesting of contexts.  Note that a global @{command
+  (global) "end"} has a different meaning: it concludes the theory
+  itself (\secref{sec:begin-thy}).
+  
+  \item @{text "("}@{keyword_def "in"}~@{text "c)"} given after any
+  local theory command specifies an immediate target, e.g.\
+  ``@{command "definition"}~@{text "(\<IN> c) \<dots>"}'' or ``@{command
+  "theorem"}~@{text "(\<IN> c) \<dots>"}''.  This works both in a local or
+  global theory context; the current target context will be suspended
+  for this command only.  Note that ``@{text "(\<IN> -)"}'' will
+  always produce a global result independently of the current target
+  context.
+
+  \end{description}
+
+  The exact meaning of results produced within a local theory context
+  depends on the underlying target infrastructure (locale, type class
+  etc.).  The general idea is as follows, considering a context named
+  @{text c} with parameter @{text x} and assumption @{text "A[x]"}.
+  
+  Definitions are exported by introducing a global version with
+  additional arguments; a syntactic abbreviation links the long form
+  with the abstract version of the target context.  For example,
+  @{text "a \<equiv> t[x]"} becomes @{text "c.a ?x \<equiv> t[?x]"} at the theory
+  level (for arbitrary @{text "?x"}), together with a local
+  abbreviation @{text "c \<equiv> c.a x"} in the target context (for the
+  fixed parameter @{text x}).
+
+  Theorems are exported by discharging the assumptions and
+  generalizing the parameters of the context.  For example, @{text "a:
+  B[x]"} becomes @{text "c.a: A[?x] \<Longrightarrow> B[?x]"}, again for arbitrary
+  @{text "?x"}.
+
+  \medskip The Isabelle/HOL library contains numerous applications of
+  locales and classes, e.g.\ see @{file "~~/src/HOL/Algebra"}.  An
+  example of an unnamed auxiliary context is given in @{file
+  "~~/src/HOL/Isar_Examples/Group_Context.thy"}.  *}
+
+
+section {* Bundled declarations \label{sec:bundle} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "bundle"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "print_bundles"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
+    @{command_def "include"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "including"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
+    @{keyword_def "includes"} & : & syntax \\
+  \end{matharray}
+
+  The outer syntax of fact expressions (\secref{sec:syn-att}) involves
+  theorems and attributes, which are evaluated in the context and
+  applied to it.  Attributes may declare theorems to the context, as
+  in @{text "this_rule [intro] that_rule [elim]"} for example.
+  Configuration options (\secref{sec:config}) are special declaration
+  attributes that operate on the context without a theorem, as in
+  @{text "[[show_types = false]]"} for example.
+
+  Expressions of this form may be defined as \emph{bundled
+  declarations} in the context, and included in other situations later
+  on.  Including declaration bundles augments a local context casually
+  without logical dependencies, which is in contrast to locales and
+  locale interpretation (\secref{sec:locale}).
+
+  @{rail \<open>
+    @@{command bundle} @{syntax target}? \<newline>
+    @{syntax name} '=' @{syntax thmrefs} (@'for' (@{syntax vars} + @'and'))?
+    ;
+    (@@{command include} | @@{command including}) (@{syntax nameref}+)
+    ;
+    @{syntax_def "includes"}: @'includes' (@{syntax nameref}+)
+  \<close>}
+
+  \begin{description}
+
+  \item @{command bundle}~@{text "b = decls"} defines a bundle of
+  declarations in the current context.  The RHS is similar to the one
+  of the @{command declare} command.  Bundles defined in local theory
+  targets are subject to transformations via morphisms, when moved
+  into different application contexts; this works analogously to any
+  other local theory specification.
+
+  \item @{command print_bundles} prints the named bundles that are
+  available in the current context.
+
+  \item @{command include}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} includes the declarations
+  from the given bundles into the current proof body context.  This is
+  analogous to @{command "note"} (\secref{sec:proof-facts}) with the
+  expanded bundles.
+
+  \item @{command including} is similar to @{command include}, but
+  works in proof refinement (backward mode).  This is analogous to
+  @{command "using"} (\secref{sec:proof-facts}) with the expanded
+  bundles.
+
+  \item @{keyword "includes"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is similar to
+  @{command include}, but works in situations where a specification
+  context is constructed, notably for @{command context} and long
+  statements of @{command theorem} etc.
+
+  \end{description}
+
+  Here is an artificial example of bundling various configuration
+  options: *}
+
+bundle trace = [[simp_trace, linarith_trace, metis_trace, smt_trace]]
+
+lemma "x = x"
+  including trace by metis
+
+
+section {* Basic specification elements \label{sec:basic-spec} *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "axiomatization"} & : & @{text "theory \<rightarrow> theory"} & (axiomatic!) \\
+    @{command_def "definition"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{attribute_def "defn"} & : & @{text attribute} \\
+    @{command_def "print_defn_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
+    @{command_def "abbreviation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "print_abbrevs"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
+  \end{matharray}
+
+  These specification mechanisms provide a slightly more abstract view
+  than the underlying primitives of @{command "consts"}, @{command
+  "defs"} (see \secref{sec:consts}), and raw axioms.  In particular,
+  type-inference covers the whole specification as usual.
+
+  @{rail \<open>
+    @@{command axiomatization} @{syntax "fixes"}? (@'where' specs)?
+    ;
+    @@{command definition} @{syntax target}? \<newline>
+      (decl @'where')? @{syntax thmdecl}? @{syntax prop}
+    ;
+    @@{command abbreviation} @{syntax target}? @{syntax mode}? \<newline>
+      (decl @'where')? @{syntax prop}
+    ;
+
+    @{syntax_def "fixes"}: ((@{syntax name} ('::' @{syntax type})?
+      @{syntax mixfix}? | @{syntax vars}) + @'and')
+    ;
+    specs: (@{syntax thmdecl}? @{syntax props} + @'and')
+    ;
+    decl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "axiomatization"}~@{text "c\<^sub>1 \<dots> c\<^sub>m \<WHERE> \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
+  introduces several constants simultaneously and states axiomatic
+  properties for these.  The constants are marked as being specified
+  once and for all, which prevents additional specifications from being
+  issued later on.
+  
+  Note that axiomatic specifications are only appropriate when
+  declaring a new logical system; axiomatic specifications are
+  restricted to global theory contexts.  Normal applications should
+  only use definitional mechanisms!
+
+  \item @{command "definition"}~@{text "c \<WHERE> eq"} produces an
+  internal definition @{text "c \<equiv> t"} according to the specification
+  given as @{text eq}, which is then turned into a proven fact.  The
+  given proposition may deviate from internal meta-level equality
+  according to the rewrite rules declared as @{attribute defn} by the
+  object-logic.  This usually covers object-level equality @{text "x =
+  y"} and equivalence @{text "A \<leftrightarrow> B"}.  End-users normally need not
+  change the @{attribute defn} setup.
+  
+  Definitions may be presented with explicit arguments on the LHS, as
+  well as additional conditions, e.g.\ @{text "f x y = t"} instead of
+  @{text "f \<equiv> \<lambda>x y. t"} and @{text "y \<noteq> 0 \<Longrightarrow> g x y = u"} instead of an
+  unrestricted @{text "g \<equiv> \<lambda>x y. u"}.
+
+  \item @{command "print_defn_rules"} prints the definitional rewrite rules
+  declared via @{attribute defn} in the current context.
+
+  \item @{command "abbreviation"}~@{text "c \<WHERE> eq"} introduces a
+  syntactic constant which is associated with a certain term according
+  to the meta-level equality @{text eq}.
+  
+  Abbreviations participate in the usual type-inference process, but
+  are expanded before the logic ever sees them.  Pretty printing of
+  terms involves higher-order rewriting with rules stemming from
+  reverted abbreviations.  This needs some care to avoid overlapping
+  or looping syntactic replacements!
+  
+  The optional @{text mode} specification restricts output to a
+  particular print mode; using ``@{text input}'' here achieves the
+  effect of one-way abbreviations.  The mode may also include an
+  ``@{keyword "output"}'' qualifier that affects the concrete syntax
+  declared for abbreviations, cf.\ @{command "syntax"} in
+  \secref{sec:syn-trans}.
+  
+  \item @{command "print_abbrevs"} prints all constant abbreviations
+  of the current context.
+  
+  \end{description}
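+
+  For example, within Isabelle/HOL one might write (a schematic sketch
+  with hypothetical constant names):
+
+  \begin{quote}
+  @{command "definition"}~@{text "sq :: nat \<Rightarrow> nat \<WHERE> sq n = n * n"} \\
+  @{command "abbreviation"}~@{text "twice :: nat \<Rightarrow> nat \<WHERE> twice n \<equiv> n + n"}
+  \end{quote}
+
+  The first introduces a genuine constant @{text sq} together with the
+  defining fact @{text sq_def}, while the second merely provides
+  notation that is expanded before the logic ever sees it.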
+*}
+
+
+section {* Generic declarations *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "declaration"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "syntax_declaration"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "declare"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  Arbitrary operations on the background context may be wrapped up as
+  generic declaration elements.  Since the underlying concept of local
+  theories may be subject to later re-interpretation, there is an
+  additional dependency on a morphism that tells the difference of the
+  original declaration context wrt.\ the application context
+  encountered later on.  A fact declaration is an important special
+  case: it consists of a theorem which is applied to the context by
+  means of an attribute.
+
+  @{rail \<open>
+    (@@{command declaration} | @@{command syntax_declaration})
+      ('(' @'pervasive' ')')? \<newline> @{syntax target}? @{syntax text}
+    ;
+    @@{command declare} @{syntax target}? (@{syntax thmrefs} + @'and')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "declaration"}~@{text d} adds the declaration
+  function @{text d} of ML type @{ML_type declaration} to the current
+  local theory under construction.  In later application contexts, the
+  function is transformed according to the morphisms being involved in
+  the interpretation hierarchy.
+
+  If the @{text "(pervasive)"} option is given, the corresponding
+  declaration is applied to all possible contexts involved, including
+  the global background theory.
+
+  \item @{command "syntax_declaration"} is similar to @{command
+  "declaration"}, but is meant to affect only ``syntactic'' tools by
+  convention (such as notation and type-checking information).
+
+  \item @{command "declare"}~@{text thms} declares theorems to the
+  current local theory context.  No theorem binding is involved here,
+  unlike @{command "theorems"} or @{command "lemmas"} (cf.\
+  \secref{sec:theorems}), so @{command "declare"} only has the effect
+  of applying attributes as included in the theorem specification.
+
+  \end{description}
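+
+  For example, ``@{command "declare"}~@{text "this_rule [intro]
+  that_rule [elim]"}'' (with hypothetical fact names) merely applies
+  the given attributes to the current context, without binding any new
+  fact names.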
+*}
+
+
+section {* Locales \label{sec:locale} *}
+
+text {*
+  A locale is a functor that maps parameters (including implicit type
+  parameters) and a specification to a list of declarations.  The
+  syntax of locales is modeled after the Isar proof context commands
+  (cf.\ \secref{sec:proof-context}).
+
+  Locale hierarchies are supported by maintaining a graph of
+  dependencies between locale instances in the global theory.
+  Dependencies may be introduced through import (where a locale is
+  defined as a sublocale of the imported instances) or by proving that
+  an existing locale is a sublocale of one or several locale
+  instances.
+
+  A locale may be opened with the purpose of appending to its list of
+  declarations (cf.\ \secref{sec:target}).  When opening a locale,
+  declarations from all dependencies are collected and presented
+  as a local theory.  In this process, which is called \emph{roundup},
+  redundant locale instances are omitted.  A locale instance is
+  redundant if it is subsumed by an instance encountered earlier.  A
+  more detailed description of this process is available elsewhere
+  \cite{Ballarin2014}.
+*}
+
+
+subsection {* Locale expressions \label{sec:locale-expr} *}
+
+text {*
+  A \emph{locale expression} denotes a context composed of instances
+  of existing locales.  The context consists of the declaration
+  elements from the locale instances.  Redundant locale instances are
+  omitted according to roundup.
+
+  @{rail \<open>
+    @{syntax_def locale_expr}: (instance + '+') (@'for' (@{syntax "fixes"} + @'and'))?
+    ;
+    instance: (qualifier ':')? @{syntax nameref} (pos_insts | named_insts)
+    ;
+    qualifier: @{syntax name} ('?' | '!')?
+    ;
+    pos_insts: ('_' | @{syntax term})*
+    ;
+    named_insts: @'where' (@{syntax name} '=' @{syntax term} + @'and')
+  \<close>}
+
+  A locale instance consists of a reference to a locale and either
+  positional or named parameter instantiations.  Identical
+  instantiations (that is, those that instantiate a parameter by itself)
+  may be omitted.  The notation `@{text "_"}' makes it possible to
+  omit the instantiation for a parameter inside a positional instantiation.
+
+  Terms in instantiations are from the context the locale expression
+  is declared in.  Local names may be added to this context with the
+  optional @{keyword "for"} clause.  This is useful for shadowing names
+  bound in outer contexts, and for declaring syntax.  In addition,
+  syntax declarations from one instance are effective when parsing
+  subsequent instances of the same expression.
+
+  Instances have an optional qualifier which applies to names in
+  declarations.  Names include local definitions and theorem names.
+  If present, the qualifier itself is either optional
+  (``\texttt{?}''), which means that it may be omitted on input of the
+  qualified name, or mandatory (``\texttt{!}'').  If neither
+  ``\texttt{?}'' nor ``\texttt{!}'' are present, the command's default
+  is used.  For @{command "interpretation"} and @{command "interpret"}
+  the default is ``mandatory'', for @{command "locale"} and @{command
+  "sublocale"} the default is ``optional''.  Qualifiers play no role
+  in determining whether one locale instance subsumes another.
+*}
+
+
+subsection {* Locale declarations *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "locale"} & : & @{text "theory \<rightarrow> local_theory"} \\
+    @{command_def "print_locale"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_locales"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "locale_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{method_def intro_locales} & : & @{text method} \\
+    @{method_def unfold_locales} & : & @{text method} \\
+  \end{matharray}
+
+  \indexisarelem{fixes}\indexisarelem{constrains}\indexisarelem{assumes}
+  \indexisarelem{defines}\indexisarelem{notes}
+  @{rail \<open>
+    @@{command locale} @{syntax name} ('=' @{syntax locale})? @'begin'?
+    ;
+    @@{command print_locale} '!'? @{syntax nameref}
+    ;
+    @{syntax_def locale}: @{syntax context_elem}+ |
+      @{syntax locale_expr} ('+' (@{syntax context_elem}+))?
+    ;
+    @{syntax_def context_elem}:
+      @'fixes' (@{syntax "fixes"} + @'and') |
+      @'constrains' (@{syntax name} '::' @{syntax type} + @'and') |
+      @'assumes' (@{syntax props} + @'and') |
+      @'defines' (@{syntax thmdecl}? @{syntax prop} @{syntax prop_pat}? + @'and') |
+      @'notes' (@{syntax thmdef}? @{syntax thmrefs} + @'and')
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "locale"}~@{text "loc = import + body"} defines a
+  new locale @{text loc} as a context consisting of a certain view of
+  existing locales (@{text import}) plus some additional elements
+  (@{text body}).  Both @{text import} and @{text body} are optional;
+  the degenerate form @{command "locale"}~@{text loc} defines an empty
+  locale, which may still be useful to collect declarations of facts
+  later on.  Type-inference on locale expressions automatically takes
+  care of the most general typing that the combined context elements
+  may acquire.
+
+  The @{text import} consists of a locale expression; see
+  \secref{sec:proof-context} above.  Its @{keyword "for"} clause defines
+  the parameters of @{text import}.  These are parameters of
+  the defined locale.  Locale parameters whose instantiation is
+  omitted automatically extend the (possibly empty) @{keyword "for"}
+  clause: they are inserted at its beginning.  This means that these
+  parameters may be referred to from within the expression and also in
+  the subsequent context elements, and it provides a notational
+  convenience for the inheritance of parameters in locale
+  declarations.
+
+  The @{text body} consists of context elements.
+
+  \begin{description}
+
+  \item @{element "fixes"}~@{text "x :: \<tau> (mx)"} declares a local
+  parameter of type @{text \<tau>} and mixfix annotation @{text mx} (both
+  are optional).  The special syntax declaration ``@{text
+  "("}@{keyword_ref "structure"}@{text ")"}'' means that @{text x} may
+  be referenced implicitly in this context.
+
+  \item @{element "constrains"}~@{text "x :: \<tau>"} introduces a type
+  constraint @{text \<tau>} on the local parameter @{text x}.  This
+  element is deprecated.  The type constraint should be introduced in
+  the @{keyword "for"} clause or the relevant @{element "fixes"} element.
+
+  \item @{element "assumes"}~@{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
+  introduces local premises, similar to @{command "assume"} within a
+  proof (cf.\ \secref{sec:proof-context}).
+
+  \item @{element "defines"}~@{text "a: x \<equiv> t"} defines a previously
+  declared parameter.  This is similar to @{command "def"} within a
+  proof (cf.\ \secref{sec:proof-context}), but @{element "defines"}
+  takes an equational proposition instead of a variable-term pair.  The
+  left-hand side of the equation may have additional arguments, e.g.\
+  ``@{element "defines"}~@{text "f x\<^sub>1 \<dots> x\<^sub>n \<equiv> t"}''.
+
+  \item @{element "notes"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}
+  reconsiders facts within a local context.  Most notably, this may
+  include arbitrary declarations in any attribute specifications
+  included here, e.g.\ a local @{attribute simp} rule.
+
+  \end{description}
+
+  Both @{element "assumes"} and @{element "defines"} elements
+  contribute to the locale specification.  When defining an operation
+  derived from the parameters, @{command "definition"}
+  (\secref{sec:basic-spec}) is usually more appropriate.
+  
+  Note that ``@{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"}'' patterns given
+  in the syntax of @{element "assumes"} and @{element "defines"} above
+  are illegal in locale definitions.  In the long goal format of
+  \secref{sec:goals}, term bindings may be included as expected,
+  though.
+  
+  \medskip Locale specifications are ``closed up'' by
+  turning the given text into a predicate definition @{text
+  loc_axioms} and deriving the original assumptions as local lemmas
+  (modulo local definitions).  The predicate statement covers only the
+  newly specified assumptions, omitting the content of included locale
+  expressions.  The full cumulative view is only provided on export,
+  involving another predicate @{text loc} that refers to the complete
+  specification text.
+  
+  In any case, the predicate arguments are those locale parameters
+  that actually occur in the respective piece of text.  These
+  predicates also operate at the meta-level in theory, but the locale
+  package attempts to internalize statements according to the
+  object-logic setup (e.g.\ replacing @{text \<And>} by @{text \<forall>}, and
+  @{text "\<Longrightarrow>"} by @{text "\<longrightarrow>"} in HOL; see also
+  \secref{sec:object-logic}).  Separate introduction rules @{text
+  loc_axioms.intro} and @{text loc.intro} are provided as well.
+  
+  \item @{command "print_locale"}~@{text "locale"} prints the
+  contents of the named locale.  The command omits @{element "notes"}
+  elements by default.  Use @{command "print_locale"}@{text "!"} to
+  have them included.
+
+  \item @{command "print_locales"} prints the names of all locales
+  of the current theory.
+
+  \item @{command "locale_deps"} visualizes all locales and their
+  relations as a Hasse diagram. This includes locales defined as type
+  classes (\secref{sec:class}).  See also @{command
+  "print_dependencies"} below.
+
+  \item @{method intro_locales} and @{method unfold_locales}
+  repeatedly expand all introduction rules of locale predicates of the
+  theory.  While @{method intro_locales} only applies the @{text
+  loc.intro} introduction rules and therefore does not descend to
+  assumptions, @{method unfold_locales} is more aggressive and applies
+  @{text loc_axioms.intro} as well.  Both methods are aware of locale
+  specifications entailed by the context, both from target statements,
+  and from interpretations (see below).  New goals that are entailed
+  by the current context are discharged automatically.
+
+  \end{description}
+*}
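+
+text {*
+  The following sketch illustrates the @{command "locale"} command and
+  a locale expression with a @{keyword "for"} clause.  All names are
+  hypothetical and serve illustration only.
+*}
+
+locale operation =
+  fixes f :: "'a \<Rightarrow> 'a"
+  assumes idem: "f (f x) = f x"
+
+locale operation_pair =
+  a: operation f + b: operation g
+  for f :: "'a \<Rightarrow> 'a" and g :: "'a \<Rightarrow> 'a"
+
+text {*
+  Within @{text operation_pair}, the qualified facts @{text a.idem} and
+  @{text b.idem} refer to the assumption of the respective instance.
+*}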
+
+
+subsection {* Locale interpretation *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "interpretation"} & : & @{text "theory | local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "interpret"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
+    @{command_def "sublocale"} & : & @{text "theory | local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def "print_dependencies"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_interps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  Locales may be instantiated, and the resulting instantiated
+  declarations added to the current context.  This requires proof (of
+  the instantiated specification) and is called \emph{locale
+  interpretation}.  Interpretation is possible in locales (@{command
+  "sublocale"}), global and local theories (@{command
+  "interpretation"}) and also within proof bodies (@{command
+  "interpret"}).
+
+  @{rail \<open>
+    @@{command interpretation} @{syntax locale_expr} equations?
+    ;
+    @@{command interpret} @{syntax locale_expr} equations?
+    ;
+    @@{command sublocale} (@{syntax nameref} ('<' | '\<subseteq>'))? @{syntax locale_expr} \<newline>
+      equations?
+    ;
+    @@{command print_dependencies} '!'? @{syntax locale_expr}
+    ;
+    @@{command print_interps} @{syntax nameref}
+    ;
+
+    equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "interpretation"}~@{text "expr \<WHERE> eqns"}
+  interprets @{text expr} in a global or local theory.  The command
+  generates proof obligations for the instantiated specifications.
+  Once these are discharged by the user, instantiated declarations (in
+  particular, facts) are added to the theory in a post-processing
+  phase.
+
+  The command is aware of interpretations that are already active.
+  Post-processing is achieved through a variant of roundup that takes
+  the interpretations of the current global or local theory into
+  account.  In order to simplify the proof obligations according to
+  existing interpretations, use the methods @{method intro_locales} or
+  @{method unfold_locales}.
+
+  When adding declarations to locales, interpreted versions of these
+  declarations are added to the global theory for all interpretations
+  in the global theory as well. That is, interpretations in global
+  theories dynamically participate in any declarations added to
+  locales.
+
+  In contrast, the lifetime of an interpretation in a local theory is
+  limited to the current context block.  At the closing @{command end}
+  of the block the interpretation and its declarations disappear.
+  This enables establishing facts based on interpretations without
+  creating permanent links to the interpreted locale instances, as
+  would be the case with @{command sublocale}.
+  While @{command "interpretation"}~@{text "(\<IN> c)
+  \<dots>"} is technically possible, it is not useful since its result is
+  discarded immediately.
+
+  Free variables in the interpreted expression are allowed.  They are
+  turned into schematic variables in the generated declarations.  In
+  order to use a free variable whose name is already bound in the
+  context --- for example, because a constant of that name exists ---
+  add it to the @{keyword "for"} clause.
+
+  The equations @{text eqns} yield \emph{rewrite morphisms}, which are
+  unfolded during post-processing and are useful for interpreting
+  concepts introduced through definitions.  The equations must be
+  proved.
+
+  \item @{command "interpret"}~@{text "expr \<WHERE> eqns"} interprets
+  @{text expr} in the proof context and is otherwise similar to
+  interpretation in local theories.  Note that for @{command
+  "interpret"} the @{text eqns} should be
+  explicitly universally quantified.
+
+  \item @{command "sublocale"}~@{text "name \<subseteq> expr \<WHERE>
+  eqns"}
+  interprets @{text expr} in the locale @{text name}.  A proof that
+  the specification of @{text name} implies the specification of
+  @{text expr} is required.  As in the localized version of the
+  theorem command, the proof is in the context of @{text name}.  After
+  the proof obligation has been discharged, the locale hierarchy is
+  changed as if @{text name} imported @{text expr} (hence the name
+  @{command "sublocale"}).  When the context of @{text name} is
+  subsequently entered, traversing the locale hierarchy will involve
+  the locale instances of @{text expr}, and their declarations will be
+  added to the context.  This makes @{command "sublocale"}
+  dynamic: extensions of a locale that is instantiated in @{text expr}
+  may take place after the @{command "sublocale"} declaration and
+  still become available in the context.  Circular @{command
+  "sublocale"} declarations are allowed as long as they do not lead to
+  infinite chains.
+
+  If interpretations of @{text name} exist in the current global
+  theory, the command adds interpretations for @{text expr} as well,
+  with the same qualifier, although only for fragments of @{text expr}
+  that are not interpreted in the theory already.
+
+  The equations @{text eqns} amend the morphism through
+  which @{text expr} is interpreted.  This enables mapping definitions
+  from the interpreted locales to entities of @{text name} and can
+  help break infinite chains induced by circular @{command
+  "sublocale"} declarations.
+
+  In a named context block the @{command sublocale} command may also
+  be used, but the locale argument must be omitted.  The command then
+  refers to the locale (or class) target of the context block.
+
+  \item @{command "print_dependencies"}~@{text "expr"} is useful for
+  understanding the effect of an interpretation of @{text "expr"} in
+  the current context.  It lists all locale instances for which
+  interpretations would be added to the current context.  Variant
+  @{command "print_dependencies"}@{text "!"} does not generalize
+  parameters and assumes an empty context --- that is, it prints all
+  locale instances that would be considered for interpretation.  The
+  latter is useful for understanding the dependencies of a locale
+  expression.
+
+  \item @{command "print_interps"}~@{text "locale"} lists all
+  interpretations of @{text "locale"} in the current theory or proof
+  context, including those due to a combination of an @{command
+  "interpretation"} or @{command "interpret"} and one or several
+  @{command "sublocale"} declarations.
+
+  \end{description}
+
+  \begin{warn}
+    If a global theory inherits declarations (body elements) for a
+    locale from one parent and an interpretation of that locale from
+    another parent, the interpretation will not be applied to the
+    declarations.
+  \end{warn}
+
+  \begin{warn}
+    Since attributes are applied to interpreted theorems,
+    interpretation may modify the context of common proof tools, e.g.\
+    the Simplifier or Classical Reasoner.  As the behavior of such
+    tools is \emph{not} stable under interpretation morphisms, manual
+    declarations might have to be added to the target context of the
+    interpretation to revert such declarations.
+  \end{warn}
+
+  \begin{warn}
+    An interpretation in a local theory or proof context may subsume previous
+    interpretations.  This happens if the same specification fragment
+    is interpreted twice and the instantiation of the second
+    interpretation is more general than that of the
+    first.  The locale package does not attempt to remove subsumed
+    interpretations.
+  \end{warn}
+*}
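+
+text {*
+  A minimal interpretation sketch, reusing the hypothetical locale
+  @{text operation} from above at type @{typ nat}.  The proof
+  obligation is discharged by @{method unfold_locales} followed by
+  simplification.
+*}
+
+interpretation identity: operation "\<lambda>x :: nat. x"
+  by unfold_locales simp
+
+text {*
+  The qualified fact @{text identity.idem} is now available in the
+  theory and will also cover declarations added to @{text operation}
+  later on.
+*}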
+
+
+section {* Classes \label{sec:class} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "class"} & : & @{text "theory \<rightarrow> local_theory"} \\
+    @{command_def "instantiation"} & : & @{text "theory \<rightarrow> local_theory"} \\
+    @{command_def "instance"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command "instance"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+    @{command_def "subclass"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "print_classes"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "class_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{method_def intro_classes} & : & @{text method} \\
+  \end{matharray}
+
+  A class is a particular locale with \emph{exactly one} type variable
+  @{text \<alpha>}.  Beyond the underlying locale, a corresponding type class
+  is established which is interpreted logically as an axiomatic type
+  class \cite{Wenzel:1997:TPHOL} whose logical content consists of the
+  assumptions of the locale.  Thus, classes provide the full
+  generality of locales combined with the convenience of type classes
+  (notably type-inference).  See \cite{isabelle-classes} for a short
+  tutorial.
+
+  @{rail \<open>
+    @@{command class} class_spec @'begin'?
+    ;
+    class_spec: @{syntax name} '='
+      ((@{syntax nameref} '+' (@{syntax context_elem}+)) |
+        @{syntax nameref} | (@{syntax context_elem}+))      
+    ;
+    @@{command instantiation} (@{syntax nameref} + @'and') '::' @{syntax arity} @'begin'
+    ;
+    @@{command instance} (() | (@{syntax nameref} + @'and') '::' @{syntax arity} |
+      @{syntax nameref} ('<' | '\<subseteq>') @{syntax nameref} )
+    ;
+    @@{command subclass} @{syntax target}? @{syntax nameref}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "class"}~@{text "c = superclasses + body"} defines
+  a new class @{text c}, inheriting from @{text superclasses}.  This
+  introduces a locale @{text c} with import of all locales @{text
+  superclasses}.
+
+  Any @{element "fixes"} in @{text body} are lifted to the global
+  theory level (\emph{class operations} @{text "f\<^sub>1, \<dots>,
+  f\<^sub>n"} of class @{text c}), mapping the local type parameter
+  @{text \<alpha>} to a schematic type variable @{text "?\<alpha> :: c"}.
+
+  Likewise, @{element "assumes"} in @{text body} are also lifted,
+  mapping each local parameter @{text "f :: \<tau>[\<alpha>]"} to its
+  corresponding global constant @{text "f :: \<tau>[?\<alpha> :: c]"}.  The
+  corresponding introduction rule is provided as @{text
+  c_class_axioms.intro}.  This rule should be rarely needed directly
+  --- the @{method intro_classes} method takes care of the details of
+  class membership proofs.
+
+  \item @{command "instantiation"}~@{text "t :: (s\<^sub>1, \<dots>, s\<^sub>n)s
+  \<BEGIN>"} opens a target (cf.\ \secref{sec:target}) which
+  allows specifying class operations @{text "f\<^sub>1, \<dots>, f\<^sub>n"} corresponding
+  to sort @{text s} at the particular type instance @{text "(\<alpha>\<^sub>1 :: s\<^sub>1,
+  \<dots>, \<alpha>\<^sub>n :: s\<^sub>n) t"}.  A plain @{command "instance"} command in the
+  target body poses a goal stating these type arities.  The target is
+  concluded by an @{command_ref (local) "end"} command.
+
+  Note that a list of simultaneous type constructors may be given;
+  this corresponds nicely to mutually recursive type definitions, e.g.\
+  in Isabelle/HOL.
+
+  \item @{command "instance"} in an instantiation target body sets
+  up a goal stating the type arities claimed at the opening @{command
+  "instantiation"}.  The proof would usually proceed by @{method
+  intro_classes}, and then establish the characteristic theorems of
+  the type classes involved.  After finishing the proof, the
+  background theory will be augmented by the proven type arities.
+
+  On the theory level, @{command "instance"}~@{text "t :: (s\<^sub>1, \<dots>,
+  s\<^sub>n)s"} provides a convenient way to instantiate a type class with no
+  need to specify operations: one can continue with the
+  instantiation proof immediately.
+
+  \item @{command "subclass"}~@{text c} in a class context for class
+  @{text d} sets up a goal stating that class @{text c} is logically
+  contained in class @{text d}.  After finishing the proof, class
+  @{text d} is proven to be a subclass of @{text c} and the locale @{text
+  c} is interpreted into @{text d} simultaneously.
+
+  A weakened form of this is available through a further variant of
+  @{command instance}:  @{command instance}~@{text "c\<^sub>1 \<subseteq> c\<^sub>2"} opens
+  a proof that class @{text "c\<^sub>2"} implies @{text "c\<^sub>1"} without reference
+  to the underlying locales;  this is useful if the facts needed to
+  prove the logical connection are not available on the locale level
+  but only on the theory level.
+
+  \item @{command "print_classes"} prints all classes in the current
+  theory.
+
+  \item @{command "class_deps"} visualizes all classes and their
+  subclass relations as a Hasse diagram.
+
+  \item @{method intro_classes} repeatedly expands all class
+  introduction rules of this theory.  Note that this method usually
+  need not be named explicitly, as it is already included in the
+  default proof step (e.g.\ of @{command "proof"}).  In particular,
+  instantiation of trivial (syntactic) classes may be performed by a
+  single ``@{command ".."}'' proof step.
+
+  \end{description}
+*}
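+
+text {*
+  A small hypothetical sketch of a class with one operation and one
+  assumption, together with an instantiation for type @{typ unit}:
+*}
+
+class preord =
+  fixes leq :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
+  assumes leq_refl: "leq x x"
+
+instantiation unit :: preord
+begin
+
+definition leq_unit_def: "leq (u :: unit) v \<longleftrightarrow> True"
+
+instance
+  by intro_classes (simp add: leq_unit_def)
+
+end
+
+text {*
+  The class assumption re-emerges as the proof obligation of @{command
+  "instance"}; here it is discharged by unfolding the definition just
+  given.
+*}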
+
+
+subsection {* The class target *}
+
+text {*
+  %FIXME check
+
+  A named context may refer to a locale (cf.\ \secref{sec:target}).
+  If this locale is also a class @{text c}, apart from the common
+  locale target behaviour the following happens.
+
+  \begin{itemize}
+
+  \item Local constant declarations @{text "g[\<alpha>]"} referring to the
+  local type parameter @{text \<alpha>} and local parameters @{text "f[\<alpha>]"}
+  are accompanied by theory-level constants @{text "g[?\<alpha> :: c]"}
+  referring to theory-level class operations @{text "f[?\<alpha> :: c]"}.
+
+  \item Local theorem bindings are lifted as are assumptions.
+
+  \item Local syntax refers to local operations @{text "g[\<alpha>]"} and
+  global operations @{text "g[?\<alpha> :: c]"} uniformly.  Type inference
+  resolves ambiguities.  In rare cases, manual type annotations are
+  needed.
+  
+  \end{itemize}
+*}
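+
+text {*
+  Continuing the hypothetical class @{text preord} from above, a
+  definition made in its class target yields both a local operation and
+  a corresponding global constant:
+*}
+
+context preord
+begin
+
+definition strict_leq :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
+  where "strict_leq x y \<longleftrightarrow> leq x y \<and> \<not> leq y x"
+
+end
+
+text {*
+  Locally, @{text strict_leq} refers to the class parameter @{text leq};
+  globally, there is a constant @{text "strict_leq :: 'a::preord \<Rightarrow> 'a \<Rightarrow> bool"}.
+*}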
+
+
+subsection {* Co-regularity of type classes and arities *}
+
+text {* The class relation together with the collection of
+  type-constructor arities must obey the principle of
+  \emph{co-regularity} as defined below.
+
+  \medskip For the subsequent formulation of co-regularity we assume
+  that the class relation is closed under transitivity and reflexivity.
+  Moreover the collection of arities @{text "t :: (\<^vec>s)c"} is
+  completed such that @{text "t :: (\<^vec>s)c"} and @{text "c \<subseteq> c'"}
+  implies @{text "t :: (\<^vec>s)c'"} for all such declarations.
+
+  Treating sorts as finite sets of classes (meaning the intersection),
+  the class relation @{text "c\<^sub>1 \<subseteq> c\<^sub>2"} is extended to sorts as
+  follows:
+  \[
+    @{text "s\<^sub>1 \<subseteq> s\<^sub>2 \<equiv> \<forall>c\<^sub>2 \<in> s\<^sub>2. \<exists>c\<^sub>1 \<in> s\<^sub>1. c\<^sub>1 \<subseteq> c\<^sub>2"}
+  \]
+
+  This relation on sorts is further extended to tuples of sorts (of
+  the same length) in the component-wise way.
+
+  \smallskip Co-regularity of the class relation together with the
+  arities relation means:
+  \[
+    @{text "t :: (\<^vec>s\<^sub>1)c\<^sub>1 \<Longrightarrow> t :: (\<^vec>s\<^sub>2)c\<^sub>2 \<Longrightarrow> c\<^sub>1 \<subseteq> c\<^sub>2 \<Longrightarrow> \<^vec>s\<^sub>1 \<subseteq> \<^vec>s\<^sub>2"}
+  \]
+  \noindent for all such arities.  In other words, whenever the result
+  classes of some type-constructor arities are related, then the
+  argument sorts need to be related in the same way.
+
+  \medskip Co-regularity is a very fundamental property of the
+  order-sorted algebra of types.  For example, it entails principal
+  types and most general unifiers, e.g.\ see \cite{nipkow-prehofer}.
+*}
+
+
+section {* Unrestricted overloading *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "overloading"} & : & @{text "theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  Isabelle/Pure's definitional schemes support certain forms of
+  overloading (see \secref{sec:consts}).  Overloading means that a
+  constant being declared as @{text "c :: \<alpha> decl"} may be
+  defined separately on type instances
+  @{text "c :: (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t decl"}
+  for each type constructor @{text t}.  In most cases,
+  overloading will be used in a Haskell-like fashion together with
+  type classes by means of @{command "instantiation"} (see
+  \secref{sec:class}).  Sometimes low-level overloading is desirable.
+  The @{command "overloading"} target provides a convenient view for
+  end-users.
+
+  @{rail \<open>
+    @@{command overloading} ( spec + ) @'begin'
+    ;
+    spec: @{syntax name} ( '==' | '\<equiv>' ) @{syntax term} ( '(' @'unchecked' ')' )?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "overloading"}~@{text "x\<^sub>1 \<equiv> c\<^sub>1 :: \<tau>\<^sub>1 \<AND> \<dots> x\<^sub>n \<equiv> c\<^sub>n :: \<tau>\<^sub>n \<BEGIN>"}
+  opens a theory target (cf.\ \secref{sec:target}) which allows
+  specifying constants with overloaded definitions.  These are identified
+  by an explicitly given mapping from variable names @{text "x\<^sub>i"} to
+  constants @{text "c\<^sub>i"} at particular type instances.  The
+  definitions themselves are established using common specification
+  tools, using the names @{text "x\<^sub>i"} as references to the
+  corresponding constants.  The target is concluded by @{command
+  (local) "end"}.
+
+  A @{text "(unchecked)"} option disables global dependency checks for
+  the corresponding definition, which is occasionally useful for
+  exotic overloading (see \secref{sec:consts} for a precise description).
+  It is at the discretion of the user to avoid
+  malformed theory specifications!
+
+  \end{description}
+*}
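+
+text {*
+  A hypothetical sketch of low-level overloading: a polymorphic
+  constant is declared first and then defined separately at type
+  @{typ unit}.
+*}
+
+consts raw_len :: "'a \<Rightarrow> nat"
+
+overloading
+  raw_len_unit \<equiv> "raw_len :: unit \<Rightarrow> nat"
+begin
+
+definition "raw_len_unit (u :: unit) = (0 :: nat)"
+
+end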
+
+
+section {* Incorporating ML code \label{sec:ML} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "SML_file"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "ML_file"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "ML"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "ML_prf"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "ML_val"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "ML_command"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "setup"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "local_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "attribute_setup"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{command SML_file} | @@{command ML_file}) @{syntax name}
+    ;
+    (@@{command ML} | @@{command ML_prf} | @@{command ML_val} |
+      @@{command ML_command} | @@{command setup} | @@{command local_setup}) @{syntax text}
+    ;
+    @@{command attribute_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "SML_file"}~@{text "name"} reads and evaluates the
+  given Standard ML file.  Top-level SML bindings are stored within
+  the theory context; the initial environment is restricted to the
+  Standard ML implementation of Poly/ML, without the many add-ons of
+  Isabelle/ML.  Multiple @{command "SML_file"} commands may be used to
+  build larger Standard ML projects, independently of the regular
+  Isabelle/ML environment.
+
+  \item @{command "ML_file"}~@{text "name"} reads and evaluates the
+  given ML file.  The current theory context is passed down to the ML
+  toplevel and may be modified, using @{ML "Context.>>"} or derived ML
+  commands.  Top-level ML bindings are stored within the (global or
+  local) theory context.
+  
+  \item @{command "ML"}~@{text "text"} is similar to @{command
+  "ML_file"}, but evaluates directly the given @{text "text"}.
+  Top-level ML bindings are stored within the (global or local) theory
+  context.
+
+  \item @{command "ML_prf"} is analogous to @{command "ML"} but works
+  within a proof context.  Top-level ML bindings are stored within the
+  proof context in a purely sequential fashion, disregarding the
+  nested proof structure.  ML bindings introduced by @{command
+  "ML_prf"} are discarded at the end of the proof.
+
+  \item @{command "ML_val"} and @{command "ML_command"} are diagnostic
+  versions of @{command "ML"}, which means that the context may not be
+  updated.  @{command "ML_val"} echoes the bindings produced at the ML
+  toplevel, but @{command "ML_command"} is silent.
+  
+  \item @{command "setup"}~@{text "text"} changes the current theory
+  context by applying @{text "text"}, which refers to an ML expression
+  of type @{ML_type "theory -> theory"}.  This makes it possible to
+  initialize any object-logic specific tools and packages written in
+  ML, for example.
+
+  \item @{command "local_setup"} is similar to @{command "setup"} for
+  a local theory context, and an ML expression of type @{ML_type
+  "local_theory -> local_theory"}.  This allows to
+  invoke local theory specification packages without going through
+  concrete outer syntax, for example.
+
+  \item @{command "attribute_setup"}~@{text "name = text description"}
+  defines an attribute in the current theory.  The given @{text
+  "text"} has to be an ML expression of type
+  @{ML_type "attribute context_parser"}, cf.\ basic parsers defined in
+  structure @{ML_structure Args} and @{ML_structure Attrib}.
+
+  In principle, attributes can operate both on a given theorem and the
+  implicit context, although in practice only one is modified and the
+  other serves as parameter.  Here are examples for these two cases:
+
+  \end{description}
+*}
+
+  attribute_setup my_rule = {*
+    Attrib.thms >> (fn ths =>
+      Thm.rule_attribute
+        (fn context: Context.generic => fn th: thm =>
+          let val th' = th OF ths  (* transform the theorem; the context is not used *)
+          in th' end)) *}
+
+  attribute_setup my_declaration = {*
+    Attrib.thms >> (fn ths =>
+      Thm.declaration_attribute
+        (fn th: thm => fn context: Context.generic =>
+          let val context' = context  (* a context update would go here; the theorem is not used *)
+          in context' end)) *}
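+
+text {*
+  For quick experiments, the diagnostic commands may be used directly;
+  e.g.\ the following (hypothetical) snippet echoes its resulting ML
+  binding without changing any context:
+*}
+
+ML_val {* val n = 1 + 1 *}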
+
+
+section {* Primitive specification elements *}
+
+subsection {* Sorts *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "default_sort"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command default_sort} @{syntax sort}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "default_sort"}~@{text s} makes sort @{text s} the
+  new default sort for any type variable that is given explicitly in
+  the text, but lacks a sort constraint (wrt.\ the current context).
+  Type variables generated by type inference are not affected.
+
+  Usually the default sort is only changed when defining a new
+  object-logic.  For example, the default sort in Isabelle/HOL is
+  @{class type}, the class of all HOL types.
+
+  When merging theories, the default sorts of the parents are
+  logically intersected, i.e.\ the representations as lists of classes
+  are joined.
+
+  \end{description}
+*}
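+
+text {*
+  In Isabelle/HOL the following command is effectively a no-op, since
+  @{class type} is already the default sort; it merely illustrates the
+  syntax:
+*}
+
+default_sort type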
+
+
+subsection {* Types and type abbreviations \label{sec:types-pure} *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "type_synonym"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "typedecl"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command type_synonym} (@{syntax typespec} '=' @{syntax type} @{syntax mixfix}?)
+    ;
+    @@{command typedecl} @{syntax typespec} @{syntax mixfix}?
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "type_synonym"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>"}
+  introduces a \emph{type synonym} @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} for the
+  existing type @{text "\<tau>"}.  Unlike actual type definitions, as are
+  available in Isabelle/HOL for example, type synonyms are merely
+  syntactic abbreviations without any logical significance.
+  Internally, type synonyms are fully expanded.
+  
+  \item @{command "typedecl"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} declares a new
+  type constructor @{text t}.  If the object-logic defines a base sort
+  @{text s}, then the constructor is declared to operate on that, via
+  the axiomatic type-class instance @{text "t :: (s, \<dots>, s)s"}.
+
+  \end{description}
+*}
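+
+text {*
+  A hypothetical sketch of both commands:
+*}
+
+type_synonym 'a dup = "'a \<times> 'a"
+
+typedecl raw_name
+
+text {*
+  Here @{text "nat dup"} merely abbreviates @{text "nat \<times> nat"}, while
+  @{text raw_name} is a genuinely new, unspecified type.
+*}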
+
+
+subsection {* Constants and definitions \label{sec:consts} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "consts"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "defs"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  Definitions essentially express abbreviations within the logic.  The
+  simplest form of a definition is @{text "c :: \<sigma> \<equiv> t"}, where @{text
+  c} is a newly declared constant.  Isabelle also allows derived forms
+  where the arguments of @{text c} appear on the left, abbreviating a
+  prefix of @{text \<lambda>}-abstractions, e.g.\ @{text "c \<equiv> \<lambda>x y. t"} may be
+  written more conveniently as @{text "c x y \<equiv> t"}.  Moreover,
+  definitions may be weakened by adding arbitrary pre-conditions:
+  @{text "A \<Longrightarrow> c x y \<equiv> t"}.
+
+  \medskip The built-in well-formedness conditions for definitional
+  specifications are:
+
+  \begin{itemize}
+
+  \item Arguments (on the left-hand side) must be distinct variables.
+
+  \item All variables on the right-hand side must also appear on the
+  left-hand side.
+
+  \item All type variables on the right-hand side must also appear on
+  the left-hand side; this prohibits @{text "0 :: nat \<equiv> length ([] ::
+  \<alpha> list)"} for example.
+
+  \item The definition must not be recursive.  Most object-logics
+  provide definitional principles that can be used to express
+  recursion safely.
+
+  \end{itemize}
+
+  The right-hand side of overloaded definitions may mention overloaded constants
+  recursively at type instances corresponding to the immediate
+  argument types @{text "\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n"}.  Incomplete
+  specification patterns impose global constraints on all occurrences,
+  e.g.\ @{text "d :: \<alpha> \<times> \<alpha>"} on the left-hand side means that all
+  corresponding occurrences on some right-hand side need to be an
+  instance of this; the general @{text "d :: \<alpha> \<times> \<beta>"} will be disallowed.
+
+  @{rail \<open>
+    @@{command consts} ((@{syntax name} '::' @{syntax type} @{syntax mixfix}?) +)
+    ;
+    @@{command defs} opt? (@{syntax axmdecl} @{syntax prop} +)
+    ;
+    opt: '(' @'unchecked'? @'overloaded'? ')'
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "consts"}~@{text "c :: \<sigma>"} declares constant @{text
+  c} to have any instance of type scheme @{text \<sigma>}.  The optional
+  mixfix annotations may attach concrete syntax to the constants
+  declared.
+  
+  \item @{command "defs"}~@{text "name: eqn"} introduces @{text eqn}
+  as a definitional axiom for some existing constant.
+  
+  The @{text "(unchecked)"} option disables global dependency checks
+  for this definition, which is occasionally useful for exotic
+  overloading.  It is at the discretion of the user to avoid malformed
+  theory specifications!
+  
+  The @{text "(overloaded)"} option declares definitions to be
+  potentially overloaded.  Unless this option is given, a warning
+  message is issued for any definitional equation with a more
+  special type than that of the corresponding constant declaration.
+  
+  \end{description}
+*}
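+
+text {*
+  A hypothetical sketch: a polymorphic constant is declared via
+  @{command "consts"} and then given an overloaded definitional axiom
+  at type @{typ bool} via @{command "defs"}.
+*}
+
+consts magnitude :: "'a \<Rightarrow> nat"
+
+defs (overloaded)
+  magnitude_bool_def: "magnitude (b :: bool) \<equiv> (if b then 1 else 0)"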
+
+
+section {* Naming existing theorems \label{sec:theorems} *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "lemmas"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "theorems"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{command lemmas} | @@{command theorems}) @{syntax target}? \<newline>
+      (@{syntax thmdef}? @{syntax thmrefs} + @'and')
+      (@'for' (@{syntax vars} + @'and'))?
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command "lemmas"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}~@{keyword_def
+  "for"}~@{text "x\<^sub>1 \<dots> x\<^sub>m"} evaluates given facts (with attributes) in
+  the current context, which may be augmented by local variables.
+  Results are standardized before being stored, i.e.\ schematic
+  variables are renamed to enforce index @{text "0"} uniformly.
+
+  \item @{command "theorems"} is the same as @{command "lemmas"}, but
+  marks the result as a different kind of facts.
+
+  \end{description}
+*}
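+
+text {*
+  Two small sketches, using existing facts of Isabelle/HOL; the result
+  names are hypothetical:
+*}
+
+lemmas weak_mp = mp [OF impI]
+
+theorems conj_projections = conjunct1 conjunct2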
+
+
+section {* Oracles *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "oracle"} & : & @{text "theory \<rightarrow> theory"} & (axiomatic!) \\
+  \end{matharray}
+
+  Oracles allow Isabelle to take advantage of external reasoners such
+  as arithmetic decision procedures, model checkers, fast tautology
+  checkers or computer algebra systems.  Invoked as an oracle, an
+  external reasoner can create arbitrary Isabelle theorems.
+
+  It is the responsibility of the user to ensure that the external
+  reasoner is as trustworthy as the application requires.  Another
+  typical source of errors is the linkup between Isabelle and the
+  external tool, not just its concrete implementation, but also the
+  required translation between two different logical environments.
+
+  Isabelle merely guarantees well-formedness of the propositions being
+  asserted, and records within the internal derivation object how
+  presumed theorems depend on unproven suppositions.
+
+  @{rail \<open>
+    @@{command oracle} @{syntax name} '=' @{syntax text}
+  \<close>}
+
+  \begin{description}
+
+  \item @{command "oracle"}~@{text "name = text"} turns the given ML
+  expression @{text "text"} of type @{ML_text "'a -> cterm"} into an
+  ML function of type @{ML_text "'a -> thm"}, which is bound to the
+  global identifier @{ML_text name}.  This acts like an infinitary
+  specification of axioms!  Invoking the oracle only works within the
+  scope of the resulting theory.
+
+  \end{description}
+
+  See @{file "~~/src/HOL/ex/Iff_Oracle.thy"} for a worked example of
+  defining a new primitive rule as oracle, and turning it into a proof
+  method.
+*}
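+
+text {*
+  A deliberately drastic sketch: the ML function simply returns the
+  given @{ML_type cterm}, so invoking the oracle amounts to asserting
+  an arbitrary axiom.  Real oracles would run an external tool and
+  reconstruct a proposition from its answer.
+*}
+
+oracle assert_cterm = {*
+  (* for illustration only: the given proposition is asserted without proof *)
+  fn (ct: cterm) => ct
+*}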
+
+
+section {* Name spaces *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "hide_class"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "hide_type"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "hide_const"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "hide_fact"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    ( @{command hide_class} | @{command hide_type} |
+      @{command hide_const} | @{command hide_fact} ) ('(' @'open' ')')? (@{syntax nameref} + )
+  \<close>}
+
+  Isabelle organizes any kind of name declarations (of types,
+  constants, theorems etc.) by separate hierarchically structured name
+  spaces.  Normally the user does not have to control the behavior of
+  name spaces by hand, yet the following commands provide some way to
+  do so.
+
+  \begin{description}
+
+  \item @{command "hide_class"}~@{text names} fully removes class
+  declarations from a given name space; with the @{text "(open)"}
+  option, only the base name is hidden.
+
+  Note that hiding name space accesses has no impact on logical
+  declarations --- they remain valid internally.  Entities that are no
+  longer accessible to the user are printed with the special qualifier
+  ``@{text "??"}'' prefixed to the full internal name.
+
+  \item @{command "hide_type"}, @{command "hide_const"}, and @{command
+  "hide_fact"} are similar to @{command "hide_class"}, but hide types,
+  constants, and facts, respectively.
+  
+  \end{description}
+*}
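+
+text {*
+  A hypothetical sketch: a scratch constant is declared and its base
+  name is hidden again, leaving the logical declaration intact.
+*}
+
+consts scratch :: "'a"
+
+hide_const (open) scratch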
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Symbols.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,41 @@
+theory Symbols
+imports Base Main
+begin
+
+chapter {* Predefined Isabelle symbols \label{app:symbols} *}
+
+text {*
+  Isabelle supports an infinite number of non-ASCII symbols, which are
+  represented in source text as @{verbatim "\\"}@{verbatim "<"}@{text
+  name}@{verbatim ">"} (where @{text name} may be any identifier).  It
+  is left to front-end tools how to present these symbols to the user.
+  The collection of predefined standard symbols given below is
+  available by default for Isabelle document output, due to
+  appropriate definitions of @{verbatim "\\"}@{verbatim isasym}@{text
+  name} for each @{verbatim "\\"}@{verbatim "<"}@{text name}@{verbatim
+  ">"} in the @{verbatim isabellesym.sty} file.  Most of these symbols
+  are displayed properly in Isabelle/jEdit and in {\LaTeX} documents
+  generated from Isabelle.
+
+  Moreover, any single symbol (or ASCII character) may be prefixed by
+  @{verbatim "\<^sup>"} for superscript and @{verbatim "\<^sub>"} for subscript,
+  such as @{verbatim "A\<^sup>\<star>"} for @{text "A\<^sup>\<star>"} and @{verbatim "A\<^sub>1"} for
+  @{text "A\<^sub>1"}.  Sub- and superscripts that span a region of text can
+  be marked up with @{verbatim "\<^bsub>"}@{text "\<dots>"}@{verbatim
+  "\<^esub>"} and @{verbatim "\<^bsup>"}@{text "\<dots>"}@{verbatim "\<^esup>"}
+  respectively, but note that there are limitations in the typographic
+  rendering quality of this form.  Furthermore, all ASCII characters
+  and most other symbols may be printed in bold by prefixing
+  @{verbatim "\<^bold>"} such as @{verbatim "\<^bold>\<alpha>"} for @{text "\<^bold>\<alpha>"}.  Note that
+  @{verbatim "\<^sup>"}, @{verbatim "\<^sub>"}, @{verbatim "\<^bold>"} cannot be combined.
+
+  Further details of Isabelle document preparation are covered in
+  \chref{ch:document-prep}.
+
+  \begin{center}
+  \begin{isabellebody}
+  \input{syms}  
+  \end{isabellebody}
+  \end{center}
+*}
+
+end
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Synopsis.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1108 @@
+theory Synopsis
+imports Base Main
+begin
+
+chapter {* Synopsis *}
+
+section {* Notepad *}
+
+text {*
+  An Isar proof body serves as a mathematical notepad for composing logical
+  content, consisting of types, terms, and facts.
+*}
+
+
+subsection {* Types and terms *}
+
+notepad
+begin
+  txt {* Locally fixed entities: *}
+  fix x   -- {* local constant, without any type information yet *}
+  fix x :: 'a  -- {* variant with explicit type-constraint for subsequent use *}
+
+  fix a b
+  assume "a = b"  -- {* type assignment at first occurrence in concrete term *}
+
+  txt {* Definitions (non-polymorphic): *}
+  def x \<equiv> "t::'a"
+
+  txt {* Abbreviations (polymorphic): *}
+  let ?f = "\<lambda>x. x"
+  term "?f ?f"
+
+  txt {* Notation: *}
+  write x  ("***")
+end
+
+
+subsection {* Facts *}
+
+text {*
+  A fact is a simultaneous list of theorems.
+*}
+
+
+subsubsection {* Producing facts *}
+
+notepad
+begin
+
+  txt {* Via assumption (``lambda''): *}
+  assume a: A
+
+  txt {* Via proof (``let''): *}
+  have b: B sorry
+
+  txt {* Via abbreviation (``let''): *}
+  note c = a b
+
+end
+
+
+subsubsection {* Referencing facts *}
+
+notepad
+begin
+  txt {* Via explicit name: *}
+  assume a: A
+  note a
+
+  txt {* Via implicit name: *}
+  assume A
+  note this
+
+  txt {* Via literal proposition (unification with results from the proof text): *}
+  assume A
+  note `A`
+
+  assume "\<And>x. B x"
+  note `B a`
+  note `B b`
+end
+
+
+subsubsection {* Manipulating facts *}
+
+notepad
+begin
+  txt {* Instantiation: *}
+  assume a: "\<And>x. B x"
+  note a
+  note a [of b]
+  note a [where x = b]
+
+  txt {* Backchaining: *}
+  assume 1: A
+  assume 2: "A \<Longrightarrow> C"
+  note 2 [OF 1]
+  note 1 [THEN 2]
+
+  txt {* Symmetric results: *}
+  assume "x = y"
+  note this [symmetric]
+
+  assume "x \<noteq> y"
+  note this [symmetric]
+
+  txt {* Adhoc-simplification (take care!): *}
+  assume "P ([] @ xs)"
+  note this [simplified]
+end
+
+
+subsubsection {* Projections *}
+
+text {*
+  Isar facts consist of multiple theorems.  There is notation to project
+  interval ranges.
+*}
+
+notepad
+begin
+  assume stuff: A B C D
+  note stuff(1)
+  note stuff(2-3)
+  note stuff(2-)
+end
+
+
+subsubsection {* Naming conventions *}
+
+text {*
+  \begin{itemize}
+
+  \item Lower-case identifiers are usually preferred.
+
+  \item Facts can be named after the main term within the proposition.
+
+  \item Facts should \emph{not} be named after the command that
+  introduced them (@{command "assume"}, @{command "have"}).  This is
+  misleading and hard to maintain.
+
+  \item Natural numbers can be used as ``meaningless'' names (more
+  appropriate than @{text "a1"}, @{text "a2"} etc.)
+
+  \item Symbolic identifiers are supported (e.g. @{text "*"}, @{text
+  "**"}, @{text "***"}).
+
+  \end{itemize}
+*}
+
+
+subsection {* Block structure *}
+
+text {*
+  The formal notepad is block structured.  The fact produced by the last
+  entry of a block is exported into the outer context.
+*}
+
+notepad
+begin
+  {
+    have a: A sorry
+    have b: B sorry
+    note a b
+  }
+  note this
+  note `A`
+  note `B`
+end
+
+text {* Explicit blocks as well as implicit blocks of nested goal
+  statements (e.g.\ @{command have}) automatically introduce one extra
+  pair of parentheses in reserve.  The @{command next} command makes it
+  possible to ``jump'' between these sub-blocks. *}
+
+notepad
+begin
+
+  {
+    have a: A sorry
+  next
+    have b: B
+    proof -
+      show B sorry
+    next
+      have c: C sorry
+    next
+      have d: D sorry
+    qed
+  }
+
+  txt {* Alternative version with explicit parentheses everywhere: *}
+
+  {
+    {
+      have a: A sorry
+    }
+    {
+      have b: B
+      proof -
+        {
+          show B sorry
+        }
+        {
+          have c: C sorry
+        }
+        {
+          have d: D sorry
+        }
+      qed
+    }
+  }
+
+end
+
+
+section {* Calculational reasoning \label{sec:calculations-synopsis} *}
+
+text {*
+  For example, see @{file "~~/src/HOL/Isar_Examples/Group.thy"}.
+*}
+
+
+subsection {* Special names in Isar proofs *}
+
+text {*
+  \begin{itemize}
+
+  \item term @{text "?thesis"} --- the main conclusion of the
+  innermost pending claim
+
+  \item term @{text "\<dots>"} --- the argument of the last explicitly
+    stated result (for infix application this is the right-hand side)
+
+  \item fact @{text "this"} --- the last result produced in the text
+
+  \end{itemize}
+*}
+
+notepad
+begin
+  have "x = y"
+  proof -
+    term ?thesis
+    show ?thesis sorry
+    term ?thesis  -- {* static! *}
+  qed
+  term "\<dots>"
+  thm this
+end
+
+text {* Calculational reasoning maintains the special fact called
+  ``@{text calculation}'' in the background.  Certain language
+  elements combine primary @{text this} with secondary @{text
+  calculation}. *}
+
+
+subsection {* Transitive chains *}
+
+text {* The idea is to combine @{text this} and @{text calculation}
+  via typical @{text trans} rules (see also @{command
+  print_trans_rules}): *}
+
+thm trans
+thm less_trans
+thm less_le_trans
+
+notepad
+begin
+  txt {* Plain bottom-up calculation: *}
+  have "a = b" sorry
+  also
+  have "b = c" sorry
+  also
+  have "c = d" sorry
+  finally
+  have "a = d" .
+
+  txt {* Variant using the @{text "\<dots>"} abbreviation: *}
+  have "a = b" sorry
+  also
+  have "\<dots> = c" sorry
+  also
+  have "\<dots> = d" sorry
+  finally
+  have "a = d" .
+
+  txt {* Top-down version with explicit claim at the head: *}
+  have "a = d"
+  proof -
+    have "a = b" sorry
+    also
+    have "\<dots> = c" sorry
+    also
+    have "\<dots> = d" sorry
+    finally
+    show ?thesis .
+  qed
+next
+  txt {* Mixed inequalities (require suitable base type): *}
+  fix a b c d :: nat
+
+  have "a < b" sorry
+  also
+  have "b \<le> c" sorry
+  also
+  have "c = d" sorry
+  finally
+  have "a < d" .
+end
+
+
+subsubsection {* Notes *}
+
+text {*
+  \begin{itemize}
+
+  \item The notion of @{text trans} rule is very general due to the
+  flexibility of Isabelle/Pure rule composition.
+
+  \item User applications may declare their own rules, with some care
+  about the operational details of higher-order unification.
+
+  \end{itemize}
+*}
+
+
+subsection {* Degenerate calculations and bigstep reasoning *}
+
+text {* The idea is to append @{text this} to @{text calculation},
+  without rule composition.  *}
+
+notepad
+begin
+  txt {* A vacuous proof: *}
+  have A sorry
+  moreover
+  have B sorry
+  moreover
+  have C sorry
+  ultimately
+  have A and B and C .
+next
+  txt {* Slightly more content (trivial bigstep reasoning): *}
+  have A sorry
+  moreover
+  have B sorry
+  moreover
+  have C sorry
+  ultimately
+  have "A \<and> B \<and> C" by blast
+next
+  txt {* More ambitious bigstep reasoning involving structured results: *}
+  have "A \<or> B \<or> C" sorry
+  moreover
+  { assume A have R sorry }
+  moreover
+  { assume B have R sorry }
+  moreover
+  { assume C have R sorry }
+  ultimately
+  have R by blast  -- {* ``big-bang integration'' of proof blocks (occasionally fragile) *}
+end
+
+
+section {* Induction *}
+
+subsection {* Induction as Natural Deduction *}
+
+text {* In principle, induction is just a special case of Natural
+  Deduction (see also \secref{sec:natural-deduction-synopsis}).  For
+  example: *}
+
+thm nat.induct
+print_statement nat.induct
+
+notepad
+begin
+  fix n :: nat
+  have "P n"
+  proof (rule nat.induct)  -- {* fragile rule application! *}
+    show "P 0" sorry
+  next
+    fix n :: nat
+    assume "P n"
+    show "P (Suc n)" sorry
+  qed
+end
+
+text {*
+  In practice, much more proof infrastructure is required.
+
+  The proof method @{method induct} provides:
+  \begin{itemize}
+
+  \item implicit rule selection and robust instantiation
+
+  \item context elements via symbolic case names
+
+  \item support for rule-structured induction statements, with local
+    parameters, premises, etc.
+
+  \end{itemize}
+*}
+
+notepad
+begin
+  fix n :: nat
+  have "P n"
+  proof (induct n)
+    case 0
+    show ?case sorry
+  next
+    case (Suc n)
+    from Suc.hyps show ?case sorry
+  qed
+end
+
+
+subsubsection {* Example *}
+
+text {*
+  The subsequent example combines the following proof patterns:
+  \begin{itemize}
+
+  \item outermost induction (over the datatype structure of natural
+  numbers), to decompose the proof problem in a top-down manner
+
+  \item calculational reasoning (\secref{sec:calculations-synopsis})
+  to compose the result in each case
+
+  \item solving local claims within the calculation by simplification
+
+  \end{itemize}
+*}
+
+lemma
+  fixes n :: nat
+  shows "(\<Sum>i=0..n. i) = n * (n + 1) div 2"
+proof (induct n)
+  case 0
+  have "(\<Sum>i=0..0. i) = (0::nat)" by simp
+  also have "\<dots> = 0 * (0 + 1) div 2" by simp
+  finally show ?case .
+next
+  case (Suc n)
+  have "(\<Sum>i=0..Suc n. i) = (\<Sum>i=0..n. i) + (n + 1)" by simp
+  also have "\<dots> = n * (n + 1) div 2 + (n + 1)" by (simp add: Suc.hyps)
+  also have "\<dots> = (n * (n + 1) + 2 * (n + 1)) div 2" by simp
+  also have "\<dots> = (Suc n * (Suc n + 1)) div 2" by simp
+  finally show ?case .
+qed
+
+text {* This demonstrates how induction proofs can be done without
+  having to consider the raw Natural Deduction structure. *}
+
+
+subsection {* Induction with local parameters and premises *}
+
+text {* Idea: Pure rule statements are passed through the induction
+  rule.  This achieves convenient proof patterns, thanks to some
+  internal trickery in the @{method induct} method.
+
+  Important: Using compact HOL formulae with @{text "\<forall>/\<longrightarrow>"} is a
+  well-known anti-pattern! It would produce useless formal noise.
+*}
+
+notepad
+begin
+  fix n :: nat
+  fix P :: "nat \<Rightarrow> bool"
+  fix Q :: "'a \<Rightarrow> nat \<Rightarrow> bool"
+
+  have "P n"
+  proof (induct n)
+    case 0
+    show "P 0" sorry
+  next
+    case (Suc n)
+    from `P n` show "P (Suc n)" sorry
+  qed
+
+  have "A n \<Longrightarrow> P n"
+  proof (induct n)
+    case 0
+    from `A 0` show "P 0" sorry
+  next
+    case (Suc n)
+    from `A n \<Longrightarrow> P n`
+      and `A (Suc n)` show "P (Suc n)" sorry
+  qed
+
+  have "\<And>x. Q x n"
+  proof (induct n)
+    case 0
+    show "Q x 0" sorry
+  next
+    case (Suc n)
+    from `\<And>x. Q x n` show "Q x (Suc n)" sorry
+    txt {* Local quantification admits arbitrary instances: *}
+    note `Q a n` and `Q b n`
+  qed
+end
+
+
+subsection {* Implicit induction context *}
+
+text {* The @{method induct} method can isolate local parameters and
+  premises directly from the given statement.  This is convenient in
+  practical applications, but requires some understanding of what is
+  going on internally (as explained above).  *}
+
+notepad
+begin
+  fix n :: nat
+  fix Q :: "'a \<Rightarrow> nat \<Rightarrow> bool"
+
+  fix x :: 'a
+  assume "A x n"
+  then have "Q x n"
+  proof (induct n arbitrary: x)
+    case 0
+    from `A x 0` show "Q x 0" sorry
+  next
+    case (Suc n)
+    from `\<And>x. A x n \<Longrightarrow> Q x n`  -- {* arbitrary instances can be produced here *}
+      and `A x (Suc n)` show "Q x (Suc n)" sorry
+  qed
+end
+
+
+subsection {* Advanced induction with term definitions *}
+
+text {* Induction over subexpressions of a certain shape is delicate
+  to formalize.  The Isar @{method induct} method provides
+  infrastructure for this.
+
+  Idea: sub-expressions of the problem are turned into a defined
+  induction variable; often accompanied by fixing auxiliary
+  parameters in the original expression.  *}
+
+notepad
+begin
+  fix a :: "'a \<Rightarrow> nat"
+  fix A :: "nat \<Rightarrow> bool"
+
+  assume "A (a x)"
+  then have "P (a x)"
+  proof (induct "a x" arbitrary: x)
+    case 0
+    note prem = `A (a x)`
+      and defn = `0 = a x`
+    show "P (a x)" sorry
+  next
+    case (Suc n)
+    note hyp = `\<And>x. n = a x \<Longrightarrow> A (a x) \<Longrightarrow> P (a x)`
+      and prem = `A (a x)`
+      and defn = `Suc n = a x`
+    show "P (a x)" sorry
+  qed
+end
+
+
+section {* Natural Deduction \label{sec:natural-deduction-synopsis} *}
+
+subsection {* Rule statements *}
+
+text {*
+  Isabelle/Pure ``theorems'' are always natural deduction rules,
+  which sometimes happen to consist of a conclusion only.
+
+  The framework connectives @{text "\<And>"} and @{text "\<Longrightarrow>"} indicate the
+  rule structure declaratively.  For example: *}
+
+thm conjI
+thm impI
+thm nat.induct
+
+text {*
+  The object-logic is embedded into the Pure framework via an implicit
+  derivability judgment @{term "Trueprop :: bool \<Rightarrow> prop"}.
+
+  Thus any HOL formula appears atomic to the Pure framework, while
+  the rule structure outlines the corresponding proof pattern.
+
+  This can be made explicit as follows:
+*}
+
+notepad
+begin
+  write Trueprop  ("Tr")
+
+  thm conjI
+  thm impI
+  thm nat.induct
+end
+
+text {*
+  Isar provides first-class notation for rule statements as follows.
+*}
+
+print_statement conjI
+print_statement impI
+print_statement nat.induct
+
+
+subsubsection {* Examples *}
+
+text {*
+  Introductions and eliminations of some standard connectives of
+  the object-logic can be written as rule statements as follows.  (The
+  proof ``@{command "by"}~@{method blast}'' serves as sanity check.)
+*}
+
+lemma "(P \<Longrightarrow> False) \<Longrightarrow> \<not> P" by blast
+lemma "\<not> P \<Longrightarrow> P \<Longrightarrow> Q" by blast
+
+lemma "P \<Longrightarrow> Q \<Longrightarrow> P \<and> Q" by blast
+lemma "P \<and> Q \<Longrightarrow> (P \<Longrightarrow> Q \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "P \<Longrightarrow> P \<or> Q" by blast
+lemma "Q \<Longrightarrow> P \<or> Q" by blast
+lemma "P \<or> Q \<Longrightarrow> (P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "(\<And>x. P x) \<Longrightarrow> (\<forall>x. P x)" by blast
+lemma "(\<forall>x. P x) \<Longrightarrow> P x" by blast
+
+lemma "P x \<Longrightarrow> (\<exists>x. P x)" by blast
+lemma "(\<exists>x. P x) \<Longrightarrow> (\<And>x. P x \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B" by blast
+lemma "x \<in> A \<inter> B \<Longrightarrow> (x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "x \<in> A \<Longrightarrow> x \<in> A \<union> B" by blast
+lemma "x \<in> B \<Longrightarrow> x \<in> A \<union> B" by blast
+lemma "x \<in> A \<union> B \<Longrightarrow> (x \<in> A \<Longrightarrow> R) \<Longrightarrow> (x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+
+subsection {* Isar context elements *}
+
+text {* We derive some results out of the blue, using Isar context
+  elements and some explicit blocks.  This illustrates their meaning
+  wrt.\ Pure connectives, without goal states getting in the way.  *}
+
+notepad
+begin
+  {
+    fix x
+    have "B x" sorry
+  }
+  have "\<And>x. B x" by fact
+
+next
+
+  {
+    assume A
+    have B sorry
+  }
+  have "A \<Longrightarrow> B" by fact
+
+next
+
+  {
+    def x \<equiv> t
+    have "B x" sorry
+  }
+  have "B t" by fact
+
+next
+
+  {
+    obtain x :: 'a where "B x" sorry
+    have C sorry
+  }
+  have C by fact
+
+end
+
+
+subsection {* Pure rule composition *}
+
+text {*
+  The Pure framework provides means for:
+
+  \begin{itemize}
+
+    \item backward-chaining of rules by @{inference resolution}
+
+    \item closing of branches by @{inference assumption}
+
+  \end{itemize}
+
+  Both principles involve higher-order unification of @{text \<lambda>}-terms
+  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller).  *}
+
+notepad
+begin
+  assume a: A and b: B
+  thm conjI
+  thm conjI [of A B]  -- "instantiation"
+  thm conjI [of A B, OF a b]  -- "instantiation and composition"
+  thm conjI [OF a b]  -- "composition via unification (trivial)"
+  thm conjI [OF `A` `B`]
+
+  thm conjI [OF disjI1]
+end
+
+text {* Note: Low-level rule composition is tedious and leads to
+  unreadable~/ unmaintainable expressions in the text.  *}
+
+
+subsection {* Structured backward reasoning *}
+
+text {* Idea: Canonical proof decomposition via @{command fix}~/
+  @{command assume}~/ @{command show}, where the body produces a
+  natural deduction rule to refine some goal.  *}
+
+notepad
+begin
+  fix A B :: "'a \<Rightarrow> bool"
+
+  have "\<And>x. A x \<Longrightarrow> B x"
+  proof -
+    fix x
+    assume "A x"
+    show "B x" sorry
+  qed
+
+  have "\<And>x. A x \<Longrightarrow> B x"
+  proof -
+    {
+      fix x
+      assume "A x"
+      show "B x" sorry
+    } -- "implicit block structure made explicit"
+    note `\<And>x. A x \<Longrightarrow> B x`
+      -- "side exit for the resulting rule"
+  qed
+end
+
+
+subsection {* Structured rule application *}
+
+text {*
+  Idea: Previous facts and new claims are composed with a rule from
+  the context (or background library).
+*}
+
+notepad
+begin
+  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  -- {* simple rule (Horn clause) *}
+
+  have A sorry  -- "prefix of facts via outer sub-proof"
+  then have C
+  proof (rule r1)
+    show B sorry  -- "remaining rule premises via inner sub-proof"
+  qed
+
+  have C
+  proof (rule r1)
+    show A sorry
+    show B sorry
+  qed
+
+  have A and B sorry
+  then have C
+  proof (rule r1)
+  qed
+
+  have A and B sorry
+  then have C by (rule r1)
+
+next
+
+  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  -- {* nested rule *}
+
+  have A sorry
+  then have C
+  proof (rule r2)
+    fix x
+    assume "B1 x"
+    show "B2 x" sorry
+  qed
+
+  txt {* The compound rule premise @{prop "\<And>x. B1 x \<Longrightarrow> B2 x"} is better
+    addressed via @{command fix}~/ @{command assume}~/ @{command show}
+    in the nested proof body.  *}
+end
+
+
+subsection {* Example: predicate logic *}
+
+text {*
+  Using the above principles, standard introduction and elimination proofs
+  of predicate logic connectives of HOL work as follows.
+*}
+
+notepad
+begin
+  have "A \<longrightarrow> B" and A sorry
+  then have B ..
+
+  have A sorry
+  then have "A \<or> B" ..
+
+  have B sorry
+  then have "A \<or> B" ..
+
+  have "A \<or> B" sorry
+  then have C
+  proof
+    assume A
+    then show C sorry
+  next
+    assume B
+    then show C sorry
+  qed
+
+  have A and B sorry
+  then have "A \<and> B" ..
+
+  have "A \<and> B" sorry
+  then have A ..
+
+  have "A \<and> B" sorry
+  then have B ..
+
+  have False sorry
+  then have A ..
+
+  have True ..
+
+  have "\<not> A"
+  proof
+    assume A
+    then show False sorry
+  qed
+
+  have "\<not> A" and A sorry
+  then have B ..
+
+  have "\<forall>x. P x"
+  proof
+    fix x
+    show "P x" sorry
+  qed
+
+  have "\<forall>x. P x" sorry
+  then have "P a" ..
+
+  have "\<exists>x. P x"
+  proof
+    show "P a" sorry
+  qed
+
+  have "\<exists>x. P x" sorry
+  then have C
+  proof
+    fix a
+    assume "P a"
+    show C sorry
+  qed
+
+  txt {* Less awkward version using @{command obtain}: *}
+  have "\<exists>x. P x" sorry
+  then obtain a where "P a" ..
+end
+
+text {* Further variations to illustrate Isar sub-proofs involving
+  @{command show}: *}
+
+notepad
+begin
+  have "A \<and> B"
+  proof  -- {* two strictly isolated subproofs *}
+    show A sorry
+  next
+    show B sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* one simultaneous sub-proof *}
+    show A and B sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* two subproofs in the same context *}
+    show A sorry
+    show B sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* swapped order *}
+    show B sorry
+    show A sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* sequential subproofs *}
+    show A sorry
+    show B using `A` sorry
+  qed
+end
+
+
+subsubsection {* Example: set-theoretic operators *}
+
+text {* There is nothing special about logical connectives (@{text
+  "\<and>"}, @{text "\<or>"}, @{text "\<forall>"}, @{text "\<exists>"} etc.).  Operators from
+  set theory or lattice theory work analogously.  It is only a matter
+  of rule declarations in the library; rules can also be specified
+  explicitly.
+*}
+
+notepad
+begin
+  have "x \<in> A" and "x \<in> B" sorry
+  then have "x \<in> A \<inter> B" ..
+
+  have "x \<in> A" sorry
+  then have "x \<in> A \<union> B" ..
+
+  have "x \<in> B" sorry
+  then have "x \<in> A \<union> B" ..
+
+  have "x \<in> A \<union> B" sorry
+  then have C
+  proof
+    assume "x \<in> A"
+    then show C sorry
+  next
+    assume "x \<in> B"
+    then show C sorry
+  qed
+
+next
+  have "x \<in> \<Inter>A"
+  proof
+    fix a
+    assume "a \<in> A"
+    show "x \<in> a" sorry
+  qed
+
+  have "x \<in> \<Inter>A" sorry
+  then have "x \<in> a"
+  proof
+    show "a \<in> A" sorry
+  qed
+
+  have "a \<in> A" and "x \<in> a" sorry
+  then have "x \<in> \<Union>A" ..
+
+  have "x \<in> \<Union>A" sorry
+  then obtain a where "a \<in> A" and "x \<in> a" ..
+end
+
+
+section {* Generalized elimination and cases *}
+
+subsection {* General elimination rules *}
+
+text {*
+  The general format of elimination rules is illustrated by the
+  following typical representatives:
+*}
+
+thm exE     -- {* local parameter *}
+thm conjE   -- {* local premises *}
+thm disjE   -- {* split into cases *}
+
+text {*
+  Combining these characteristics leads to the following general scheme
+  for elimination rules with cases:
+
+  \begin{itemize}
+
+  \item prefix of assumptions (or ``major premises'')
+
+  \item one or more cases that allow establishing the main conclusion
+    in an augmented context
+
+  \end{itemize}
+*}
+
+notepad
+begin
+  assume r:
+    "A1 \<Longrightarrow> A2 \<Longrightarrow>  (* assumptions *)
+      (\<And>x y. B1 x y \<Longrightarrow> C1 x y \<Longrightarrow> R) \<Longrightarrow>  (* case 1 *)
+      (\<And>x y. B2 x y \<Longrightarrow> C2 x y \<Longrightarrow> R) \<Longrightarrow>  (* case 2 *)
+      R  (* main conclusion *)"
+
+  have A1 and A2 sorry
+  then have R
+  proof (rule r)
+    fix x y
+    assume "B1 x y" and "C1 x y"
+    show ?thesis sorry
+  next
+    fix x y
+    assume "B2 x y" and "C2 x y"
+    show ?thesis sorry
+  qed
+end
+
+text {* Here @{text "?thesis"} is used to refer to the unchanged goal
+  statement.  *}
+
+
+subsection {* Rules with cases *}
+
+text {*
+  Applying an elimination rule to some goal leaves that goal unchanged,
+  but allows the context to be augmented in the sub-proof of each case.
+
+  Isar provides some infrastructure to support this:
+
+  \begin{itemize}
+
+  \item native language elements to state eliminations
+
+  \item symbolic case names
+
+  \item method @{method cases} to recover this structure in a
+  sub-proof
+
+  \end{itemize}
+*}
+
+print_statement exE
+print_statement conjE
+print_statement disjE
+
+lemma
+  assumes A1 and A2  -- {* assumptions *}
+  obtains
+    (case1)  x y where "B1 x y" and "C1 x y"
+  | (case2)  x y where "B2 x y" and "C2 x y"
+  sorry
+
+
+subsubsection {* Example *}
+
+lemma tertium_non_datur:
+  obtains
+    (T)  A
+  | (F)  "\<not> A"
+  by blast
+
+notepad
+begin
+  fix x y :: 'a
+  have C
+  proof (cases "x = y" rule: tertium_non_datur)
+    case T
+    from `x = y` show ?thesis sorry
+  next
+    case F
+    from `x \<noteq> y` show ?thesis sorry
+  qed
+end
+
+
+subsubsection {* Example *}
+
+text {*
+  Isabelle/HOL specification mechanisms (datatype, inductive, etc.)
+  provide suitable derived cases rules.
+*}
+
+datatype foo = Foo | Bar foo
+
+notepad
+begin
+  fix x :: foo
+  have C
+  proof (cases x)
+    case Foo
+    from `x = Foo` show ?thesis sorry
+  next
+    case (Bar a)
+    from `x = Bar a` show ?thesis sorry
+  qed
+end
+
+
+subsection {* Obtaining local contexts *}
+
+text {* A single ``case'' branch may be inlined into Isar proof text
+  via @{command obtain}.  This proves @{prop "(\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow>
+  thesis"} on the spot, and augments the context afterwards.  *}
+
+notepad
+begin
+  fix B :: "'a \<Rightarrow> bool"
+
+  obtain x where "B x" sorry
+  note `B x`
+
+  txt {* Conclusions from this context may not mention @{term x} again! *}
+  {
+    obtain x where "B x" sorry
+    from `B x` have C sorry
+  }
+  note `C`
+end
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -e
+
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_TOOL" logo Isar
+
+cp "$ISABELLE_HOME/src/Doc/iman.sty" .
+cp "$ISABELLE_HOME/src/Doc/extra.sty" .
+cp "$ISABELLE_HOME/src/Doc/isar.sty" .
+cp "$ISABELLE_HOME/src/Doc/ttbox.sty" .
+cp "$ISABELLE_HOME/src/Doc/underscore.sty" .
+cp "$ISABELLE_HOME/src/Doc/manual.bib" .
+
+./showsymbols "$ISABELLE_HOME/lib/texinputs/isabellesym.sty" > syms.tex
+
+"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
+
Binary file src/Doc/Isar-Ref/document/isar-vm.pdf has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/document/isar-vm.svg	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,460 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="543.02673"
+   height="215.66071"
+   id="svg2"
+   sodipodi:version="0.32"
+   inkscape:version="0.46"
+   version="1.0"
+   sodipodi:docname="isar-vm.svg"
+   inkscape:output_extension="org.inkscape.output.svg.inkscape">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="TriangleOutM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="TriangleOutM"
+       style="overflow:visible">
+      <path
+         id="path4130"
+         d="M 5.77,0 L -2.88,5 L -2.88,-5 L 5.77,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         transform="scale(0.4,0.4)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Mend"
+       style="overflow:visible">
+      <path
+         id="path3993"
+         d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Lend"
+       style="overflow:visible">
+      <path
+         id="path3207"
+         d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Lstart"
+       style="overflow:visible">
+      <path
+         id="path3204"
+         d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         transform="matrix(0.8,0,0,0.8,10,0)" />
+    </marker>
+    <inkscape:perspective
+       sodipodi:type="inkscape:persp3d"
+       inkscape:vp_x="0 : 526.18109 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_z="744.09448 : 526.18109 : 1"
+       inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+       id="perspective10" />
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     gridtolerance="10"
+     guidetolerance="10"
+     objecttolerance="10"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.4142136"
+     inkscape:cx="305.44602"
+     inkscape:cy="38.897723"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="true"
+     inkscape:snap-global="true"
+     units="mm"
+     inkscape:window-width="1226"
+     inkscape:window-height="951"
+     inkscape:window-x="28"
+     inkscape:window-y="47">
+    <inkscape:grid
+       type="xygrid"
+       id="grid2383"
+       visible="true"
+       enabled="true"
+       units="mm"
+       spacingx="2.5mm"
+       spacingy="2.5mm"
+       empspacing="2" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-44.641342,-76.87234)">
+    <g
+       id="g3448"
+       transform="translate(70.838012,79.725562)">
+      <rect
+         ry="17.67767"
+         y="131.52507"
+         x="212.09882"
+         height="53.149605"
+         width="70.866142"
+         id="rect3407"
+         style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <text
+         sodipodi:linespacing="100%"
+         id="text3409"
+         y="164.06471"
+         x="223.50845"
+         style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+         xml:space="preserve"><tspan
+           style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+           y="164.06471"
+           x="223.50845"
+           id="tspan3411"
+           sodipodi:role="line">chain</tspan></text>
+    </g>
+    <path
+       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99921262;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="M 424.72469,236.82544 L 356.83209,236.82544 L 356.83209,236.82544"
+       id="path3458" />
+    <path
+       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99921268;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="M 282.35183,236.82544 L 215.11403,236.82544 L 215.11403,236.82544"
+       id="path4771" />
+    <path
+       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;marker-start:none;marker-mid:none;marker-end:url(#TriangleOutM);stroke-opacity:1"
+       d="M 424.69726,192.5341 L 215.13005,192.5341"
+       id="path4773" />
+    <path
+       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
+       d="M 211.98429,148.24276 L 422.13162,148.24276"
+       id="path6883" />
+    <g
+       id="g3443"
+       transform="translate(70.866146,78.725567)">
+      <rect
+         ry="17.67767"
+         y="42.942394"
+         x="70.366531"
+         height="141.73228"
+         width="70.866142"
+         id="rect2586"
+         style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <text
+         sodipodi:linespacing="100%"
+         id="text3370"
+         y="116.62494"
+         x="79.682419"
+         style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+         xml:space="preserve"><tspan
+           style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+           y="116.62494"
+           x="79.682419"
+           id="tspan3372"
+           sodipodi:role="line">prove</tspan></text>
+    </g>
+    <path
+       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
+       d="M 176.66575,92.035445 L 176.66575,118.61025"
+       id="path7412" />
+    <path
+       sodipodi:type="arc"
+       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path9011"
+       sodipodi:cx="119.58662"
+       sodipodi:cy="266.74686"
+       sodipodi:rx="93.01181"
+       sodipodi:ry="53.149605"
+       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+       transform="matrix(0.2378166,0,0,-0.2269133,90.621413,253.06251)"
+       sodipodi:start="0.29223018"
+       sodipodi:end="5.9921036"
+       sodipodi:open="true" />
+    <g
+       id="g3453"
+       transform="translate(70.866151,78.725565)">
+      <rect
+         ry="17.67767"
+         y="42.942394"
+         x="353.83112"
+         height="141.73228"
+         width="70.866142"
+         id="rect3381"
+         style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <text
+         sodipodi:linespacing="100%"
+         id="text3383"
+         y="119.31244"
+         x="365.98294"
+         style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+         xml:space="preserve"><tspan
+           style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+           y="119.31244"
+           x="365.98294"
+           sodipodi:role="line"
+           id="tspan3387">state</tspan></text>
+    </g>
+    <path
+       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
+       d="M 460.13031,263.40024 L 460.13031,289.97505"
+       id="path7941" />
+    <path
+       sodipodi:type="arc"
+       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path10594"
+       sodipodi:cx="119.58662"
+       sodipodi:cy="266.74686"
+       sodipodi:rx="93.01181"
+       sodipodi:ry="53.149605"
+       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+       transform="matrix(-0.2378166,0,0,0.2269133,546.17466,132.00569)"
+       sodipodi:start="0.29223018"
+       sodipodi:end="5.9921036"
+       sodipodi:open="true" />
+    <path
+       sodipodi:type="arc"
+       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:bevel;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path12210"
+       sodipodi:cx="119.58662"
+       sodipodi:cy="266.74686"
+       sodipodi:rx="93.01181"
+       sodipodi:ry="53.149605"
+       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+       transform="matrix(-0.2378166,0,0,0.2269133,546.17465,87.714359)"
+       sodipodi:start="0.29223018"
+       sodipodi:end="5.9921036"
+       sodipodi:open="true" />
+    <path
+       sodipodi:type="arc"
+       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-start:none;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path12212"
+       sodipodi:cx="119.58662"
+       sodipodi:cy="266.74686"
+       sodipodi:rx="93.01181"
+       sodipodi:ry="53.149605"
+       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+       transform="matrix(-0.2378166,0,0,0.2269133,546.17465,176.29703)"
+       sodipodi:start="0.29223018"
+       sodipodi:end="5.9921036"
+       sodipodi:open="true" />
+    <path
+       sodipodi:type="arc"
+       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path12214"
+       sodipodi:cx="119.58662"
+       sodipodi:cy="266.74686"
+       sodipodi:rx="93.01181"
+       sodipodi:ry="53.149605"
+       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+       transform="matrix(0,0.2378166,0.2269133,0,399.60191,71.056696)"
+       sodipodi:start="0.29223018"
+       sodipodi:end="5.9921036"
+       sodipodi:open="true" />
+    <text
+       xml:space="preserve"
+       style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="173.49998"
+       y="97.094513"
+       id="text19307"
+       sodipodi:linespacing="100%"
+       transform="translate(17.216929,6.5104864)"><tspan
+         sodipodi:role="line"
+         id="tspan19309"
+         x="173.49998"
+         y="97.094513" /></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="185.52402"
+       y="110.07987"
+       id="text19311"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19313"
+         x="185.52402"
+         y="110.07987">theorem</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="389.99997"
+       y="11.594519"
+       id="text19315"
+       sodipodi:linespacing="100%"
+       transform="translate(17.216929,6.5104864)"><tspan
+         sodipodi:role="line"
+         id="tspan19317"
+         x="389.99997"
+         y="11.594519" /></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="468.98859"
+       y="280.47543"
+       id="text19319"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19321"
+         x="468.98859"
+         y="280.47543">qed</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="549.06946"
+       y="239.58423"
+       id="text19323"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19325"
+         x="549.06946"
+         y="239.58423">qed</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="549.39172"
+       y="191.26213"
+       id="text19327"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19329"
+         x="549.39172"
+         y="191.26213">fix</tspan><tspan
+         sodipodi:role="line"
+         x="549.39172"
+         y="201.26213"
+         id="tspan19331">assume</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="548.71301"
+       y="146.97079"
+       id="text19333"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19335"
+         x="548.71301"
+         y="146.97079">{ }</tspan><tspan
+         sodipodi:role="line"
+         x="548.71301"
+         y="156.97079"
+         id="tspan19337">next</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="477.84686"
+       y="98.264297"
+       id="text19339"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         x="477.84686"
+         y="98.264297"
+         id="tspan19343">note</tspan><tspan
+         sodipodi:role="line"
+         x="477.84686"
+         y="108.2643"
+         id="tspan19358">let</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="43.791733"
+       y="190.29289"
+       id="text19345"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19347"
+         x="43.791733"
+         y="190.29289">using</tspan><tspan
+         sodipodi:role="line"
+         x="43.791733"
+         y="200.29289"
+         id="tspan19349">unfolding</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="378.65891"
+       y="230.52518"
+       id="text19360"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19362"
+         x="378.65891"
+         y="230.52518">then</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:150%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="233.98795"
+       y="233.05347"
+       id="text19364"
+       sodipodi:linespacing="150%"><tspan
+         sodipodi:role="line"
+         x="233.98795"
+         y="233.05347"
+         id="tspan19368">have</tspan><tspan
+         sodipodi:role="line"
+         x="233.98795"
+         y="248.05347"
+         id="tspan19370">show</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:150%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="305.89636"
+       y="188.76213"
+       id="text19374"
+       sodipodi:linespacing="150%"><tspan
+         sodipodi:role="line"
+         x="305.89636"
+         y="188.76213"
+         id="tspan19376">have</tspan><tspan
+         sodipodi:role="line"
+         x="305.89636"
+         y="203.76213"
+         id="tspan19378">show</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+       x="303.82324"
+       y="141.07895"
+       id="text19380"
+       sodipodi:linespacing="100%"><tspan
+         sodipodi:role="line"
+         id="tspan19382"
+         x="303.82324"
+         y="141.07895">proof</tspan></text>
+  </g>
+</svg>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/document/root.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,95 @@
+\documentclass[12pt,a4paper,fleqn]{report}
+\usepackage[T1]{fontenc}
+\usepackage{amssymb}
+\usepackage{eurosym}
+\usepackage[english]{babel}
+\usepackage[only,bigsqcap]{stmaryrd}
+\usepackage{textcomp}
+\usepackage{latexsym}
+\usepackage{graphicx}
+\let\intorig=\int  %iman.sty redefines \int
+\usepackage{iman,extra,isar,proof}
+\usepackage[nohyphen,strings]{underscore}
+\usepackage{isabelle}
+\usepackage{isabellesym}
+\usepackage{railsetup}
+\usepackage{ttbox}
+\usepackage{supertabular}
+\usepackage{style}
+\usepackage{pdfsetup}
+
+\hyphenation{Isabelle}
+\hyphenation{Isar}
+
+\isadroptag{theory}
+\title{\includegraphics[scale=0.5]{isabelle_isar} \\[4ex] The Isabelle/Isar Reference Manual}
+\author{\emph{Makarius Wenzel} \\[3ex]
+  With Contributions by
+  Clemens Ballarin,
+  Stefan Berghofer, \\
+  Jasmin Blanchette,
+  Timothy Bourke,
+  Lukas Bulwahn, \\
+  Amine Chaieb,
+  Lucas Dixon,
+  Florian Haftmann, \\
+  Brian Huffman,
+  Gerwin Klein,
+  Alexander Krauss, \\
+  Ond\v{r}ej Kun\v{c}ar,
+  Andreas Lochbihler,
+  Tobias Nipkow, \\
+  Lars Noschinski,
+  David von Oheimb,
+  Larry Paulson, \\
+  Sebastian Skalberg,
+  Christian Sternagel
+}
+
+\makeindex
+
+\chardef\charbackquote=`\`
+\newcommand{\backquote}{\mbox{\tt\charbackquote}}
+
+
+\begin{document}
+
+\maketitle 
+
+\pagenumbering{roman}
+{\def\isamarkupchapter#1{\chapter*{#1}}\input{Preface.tex}}
+\tableofcontents
+\clearfirst
+
+\part{Basic Concepts}
+\input{Synopsis.tex}
+\input{Framework.tex}
+\input{First_Order_Logic.tex}
+\part{General Language Elements}
+\input{Outer_Syntax.tex}
+\input{Document_Preparation.tex}
+\input{Spec.tex}
+\input{Proof.tex}
+\input{Inner_Syntax.tex}
+\input{Misc.tex}
+\input{Generic.tex}
+\part{Isabelle/HOL}\label{part:hol}
+\input{HOL_Specific.tex}
+
+\part{Appendix}
+\appendix
+\input{Quick_Reference.tex}
+\let\int\intorig
+\input{Symbols.tex}
+\input{ML_Tactic.tex}
+
+\begingroup
+  \tocentry{\bibname}
+  \bibliographystyle{abbrv} \small\raggedright\frenchspacing
+  \bibliography{manual}
+\endgroup
+
+\tocentry{\indexname}
+\printindex
+
+\end{document}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/document/showsymbols	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,26 @@
+#!/usr/bin/env perl
+
+print "\\begin{supertabular}{ll\@{\\qquad}ll}\n";
+
+$eol = "&";
+
+while (<ARGV>) {
+    if (m/^\\newcommand\{\\isasym([A-Za-z]+)\}/) {
+       print "\\verb,\\<$1>, & {\\isasym$1} $eol\n";
+#       print "\\verb,\\<$1>, & \\isactrlbold{\\isasym$1}~{\\isasym$1} $eol\n";
+#       print "\\verb,\\<$1>, & B\\isactrlsup{\\isasym$1} $eol\n";
+#       print "\\verb,\\<$1>, & B\\isactrlsub{\\isasym$1} $eol\n";
+        if ("$eol" eq "&") {
+            $eol = "\\\\";
+        } else {
+            $eol = "&";
+        }
+    }
+}
+
+if ("$eol" eq "\\\\") {
+    print "$eol\n";
+}
+
+print "\\end{supertabular}\n";
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/document/style.sty	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,48 @@
+%% toc
+\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
+\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
+
+\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
+
+%% references
+\newcommand{\partref}[1]{part~\ref{#1}}
+\newcommand{\secref}[1]{\S\ref{#1}}
+\newcommand{\chref}[1]{chapter~\ref{#1}}
+\newcommand{\Chref}[1]{Chapter~\ref{#1}}
+\newcommand{\appref}[1]{appendix~\ref{#1}}
+\newcommand{\Appref}[1]{Appendix~\ref{#1}}
+\newcommand{\figref}[1]{figure~\ref{#1}}
+\newcommand{\Figref}[1]{Figure~\ref{#1}}
+
+%% Isar
+\newcommand{\isasymBBAR}{{\,\newdimen{\tmpheight}\settoheight\tmpheight{\isacharbar}\rule{1pt}{\tmpheight}\,}}
+\isafoldtag{noproof}\def\isafoldnoproof{~\isafold{proof}}
+\newcommand{\isadigitreset}{\def\isadigit##1{##1}}
+\renewcommand{\isacommand}[1]{\isakeyword{\isadigitreset#1}}
+
+%% ML
+\newenvironment{mldecls}{\par\noindent\begingroup\def\isanewline{\\}\begin{tabular}{ll}}{\end{tabular}\medskip\endgroup}
+
+\renewcommand{\isatagML}{\begingroup\isabellestyle{default}\isastyle\isadigitreset}
+\renewcommand{\endisatagML}{\endgroup}
+
+%% math
+\newcommand{\isasymstrut}{\isamath{\mathstrut}}
+\newcommand{\isasymvartheta}{\isamath{\,\theta}}
+\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
+\renewcommand{\isadigit}[1]{\isamath{#1}}
+\newcommand{\text}[1]{\mbox{#1}}
+
+%% global style options
+\pagestyle{headings}
+\sloppy
+
+\parindent 0pt\parskip 0.5ex
+
+\isabellestyle{literalunderscore}
+
+\newcommand{\isasymdash}{\isatext{\mbox{-}}}
+
+\railtermfont{\isabellestyle{tt}}
+\railnontermfont{\isabellestyle{literalunderscore}}
+\railnamefont{\isabellestyle{literalunderscore}}
--- a/src/Doc/IsarImplementation/Base.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-theory Base
-imports Main
-begin
-
-ML_file "../antiquote_setup.ML"
-
-end
--- a/src/Doc/IsarImplementation/Eq.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-theory Eq
-imports Base
-begin
-
-chapter {* Equational reasoning *}
-
-text {* Equality is one of the most fundamental concepts of
-  mathematics.  The Isabelle/Pure logic (\chref{ch:logic}) provides a
-  builtin relation @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} that expresses equality
-  of arbitrary terms (or propositions) at the framework level, as
-  expressed by certain basic inference rules (\secref{sec:eq-rules}).
-
-  Equational reasoning means to replace equals by equals, using
-  reflexivity and transitivity to form chains of replacement steps,
-  and congruence rules to access sub-structures.  Conversions
-  (\secref{sec:conv}) provide a convenient framework to compose basic
-  equational steps to build specific equational reasoning tools.
-
-  Higher-order matching is able to provide suitable instantiations for
-  giving equality rules, which leads to the versatile concept of
-  @{text "\<lambda>"}-term rewriting (\secref{sec:rewriting}).  Internally
-  this is based on the general-purpose Simplifier engine of Isabelle,
-  which is more specific and more efficient than plain conversions.
-
-  Object-logics usually introduce specific notions of equality or
-  equivalence, and relate it with the Pure equality.  This enables to
-  re-use the Pure tools for equational reasoning for particular
-  object-logic connectives as well.
-*}
-
-
-section {* Basic equality rules \label{sec:eq-rules} *}
-
-text {* Isabelle/Pure uses @{text "\<equiv>"} for equality of arbitrary
-  terms, which includes equivalence of propositions of the logical
-  framework.  The conceptual axiomatization of the constant @{text "\<equiv>
-  :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} is given in \figref{fig:pure-equality}.  The
-  inference kernel presents slightly different equality rules, which
-  may be understood as derived rules from this minimal axiomatization.
-  The Pure theory also provides some theorems that express the same
-  reasoning schemes as theorems that can be composed like object-level
-  rules as explained in \secref{sec:obj-rules}.
-
-  For example, @{ML Thm.symmetric} as Pure inference is an ML function
-  that maps a theorem @{text "th"} stating @{text "t \<equiv> u"} to one
-  stating @{text "u \<equiv> t"}.  In contrast, @{thm [source]
-  Pure.symmetric} as Pure theorem expresses the same reasoning in
-  declarative form.  If used like @{text "th [THEN Pure.symmetric]"}
-  in Isar source notation, it achieves a similar effect as the ML
-  inference function, although the rule attribute @{attribute THEN} or
-  ML operator @{ML "op RS"} involve the full machinery of higher-order
-  unification (modulo @{text "\<beta>\<eta>"}-conversion) and lifting of @{text
-  "\<And>/\<Longrightarrow>"} contexts. *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Thm.reflexive: "cterm -> thm"} \\
-  @{index_ML Thm.symmetric: "thm -> thm"} \\
-  @{index_ML Thm.transitive: "thm -> thm -> thm"} \\
-  @{index_ML Thm.abstract_rule: "string -> cterm -> thm -> thm"} \\
-  @{index_ML Thm.combination: "thm -> thm -> thm"} \\[0.5ex]
-  @{index_ML Thm.equal_intr: "thm -> thm -> thm"} \\
-  @{index_ML Thm.equal_elim: "thm -> thm -> thm"} \\
-  \end{mldecls}
-
-  See also @{file "~~/src/Pure/thm.ML" } for further description of
-  these inference rules, and a few more for primitive @{text "\<beta>"} and
-  @{text "\<eta>"} conversions.  Note that @{text "\<alpha>"} conversion is
-  implicit due to the representation of terms with de-Bruijn indices
-  (\secref{sec:terms}). *}
-
-
-section {* Conversions \label{sec:conv} *}
-
-text {*
-  %FIXME
-
-  The classic article that introduces the concept of conversion (for
-  Cambridge LCF) is \cite{paulson:1983}.
-*}
-
-
-section {* Rewriting \label{sec:rewriting} *}
-
-text {* Rewriting normalizes a given term (theorem or goal) by
-  replacing instances of given equalities @{text "t \<equiv> u"} in subterms.
-  Rewriting continues until no rewrites are applicable to any subterm.
-  This may be used to unfold simple definitions of the form @{text "f
-  x\<^sub>1 \<dots> x\<^sub>n \<equiv> u"}, but is slightly more general than that.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML rewrite_rule: "Proof.context -> thm list -> thm -> thm"} \\
-  @{index_ML rewrite_goals_rule: "Proof.context -> thm list -> thm -> thm"} \\
-  @{index_ML rewrite_goal_tac: "Proof.context -> thm list -> int -> tactic"} \\
-  @{index_ML rewrite_goals_tac: "Proof.context -> thm list -> tactic"} \\
-  @{index_ML fold_goals_tac: "Proof.context -> thm list -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML rewrite_rule}~@{text "ctxt rules thm"} rewrites the whole
-  theorem by the given rules.
-
-  \item @{ML rewrite_goals_rule}~@{text "ctxt rules thm"} rewrites the
-  outer premises of the given theorem.  Interpreting the same as a
-  goal state (\secref{sec:tactical-goals}) it means to rewrite all
-  subgoals (in the same manner as @{ML rewrite_goals_tac}).
-
-  \item @{ML rewrite_goal_tac}~@{text "ctxt rules i"} rewrites subgoal
-  @{text "i"} by the given rewrite rules.
-
-  \item @{ML rewrite_goals_tac}~@{text "ctxt rules"} rewrites all subgoals
-  by the given rewrite rules.
-
-  \item @{ML fold_goals_tac}~@{text "ctxt rules"} essentially uses @{ML
-  rewrite_goals_tac} with the symmetric form of each member of @{text
-  "rules"}, re-ordered to fold longer expression first.  This supports
-  to idea to fold primitive definitions that appear in expended form
-  in the proof state.
-
-  \end{description}
-*}
-
-end
--- a/src/Doc/IsarImplementation/Integration.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,291 +0,0 @@
-theory Integration
-imports Base
-begin
-
-chapter {* System integration *}
-
-section {* Isar toplevel \label{sec:isar-toplevel} *}
-
-text {* The Isar toplevel may be considered the central hub of the
-  Isabelle/Isar system, where all key components and sub-systems are
-  integrated into a single read-eval-print loop of Isar commands,
-  which also incorporates the underlying ML compiler.
-
-  Isabelle/Isar departs from the original ``LCF system architecture''
-  where ML was really The Meta Language for defining theories and
-  conducting proofs.  Instead, ML now only serves as the
-  implementation language for the system (and user extensions), while
-  the specific Isar toplevel supports the concepts of theory and proof
-  development natively.  This includes the graph structure of theories
-  and the block structure of proofs, support for unlimited undo,
-  facilities for tracing, debugging, timing, profiling etc.
-
-  \medskip The toplevel maintains an implicit state, which is
-  transformed by a sequence of transitions -- either interactively or
-  in batch-mode.
-
-  The toplevel state is a disjoint sum of empty @{text toplevel}, or
-  @{text theory}, or @{text proof}.  On entering the main Isar loop we
-  start with an empty toplevel.  A theory is commenced by giving a
-  @{text \<THEORY>} header; within a theory we may issue theory
-  commands such as @{text \<DEFINITION>}, or state a @{text
-  \<THEOREM>} to be proven.  Now we are within a proof state, with a
-  rich collection of Isar proof commands for structured proof
-  composition, or unstructured proof scripts.  When the proof is
-  concluded we get back to the theory, which is then updated by
-  storing the resulting fact.  Further theory declarations or theorem
-  statements with proofs may follow, until we eventually conclude the
-  theory development by issuing @{text \<END>}.  The resulting theory
-  is then stored within the theory database and we are back to the
-  empty toplevel.
-
-  In addition to these proper state transformations, there are also
-  some diagnostic commands for peeking at the toplevel state without
-  modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term},
-  \isakeyword{print-cases}).
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Toplevel.state} \\
-  @{index_ML_exception Toplevel.UNDEF} \\
-  @{index_ML Toplevel.is_toplevel: "Toplevel.state -> bool"} \\
-  @{index_ML Toplevel.theory_of: "Toplevel.state -> theory"} \\
-  @{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
-  @{index_ML Toplevel.timing: "bool Unsynchronized.ref"} \\
-  @{index_ML Toplevel.profiling: "int Unsynchronized.ref"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Toplevel.state} represents Isar toplevel
-  states, which are normally manipulated through the concept of
-  toplevel transitions only (\secref{sec:toplevel-transition}).  Also
-  note that a raw toplevel state is subject to the same linearity
-  restrictions as a theory context (cf.~\secref{sec:context-theory}).
-
-  \item @{ML Toplevel.UNDEF} is raised for undefined toplevel
-  operations.  Many operations work only partially for certain cases,
-  since @{ML_type Toplevel.state} is a sum type.
-
-  \item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
-  toplevel state.
-
-  \item @{ML Toplevel.theory_of}~@{text "state"} selects the
-  background theory of @{text "state"}, raises @{ML Toplevel.UNDEF}
-  for an empty toplevel state.
-
-  \item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
-  state if available, otherwise raises @{ML Toplevel.UNDEF}.
-
-  \item @{ML "Toplevel.timing := true"} makes the toplevel print timing
-  information for each Isar command being executed.
-
-  \item @{ML Toplevel.profiling}~@{ML_text ":="}~@{text "n"} controls
-  low-level profiling of the underlying ML runtime system.  For
-  Poly/ML, @{text "n = 1"} means time and @{text "n = 2"} space
-  profiling.
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "Isar.state"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{text "@{Isar.state}"} refers to Isar toplevel state at that
-  point --- as abstract value.
-
-  This only works for diagnostic ML commands, such as @{command
-  ML_val} or @{command ML_command}.
-
-  \end{description}
-*}
-
-
-subsection {* Toplevel transitions \label{sec:toplevel-transition} *}
-
-text {*
-  An Isar toplevel transition consists of a partial function on the
-  toplevel state, with additional information for diagnostics and
-  error reporting: there are fields for command name, source position,
-  optional source text, as well as flags for interactive-only commands
-  (which issue a warning in batch-mode), printing of result state,
-  etc.
-
-  The operational part is represented as the sequential union of a
-  list of partial functions, which are tried in turn until the first
-  one succeeds.  This acts like an outer case-expression for various
-  alternative state transitions.  For example, \isakeyword{qed} works
-  differently for a local proofs vs.\ the global ending of the main
-  proof.
-
-  Toplevel transitions are composed via transition transformers.
-  Internally, Isar commands are put together from an empty transition
-  extended by name and source position.  It is then left to the
-  individual command parser to turn the given concrete syntax into a
-  suitable transition transformer that adjoins actual operations on a
-  theory or proof state etc.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Toplevel.print: "Toplevel.transition -> Toplevel.transition"} \\
-  @{index_ML Toplevel.keep: "(Toplevel.state -> unit) ->
-  Toplevel.transition -> Toplevel.transition"} \\
-  @{index_ML Toplevel.theory: "(theory -> theory) ->
-  Toplevel.transition -> Toplevel.transition"} \\
-  @{index_ML Toplevel.theory_to_proof: "(theory -> Proof.state) ->
-  Toplevel.transition -> Toplevel.transition"} \\
-  @{index_ML Toplevel.proof: "(Proof.state -> Proof.state) ->
-  Toplevel.transition -> Toplevel.transition"} \\
-  @{index_ML Toplevel.proofs: "(Proof.state -> Proof.state Seq.result Seq.seq) ->
-  Toplevel.transition -> Toplevel.transition"} \\
-  @{index_ML Toplevel.end_proof: "(bool -> Proof.state -> Proof.context) ->
-  Toplevel.transition -> Toplevel.transition"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Toplevel.print}~@{text "tr"} sets the print flag, which
-  causes the toplevel loop to echo the result state (in interactive
-  mode).
-
-  \item @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
-  function.
-
-  \item @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
-  transformer.
-
-  \item @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
-  goal function, which turns a theory into a proof state.  The theory
-  may be changed before entering the proof; the generic Isar goal
-  setup includes an argument that specifies how to apply the proven
-  result to the theory, when the proof is finished.
-
-  \item @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
-  proof command, with a singleton result.
-
-  \item @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
-  command, with zero or more result states (represented as a lazy
-  list).
-
-  \item @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
-  proof command, that returns the resulting theory, after storing the
-  resulting facts in the context etc.
-
-  \end{description}
-*}
-
-
-section {* Theory database \label{sec:theory-database} *}
-
-text {*
-  The theory database maintains a collection of theories, together
-  with some administrative information about their original sources,
-  which are held in an external store (i.e.\ some directory within the
-  regular file system).
-
-  The theory database is organized as a directed acyclic graph;
-  entries are referenced by theory name.  Although some additional
-  interfaces allow to include a directory specification as well, this
-  is only a hint to the underlying theory loader.  The internal theory
-  name space is flat!
-
-  Theory @{text A} is associated with the main theory file @{text
-  A}\verb,.thy,, which needs to be accessible through the theory
-  loader path.  Any number of additional ML source files may be
-  associated with each theory, by declaring these dependencies in the
-  theory header as @{text \<USES>}, and loading them consecutively
-  within the theory context.  The system keeps track of incoming ML
-  sources and associates them with the current theory.
-
-  The basic internal actions of the theory database are @{text
-  "update"} and @{text "remove"}:
-
-  \begin{itemize}
-
-  \item @{text "update A"} introduces a link of @{text "A"} with a
-  @{text "theory"} value of the same name; it asserts that the theory
-  sources are now consistent with that value;
-
-  \item @{text "remove A"} deletes entry @{text "A"} from the theory
-  database.
-  
-  \end{itemize}
-
-  These actions are propagated to sub- or super-graphs of a theory
-  entry as expected, in order to preserve global consistency of the
-  state of all loaded theories with the sources of the external store.
-  This implies certain causalities between actions: @{text "update"}
-  or @{text "remove"} of an entry will @{text "remove"} all
-  descendants.
-
-  \medskip There are separate user-level interfaces to operate on the
-  theory database directly or indirectly.  The primitive actions then
-  just happen automatically while working with the system.  In
-  particular, processing a theory header @{text "\<THEORY> A
-  \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"} ensures that the
-  sub-graph of the collective imports @{text "B\<^sub>1 \<dots> B\<^sub>n"}
-  is up-to-date, too.  Earlier theories are reloaded as required, with
-  @{text update} actions proceeding in topological order according to
-  theory dependencies.  There may be also a wave of implied @{text
-  remove} actions for derived theory nodes until a stable situation
-  is achieved eventually.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML use_thy: "string -> unit"} \\
-  @{index_ML use_thys: "string list -> unit"} \\
-  @{index_ML Thy_Info.get_theory: "string -> theory"} \\
-  @{index_ML Thy_Info.remove_thy: "string -> unit"} \\[1ex]
-  @{index_ML Thy_Info.register_thy: "theory -> unit"} \\[1ex]
-  @{ML_text "datatype action = Update | Remove"} \\
-  @{index_ML Thy_Info.add_hook: "(Thy_Info.action -> string -> unit) -> unit"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
-  up-to-date wrt.\ the external file store, reloading outdated
-  ancestors as required.  In batch mode, the simultaneous @{ML
-  use_thys} should be used exclusively.
-
-  \item @{ML use_thys} is similar to @{ML use_thy}, but handles
-  several theories simultaneously.  Thus it acts like processing the
-  import header of a theory, without performing the merge of the
-  result.  By loading a whole sub-graph of theories like that, the
-  intrinsic parallelism can be exploited by the system, to speedup
-  loading.
-
-  \item @{ML Thy_Info.get_theory}~@{text A} retrieves the theory value
-  presently associated with name @{text A}.  Note that the result
-  might be outdated.
-
-  \item @{ML Thy_Info.remove_thy}~@{text A} deletes theory @{text A} and all
-  descendants from the theory database.
-
-  \item @{ML Thy_Info.register_thy}~@{text "text thy"} registers an
-  existing theory value with the theory loader database and updates
-  source version information according to the current file-system
-  state.
-
-  \item @{ML "Thy_Info.add_hook"}~@{text f} registers function @{text
-  f} as a hook for theory database actions.  The function will be
-  invoked with the action and theory name being involved; thus derived
-  actions may be performed in associated system components, e.g.\
-  maintaining the state of an editor for the theory sources.
-
-  The kind and order of actions occurring in practice depends both on
-  user interactions and the internal process of resolving theory
-  imports.  Hooks should not rely on a particular policy here!  Any
-  exceptions raised by the hook are ignored.
-
-  \end{description}
-*}
-
-end
--- a/src/Doc/IsarImplementation/Isar.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,585 +0,0 @@
-theory Isar
-imports Base
-begin
-
-chapter {* Isar language elements *}
-
-text {* The Isar proof language (see also
-  \cite[\S2]{isabelle-isar-ref}) consists of three main categories of
-  language elements as follows.
-
-  \begin{enumerate}
-
-  \item Proof \emph{commands} define the primary language of
-  transactions of the underlying Isar/VM interpreter.  Typical
-  examples are @{command "fix"}, @{command "assume"}, @{command
-  "show"}, @{command "proof"}, and @{command "qed"}.
-
-  Composing proof commands according to the rules of the Isar/VM leads
-  to expressions of structured proof text, such that both the machine
-  and the human reader can give it a meaning as formal reasoning.
-
-  \item Proof \emph{methods} define a secondary language of mixed
-  forward-backward refinement steps involving facts and goals.
-  Typical examples are @{method rule}, @{method unfold}, and @{method
-  simp}.
-
-  Methods can occur in certain well-defined parts of the Isar proof
-  language, say as arguments to @{command "proof"}, @{command "qed"},
-  or @{command "by"}.
-
-  \item \emph{Attributes} define a tertiary language of small
-  annotations to theorems being defined or referenced.  Attributes can
-  modify both the context and the theorem.
-
-  Typical examples are @{attribute intro} (which affects the context),
-  and @{attribute symmetric} (which affects the theorem).
-
-  \end{enumerate}
-*}
-
-
-section {* Proof commands *}
-
-text {* A \emph{proof command} is state transition of the Isar/VM
-  proof interpreter.
-
-  In principle, Isar proof commands could be defined in user-space as
-  well.  The system is built like that in the first place: some
-  commands are primitive, the others are defined as derived
-  elements.  Adding to the genuine structured proof language requires
-  profound understanding of the Isar/VM machinery, though, so this is
-  beyond the scope of this manual.
-
-  What can be done realistically is to define some diagnostic commands
-  that inspect the general state of the Isar/VM, and report some
-  feedback to the user.  Typically this involves checking the
-  linguistic \emph{mode} of a proof state, or peeking at the pending
-  goals (if available).
-
-  Another common application is to define a toplevel command that
-  poses a problem to the user as Isar proof state and processes the
-  final result relatively to the context.  Thus a proof can be
-  incorporated into the context of some user-space tool, without
-  modifying the Isar proof language itself.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Proof.state} \\
-  @{index_ML Proof.assert_forward: "Proof.state -> Proof.state"} \\
-  @{index_ML Proof.assert_chain: "Proof.state -> Proof.state"} \\
-  @{index_ML Proof.assert_backward: "Proof.state -> Proof.state"} \\
-  @{index_ML Proof.simple_goal: "Proof.state -> {context: Proof.context, goal: thm}"} \\
-  @{index_ML Proof.goal: "Proof.state ->
-  {context: Proof.context, facts: thm list, goal: thm}"} \\
-  @{index_ML Proof.raw_goal: "Proof.state ->
-  {context: Proof.context, facts: thm list, goal: thm}"} \\
-  @{index_ML Proof.theorem: "Method.text option ->
-  (thm list list -> Proof.context -> Proof.context) ->
-  (term * term list) list list -> Proof.context -> Proof.state"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Proof.state} represents Isar proof states.
-  This is a block-structured configuration with proof context,
-  linguistic mode, and optional goal.  The latter consists of goal
-  context, goal facts (``@{text "using"}''), and tactical goal state
-  (see \secref{sec:tactical-goals}).
-
-  The general idea is that the facts shall contribute to the
-  refinement of some parts of the tactical goal --- how exactly is
-  defined by the proof method that is applied in that situation.
-
-  \item @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
-  Proof.assert_backward} are partial identity functions that fail
-  unless a certain linguistic mode is active, namely ``@{text
-  "proof(state)"}'', ``@{text "proof(chain)"}'', ``@{text
-  "proof(prove)"}'', respectively (using the terminology of
-  \cite{isabelle-isar-ref}).
-
-  It is advisable to study the implementations of existing proof commands
-  for suitable modes to be asserted.
-
-  \item @{ML Proof.simple_goal}~@{text "state"} returns the structured
-  Isar goal (if available) in the form seen by ``simple'' methods
-  (like @{method simp} or @{method blast}).  The Isar goal facts are
-  already inserted as premises into the subgoals, which are presented
-  individually as in @{ML Proof.goal}.
-
-  \item @{ML Proof.goal}~@{text "state"} returns the structured Isar
-  goal (if available) in the form seen by regular methods (like
-  @{method rule}).  The auxiliary internal encoding of Pure
-  conjunctions is split into individual subgoals as usual.
-
-  \item @{ML Proof.raw_goal}~@{text "state"} returns the structured
-  Isar goal (if available) in the raw internal form seen by ``raw''
-  methods (like @{method induct}).  This form is rarely appropriate
-  for diagnostic tools; @{ML Proof.simple_goal} or @{ML Proof.goal}
-  should be used in most situations.
-
-  \item @{ML Proof.theorem}~@{text "before_qed after_qed statement ctxt"}
-  initializes a toplevel Isar proof state within a given context.
-
-  The optional @{text "before_qed"} method is applied at the end of
-  the proof, just before extracting the result (this feature is rarely
-  used).
-
-  The @{text "after_qed"} continuation receives the extracted result
-  in order to apply it to the final context in a suitable way (e.g.\
-  storing named facts).  Note that at this generic level the target
-  context is specified as @{ML_type Proof.context}, but the usual
-  wrapping of toplevel proofs into command transactions will provide a
-  @{ML_type local_theory} here (\chref{ch:local-theory}).  This
-  affects the way results are stored.
-
-  The @{text "statement"} is given as a nested list of terms, each
-  associated with optional @{keyword "is"} patterns as usual in the
-  Isar source language.  The original nested list structure over terms
-  is turned into one over theorems when @{text "after_qed"} is
-  invoked.
-
-  \end{description}
-*}
-
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "Isar.goal"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{text "@{Isar.goal}"} refers to the regular goal state (if
-  available) of the current proof state managed by the Isar toplevel
-  --- as abstract value.
-
-  This only works for diagnostic ML commands, such as @{command
-  ML_val} or @{command ML_command}.
-
-  \end{description}
-*}
-
-text %mlex {* The following example peeks at a certain goal configuration. *}
-
-notepad
-begin
-  have A and B and C
-    ML_val {*
-      val n = Thm.nprems_of (#goal @{Isar.goal});
-      @{assert} (n = 3);
-    *}
-    oops
-
-text {* Here we see 3 individual subgoals in the same way as regular
-  proof methods would do.  *}
-
-
-section {* Proof methods *}
-
-text {* A @{text "method"} is a function @{text "context \<rightarrow> thm\<^sup>* \<rightarrow> goal
-  \<rightarrow> (cases \<times> goal)\<^sup>*\<^sup>*"} that operates on the full Isar goal
-  configuration with context, goal facts, and tactical goal state and
-  enumerates possible follow-up goal states, with the potential
-  addition of named extensions of the proof context (\emph{cases}).
-  The latter feature is rarely used.
-
-  This means a proof method is like a structurally enhanced tactic
-  (cf.\ \secref{sec:tactics}).  The well-formedness conditions for
-  tactics need to hold for methods accordingly, with the following
-  additions.
-
-  \begin{itemize}
-
-  \item Goal addressing is further limited to operate either
-  uniformly on \emph{all} subgoals, or specifically on the
-  \emph{first} subgoal.
-
-  Exception: old-style tactic emulations that are embedded into the
-  method space, e.g.\ @{method rule_tac}.
-
-  \item A non-trivial method always needs to make progress: an
-  identical follow-up goal state has to be avoided.\footnote{This
-  enables the user to write method expressions like @{text "meth\<^sup>+"}
-  without looping, while the trivial do-nothing case can be recovered
-  via @{text "meth\<^sup>?"}.}
-
-  Exception: trivial stuttering steps, such as ``@{method -}'' or
-  @{method succeed}.
-
-  \item Goal facts passed to the method must not be ignored.  If there
-  is no sensible use of facts outside the goal state, facts should be
-  inserted into the subgoals that are addressed by the method.
-
-  \end{itemize}
-
-  \medskip Syntactically, the language of proof methods appears as
-  arguments to Isar commands like @{command "by"} or @{command apply}.
-  User-space additions are reasonably easy by plugging suitable
-  method-valued parser functions into the framework, using the
-  @{command method_setup} command, for example.
-
-  To get a better idea about the range of possibilities, consider the
-  following Isar proof schemes.  This is the general form of
-  structured proof text:
-
-  \medskip
-  \begin{tabular}{l}
-  @{command from}~@{text "facts\<^sub>1"}~@{command have}~@{text "props"}~@{command using}~@{text "facts\<^sub>2"} \\
-  @{command proof}~@{text "(initial_method)"} \\
-  \quad@{text "body"} \\
-  @{command qed}~@{text "(terminal_method)"} \\
-  \end{tabular}
-  \medskip
-
-  The goal configuration consists of @{text "facts\<^sub>1"} and
-  @{text "facts\<^sub>2"} appended in that order, and various @{text
-  "props"} being claimed.  The @{text "initial_method"} is invoked
-  with facts and goals together and refines the problem to something
-  that is handled recursively in the proof @{text "body"}.  The @{text
-  "terminal_method"} has another chance to finish any remaining
-  subgoals, but it does not see the facts of the initial step.
-
-  \medskip This pattern illustrates unstructured proof scripts:
-
-  \medskip
-  \begin{tabular}{l}
-  @{command have}~@{text "props"} \\
-  \quad@{command using}~@{text "facts\<^sub>1"}~@{command apply}~@{text "method\<^sub>1"} \\
-  \quad@{command apply}~@{text "method\<^sub>2"} \\
-  \quad@{command using}~@{text "facts\<^sub>3"}~@{command apply}~@{text "method\<^sub>3"} \\
-  \quad@{command done} \\
-  \end{tabular}
-  \medskip
-
-  The @{text "method\<^sub>1"} operates on the original claim while
-  using @{text "facts\<^sub>1"}.  Since the @{command apply} command
-  structurally resets the facts, the @{text "method\<^sub>2"} will
-  operate on the remaining goal state without facts.  The @{text
-  "method\<^sub>3"} will see again a collection of @{text
-  "facts\<^sub>3"} that has been inserted into the script explicitly.
-
-  \medskip Empirically, any Isar proof method can be categorized as
-  follows.
-
-  \begin{enumerate}
-
-  \item \emph{Special method with cases} with named context additions
-  associated with the follow-up goal state.
-
-  Example: @{method "induct"}, which is also a ``raw'' method since it
-  operates on the internal representation of simultaneous claims as
-  Pure conjunction (\secref{sec:logic-aux}), instead of separate
-  subgoals (\secref{sec:tactical-goals}).
-
-  \item \emph{Structured method} with strong emphasis on facts outside
-  the goal state.
-
-  Example: @{method "rule"}, which captures the key ideas behind
-  structured reasoning in Isar in purest form.
-
-  \item \emph{Simple method} with weaker emphasis on facts, which are
-  inserted into subgoals to emulate old-style tactical
-  ``premises''.
-
-  Examples: @{method "simp"}, @{method "blast"}, @{method "auto"}.
-
-  \item \emph{Old-style tactic emulation} with detailed numeric goal
-  addressing and explicit references to entities of the internal goal
-  state (which are otherwise invisible from proper Isar proof text).
-  The naming convention @{text "foo_tac"} makes this special
-  non-standard status clear.
-
-  Example: @{method "rule_tac"}.
-
-  \end{enumerate}
-
-  When implementing proof methods, it is advisable to study existing
-  implementations carefully and imitate the typical ``boiler plate''
-  for context-sensitive parsing and further combinators to wrap up
-  tactic expressions as methods.\footnote{Aliases or abbreviations of
-  the standard method combinators should be avoided.  Note that from
-  Isabelle99 until Isabelle2009 the system did provide various odd
-  combinations of method wrappers that made user applications more
-  complicated than necessary.}
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Proof.method} \\
-  @{index_ML METHOD_CASES: "(thm list -> cases_tactic) -> Proof.method"} \\
-  @{index_ML METHOD: "(thm list -> tactic) -> Proof.method"} \\
-  @{index_ML SIMPLE_METHOD: "tactic -> Proof.method"} \\
-  @{index_ML SIMPLE_METHOD': "(int -> tactic) -> Proof.method"} \\
-  @{index_ML Method.insert_tac: "thm list -> int -> tactic"} \\
-  @{index_ML Method.setup: "binding -> (Proof.context -> Proof.method) context_parser ->
-  string -> theory -> theory"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Proof.method} represents proof methods as
-  abstract type.
-
-  \item @{ML METHOD_CASES}~@{text "(fn facts => cases_tactic)"} wraps
-  @{text cases_tactic} depending on goal facts as proof method with
-  cases; the goal context is passed via method syntax.
-
-  \item @{ML METHOD}~@{text "(fn facts => tactic)"} wraps @{text
-  tactic} depending on goal facts as regular proof method; the goal
-  context is passed via method syntax.
-
-  \item @{ML SIMPLE_METHOD}~@{text "tactic"} wraps a tactic that
-  addresses all subgoals uniformly as simple proof method.  Goal facts
-  are already inserted into all subgoals before @{text "tactic"} is
-  applied.
-
-  \item @{ML SIMPLE_METHOD'}~@{text "tactic"} wraps a tactic that
-  addresses a specific subgoal as simple proof method that operates on
-  subgoal 1.  Goal facts are inserted into the subgoal before the @{text
-  "tactic"} is applied.
-
-  \item @{ML Method.insert_tac}~@{text "facts i"} inserts @{text
-  "facts"} into subgoal @{text "i"}.  This is convenient to reproduce
-  part of the @{ML SIMPLE_METHOD} or @{ML SIMPLE_METHOD'} wrapping
-  within regular @{ML METHOD}, for example.
-
-  \item @{ML Method.setup}~@{text "name parser description"} provides
-  the functionality of the Isar command @{command method_setup} as ML
-  function.
-
-  \end{description}
-*}
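-
-text {* As a minimal sketch, @{ML Method.setup} can also be invoked
-  directly in ML, e.g.\ via the @{command setup} command; the method
-  name @{text "my_facts"} below is arbitrary and the body merely
-  inserts the goal facts into the first subgoal. *}
-
-setup {*
-  (*a method that inserts the goal facts into the first subgoal*)
-  Method.setup @{binding my_facts}
-    (Scan.succeed (fn _ => METHOD (fn facts =>
-      HEADGOAL (Method.insert_tac facts))))
-    "insert goal facts into the first subgoal"
-*}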
-
-text %mlex {* See also @{command method_setup} in
-  \cite{isabelle-isar-ref} which includes some abstract examples.
-
-  \medskip The following toy examples illustrate how the goal facts
-  and state are passed to proof methods.  The pre-defined proof method
-  called ``@{method tactic}'' wraps ML source of type @{ML_type
-  tactic} (abstracted over @{ML_text facts}).  This allows immediate
-  experimentation without parsing of concrete syntax. *}
-
-notepad
-begin
-  assume a: A and b: B
-
-  have "A \<and> B"
-    apply (tactic {* rtac @{thm conjI} 1 *})
-    using a apply (tactic {* resolve_tac facts 1 *})
-    using b apply (tactic {* resolve_tac facts 1 *})
-    done
-
-  have "A \<and> B"
-    using a and b
-    ML_val "@{Isar.goal}"
-    apply (tactic {* Method.insert_tac facts 1 *})
-    apply (tactic {* (rtac @{thm conjI} THEN_ALL_NEW atac) 1 *})
-    done
-end
-
-text {* \medskip The next example implements a method that simplifies
-  the first subgoal by rewrite rules given as arguments.  *}
-
-method_setup my_simp = {*
-  Attrib.thms >> (fn thms => fn ctxt =>
-    SIMPLE_METHOD' (fn i =>
-      CHANGED (asm_full_simp_tac
-        (put_simpset HOL_basic_ss ctxt addsimps thms) i)))
-*} "rewrite subgoal by given rules"
-
-text {* The concrete syntax wrapping of @{command method_setup} always
-  passes through the proof context at the end of parsing, but it is
-  not used in this example.
-
-  The @{ML Attrib.thms} parser produces a list of theorems from the
-  usual Isar syntax involving attribute expressions etc.\ (syntax
-  category @{syntax thmrefs}) \cite{isabelle-isar-ref}.  The resulting
-  @{ML_text thms} are added to @{ML HOL_basic_ss} which already
-  contains the basic Simplifier setup for HOL.
-
-  The tactic @{ML asm_full_simp_tac} is the one that is also used in
-  method @{method simp} by default.  The extra wrapping by the @{ML
-  CHANGED} tactical ensures progress of simplification: identical goal
-  states are filtered out explicitly to make the raw tactic conform to
-  standard Isar method behaviour.
-
-  \medskip Method @{method my_simp} can be used in Isar proofs like
-  this:
-*}
-
-notepad
-begin
-  fix a b c
-  assume a: "a = b"
-  assume b: "b = c"
-  have "a = c" by (my_simp a b)
-end
-
-text {* Here is a similar method that operates on all subgoals,
-  instead of just the first one. *}
-
-method_setup my_simp_all = {*
-  Attrib.thms >> (fn thms => fn ctxt =>
-    SIMPLE_METHOD
-      (CHANGED
-        (ALLGOALS (asm_full_simp_tac
-          (put_simpset HOL_basic_ss ctxt addsimps thms)))))
-*} "rewrite all subgoals by given rules"
-
-notepad
-begin
-  fix a b c
-  assume a: "a = b"
-  assume b: "b = c"
-  have "a = c" and "c = b" by (my_simp_all a b)
-end
-
-text {* \medskip Apart from explicit arguments, common proof methods
-  typically work with a default configuration provided by the context.
-  As a shortcut to rule management we use a cheap solution via functor
-  @{ML_functor Named_Thms} (see also @{file
-  "~~/src/Pure/Tools/named_thms.ML"}).  *}
-
-ML {*
-  structure My_Simps =
-    Named_Thms
-      (val name = @{binding my_simp} val description = "my_simp rule")
-*}
-setup My_Simps.setup
-
-text {* This provides ML access to a list of theorems in canonical
-  declaration order via @{ML My_Simps.get}.  The user can add or
-  delete rules via the attribute @{attribute my_simp}.  The actual
-  proof method is now defined as before, but we append the explicit
-  arguments and the rules from the context.  *}
-
-method_setup my_simp' = {*
-  Attrib.thms >> (fn thms => fn ctxt =>
-    SIMPLE_METHOD' (fn i =>
-      CHANGED (asm_full_simp_tac
-        (put_simpset HOL_basic_ss ctxt
-          addsimps (thms @ My_Simps.get ctxt)) i)))
-*} "rewrite subgoal by given rules and my_simp rules from the context"
-
-text {*
-  \medskip Method @{method my_simp'} can be used in Isar proofs
-  like this:
-*}
-
-notepad
-begin
-  fix a b c
-  assume [my_simp]: "a \<equiv> b"
-  assume [my_simp]: "b \<equiv> c"
-  have "a \<equiv> c" by my_simp'
-end
-
-text {* \medskip The @{method my_simp} variants defined above are
-  ``simple'' methods, i.e.\ the goal facts are merely inserted as goal
-  premises by the @{ML SIMPLE_METHOD'} or @{ML SIMPLE_METHOD} wrapper.
-  For proof methods that are similar to the standard collection of
-  @{method simp}, @{method blast}, @{method fast}, @{method auto}
-  there is little more that can be done.
-
-  Note that using the primary goal facts in the same manner as the
-  method arguments obtained via concrete syntax or the context does
-  not meet the requirement of ``strong emphasis on facts'' of regular
-  proof methods, because rewrite rules as used above can be easily
-  ignored.  A proof text ``@{command using}~@{text "foo"}~@{command
-  "by"}~@{text "my_simp"}'' where @{text "foo"} is not used would
-  deceive the reader.
-
-  \medskip The technical treatment of rules from the context requires
-  further attention.  Above we rebuild a fresh @{ML_type simpset} from
-  the arguments and \emph{all} rules retrieved from the context on
-  every invocation of the method.  This does not scale to really large
-  collections of rules, which easily emerges in the context of a big
-  theory library, for example.
-
-  This is an inherent limitation of the simplistic rule management via
-  functor @{ML_functor Named_Thms}, because it lacks tool-specific
-  storage and retrieval.  More realistic applications require
-  efficient index-structures that organize theorems in a customized
-  manner, such as a discrimination net that is indexed by the
-  left-hand sides of rewrite rules.  For variations on the Simplifier,
-  re-use of the existing type @{ML_type simpset} is adequate, but
-  scalability would require it be maintained statically within the
-  context data, not dynamically on each tool invocation.  *}
-
-
-section {* Attributes \label{sec:attributes} *}
-
-text {* An \emph{attribute} is a function @{text "context \<times> thm \<rightarrow>
-  context \<times> thm"}, which means both a (generic) context and a theorem
-  can be modified simultaneously.  In practice this mixed form is very
-  rare, instead attributes are presented either as \emph{declaration
-  attribute:} @{text "thm \<rightarrow> context \<rightarrow> context"} or \emph{rule
-  attribute:} @{text "context \<rightarrow> thm \<rightarrow> thm"}.
-
-  Attributes can have additional arguments via concrete syntax.  There
-  is a collection of context-sensitive parsers for various logical
-  entities (types, terms, theorems).  These already take care of
-  applying morphisms to the arguments when attribute expressions are
-  moved into a different context (see also \secref{sec:morphisms}).
-
-  When implementing declaration attributes, it is important to operate
-  exactly on the variant of the generic context that is provided by
-  the system, which is either global theory context or local proof
-  context.  In particular, the background theory of a local context
-  must not be modified in this situation! *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type attribute} \\
-  @{index_ML Thm.rule_attribute: "(Context.generic -> thm -> thm) -> attribute"} \\
-  @{index_ML Thm.declaration_attribute: "
-  (thm -> Context.generic -> Context.generic) -> attribute"} \\
-  @{index_ML Attrib.setup: "binding -> attribute context_parser ->
-  string -> theory -> theory"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type attribute} represents attributes as concrete
-  type alias.
-
-  \item @{ML Thm.rule_attribute}~@{text "(fn context => rule)"} wraps
-  a context-dependent rule (mapping on @{ML_type thm}) as attribute.
-
-  \item @{ML Thm.declaration_attribute}~@{text "(fn thm => decl)"}
-  wraps a theorem-dependent declaration (mapping on @{ML_type
-  Context.generic}) as attribute.
-
-  \item @{ML Attrib.setup}~@{text "name parser description"} provides
-  the functionality of the Isar command @{command attribute_setup} as
-  ML function.
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def attributes} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  @@{ML_antiquotation attributes} attributes
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{attributes [\<dots>]}"} embeds attribute source
-  representation into the ML text, which is particularly useful with
-  declarations like @{ML Local_Theory.note}.  Attribute names are
-  internalized at compile time, but the source is unevaluated.  This
-  means attributes with formal arguments (types, terms, theorems) may
-  be subject to odd effects of dynamic scoping!
-
-  \end{description}
-*}
-
-text %mlex {* See also @{command attribute_setup} in
-  \cite{isabelle-isar-ref} which includes some abstract examples. *}
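-
-text {* As a minimal sketch (assuming the HOL theorem @{text "sym"} for
-  equality), a rule attribute that resolves a given equation with that
-  rule can be set up as follows; the name @{text "my_sym"} is
-  arbitrary. *}
-
-attribute_setup my_sym = {*
-  (*rule attribute: resolve the theorem with sym*)
-  Scan.succeed (Thm.rule_attribute (fn _ => fn th => th RS @{thm sym}))
-*} "resolve equation with the sym rule"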
-
-end
--- a/src/Doc/IsarImplementation/Local_Theory.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,168 +0,0 @@
-theory Local_Theory
-imports Base
-begin
-
-chapter {* Local theory specifications \label{ch:local-theory} *}
-
-text {*
-  A \emph{local theory} combines aspects of both theory and proof
-  context (cf.\ \secref{sec:context}), such that definitional
-  specifications may be given relatively to parameters and
-  assumptions.  A local theory is represented as a regular proof
-  context, augmented by administrative data about the \emph{target
-  context}.
-
-  The target is usually derived from the background theory by adding
-  local @{text "\<FIX>"} and @{text "\<ASSUME>"} elements, plus
-  suitable modifications of non-logical context data (e.g.\ a special
-  type-checking discipline).  Once initialized, the target is ready to
-  absorb definitional primitives: @{text "\<DEFINE>"} for terms and
-  @{text "\<NOTE>"} for theorems.  Such definitions may get
-  transformed in a target-specific way, but the programming interface
-  hides such details.
-
-  Isabelle/Pure provides target mechanisms for locales, type-classes,
-  type-class instantiations, and general overloading.  In principle,
-  users can implement new targets as well, but this rather arcane
-  discipline is beyond the scope of this manual.  In contrast,
-  implementing derived definitional packages to be used within a local
-  theory context is quite easy: the interfaces are even simpler and
-  more abstract than the underlying primitives for raw theories.
-
-  Many definitional packages for local theories are available in
-  Isabelle.  Although a few old packages only work for global
-  theories, the standard way of implementing definitional packages in
-  Isabelle is via the local theory interface.
-*}
-
-
-section {* Definitional elements *}
-
-text {*
-  There are separate elements @{text "\<DEFINE> c \<equiv> t"} for terms, and
-  @{text "\<NOTE> b = thm"} for theorems.  Types are treated
-  implicitly, according to Hindley-Milner discipline (cf.\
-  \secref{sec:variables}).  These definitional primitives essentially
-  act like @{text "let"}-bindings within a local context that may
-  already contain earlier @{text "let"}-bindings and some initial
-  @{text "\<lambda>"}-bindings.  Thus we gain \emph{dependent definitions}
-  that are relative to an initial axiomatic context.  The following
-  diagram illustrates this idea of axiomatic elements versus
-  definitional elements:
-
-  \begin{center}
-  \begin{tabular}{|l|l|l|}
-  \hline
-  & @{text "\<lambda>"}-binding & @{text "let"}-binding \\
-  \hline
-  types & fixed @{text "\<alpha>"} & arbitrary @{text "\<beta>"} \\
-  terms & @{text "\<FIX> x :: \<tau>"} & @{text "\<DEFINE> c \<equiv> t"} \\
-  theorems & @{text "\<ASSUME> a: A"} & @{text "\<NOTE> b = \<^BG>B\<^EN>"} \\
-  \hline
-  \end{tabular}
-  \end{center}
-
-  A user package merely needs to produce suitable @{text "\<DEFINE>"}
-  and @{text "\<NOTE>"} elements according to the application.  For
-  example, a package for inductive definitions might first @{text
-  "\<DEFINE>"} a certain predicate as some fixed-point construction,
-  then @{text "\<NOTE>"} a proven result about monotonicity of the
-  functor involved here, and then produce further derived concepts via
-  additional @{text "\<DEFINE>"} and @{text "\<NOTE>"} elements.
-
-  The cumulative sequence of @{text "\<DEFINE>"} and @{text "\<NOTE>"}
-  produced at package runtime is managed by the local theory
-  infrastructure by means of an \emph{auxiliary context}.  Thus the
-  system holds up the impression of working within a fully abstract
-  situation with hypothetical entities: @{text "\<DEFINE> c \<equiv> t"}
-  always results in a literal fact @{text "\<^BG>c \<equiv> t\<^EN>"}, where
-  @{text "c"} is a fixed variable.  The details about
-  global constants, name spaces etc.\ are handled internally.
-
-  So the general structure of a local theory is a sandwich of three
-  layers:
-
-  \begin{center}
-  \framebox{\quad auxiliary context \quad\framebox{\quad target context \quad\framebox{\quad background theory\quad}}}
-  \end{center}
-
-  When a definitional package is finished, the auxiliary context is
-  reset to the target context.  The target now holds definitions for
-  terms and theorems that stem from the hypothetical @{text
-  "\<DEFINE>"} and @{text "\<NOTE>"} elements, transformed by the
-  particular target policy (see \cite[\S4--5]{Haftmann-Wenzel:2009}
-  for details).  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type local_theory: Proof.context} \\
-  @{index_ML Named_Target.init: "(local_theory -> local_theory) ->
-    string -> theory -> local_theory"} \\[1ex]
-  @{index_ML Local_Theory.define: "(binding * mixfix) * (Attrib.binding * term) ->
-    local_theory -> (term * (string * thm)) * local_theory"} \\
-  @{index_ML Local_Theory.note: "Attrib.binding * thm list ->
-    local_theory -> (string * thm list) * local_theory"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type local_theory} represents local theories.
-  Although this is merely an alias for @{ML_type Proof.context}, it is
-  semantically a subtype of the same: a @{ML_type local_theory} holds
-  target information as special context data.  Subtyping means that
-  any value @{text "lthy:"}~@{ML_type local_theory} can be also used
-  with operations on expecting a regular @{text "ctxt:"}~@{ML_type
-  Proof.context}.
-
-  \item @{ML Named_Target.init}~@{text "before_exit name thy"}
-  initializes a local theory derived from the given background theory.
-  An empty name refers to a \emph{global theory} context, and a
-  non-empty name refers to a @{command locale} or @{command class}
-  context (a fully-qualified internal name is expected here).  This is
-  useful for experimentation --- normally the Isar toplevel already
-  takes care to initialize the local theory context.  The given @{text
-  "before_exit"} function is invoked before leaving the context; in
-  most situations plain identity @{ML I} is sufficient.
-
-  \item @{ML Local_Theory.define}~@{text "((b, mx), (a, rhs))
-  lthy"} defines a local entity according to the specification that is
-  given relatively to the current @{text "lthy"} context.  In
-  particular the term of the RHS may refer to earlier local entities
-  from the auxiliary context, or hypothetical parameters from the
-  target context.  The result is the newly defined term (which is
-  always a fixed variable with exactly the same name as specified for
-  the LHS), together with an equational theorem that states the
-  definition as a hypothetical fact.
-
-  Unless an explicit name binding is given for the RHS, the resulting
-  fact will be called @{text "b_def"}.  Any given attributes are
-  applied to that same fact --- immediately in the auxiliary context
-  \emph{and} in any transformed versions stemming from target-specific
-  policies or any later interpretations of results from the target
-  context (think of @{command locale} and @{command interpretation},
-  for example).  This means that attributes should usually be plain
-  declarations such as @{attribute simp}, while non-trivial rules like
-  @{attribute simplified} are better avoided.
-
-  \item @{ML Local_Theory.note}~@{text "(a, ths) lthy"} is
-  analogous to @{ML Local_Theory.define}, but defines facts instead of
-  terms.  There is also a slightly more general variant @{ML
-  Local_Theory.notes} that defines several facts (with attribute
-  expressions) simultaneously.
-
-  This is essentially the internal version of the @{command lemmas}
-  command, or @{command declare} if an empty name binding is given.
-
-  \end{description}
-*}
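-
-text %mlex {* The following sketch initializes a global target for
-  experimentation and performs a single definition; the names
-  @{text "double"} and @{text "double_def"} are arbitrary. *}
-
-ML_val {*
-  (*global target of the current background theory*)
-  val lthy = Named_Target.init I "" @{theory};
-
-  (*define "double"; the result is a fixed variable plus a definitional theorem*)
-  val ((t, (_, def_thm)), lthy') =
-    Local_Theory.define ((@{binding double}, NoSyn),
-      ((@{binding double_def}, []), @{term "\<lambda>n::nat. n + n"})) lthy;
-*}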
-
-
-section {* Morphisms and declarations \label{sec:morphisms} *}
-
-text {*
-  %FIXME
-
-  See also \cite{Chaieb-Wenzel:2007}.
-*}
-
-end
--- a/src/Doc/IsarImplementation/Logic.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1462 +0,0 @@
-theory Logic
-imports Base
-begin
-
-chapter {* Primitive logic \label{ch:logic} *}
-
-text {*
-  The logical foundations of Isabelle/Isar are that of the Pure logic,
-  which has been introduced as a Natural Deduction framework in
-  \cite{paulson700}.  This is essentially the same logic as ``@{text
-  "\<lambda>HOL"}'' in the more abstract setting of Pure Type Systems (PTS)
-  \cite{Barendregt-Geuvers:2001}, although there are some key
-  differences in the specific treatment of simple types in
-  Isabelle/Pure.
-
-  Following type-theoretic parlance, the Pure logic consists of three
-  levels of @{text "\<lambda>"}-calculus with corresponding arrows, @{text
-  "\<Rightarrow>"} for syntactic function space (terms depending on terms), @{text
-  "\<And>"} for universal quantification (proofs depending on terms), and
-  @{text "\<Longrightarrow>"} for implication (proofs depending on proofs).
-
-  Derivations are relative to a logical theory, which declares type
-  constructors, constants, and axioms.  Theory declarations support
-  schematic polymorphism, which is strictly speaking outside the
-  logic.\footnote{This is the deeper logical reason why the theory
-  context @{text "\<Theta>"} is separate from the proof context @{text "\<Gamma>"}
-  of the core calculus: type constructors, term constants, and facts
-  (proof constants) may involve arbitrary type schemes, but the type
-  of a locally fixed term parameter is also fixed!}
-*}
-
-
-section {* Types \label{sec:types} *}
-
-text {*
-  The language of types is an uninterpreted order-sorted first-order
-  algebra; types are qualified by ordered type classes.
-
-  \medskip A \emph{type class} is an abstract syntactic entity
-  declared in the theory context.  The \emph{subclass relation} @{text
-  "c\<^sub>1 \<subseteq> c\<^sub>2"} is specified by stating an acyclic
-  generating relation; the transitive closure is maintained
-  internally.  The resulting relation is an ordering: reflexive,
-  transitive, and antisymmetric.
-
-  A \emph{sort} is a list of type classes written as @{text "s = {c\<^sub>1,
-  \<dots>, c\<^sub>m}"}, it represents symbolic intersection.  Notationally, the
-  curly braces are omitted for singleton intersections, i.e.\ any
-  class @{text "c"} may be read as a sort @{text "{c}"}.  The ordering
-  on type classes is extended to sorts according to the meaning of
-  intersections: @{text "{c\<^sub>1, \<dots>, c\<^sub>m} \<subseteq> {d\<^sub>1, \<dots>, d\<^sub>n}"} iff @{text
-  "\<forall>j. \<exists>i. c\<^sub>i \<subseteq> d\<^sub>j"}.  The empty intersection @{text "{}"} refers to
-  the universal sort, which is the largest element wrt.\ the sort
-  order.  Thus @{text "{}"} represents the ``full sort'', not the
-  empty one!  The intersection of all (finitely many) classes declared
-  in the current theory is the least element wrt.\ the sort ordering.
-
-  \medskip A \emph{fixed type variable} is a pair of a basic name
-  (starting with a @{text "'"} character) and a sort constraint, e.g.\
-  @{text "('a, s)"} which is usually printed as @{text "\<alpha>\<^sub>s"}.
-  A \emph{schematic type variable} is a pair of an indexname and a
-  sort constraint, e.g.\ @{text "(('a, 0), s)"} which is usually
-  printed as @{text "?\<alpha>\<^sub>s"}.
-
-  Note that \emph{all} syntactic components contribute to the identity
-  of type variables: basic name, index, and sort constraint.  The core
-  logic handles type variables with the same name but different sorts
-  as different, although the type-inference layer (which is outside
-  the core) rejects anything like that.
-
-  A \emph{type constructor} @{text "\<kappa>"} is a @{text "k"}-ary operator
-  on types declared in the theory.  Type constructor application is
-  written postfix as @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>k)\<kappa>"}.  For
-  @{text "k = 0"} the argument tuple is omitted, e.g.\ @{text "prop"}
-  instead of @{text "()prop"}.  For @{text "k = 1"} the parentheses
-  are omitted, e.g.\ @{text "\<alpha> list"} instead of @{text "(\<alpha>)list"}.
-  Further notation is provided for specific constructors, notably the
-  right-associative infix @{text "\<alpha> \<Rightarrow> \<beta>"} instead of @{text "(\<alpha>,
-  \<beta>)fun"}.
-  
-  The logical category \emph{type} is defined inductively over type
-  variables and type constructors as follows: @{text "\<tau> = \<alpha>\<^sub>s | ?\<alpha>\<^sub>s |
-  (\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"}.
-
-  A \emph{type abbreviation} is a syntactic definition @{text
-  "(\<^vec>\<alpha>)\<kappa> = \<tau>"} of an arbitrary type expression @{text "\<tau>"} over
-  variables @{text "\<^vec>\<alpha>"}.  Type abbreviations appear as type
-  constructors in the syntax, but are expanded before entering the
-  logical core.
-
-  A \emph{type arity} declares the image behavior of a type
-  constructor wrt.\ the algebra of sorts: @{text "\<kappa> :: (s\<^sub>1, \<dots>,
-  s\<^sub>k)s"} means that @{text "(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"} is
-  of sort @{text "s"} if every argument type @{text "\<tau>\<^sub>i"} is
-  of sort @{text "s\<^sub>i"}.  Arity declarations are implicitly
-  completed, i.e.\ @{text "\<kappa> :: (\<^vec>s)c"} entails @{text "\<kappa> ::
-  (\<^vec>s)c'"} for any @{text "c' \<supseteq> c"}.
-
-  \medskip The sort algebra is always maintained as \emph{coregular},
-  which means that type arities are consistent with the subclass
-  relation: for any type constructor @{text "\<kappa>"}, and classes @{text
-  "c\<^sub>1 \<subseteq> c\<^sub>2"}, and arities @{text "\<kappa> ::
-  (\<^vec>s\<^sub>1)c\<^sub>1"} and @{text "\<kappa> ::
-  (\<^vec>s\<^sub>2)c\<^sub>2"} holds @{text "\<^vec>s\<^sub>1 \<subseteq>
-  \<^vec>s\<^sub>2"} component-wise.
-
-  The key property of a coregular order-sorted algebra is that sort
-  constraints can be solved in a most general fashion: for each type
-  constructor @{text "\<kappa>"} and sort @{text "s"} there is a most general
-  vector of argument sorts @{text "(s\<^sub>1, \<dots>, s\<^sub>k)"} such
-  that a type scheme @{text "(\<alpha>\<^bsub>s\<^sub>1\<^esub>, \<dots>,
-  \<alpha>\<^bsub>s\<^sub>k\<^esub>)\<kappa>"} is of sort @{text "s"}.
-  Consequently, type unification has most general solutions (modulo
-  equivalence of sorts), so type-inference produces primary types as
-  expected \cite{nipkow-prehofer}.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type class: string} \\
-  @{index_ML_type sort: "class list"} \\
-  @{index_ML_type arity: "string * sort list * sort"} \\
-  @{index_ML_type typ} \\
-  @{index_ML Term.map_atyps: "(typ -> typ) -> typ -> typ"} \\
-  @{index_ML Term.fold_atyps: "(typ -> 'a -> 'a) -> typ -> 'a -> 'a"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML Sign.subsort: "theory -> sort * sort -> bool"} \\
-  @{index_ML Sign.of_sort: "theory -> typ * sort -> bool"} \\
-  @{index_ML Sign.add_type: "Proof.context -> binding * int * mixfix -> theory -> theory"} \\
-  @{index_ML Sign.add_type_abbrev: "Proof.context ->
-  binding * string list * typ -> theory -> theory"} \\
-  @{index_ML Sign.primitive_class: "binding * class list -> theory -> theory"} \\
-  @{index_ML Sign.primitive_classrel: "class * class -> theory -> theory"} \\
-  @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type class} represents type classes.
-
-  \item Type @{ML_type sort} represents sorts, i.e.\ finite
-  intersections of classes.  The empty list @{ML "[]: sort"} refers to
-  the empty class intersection, i.e.\ the ``full sort''.
-
-  \item Type @{ML_type arity} represents type arities.  A triple
-  @{text "(\<kappa>, \<^vec>s, s) : arity"} represents @{text "\<kappa> ::
-  (\<^vec>s)s"} as described above.
-
-  \item Type @{ML_type typ} represents types; this is a datatype with
-  constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
-
-  \item @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
-  "f"} to all atomic types (@{ML TFree}, @{ML TVar}) occurring in
-  @{text "\<tau>"}.
-
-  \item @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
-  @{text "f"} over all occurrences of atomic types (@{ML TFree}, @{ML
-  TVar}) in @{text "\<tau>"}; the type structure is traversed from left to
-  right.
-
-  \item @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
-  tests the subsort relation @{text "s\<^sub>1 \<subseteq> s\<^sub>2"}.
-
-  \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
-  @{text "\<tau>"} is of sort @{text "s"}.
-
-  \item @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
-  new type constructor @{text "\<kappa>"} with @{text "k"} arguments and
-  optional mixfix syntax.
-
-  \item @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
-  defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"}.
-
-  \item @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
-  c\<^sub>n])"} declares a new class @{text "c"}, together with class
-  relations @{text "c \<subseteq> c\<^sub>i"}, for @{text "i = 1, \<dots>, n"}.
-
-  \item @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
-  c\<^sub>2)"} declares the class relation @{text "c\<^sub>1 \<subseteq>
-  c\<^sub>2"}.
-
-  \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
-  the arity @{text "\<kappa> :: (\<^vec>s)s"}.
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "class"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "sort"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "type_name"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "type_abbrev"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "nonterminal"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "typ"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  @@{ML_antiquotation class} nameref
-  ;
-  @@{ML_antiquotation sort} sort
-  ;
-  (@@{ML_antiquotation type_name} |
-   @@{ML_antiquotation type_abbrev} |
-   @@{ML_antiquotation nonterminal}) nameref
-  ;
-  @@{ML_antiquotation typ} type
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{class c}"} inlines the internalized class @{text
-  "c"} --- as @{ML_type string} literal.
-
-  \item @{text "@{sort s}"} inlines the internalized sort @{text "s"}
-  --- as @{ML_type "string list"} literal.
-
-  \item @{text "@{type_name c}"} inlines the internalized type
-  constructor @{text "c"} --- as @{ML_type string} literal.
-
-  \item @{text "@{type_abbrev c}"} inlines the internalized type
-  abbreviation @{text "c"} --- as @{ML_type string} literal.
-
-  \item @{text "@{nonterminal c}"} inlines the internalized syntactic
-  type~/ grammar nonterminal @{text "c"} --- as @{ML_type string}
-  literal.
-
-  \item @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
-  --- as constructor term for datatype @{ML_type typ}.
-
-  \end{description}
-*}
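-
-text %mlex {* The following sketch, assuming the sort algebra of
-  Isabelle/HOL, checks a subsort relation and a sort membership. *}
-
-ML_val {*
-  (*linorder is a subclass of order, and nat is a linear order*)
-  @{assert} (Sign.subsort @{theory} (@{sort linorder}, @{sort order}));
-  @{assert} (Sign.of_sort @{theory} (@{typ nat}, @{sort linorder}));
-*}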
-
-
-section {* Terms \label{sec:terms} *}
-
-text {*
-  The language of terms is that of simply-typed @{text "\<lambda>"}-calculus
-  with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
-  or \cite{paulson-ml2}), with the types being determined by the
-  corresponding binders.  In contrast, free variables and constants
-  have an explicit name and type in each occurrence.
-
-  \medskip A \emph{bound variable} is a natural number @{text "b"},
-  which accounts for the number of intermediate binders between the
-  variable occurrence in the body and its binding position.  For
-  example, the de-Bruijn term @{text "\<lambda>\<^bsub>bool\<^esub>. \<lambda>\<^bsub>bool\<^esub>. 1 \<and> 0"} would
-  correspond to @{text "\<lambda>x\<^bsub>bool\<^esub>. \<lambda>y\<^bsub>bool\<^esub>. x \<and> y"} in a named
-  representation.  Note that a bound variable may be represented by
-  different de-Bruijn indices at different occurrences, depending on
-  the nesting of abstractions.
-
-  A \emph{loose variable} is a bound variable that is outside the
-  scope of local binders.  The types (and names) for loose variables
-  can be managed as a separate context that is maintained as a stack
-  of hypothetical binders.  The core logic operates on closed terms,
-  without any loose variables.
-
-  A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
-  @{text "(x, \<tau>)"} which is usually printed @{text "x\<^sub>\<tau>"} here.  A
-  \emph{schematic variable} is a pair of an indexname and a type,
-  e.g.\ @{text "((x, 0), \<tau>)"} which is likewise printed as @{text
-  "?x\<^sub>\<tau>"}.
-
-  \medskip A \emph{constant} is a pair of a basic name and a type,
-  e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text "c\<^sub>\<tau>"}
-  here.  Constants are declared in the context as polymorphic families
-  @{text "c :: \<sigma>"}, meaning that all substitution instances @{text
-  "c\<^sub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
-
-  The vector of \emph{type arguments} of constant @{text "c\<^sub>\<tau>"} wrt.\
-  the declaration @{text "c :: \<sigma>"} is defined as the codomain of the
-  matcher @{text "\<vartheta> = {?\<alpha>\<^sub>1 \<mapsto> \<tau>\<^sub>1, \<dots>, ?\<alpha>\<^sub>n \<mapsto> \<tau>\<^sub>n}"} presented in
-  canonical order @{text "(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n)"}, corresponding to the
-  left-to-right occurrences of the @{text "\<alpha>\<^sub>i"} in @{text "\<sigma>"}.
-  Within a given theory context, there is a one-to-one correspondence
-  between any constant @{text "c\<^sub>\<tau>"} and the application @{text "c(\<tau>\<^sub>1,
-  \<dots>, \<tau>\<^sub>n)"} of its type arguments.  For example, with @{text "plus :: \<alpha>
-  \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow> nat\<^esub>"} corresponds to
-  @{text "plus(nat)"}.
-
-  Constant declarations @{text "c :: \<sigma>"} may contain sort constraints
-  for type variables in @{text "\<sigma>"}.  These are observed by
-  type-inference as expected, but \emph{ignored} by the core logic.
-  This means the primitive logic is able to reason with instances of
-  polymorphic constants that the user-level type-checker would reject
-  due to violation of type class restrictions.
-
-  \medskip An \emph{atomic term} is either a variable or constant.
-  The logical category \emph{term} is defined inductively over atomic
-  terms, with abstraction and application as follows: @{text "t = b |
-  x\<^sub>\<tau> | ?x\<^sub>\<tau> | c\<^sub>\<tau> | \<lambda>\<^sub>\<tau>. t | t\<^sub>1 t\<^sub>2"}.  Parsing and printing takes care of
-  converting between this internal form and an external representation
-  with named bound variables.  Subsequently, we shall use the named
-  notation instead of the internal de-Bruijn representation.
-
-  The inductive relation @{text "t :: \<tau>"} assigns a (unique) type to a
-  term according to the structure of atomic terms, abstractions, and
-  applications:
-  \[
-  \infer{@{text "a\<^sub>\<tau> :: \<tau>"}}{}
-  \qquad
-  \infer{@{text "(\<lambda>x\<^sub>\<tau>. t) :: \<tau> \<Rightarrow> \<sigma>"}}{@{text "t :: \<sigma>"}}
-  \qquad
-  \infer{@{text "t u :: \<sigma>"}}{@{text "t :: \<tau> \<Rightarrow> \<sigma>"} & @{text "u :: \<tau>"}}
-  \]
-  A \emph{well-typed term} is a term that can be typed according to these rules.
-
-  Typing information can be omitted: type-inference is able to
-  reconstruct the most general type of a raw term, while assigning
-  most general types to all of its variables and constants.
-  Type-inference depends on a context of type constraints for fixed
-  variables, and declarations for polymorphic constants.
-
-  The identity of atomic terms consists both of the name and the type
-  component.  This means that different variables @{text
-  "x\<^bsub>\<tau>\<^sub>1\<^esub>"} and @{text "x\<^bsub>\<tau>\<^sub>2\<^esub>"} may become the same after
-  type instantiation.  Type-inference rejects variables of the same
-  name, but different types.  In contrast, mixed instances of
-  polymorphic constants occur routinely.
-
-  \medskip The \emph{hidden polymorphism} of a term @{text "t :: \<sigma>"}
-  is the set of type variables occurring in @{text "t"}, but not in
-  its type @{text "\<sigma>"}.  This means that the term implicitly depends
-  on type arguments that are not accounted for in the result type, i.e.\
-  there are different type instances @{text "t\<vartheta> :: \<sigma>"} and
-  @{text "t\<vartheta>' :: \<sigma>"} with the same type.  This slightly
-  pathological situation notoriously demands additional care.
-
-  \medskip A \emph{term abbreviation} is a syntactic definition @{text
-  "c\<^sub>\<sigma> \<equiv> t"} of a closed term @{text "t"} of type @{text "\<sigma>"},
-  without any hidden polymorphism.  A term abbreviation looks like a
-  constant in the syntax, but is expanded before entering the logical
-  core.  Abbreviations are usually reverted when printing terms, using
-  @{text "t \<rightarrow> c\<^sub>\<sigma>"} as rules for higher-order rewriting.
-
-  \medskip Canonical operations on @{text "\<lambda>"}-terms include @{text
-  "\<alpha>\<beta>\<eta>"}-conversion: @{text "\<alpha>"}-conversion refers to capture-free
-  renaming of bound variables; @{text "\<beta>"}-conversion contracts an
-  abstraction applied to an argument term, substituting the argument
-  in the body: @{text "(\<lambda>x. b)a"} becomes @{text "b[a/x]"}; @{text
-  "\<eta>"}-conversion contracts vacuous application-abstraction: @{text
-  "\<lambda>x. f x"} becomes @{text "f"}, provided that the bound variable
-  does not occur in @{text "f"}.
-
-  Terms are normally treated modulo @{text "\<alpha>"}-conversion, which is
-  implicit in the de-Bruijn representation.  Names for bound variables
-  in abstractions are maintained separately as (meaningless) comments,
-  mostly for parsing and printing.  Full @{text "\<alpha>\<beta>\<eta>"}-conversion is
-  commonplace in various standard operations (\secref{sec:obj-rules})
-  that are based on higher-order unification and matching.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type term} \\
-  @{index_ML_op "aconv": "term * term -> bool"} \\
-  @{index_ML Term.map_types: "(typ -> typ) -> term -> term"} \\
-  @{index_ML Term.fold_types: "(typ -> 'a -> 'a) -> term -> 'a -> 'a"} \\
-  @{index_ML Term.map_aterms: "(term -> term) -> term -> term"} \\
-  @{index_ML Term.fold_aterms: "(term -> 'a -> 'a) -> term -> 'a -> 'a"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML fastype_of: "term -> typ"} \\
-  @{index_ML lambda: "term -> term -> term"} \\
-  @{index_ML betapply: "term * term -> term"} \\
-  @{index_ML incr_boundvars: "int -> term -> term"} \\
-  @{index_ML Sign.declare_const: "Proof.context ->
-  (binding * typ) * mixfix -> theory -> term * theory"} \\
-  @{index_ML Sign.add_abbrev: "string -> binding * term ->
-  theory -> (term * term) * theory"} \\
-  @{index_ML Sign.const_typargs: "theory -> string * typ -> typ list"} \\
-  @{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type term} represents de-Bruijn terms, with comments
-  in abstractions, and explicitly named free variables and constants;
-  this is a datatype with constructors @{index_ML Bound}, @{index_ML
-  Free}, @{index_ML Var}, @{index_ML Const}, @{index_ML Abs},
-  @{index_ML_op "$"}.
-
-  \item @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
-  "\<alpha>"}-equivalence of two terms.  This is the basic equality relation
-  on type @{ML_type term}; raw datatype equality should only be used
-  for operations related to parsing or printing!
-
-  \item @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
-  "f"} to all types occurring in @{text "t"}.
-
-  \item @{ML Term.fold_types}~@{text "f t"} iterates the operation
-  @{text "f"} over all occurrences of types in @{text "t"}; the term
-  structure is traversed from left to right.
-
-  \item @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
-  "f"} to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
-  Const}) occurring in @{text "t"}.
-
-  \item @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
-  @{text "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML
-  Free}, @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
-  traversed from left to right.
-
-  \item @{ML fastype_of}~@{text "t"} determines the type of a
-  well-typed term.  This operation is relatively slow, despite the
-  omission of any sanity checks.
-
-  \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
-  "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
-  body @{text "b"} are replaced by bound variables.
-
-  \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
-  "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
-  abstraction.
-
-  \item @{ML incr_boundvars}~@{text "j"} increments a term's dangling
-  bound variables by the offset @{text "j"}.  This is required when
-  moving a subterm into a context where it is enclosed by a different
-  number of abstractions.  Bound variables with a matching abstraction
-  are unaffected.
-
-  \item @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
-  a new constant @{text "c :: \<sigma>"} with optional mixfix syntax.
-
-  \item @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
-  introduces a new term abbreviation @{text "c \<equiv> t"}.
-
-  \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
-  Sign.const_instance}~@{text "thy (c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])"}
-  convert between two representations of polymorphic constants: full
-  type instance vs.\ compact type arguments form.
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "const_name"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "const_abbrev"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "const"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "term"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "prop"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  (@@{ML_antiquotation const_name} |
-   @@{ML_antiquotation const_abbrev}) nameref
-  ;
-  @@{ML_antiquotation const} ('(' (type + ',') ')')?
-  ;
-  @@{ML_antiquotation term} term
-  ;
-  @@{ML_antiquotation prop} prop
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{const_name c}"} inlines the internalized logical
-  constant name @{text "c"} --- as @{ML_type string} literal.
-
-  \item @{text "@{const_abbrev c}"} inlines the internalized
-  abbreviated constant name @{text "c"} --- as @{ML_type string}
-  literal.
-
-  \item @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
-  constant @{text "c"} with precise type instantiation in the sense of
-  @{ML Sign.const_instance} --- as @{ML Const} constructor term for
-  datatype @{ML_type term}.
-
-  \item @{text "@{term t}"} inlines the internalized term @{text "t"}
-  --- as constructor term for datatype @{ML_type term}.
-
-  \item @{text "@{prop \<phi>}"} inlines the internalized proposition
-  @{text "\<phi>"} --- as constructor term for datatype @{ML_type term}.
-
-  \end{description}
-*}
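-
-text %mlex {* The following sketch, assuming Isabelle/HOL numerals,
-  illustrates some basic term operations. *}
-
-ML_val {*
-  (*\<lambda>x. x + x as an abstraction over the free variable x*)
-  val t = lambda @{term "x::nat"} @{term "x + x::nat"};
-  @{assert} (fastype_of t = @{typ "nat \<Rightarrow> nat"});
-  (*\<beta>-reduction of the application to 1*)
-  @{assert} (betapply (t, @{term "1::nat"}) aconv @{term "1 + 1::nat"});
-*}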
-
-
-section {* Theorems \label{sec:thms} *}
-
-text {*
-  A \emph{proposition} is a well-typed term of type @{text "prop"}, a
-  \emph{theorem} is a proven proposition (depending on a context of
-  hypotheses and the background theory).  Primitive inferences include
-  plain Natural Deduction rules for the primary connectives @{text
-  "\<And>"} and @{text "\<Longrightarrow>"} of the framework.  There is also a builtin
-  notion of equality/equivalence @{text "\<equiv>"}.
-*}
-
-
-subsection {* Primitive connectives and rules \label{sec:prim-rules} *}
-
-text {*
-  The theory @{text "Pure"} contains constant declarations for the
-  primitive connectives @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"} of
-  the logical framework, see \figref{fig:pure-connectives}.  The
-  derivability judgment @{text "A\<^sub>1, \<dots>, A\<^sub>n \<turnstile> B"} is
-  defined inductively by the primitive inferences given in
-  \figref{fig:prim-rules}, with the global restriction that the
-  hypotheses must \emph{not} contain any schematic variables.  The
-  builtin equality is conceptually axiomatized as shown in
-  \figref{fig:pure-equality}, although the implementation works
-  directly with derived inferences.
-
-  \begin{figure}[htb]
-  \begin{center}
-  \begin{tabular}{ll}
-  @{text "all :: (\<alpha> \<Rightarrow> prop) \<Rightarrow> prop"} & universal quantification (binder @{text "\<And>"}) \\
-  @{text "\<Longrightarrow> :: prop \<Rightarrow> prop \<Rightarrow> prop"} & implication (right associative infix) \\
-  @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} & equality relation (infix) \\
-  \end{tabular}
-  \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
-  \end{center}
-  \end{figure}
-
-  \begin{figure}[htb]
-  \begin{center}
-  \[
-  \infer[@{text "(axiom)"}]{@{text "\<turnstile> A"}}{@{text "A \<in> \<Theta>"}}
-  \qquad
-  \infer[@{text "(assume)"}]{@{text "A \<turnstile> A"}}{}
-  \]
-  \[
-  \infer[@{text "(\<And>\<hyphen>intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. B[x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
-  \qquad
-  \infer[@{text "(\<And>\<hyphen>elim)"}]{@{text "\<Gamma> \<turnstile> B[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. B[x]"}}
-  \]
-  \[
-  \infer[@{text "(\<Longrightarrow>\<hyphen>intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
-  \qquad
-  \infer[@{text "(\<Longrightarrow>\<hyphen>elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
-  \]
-  \caption{Primitive inferences of Pure}\label{fig:prim-rules}
-  \end{center}
-  \end{figure}
-
-  \begin{figure}[htb]
-  \begin{center}
-  \begin{tabular}{ll}
-  @{text "\<turnstile> (\<lambda>x. b[x]) a \<equiv> b[a]"} & @{text "\<beta>"}-conversion \\
-  @{text "\<turnstile> x \<equiv> x"} & reflexivity \\
-  @{text "\<turnstile> x \<equiv> y \<Longrightarrow> P x \<Longrightarrow> P y"} & substitution \\
-  @{text "\<turnstile> (\<And>x. f x \<equiv> g x) \<Longrightarrow> f \<equiv> g"} & extensionality \\
-  @{text "\<turnstile> (A \<Longrightarrow> B) \<Longrightarrow> (B \<Longrightarrow> A) \<Longrightarrow> A \<equiv> B"} & logical equivalence \\
-  \end{tabular}
-  \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
-  \end{center}
-  \end{figure}
-
-  The introduction and elimination rules for @{text "\<And>"} and @{text
-  "\<Longrightarrow>"} are analogous to formation of dependently typed @{text
-  "\<lambda>"}-terms representing the underlying proof objects.  Proof terms
-  are irrelevant in the Pure logic, though; they cannot occur within
-  propositions.  The system provides a runtime option to record
-  explicit proof terms for primitive inferences, see also
-  \secref{sec:proof-terms}.  Thus all three levels of @{text
-  "\<lambda>"}-calculus become explicit: @{text "\<Rightarrow>"} for terms, and @{text
-  "\<And>/\<Longrightarrow>"} for proofs (cf.\ \cite{Berghofer-Nipkow:2000:TPHOL}).
-
-  Observe that locally fixed parameters (as in @{text
-  "\<And>\<hyphen>intro"}) need not be recorded in the hypotheses, because
-  the simple syntactic types of Pure are always inhabitable.
-  ``Assumptions'' @{text "x :: \<tau>"} for type-membership are only
-  present as long as some @{text "x\<^sub>\<tau>"} occurs in the statement
-  body.\footnote{This is the key difference to ``@{text "\<lambda>HOL"}'' in
-  the PTS framework \cite{Barendregt-Geuvers:2001}, where hypotheses
-  @{text "x : A"} are treated uniformly for propositions and types.}
-
-  \medskip The axiomatization of a theory is implicitly closed by
-  forming all instances of type and term variables: @{text "\<turnstile>
-  A\<vartheta>"} holds for any substitution instance of an axiom
-  @{text "\<turnstile> A"}.  By pushing substitutions through derivations
-  inductively, we also get admissible @{text "generalize"} and @{text
-  "instantiate"} rules as shown in \figref{fig:subst-rules}.
-
-  \begin{figure}[htb]
-  \begin{center}
-  \[
-  \infer{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}{@{text "\<Gamma> \<turnstile> B[\<alpha>]"} & @{text "\<alpha> \<notin> \<Gamma>"}}
-  \quad
-  \infer[\quad@{text "(generalize)"}]{@{text "\<Gamma> \<turnstile> B[?x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
-  \]
-  \[
-  \infer{@{text "\<Gamma> \<turnstile> B[\<tau>]"}}{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}
-  \quad
-  \infer[\quad@{text "(instantiate)"}]{@{text "\<Gamma> \<turnstile> B[t]"}}{@{text "\<Gamma> \<turnstile> B[?x]"}}
-  \]
-  \caption{Admissible substitution rules}\label{fig:subst-rules}
-  \end{center}
-  \end{figure}
-
-  Note that @{text "instantiate"} does not require an explicit
-  side-condition, because @{text "\<Gamma>"} may never contain schematic
-  variables.
-
-  In principle, variables could be substituted in hypotheses as well,
-  but this would disrupt the monotonicity of reasoning: deriving
-  @{text "\<Gamma>\<vartheta> \<turnstile> B\<vartheta>"} from @{text "\<Gamma> \<turnstile> B"} is
-  correct, but @{text "\<Gamma>\<vartheta> \<supseteq> \<Gamma>"} does not necessarily hold:
-  the result belongs to a different proof context.
-
-  \medskip An \emph{oracle} is a function that produces axioms on the
-  fly.  Logically, this is an instance of the @{text "axiom"} rule
-  (\figref{fig:prim-rules}), but there is an operational difference.
-  The system always records oracle invocations within derivations of
-  theorems by a unique tag.
-
-  Axiomatizations should be limited to the bare minimum, typically as
-  part of the initial logical basis of an object-logic formalization.
-  Later on, theories are usually developed in a strictly definitional
-  fashion, by stating only certain equalities over new constants.
-
-  A \emph{simple definition} consists of a constant declaration @{text
-  "c :: \<sigma>"} together with an axiom @{text "\<turnstile> c \<equiv> t"}, where @{text "t
-  :: \<sigma>"} is a closed term without any hidden polymorphism.  The RHS
-  may depend on further defined constants, but not @{text "c"} itself.
-  Definitions of functions may be presented as @{text "c \<^vec>x \<equiv>
-  t"} instead of the puristic @{text "c \<equiv> \<lambda>\<^vec>x. t"}.
-
-  An \emph{overloaded definition} consists of a collection of axioms
-  for the same constant, with at most one equation @{text
-  "c((\<^vec>\<alpha>)\<kappa>) \<equiv> t"} for each type constructor @{text "\<kappa>"} (for
-  distinct variables @{text "\<^vec>\<alpha>"}).  The RHS may mention
-  previously defined constants as above, or arbitrary constants @{text
-  "d(\<alpha>\<^sub>i)"} for some @{text "\<alpha>\<^sub>i"} projected from @{text
-  "\<^vec>\<alpha>"}.  Thus overloaded definitions essentially work by
-  primitive recursion over the syntactic structure of a single type
-  argument.  See also \cite[\S4.3]{Haftmann-Wenzel:2006:classes}.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Logic.all: "term -> term -> term"} \\
-  @{index_ML Logic.mk_implies: "term * term -> term"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type ctyp} \\
-  @{index_ML_type cterm} \\
-  @{index_ML Thm.ctyp_of: "theory -> typ -> ctyp"} \\
-  @{index_ML Thm.cterm_of: "theory -> term -> cterm"} \\
-  @{index_ML Thm.apply: "cterm -> cterm -> cterm"} \\
-  @{index_ML Thm.lambda: "cterm -> cterm -> cterm"} \\
-  @{index_ML Thm.all: "cterm -> cterm -> cterm"} \\
-  @{index_ML Drule.mk_implies: "cterm * cterm -> cterm"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type thm} \\
-  @{index_ML Thm.peek_status: "thm -> {oracle: bool, unfinished: bool, failed: bool}"} \\
-  @{index_ML Thm.transfer: "theory -> thm -> thm"} \\
-  @{index_ML Thm.assume: "cterm -> thm"} \\
-  @{index_ML Thm.forall_intr: "cterm -> thm -> thm"} \\
-  @{index_ML Thm.forall_elim: "cterm -> thm -> thm"} \\
-  @{index_ML Thm.implies_intr: "cterm -> thm -> thm"} \\
-  @{index_ML Thm.implies_elim: "thm -> thm -> thm"} \\
-  @{index_ML Thm.generalize: "string list * string list -> int -> thm -> thm"} \\
-  @{index_ML Thm.instantiate: "(ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm"} \\
-  @{index_ML Thm.add_axiom: "Proof.context ->
-  binding * term -> theory -> (string * thm) * theory"} \\
-  @{index_ML Thm.add_oracle: "binding * ('a -> cterm) -> theory ->
-  (string * ('a -> thm)) * theory"} \\
-  @{index_ML Thm.add_def: "Proof.context -> bool -> bool ->
-  binding * term -> theory -> (string * thm) * theory"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML Theory.add_deps: "Proof.context -> string ->
-  string * typ -> (string * typ) list -> theory -> theory"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Thm.peek_status}~@{text "thm"} informs about the current
-  status of the derivation object behind the given theorem.  This is a
-  snapshot of a potentially ongoing (parallel) evaluation of proofs.
-  The three Boolean values indicate the following: @{verbatim oracle}
-  if the finished part contains some oracle invocation; @{verbatim
-  unfinished} if some future proofs are still pending; @{verbatim
-  failed} if some future proof has failed, rendering the theorem
-  invalid!
-
-  \item @{ML Logic.all}~@{text "a B"} produces a Pure quantification
-  @{text "\<And>a. B"}, where occurrences of the atomic term @{text "a"} in
-  the body proposition @{text "B"} are replaced by bound variables.
-  (See also @{ML lambda} on terms.)
-
-  \item @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
-  implication @{text "A \<Longrightarrow> B"}.
-
-  \item Types @{ML_type ctyp} and @{ML_type cterm} represent certified
-  types and terms, respectively.  These are abstract datatypes that
-  guarantee that their values have passed the full well-formedness (and
-  well-typedness) checks, relative to the declarations of type
-  constructors, constants etc.\ in the background theory.  The
-  abstract types @{ML_type ctyp} and @{ML_type cterm} are part of the
-  same inference kernel that is mainly responsible for @{ML_type thm}.
-  Thus syntactic operations on @{ML_type ctyp} and @{ML_type cterm}
-  are located in the @{ML_structure Thm} module, even though theorems are
-  not yet involved at that stage.
-
-  \item @{ML Thm.ctyp_of}~@{text "thy \<tau>"} and @{ML
-  Thm.cterm_of}~@{text "thy t"} explicitly check types and terms,
-  respectively.  This also involves some basic normalizations, such as
-  expansion of type and term abbreviations from the theory context.
-  Full re-certification is relatively slow and should be avoided in
-  tight reasoning loops.
-
-  \item @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
-  Drule.mk_implies} etc.\ compose certified terms (or propositions)
-  incrementally.  This is equivalent to @{ML Thm.cterm_of} after
-  unchecked @{ML_op "$"}, @{ML lambda}, @{ML Logic.all}, @{ML
-  Logic.mk_implies} etc., but there can be a big difference in
-  performance when large existing entities are composed by a few extra
-  constructions on top.  There are separate operations to decompose
-  certified terms and theorems to produce certified terms again.
-
-  \item Type @{ML_type thm} represents proven propositions.  This is
-  an abstract datatype that guarantees that its values have been
-  constructed by basic principles of the @{ML_structure Thm} module.
-  Every @{ML_type thm} value refers to its background theory,
-  cf.\ \secref{sec:context-theory}.
-
-  \item @{ML Thm.transfer}~@{text "thy thm"} transfers the given
-  theorem to a \emph{larger} theory, see also \secref{sec:context}.
-  This formal adjustment of the background context has no logical
-  significance, but is occasionally required for formal reasons, e.g.\
-  when theorems that are imported from more basic theories are used in
-  the current situation.
-
-  \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
-  Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
-  correspond to the primitive inferences of \figref{fig:prim-rules}.
-
-  \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
-  corresponds to the @{text "generalize"} rules of
-  \figref{fig:subst-rules}.  Here collections of type and term
-  variables are generalized simultaneously, specified by the given
-  basic names.
-
-  \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
-  \<^vec>x\<^sub>\<tau>)"} corresponds to the @{text "instantiate"} rules
-  of \figref{fig:subst-rules}.  Type variables are substituted before
-  term variables.  Note that the types in @{text "\<^vec>x\<^sub>\<tau>"}
-  refer to the instantiated versions.
-
-  \item @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
-  arbitrary proposition as axiom, and retrieves it as a theorem from
-  the resulting theory, cf.\ @{text "axiom"} in
-  \figref{fig:prim-rules}.  Note that the low-level representation in
-  the axiom table may differ slightly from the returned theorem.
-
-  \item @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
-  oracle rule, essentially generating arbitrary axioms on the fly,
-  cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
-
-  \item @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
-  \<^vec>x \<equiv> t)"} states a definitional axiom for an existing constant
-  @{text "c"}.  Dependencies are recorded via @{ML Theory.add_deps},
-  unless the @{text "unchecked"} option is set.  Note that the
-  low-level representation in the axiom table may differ slightly from
-  the returned theorem.
-
-  \item @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
-  declares dependencies of a named specification for constant @{text
-  "c\<^sub>\<tau>"}, relative to existing specifications for constants @{text
-  "\<^vec>d\<^sub>\<sigma>"}.
-
-  \end{description}
-*}
-
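-text %mlex {* The following artificial sketch (not taken from existing
-  Isabelle sources; the proposition @{text "A"} is an arbitrary free
-  variable) illustrates forward reasoning with the primitive operations
-  above: the proposition is assumed and immediately discharged again,
-  yielding a theorem of @{text "A \<Longrightarrow> A"}. *}
-
-ML_val {*
-  (*arbitrary example proposition, certified wrt. the background theory*)
-  val thy = @{theory};
-  val A = Thm.cterm_of thy @{prop "A"};
-
-  (*assume A, then discharge it via implies-intro*)
-  val th = Thm.implies_intr A (Thm.assume A);
-*}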
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "ctyp"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "cterm"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "cprop"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "thm"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "thms"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "lemma"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  @@{ML_antiquotation ctyp} typ
-  ;
-  @@{ML_antiquotation cterm} term
-  ;
-  @@{ML_antiquotation cprop} prop
-  ;
-  @@{ML_antiquotation thm} thmref
-  ;
-  @@{ML_antiquotation thms} thmrefs
-  ;
-  @@{ML_antiquotation lemma} ('(' @'open' ')')? ((prop +) + @'and') \<newline>
-    @'by' method method?
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
-  current background theory --- as abstract value of type @{ML_type
-  ctyp}.
-
-  \item @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
-  certified term wrt.\ the current background theory --- as abstract
-  value of type @{ML_type cterm}.
-
-  \item @{text "@{thm a}"} produces a singleton fact --- as abstract
-  value of type @{ML_type thm}.
-
-  \item @{text "@{thms a}"} produces a general fact --- as abstract
-  value of type @{ML_type "thm list"}.
-
-  \item @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
-  the spot according to the minimal proof, which imitates a terminal
-  Isar proof.  The result is an abstract value of type @{ML_type thm}
-  or @{ML_type "thm list"}, depending on the number of propositions
-  given here.
-
-  The internal derivation object lacks a proper theorem name, but it
-  is formally closed, unless the @{text "(open)"} option is specified
-  (this may impact performance of applications with proof terms).
-
-  Since ML antiquotations are always evaluated at compile-time, there
-  is no run-time overhead even for non-trivial proofs.  Nonetheless,
-  the justification is syntactically limited to a single @{command
-  "by"} step.  More complex Isar proofs should be done in regular
-  theory source, before compiling the corresponding ML text that uses
-  the result.
-
-  \end{description}
-
-*}
-
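-text %mlex {* A small compile-time sketch of these antiquotations, using
-  arbitrary example terms and propositions: *}
-
-ML_val {*
-  val ct = @{cterm "x + y :: nat"};    (*certified term*)
-  val th = @{lemma "x + 0 = (x::nat)" by simp};    (*fact proven on the spot*)
-  val ths = @{thms conjI conjE};    (*general fact as list of theorems*)
-*}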
-
-subsection {* Auxiliary connectives \label{sec:logic-aux} *}
-
-text {* Theory @{text "Pure"} provides a few auxiliary connectives
-  that are defined on top of the primitive ones, see
-  \figref{fig:pure-aux}.  These special constants are useful in
-  certain internal encodings, and are normally not directly exposed to
-  the user.
-
-  \begin{figure}[htb]
-  \begin{center}
-  \begin{tabular}{ll}
-  @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&&&"}) \\
-  @{text "\<turnstile> A &&& B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
-  @{text "prop :: prop \<Rightarrow> prop"} & (prefix @{text "#"}, suppressed) \\
-  @{text "#A \<equiv> A"} \\[1ex]
-  @{text "term :: \<alpha> \<Rightarrow> prop"} & (prefix @{text "TERM"}) \\
-  @{text "term x \<equiv> (\<And>A. A \<Longrightarrow> A)"} \\[1ex]
-  @{text "type :: \<alpha> itself"} & (prefix @{text "TYPE"}) \\
-  @{text "(unspecified)"} \\
-  \end{tabular}
-  \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
-  \end{center}
-  \end{figure}
-
-  The introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &&& B"}, and eliminations
-  (projections) @{text "A &&& B \<Longrightarrow> A"} and @{text "A &&& B \<Longrightarrow> B"} are
-  available as derived rules.  Conjunction makes it possible to treat
-  simultaneous assumptions and conclusions uniformly, e.g.\ consider
-  @{text "A \<Longrightarrow> B \<Longrightarrow> C &&& D"}.  In particular, the goal mechanism
-  represents multiple claims as explicit conjunction internally, but
-  this is refined (via backwards introduction) into separate sub-goals
-  before the user commences the proof; the final result is projected
-  into a list of theorems using eliminations (cf.\
-  \secref{sec:tactical-goals}).
-
-  The @{text "prop"} marker (@{text "#"}) makes arbitrarily complex
-  propositions appear as atomic, without changing the meaning: @{text
-  "\<Gamma> \<turnstile> A"} and @{text "\<Gamma> \<turnstile> #A"} are interchangeable.  See
-  \secref{sec:tactical-goals} for specific operations.
-
-  The @{text "term"} marker turns any well-typed term into a derivable
-  proposition: @{text "\<turnstile> TERM t"} holds unconditionally.  Although
-  this is logically vacuous, it allows terms and proofs to be treated
-  uniformly, similar to a type-theoretic framework.
-
-  The @{text "TYPE"} constructor is the canonical representative of
-  the unspecified type @{text "\<alpha> itself"}; it essentially injects the
-  language of types into that of terms.  There is specific notation
-  @{text "TYPE(\<tau>)"} for @{text "TYPE\<^bsub>\<tau> itself\<^esub>"}.
-  Although being devoid of any particular meaning, the term @{text
-  "TYPE(\<tau>)"} accounts for the type @{text "\<tau>"} within the term
-  language.  In particular, @{text "TYPE(\<alpha>)"} may be used as formal
-  argument in primitive definitions, in order to circumvent hidden
-  polymorphism (cf.\ \secref{sec:terms}).  For example, @{text "c
-  TYPE(\<alpha>) \<equiv> A[\<alpha>]"} defines @{text "c :: \<alpha> itself \<Rightarrow> prop"} in terms of
-  a proposition @{text "A"} that depends on an additional type
-  argument, which is essentially a predicate on types.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Conjunction.intr: "thm -> thm -> thm"} \\
-  @{index_ML Conjunction.elim: "thm -> thm * thm"} \\
-  @{index_ML Drule.mk_term: "cterm -> thm"} \\
-  @{index_ML Drule.dest_term: "thm -> cterm"} \\
-  @{index_ML Logic.mk_type: "typ -> term"} \\
-  @{index_ML Logic.dest_type: "term -> typ"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
-  "A"} and @{text "B"}.
-
-  \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
-  from @{text "A &&& B"}.
-
-  \item @{ML Drule.mk_term} derives @{text "TERM t"}.
-
-  \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
-  "TERM t"}.
-
-  \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
-  "TYPE(\<tau>)"}.
-
-  \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
-  @{text "\<tau>"}.
-
-  \end{description}
-*}
-
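-text %mlex {* The following sketch uses arbitrary basic facts and terms of
-  Isabelle/HOL to illustrate the auxiliary connectives at the ML level:
-  two theorems are combined into a conjunction and projected again, and a
-  term is wrapped up as a vacuous @{text "TERM"} proposition. *}
-
-ML_val {*
-  (*conjunction of two arbitrary facts, and its projections*)
-  val conj = Conjunction.intr @{thm refl} @{thm TrueI};
-  val (th1, th2) = Conjunction.elim conj;
-
-  (*an arbitrary term wrapped up as derivable proposition, and recovered*)
-  val th = Drule.mk_term @{cterm "a :: nat"};
-  val ct = Drule.dest_term th;
-*}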
-
-subsection {* Sort hypotheses *}
-
-text {* Type variables are decorated with sorts, as explained in
-  \secref{sec:types}.  This constrains type instantiation to certain
-  ranges of types: variable @{text "\<alpha>\<^sub>s"} may only be assigned to types
-  @{text "\<tau>"} that belong to sort @{text "s"}.  Within the logic, sort
-  constraints act like implicit preconditions on the result @{text
-  "\<lparr>\<alpha>\<^sub>1 : s\<^sub>1\<rparr>, \<dots>, \<lparr>\<alpha>\<^sub>n : s\<^sub>n\<rparr>, \<Gamma> \<turnstile> \<phi>"} where the type variables @{text
-  "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} cover the propositions @{text "\<Gamma>"}, @{text "\<phi>"}, as
-  well as the proof of @{text "\<Gamma> \<turnstile> \<phi>"}.
-
-  These \emph{sort hypotheses} of a theorem are passed monotonically
-  through further derivations.  They are redundant, as long as the
-  statement of a theorem still contains the type variables that are
-  accounted for here.  The logical significance of sort hypotheses is
-  limited to the boundary case where type variables disappear from the
-  proposition, e.g.\ @{text "\<lparr>\<alpha>\<^sub>s : s\<rparr> \<turnstile> \<phi>"}.  Since such dangling type
-  variables can be renamed arbitrarily without changing the
-  proposition @{text "\<phi>"}, the inference kernel maintains sort
-  hypotheses in anonymous form @{text "s \<turnstile> \<phi>"}.
-
-  In most practical situations, such extra sort hypotheses may be
-  stripped in a final bookkeeping step, e.g.\ at the end of a proof:
-  they are typically left over from intermediate reasoning with type
-  classes that can be satisfied by some concrete type @{text "\<tau>"} of
-  sort @{text "s"} to replace the hypothetical type variable @{text
-  "\<alpha>\<^sub>s"}.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Thm.extra_shyps: "thm -> sort list"} \\
-  @{index_ML Thm.strip_shyps: "thm -> thm"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
-  sort hypotheses of the given theorem, i.e.\ the sorts that are not
-  present within type variables of the statement.
-
-  \item @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
-  sort hypotheses that can be witnessed from the type signature.
-
-  \end{description}
-*}
-
-text %mlex {* The following artificial example demonstrates the
-  derivation of @{prop False} with a pending sort hypothesis involving
-  a logically empty sort.  *}
-
-class empty =
-  assumes bad: "\<And>(x::'a) y. x \<noteq> y"
-
-theorem (in empty) false: False
-  using bad by blast
-
-ML {*
-  @{assert} (Thm.extra_shyps @{thm false} = [@{sort empty}])
-*}
-
-text {* Thanks to the inference kernel managing sort hypotheses
-  according to their logical significance, this example is merely an
-  instance of \emph{ex falso quodlibet consequitur} --- not a collapse
-  of the logical framework! *}
-
-
-section {* Object-level rules \label{sec:obj-rules} *}
-
-text {*
-  The primitive inferences covered so far mostly serve foundational
-  purposes.  User-level reasoning usually works via object-level rules
-  that are represented as theorems of Pure.  Composition of rules
-  involves \emph{backchaining}, \emph{higher-order unification} modulo
-  @{text "\<alpha>\<beta>\<eta>"}-conversion of @{text "\<lambda>"}-terms, and so-called
-  \emph{lifting} of rules into a context of @{text "\<And>"} and @{text
-  "\<Longrightarrow>"} connectives.  Thus the full power of higher-order Natural
-  Deduction in Isabelle/Pure becomes readily available.
-*}
-
-
-subsection {* Hereditary Harrop Formulae *}
-
-text {*
-  The idea of object-level rules is to model Natural Deduction
-  inferences in the style of Gentzen \cite{Gentzen:1935}, but we allow
-  arbitrary nesting similar to \cite{extensions91}.  The most basic
-  rule format is that of a \emph{Horn Clause}:
-  \[
-  \infer{@{text "A"}}{@{text "A\<^sub>1"} & @{text "\<dots>"} & @{text "A\<^sub>n"}}
-  \]
-  where @{text "A, A\<^sub>1, \<dots>, A\<^sub>n"} are atomic propositions
-  of the framework, usually of the form @{text "Trueprop B"}, where
-  @{text "B"} is a (compound) object-level statement.  This
-  object-level inference corresponds to an iterated implication in
-  Pure like this:
-  \[
-  @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"}
-  \]
-  As an example consider conjunction introduction: @{text "A \<Longrightarrow> B \<Longrightarrow> A \<and>
-  B"}.  Any parameters occurring in such rule statements are
-  conceptually treated as arbitrary:
-  \[
-  @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. A\<^sub>1 x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> \<dots> A\<^sub>n x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> A x\<^sub>1 \<dots> x\<^sub>m"}
-  \]
-
-  Nesting of rules means that the positions of @{text "A\<^sub>i"} may
-  again hold compound rules, not just atomic propositions.
-  Propositions of this format are called \emph{Hereditary Harrop
-  Formulae} in the literature \cite{Miller:1991}.  Here we give an
-  inductive characterization as follows:
-
-  \medskip
-  \begin{tabular}{ll}
-  @{text "\<^bold>x"} & set of variables \\
-  @{text "\<^bold>A"} & set of atomic propositions \\
-  @{text "\<^bold>H  =  \<And>\<^bold>x\<^sup>*. \<^bold>H\<^sup>* \<Longrightarrow> \<^bold>A"} & set of Hereditary Harrop Formulas \\
-  \end{tabular}
-  \medskip
-
-  Thus we essentially impose nesting levels on propositions formed
-  from @{text "\<And>"} and @{text "\<Longrightarrow>"}.  At each level there is a prefix
-  of parameters and compound premises, concluding an atomic
-  proposition.  Typical examples are @{text "\<longrightarrow>"}-introduction @{text
-  "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B"} or mathematical induction @{text "P 0 \<Longrightarrow> (\<And>n. P n
-  \<Longrightarrow> P (Suc n)) \<Longrightarrow> P n"}.  Even deeper nesting occurs in well-founded
-  induction @{text "(\<And>x. (\<And>y. y \<prec> x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P x"}, but this
-  already marks the limit of rule complexity that is usually seen in
-  practice.
-
-  \medskip Regular user-level inferences in Isabelle/Pure always
-  maintain the following canonical form of results:
-
-  \begin{itemize}
-
-  \item Normalization by @{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"},
-  which is a theorem of Pure, means that quantifiers are pushed in
-  front of implication at each level of nesting.  The normal form is a
-  Hereditary Harrop Formula.
-
-  \item The outermost prefix of parameters is represented via
-  schematic variables: instead of @{text "\<And>\<^vec>x. \<^vec>H \<^vec>x
-  \<Longrightarrow> A \<^vec>x"} we have @{text "\<^vec>H ?\<^vec>x \<Longrightarrow> A ?\<^vec>x"}.
-  Note that this representation loses information about the order of
-  parameters, and vacuous quantifiers vanish automatically.
-
-  \end{itemize}
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
-  theorem according to the canonical form specified above.  This is
-  occasionally helpful to repair some low-level tools that do not
-  handle Hereditary Harrop Formulae properly.
-
-  \end{description}
-*}
-
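-text %mlex {* An artificial sketch of this normal form: a proposition with a
-  quantifier behind an implication is merely assumed (so no proof is
-  required), and is then normalized into Hereditary Harrop form. *}
-
-ML_val {*
-  (*arbitrary proposition with the quantifier behind the implication*)
-  val raw = Thm.assume @{cprop "A \<Longrightarrow> (\<And>x::nat. B x)"};
-
-  (*the quantifier is pushed in front of the implication*)
-  val norm = Simplifier.norm_hhf @{context} raw;
-*}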
-
-subsection {* Rule composition *}
-
-text {*
-  The rule calculus of Isabelle/Pure provides two main inferences:
-  @{inference resolution} (i.e.\ back-chaining of rules) and
-  @{inference assumption} (i.e.\ closing a branch), both modulo
-  higher-order unification.  There are also combined variants, notably
-  @{inference elim_resolution} and @{inference dest_resolution}.
-
-  To understand the all-important @{inference resolution} principle,
-  we first consider raw @{inference_def composition} (modulo
-  higher-order unification with substitution @{text "\<vartheta>"}):
-  \[
-  \infer[(@{inference_def composition})]{@{text "\<^vec>A\<vartheta> \<Longrightarrow> C\<vartheta>"}}
-  {@{text "\<^vec>A \<Longrightarrow> B"} & @{text "B' \<Longrightarrow> C"} & @{text "B\<vartheta> = B'\<vartheta>"}}
-  \]
-  Here the conclusion of the first rule is unified with the premise of
-  the second; the resulting rule instance inherits the premises of the
-  first and conclusion of the second.  Note that @{text "C"} can again
-  consist of iterated implications.  We can also permute the premises
-  of the second rule back-and-forth in order to compose with @{text
-  "B'"} in any position (subsequently we shall always refer to
-  position 1 w.l.o.g.).
-
-  In @{inference composition} the internal structure of the common
-  part @{text "B"} and @{text "B'"} is not taken into account.  For
-  proper @{inference resolution} we require @{text "B"} to be atomic,
-  and explicitly observe the structure @{text "\<And>\<^vec>x. \<^vec>H
-  \<^vec>x \<Longrightarrow> B' \<^vec>x"} of the premise of the second rule.  The
-  idea is to adapt the first rule by ``lifting'' it into this context,
-  by means of iterated application of the following inferences:
-  \[
-  \infer[(@{inference_def imp_lift})]{@{text "(\<^vec>H \<Longrightarrow> \<^vec>A) \<Longrightarrow> (\<^vec>H \<Longrightarrow> B)"}}{@{text "\<^vec>A \<Longrightarrow> B"}}
-  \]
-  \[
-  \infer[(@{inference_def all_lift})]{@{text "(\<And>\<^vec>x. \<^vec>A (?\<^vec>a \<^vec>x)) \<Longrightarrow> (\<And>\<^vec>x. B (?\<^vec>a \<^vec>x))"}}{@{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"}}
-  \]
-  By combining raw composition with lifting, we get full @{inference
-  resolution} as follows:
-  \[
-  \infer[(@{inference_def resolution})]
-  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (?\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
-  {\begin{tabular}{l}
-    @{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"} \\
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "(\<lambda>\<^vec>x. B (?\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
-   \end{tabular}}
-  \]
-
-  Continued resolution of rules allows a problem to be back-chained
-  into more and more sub-problems.  Branches are closed either by resolving with
-  a rule of 0 premises, or by producing a ``short-circuit'' within a
-  solved situation (again modulo unification):
-  \[
-  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
-  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text i})}}
-  \]
-
-  %FIXME @{inference_def elim_resolution}, @{inference_def dest_resolution}
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_op "RSN": "thm * (int * thm) -> thm"} \\
-  @{index_ML_op "RS": "thm * thm -> thm"} \\
-
-  @{index_ML_op "RLN": "thm list * (int * thm list) -> thm list"} \\
-  @{index_ML_op "RL": "thm list * thm list -> thm list"} \\
-
-  @{index_ML_op "MRS": "thm list * thm -> thm"} \\
-  @{index_ML_op "OF": "thm * thm list -> thm"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
-  @{text "rule\<^sub>1"} with the @{text i}-th premise of @{text "rule\<^sub>2"},
-  according to the @{inference resolution} principle explained above.
-  Unless there is precisely one resolvent it raises exception @{ML
-  THM}.
-
-  This corresponds to the rule attribute @{attribute THEN} in Isar
-  source language.
-
-  \item @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
-  rule\<^sub>2)"}.
-
-  \item @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
-  every @{text "rule\<^sub>1"} in @{text "rules\<^sub>1"} and @{text "rule\<^sub>2"} in
-  @{text "rules\<^sub>2"}, it resolves the conclusion of @{text "rule\<^sub>1"} with
-  the @{text "i"}-th premise of @{text "rule\<^sub>2"}, accumulating multiple
-  results in one big list.  Note that such strict enumerations of
-  higher-order unifications can be inefficient compared to the lazy
-  variant seen in elementary tactics like @{ML resolve_tac}.
-
-  \item @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
-  rules\<^sub>2)"}.
-
-  \item @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
-  against premise @{text "i"} of @{text "rule"}, for @{text "i = n, \<dots>,
-  1"}.  By working from right to left, newly emerging premises are
-  concatenated in the result, without interfering.
-
-  \item @{text "rule OF rules"} is an alternative notation for @{text
-  "rules MRS rule"}, which makes rule composition look more like
-  function application.  Note that the argument @{text "rules"} need
-  not be atomic.
-
-  This corresponds to the rule attribute @{attribute OF} in Isar
-  source language.
-
-  \end{description}
-*}
-
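-text %mlex {* A brief sketch of rule composition in ML, using two basic
-  rules of Isabelle/HOL as arbitrary examples: *}
-
-ML_val {*
-  (*resolve the conclusion of conjI with the (first) premise of disjI1*)
-  val th1 = @{thm conjI} RS @{thm disjI1};
-
-  (*the same composition, in "function application" style*)
-  val th2 = @{thm disjI1} OF [@{thm conjI}];
-*}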
-
-section {* Proof terms \label{sec:proof-terms} *}
-
-text {* The Isabelle/Pure inference kernel can record the proof of
-  each theorem as a proof term that contains all logical inferences in
-  detail.  Rule composition by resolution (\secref{sec:obj-rules}) and
-  type-class reasoning is broken down to primitive rules of the
-  logical framework.  The proof term can be inspected by a separate
-  proof-checker, for example.
-
-  According to the well-known \emph{Curry-Howard isomorphism}, a proof
-  can be viewed as a @{text "\<lambda>"}-term. Following this idea, proofs in
-  Isabelle are internally represented by a datatype similar to the one
-  for terms described in \secref{sec:terms}.  On top of these
-  syntactic terms, two more layers of @{text "\<lambda>"}-calculus are added,
-  which correspond to @{text "\<And>x :: \<alpha>. B x"} and @{text "A \<Longrightarrow> B"}
-  according to the propositions-as-types principle.  The resulting
-  3-level @{text "\<lambda>"}-calculus resembles ``@{text "\<lambda>HOL"}'' in the
-  more abstract setting of Pure Type Systems (PTS)
-  \cite{Barendregt-Geuvers:2001}, if some fine points like schematic
-  polymorphism and type classes are ignored.
-
-  \medskip\emph{Proof abstractions} of the form @{text "\<^bold>\<lambda>x :: \<alpha>. prf"}
-  or @{text "\<^bold>\<lambda>p : A. prf"} correspond to introduction of @{text
-  "\<And>"}/@{text "\<Longrightarrow>"}, and \emph{proof applications} of the form @{text
-  "p \<cdot> t"} or @{text "p \<bullet> q"} correspond to elimination of @{text
-  "\<And>"}/@{text "\<Longrightarrow>"}.  Actual types @{text "\<alpha>"}, propositions @{text
-  "A"}, and terms @{text "t"} might be suppressed and reconstructed
-  from the overall proof term.
-
-  \medskip Various atomic proofs indicate special situations within
-  the proof construction as follows.
-
-  A \emph{bound proof variable} is a natural number @{text "b"} that
-  acts as de-Bruijn index for proof term abstractions.
-
-  A \emph{minimal proof} ``@{text "?"}'' is a dummy proof term.  This
-  indicates some unrecorded part of the proof.
-
-  @{text "Hyp A"} refers to some pending hypothesis by giving its
-  proposition.  This indicates an open context of implicit hypotheses,
-  similar to loose bound variables or free variables within a term
-  (\secref{sec:terms}).
-
-  An \emph{axiom} or \emph{oracle} @{text "a : A[\<^vec>\<tau>]"} refers
-  to some postulated @{text "proof constant"}, which is subject to
-  schematic polymorphism of theory content, and the particular type
-  instantiation may be given explicitly.  The vector of types @{text
-  "\<^vec>\<tau>"} refers to the schematic type variables in the generic
-  proposition @{text "A"} in canonical order.
-
-  A \emph{proof promise} @{text "a : A[\<^vec>\<tau>]"} is a placeholder
-  for some proof of polymorphic proposition @{text "A"}, with explicit
-  type instantiation as given by the vector @{text "\<^vec>\<tau>"}, as
-  above.  Unlike axioms or oracles, proof promises may be
-  \emph{fulfilled} eventually, by substituting @{text "a"} by some
-  particular proof @{text "q"} at the corresponding type instance.
-  This acts like Hindley-Milner @{text "let"}-polymorphism: a generic
-  local proof definition may get used at different type instances, and
-  is replaced by the concrete instance eventually.
-
-  A \emph{named theorem} wraps up some concrete proof as a closed
-  formal entity, in the manner of constant definitions for proof
-  terms.  The \emph{proof body} of such boxed theorems involves some
-  digest about oracles and promises occurring in the original proof.
-  This allows the inference kernel to manage this critical information
-  without the full overhead of explicit proof terms.
-*}
-
-
-subsection {* Reconstructing and checking proof terms *}
-
-text {* Fully explicit proof terms can be large, but most of this
-  information is redundant and can be reconstructed from the context.
-  Therefore, the Isabelle/Pure inference kernel records only
-  \emph{implicit} proof terms, by omitting all typing information in
-  terms, all term and type labels of proof abstractions, and some
-  argument terms of applications @{text "p \<cdot> t"} (if possible).
-
-  There are separate operations to reconstruct the full proof term
-  later on, using \emph{higher-order pattern unification}
-  \cite{nipkow-patterns,Berghofer-Nipkow:2000:TPHOL}.
-
-  The \emph{proof checker} expects a fully reconstructed proof term,
-  and can turn it into a theorem by replaying its primitive inferences
-  within the kernel.  *}
-
-
-subsection {* Concrete syntax of proof terms *}
-
-text {* The concrete syntax of proof terms is a slight extension of
-  the regular inner syntax of Isabelle/Pure \cite{isabelle-isar-ref}.
-  Its main syntactic category @{syntax (inner) proof} is defined as
-  follows:
-
-  \begin{center}
-  \begin{supertabular}{rclr}
-
-  @{syntax_def (inner) proof} & = & @{verbatim Lam} @{text params} @{verbatim "."} @{text proof} \\
-    & @{text "|"} & @{text "\<^bold>\<lambda>"} @{text "params"} @{verbatim "."} @{text proof} \\
-    & @{text "|"} & @{text proof} @{verbatim "%"} @{text any} \\
-    & @{text "|"} & @{text proof} @{text "\<cdot>"} @{text any} \\
-    & @{text "|"} & @{text proof} @{verbatim "%%"} @{text proof} \\
-    & @{text "|"} & @{text proof} @{text "\<bullet>"} @{text proof} \\
-    & @{text "|"} & @{text "id  |  longid"} \\
-  \\
-
-  @{text param} & = & @{text idt} \\
-    & @{text "|"} & @{text idt} @{verbatim ":"} @{text prop} \\
-    & @{text "|"} & @{verbatim "("} @{text param} @{verbatim ")"} \\
-  \\
-
-  @{text params} & = & @{text param} \\
-    & @{text "|"} & @{text param} @{text params} \\
-
-  \end{supertabular}
-  \end{center}
-
-  Implicit term arguments in partial proofs are indicated by ``@{text
-  "_"}''.  Type arguments for theorems and axioms may be specified
-  using @{text "p \<cdot> TYPE(type)"} (they must appear before any other
-  term argument of a theorem or axiom, but may be omitted altogether).
-
-  \medskip There are separate read and print operations for proof
-  terms, in order to avoid conflicts with the regular term language.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type proof} \\
-  @{index_ML_type proof_body} \\
-  @{index_ML proofs: "int Unsynchronized.ref"} \\
-  @{index_ML Reconstruct.reconstruct_proof:
-  "theory -> term -> proof -> proof"} \\
-  @{index_ML Reconstruct.expand_proof: "theory ->
-  (string * term option) list -> proof -> proof"} \\
-  @{index_ML Proof_Checker.thm_of_proof: "theory -> proof -> thm"} \\
-  @{index_ML Proof_Syntax.read_proof: "theory -> bool -> bool -> string -> proof"} \\
-  @{index_ML Proof_Syntax.pretty_proof: "Proof.context -> proof -> Pretty.T"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type proof} represents proof terms; this is a
-  datatype with constructors @{index_ML Abst}, @{index_ML AbsP},
-  @{index_ML_op "%"}, @{index_ML_op "%%"}, @{index_ML PBound},
-  @{index_ML MinProof}, @{index_ML Hyp}, @{index_ML PAxm}, @{index_ML
-  Oracle}, @{index_ML Promise}, @{index_ML PThm} as explained above.
-  %FIXME OfClass (!?)
-
-  \item Type @{ML_type proof_body} represents the nested proof
-  information of a named theorem, consisting of a digest of oracles
-  and named theorems over some proof term.  The digest only covers the
-  directly visible part of the proof: in order to get the full
-  information, the implicit graph of nested theorems needs to be
-  traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
-
-  \item @{ML Thm.proof_of}~@{text "thm"} and @{ML
-  Thm.proof_body_of}~@{text "thm"} produce the proof term or proof
-  body (with digest of oracles and theorems) from a given theorem.
-  Note that this involves a full join of internal futures that fulfill
-  pending proof promises, and thus disrupts the natural bottom-up
-  construction of proofs by introducing dynamic ad-hoc dependencies.
-  Parallel performance may suffer by inspecting proof terms at
-  run-time.
-
-  \item @{ML proofs} specifies the detail of proof recording within
-  @{ML_type thm} values produced by the inference kernel: @{ML 0}
-  records only the names of oracles, @{ML 1} records oracle names and
-  propositions, @{ML 2} additionally records full proof terms.
-  Officially named theorems that contribute to a result are recorded
-  in any case.
-
-  \item @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
-  turns the implicit proof term @{text "prf"} into a full proof of the
-  given proposition.
-
-  Reconstruction may fail if @{text "prf"} is not a proof of @{text
-  "prop"}, or if it does not contain sufficient information for
-  reconstruction.  Failure may only happen for proofs that are
-  constructed manually, but not for those produced automatically by
-  the inference kernel.
-
-  \item @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
-  prf"} expands and reconstructs the proofs of all specified theorems,
-  with the given (full) proof.  Theorems that are not uniquely specified
-  via their name may be disambiguated by giving their proposition.
-
-  \item @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
-  given (full) proof into a theorem, by replaying it using only
-  primitive rules of the inference kernel.
-
-  \item @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
-  proof term. The Boolean flags indicate the use of sort and type
-  information.  Usually, typing information is left implicit and is
-  inferred during proof reconstruction.  %FIXME eliminate flags!?
-
-  \item @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
-  pretty-prints the given proof term.
-
-  \end{description}
-*}
-
-text %mlex {* Detailed proof information of a theorem may be retrieved
-  as follows: *}
-
-lemma ex: "A \<and> B \<longrightarrow> B \<and> A"
-proof
-  assume "A \<and> B"
-  then obtain B and A ..
-  then show "B \<and> A" ..
-qed
-
-ML_val {*
-  (*proof body with digest*)
-  val body = Proofterm.strip_thm (Thm.proof_body_of @{thm ex});
-
-  (*proof term only*)
-  val prf = Proofterm.proof_of body;
-  Pretty.writeln (Proof_Syntax.pretty_proof @{context} prf);
-
-  (*all theorems used in the graph of nested proofs*)
-  val all_thms =
-    Proofterm.fold_body_thms
-      (fn (name, _, _) => insert (op =) name) [body] [];
-*}
-
-text {* The result refers to various basic facts of Isabelle/HOL:
-  @{thm [source] HOL.impI}, @{thm [source] HOL.conjE}, @{thm [source]
-  HOL.conjI} etc.  The combinator @{ML Proofterm.fold_body_thms}
-  recursively explores the graph of the proofs of all theorems being
-  used here.
-
-  \medskip Alternatively, we may produce a proof term manually, and
-  turn it into a theorem as follows: *}
-
-ML_val {*
-  val thy = @{theory};
-  val prf =
-    Proof_Syntax.read_proof thy true false
-      "impI \<cdot> _ \<cdot> _ \<bullet> \
-      \   (\<^bold>\<lambda>H: _. \
-      \     conjE \<cdot> _ \<cdot> _ \<cdot> _ \<bullet> H \<bullet> \
-      \       (\<^bold>\<lambda>(H: _) Ha: _. conjI \<cdot> _ \<cdot> _ \<bullet> Ha \<bullet> H))";
-  val thm =
-    prf
-    |> Reconstruct.reconstruct_proof thy @{prop "A \<and> B \<longrightarrow> B \<and> A"}
-    |> Proof_Checker.thm_of_proof thy
-    |> Drule.export_without_context;
-*}
-
-text {* \medskip See also @{file "~~/src/HOL/Proofs/ex/XML_Data.thy"}
-  for further examples, with export and import of proof terms via
-  XML/ML data representation.
-*}
-
-end
--- a/src/Doc/IsarImplementation/ML.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2116 +0,0 @@
-theory "ML"
-imports Base
-begin
-
-chapter {* Isabelle/ML *}
-
-text {* Isabelle/ML is best understood as a certain culture based on
-  Standard ML.  Thus it is not a new programming language, but a
-  certain way to use SML at an advanced level within the Isabelle
-  environment.  This covers a variety of aspects that are geared
-  towards an efficient and robust platform for applications of formal
-  logic with fully foundational proof construction --- according to
-  the well-known \emph{LCF principle}.  There is specific
-  infrastructure with library modules to address the needs of this
-  difficult task.  For example, the raw parallel programming model of
-  Poly/ML is presented as the considerably more abstract concept of
-  \emph{future values}, which is then used to augment the inference
-  kernel, proof interpreter, and theory loader accordingly.
-
-  The main aspects of Isabelle/ML are introduced below.  These
-  first-hand explanations should help to understand how proper
-  Isabelle/ML is to be read and written, and to get access to the
-  wealth of experience that is expressed in the source text and its
-  history of changes.\footnote{See
-  @{url "http://isabelle.in.tum.de/repos/isabelle"} for the full
-  Mercurial history.  There are symbolic tags to refer to official
-  Isabelle releases, as opposed to arbitrary \emph{tip} versions that
-  merely reflect snapshots that are never really up-to-date.}  *}
-
-
-section {* Style and orthography *}
-
-text {* The sources of Isabelle/Isar are optimized for
-  \emph{readability} and \emph{maintainability}.  The main purpose is
-  to tell an informed reader what is really going on and how things
-  really work.  This is a non-trivial aim, but it is supported by a
-  certain style of writing Isabelle/ML that has emerged from long
-  years of system development.\footnote{See also the interesting style
-  guide for OCaml
-  @{url "http://caml.inria.fr/resources/doc/guides/guidelines.en.html"}
-  which shares many of our means and ends.}
-
-  The main principle behind any coding style is \emph{consistency}.
-  For a single author of a small program this merely means ``choose
-  your style and stick to it''.  A complex project like Isabelle, with
-  long years of development and different contributors, requires more
-  standardization.  A coding style that is changed every few years or
-  with every new contributor is no style at all, because consistency
-  is quickly lost.  Global consistency is hard to achieve, though.
-  Nonetheless, one should always strive at least for local consistency
-  of modules and sub-systems, without deviating from some general
-  principles of how to write Isabelle/ML.
-
-  In a sense, good coding style is like an \emph{orthography} for the
-  sources: it helps to read quickly over the text and see through the
-  main points, without getting distracted by accidental presentation
-  of free-style code.
-*}
-
-
-subsection {* Header and sectioning *}
-
-text {* Isabelle source files have a certain standardized header
-  format (with precise spacing) that follows ancient traditions
-  reaching back to the earliest versions of the system by Larry
-  Paulson.  See @{file "~~/src/Pure/thm.ML"}, for example.
-
-  The header includes at least @{verbatim Title} and @{verbatim
-  Author} entries, followed by a prose description of the purpose of
-  the module.  The latter can range from a single line to several
-  paragraphs of explanations.
-
-  The rest of the file is divided into sections, subsections,
-  subsubsections, paragraphs etc.\ using a simple layout via ML
-  comments as follows.
-
-\begin{verbatim}
-(*** section ***)
-
-(** subsection **)
-
-(* subsubsection *)
-
-(*short paragraph*)
-
-(*
-  long paragraph,
-  with more text
-*)
-\end{verbatim}
-
-  As in regular typography, there is some extra space \emph{before}
-  section headings that are adjacent to plain text (not other headings
-  as in the example above).
-
-  \medskip The precise wording of the prose text given in these
-  headings is chosen carefully to introduce the main theme of the
-  subsequent formal ML text.
-*}
-
-
-subsection {* Naming conventions *}
-
-text {* Since ML is the primary medium to express the meaning of the
-  source text, naming of ML entities requires special care.
-
-  \paragraph{Notation.}  A name consists of 1--3 \emph{words} (rarely
-  4, but not more) that are separated by underscore.  There are three
-  variants concerning upper or lower case letters, which are used for
-  certain ML categories as follows:
-
-  \medskip
-  \begin{tabular}{lll}
-  variant & example & ML categories \\\hline
-  lower-case & @{ML_text foo_bar} & values, types, record fields \\
-  capitalized & @{ML_text Foo_Bar} & datatype constructors, structures, functors \\
-  upper-case & @{ML_text FOO_BAR} & special values, exception constructors, signatures \\
-  \end{tabular}
-  \medskip
-
-  For historical reasons, many capitalized names omit underscores,
-  e.g.\ old-style @{ML_text FooBar} instead of @{ML_text Foo_Bar}.
-  Genuine mixed-case names are \emph{not} used, because clear division
-  of words is essential for readability.\footnote{Camel-case was
-  invented to work around the lack of underscore in some early
-  non-ASCII character sets.  Later it became habitual in some language
-  communities that are now strong in numbers.}
-
-  A single (capital) character does not count as ``word'' in this
-  respect: some Isabelle/ML names are suffixed by extra markers like
-  this: @{ML_text foo_barT}.
-
-  Name variants are produced by adding 1--3 primes, e.g.\ @{ML_text
-  foo'}, @{ML_text foo''}, or @{ML_text foo'''}, but not @{ML_text
-  foo''''} or more.  Decimal digits scale better to larger numbers,
-  e.g.\ @{ML_text foo0}, @{ML_text foo1}, @{ML_text foo42}.
-
-  \paragraph{Scopes.}  Apart from very basic library modules, ML
-  structures are not ``opened'', but names are referenced with
-  explicit qualification, as in @{ML Syntax.string_of_term} for
-  example.  When devising names for structures and their components it
-  is important to aim at eye-catching compositions of both parts, because
-  this is how they are seen in the sources and documentation.  For the
-  same reasons, aliases of well-known library functions should be
-  avoided.
-
-  Local names of function abstraction or case/let bindings are
-  typically shorter, sometimes using only rudiments of ``words'',
-  while still avoiding cryptic shorthands.  An auxiliary function
-  called @{ML_text helper}, @{ML_text aux}, or @{ML_text f} is
-  considered bad style.
-
-  Example:
-
-  \begin{verbatim}
-  (* RIGHT *)
-
-  fun print_foo ctxt foo =
-    let
-      fun print t = ... Syntax.string_of_term ctxt t ...
-    in ... end;
-
-
-  (* RIGHT *)
-
-  fun print_foo ctxt foo =
-    let
-      val string_of_term = Syntax.string_of_term ctxt;
-      fun print t = ... string_of_term t ...
-    in ... end;
-
-
-  (* WRONG *)
-
-  val string_of_term = Syntax.string_of_term;
-
-  fun print_foo ctxt foo =
-    let
-      fun aux t = ... string_of_term ctxt t ...
-    in ... end;
-
-  \end{verbatim}
-
-
-  \paragraph{Specific conventions.} Here are some specific name forms
-  that occur frequently in the sources.
-
-  \begin{itemize}
-
-  \item A function that maps @{ML_text foo} to @{ML_text bar} is
-  called @{ML_text foo_to_bar} or @{ML_text bar_of_foo} (never
-  @{ML_text foo2bar}, @{ML_text bar_from_foo}, @{ML_text
-  bar_for_foo}, or @{ML_text bar4foo}).
-
-  \item The name component @{ML_text legacy} means that the operation
-  is about to be discontinued soon.
-
-  \item The name component @{ML_text old} means that this is historic
-  material that might disappear at some later stage.
-
-  \item The name component @{ML_text global} means that this works
-  with the background theory instead of the regular local context
-  (\secref{sec:context}), sometimes for historical reasons, sometimes
-  due to a genuine lack of locality of the concept involved, sometimes as
-  a fall-back for the lack of a proper context in the application
-  code.  Whenever there is a non-global variant available, the
-  application should be migrated to use it with a proper local
-  context.
-
-  \item Variables of the main context types of the Isabelle/Isar
-  framework (\secref{sec:context} and \chref{ch:local-theory}) have
-  firm naming conventions as follows:
-
-  \begin{itemize}
-
-  \item theories are called @{ML_text thy}, rarely @{ML_text theory}
-  (never @{ML_text thry})
-
-  \item proof contexts are called @{ML_text ctxt}, rarely @{ML_text
-  context} (never @{ML_text ctx})
-
-  \item generic contexts are called @{ML_text context}, rarely
-  @{ML_text ctxt}
-
-  \item local theories are called @{ML_text lthy}, except for local
-  theories that are treated as proof context (which is a semantic
-  super-type)
-
-  \end{itemize}
-
-  Variations with primed or decimal numbers are always possible, as
-  well as semantic prefixes like @{ML_text foo_thy} or @{ML_text
-  bar_ctxt}, but the base conventions above need to be preserved.
-  This makes it possible to visualize their data flow via plain regular
-  expressions in the editor.
-
-  \item The main logical entities (\secref{ch:logic}) have established
-  naming conventions as follows:
-
-  \begin{itemize}
-
-  \item sorts are called @{ML_text S}
-
-  \item types are called @{ML_text T}, @{ML_text U}, or @{ML_text
-  ty} (never @{ML_text t})
-
-  \item terms are called @{ML_text t}, @{ML_text u}, or @{ML_text
-  tm} (never @{ML_text trm})
-
-  \item certified types are called @{ML_text cT}, rarely @{ML_text
-  T}, with variants as for types
-
-  \item certified terms are called @{ML_text ct}, rarely @{ML_text
-  t}, with variants as for terms (never @{ML_text ctrm})
-
-  \item theorems are called @{ML_text th}, or @{ML_text thm}
-
-  \end{itemize}
-
-  Proper semantic names override these conventions completely.  For
-  example, the left-hand side of an equation (as a term) can be called
-  @{ML_text lhs} (not @{ML_text lhs_tm}).  Or a term that is known
-  to be a variable can be called @{ML_text v} or @{ML_text x}.
-
-  \item Tactics (\secref{sec:tactics}) are sufficiently important to
-  have specific naming conventions.  The name of a basic tactic
-  definition always has a @{ML_text "_tac"} suffix, the subgoal index
-  (if applicable) is always called @{ML_text i}, and the goal state
-  (if made explicit) is usually called @{ML_text st} instead of the
-  somewhat misleading @{ML_text thm}.  Any other arguments are given
-  before the latter two, and the general context is given first.
-  Example:
-
-  \begin{verbatim}
-  fun my_tac ctxt arg1 arg2 i st = ...
-  \end{verbatim}
-
-  Note that the goal state @{ML_text st} above is rarely made
-  explicit, if tactic combinators (tacticals) are used as usual.
-
-  \end{itemize}
-*}
-
-
-subsection {* General source layout *}
-
-text {* The general Isabelle/ML source layout imitates regular
-  type-setting to some extent, augmented by the requirements for
-  deeply nested expressions that are commonplace in functional
-  programming.
-
-  \paragraph{Line length} is 80 characters according to ancient
-  standards, but we allow as much as 100 characters (not
-  more).\footnote{Readability requires keeping the beginning of a line
-  in view while watching its end.  Modern wide-screen displays do not
-  change the way the human brain works.  Sources also need to be
-  printable on plain paper with reasonable font-size.} The extra 20
-  characters acknowledge the space requirements due to qualified
-  library references in Isabelle/ML.
-
-  \paragraph{White-space} is used to emphasize the structure of
-  expressions, following mostly standard conventions for mathematical
-  typesetting, as can be seen in plain {\TeX} or {\LaTeX}.  This
-  defines positioning of spaces for parentheses, punctuation, and
-  infixes as illustrated here:
-
-  \begin{verbatim}
-  val x = y + z * (a + b);
-  val pair = (a, b);
-  val record = {foo = 1, bar = 2};
-  \end{verbatim}
-
-  Lines are normally broken \emph{after} an infix operator or
-  punctuation character.  For example:
-
-  \begin{verbatim}
-  val x =
-    a +
-    b +
-    c;
-
-  val tuple =
-   (a,
-    b,
-    c);
-  \end{verbatim}
-
-  Some special infixes (e.g.\ @{ML_text "|>"}) work better at the
-  start of the line, but punctuation is always at the end.
-
-  Function application follows the tradition of @{text "\<lambda>"}-calculus,
-  not informal mathematics.  For example: @{ML_text "f a b"} for a
-  curried function, or @{ML_text "g (a, b)"} for a tupled function.
-  Note that the space between @{ML_text g} and the pair @{ML_text
-  "(a, b)"} follows the important principle of
-  \emph{compositionality}: the layout of @{ML_text "g p"} does not
-  change when @{ML_text "p"} is refined to the concrete pair
-  @{ML_text "(a, b)"}.
-
-  \paragraph{Indentation} uses plain spaces, never hard
-  tabulators.\footnote{Tabulators were invented to move the carriage
-  of a type-writer to certain predefined positions.  In software they
-  could be used as a primitive run-length compression of consecutive
-  spaces, but the precise result would depend on non-standardized
-  editor configuration.}
-
-  Each level of nesting is indented by 2 spaces, sometimes 1, very
-  rarely 4, never 8 or any other odd number.
-
-  Indentation follows a simple logical format that only depends on the
-  nesting depth, not the accidental length of the text that initiates
-  a level of nesting.  Example:
-
-  \begin{verbatim}
-  (* RIGHT *)
-
-  if b then
-    expr1_part1
-    expr1_part2
-  else
-    expr2_part1
-    expr2_part2
-
-
-  (* WRONG *)
-
-  if b then expr1_part1
-            expr1_part2
-  else expr2_part1
-       expr2_part2
-  \end{verbatim}
-
-  The second form has many problems: it assumes a fixed-width font
-  when viewing the sources, it uses more space on the line and thus
-  makes it hard to observe its strict length limit (working against
-  \emph{readability}), it requires extra editing to adapt the layout
-  to changes of the initial text (working against
-  \emph{maintainability}) etc.
-
-  \medskip For similar reasons, any kind of two-dimensional or tabular
-  layouts, ASCII-art with lines or boxes of asterisks etc.\ should be
-  avoided.
-
-  \paragraph{Complex expressions} that consist of multi-clausal
-  function definitions, @{ML_text handle}, @{ML_text case},
-  @{ML_text let} (and combinations) require special attention.  The
-  syntax of Standard ML is quite ambitious and admits a lot of
-  variance that can distort the meaning of the text.
-
-  Clauses of @{ML_text fun}, @{ML_text fn}, @{ML_text handle},
-  @{ML_text case} get extra indentation to indicate the nesting
-  clearly.  Example:
-
-  \begin{verbatim}
-  (* RIGHT *)
-
-  fun foo p1 =
-        expr1
-    | foo p2 =
-        expr2
-
-
-  (* WRONG *)
-
-  fun foo p1 =
-    expr1
-    | foo p2 =
-    expr2
-  \end{verbatim}
-
-  Body expressions consisting of @{ML_text case} or @{ML_text let}
-  require care to maintain compositionality, to prevent loss of
-  logical indentation where it is especially important to see the
-  structure of the text.  Example:
-
-  \begin{verbatim}
-  (* RIGHT *)
-
-  fun foo p1 =
-        (case e of
-          q1 => ...
-        | q2 => ...)
-    | foo p2 =
-        let
-          ...
-        in
-          ...
-        end
-
-
-  (* WRONG *)
-
-  fun foo p1 = case e of
-      q1 => ...
-    | q2 => ...
-    | foo p2 =
-    let
-      ...
-    in
-      ...
-    end
-  \end{verbatim}
-
-  Extra parentheses around @{ML_text case} expressions are optional,
-  but help to analyse the nesting based on character matching in the
-  editor.
-
-  \medskip There are two main exceptions to the overall principle of
-  compositionality in the layout of complex expressions.
-
-  \begin{enumerate}
-
-  \item @{ML_text "if"} expressions are iterated as if there would be
-  a multi-branch conditional in SML, e.g.
-
-  \begin{verbatim}
-  (* RIGHT *)
-
-  if b1 then e1
-  else if b2 then e2
-  else e3
-  \end{verbatim}
-
-  \item @{ML_text fn} abstractions are often laid out as if they
-  lacked any structure by themselves.  This traditional form is
-  motivated by the possibility of shifting function arguments back and
-  forth wrt.\ additional combinators.  Example:
-
-  \begin{verbatim}
-  (* RIGHT *)
-
-  fun foo x y = fold (fn z =>
-    expr)
-  \end{verbatim}
-
-  Here the visual appearance is that of three arguments @{ML_text x},
-  @{ML_text y}, @{ML_text z}.
-
-  \end{enumerate}
-
-  Such weakly structured layout should be used with great care.  Here
-  are some counter-examples involving @{ML_text let} expressions:
-
-  \begin{verbatim}
-  (* WRONG *)
-
-  fun foo x = let
-      val y = ...
-    in ... end
-
-
-  (* WRONG *)
-
-  fun foo x = let
-    val y = ...
-  in ... end
-
-
-  (* WRONG *)
-
-  fun foo x =
-  let
-    val y = ...
-  in ... end
-  \end{verbatim}
-
-  \medskip In general the source layout is meant to emphasize the
-  structure of complex language expressions, not to pretend that SML
-  had a completely different syntax (say that of Haskell or Java).
-*}
-
-
-section {* SML embedded into Isabelle/Isar *}
-
-text {* ML and Isar are intertwined via an open-ended bootstrap
-  process that provides more and more programming facilities and
-  logical content in an alternating manner.  Bootstrapping starts from
-  the raw environment of existing implementations of Standard ML
-  (mainly Poly/ML, but also SML/NJ).
-
-  Isabelle/Pure marks the point where the original ML toplevel is
-  superseded by the Isar toplevel that maintains a uniform context for
-  arbitrary ML values (see also \secref{sec:context}).  This formal
-  environment holds ML compiler bindings, logical entities, and many
-  other things.  Raw SML is never encountered again after the initial
-  bootstrap of Isabelle/Pure.
-
-  Object-logics like Isabelle/HOL are built within the
-  Isabelle/ML/Isar environment by introducing suitable theories with
-  associated ML modules, either inlined or as separate files.  Thus
-  Isabelle/HOL is defined as a regular user-space application within
-  the Isabelle framework.  Further add-on tools can be implemented in
-  ML within the Isar context in the same manner: ML is part of the
-  standard repertoire of Isabelle, and there is no distinction between
-  ``user'' and ``developer'' in this respect.
-*}
-
-
-subsection {* Isar ML commands *}
-
-text {* The primary Isar source language provides facilities to ``open
-  a window'' to the underlying ML compiler.  Especially see the Isar
-  commands @{command_ref "ML_file"} and @{command_ref "ML"}: both work the
-  same way, only the source text is provided via a file vs.\ inlined,
-  respectively.  Apart from embedding ML into the main theory
-  definition like that, there are many more commands that refer to ML
-  source, such as @{command_ref setup} or @{command_ref declaration}.
-  Even more fine-grained embedding of ML into Isar is encountered in
-  the proof method @{method_ref tactic}, which refines the pending
-  goal state via a given expression of type @{ML_type tactic}.
-*}
-
-text %mlex {* The following artificial example demonstrates some ML
-  toplevel declarations within the implicit Isar theory context.  This
-  is regular functional programming without referring to logical
-  entities yet.
-*}
-
-ML {*
-  fun factorial 0 = 1
-    | factorial n = n * factorial (n - 1)
-*}
-
-text {* Here the ML environment is already managed by Isabelle, i.e.\
-  the @{ML factorial} function is not yet accessible in the preceding
-  paragraph, nor in a different theory that is independent from the
-  current one in the import hierarchy.
-
-  Removing the above ML declaration from the source text will remove
-  any trace of this definition as expected.  The Isabelle/ML toplevel
-  environment is managed in a \emph{stateless} way: unlike the raw ML
-  toplevel there are no global side-effects involved
-  here.\footnote{Such a stateless compilation environment is also a
-  prerequisite for robust parallel compilation within independent
-  nodes of the implicit theory development graph.}
-
-  \medskip The next example shows how to embed ML into Isar proofs, using
- @{command_ref "ML_prf"} instead of Instead of @{command_ref "ML"}.
-  As illustrated below, the effect on the ML environment is local to
-  the whole proof body, ignoring the block structure.
-*}
-
-notepad
-begin
-  ML_prf %"ML" {* val a = 1 *}
-  {
-    ML_prf %"ML" {* val b = a + 1 *}
-  } -- {* Isar block structure ignored by ML environment *}
-  ML_prf %"ML" {* val c = b + 1 *}
-end
-
-text {* By side-stepping the normal scoping rules for Isar proof
-  blocks, embedded ML code can refer to the different contexts and
-  manipulate corresponding entities, e.g.\ export a fact from a block
-  context.
-
-  \medskip Two further ML commands are useful in certain situations:
-  @{command_ref ML_val} and @{command_ref ML_command} are
-  \emph{diagnostic} in the sense that there is no effect on the
-  underlying environment, and can thus be used anywhere (even outside a
-  theory).  The examples below produce long strings of digits by
-  invoking @{ML factorial}: @{command ML_val} already takes care of
-  printing the ML toplevel result, but @{command ML_command} is silent
-  so we produce an explicit output message.  *}
-
-ML_val {* factorial 100 *}
-ML_command {* writeln (string_of_int (factorial 100)) *}
-
-notepad
-begin
-  ML_val {* factorial 100 *}
-  ML_command {* writeln (string_of_int (factorial 100)) *}
-end
-
-
-subsection {* Compile-time context *}
-
-text {* Whenever the ML compiler is invoked within Isabelle/Isar, the
-  formal context is passed as a thread-local reference variable.  Thus
-  ML code may access the theory context during compilation, by reading
-  or writing the (local) theory under construction.  Note that such
-  direct access to the compile-time context is rare.  In practice it
-  is typically done via some derived ML functions instead.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML ML_Context.the_generic_context: "unit -> Context.generic"} \\
-  @{index_ML "Context.>>": "(Context.generic -> Context.generic) -> unit"} \\
-  @{index_ML ML_Thms.bind_thms: "string * thm list -> unit"} \\
-  @{index_ML ML_Thms.bind_thm: "string * thm -> unit"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML "ML_Context.the_generic_context ()"} refers to the theory
-  context of the ML toplevel --- at compile time.  ML code needs to
-  take care to refer to @{ML "ML_Context.the_generic_context ()"}
-  correctly.  Recall that evaluation of a function body is delayed
-  until actual run-time.
-
-  \item @{ML "Context.>>"}~@{text f} applies context transformation
-  @{text f} to the implicit context of the ML toplevel.
-
-  \item @{ML ML_Thms.bind_thms}~@{text "(name, thms)"} stores a list of
-  theorems produced in ML both in the (global) theory context and the
-  ML toplevel, associating it with the provided name.  Theorems are
-  put into a global ``standard'' format before being stored.
-
-  \item @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but refers to a
-  singleton fact.
-
-  \end{description}
-
-  It is important to note that the above functions are really
-  restricted to the compile time, even though the ML compiler is
-  invoked at run-time.  The majority of ML code either uses static
-  antiquotations (\secref{sec:ML-antiq}) or refers to the theory or
-  proof context at run-time, by explicit functional abstraction.
-*}
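-
-text %mlex {* The following minimal sketch retrieves the compile-time
-  context within a regular @{command "ML"} block and projects it to its
-  background theory.  This merely demonstrates the interface; realistic
-  code rarely accesses the compile-time context directly. *}
-
-ML {*
-  val context = ML_Context.the_generic_context ();
-  val thy = Context.theory_of context;
-*}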
-
-
-subsection {* Antiquotations \label{sec:ML-antiq} *}
-
-text {* A very important consequence of embedding SML into Isar is the
-  concept of \emph{ML antiquotation}.  The standard token language of
-  ML is augmented by special syntactic entities of the following form:
-
-  @{rail \<open>
-  @{syntax_def antiquote}: '@{' nameref args '}'
-  \<close>}
-
-  Here @{syntax nameref} and @{syntax args} are regular outer syntax
-  categories \cite{isabelle-isar-ref}.  Attributes and proof methods
-  use similar syntax.
-
-  \medskip A regular antiquotation @{text "@{name args}"} processes
-  its arguments by the usual means of the Isar source language, and
-  produces corresponding ML source text, either as literal
-  \emph{inline} text (e.g. @{text "@{term t}"}) or abstract
-  \emph{value} (e.g. @{text "@{thm th}"}).  This pre-compilation
-  scheme makes it possible to refer to formal entities in a robust manner, with
-  proper static scoping and with some degree of logical checking of
-  small portions of the code.
-*}
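-
-text %mlex {* The following artificial example shows both forms of
-  antiquotation: an inline term and an abstract theorem value.  The
-  particular entities are illustrative only, assuming the Isabelle/HOL
-  context of this manual where the theorem @{text refl} is available. *}
-
-ML {*
-  val t = @{term "f x"};
-  val th = @{thm refl};
-*}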
-
-
-subsection {* Printing ML values *}
-
-text {* The ML compiler knows about the structure of values according
-  to their static type, and can print them in the manner of the
-  toplevel loop, although the details are non-portable.  The
-  antiquotations @{ML_antiquotation_def "make_string"} and
-  @{ML_antiquotation_def "print"} provide a quasi-portable way to
-  refer to this potential capability of the underlying ML system in
-  generic Isabelle/ML sources.
-
-  This is occasionally useful for diagnostic or demonstration
-  purposes.  Note that production-quality tools require proper
-  user-level error messages. *}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "make_string"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "print"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  @@{ML_antiquotation make_string}
-  ;
-  @@{ML_antiquotation print} @{syntax name}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{make_string}"} inlines a function to print arbitrary
-  values similar to the ML toplevel.  The result is compiler dependent
-  and may fall back on "?" in certain situations.
-
-  \item @{text "@{print f}"} uses the ML function @{text "f: string ->
-  unit"} to output the result of @{text "@{make_string}"} above,
-  together with the source position of the antiquotation.  The default
-  output function is @{ML writeln}.
-
-  \end{description}
-*}
-
-text %mlex {* The following artificial examples show how to produce
-  adhoc output of ML values for debugging purposes. *}
-
-ML {*
-  val x = 42;
-  val y = true;
-
-  writeln (@{make_string} {x = x, y = y});
-
-  @{print} {x = x, y = y};
-  @{print tracing} {x = x, y = y};
-*}
-
-
-section {* Canonical argument order \label{sec:canonical-argument-order} *}
-
-text {* Standard ML is a language in the tradition of @{text
-  "\<lambda>"}-calculus and \emph{higher-order functional programming},
-  similar to OCaml, Haskell, or Isabelle/Pure and HOL as logical
-  languages.  Getting acquainted with the native style of representing
-  functions in that setting can save a lot of extra boiler-plate of
-  redundant shuffling of arguments, auxiliary abstractions etc.
-
-  Functions are usually \emph{curried}: the idea of turning arguments
-  of type @{text "\<tau>\<^sub>i"} (for @{text "i \<in> {1, \<dots> n}"}) into a result of
-  type @{text "\<tau>"} is represented by the iterated function space
-  @{text "\<tau>\<^sub>1 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>"}.  This is isomorphic to the well-known
-  encoding via tuples @{text "\<tau>\<^sub>1 \<times> \<dots> \<times> \<tau>\<^sub>n \<rightarrow> \<tau>"}, but the curried
-  version fits more smoothly into the basic calculus.\footnote{The
-  difference is even more significant in higher-order logic, because
-  the redundant tuple structure needs to be accommodated by formal
-  reasoning.}
-
-  Currying gives some flexibility due to \emph{partial application}.  A
-  function @{text "f: \<tau>\<^sub>1 \<rightarrow> \<tau>\<^sub>2 \<rightarrow> \<tau>"} can be applied to @{text "x: \<tau>\<^sub>1"}
-  and the remaining @{text "(f x): \<tau>\<^sub>2 \<rightarrow> \<tau>"} passed to another function
-  etc.  How well this works in practice depends on the order of
-  arguments.  In the worst case, arguments are arranged erratically,
-  and using a function in a certain situation always requires some
-  glue code.  Thus we would get exponentially many opportunities to
-  decorate the code with meaningless permutations of arguments.
-
-  This can be avoided by \emph{canonical argument order}, which
-  observes certain standard patterns and minimizes adhoc permutations
-  in their application.  In Isabelle/ML, large portions of text can be
-  written without auxiliary operations like @{text "swap: \<alpha> \<times> \<beta> \<rightarrow> \<beta> \<times>
-  \<alpha>"} or @{text "C: (\<alpha> \<rightarrow> \<beta> \<rightarrow> \<gamma>) \<rightarrow> (\<beta> \<rightarrow> \<alpha> \<rightarrow> \<gamma>)"} (the latter not
-  present in the Isabelle/ML library).
-
-  \medskip The basic idea is that arguments that vary less are moved
-  further to the left than those that vary more.  Two particularly
-  important categories of functions are \emph{selectors} and
-  \emph{updates}.
-
-  The subsequent scheme is based on a hypothetical set-like container
-  of type @{text "\<beta>"} that manages elements of type @{text "\<alpha>"}.  Both
-  the names and types of the associated operations are canonical for
-  Isabelle/ML.
-
-  \begin{center}
-  \begin{tabular}{ll}
-  kind & canonical name and type \\\hline
-  selector & @{text "member: \<beta> \<rightarrow> \<alpha> \<rightarrow> bool"} \\
-  update & @{text "insert: \<alpha> \<rightarrow> \<beta> \<rightarrow> \<beta>"} \\
-  \end{tabular}
-  \end{center}
-
-  Given a container @{text "B: \<beta>"}, the partially applied @{text
-  "member B"} is a predicate over elements @{text "\<alpha> \<rightarrow> bool"}, and
-  thus represents the intended denotation directly.  It is customary
-  to pass the abstract predicate to further operations, not the
-  concrete container.  The argument order makes it easy to use other
-  combinators: @{text "forall (member B) list"} will check a list of
-  elements for membership in @{text "B"} etc. Often the explicit
-  @{text "list"} is pointless and can be contracted to @{text "forall
-  (member B)"} to get directly a predicate again.
-
-  In contrast, an update operation varies the container, so it moves
-  to the right: @{text "insert a"} is a function @{text "\<beta> \<rightarrow> \<beta>"} to
-  insert a value @{text "a"}.  These can be composed naturally as
-  @{text "insert c \<circ> insert b \<circ> insert a"}.  The slightly awkward
-  inversion of the composition order is due to conventional
-  mathematical notation, which can be easily amended as explained
-  below.
-*}
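-
-text %mlex {* The following sketch illustrates the scheme above with the
-  list-based @{ML member} and @{ML insert} operations of the Isabelle
-  library (see also the subsection on lists below). *}
-
-ML {*
-  val B = [1, 2, 3];
-
-  (*the partially applied selector is a predicate over elements*)
-  @{assert} (forall (member (op =) B) [1, 3]);
-
-  (*updates compose naturally in canonical argument order*)
-  val B' = B |> insert (op =) 0 |> insert (op =) 2;
-  @{assert} (B' = [0, 1, 2, 3]);
-*}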
-
-
-subsection {* Forward application and composition *}
-
-text {* Regular function application and infix notation works best for
-  relatively deeply structured expressions, e.g.\ @{text "h (f x y + g
-  z)"}.  The important special case of \emph{linear transformation}
-  applies a cascade of functions @{text "f\<^sub>n (\<dots> (f\<^sub>1 x))"}.  This
-  becomes hard to read and maintain if the functions are themselves
-  given as complex expressions.  The notation can be significantly
-  improved by introducing \emph{forward} versions of application and
-  composition as follows:
-
-  \medskip
-  \begin{tabular}{lll}
-  @{text "x |> f"} & @{text "\<equiv>"} & @{text "f x"} \\
-  @{text "(f #> g) x"} & @{text "\<equiv>"} & @{text "x |> f |> g"} \\
-  \end{tabular}
-  \medskip
-
-  This makes it convenient to write @{text "x |> f\<^sub>1 |> \<dots> |> f\<^sub>n"} or
-  @{text "f\<^sub>1 #> \<dots> #> f\<^sub>n"} for its functional abstraction over @{text
-  "x"}.
-
-  \medskip There is an additional set of combinators to accommodate
-  multiple results (via pairs) that are passed on as multiple
-  arguments (via currying).
-
-  \medskip
-  \begin{tabular}{lll}
-  @{text "(x, y) |-> f"} & @{text "\<equiv>"} & @{text "f x y"} \\
-  @{text "(f #-> g) x"} & @{text "\<equiv>"} & @{text "x |> f |-> g"} \\
-  \end{tabular}
-  \medskip
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_op "|> ": "'a * ('a -> 'b) -> 'b"} \\
-  @{index_ML_op "|-> ": "('c * 'a) * ('c -> 'a -> 'b) -> 'b"} \\
-  @{index_ML_op "#> ": "('a -> 'b) * ('b -> 'c) -> 'a -> 'c"} \\
-  @{index_ML_op "#-> ": "('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd"} \\
-  \end{mldecls}
-*}
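-
-text %mlex {* The following artificial example contrasts plain forward
-  application with forward composition; the functions involved are
-  illustrative only. *}
-
-ML {*
-  (*linear transformation via forward application*)
-  val x = 1 |> (fn i => i + 1) |> (fn i => i * 10);
-  @{assert} (x = 20);
-
-  (*the same two steps as forward composition*)
-  val f = (fn i => i + 1) #> (fn i => i * 10);
-  @{assert} (f 1 = 20);
-
-  (*a pair result consumed as two curried arguments*)
-  val y = (3, 4) |-> (fn a => fn b => a + b);
-  @{assert} (y = 7);
-*}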
-
-
-subsection {* Canonical iteration *}
-
-text {* As explained above, a function @{text "f: \<alpha> \<rightarrow> \<beta> \<rightarrow> \<beta>"} can be
-  understood as update on a configuration of type @{text "\<beta>"},
-  parametrized by arguments of type @{text "\<alpha>"}.  Given @{text "a: \<alpha>"}
-  the partial application @{text "(f a): \<beta> \<rightarrow> \<beta>"} operates
-  homogeneously on @{text "\<beta>"}.  This can be iterated naturally over a
-  list of parameters @{text "[a\<^sub>1, \<dots>, a\<^sub>n]"} as @{text "f a\<^sub>1 #> \<dots> #> f a\<^sub>n"}.
-  The latter expression is again a function @{text "\<beta> \<rightarrow> \<beta>"}.
-  It can be applied to an initial configuration @{text "b: \<beta>"} to
-  start the iteration over the given list of arguments: each @{text
-  "a"} in @{text "a\<^sub>1, \<dots>, a\<^sub>n"} is applied consecutively by updating a
-  cumulative configuration.
-
-  The @{text fold} combinator in Isabelle/ML lifts a function @{text
-  "f"} as above to its iterated version over a list of arguments.
-  Lifting can be repeated, e.g.\ @{text "(fold \<circ> fold) f"} iterates
-  over a list of lists as expected.
-
-  The variant @{text "fold_rev"} works inside-out over the list of
-  arguments, such that @{text "fold_rev f \<equiv> fold f \<circ> rev"} holds.
-
-  The @{text "fold_map"} combinator essentially performs @{text
-  "fold"} and @{text "map"} simultaneously: each application of @{text
-  "f"} produces an updated configuration together with a side-result;
-  the iteration collects all such side-results as a separate list.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML fold: "('a -> 'b -> 'b) -> 'a list -> 'b -> 'b"} \\
-  @{index_ML fold_rev: "('a -> 'b -> 'b) -> 'a list -> 'b -> 'b"} \\
-  @{index_ML fold_map: "('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML fold}~@{text f} lifts the parametrized update function
-  @{text "f"} to a list of parameters.
-
-  \item @{ML fold_rev}~@{text "f"} is similar to @{ML fold}~@{text
-  "f"}, but works inside-out.
-
-  \item @{ML fold_map}~@{text "f"} lifts the parametrized update
-  function @{text "f"} (with side-result) to a list of parameters and
-  cumulative side-results.
-
-  \end{description}
-
-  \begin{warn}
-  The literature on functional programming provides a multitude of
-  combinators called @{text "foldl"}, @{text "foldr"} etc.  SML97
-  provides its own variations as @{ML List.foldl} and @{ML
-  List.foldr}, while the classic Isabelle library also has the
-  historic @{ML Library.foldl} and @{ML Library.foldr}.  To avoid
-  unnecessary complication and confusion, all these historical
-  versions should be ignored, and @{ML fold} (or @{ML fold_rev}) used
-  exclusively.
-  \end{warn}
-*}
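-
-text %mlex {* A minimal sketch of @{ML fold_map}: each step yields a
-  side-result (here a square) together with an updated configuration
-  (here a running sum). *}
-
-ML {*
-  val (squares, total) =
-    fold_map (fn a => fn b => (a * a, a + b)) [1, 2, 3, 4] 0;
-
-  @{assert} (squares = [1, 4, 9, 16]);
-  @{assert} (total = 10);
-*}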
-
-text %mlex {* The following example shows how to fill a text buffer
-  incrementally by adding strings, either individually or from a given
-  list.
-*}
-
-ML {*
-  val s =
-    Buffer.empty
-    |> Buffer.add "digits: "
-    |> fold (Buffer.add o string_of_int) (0 upto 9)
-    |> Buffer.content;
-
-  @{assert} (s = "digits: 0123456789");
-*}
-
-text {* Note how @{ML "fold (Buffer.add o string_of_int)"} above saves
-  an extra @{ML "map"} over the given list.  This kind of peephole
-  optimization reduces both the code size and the tree structures in
-  memory (``deforestation''), but it requires some practice to read
-  and write fluently.
-
-  \medskip The next example elaborates the idea of canonical
-  iteration, demonstrating fast accumulation of tree content using a
-  text buffer.
-*}
-
-ML {*
-  datatype tree = Text of string | Elem of string * tree list;
-
-  fun slow_content (Text txt) = txt
-    | slow_content (Elem (name, ts)) =
-        "<" ^ name ^ ">" ^
-        implode (map slow_content ts) ^
-        "</" ^ name ^ ">"
-
-  fun add_content (Text txt) = Buffer.add txt
-    | add_content (Elem (name, ts)) =
-        Buffer.add ("<" ^ name ^ ">") #>
-        fold add_content ts #>
-        Buffer.add ("</" ^ name ^ ">");
-
-  fun fast_content tree =
-    Buffer.empty |> add_content tree |> Buffer.content;
-*}
-
-text {* The slow part of @{ML slow_content} is the @{ML implode} of
-  the recursive results, because it copies previously produced strings
-  again.
-
-  The incremental @{ML add_content} avoids this by operating on a
-  buffer that is passed through in a linear fashion.  Using @{ML_text
-  "#>"} and contraction over the actual buffer argument saves some
-  additional boiler-plate.  Of course, the two @{ML "Buffer.add"}
-  invocations with concatenated strings could have been split into
-  smaller parts, but this would have obfuscated the source without
-  making a big difference in allocations.  Here we have done some
-  peephole optimization for the sake of readability.
-
-  Another benefit of @{ML add_content} is its ``open'' form as a
-  function on buffers that can be continued in further linear
-  transformations, folding etc.  Thus it is more compositional than
-  the naive @{ML slow_content}.  As realistic example, compare the
-  old-style @{ML "Term.maxidx_of_term: term -> int"} with the newer
-  @{ML "Term.maxidx_term: term -> int -> int"} in Isabelle/Pure.
-
-  Note that @{ML fast_content} above is only defined as example.  In
-  many practical situations, it is customary to provide the
-  incremental @{ML add_content} only and leave the initialization and
-  termination to the concrete application by the user.
-*}
-
-
-section {* Message output channels \label{sec:message-channels} *}
-
-text {* Isabelle provides output channels for different kinds of
-  messages: regular output, high-volume tracing information, warnings,
-  and errors.
-
-  Depending on the user interface involved, these messages may appear
-  in different text styles or colours.  The standard output for
-  terminal sessions prefixes each line of warnings by @{verbatim
-  "###"} and errors by @{verbatim "***"}, but leaves anything else
-  unchanged.
-
-  Messages are associated with the transaction context of the running
-  Isar command.  This enables the front-end to manage commands and
-  resulting messages together.  For example, after deleting a command
-  from a given theory document version, the corresponding message
-  output can be retracted from the display.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML writeln: "string -> unit"} \\
-  @{index_ML tracing: "string -> unit"} \\
-  @{index_ML warning: "string -> unit"} \\
-  @{index_ML error: "string -> 'a"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML writeln}~@{text "text"} outputs @{text "text"} as regular
-  message.  This is the primary message output operation of Isabelle
-  and should be used by default.
-
-  \item @{ML tracing}~@{text "text"} outputs @{text "text"} as special
-  tracing message, indicating potential high-volume output to the
-  front-end (hundreds or thousands of messages issued by a single
-  command).  The idea is to allow the user-interface to downgrade the
-  quality of message display to achieve higher throughput.
-
-  Note that the user might have to take special actions to see tracing
-  output, e.g.\ switch to a different output window.  So this channel
-  should not be used for regular output.
-
-  \item @{ML warning}~@{text "text"} outputs @{text "text"} as
-  warning, which typically means some extra emphasis on the front-end
-  side (color highlighting, icons, etc.).
-
-  \item @{ML error}~@{text "text"} raises exception @{ML ERROR}~@{text
-  "text"} and thus lets the Isar toplevel print @{text "text"} on the
-  error channel, which typically means some extra emphasis on the
-  front-end side (color highlighting, icons, etc.).
-
-  This assumes that the exception is not handled before the command
-  terminates.  Handling exception @{ML ERROR}~@{text "text"} is a
-  perfectly legal alternative: it means that the error is absorbed
-  without any message output.
-
-  \begin{warn}
-  The actual error channel is accessed via @{ML Output.error_message}, but
-  the old interaction protocol of Proof~General \emph{crashes} if that
-  function is used in regular ML code: error output and toplevel
-  command failure always need to coincide in classic TTY interaction.
-  \end{warn}
-
-  \end{description}
-
-  \begin{warn}
-  Regular Isabelle/ML code should output messages exclusively by the
-  official channels.  Using raw I/O on \emph{stdout} or \emph{stderr}
-  instead (e.g.\ via @{ML TextIO.output}) is apt to cause problems in
-  the presence of parallel and asynchronous processing of Isabelle
-  theories.  Such raw output might be displayed by the front-end in
-  some system console log, with a low chance that the user will ever
-  see it.  Moreover, as a genuine side-effect on global process
-  channels, there is no proper way to retract output when Isar command
-  transactions are reset by the system.
-  \end{warn}
-
-  \begin{warn}
-  The message channels should be used in a message-oriented manner.
-  This means that multi-line output that logically belongs together is
-  issued by a \emph{single} invocation of @{ML writeln} etc.\ with the
-  functional concatenation of all message constituents.
-  \end{warn}
-*}
-
-text %mlex {* The following example demonstrates a multi-line
-  warning.  Note that in some situations the user sees only the first
-  line, so the most important point should be made first.
-*}
-
-ML_command {*
-  warning (cat_lines
-   ["Beware the Jabberwock, my son!",
-    "The jaws that bite, the claws that catch!",
-    "Beware the Jubjub Bird, and shun",
-    "The frumious Bandersnatch!"]);
-*}
-
-
-section {* Exceptions \label{sec:exceptions} *}
-
-text {* The Standard ML semantics of strict functional evaluation
-  together with exceptions is rather well defined, but some delicate
-  points need to be observed to prevent ML programs from going wrong
-  despite static type-checking.  Exceptions in Isabelle/ML are
-  subsequently categorized as follows.
-
-  \paragraph{Regular user errors.}  These are meant to provide
-  informative feedback about malformed input etc.
-
-  The \emph{error} function raises the corresponding \emph{ERROR}
-  exception, with a plain text message as argument.  \emph{ERROR}
-  exceptions can be handled internally, in order to be ignored, turned
-  into other exceptions, or cascaded by appending messages.  If the
-  corresponding Isabelle/Isar command terminates with an \emph{ERROR}
-  exception state, the toplevel will print the result on the error
-  channel (see \secref{sec:message-channels}).
-
-  It is considered bad style to refer to internal function names or
-  values in ML source notation in user error messages.
-
-  Grammatical correctness of error messages can be improved by
-  \emph{omitting} final punctuation: messages are often concatenated
-  or put into a larger context (e.g.\ augmented with source position).
-  By not insisting on the final word at the origin of the error, the
-  system can perform its administrative tasks more easily and
-  robustly.
-
-  \paragraph{Program failures.}  There is a handful of standard
-  exceptions that indicate general failure situations, or failures of
-  core operations on logical entities (types, terms, theorems,
-  theories, see \chref{ch:logic}).
-
-  These exceptions indicate a genuine breakdown of the program, so the
-  main purpose is to determine quickly what has happened where.
-  Traditionally, the (short) exception message would include the name
-  of an ML function, although this is no longer necessary, because the
-  ML runtime system prints a detailed source position of the
-  corresponding @{ML_text raise} keyword.
-
-  \medskip User modules can always introduce their own custom
-  exceptions locally, e.g.\ to organize internal failures robustly
-  without overlapping with existing exceptions.  Exceptions that are
-  exposed in module signatures require extra care, though, and should
-  \emph{not} be introduced by default.  Surprise by users of a module
-  can often be minimized by using plain user errors instead.
-
-  \paragraph{Interrupts.}  These indicate arbitrary system events:
-  both the ML runtime system and the Isabelle/ML infrastructure signal
-  various exceptional situations by raising the special
-  \emph{Interrupt} exception in user code.
-
-  This is the one and only way that physical events can intrude on an
-  Isabelle/ML program.  Such an interrupt can mean out-of-memory,
-  stack overflow, timeout, internal signaling of threads, or the user
-  producing a console interrupt manually etc.  An Isabelle/ML program
-  that intercepts interrupts becomes dependent on physical effects of
-  the environment.  Even worse, exception handling patterns that are
-  too general by accident, e.g.\ by misspelled exception constructors,
-  will cover interrupts unintentionally and thus render the program
-  semantics ill-defined.
-
-  Note that the Interrupt exception dates back to the original SML90
-  language definition.  It was excluded from the SML97 version to
-  avoid its malign impact on ML program semantics, but without
-  providing a viable alternative.  Isabelle/ML recovers physical
-  interruptibility (which is an indispensable tool to implement
-  managed evaluation of command transactions), but requires user code
-  to be strictly transparent wrt.\ interrupts.
-
-  \begin{warn}
-  Isabelle/ML user code needs to terminate promptly on interruption,
-  without guessing at its meaning to the system infrastructure.
-  Temporary handling of interrupts for cleanup of global resources
-  etc.\ needs to be followed immediately by re-raising of the original
-  exception.
-  \end{warn}
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML try: "('a -> 'b) -> 'a -> 'b option"} \\
-  @{index_ML can: "('a -> 'b) -> 'a -> bool"} \\
-  @{index_ML_exception ERROR: string} \\
-  @{index_ML_exception Fail: string} \\
-  @{index_ML Exn.is_interrupt: "exn -> bool"} \\
-  @{index_ML reraise: "exn -> 'a"} \\
-  @{index_ML Runtime.exn_trace: "(unit -> 'a) -> 'a"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML try}~@{text "f x"} makes the partiality of evaluating
-  @{text "f x"} explicit via the option datatype.  Interrupts are
-  \emph{not} handled here, i.e.\ this form serves as safe replacement
-  for the \emph{unsafe} version @{ML_text "(SOME"}~@{text "f
-  x"}~@{ML_text "handle _ => NONE)"} that is occasionally seen in
-  books about SML97, not Isabelle/ML.
-
-  \item @{ML can} is similar to @{ML try} with more abstract result.
-
-  \item @{ML ERROR}~@{text "msg"} represents user errors; this
-  exception is normally raised indirectly via the @{ML error} function
-  (see \secref{sec:message-channels}).
-
-  \item @{ML Fail}~@{text "msg"} represents general program failures.
-
-  \item @{ML Exn.is_interrupt} identifies interrupts robustly, without
-  mentioning concrete exception constructors in user code.  Handled
-  interrupts need to be re-raised promptly!
-
-  \item @{ML reraise}~@{text "exn"} raises exception @{text "exn"}
-  while preserving its implicit position information (if possible,
-  depending on the ML platform).
-
-  \item @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~@{text
-  "e"}@{ML_text ")"} evaluates expression @{text "e"} while printing
-  a full trace of its stack of nested exceptions (if possible,
-  depending on the ML platform).
-
-  Inserting @{ML Runtime.exn_trace} into ML code temporarily is
-  useful for debugging, but not suitable for production code.
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "assert"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{text "@{assert}"} inlines a function
-  @{ML_type "bool -> unit"} that raises @{ML Fail} if the argument is
-  @{ML false}.  Due to inlining the source position of failed
-  assertions is included in the error output.
-
-  \end{description}
-*}
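-
-text %mlex {* The following artificial examples sketch @{ML try} and
-  @{ML can} on a partial function, and show how a user error is absorbed;
-  the concrete values and the error message are illustrative only. *}
-
-ML {*
-  @{assert} (try hd [1, 2, 3] = SOME 1);
-  @{assert} (try hd ([]: int list) = NONE);
-  @{assert} (can hd [1] andalso not (can hd ([]: int list)));
-
-  (*an ERROR exception is absorbed without any message output*)
-  val failure: int option = try (fn () => error "Bad input") ();
-  @{assert} (is_none failure);
-*}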
-
-
-section {* Strings of symbols \label{sec:symbols} *}
-
-text {* A \emph{symbol} constitutes the smallest textual unit in
-  Isabelle/ML --- raw ML characters are normally not encountered at
-  all!  Isabelle strings consist of a sequence of symbols, represented
-  as a packed string or an exploded list of strings.  Each symbol is
-  in itself a small string, which has either one of the following
-  forms:
-
-  \begin{enumerate}
-
-  \item a single ASCII character ``@{text "c"}'', for example
-  ``\verb,a,'',
-
-  \item a codepoint according to UTF-8 (non-ASCII byte sequence),
-
-  \item a regular symbol ``\verb,\,\verb,<,@{text "ident"}\verb,>,'',
-  for example ``\verb,\,\verb,<alpha>,'',
-
-  \item a control symbol ``\verb,\,\verb,<^,@{text "ident"}\verb,>,'',
-  for example ``\verb,\,\verb,<^bold>,'',
-
-  \item a raw symbol ``\verb,\,\verb,<^raw:,@{text text}\verb,>,''
-  where @{text text} consists of printable characters excluding
-  ``\verb,.,'' and ``\verb,>,'', for example
-  ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
-
-  \item a numbered raw control symbol ``\verb,\,\verb,<^raw,@{text
-  n}\verb,>,'' where @{text n} consists of digits, for example
-  ``\verb,\,\verb,<^raw42>,''.
-
-  \end{enumerate}
-
-  The @{text "ident"} syntax for symbol names is @{text "letter
-  (letter | digit)\<^sup>*"}, where @{text "letter = A..Za..z"} and @{text
-  "digit = 0..9"}.  There are infinitely many regular symbols and
-  control symbols, but a fixed collection of standard symbols is
-  treated specifically.  For example, ``\verb,\,\verb,<alpha>,'' is
-  classified as a letter, which means it may occur within regular
-  Isabelle identifiers.
-
-  The character set underlying Isabelle symbols is 7-bit ASCII, but
-  8-bit character sequences are passed-through unchanged.  Unicode/UCS
-  data in UTF-8 encoding is processed in a non-strict fashion, such
-  that well-formed code sequences are recognized
-  accordingly.\footnote{Note that ISO-Latin-1 differs from UTF-8 only
-  in some special punctuation characters that even have replacements
-  within the standard collection of Isabelle symbols.  Text consisting
-  of ASCII plus accented letters can be processed in either encoding.}
-  Unicode provides its own collection of mathematical symbols, but
-  within the core Isabelle/ML world there is no link to the standard
-  collection of Isabelle regular symbols.
-
-  \medskip Output of Isabelle symbols depends on the print mode
-  \cite{isabelle-isar-ref}.  For example, the standard {\LaTeX}
-  setup of the Isabelle document preparation system would present
-  ``\verb,\,\verb,<alpha>,'' as @{text "\<alpha>"}, and
-  ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as @{text "\<^bold>\<alpha>"}.
-  On-screen rendering usually works by mapping a finite subset of
-  Isabelle symbols to suitable Unicode characters.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type "Symbol.symbol": string} \\
-  @{index_ML Symbol.explode: "string -> Symbol.symbol list"} \\
-  @{index_ML Symbol.is_letter: "Symbol.symbol -> bool"} \\
-  @{index_ML Symbol.is_digit: "Symbol.symbol -> bool"} \\
-  @{index_ML Symbol.is_quasi: "Symbol.symbol -> bool"} \\
-  @{index_ML Symbol.is_blank: "Symbol.symbol -> bool"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type "Symbol.sym"} \\
-  @{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type "Symbol.symbol"} represents individual Isabelle
-  symbols.
-
-  \item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
-  from the packed form.  This function supersedes @{ML
-  "String.explode"} for virtually all purposes of manipulating text in
-  Isabelle!\footnote{The runtime overhead for exploded strings is
-  mainly that of the list structure: individual symbols that happen to
-  be a singleton string do not require extra memory in Poly/ML.}
-
-  \item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
-  "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
-  symbols according to fixed syntactic conventions of Isabelle, cf.\
-  \cite{isabelle-isar-ref}.
-
-  \item Type @{ML_type "Symbol.sym"} is a concrete datatype that
-  represents the different kinds of symbols explicitly, with
-  constructors @{ML "Symbol.Char"}, @{ML "Symbol.Sym"}, @{ML
-  "Symbol.UTF8"}, @{ML "Symbol.Ctrl"}, @{ML "Symbol.Raw"}.
-
-  \item @{ML "Symbol.decode"} converts the string representation of a
-  symbol into the datatype version.
-
-  \end{description}
-
-  \paragraph{Historical note.} In the original SML90 standard the
-  primitive ML type @{ML_type char} did not exist, and @{ML_text
-  "explode: string -> string list"} produced a list of singleton
-  strings like @{ML "raw_explode: string -> string list"} in
-  Isabelle/ML today.  When SML97 came out, Isabelle did not adopt its
-  somewhat anachronistic 8-bit or 16-bit characters, but the idea of
-  exploding a string into a list of small strings was extended to
-  ``symbols'' as explained above.  Thus Isabelle sources can refer to
-  an infinite store of user-defined symbols, without having to worry
-  about the multitude of Unicode encodings that have emerged over the
-  years.  *}
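-
-text %mlex {* The following artificial example explodes a short string
-  that contains the regular symbol \verb,\,\verb,<alpha>, natively, and
-  applies some of the classification predicates. *}
-
-ML {*
-  val syms = Symbol.explode "a\<alpha>b";
-  @{assert} (length syms = 3);
-
-  @{assert} (Symbol.is_letter "\<alpha>");
-  @{assert} (Symbol.is_digit "4");
-*}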
-
-
-section {* Basic data types *}
-
-text {* The basis library proposal of SML97 needs to be treated with
-  caution.  Many of its operations simply do not fit with important
-  Isabelle/ML conventions (like ``canonical argument order'', see
-  \secref{sec:canonical-argument-order}), others cause problems with
-  the parallel evaluation model of Isabelle/ML (such as @{ML
-  TextIO.print} or @{ML OS.Process.system}).
-
-  Subsequently we give a brief overview of important operations on
-  basic ML data types.
-*}
-
-
-subsection {* Characters *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type char} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type char} is \emph{not} used.  The smallest textual
-  unit in Isabelle is represented as a ``symbol'' (see
-  \secref{sec:symbols}).
-
-  \end{description}
-*}
-
-
-subsection {* Strings *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type string} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type string} represents immutable vectors of 8-bit
-  characters.  There are operations in SML to convert back and forth
-  to actual byte vectors, but these are seldom used.
-
-  This historically important raw text representation is used for
-  Isabelle-specific purposes with the following implicit substructures
-  packed into the string content:
-
-  \begin{enumerate}
-
-  \item sequence of Isabelle symbols (see also \secref{sec:symbols}),
-  with @{ML Symbol.explode} as key operation;
-
-  \item XML tree structure via YXML (see also \cite{isabelle-sys}),
-  with @{ML YXML.parse_body} as key operation.
-
-  \end{enumerate}
-
-  Note that Isabelle/ML string literals may refer to Isabelle symbols
-  like ``\verb,\,\verb,<alpha>,'' natively, \emph{without} escaping
-  the backslash.  This is a consequence of Isabelle treating all
-  source text as strings of symbols, instead of raw characters.
-
-  \end{description}
-*}
-
-text %mlex {* The subsequent example illustrates the difference of
-  physical addressing of bytes versus logical addressing of symbols in
-  Isabelle strings.
-*}
-
-ML_val {*
-  val s = "\<A>";
-
-  @{assert} (length (Symbol.explode s) = 1);
-  @{assert} (size s = 4);
-*}
-
-text {* Note that in Unicode renderings of the symbol @{text "\<A>"},
-  variations of encodings like UTF-8 or UTF-16 pose delicate questions
-  about the multi-byte representations of its codepoint, which is outside
-  the 16-bit address space of the original Unicode standard from
-  the 1990s.  In Isabelle/ML it is just ``\verb,\,\verb,<A>,''
-  literally, using plain ASCII characters beyond any doubts. *}
-
-
-subsection {* Integers *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type int} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type int} represents regular mathematical integers,
-  which are \emph{unbounded}.  Overflow never happens in
-  practice.\footnote{The size limit for integer bit patterns in memory
-  is 64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.}
-  This works uniformly for all supported ML platforms (Poly/ML and
-  SML/NJ).
-
-  Literal integers in ML text are forced to be of this one true
-  integer type --- adhoc overloading of SML97 is disabled.
-
-  Structure @{ML_structure IntInf} of SML97 is obsolete and superseded by
-  @{ML_structure Int}.  Structure @{ML_structure Integer} in @{file
-  "~~/src/Pure/General/integer.ML"} provides some additional
-  operations.
-
-  \end{description}
-*}
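-
-text %mlex {* The following quick sanity check illustrates unbounded
-  integer arithmetic: squaring a large literal neither overflows nor
-  loses precision. *}
-
-ML {*
-  val n = 1000000000000 * 1000000000000;
-  @{assert} (n = 1000000000000000000000000);
-*}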
-
-
-subsection {* Time *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Time.time} \\
-  @{index_ML seconds: "real -> Time.time"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Time.time} represents time abstractly according
-  to the SML97 basis library definition.  This is adequate for
-  internal ML operations, but awkward in concrete time specifications.
-
-  \item @{ML seconds}~@{text "s"} turns the concrete scalar @{text
-  "s"} (measured in seconds) into an abstract time value.  Floating
-  point numbers are easy to use as configuration options in the
-  context (see \secref{sec:config-options}) or system preferences that
-  are maintained externally.
-
-  \end{description}
-*}
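-
-text %mlex {* The following minimal sketch turns a scalar into an abstract
-  time value; the comparison uses the SML97 basis operations on
-  @{ML_type Time.time}. *}
-
-ML {*
-  val timeout = seconds 1.5;
-  @{assert} (Time.< (timeout, seconds 2.0));
-*}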
-
-
-subsection {* Options *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Option.map: "('a -> 'b) -> 'a option -> 'b option"} \\
-  @{index_ML is_some: "'a option -> bool"} \\
-  @{index_ML is_none: "'a option -> bool"} \\
-  @{index_ML the: "'a option -> 'a"} \\
-  @{index_ML these: "'a list option -> 'a list"} \\
-  @{index_ML the_list: "'a option -> 'a list"} \\
-  @{index_ML the_default: "'a -> 'a option -> 'a"} \\
-  \end{mldecls}
-*}
-
-text {* Apart from @{ML Option.map} most other operations defined in
-  structure @{ML_structure Option} are alien to Isabelle/ML and never
-  used.  The operations shown above are defined in @{file
-  "~~/src/Pure/General/basics.ML"}.  *}
-
-
-subsection {* Lists *}
-
-text {* Lists are ubiquitous in ML as simple and light-weight
-  ``collections'' for many everyday programming tasks.  Isabelle/ML
-  provides important additions and improvements over operations that
-  are predefined in the SML97 library. *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML cons: "'a -> 'a list -> 'a list"} \\
-  @{index_ML member: "('b * 'a -> bool) -> 'a list -> 'b -> bool"} \\
-  @{index_ML insert: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
-  @{index_ML remove: "('b * 'a -> bool) -> 'b -> 'a list -> 'a list"} \\
-  @{index_ML update: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML cons}~@{text "x xs"} evaluates to @{text "x :: xs"}.
-
-  Tupled infix operators are a historical accident in Standard ML.
-  The curried @{ML cons} amends this, but it should be only used when
-  partial application is required.
-
-  \item @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat
-  lists as a set-like container that maintains the order of elements.
-  See @{file "~~/src/Pure/library.ML"} for the full specifications
-  (written in ML).  There are some further derived operations like
-  @{ML union} or @{ML inter}.
-
-  Note that @{ML insert} is conservative about elements that are
-  already a @{ML member} of the list, while @{ML update} ensures that
-  the latest entry is always put in front.  The latter discipline is
-  often more appropriate in declarations of context data
-  (\secref{sec:context-data}) that are issued by the user in Isar
-  source: later declarations take precedence over earlier ones.
-
-  \end{description}
-*}
-
-text %mlex {* Using canonical @{ML fold} together with @{ML cons} (or
-  similar standard operations) alternates the orientation of data.
-  This is quite natural and should not be altered forcibly by inserting
-  extra applications of @{ML rev}.  The alternative @{ML fold_rev} can
-  be used in the few situations where alternation should be
-  prevented.
-*}
-
-ML {*
-  val items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-
-  val list1 = fold cons items [];
-  @{assert} (list1 = rev items);
-
-  val list2 = fold_rev cons items [];
-  @{assert} (list2 = items);
-*}
-
-text {* The subsequent example demonstrates how to \emph{merge} two
-  lists in a natural way. *}
-
-ML {*
-  fun merge_lists eq (xs, ys) = fold_rev (insert eq) ys xs;
-*}
-
-text {* Here the first list is treated conservatively: only the new
-  elements from the second list are inserted.  The inside-out order of
-  insertion via @{ML fold_rev} attempts to preserve the order of
-  elements in the result.
-
-  This way of merging lists is typical for context data
-  (\secref{sec:context-data}).  See also @{ML merge} as defined in
-  @{file "~~/src/Pure/library.ML"}.
-*}
-
-
-subsection {* Association lists *}
-
-text {* The operations for association lists interpret a concrete list
-  of pairs as a finite function from keys to values.  Redundant
-  representations with multiple occurrences of the same key are
-  implicitly normalized: lookup and update only take the first
-  occurrence into account.
-*}
-
-text {*
-  \begin{mldecls}
-  @{index_ML AList.lookup: "('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option"} \\
-  @{index_ML AList.defined: "('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool"} \\
-  @{index_ML AList.update: "('a * 'a -> bool) -> 'a * 'b -> ('a * 'b) list -> ('a * 'b) list"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update}
-  implement the main ``framework operations'' for mappings in
-  Isabelle/ML, following standard conventions for their names and
-  types.
-
-  Note that a function called @{text lookup} is obliged to express its
-  partiality via an explicit option element.  There is no choice to
-  raise an exception, without changing the name to something like
-  @{text "the_element"} or @{text "get"}.
-
-  The @{text "defined"} operation is essentially a contraction of @{ML
-  is_some} and @{text "lookup"}, but this is sufficiently frequent to
-  justify its independent existence.  This also gives the
-  implementation some opportunity for peephole optimization.
-
-  \end{description}
-
-  Association lists are adequate as simple and light-weight
-  implementation of finite mappings in many practical situations.  A
-  more heavy-duty table structure is defined in @{file
-  "~~/src/Pure/General/table.ML"}; that version scales easily to
-  thousands or millions of elements.
-*}
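-
-text %mlex {* The following small sketch exercises the association list
-  operations above; the string keys and integer values are illustrative
-  only. *}
-
-ML {*
-  val env =
-    []
-    |> AList.update (op =) ("a", 1)
-    |> AList.update (op =) ("b", 2);
-
-  @{assert} (AList.lookup (op =) env "a" = SOME 1);
-  @{assert} (AList.defined (op =) env "b");
-  @{assert} (not (AList.defined (op =) env "c"));
-*}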
-
-
-subsection {* Unsynchronized references *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type "'a Unsynchronized.ref"} \\
-  @{index_ML Unsynchronized.ref: "'a -> 'a Unsynchronized.ref"} \\
-  @{index_ML "!": "'a Unsynchronized.ref -> 'a"} \\
-  @{index_ML_op ":=": "'a Unsynchronized.ref * 'a -> unit"} \\
-  \end{mldecls}
-*}
-
-text {* Due to ubiquitous parallelism in Isabelle/ML (see also
-  \secref{sec:multi-threading}), the mutable reference cells of
-  Standard ML are notorious for causing problems.  In a highly
-  parallel system, both correctness \emph{and} performance are easily
-  degraded when using mutable data.
-
-  The unwieldy name of @{ML Unsynchronized.ref} for the constructor
-  for references in Isabelle/ML emphasizes the inconveniences caused by
-  mutability.  Existing operations @{ML "!"}  and @{ML_op ":="} are
-  unchanged, but should be used with special precautions, say in a
-  strictly local situation that is guaranteed to be restricted to
-  sequential evaluation --- now and in the future.
-
-  \begin{warn}
-  Never @{ML_text "open Unsynchronized"}, not even in a local scope!
-  Pretending that mutable state is no problem is a very bad idea.
-  \end{warn}
-*}
-
-
-section {* Thread-safe programming \label{sec:multi-threading} *}
-
-text {* Multi-threaded execution has become an everyday reality in
-  Isabelle since Poly/ML 5.2.1 and Isabelle2008.  Isabelle/ML provides
-  implicit and explicit parallelism by default, and there is no way
-  for user-space tools to ``opt out''.  ML programs that are purely
-  functional, output messages only via the official channels
-  (\secref{sec:message-channels}), and do not intercept interrupts
-  (\secref{sec:exceptions}) can participate in the multi-threaded
-  environment immediately without further ado.
-
-  More ambitious tools with more fine-grained interaction with the
-  environment need to observe the principles explained below.
-*}
-
-
-subsection {* Multi-threading with shared memory *}
-
-text {* Multiple threads help to organize advanced operations of the
-  system, such as real-time conditions on command transactions,
-  sub-components with explicit communication, general asynchronous
-  interaction etc.  Moreover, parallel evaluation is a prerequisite to
-  make adequate use of the CPU resources that are available on
-  multi-core systems.\footnote{Multi-core computing does not mean that
-  there are ``spare cycles'' to be wasted.  It means that the
-  continued exponential speedup of CPU performance due to ``Moore's
-  Law'' follows different rules: clock frequency has reached its peak
-  around 2005, and applications need to be parallelized in order to
-  avoid a perceived loss of performance.  See also
-  \cite{Sutter:2005}.}
-
-  Isabelle/Isar exploits the inherent structure of theories and proofs
-  to support \emph{implicit parallelism} to a large extent.  LCF-style
-  theorem proving provides almost ideal conditions for that, see also
-  \cite{Wenzel:2009}.  This means that significant parts of theory and
-  proof checking are parallelized by default.  In Isabelle2013, a
-  maximum speedup-factor of 3.5 on 4 cores and 6.5 on 8 cores can be
-  expected.
-
-  \medskip ML threads lack the memory protection of separate
-  processes, and operate concurrently on shared heap memory.  This has
-  the advantage that results of independent computations are directly
-  available to other threads: abstract values can be passed without
-  copying or awkward serialization that is typically required for
-  separate processes.
-
-  To make shared-memory multi-threading work robustly and efficiently,
-  some programming guidelines need to be observed.  While the ML
-  system is responsible for maintaining basic integrity of the
-  representation of ML values in memory, the application programmer
-  needs to ensure that multi-threaded execution does not break the
-  intended semantics.
-
-  \begin{warn}
-  To participate in implicit parallelism, tools need to be
-  thread-safe.  A single ill-behaved tool can affect the stability and
-  performance of the whole system.
-  \end{warn}
-
-  Apart from observing the principles of thread-safeness passively,
-  advanced tools may also exploit parallelism actively, e.g.\ by using
-  ``future values'' (\secref{sec:futures}) or the more basic library
-  functions for parallel list operations (\secref{sec:parlist}).
-
-  \begin{warn}
-  Parallel computing resources are managed centrally by the
-  Isabelle/ML infrastructure.  User programs must not fork their own
-  ML threads to perform computations.
-  \end{warn}
-*}
-
-
-subsection {* Critical shared resources *}
-
-text {* Thread-safeness is mainly concerned with concurrent
-  read/write access to shared resources, which are outside the purely
-  functional world of ML.  This covers the following in particular.
-
-  \begin{itemize}
-
-  \item Global references (or arrays), i.e.\ mutable memory cells that
-  persist over several invocations of associated
-  operations.\footnote{This is independent of the visibility of such
-  mutable values in the toplevel scope.}
-
-  \item Global state of the running Isabelle/ML process, i.e.\ raw I/O
-  channels, environment variables, and the current working directory.
-
-  \item Writable resources in the file-system that are shared among
-  different threads or external processes.
-
-  \end{itemize}
-
-  Isabelle/ML provides various mechanisms to avoid critical shared
-  resources in most situations.  As a last resort there are some
-  mechanisms for explicit synchronization.  The following guidelines
-  help to make Isabelle/ML programs work smoothly in a concurrent
-  environment.
-
-  \begin{itemize}
-
-  \item Avoid global references altogether.  Isabelle/Isar maintains a
-  uniform context that incorporates arbitrary data declared by user
-  programs (\secref{sec:context-data}).  This context is passed as
-  plain value and user tools can get/map their own data in a purely
-  functional manner.  Configuration options within the context
-  (\secref{sec:config-options}) provide simple drop-in replacements
-  for historic reference variables.
-
-  \item Keep components with local state information re-entrant.
-  Instead of poking initial values into (private) global references, a
-  new state record can be created on each invocation, and passed
-  through any auxiliary functions of the component.  The state record
-  may well contain mutable references, without requiring any special
-  synchronizations, as long as each invocation gets its own copy.
-
-  \item Avoid raw output on @{text "stdout"} or @{text "stderr"}.  The
-  Poly/ML library is thread-safe for each individual output operation,
-  but the ordering of parallel invocations is arbitrary.  This means
-  raw output will appear on some system console with unpredictable
-  interleaving of atomic chunks.
-
-  Note that this does not affect regular message output channels
-  (\secref{sec:message-channels}).  An official message is associated
-  with the command transaction from where it originates, independently
-  of other transactions.  This means each running Isar command has
-  effectively its own set of message channels, and interleaving can
-  only happen when commands use parallelism internally (and only at
-  message boundaries).
-
-  \item Treat environment variables and the current working directory
-  of the running process as strictly read-only.
-
-  \item Restrict writing to the file-system to unique temporary files.
-  Isabelle already provides a temporary directory that is unique for
-  the running process, and there is a centralized source of unique
-  serial numbers in Isabelle/ML.  Thus temporary files that are passed
-  to some external process will always be disjoint, and thus
-  thread-safe.
-
-  \end{itemize}
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML File.tmp_path: "Path.T -> Path.T"} \\
-  @{index_ML serial_string: "unit -> string"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML File.tmp_path}~@{text "path"} relocates the base
-  component of @{text "path"} into the unique temporary directory of
-  the running Isabelle/ML process.
-
-  \item @{ML serial_string}~@{text "()"} creates a new serial number
-  that is unique over the runtime of the Isabelle/ML process.
-
-  \end{description}
-*}
-
-text %mlex {* The following example shows how to create unique
-  temporary file names.
-*}
-
-ML {*
-  val tmp1 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
-  val tmp2 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
-  @{assert} (tmp1 <> tmp2);
-*}
-
-
-subsection {* Explicit synchronization *}
-
-text {* Isabelle/ML also provides some explicit synchronization
-  mechanisms, for the rare situations where mutable shared resources
-  are really required.  These are based on the synchronization
-  primitives of Poly/ML, which have been adapted to the specific
-  assumptions of the concurrent Isabelle/ML environment.  User code
-  must not use the Poly/ML primitives directly!
-
-  \medskip The most basic synchronization concept is a single
-  \emph{critical section} (also called ``monitor'' in the literature).
-  A thread that enters the critical section prevents all other threads
-  from doing the same.  A thread that is already within the critical
-  section may re-enter it in an idempotent manner.
-
-  Such centralized locking is convenient, because it prevents
-  deadlocks by construction.
-
-  \medskip More fine-grained locking works via \emph{synchronized
-  variables}.  An explicit state component is associated with
-  mechanisms for locking and signaling.  There are operations to
-  await a condition, change the state, and signal the change to all
-  other waiting threads.
-
-  Here the synchronized access to the state variable is \emph{not}
-  re-entrant: direct or indirect nesting within the same thread will
-  cause a deadlock!
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML NAMED_CRITICAL: "string -> (unit -> 'a) -> 'a"} \\
-  @{index_ML CRITICAL: "(unit -> 'a) -> 'a"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type "'a Synchronized.var"} \\
-  @{index_ML Synchronized.var: "string -> 'a -> 'a Synchronized.var"} \\
-  @{index_ML Synchronized.guarded_access: "'a Synchronized.var ->
-  ('a -> ('b * 'a) option) -> 'b"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML NAMED_CRITICAL}~@{text "name e"} evaluates @{text "e ()"}
-  within the central critical section of Isabelle/ML.  No other thread
-  may do so at the same time, but non-critical parallel execution will
-  continue.  The @{text "name"} argument is used for tracing and might
-  help to spot sources of congestion.
-
-  Entering the critical section without contention is very fast.  Each
-  thread should stay within the critical section only very briefly,
-  otherwise parallel performance may degrade.
-
-  \item @{ML CRITICAL} is the same as @{ML NAMED_CRITICAL} with empty
-  name argument.
-
-  \item Type @{ML_type "'a Synchronized.var"} represents synchronized
-  variables with state of type @{ML_type 'a}.
-
-  \item @{ML Synchronized.var}~@{text "name x"} creates a synchronized
-  variable that is initialized with value @{text "x"}.  The @{text
-  "name"} is used for tracing.
-
-  \item @{ML Synchronized.guarded_access}~@{text "var f"} lets the
-  function @{text "f"} operate within a critical section on the state
-  @{text "x"} as follows: if @{text "f x"} produces @{ML NONE}, it
-  continues to wait on the internal condition variable, expecting that
-  some other thread will eventually change the content in a suitable
-  manner; if @{text "f x"} produces @{ML SOME}~@{text "(y, x')"} it is
-  satisfied and assigns the new state value @{text "x'"}, broadcasts a
-  signal to all waiting threads on the associated condition variable,
-  and returns the result @{text "y"}.
-
-  \end{description}
-
-  There are some further variants of the @{ML
-  Synchronized.guarded_access} combinator, see @{file
-  "~~/src/Pure/Concurrent/synchronized.ML"} for details.
-*}
-
-text %mlex {* The following example implements a counter that produces
-  positive integers that are unique over the runtime of the Isabelle
-  process:
-*}
-
-ML {*
-  local
-    val counter = Synchronized.var "counter" 0;
-  in
-    fun next () =
-      Synchronized.guarded_access counter
-        (fn i =>
-          let val j = i + 1
-          in SOME (j, j) end);
-  end;
-*}
-
-ML {*
-  val a = next ();
-  val b = next ();
-  @{assert} (a <> b);
-*}
-
-text {* \medskip See @{file "~~/src/Pure/Concurrent/mailbox.ML"} for how
-  to implement a mailbox as a synchronized variable over a purely
-  functional queue. *}
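-
-text {* \medskip The following minimal sketch conveys the idea without
-  claiming to reproduce that module: a plain list serves as FIFO queue,
-  and only @{ML Synchronized.var} and @{ML Synchronized.guarded_access}
-  from above are used. *}
-
-ML {*
-  local
-    val mailbox = Synchronized.var "mailbox" ([]: string list);
-  in
-    (*enqueue a message at the end of the queue*)
-    fun send msg =
-      Synchronized.guarded_access mailbox
-        (fn msgs => SOME ((), msgs @ [msg]));
-    (*wait until a message is available, then dequeue the oldest one*)
-    fun receive () =
-      Synchronized.guarded_access mailbox
-        (fn [] => NONE | msg :: msgs => SOME (msg, msgs));
-  end;
-*}
-
-ML {*
-  send "hello";
-  @{assert} (receive () = "hello");
-*}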
-
-
-section {* Managed evaluation *}
-
-text {* Execution of Standard ML follows the model of strict
-  functional evaluation with optional exceptions.  Evaluation happens
-  whenever some function is applied to (sufficiently many)
-  arguments. The result is either an explicit value or an implicit
-  exception.
-
-  \emph{Managed evaluation} in Isabelle/ML organizes expressions and
-  results to control certain physical side-conditions, to say more
-  specifically when and how evaluation happens.  For example, the
-  Isabelle/ML library supports lazy evaluation with memoing, parallel
-  evaluation via futures, asynchronous evaluation via promises,
-  evaluation with time limit etc.
-
-  \medskip An \emph{unevaluated expression} is represented either as
-  unit abstraction @{verbatim "fn () => a"} of type
-  @{verbatim "unit -> 'a"} or as regular function
-  @{verbatim "fn a => b"} of type @{verbatim "'a -> 'b"}.  Both forms
-  occur routinely, and special care is required to tell them apart ---
-  the static type-system of SML is only of limited help here.
-
-  The first form is more intuitive: some combinator @{text "(unit ->
-  'a) -> 'a"} applies the given function to @{text "()"} to initiate
-  the postponed evaluation process.  The second form is more flexible:
-  some combinator @{text "('a -> 'b) -> 'a -> 'b"} acts like a
-  modified form of function application; several such combinators may
-  be cascaded to modify a given function, before it is ultimately
-  applied to some argument.
-
-  \medskip \emph{Reified results} make the disjoint sum of regular
-  values versus exceptional situations explicit as an ML datatype:
-  @{text "'a result = Res of 'a | Exn of exn"}.  This is typically
-  used for administrative purposes, to store the overall outcome of an
-  evaluation process.
-
-  \emph{Parallel exceptions} aggregate reified results, such that
-  multiple exceptions are digested as a collection in canonical form
-  that identifies exceptions according to their original occurrence.
-  This is particularly important for parallel evaluation via futures
-  (\secref{sec:futures}), which are organized as an acyclic graph of
-  evaluations that depend on other evaluations: exceptions stemming
-  from shared sub-graphs are exposed exactly once and in the order of
-  their original occurrence (e.g.\ when printed at the toplevel).
-  Interrupt counts as a neutral element here: it is treated as minimal
-  information about some canceled evaluation process, and is absorbed
-  by the presence of regular program exceptions.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type "'a Exn.result"} \\
-  @{index_ML Exn.capture: "('a -> 'b) -> 'a -> 'b Exn.result"} \\
-  @{index_ML Exn.interruptible_capture: "('a -> 'b) -> 'a -> 'b Exn.result"} \\
-  @{index_ML Exn.release: "'a Exn.result -> 'a"} \\
-  @{index_ML Par_Exn.release_all: "'a Exn.result list -> 'a list"} \\
-  @{index_ML Par_Exn.release_first: "'a Exn.result list -> 'a list"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type "'a Exn.result"} represents the disjoint sum of
-  ML results explicitly, with constructor @{ML Exn.Res} for regular
-  values and @{ML "Exn.Exn"} for exceptions.
-
-  \item @{ML Exn.capture}~@{text "f x"} manages the evaluation of
-  @{text "f x"} such that exceptions are made explicit as @{ML
-  "Exn.Exn"}.  Note that this includes physical interrupts (see also
-  \secref{sec:exceptions}), so the same precautions apply to user
-  code: interrupts must not be absorbed accidentally!
-
-  \item @{ML Exn.interruptible_capture} is similar to @{ML
-  Exn.capture}, but interrupts are immediately re-raised as required
-  for user code.
-
-  \item @{ML Exn.release}~@{text "result"} releases the original
-  runtime result, exposing its regular value or raising the reified
-  exception.
-
-  \item @{ML Par_Exn.release_all}~@{text "results"} combines results
-  that were produced independently (e.g.\ by parallel evaluation).  If
-  all results are regular values, that list is returned.  Otherwise,
-  the collection of all exceptions is raised, wrapped-up as collective
-  parallel exception.  Note that the latter prevents access to
-  individual exceptions by conventional @{verbatim "handle"} of SML.
-
-  \item @{ML Par_Exn.release_first} is similar to @{ML
-  Par_Exn.release_all}, but only the first exception that has occurred
-  in the original evaluation process is raised again; the others are
-  ignored.  That single exception may get handled by conventional
-  means in SML.
-
-  \end{description}
-*}
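-
-text %mlex {* The following example captures a regular result and an
-  exceptional one (division by zero) and inspects the reified values;
-  it is only a small illustration of @{ML Exn.capture} and @{ML
-  Exn.release}. *}
-
-ML {*
-  val ok = Exn.capture (fn () => 40 + 2) ();
-  val err = Exn.capture (fn () => 1 div 0) ();
-  @{assert} (Exn.release ok = 42);
-  @{assert} (case err of Exn.Exn _ => true | Exn.Res _ => false);
-*}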
-
-
-subsection {* Parallel skeletons \label{sec:parlist} *}
-
-text {*
-  Algorithmic skeletons are combinators that operate on lists in
-  parallel, in the manner of well-known @{text map}, @{text exists},
-  @{text forall} etc.  Management of futures (\secref{sec:futures})
-  and their results as reified exceptions is wrapped up into simple
-  programming interfaces that resemble the sequential versions.
-
-  What remains is the application-specific problem of presenting
-  expressions with suitable \emph{granularity}: each list element
-  corresponds to one evaluation task.  If the granularity is too
-  coarse, the available CPUs are not saturated.  If it is too
-  fine-grained, CPU cycles are wasted due to the overhead of
-  organizing parallel processing.  In the worst case, parallel
-  performance will be less than the sequential counterpart!
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Par_List.map: "('a -> 'b) -> 'a list -> 'b list"} \\
-  @{index_ML Par_List.get_some: "('a -> 'b option) -> 'a list -> 'b option"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Par_List.map}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} is like @{ML
-  "map"}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"}, but the evaluation of @{text "f x\<^sub>i"}
-  for @{text "i = 1, \<dots>, n"} is performed in parallel.
-
-  An exception in any @{text "f x\<^sub>i"} cancels the overall evaluation
-  process.  The final result is produced via @{ML
-  Par_Exn.release_first} as explained above, which means the first
-  program exception that happened to occur in the parallel evaluation
-  is propagated, and all other failures are ignored.
-
-  \item @{ML Par_List.get_some}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} produces some
-  @{text "f x\<^sub>i"} that is of the form @{text "SOME y\<^sub>i"}, if that
-  exists, otherwise @{text "NONE"}.  Thus it is similar to @{ML
-  Library.get_first}, but subject to a non-deterministic parallel
-  choice process.  The first successful result cancels the overall
-  evaluation process; other exceptions are propagated as for @{ML
-  Par_List.map}.
-
-  This generic parallel choice combinator is the basis for derived
-  forms, such as @{ML Par_List.find_some}, @{ML Par_List.exists}, @{ML
-  Par_List.forall}.
-
-  \end{description}
-*}
-
-text %mlex {* Subsequently, the Ackermann function is evaluated in
-  parallel for some ranges of arguments. *}
-
-ML_val {*
-  fun ackermann 0 n = n + 1
-    | ackermann m 0 = ackermann (m - 1) 1
-    | ackermann m n = ackermann (m - 1) (ackermann m (n - 1));
-
-  Par_List.map (ackermann 2) (500 upto 1000);
-  Par_List.map (ackermann 3) (5 upto 10);
-*}
-
-
-subsection {* Lazy evaluation *}
-
-text {*
-  %FIXME
-
-  See also @{file "~~/src/Pure/Concurrent/lazy.ML"}.
-*}
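-
-text %mlex {* As a minimal sketch of that interface, a lazy value is
-  created from a unit abstraction via @{ML Lazy.lazy} and evaluated on
-  demand via @{ML Lazy.force}; the result of the first evaluation is
-  memoized for all further ones. *}
-
-ML {*
-  val x = Lazy.lazy (fn () => 21 * 2);
-  @{assert} (Lazy.force x = 42);
-  @{assert} (Lazy.force x = 42);
-*}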
-
-
-subsection {* Future values \label{sec:futures} *}
-
-text {*
-  %FIXME
-
-  See also @{file "~~/src/Pure/Concurrent/future.ML"}.
-*}
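-
-text %mlex {* As a minimal sketch of that interface, two future values
-  are forked via @{ML Future.fork} and eventually joined via @{ML
-  Future.join}, which waits for the corresponding evaluation to
-  finish. *}
-
-ML {*
-  val x = Future.fork (fn () => 21 * 2);
-  val y = Future.fork (fn () => 10 + 11);
-  @{assert} (Future.join x + Future.join y = 63);
-*}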
-
-
-end
--- a/src/Doc/IsarImplementation/Prelim.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1069 +0,0 @@
-theory Prelim
-imports Base
-begin
-
-chapter {* Preliminaries *}
-
-section {* Contexts \label{sec:context} *}
-
-text {*
-  A logical context represents the background that is required for
-  formulating statements and composing proofs.  It acts as a medium to
-  produce formal content, depending on earlier material (declarations,
-  results etc.).
-
-  For example, derivations within the Isabelle/Pure logic can be
-  described as a judgment @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<phi>"}, which means that a
-  proposition @{text "\<phi>"} is derivable from hypotheses @{text "\<Gamma>"}
-  within the theory @{text "\<Theta>"}.  There are logical reasons for
-  keeping @{text "\<Theta>"} and @{text "\<Gamma>"} separate: theories can be
-  liberal about supporting type constructors and schematic
-  polymorphism of constants and axioms, while the inner calculus of
-  @{text "\<Gamma> \<turnstile> \<phi>"} is strictly limited to Simple Type Theory (with
-  fixed type variables in the assumptions).
-
-  \medskip Contexts and derivations are linked by the following key
-  principles:
-
-  \begin{itemize}
-
-  \item Transfer: monotonicity of derivations admits results to be
-  transferred into a \emph{larger} context, i.e.\ @{text "\<Gamma> \<turnstile>\<^sub>\<Theta>
-  \<phi>"} implies @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta>\<^sub>' \<phi>"} for contexts @{text "\<Theta>'
-  \<supseteq> \<Theta>"} and @{text "\<Gamma>' \<supseteq> \<Gamma>"}.
-
-  \item Export: discharge of hypotheses admits results to be exported
-  into a \emph{smaller} context, i.e.\ @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta> \<phi>"}
-  implies @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<Delta> \<Longrightarrow> \<phi>"} where @{text "\<Gamma>' \<supseteq> \<Gamma>"} and
-  @{text "\<Delta> = \<Gamma>' - \<Gamma>"}.  Note that @{text "\<Theta>"} remains unchanged here,
-  only the @{text "\<Gamma>"} part is affected.
-
-  \end{itemize}
-
-  \medskip By modeling the main characteristics of the primitive
-  @{text "\<Theta>"} and @{text "\<Gamma>"} above, and abstracting over any
-  particular logical content, we arrive at the fundamental notions of
-  \emph{theory context} and \emph{proof context} in Isabelle/Isar.
-  These implement a certain policy to manage arbitrary \emph{context
-  data}.  There is a strongly-typed mechanism to declare new kinds of
-  data at compile time.
-
-  The internal bootstrap process of Isabelle/Pure eventually reaches a
-  stage where certain data slots provide the logical content of @{text
-  "\<Theta>"} and @{text "\<Gamma>"} sketched above, but this does not stop there!
-  Various additional data slots support all kinds of mechanisms that
-  are not necessarily part of the core logic.
-
-  For example, there would be data for canonical introduction and
-  elimination rules for arbitrary operators (depending on the
-  object-logic and application), which enables users to perform
-  standard proof steps implicitly (cf.\ the @{text "rule"} method
-  \cite{isabelle-isar-ref}).
-
-  \medskip Thus Isabelle/Isar is able to bring forth more and more
-  concepts successively.  In particular, an object-logic like
-  Isabelle/HOL continues the Isabelle/Pure setup by adding specific
-  components for automated reasoning (classical reasoner, tableau
-  prover, structured induction etc.) and derived specification
-  mechanisms (inductive predicates, recursive functions etc.).  All of
-  this is ultimately based on the generic data management by theory
-  and proof contexts introduced here.
-*}
-
-
-subsection {* Theory context \label{sec:context-theory} *}
-
-text {* A \emph{theory} is a data container with explicit name and
-  unique identifier.  Theories are related by a (nominal) sub-theory
-  relation, which corresponds to the dependency graph of the original
-  construction; each theory is derived from a certain sub-graph of
-  ancestor theories.  To this end, the system maintains a set of
-  symbolic ``identification stamps'' within each theory.
-
-  The @{text "merge"} operation produces the least upper bound of two
-  theories, which actually degenerates into absorption of one theory
-  into the other (according to the nominal sub-theory relation).
-
-  The @{text "begin"} operation starts a new theory by importing
-  several parent theories and entering a special mode of nameless
-  incremental updates, until the final @{text "end"} operation is
-  performed.
-
-  \medskip The example in \figref{fig:ex-theory} below shows a theory
-  graph derived from @{text "Pure"}, with theory @{text "Length"}
-  importing @{text "Nat"} and @{text "List"}.  The body of @{text
-  "Length"} consists of a sequence of updates, resulting in locally a
-  linear sub-theory relation for each intermediate step.
-
-  \begin{figure}[htb]
-  \begin{center}
-  \begin{tabular}{rcccl}
-        &            & @{text "Pure"} \\
-        &            & @{text "\<down>"} \\
-        &            & @{text "FOL"} \\
-        & $\swarrow$ &              & $\searrow$ & \\
-  @{text "Nat"} &    &              &            & @{text "List"} \\
-        & $\searrow$ &              & $\swarrow$ \\
-        &            & @{text "Length"} \\
-        &            & \multicolumn{3}{l}{~~@{keyword "begin"}} \\
-        &            & $\vdots$~~ \\
-        &            & \multicolumn{3}{l}{~~@{command "end"}} \\
-  \end{tabular}
-  \caption{A theory definition depending on ancestors}\label{fig:ex-theory}
-  \end{center}
-  \end{figure}
-
-  \medskip Derived formal entities may retain a reference to the
-  background theory in order to indicate the formal context from which
-  they were produced.  This provides an immutable certificate of the
-  background theory.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type theory} \\
-  @{index_ML Theory.eq_thy: "theory * theory -> bool"} \\
-  @{index_ML Theory.subthy: "theory * theory -> bool"} \\
-  @{index_ML Theory.merge: "theory * theory -> theory"} \\
-  @{index_ML Theory.begin_theory: "string * Position.T -> theory list -> theory"} \\
-  @{index_ML Theory.parents_of: "theory -> theory list"} \\
-  @{index_ML Theory.ancestors_of: "theory -> theory list"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type theory} represents theory contexts.
-
-  \item @{ML "Theory.eq_thy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} check strict
-  identity of two theories.
-
-  \item @{ML "Theory.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
-  according to the intrinsic graph structure of the construction.
-  This sub-theory relation is a nominal approximation of inclusion
-  (@{text "\<subseteq>"}) of the corresponding content (according to the
-  semantics of the ML modules that implement the data).
-
-  \item @{ML "Theory.merge"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} absorbs one theory
-  into the other.  This version of ad-hoc theory merge fails for
-  unrelated theories!
-
-  \item @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
-  a new theory based on the given parents.  This ML function is
-  normally not invoked directly.
-
-  \item @{ML "Theory.parents_of"}~@{text "thy"} returns the direct
-  ancestors of @{text thy}.
-
-  \item @{ML "Theory.ancestors_of"}~@{text "thy"} returns all
-  ancestors of @{text thy} (not including @{text thy} itself).
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "theory"} & : & @{text ML_antiquotation} \\
-  @{ML_antiquotation_def "theory_context"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  @@{ML_antiquotation theory} nameref?
-  ;
-  @@{ML_antiquotation theory_context} nameref
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{theory}"} refers to the background theory of the
-  current context --- as abstract value.
-
-  \item @{text "@{theory A}"} refers to an explicitly named ancestor
-  theory @{text "A"} of the background theory of the current context
-  --- as abstract value.
-
-  \item @{text "@{theory_context A}"} is similar to @{text "@{theory
-  A}"}, but presents the result as initial @{ML_type Proof.context}
-  (see also @{ML Proof_Context.init_global}).
-
-  \end{description}
-*}
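-
-text %mlex {* The following example applies some of the above
-  operations to the background theory of the current context, which is
-  ultimately based on @{text "Pure"}. *}
-
-ML_val {*
-  val thy = @{theory};
-  @{assert} (Theory.eq_thy (thy, thy));
-  @{assert} (Theory.subthy (@{theory Pure}, thy));
-*}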
-
-
-subsection {* Proof context \label{sec:context-proof} *}
-
-text {* A proof context is a container for pure data that refers to
-  the theory from which it is derived. The @{text "init"} operation
-  creates a proof context from a given theory. There is an explicit
-  @{text "transfer"} operation to force resynchronization with updates
-  to the background theory -- this is rarely required in practice.
-
-  Entities derived in a proof context need to record logical
-  requirements explicitly, since there is no separate context
-  identification or symbolic inclusion as for theories.  For example,
-  hypotheses used in primitive derivations (cf.\ \secref{sec:thms})
-  are recorded separately within the sequent @{text "\<Gamma> \<turnstile> \<phi>"}, just to
-  make double sure.  Results could still leak into an alien proof
-  context due to programming errors, but Isabelle/Isar includes some
-  extra validity checks in critical positions, notably at the end of a
-  sub-proof.
-
-  Proof contexts may be manipulated arbitrarily, although the common
-  discipline is to follow block structure as a mental model: a given
-  context is extended consecutively, and results are exported back
-  into the original context.  Note that an Isar proof state models
-  block-structured reasoning explicitly, using a stack of proof
-  contexts internally.  For various technical reasons, the background
-  theory of an Isar proof state must not be changed while the proof is
-  still under construction!
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Proof.context} \\
-  @{index_ML Proof_Context.init_global: "theory -> Proof.context"} \\
-  @{index_ML Proof_Context.theory_of: "Proof.context -> theory"} \\
-  @{index_ML Proof_Context.transfer: "theory -> Proof.context -> Proof.context"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Proof.context} represents proof contexts.
-
-  \item @{ML Proof_Context.init_global}~@{text "thy"} produces a proof
-  context derived from @{text "thy"}, initializing all data.
-
-  \item @{ML Proof_Context.theory_of}~@{text "ctxt"} selects the
-  background theory from @{text "ctxt"}.
-
-  \item @{ML Proof_Context.transfer}~@{text "thy ctxt"} promotes the
-  background theory of @{text "ctxt"} to the super theory @{text
-  "thy"}.
-
-  \end{description}
-*}
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "context"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{text "@{context}"} refers to \emph{the} context at
-  compile-time --- as abstract value.  Independently of (local) theory
-  or proof mode, this always produces a meaningful result.
-
-  This is probably the most common antiquotation in interactive
-  experimentation with ML inside Isar.
-
-  \end{description}
-*}
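-
-text %mlex {* The following example produces a proof context from the
-  background theory of the current context and recovers that theory
-  again. *}
-
-ML_val {*
-  val ctxt = Proof_Context.init_global @{theory};
-  @{assert} (Theory.eq_thy (Proof_Context.theory_of ctxt, @{theory}));
-*}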
-
-
-subsection {* Generic contexts \label{sec:generic-context} *}
-
-text {*
-  A generic context is the disjoint sum of either a theory or proof
-  context.  Occasionally, this enables uniform treatment of generic
-  context data, typically extra-logical information.  Operations on
-  generic contexts include the usual injections, partial selections,
-  and combinators for lifting operations on either component of the
-  disjoint sum.
-
-  Moreover, there are total operations @{text "theory_of"} and @{text
-  "proof_of"} to convert a generic context into either kind: a theory
-  can always be selected from the sum, while a proof context might
-  have to be constructed by an ad-hoc @{text "init"} operation, which
-  incurs a small runtime overhead.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Context.generic} \\
-  @{index_ML Context.theory_of: "Context.generic -> theory"} \\
-  @{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Context.generic} is the direct sum of @{ML_type
-  "theory"} and @{ML_type "Proof.context"}, with the datatype
-  constructors @{ML "Context.Theory"} and @{ML "Context.Proof"}.
-
-  \item @{ML Context.theory_of}~@{text "context"} always produces a
-  theory from the generic @{text "context"}, using @{ML
-  "Proof_Context.theory_of"} as required.
-
-  \item @{ML Context.proof_of}~@{text "context"} always produces a
-  proof context from the generic @{text "context"}, using @{ML
-  "Proof_Context.init_global"} as required (note that this re-initializes the
-  context data with each invocation).
-
-  \end{description}
-*}
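-
-text %mlex {* The following example injects both kinds of context into
-  the generic sum and projects the background theory back out of
-  each. *}
-
-ML_val {*
-  val context1 = Context.Theory @{theory};
-  val context2 = Context.Proof @{context};
-  @{assert} (Theory.eq_thy (Context.theory_of context1, @{theory}));
-  @{assert} (Theory.eq_thy (Context.theory_of context2, @{theory}));
-*}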
-
-
-subsection {* Context data \label{sec:context-data} *}
-
-text {* The main purpose of theory and proof contexts is to manage
-  arbitrary (pure) data.  New data types can be declared incrementally
-  at compile time.  There are separate declaration mechanisms for any
-  of the three kinds of contexts: theory, proof, generic.
-
-  \paragraph{Theory data} declarations need to implement the following
-  SML signature:
-
-  \medskip
-  \begin{tabular}{ll}
-  @{text "\<type> T"} & representing type \\
-  @{text "\<val> empty: T"} & empty default value \\
-  @{text "\<val> extend: T \<rightarrow> T"} & re-initialize on import \\
-  @{text "\<val> merge: T \<times> T \<rightarrow> T"} & join on import \\
-  \end{tabular}
-  \medskip
-
-  The @{text "empty"} value acts as initial default for \emph{any}
-  theory that does not declare actual data content; @{text "extend"}
-  acts like a unitary version of @{text "merge"}.
-
-  Implementing @{text "merge"} can be tricky.  The general idea is
-  that @{text "merge (data\<^sub>1, data\<^sub>2)"} inserts those parts of @{text
-  "data\<^sub>2"} into @{text "data\<^sub>1"} that are not yet present, while
-  keeping the general order of things.  The @{ML Library.merge}
-  function on plain lists may serve as canonical template.
-
-  Particularly note that shared parts of the data must not be
-  duplicated by naive concatenation, or a theory graph that is like a
-  chain of diamonds would cause an exponential blowup!
-
-  \paragraph{Proof context data} declarations need to implement the
-  following SML signature:
-
-  \medskip
-  \begin{tabular}{ll}
-  @{text "\<type> T"} & representing type \\
-  @{text "\<val> init: theory \<rightarrow> T"} & produce initial value \\
-  \end{tabular}
-  \medskip
-
-  The @{text "init"} operation is supposed to produce a pure value
-  from the given background theory and should be somehow
-  ``immediate''.  Whenever a proof context is initialized, which
-  happens frequently, the system invokes the @{text "init"}
-  operation of \emph{all} theory data slots ever declared.  This also
-  means that one needs to be economical about the total number of proof
-  data declarations in the system, i.e.\ each ML module should declare
-  at most one, sometimes two data slots for its internal use.
-  Repeated data declarations to simulate a record type should be
-  avoided!
-
-  \paragraph{Generic data} provides a hybrid interface for both theory
-  and proof data.  The @{text "init"} operation for proof contexts is
-  predefined to select the current data value from the background
-  theory.
-
-  \bigskip Any of the above data declarations over type @{text "T"}
-  result in an ML structure with the following signature:
-
-  \medskip
-  \begin{tabular}{ll}
-  @{text "get: context \<rightarrow> T"} \\
-  @{text "put: T \<rightarrow> context \<rightarrow> context"} \\
-  @{text "map: (T \<rightarrow> T) \<rightarrow> context \<rightarrow> context"} \\
-  \end{tabular}
-  \medskip
-
-  These operations provide exclusive access for the particular
-  kind of context (theory, proof, or generic context).  This interface
-  observes the ML discipline for types and scopes: there is no other
-  way to access the corresponding data slot of a context.  By keeping
-  these operations private, an Isabelle/ML module may maintain
-  abstract values authentically.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_functor Theory_Data} \\
-  @{index_ML_functor Proof_Data} \\
-  @{index_ML_functor Generic_Data} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML_functor Theory_Data}@{text "(spec)"} declares data for
-  type @{ML_type theory} according to the specification provided as
-  argument structure.  The resulting structure provides data init and
-  access operations as described above.
-
-  \item @{ML_functor Proof_Data}@{text "(spec)"} is analogous to
-  @{ML_functor Theory_Data} for type @{ML_type Proof.context}.
-
-  \item @{ML_functor Generic_Data}@{text "(spec)"} is analogous to
-  @{ML_functor Theory_Data} for type @{ML_type Context.generic}.
-
-  \end{description}
-*}
-
-text %mlex {*
-  The following artificial example demonstrates theory
-  data: we maintain a set of terms that are supposed to be wellformed
-  wrt.\ the enclosing theory.  The public interface is as follows:
-*}
-
-ML {*
-  signature WELLFORMED_TERMS =
-  sig
-    val get: theory -> term list
-    val add: term -> theory -> theory
-  end;
-*}
-
-text {* The implementation uses private theory data internally, and
-  only exposes an operation that involves explicit argument checking
-  wrt.\ the given theory. *}
-
-ML {*
-  structure Wellformed_Terms: WELLFORMED_TERMS =
-  struct
-
-  structure Terms = Theory_Data
-  (
-    type T = term Ord_List.T;
-    val empty = [];
-    val extend = I;
-    fun merge (ts1, ts2) =
-      Ord_List.union Term_Ord.fast_term_ord ts1 ts2;
-  );
-
-  val get = Terms.get;
-
-  fun add raw_t thy =
-    let
-      val t = Sign.cert_term thy raw_t;
-    in
-      Terms.map (Ord_List.insert Term_Ord.fast_term_ord t) thy
-    end;
-
-  end;
-*}
-
-text {* Type @{ML_type "term Ord_List.T"} is used for reasonably
-  efficient representation of a set of terms: all operations are
-  linear in the number of stored elements.  Here we assume that users
-  of this module do not care about the declaration order, since that
-  data structure forces its own arrangement of elements.
-
-  Observe how the @{ML_text merge} operation joins the data slots of
-  the two constituents: @{ML Ord_List.union} prevents duplication of
-  common data from different branches, thus avoiding the danger of
-  exponential blowup.  Plain list append etc.\ must never be used for
-  theory data merges!
-
-  \medskip Our intended invariant is achieved as follows:
-  \begin{enumerate}
-
-  \item @{ML Wellformed_Terms.add} only admits terms that have passed
-  the @{ML Sign.cert_term} check of the given theory at that point.
-
-  \item Wellformedness in the sense of @{ML Sign.cert_term} is
-  monotonic wrt.\ the sub-theory relation.  So our data can move
-  upwards in the hierarchy (via extension or merges), and maintain
-  wellformedness without further checks.
-
-  \end{enumerate}
-
-  Note that all basic operations of the inference kernel (which
-  includes @{ML Sign.cert_term}) observe this monotonicity principle,
-  but other user-space tools don't.  For example, fully-featured
-  type-inference via @{ML Syntax.check_term} (cf.\
-  \secref{sec:term-check}) is not necessarily monotonic wrt.\ the
-  background theory, since constraints of term constants can be
-  modified by later declarations, for example.
-
-  In most cases, user-space context data does not have to take such
-  invariants too seriously.  The situation is different in the
-  implementation of the inference kernel itself, which uses the very
-  same data mechanisms for types, constants, axioms etc.
-*}
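-
-text {* \medskip A hypothetical use of the above interface at the
-  theory level could look like this; the identity function is certainly
-  wellformed in the present background theory. *}
-
-setup {* Wellformed_Terms.add @{term "\<lambda>x. x"} *}
-
-ML_val {* @{assert} (not (null (Wellformed_Terms.get @{theory}))) *}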
-
-
-subsection {* Configuration options \label{sec:config-options} *}
-
-text {* A \emph{configuration option} is a named optional value of
-  some basic type (Boolean, integer, string) that is stored in the
-  context.  It is a simple application of general context data
-  (\secref{sec:context-data}) that is sufficiently common to justify
-  customized setup, which includes some concrete declarations for
-  end-users using existing notation for attributes (cf.\
-  \secref{sec:attributes}).
-
-  For example, the predefined configuration option @{attribute
-  show_types} controls output of explicit type constraints for
-  variables in printed terms (cf.\ \secref{sec:read-print}).  Its
-  value can be modified within Isar text like this:
-*}
-
-declare [[show_types = false]]
-  -- {* declaration within (local) theory context *}
-
-notepad
-begin
-  note [[show_types = true]]
-    -- {* declaration within proof (forward mode) *}
-  term x
-
-  have "x = x"
-    using [[show_types = false]]
-      -- {* declaration within proof (backward mode) *}
-    ..
-end
-
-text {* Configuration options that are not set explicitly hold a
-  default value that can depend on the application context.  This makes
-  it possible to retrieve the value from another slot within the
-  context, or to fall back on a global preference mechanism, for example.
-
-  The operations to declare configuration options and get/map their
-  values are modeled as direct replacements for historic global
-  references, only that the context is made explicit.  This allows
-  easy configuration of tools, without relying on the execution order
-  as required for old-style mutable references.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Config.get: "Proof.context -> 'a Config.T -> 'a"} \\
-  @{index_ML Config.map: "'a Config.T -> ('a -> 'a) -> Proof.context -> Proof.context"} \\
-  @{index_ML Attrib.setup_config_bool: "binding -> (Context.generic -> bool) ->
-  bool Config.T"} \\
-  @{index_ML Attrib.setup_config_int: "binding -> (Context.generic -> int) ->
-  int Config.T"} \\
-  @{index_ML Attrib.setup_config_real: "binding -> (Context.generic -> real) ->
-  real Config.T"} \\
-  @{index_ML Attrib.setup_config_string: "binding -> (Context.generic -> string) ->
-  string Config.T"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Config.get}~@{text "ctxt config"} gets the value of
-  @{text "config"} in the given context.
-
-  \item @{ML Config.map}~@{text "config f ctxt"} updates the context
-  by updating the value of @{text "config"}.
-
-  \item @{text "config ="}~@{ML Attrib.setup_config_bool}~@{text "name
-  default"} creates a named configuration option of type @{ML_type
-  bool}, with the given @{text "default"} depending on the application
-  context.  The resulting @{text "config"} can be used to get/map its
-  value in a given context.  There is an implicit update of the
-  background theory that registers the option as attribute with some
-  concrete syntax.
-
-  \item @{ML Attrib.setup_config_int}, @{ML Attrib.setup_config_real}, and
-  @{ML Attrib.setup_config_string} work like @{ML Attrib.setup_config_bool},
-  but for types @{ML_type int}, @{ML_type real}, and @{ML_type string},
-  respectively.
-
-  \end{description}
-*}
-
-text %mlex {* The following example shows how to declare and use a
-  Boolean configuration option called @{text "my_flag"} with constant
-  default value @{ML false}.  *}
-
-ML {*
-  val my_flag =
-    Attrib.setup_config_bool @{binding my_flag} (K false)
-*}
-
-text {* Now the user can refer to @{attribute my_flag} in
-  declarations, while ML tools can retrieve the current value from the
-  context via @{ML Config.get}.  *}
-
-ML_val {* @{assert} (Config.get @{context} my_flag = false) *}
-
-declare [[my_flag = true]]
-
-ML_val {* @{assert} (Config.get @{context} my_flag = true) *}
-
-notepad
-begin
-  {
-    note [[my_flag = false]]
-    ML_val {* @{assert} (Config.get @{context} my_flag = false) *}
-  }
-  ML_val {* @{assert} (Config.get @{context} my_flag = true) *}
-end
-
-text {* Here is another example involving ML type @{ML_type real}
-  (floating-point numbers). *}
-
-ML {*
-  val airspeed_velocity =
-    Attrib.setup_config_real @{binding airspeed_velocity} (K 0.0)
-*}
-
-declare [[airspeed_velocity = 10]]
-declare [[airspeed_velocity = 9.9]]
-
-
-section {* Names \label{sec:names} *}
-
-text {* In principle, a name is just a string, but there are various
-  conventions for representing additional structure.  For example,
-  ``@{text "Foo.bar.baz"}'' is considered as a long name consisting of
-  qualifier @{text "Foo.bar"} and base name @{text "baz"}.  The
-  individual constituents of a name may have further substructure,
-  e.g.\ the string ``\verb,\,\verb,<alpha>,'' encodes as a single
-  symbol (\secref{sec:symbols}).
-
-  \medskip Subsequently, we shall introduce specific categories of
-  names.  Roughly speaking these correspond to logical entities as
-  follows:
-  \begin{itemize}
-
-  \item Basic names (\secref{sec:basic-name}): free and bound
-  variables.
-
-  \item Indexed names (\secref{sec:indexname}): schematic variables.
-
-  \item Long names (\secref{sec:long-name}): constants of any kind
-  (type constructors, term constants, other concepts defined in user
-  space).  Such entities are typically managed via name spaces
-  (\secref{sec:name-space}).
-
-  \end{itemize}
-*}
-
-
-subsection {* Basic names \label{sec:basic-name} *}
-
-text {*
-  A \emph{basic name} essentially consists of a single Isabelle
-  identifier.  There are conventions to mark separate classes of basic
-  names, by attaching a suffix of underscores: one underscore means
-  \emph{internal name}, two underscores means \emph{Skolem name},
-  three underscores means \emph{internal Skolem name}.
-
-  For example, the basic name @{text "foo"} has the internal version
-  @{text "foo_"}, with Skolem versions @{text "foo__"} and @{text
-  "foo___"}, respectively.
-
-  These special versions provide copies of the basic name space, apart
-  from anything that normally appears in the user text.  For example,
-  system generated variables in Isar proof contexts are usually marked
-  as internal, which prevents mysterious names like @{text "xaa"} from
-  appearing in human-readable text.
-
-  \medskip Manipulating binding scopes often requires on-the-fly
-  renamings.  A \emph{name context} contains a collection of already
-  used names.  The @{text "declare"} operation adds names to the
-  context.
-
-  The @{text "invents"} operation derives a number of fresh names from
-  a given starting point.  For example, the first three names derived
-  from @{text "a"} are @{text "a"}, @{text "b"}, @{text "c"}.
-
-  The @{text "variants"} operation produces fresh names by
-  incrementing tentative names as base-26 numbers (with digits @{text
-  "a..z"}) until all clashes are resolved.  For example, name @{text
-  "foo"} results in variants @{text "fooa"}, @{text "foob"}, @{text
-  "fooc"}, \dots, @{text "fooaa"}, @{text "fooab"} etc.; each renaming
-  step picks the next unused variant from this sequence.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Name.internal: "string -> string"} \\
-  @{index_ML Name.skolem: "string -> string"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type Name.context} \\
-  @{index_ML Name.context: Name.context} \\
-  @{index_ML Name.declare: "string -> Name.context -> Name.context"} \\
-  @{index_ML Name.invent: "Name.context -> string -> int -> string list"} \\
-  @{index_ML Name.variant: "string -> Name.context -> string * Name.context"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML Variable.names_of: "Proof.context -> Name.context"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Name.internal}~@{text "name"} produces an internal name
-  by adding one underscore.
-
-  \item @{ML Name.skolem}~@{text "name"} produces a Skolem name by
-  adding two underscores.
-
-  \item Type @{ML_type Name.context} represents the context of already
-  used names; the initial value is @{ML "Name.context"}.
-
-  \item @{ML Name.declare}~@{text "name"} enters a used name into the
-  context.
-
-  \item @{ML Name.invent}~@{text "context name n"} produces @{text
-  "n"} fresh names derived from @{text "name"}.
-
-  \item @{ML Name.variant}~@{text "name context"} produces a fresh
-  variant of @{text "name"}; the result is declared to the context.
-
-  \item @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
-  of declared type and term variable names.  Projecting a proof
-  context down to a primitive name context is occasionally useful when
-  invoking lower-level operations.  Regular management of ``fresh
-  variables'' is done by suitable operations of structure @{ML_structure
-  Variable}, which is also able to provide an official status of
-  ``locally fixed variable'' within the logical environment (cf.\
-  \secref{sec:variables}).
-
-  \end{description}
-*}
-
-text %mlex {* The following simple examples demonstrate how to produce
-  fresh names from the initial @{ML Name.context}. *}
-
-ML {*
-  val list1 = Name.invent Name.context "a" 5;
-  @{assert} (list1 = ["a", "b", "c", "d", "e"]);
-
-  val list2 =
-    #1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] Name.context);
-  @{assert} (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
-*}
-
-text {* \medskip The same works relatively to the formal context as
-  follows. *}
-
-locale ex = fixes a b c :: 'a
-begin
-
-ML {*
-  val names = Variable.names_of @{context};
-
-  val list1 = Name.invent names "a" 5;
-  @{assert} (list1 = ["d", "e", "f", "g", "h"]);
-
-  val list2 =
-    #1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] names);
-  @{assert} (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
-*}
-
-end
-
-
-subsection {* Indexed names \label{sec:indexname} *}
-
-text {*
-  An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic
-  name and a natural number.  This representation allows efficient
-  renaming by incrementing the second component only.  The canonical
-  way to rename two collections of indexnames apart from each other is
-  this: determine the maximum index @{text "maxidx"} of the first
-  collection, then increment all indexes of the second collection by
-  @{text "maxidx + 1"}; the maximum index of an empty collection is
-  @{text "-1"}.
-
-  Occasionally, basic names are injected into the same pair type of
-  indexed names: then @{text "(x, -1)"} is used to encode the basic
-  name @{text "x"}.
-
-  \medskip Isabelle syntax observes the following rules for
-  representing an indexname @{text "(x, i)"} as a packed string:
-
-  \begin{itemize}
-
-  \item @{text "?x"} if @{text "x"} does not end with a digit and @{text "i = 0"},
-
-  \item @{text "?xi"} if @{text "x"} does not end with a digit,
-
-  \item @{text "?x.i"} otherwise.
-
-  \end{itemize}
-
-  Indexnames may acquire large index numbers after several maxidx
-  shifts have been applied.  Results are usually normalized towards
-  @{text "0"} at certain checkpoints, notably at the end of a proof.
-  This works by producing variants of the corresponding basic name
-  components.  For example, the collection @{text "?x1, ?x7, ?x42"}
-  becomes @{text "?x, ?xa, ?xb"}.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type indexname: "string * int"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type indexname} represents indexed names.  This is
-  an abbreviation for @{ML_type "string * int"}.  The second component
-  is usually non-negative, except for situations where @{text "(x,
-  -1)"} is used to inject basic names into this type.  Other negative
-  indexes should not be used.
-
-  \end{description}
-*}
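-
-text %mlex {* The following (artificial) example performs the shift by
-  @{text "maxidx + 1"} sketched above on plain ML values of type
-  @{ML_type indexname}. *}
-
-ML_val {*
-  val xs: indexname list = [("x", 0), ("y", 2)];
-  val maxidx = fold (fn (_, i) => fn m => Int.max (i, m)) xs ~1;
-  @{assert} (maxidx = 2);
-  val shifted = map (fn (z, i) => (z, i + maxidx + 1)) [("z", 0)];
-  @{assert} (shifted = [("z", 3)]);
-*}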
-
-
-subsection {* Long names \label{sec:long-name} *}
-
-text {* A \emph{long name} consists of a sequence of non-empty name
-  components.  The packed representation uses a dot as separator, as
-  in ``@{text "A.b.c"}''.  The last component is called \emph{base
-  name}, the remaining prefix is called \emph{qualifier} (which may be
-  empty).  The qualifier can be understood as the access path to the
-  named entity while passing through some nested block-structure,
-  although our free-form long names do not really enforce any strict
-  discipline.
-
-  For example, an item named ``@{text "A.b.c"}'' may be understood as
-  a local entity @{text "c"}, within a local structure @{text "b"},
-  within a global structure @{text "A"}.  In practice, long names
-  usually represent 1--3 levels of qualification.  User ML code should
-  not make any assumptions about the particular structure of long
-  names!
-
-  The empty name is commonly used as an indication of unnamed
-  entities, or entities that are not entered into the corresponding
-  name space, whenever this makes any sense.  The basic operations on
-  long names map empty names again to empty names.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Long_Name.base_name: "string -> string"} \\
-  @{index_ML Long_Name.qualifier: "string -> string"} \\
-  @{index_ML Long_Name.append: "string -> string -> string"} \\
-  @{index_ML Long_Name.implode: "string list -> string"} \\
-  @{index_ML Long_Name.explode: "string -> string list"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Long_Name.base_name}~@{text "name"} returns the base name
-  of a long name.
-
-  \item @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
-  of a long name.
-
-  \item @{ML Long_Name.append}~@{text "name\<^sub>1 name\<^sub>2"} appends two long
-  names.
-
-  \item @{ML Long_Name.implode}~@{text "names"} and @{ML
-  Long_Name.explode}~@{text "name"} convert between the packed string
-  representation and the explicit list form of long names.
-
-  \end{description}
-*}
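-
-text %mlex {* The following examples demonstrate the basic operations
-  on packed long names. *}
-
-ML_val {*
-  @{assert} (Long_Name.base_name "A.b.c" = "c");
-  @{assert} (Long_Name.qualifier "A.b.c" = "A.b");
-  @{assert} (Long_Name.append "A.b" "c" = "A.b.c");
-  @{assert} (Long_Name.explode "A.b.c" = ["A", "b", "c"]);
-  @{assert} (Long_Name.implode ["A", "b", "c"] = "A.b.c");
-*}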
-
-
-subsection {* Name spaces \label{sec:name-space} *}
-
-text {* A @{text "name space"} manages a collection of long names,
-  together with a mapping between partially qualified external names
-  and fully qualified internal names (in both directions).  Note that
-  the corresponding @{text "intern"} and @{text "extern"} operations
-  are mostly used for parsing and printing only!  The @{text
-  "declare"} operation augments a name space according to the accesses
-  determined by a given binding, and a naming policy from the context.
-
-  \medskip A @{text "binding"} specifies details about the prospective
-  long name of a newly introduced formal entity.  It consists of a
-  base name, prefixes for qualification (separate ones for system
-  infrastructure and user-space mechanisms), a slot for the original
-  source position, and some additional flags.
-
-  \medskip A @{text "naming"} provides some additional details for
-  producing a long name from a binding.  Normally, the naming is
-  implicit in the theory or proof context.  The @{text "full"}
-  operation (and its variants for different context types) produces a
-  fully qualified internal name to be entered into a name space.  The
-  main equation of this ``chemical reaction'' when binding new
-  entities in a context is as follows:
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "binding + naming \<longrightarrow> long name + name space accesses"}
-  \end{tabular}
-
-  \bigskip As a general principle, there is a separate name space for
-  each kind of formal entity, e.g.\ fact, logical constant, type
-  constructor, type class.  It is usually clear from the occurrence in
-  concrete syntax (or from the scope) which kind of entity a name
-  refers to.  For example, the very same name @{text "c"} may be used
-  uniformly for a constant, type constructor, and type class.
-
-  There are common schemes to name derived entities systematically
-  according to the name of the main logical entity involved, e.g.\
-  fact @{text "c.intro"} for a canonical introduction rule related to
-  constant @{text "c"}.  This technique of mapping names from one
-  space into another requires some care in order to avoid conflicts.
-  In particular, theorem names derived from a type constructor or type
-  class should get an additional suffix in addition to the usual
-  qualification.  This leads to the following conventions for derived
-  names:
-
-  \medskip
-  \begin{tabular}{ll}
-  logical entity & fact name \\\hline
-  constant @{text "c"} & @{text "c.intro"} \\
-  type @{text "c"} & @{text "c_type.intro"} \\
-  class @{text "c"} & @{text "c_class.intro"} \\
-  \end{tabular}
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type binding} \\
-  @{index_ML Binding.empty: binding} \\
-  @{index_ML Binding.name: "string -> binding"} \\
-  @{index_ML Binding.qualify: "bool -> string -> binding -> binding"} \\
-  @{index_ML Binding.prefix: "bool -> string -> binding -> binding"} \\
-  @{index_ML Binding.conceal: "binding -> binding"} \\
-  @{index_ML Binding.print: "binding -> string"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type Name_Space.naming} \\
-  @{index_ML Name_Space.default_naming: Name_Space.naming} \\
-  @{index_ML Name_Space.add_path: "string -> Name_Space.naming -> Name_Space.naming"} \\
-  @{index_ML Name_Space.full_name: "Name_Space.naming -> binding -> string"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML_type Name_Space.T} \\
-  @{index_ML Name_Space.empty: "string -> Name_Space.T"} \\
-  @{index_ML Name_Space.merge: "Name_Space.T * Name_Space.T -> Name_Space.T"} \\
-  @{index_ML Name_Space.declare: "Context.generic -> bool ->
-  binding -> Name_Space.T -> string * Name_Space.T"} \\
-  @{index_ML Name_Space.intern: "Name_Space.T -> string -> string"} \\
-  @{index_ML Name_Space.extern: "Proof.context -> Name_Space.T -> string -> string"} \\
-  @{index_ML Name_Space.is_concealed: "Name_Space.T -> string -> bool"}
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type binding} represents the abstract concept of
-  name bindings.
-
-  \item @{ML Binding.empty} is the empty binding.
-
-  \item @{ML Binding.name}~@{text "name"} produces a binding with base
-  name @{text "name"}.  Note that this lacks proper source position
-  information; see also the ML antiquotation @{ML_antiquotation
-  binding}.
-
-  \item @{ML Binding.qualify}~@{text "mandatory name binding"}
-  prefixes qualifier @{text "name"} to @{text "binding"}.  The @{text
-  "mandatory"} flag tells if this name component always needs to be
-  given in name space accesses --- this is mostly @{text "false"} in
-  practice.  Note that this part of qualification is typically used in
-  derived specification mechanisms.
-
-  \item @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
-  affects the system prefix.  This part of extra qualification is
-  typically used in the infrastructure for modular specifications,
-  notably ``local theory targets'' (see also \chref{ch:local-theory}).
-
-  \item @{ML Binding.conceal}~@{text "binding"} indicates that the
-  binding shall refer to an entity that serves foundational purposes
-  only.  This flag helps to mark implementation details of
-  specification mechanisms etc.  Other tools should not depend on the
-  particulars of concealed entities (cf.\ @{ML
-  Name_Space.is_concealed}).
-
-  \item @{ML Binding.print}~@{text "binding"} produces a string
-  representation for human-readable output, together with some formal
-  markup that might get used in GUI front-ends, for example.
-
-  \item Type @{ML_type Name_Space.naming} represents the abstract
-  concept of a naming policy.
-
-  \item @{ML Name_Space.default_naming} is the default naming policy.
-  In a theory context, this is usually augmented by a path prefix
-  consisting of the theory name.
-
-  \item @{ML Name_Space.add_path}~@{text "path naming"} augments the
-  naming policy by extending its path component.
-
-  \item @{ML Name_Space.full_name}~@{text "naming binding"} turns a
-  name binding (usually a basic name) into the fully qualified
-  internal name, according to the given naming policy.
-
-  \item Type @{ML_type Name_Space.T} represents name spaces.
-
-  \item @{ML Name_Space.empty}~@{text "kind"} and @{ML Name_Space.merge}~@{text
-  "(space\<^sub>1, space\<^sub>2)"} are the canonical operations for
-  maintaining name spaces according to theory data management
-  (\secref{sec:context-data}); @{text "kind"} is a formal comment
-  to characterize the purpose of a name space.
-
-  \item @{ML Name_Space.declare}~@{text "context strict binding
-  space"} enters a name binding as fully qualified internal name into
-  the name space, using the naming of the context.
-
-  \item @{ML Name_Space.intern}~@{text "space name"} internalizes a
-  (partially qualified) external name.
-
-  This operation is mostly for parsing!  Note that fully qualified
-  names stemming from declarations are produced via @{ML
-  "Name_Space.full_name"} and @{ML "Name_Space.declare"}
-  (or their derivatives for @{ML_type theory} and
-  @{ML_type Proof.context}).
-
-  \item @{ML Name_Space.extern}~@{text "ctxt space name"} externalizes a
-  (fully qualified) internal name.
-
-  This operation is mostly for printing!  User code should not rely on
-  the precise result too much.
-
-  \item @{ML Name_Space.is_concealed}~@{text "space name"} indicates
-  whether @{text "name"} refers to a strictly private entity that
-  other tools are supposed to ignore!
-
-  \end{description}
-*}
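-
-text %mlex {* The following example illustrates the ``chemical
-  reaction'' of a binding with a naming policy, without entering the
-  result into an actual name space. *}
-
-ML_val {*
-  val naming = Name_Space.add_path "A" Name_Space.default_naming;
-  @{assert} (Name_Space.full_name naming (Binding.name "b") = "A.b");
-*}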
-
-text %mlantiq {*
-  \begin{matharray}{rcl}
-  @{ML_antiquotation_def "binding"} & : & @{text ML_antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-  @@{ML_antiquotation binding} name
-  \<close>}
-
-  \begin{description}
-
-  \item @{text "@{binding name}"} produces a binding with base name
-  @{text "name"} and the source position taken from the concrete
-  syntax of this antiquotation.  In many situations this is more
-  appropriate than the more basic @{ML Binding.name} function.
-
-  \end{description}
-*}
-
-text %mlex {* The following example yields the source position of some
-  concrete binding inlined into the text:
-*}
-
-ML {* Binding.pos_of @{binding here} *}
-
-text {* \medskip That position can be also printed in a message as
-  follows: *}
-
-ML_command {*
-  writeln
-    ("Look here" ^ Position.here (Binding.pos_of @{binding here}))
-*}
-
-text {* This illustrates a key virtue of formalized bindings as
-  opposed to raw specifications of base names: the system can use this
-  additional information for feedback given to the user (error
-  messages etc.).
-
-  \medskip The following example refers to its source position
-  directly, which is occasionally useful for experimentation and
-  diagnostic purposes: *}
-
-ML_command {*
-  warning ("Look here" ^ Position.here @{here})
-*}
-
-end
--- a/src/Doc/IsarImplementation/Proof.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,492 +0,0 @@
-theory Proof
-imports Base
-begin
-
-chapter {* Structured proofs *}
-
-section {* Variables \label{sec:variables} *}
-
-text {*
-  Any variable that is not explicitly bound by @{text "\<lambda>"}-abstraction
-  is considered as ``free''.  Logically, free variables act like
-  outermost universal quantification at the sequent level: @{text
-  "A\<^sub>1(x), \<dots>, A\<^sub>n(x) \<turnstile> B(x)"} means that the result
-  holds \emph{for all} values of @{text "x"}.  Free variables for
-  terms (not types) can be fully internalized into the logic: @{text
-  "\<turnstile> B(x)"} and @{text "\<turnstile> \<And>x. B(x)"} are interchangeable, provided
-  that @{text "x"} does not occur elsewhere in the context.
-  Inspecting @{text "\<turnstile> \<And>x. B(x)"} more closely, we see that inside the
-  quantifier, @{text "x"} is essentially ``arbitrary, but fixed'',
-  while from outside it appears as a place-holder for instantiation
-  (thanks to @{text "\<And>"} elimination).
-
-  The Pure logic represents the idea of variables being either inside
-  or outside the current scope by providing separate syntactic
-  categories for \emph{fixed variables} (e.g.\ @{text "x"}) vs.\
-  \emph{schematic variables} (e.g.\ @{text "?x"}).  Incidentally, a
-  universal result @{text "\<turnstile> \<And>x. B(x)"} has the HHF normal form @{text
-  "\<turnstile> B(?x)"}, which represents its generality without requiring an
-  explicit quantifier.  The same principle works for type variables:
-  @{text "\<turnstile> B(?\<alpha>)"} represents the idea of ``@{text "\<turnstile> \<forall>\<alpha>. B(\<alpha>)"}''
-  without demanding a truly polymorphic framework.
-
-  \medskip Additional care is required to treat type variables in a
-  way that facilitates type-inference.  In principle, term variables
-  depend on type variables, which means that type variables would have
-  to be declared first.  For example, a raw type-theoretic framework
-  would demand the context to be constructed in stages as follows:
-  @{text "\<Gamma> = \<alpha>: type, x: \<alpha>, a: A(x\<^sub>\<alpha>)"}.
-
-  We allow a slightly less formalistic mode of operation: term
-  variables @{text "x"} are fixed without specifying a type yet
-  (essentially \emph{all} potential occurrences of some instance
-  @{text "x\<^sub>\<tau>"} are fixed); the first occurrence of @{text "x"}
-  within a specific term assigns its most general type, which is then
-  maintained consistently in the context.  The above example becomes
-  @{text "\<Gamma> = x: term, \<alpha>: type, A(x\<^sub>\<alpha>)"}, where type @{text
-  "\<alpha>"} is fixed \emph{after} term @{text "x"}, and the constraint
-  @{text "x :: \<alpha>"} is an implicit consequence of the occurrence of
-  @{text "x\<^sub>\<alpha>"} in the subsequent proposition.
-
-  This twist of dependencies is also accommodated by the reverse
-  operation of exporting results from a context: a type variable
-  @{text "\<alpha>"} is considered fixed as long as it occurs in some fixed
-  term variable of the context.  For example, exporting @{text "x:
-  term, \<alpha>: type \<turnstile> x\<^sub>\<alpha> \<equiv> x\<^sub>\<alpha>"} produces in the first step @{text "x: term
-  \<turnstile> x\<^sub>\<alpha> \<equiv> x\<^sub>\<alpha>"} for fixed @{text "\<alpha>"}, and only in the second step
-  @{text "\<turnstile> ?x\<^sub>?\<^sub>\<alpha> \<equiv> ?x\<^sub>?\<^sub>\<alpha>"} for schematic @{text "?x"} and @{text "?\<alpha>"}.
-  The following Isar source text illustrates this scenario.
-*}
-
-notepad
-begin
-  {
-    fix x  -- {* all potential occurrences of some @{text "x::\<tau>"} are fixed *}
-    {
-      have "x::'a \<equiv> x"  -- {* implicit type assigment by concrete occurrence *}
-        by (rule reflexive)
-    }
-    thm this  -- {* result still with fixed type @{text "'a"} *}
-  }
-  thm this  -- {* fully general result for arbitrary @{text "?x::?'a"} *}
-end
-
-text {* The Isabelle/Isar proof context manages the details of term
-  vs.\ type variables, with high-level principles for moving the
-  frontier between fixed and schematic variables.
-
-  The @{text "add_fixes"} operation explictly declares fixed
-  variables; the @{text "declare_term"} operation absorbs a term into
-  a context by fixing new type variables and adding syntactic
-  constraints.
-
-  The @{text "export"} operation is able to perform the main work of
-  generalizing term and type variables as sketched above, assuming
-  that fixed variables and terms have been declared properly.
-
-  There @{text "import"} operation makes a generalized fact a genuine
-  part of the context, by inventing fixed variables for the schematic
-  ones.  The effect can be reversed by using @{text "export"} later,
-  potentially with an extended context; the result is equivalent to
-  the original modulo renaming of schematic variables.
-
-  The @{text "focus"} operation provides a variant of @{text "import"}
-  for nested propositions (with explicit quantification): @{text
-  "\<And>x\<^sub>1 \<dots> x\<^sub>n. B(x\<^sub>1, \<dots>, x\<^sub>n)"} is
-  decomposed by inventing fixed variables @{text "x\<^sub>1, \<dots>,
-  x\<^sub>n"} for the body.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Variable.add_fixes: "
-  string list -> Proof.context -> string list * Proof.context"} \\
-  @{index_ML Variable.variant_fixes: "
-  string list -> Proof.context -> string list * Proof.context"} \\
-  @{index_ML Variable.declare_term: "term -> Proof.context -> Proof.context"} \\
-  @{index_ML Variable.declare_constraints: "term -> Proof.context -> Proof.context"} \\
-  @{index_ML Variable.export: "Proof.context -> Proof.context -> thm list -> thm list"} \\
-  @{index_ML Variable.polymorphic: "Proof.context -> term list -> term list"} \\
-  @{index_ML Variable.import: "bool -> thm list -> Proof.context ->
-  (((ctyp * ctyp) list * (cterm * cterm) list) * thm list) * Proof.context"} \\
-  @{index_ML Variable.focus: "term -> Proof.context ->
-  ((string * (string * typ)) list * term) * Proof.context"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
-  variables @{text "xs"}, returning the resulting internal names.  By
-  default, the internal representation coincides with the external
-  one, which also means that the given variables must not be fixed
-  already.  There is a different policy within a local proof body: the
-  given names are just hints for newly invented Skolem variables.
-
-  \item @{ML Variable.variant_fixes} is similar to @{ML
-  Variable.add_fixes}, but always produces fresh variants of the given
-  names.
-
-  \item @{ML Variable.declare_term}~@{text "t ctxt"} declares term
-  @{text "t"} to belong to the context.  This automatically fixes new
-  type variables, but not term variables.  Syntactic constraints for
-  type and term variables are declared uniformly, though.
-
-  \item @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
-  syntactic constraints from term @{text "t"}, without making it part
-  of the context yet.
-
-  \item @{ML Variable.export}~@{text "inner outer thms"} generalizes
-  fixed type and term variables in @{text "thms"} according to the
-  difference of the @{text "inner"} and @{text "outer"} context,
-  following the principles sketched above.
-
-  \item @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
-  variables in @{text "ts"} as far as possible, even those occurring
-  in fixed term variables.  The default policy of type-inference is to
-  fix newly introduced type variables, which is essentially reversed
-  with @{ML Variable.polymorphic}: here the given terms are detached
-  from the context as far as possible.
-
-  \item @{ML Variable.import}~@{text "open thms ctxt"} invents fixed
-  type and term variables for the schematic ones occurring in @{text
-  "thms"}.  The @{text "open"} flag indicates whether the fixed names
-  should be accessible to the user, otherwise newly introduced names
-  are marked as ``internal'' (\secref{sec:names}).
-
-  \item @{ML Variable.focus}~@{text B} decomposes the outermost @{text
-  "\<And>"} prefix of proposition @{text "B"}.
-
-  \end{description}
-*}
-
-text %mlex {* The following example shows how to work with fixed term
-  and type parameters and with type-inference.  *}
-
-ML {*
-  (*static compile-time context -- for testing only*)
-  val ctxt0 = @{context};
-
-  (*locally fixed parameters -- no type assignment yet*)
-  val ([x, y], ctxt1) = ctxt0 |> Variable.add_fixes ["x", "y"];
-
-  (*t1: most general fixed type; t1': most general arbitrary type*)
-  val t1 = Syntax.read_term ctxt1 "x";
-  val t1' = singleton (Variable.polymorphic ctxt1) t1;
-
-  (*term u enforces specific type assignment*)
-  val u = Syntax.read_term ctxt1 "(x::nat) \<equiv> y";
-
-  (*official declaration of u -- propagates constraints etc.*)
-  val ctxt2 = ctxt1 |> Variable.declare_term u;
-  val t2 = Syntax.read_term ctxt2 "x";  (*x::nat is enforced*)
-*}
-
-text {* In the above example, the starting context is derived from the
-  toplevel theory, which means that fixed variables are internalized
-  literally: @{text "x"} is mapped again to @{text "x"}, and
-  attempting to fix it again in the subsequent context is an error.
-  Alternatively, fixed parameters can be renamed explicitly as
-  follows: *}
-
-ML {*
-  val ctxt0 = @{context};
-  val ([x1, x2, x3], ctxt1) =
-    ctxt0 |> Variable.variant_fixes ["x", "x", "x"];
-*}
-
-text {* The following ML code can now work with the invented names of
-  @{text x1}, @{text x2}, @{text x3}, without depending on
-  the details of the system policy for introducing these variants.
-  Recall that within a proof body the system always invents fresh
-  ``skolem constants'', e.g.\ as follows: *}
-
-notepad
-begin
-  ML_prf %"ML" {*
-    val ctxt0 = @{context};
-
-    val ([x1], ctxt1) = ctxt0 |> Variable.add_fixes ["x"];
-    val ([x2], ctxt2) = ctxt1 |> Variable.add_fixes ["x"];
-    val ([x3], ctxt3) = ctxt2 |> Variable.add_fixes ["x"];
-
-    val ([y1, y2], ctxt4) =
-      ctxt3 |> Variable.variant_fixes ["y", "y"];
-  *}
-end
-
-text {* In this situation @{ML Variable.add_fixes} and @{ML
-  Variable.variant_fixes} are very similar, but identical name
-  proposals given in a row are only accepted by the second version.
-  *}
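-
-text {* \medskip As a minimal sketch, the @{text "focus"} operation
-  described above may be exercised directly on a quantified
-  proposition; the concrete proposition is chosen for illustration
-  only: *}
-
-ML {*
-  (*hypothetical sketch: decompose the outermost \<And> prefix*)
-  val ((params, body), ctxt') =
-    Variable.focus @{prop "\<And>x y. x \<equiv> y"} @{context};
-*}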
-
-
-section {* Assumptions \label{sec:assumptions} *}
-
-text {*
-  An \emph{assumption} is a proposition that is postulated in the
-  current context.  Local conclusions may use assumptions as
-  additional facts, but this imposes implicit hypotheses that weaken
-  the overall statement.
-
-  Assumptions are restricted to fixed non-schematic statements, i.e.\
-  all generality needs to be expressed by explicit quantifiers.
-  Nevertheless, the result will be in HHF normal form with outermost
-  quantifiers stripped.  For example, by assuming @{text "\<And>x :: \<alpha>. P
-  x"} we get @{text "\<And>x :: \<alpha>. P x \<turnstile> P ?x"} for schematic @{text "?x"}
-  of fixed type @{text "\<alpha>"}.  Local derivations accumulate more and
-  more explicit references to hypotheses: @{text "A\<^sub>1, \<dots>,
-  A\<^sub>n \<turnstile> B"} where @{text "A\<^sub>1, \<dots>, A\<^sub>n"} needs to
-  be covered by the assumptions of the current context.
-
-  \medskip The @{text "add_assms"} operation augments the context by
-  local assumptions, which are parameterized by an arbitrary @{text
-  "export"} rule (see below).
-
-  The @{text "export"} operation moves facts from a (larger) inner
-  context into a (smaller) outer context, by discharging the
-  difference of the assumptions as specified by the associated export
-  rules.  Note that the discharged portion is determined by the
-  difference of contexts, not the facts being exported!  There is a
-  separate flag to indicate a goal context, where the result is meant
-  to refine an enclosing sub-goal of a structured proof state.
-
-  \medskip The most basic export rule discharges assumptions directly
-  by means of the @{text "\<Longrightarrow>"} introduction rule:
-  \[
-  \infer[(@{text "\<Longrightarrow>\<hyphen>intro"})]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
-  \]
-
-  The variant for goal refinements marks the newly introduced
-  premises, which causes the canonical Isar goal refinement scheme to
-  enforce unification with local premises within the goal:
-  \[
-  \infer[(@{text "#\<Longrightarrow>\<hyphen>intro"})]{@{text "\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
-  \]
-
-  \medskip Alternative versions of assumptions may perform arbitrary
-  transformations on export, as long as the corresponding portion of
-  hypotheses is removed from the given facts.  For example, a local
-  definition works by fixing @{text "x"} and assuming @{text "x \<equiv> t"},
-  with the following export rule to reverse the effect:
-  \[
-  \infer[(@{text "\<equiv>\<hyphen>expand"})]{@{text "\<Gamma> - (x \<equiv> t) \<turnstile> B t"}}{@{text "\<Gamma> \<turnstile> B x"}}
-  \]
-  This works, because the assumption @{text "x \<equiv> t"} was introduced in
-  a context with @{text "x"} being fresh, so @{text "x"} does not
-  occur in @{text "\<Gamma>"} here.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type Assumption.export} \\
-  @{index_ML Assumption.assume: "Proof.context -> cterm -> thm"} \\
-  @{index_ML Assumption.add_assms:
-    "Assumption.export ->
-  cterm list -> Proof.context -> thm list * Proof.context"} \\
-  @{index_ML Assumption.add_assumes: "
-  cterm list -> Proof.context -> thm list * Proof.context"} \\
-  @{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type Assumption.export} represents arbitrary export
-  rules, which is any function of type @{ML_type "bool -> cterm list
-  -> thm -> thm"}, where the @{ML_type "bool"} indicates goal mode,
-  and the @{ML_type "cterm list"} the collection of assumptions to be
-  discharged simultaneously.
-
-  \item @{ML Assumption.assume}~@{text "ctxt A"} turns proposition @{text
-  "A"} into a primitive assumption @{text "A \<turnstile> A'"}, where the
-  conclusion @{text "A'"} is in HHF normal form.
-
-  \item @{ML Assumption.add_assms}~@{text "r As"} augments the context
-  by assumptions @{text "As"} with export rule @{text "r"}.  The
-  resulting facts are hypothetical theorems as produced by the raw
-  @{ML Assumption.assume}.
-
-  \item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
-  @{ML Assumption.add_assms} where the export rule performs @{text
-  "\<Longrightarrow>\<hyphen>intro"} or @{text "#\<Longrightarrow>\<hyphen>intro"}, depending on goal
-  mode.
-
-  \item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
-  exports result @{text "thm"} from the the @{text "inner"} context
-  back into the @{text "outer"} one; @{text "is_goal = true"} means
-  this is a goal context.  The result is in HHF normal form.  Note
-  that @{ML "Proof_Context.export"} combines @{ML "Variable.export"}
-  and @{ML "Assumption.export"} in the canonical way.
-
-  \end{description}
-*}
-
-text %mlex {* The following example demonstrates how rules can be
-  derived by building up a context of assumptions first, and exporting
-  some local fact afterwards.  We refer to @{theory Pure} equality
-  here for testing purposes.
-*}
-
-ML {*
-  (*static compile-time context -- for testing only*)
-  val ctxt0 = @{context};
-
-  val ([eq], ctxt1) =
-    ctxt0 |> Assumption.add_assumes [@{cprop "x \<equiv> y"}];
-  val eq' = Thm.symmetric eq;
-
-  (*back to original context -- discharges assumption*)
-  val r = Assumption.export false ctxt1 ctxt0 eq';
-*}
-
-text {* Note that the variables of the resulting rule are not
-  generalized.  This would have required fixing them properly in the
-  context beforehand, and export wrt.\ variables afterwards (cf.\ @{ML
-  Variable.export} or the combined @{ML "Proof_Context.export"}).  *}
-
-
-section {* Structured goals and results \label{sec:struct-goals} *}
-
-text {*
-  Local results are established by monotonic reasoning from facts
-  within a context.  This allows common combinations of theorems,
-  e.g.\ via @{text "\<And>/\<Longrightarrow>"} elimination, resolution rules, or equational
-  reasoning, see \secref{sec:thms}.  Unaccounted context manipulations
-  should be avoided, notably raw @{text "\<And>/\<Longrightarrow>"} introduction or ad-hoc
-  references to free variables or assumptions not present in the proof
-  context.
-
-  \medskip The @{text "SUBPROOF"} combinator allows to structure a
-  tactical proof recursively by decomposing a selected sub-goal:
-  @{text "(\<And>x. A(x) \<Longrightarrow> B(x)) \<Longrightarrow> \<dots>"} is turned into @{text "B(x) \<Longrightarrow> \<dots>"}
-  after fixing @{text "x"} and assuming @{text "A(x)"}.  This means
-  the tactic needs to solve the conclusion, but may use the premise as
-  a local fact, for locally fixed variables.
-
-  The family of @{text "FOCUS"} combinators is similar to @{text
-  "SUBPROOF"}, but allows to retain schematic variables and pending
-  subgoals in the resulting goal state.
-
-  The @{text "prove"} operation provides an interface for structured
-  backwards reasoning under program control, with some explicit sanity
-  checks of the result.  The goal context can be augmented by
-  additional fixed variables (cf.\ \secref{sec:variables}) and
-  assumptions (cf.\ \secref{sec:assumptions}), which will be available
-  as local facts during the proof and discharged into implications in
-  the result.  Type and term variables are generalized as usual,
-  according to the context.
-
-  The @{text "obtain"} operation produces results by eliminating
-  existing facts by means of a given tactic.  This acts like a dual
-  conclusion: the proof demonstrates that the context may be augmented
-  by parameters and assumptions, without affecting any conclusions
-  that do not mention these parameters.  See also
-  \cite{isabelle-isar-ref} for the user-level @{command obtain} and
-  @{command guess} elements.  Final results, which may not refer to
-  the parameters in the conclusion, need to be exported explicitly into
-  the original context.  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML SUBPROOF: "(Subgoal.focus -> tactic) ->
-  Proof.context -> int -> tactic"} \\
-  @{index_ML Subgoal.FOCUS: "(Subgoal.focus -> tactic) ->
-  Proof.context -> int -> tactic"} \\
-  @{index_ML Subgoal.FOCUS_PREMS: "(Subgoal.focus -> tactic) ->
-  Proof.context -> int -> tactic"} \\
-  @{index_ML Subgoal.FOCUS_PARAMS: "(Subgoal.focus -> tactic) ->
-  Proof.context -> int -> tactic"} \\
-  @{index_ML Subgoal.focus: "Proof.context -> int -> thm -> Subgoal.focus * thm"} \\
-  @{index_ML Subgoal.focus_prems: "Proof.context -> int -> thm -> Subgoal.focus * thm"} \\
-  @{index_ML Subgoal.focus_params: "Proof.context -> int -> thm -> Subgoal.focus * thm"} \\
-  \end{mldecls}
-
-  \begin{mldecls}
-  @{index_ML Goal.prove: "Proof.context -> string list -> term list -> term ->
-  ({prems: thm list, context: Proof.context} -> tactic) -> thm"} \\
-  @{index_ML Goal.prove_multi: "Proof.context -> string list -> term list -> term list ->
-  ({prems: thm list, context: Proof.context} -> tactic) -> thm list"} \\
-  \end{mldecls}
-  \begin{mldecls}
-  @{index_ML Obtain.result: "(Proof.context -> tactic) -> thm list ->
-  Proof.context -> ((string * cterm) list * thm list) * Proof.context"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
-  of the specified sub-goal, producing an extended context and a
-  reduced goal, which needs to be solved by the given tactic.  All
-  schematic parameters of the goal are imported into the context as
-  fixed ones, which may not be instantiated in the sub-proof.
-
-  \item @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
-  Subgoal.FOCUS_PARAMS} are similar to @{ML SUBPROOF}, but are
-  slightly more flexible: only the specified parts of the subgoal are
-  imported into the context, and the body tactic may introduce new
-  subgoals and schematic variables.
-
-  \item @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML
-  Subgoal.focus_params} extract the focus information from a goal
-  state in the same way as the corresponding tacticals above.  This is
-  occasionally useful to experiment without writing actual tactics
-  yet.
-
-  \item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
-  "C"} in the context augmented by fixed variables @{text "xs"} and
-  assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
-  it.  The latter may depend on the local assumptions being presented
-  as facts.  The result is in HHF normal form.
-
-  \item @{ML Goal.prove_multi} is similar to @{ML Goal.prove}, but
-  states several conclusions simultaneously.  The goal is encoded by
-  means of Pure conjunction; @{ML Goal.conjunction_tac} will turn this
-  into a collection of individual subgoals.
-
-  \item @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
-  given facts using a tactic, which results in additional fixed
-  variables and assumptions in the context.  Final results need to be
-  exported explicitly.
-
-  \end{description}
-*}
-
-text %mlex {* The following minimal example illustrates how to access
-  the focus information of a structured goal state. *}
-
-notepad
-begin
-  fix A B C :: "'a \<Rightarrow> bool"
-
-  have "\<And>x. A x \<Longrightarrow> B x \<Longrightarrow> C x"
-    ML_val
-    {*
-      val {goal, context = goal_ctxt, ...} = @{Isar.goal};
-      val (focus as {params, asms, concl, ...}, goal') =
-        Subgoal.focus goal_ctxt 1 goal;
-      val [A, B] = #prems focus;
-      val [(_, x)] = #params focus;
-    *}
-    oops
-
-text {* \medskip The next example demonstrates forward-elimination in
-  a local context, using @{ML Obtain.result}.  *}
-
-notepad
-begin
-  assume ex: "\<exists>x. B x"
-
-  ML_prf %"ML" {*
-    val ctxt0 = @{context};
-    val (([(_, x)], [B]), ctxt1) = ctxt0
-      |> Obtain.result (fn _ => etac @{thm exE} 1) [@{thm ex}];
-  *}
-  ML_prf %"ML" {*
-    singleton (Proof_Context.export ctxt1 ctxt0) @{thm refl};
-  *}
-  ML_prf %"ML" {*
-    Proof_Context.export ctxt1 ctxt0 [Thm.reflexive x]
-      handle ERROR msg => (warning msg; []);
-  *}
-end
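-
-text {* \medskip The following sketch uses @{ML Goal.prove} to state a
-  goal with one assumption and to solve it under program control; the
-  concrete statement is chosen for illustration only: *}
-
-ML {*
-  (*hypothetical sketch: prove "x \<equiv> y \<Longrightarrow> y \<equiv> x" via Goal.prove*)
-  val thm =
-    Goal.prove @{context} ["x", "y"] [@{prop "x \<equiv> y"}] @{prop "y \<equiv> x"}
-      (fn {prems = [eq], ...} => resolve_tac [Thm.symmetric eq] 1);
-*}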
-
-end
--- a/src/Doc/IsarImplementation/Syntax.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,151 +0,0 @@
-theory Syntax
-imports Base
-begin
-
-chapter {* Concrete syntax and type-checking *}
-
-text {* Pure @{text "\<lambda>"}-calculus as introduced in \chref{ch:logic} is
-  an adequate foundation for logical languages --- in the tradition of
-  \emph{higher-order abstract syntax} --- but end-users require
-  additional means for reading and printing of terms and types.  This
-  important add-on outside the logical core is called \emph{inner
-  syntax} in Isabelle jargon, as opposed to the \emph{outer syntax} of
-  the theory and proof language (cf.\ \cite{isabelle-isar-ref}).
-
-  For example, according to \cite{church40} quantifiers are
-  represented as higher-order constants @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow>
-  bool"} such that @{text "All (\<lambda>x::'a. B x)"} faithfully represents
-  the idea that is displayed as @{text "\<forall>x::'a. B x"} via @{keyword
-  "binder"} notation.  Moreover, type-inference in the style of
-  Hindley-Milner \cite{hindleymilner} (and extensions) enables users
-  to write @{text "\<forall>x. B x"} concisely, when the type @{text "'a"} is
-  already clear from the context.\footnote{Type-inference taken to the
-  extreme can easily confuse users, though.  Beginners often stumble
-  over unexpectedly general types inferred by the system.}
-
-  \medskip The main inner syntax operations are \emph{read} for
-  parsing together with type-checking, and \emph{pretty} for formatted
-  output.  See also \secref{sec:read-print}.
-
-  Furthermore, the input and output syntax layers are sub-divided into
-  separate phases for \emph{concrete syntax} versus \emph{abstract
-  syntax}, see also \secref{sec:parse-unparse} and
-  \secref{sec:term-check}, respectively.  This results in the
-  following decomposition of the main operations:
-
-  \begin{itemize}
-
-  \item @{text "read = parse; check"}
-
-  \item @{text "pretty = uncheck; unparse"}
-
-  \end{itemize}
-
-  Some specification package might thus intercept syntax processing at
-  a well-defined stage after @{text "parse"}, to augment the
-  resulting pre-term before full type-reconstruction is performed by
-  @{text "check"}, for example.  Note that the formal status of bound
-  variables, versus free variables, versus constants must not be
-  changed here! *}
-
-
-section {* Reading and pretty printing \label{sec:read-print} *}
-
-text {* Read and print operations are roughly dual to each other, such
-  that for the user @{text "s' = pretty (read s)"} looks similar to
-  the original source text @{text "s"}, but the details depend on many
-  side-conditions.  There are also explicit options to control
-  suppressing of type information in the output.  The default
-  configuration routinely loses information, so @{text "t' = read
-  (pretty t)"} might fail, produce a differently typed term, or a
-  completely different term in the face of syntactic overloading!  *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Syntax.read_typ: "Proof.context -> string -> typ"} \\
-  @{index_ML Syntax.read_term: "Proof.context -> string -> term"} \\
-  @{index_ML Syntax.read_prop: "Proof.context -> string -> term"} \\
-  @{index_ML Syntax.pretty_typ: "Proof.context -> typ -> Pretty.T"} \\
-  @{index_ML Syntax.pretty_term: "Proof.context -> term -> Pretty.T"} \\
-  \end{mldecls}
-
-  %FIXME description
-*}
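-
-text %mlex {* A minimal round-trip sketch for illustration: a term is
-  read from source text and pretty-printed again in the same context. *}
-
-ML {*
-  (*hypothetical sketch: read, then pretty-print*)
-  val t = Syntax.read_term @{context} "\<lambda>x::nat. x";
-  val s = Pretty.string_of (Syntax.pretty_term @{context} t);
-*}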
-
-
-section {* Parsing and unparsing \label{sec:parse-unparse} *}
-
-text {* Parsing and unparsing converts between actual source text and
-  a certain \emph{pre-term} format, where all bindings and scopes are
-  resolved faithfully.  Thus the names of free variables or constants
-  are already determined in the sense of the logical context, but type
-  information might still be missing.  Pre-terms support an explicit
-  language of \emph{type constraints} that may be augmented by user
-  code to guide the later \emph{check} phase, for example.
-
-  Actual parsing is based on traditional lexical analysis and Earley
-  parsing for arbitrary context-free grammars.  The user can specify
-  this via mixfix annotations.  Moreover, there are \emph{syntax
-  translations} that can be augmented by the user, either
-  declaratively via @{command translations} or programmatically via
-  @{command parse_translation}, @{command print_translation} etc.  The
-  final scope resolution is performed by the system, according to name
-  spaces for types, constants etc.\ determined by the context.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Syntax.parse_typ: "Proof.context -> string -> typ"} \\
-  @{index_ML Syntax.parse_term: "Proof.context -> string -> term"} \\
-  @{index_ML Syntax.parse_prop: "Proof.context -> string -> term"} \\
-  @{index_ML Syntax.unparse_typ: "Proof.context -> typ -> Pretty.T"} \\
-  @{index_ML Syntax.unparse_term: "Proof.context -> term -> Pretty.T"} \\
-  \end{mldecls}
-
-  %FIXME description
-*}
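-
-text %mlex {* The following sketch, for illustration only, parses
-  source text into a pre-term without type-checking; the free
-  variables remain undeclared: *}
-
-ML {*
-  (*hypothetical sketch: parsing alone yields a pre-term*)
-  val pre_term = Syntax.parse_term @{context} "x \<equiv> y";
-*}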
-
-
-section {* Checking and unchecking \label{sec:term-check} *}
-
-text {* These operations define the transition from pre-terms to
-  fully-annotated terms in the sense of the logical core
-  (\chref{ch:logic}).
-
-  The \emph{check} phase is meant to subsume a variety of mechanisms
-  in the manner of ``type-inference'' or ``type-reconstruction'' or
-  ``type-improvement'', not just type-checking in the narrow sense.
-  The \emph{uncheck} phase is roughly dual: it prunes type-information
-  before pretty printing.
-
-  A typical add-on for the check/uncheck syntax layer is the @{command
-  abbreviation} mechanism.  Here the user specifies syntactic
-  definitions that are managed by the system as polymorphic @{text
-  "let"} bindings.  These are expanded during the @{text "check"}
-  phase, and contracted during the @{text "uncheck"} phase, without
-  affecting the type-assignment of the given terms.
-
-  \medskip The precise meaning of type checking depends on the context
-  --- additional check/uncheck plugins might be defined in user space!
-
-  For example, the @{command class} command defines a context where
-  @{text "check"} treats certain type instances of overloaded
-  constants according to the ``dictionary construction'' of its
-  logical foundation.  This involves ``type improvement''
-  (specialization of slightly too general types) and replacement by
-  certain locale parameters.  See also \cite{Haftmann-Wenzel:2009}.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Syntax.check_typs: "Proof.context -> typ list -> typ list"} \\
-  @{index_ML Syntax.check_terms: "Proof.context -> term list -> term list"} \\
-  @{index_ML Syntax.check_props: "Proof.context -> term list -> term list"} \\
-  @{index_ML Syntax.uncheck_typs: "Proof.context -> typ list -> typ list"} \\
-  @{index_ML Syntax.uncheck_terms: "Proof.context -> term list -> term list"} \\
-  \end{mldecls}
-
-  %FIXME description
-*}
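-
-text %mlex {* A minimal sketch of the decomposition
-  @{text "read = parse; check"} mentioned above, composing the two
-  phases explicitly: *}
-
-ML {*
-  (*hypothetical sketch: parse, then check -- together equivalent to read*)
-  val t =
-    Syntax.parse_term @{context} "x \<equiv> y"
-    |> singleton (Syntax.check_terms @{context});
-*}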
-
-end
--- a/src/Doc/IsarImplementation/Tactic.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,939 +0,0 @@
-theory Tactic
-imports Base
-begin
-
-chapter {* Tactical reasoning *}
-
-text {* Tactical reasoning works by refining an initial claim in a
-  backwards fashion, until a solved form is reached.  A @{text "goal"}
-  consists of several subgoals that need to be solved in order to
-  achieve the main statement; zero subgoals means that the proof may
-  be finished.  A @{text "tactic"} is a refinement operation that maps
-  a goal to a lazy sequence of potential successors.  A @{text
-  "tactical"} is a combinator for composing tactics.  *}
-
-
-section {* Goals \label{sec:tactical-goals} *}
-
-text {*
-  Isabelle/Pure represents a goal as a theorem stating that the
-  subgoals imply the main goal: @{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow>
-  C"}.  The outermost goal structure is that of a Horn Clause: i.e.\
-  an iterated implication without any quantifiers\footnote{Recall that
-  outermost @{text "\<And>x. \<phi>[x]"} is always represented via schematic
-  variables in the body: @{text "\<phi>[?x]"}.  These variables may get
-  instantiated during the course of reasoning.}.  For @{text "n = 0"}
-  a goal is called ``solved''.
-
-  The structure of each subgoal @{text "A\<^sub>i"} is that of a
-  general Hereditary Harrop Formula @{text "\<And>x\<^sub>1 \<dots>
-  \<And>x\<^sub>k. H\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> H\<^sub>m \<Longrightarrow> B"}.  Here @{text
-  "x\<^sub>1, \<dots>, x\<^sub>k"} are goal parameters, i.e.\
-  arbitrary-but-fixed entities of certain types, and @{text
-  "H\<^sub>1, \<dots>, H\<^sub>m"} are goal hypotheses, i.e.\ facts that may
-  be assumed locally.  Together, this forms the goal context of the
-  conclusion @{text B} to be established.  The goal hypotheses may be
-  again arbitrary Hereditary Harrop Formulas, although the level of
-  nesting rarely exceeds 1--2 in practice.
-
-  The main conclusion @{text C} is internally marked as a protected
-  proposition, which is represented explicitly by the notation @{text
-  "#C"} here.  This ensures that the decomposition into subgoals and
-  main conclusion is well-defined for arbitrarily structured claims.
-
-  \medskip Basic goal management is performed via the following
-  Isabelle/Pure rules:
-
-  \[
-  \infer[@{text "(init)"}]{@{text "C \<Longrightarrow> #C"}}{} \qquad
-  \infer[@{text "(finish)"}]{@{text "C"}}{@{text "#C"}}
-  \]
-
-  \medskip The following low-level variants admit general reasoning
-  with protected propositions:
-
-  \[
-  \infer[@{text "(protect n)"}]{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> #C"}}{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> C"}}
-  \]
-  \[
-  \infer[@{text "(conclude)"}]{@{text "A \<Longrightarrow> \<dots> \<Longrightarrow> C"}}{@{text "A \<Longrightarrow> \<dots> \<Longrightarrow> #C"}}
-  \]
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML Goal.init: "cterm -> thm"} \\
-  @{index_ML Goal.finish: "Proof.context -> thm -> thm"} \\
-  @{index_ML Goal.protect: "int -> thm -> thm"} \\
-  @{index_ML Goal.conclude: "thm -> thm"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML "Goal.init"}~@{text C} initializes a tactical goal from
-  the well-formed proposition @{text C}.
-
-  \item @{ML "Goal.finish"}~@{text "ctxt thm"} checks whether theorem
-  @{text "thm"} is a solved goal (no subgoals), and concludes the
-  result by removing the goal protection.  The context is only
-  required for printing error messages.
-
-  \item @{ML "Goal.protect"}~@{text "n thm"} protects the statement
-  of theorem @{text "thm"}.  The parameter @{text n} indicates the
-  number of premises to be retained.
-
-  \item @{ML "Goal.conclude"}~@{text "thm"} removes the goal
-  protection, even if there are pending subgoals.
-
-  \end{description}
-*}
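-
-text %mlex {* The following sketch runs through the basic goal
-  life-cycle: initialize, refine by a tactic, and finish.  The concrete
-  statement and the use of @{ML resolve_tac} (see
-  \secref{sec:resolve-assume-tac}) are chosen for illustration only: *}
-
-ML {*
-  (*hypothetical sketch: init -- refine -- finish*)
-  val st0 = Goal.init @{cprop "x \<equiv> x"};
-  val st1 = Seq.hd (resolve_tac [@{thm reflexive}] 1 st0);
-  val res = Goal.finish @{context} st1;
-*}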
-
-
-section {* Tactics\label{sec:tactics} *}
-
-text {* A @{text "tactic"} is a function @{text "goal \<rightarrow> goal\<^sup>*\<^sup>*"} that
-  maps a given goal state (represented as a theorem, cf.\
-  \secref{sec:tactical-goals}) to a lazy sequence of potential
-  successor states.  The underlying sequence implementation is lazy
-  both in head and tail, and is purely functional in \emph{not}
-  supporting memoing.\footnote{The lack of memoing and the strict
-  nature of SML requires some care when working with low-level
-  sequence operations, to avoid duplicate or premature evaluation of
-  results.  It also means that modified runtime behavior, such as
-  timeout, is very hard to achieve for general tactics.}
-
-  An \emph{empty result sequence} means that the tactic has failed: in
-  a compound tactic expression other tactics might be tried instead,
-  or the whole refinement step might fail outright, producing a
-  toplevel error message in the end.  When implementing tactics from
-  scratch, one should take care to observe the basic protocol of
-  mapping regular error conditions to an empty result; only serious
-  faults should emerge as exceptions.
-
-  By enumerating \emph{multiple results}, a tactic can easily express
-  the potential outcome of an internal search process.  There are also
-  combinators for building proof tools that involve search
-  systematically, see also \secref{sec:tacticals}.
-
-  \medskip As explained before, a goal state essentially consists of a
-  list of subgoals that imply the main goal (conclusion).  Tactics may
-  operate on all subgoals or on a specific subgoal, but
-  must not change the main conclusion (apart from instantiating
-  schematic goal variables).
-
-  Tactics with explicit \emph{subgoal addressing} are of the form
-  @{text "int \<rightarrow> tactic"} and may be applied to a particular subgoal
-  (counting from 1).  If the subgoal number is out of range, the
-  tactic should fail with an empty result sequence, but must not raise
-  an exception!
-
-  Operating on a particular subgoal means to replace it by an interval
-  of zero or more subgoals in the same place; other subgoals must not
-  be affected, apart from instantiating schematic variables ranging
-  over the whole goal state.
-
-  A common pattern of composing tactics with subgoal addressing is to
-  try the first one, and then the second one only if the subgoal has
-  not been solved yet.  Special care is required here to avoid bumping
-  into unrelated subgoals that happen to come after the original
-  subgoal.  Assuming that there is only a single initial subgoal is a
-  very common error when implementing tactics!
-
-  Tactics with internal subgoal addressing should expose the subgoal
-  index as @{text "int"} argument in full generality; a hardwired
-  subgoal 1 is not acceptable.
-  
-  \medskip The main well-formedness conditions for proper tactics are
-  summarized as follows.
-
-  \begin{itemize}
-
-  \item General tactic failure is indicated by an empty result, only
-  serious faults may produce an exception.
-
-  \item The main conclusion must not be changed, apart from
-  instantiating schematic variables.
-
-  \item A tactic operates either uniformly on all subgoals, or
-  specifically on a selected subgoal (without bumping into unrelated
-  subgoals).
-
-  \item Range errors in subgoal addressing produce an empty result.
-
-  \end{itemize}
-
-  Some of these conditions are checked by higher-level goal
-  infrastructure (\secref{sec:struct-goals}); others are not checked
-  explicitly, and violating them merely results in ill-behaved tactics
-  experienced by the user (e.g.\ tactics that insist on being
-  applicable only to singleton goals, or prevent composition via
-  standard tacticals such as @{ML REPEAT}).
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_type tactic: "thm -> thm Seq.seq"} \\
-  @{index_ML no_tac: tactic} \\
-  @{index_ML all_tac: tactic} \\
-  @{index_ML print_tac: "string -> tactic"} \\[1ex]
-  @{index_ML PRIMITIVE: "(thm -> thm) -> tactic"} \\[1ex]
-  @{index_ML SUBGOAL: "(term * int -> tactic) -> int -> tactic"} \\
-  @{index_ML CSUBGOAL: "(cterm * int -> tactic) -> int -> tactic"} \\
-  @{index_ML SELECT_GOAL: "tactic -> int -> tactic"} \\
-  @{index_ML PREFER_GOAL: "tactic -> int -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item Type @{ML_type tactic} represents tactics.  The
-  well-formedness conditions described above need to be observed.  See
-  also @{file "~~/src/Pure/General/seq.ML"} for the underlying
-  implementation of lazy sequences.
-
-  \item Type @{ML_type "int -> tactic"} represents tactics with
-  explicit subgoal addressing, with well-formedness conditions as
-  described above.
-
-  \item @{ML no_tac} is a tactic that always fails, returning the
-  empty sequence.
-
-  \item @{ML all_tac} is a tactic that always succeeds, returning a
-  singleton sequence with unchanged goal state.
-
-  \item @{ML print_tac}~@{text "message"} is like @{ML all_tac}, but
-  prints a message together with the goal state on the tracing
-  channel.
-
-  \item @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
-  into a tactic with unique result.  Exception @{ML THM} is considered
-  a regular tactic failure and produces an empty result; other
-  exceptions are passed through.
-
-  \item @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
-  most basic form to produce a tactic with subgoal addressing.  The
-  given abstraction over the subgoal term and subgoal number allows one to
-  peek at the relevant information of the full goal state.  The
-  subgoal range is checked as required above.
-
-  \item @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
-  subgoal as @{ML_type cterm} instead of raw @{ML_type term}.  This
-  avoids expensive re-certification in situations where the subgoal is
-  used directly for primitive inferences.
-
-  \item @{ML SELECT_GOAL}~@{text "tac i"} confines a tactic to the
-  specified subgoal @{text "i"}.  This rearranges subgoals and the
-  main goal protection (\secref{sec:tactical-goals}), while retaining
-  the syntactic context of the overall goal state (concerning
-  schematic variables etc.).
-
-  \item @{ML PREFER_GOAL}~@{text "tac i"} rearranges subgoals to put
-  @{text "i"} in front.  This is similar to @{ML SELECT_GOAL}, but
-  without changing the main goal protection.
-
-  \end{description}
-*}
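-
-text %mlex {* The following sketch uses @{ML SUBGOAL} to build a
-  tactic that merely traces the selected subgoal on the tracing
-  channel and then succeeds: *}
-
-ML {*
-  (*hypothetical sketch: peek at a subgoal without changing it*)
-  fun trace_subgoal_tac ctxt =
-    SUBGOAL (fn (goal, _) =>
-      (tracing (Syntax.string_of_term ctxt goal); all_tac));
-*}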
-
-
-subsection {* Resolution and assumption tactics \label{sec:resolve-assume-tac} *}
-
-text {* \emph{Resolution} is the most basic mechanism for refining a
-  subgoal using a theorem as object-level rule.
-  \emph{Elim-resolution} is particularly suited for elimination rules:
-  it resolves with a rule, proves its first premise by assumption, and
-  finally deletes that assumption from any new subgoals.
-  \emph{Destruct-resolution} is like elim-resolution, but the given
-  destruction rules are first turned into canonical elimination
-  format.  \emph{Forward-resolution} is like destruct-resolution, but
-  without deleting the selected assumption.  The @{text "r/e/d/f"}
-  naming convention is maintained for several different kinds of
-  resolution rules and tactics.
-
-  Assumption tactics close a subgoal by unifying some of its premises
-  against its conclusion.
-
-  \medskip All the tactics in this section operate on a subgoal
-  designated by a positive integer.  Other subgoals might be affected
-  indirectly, due to instantiation of schematic variables.
-
-  There are various sources of non-determinism, the tactic result
-  sequence enumerates all possibilities of the following choices (if
-  applicable):
-
-  \begin{enumerate}
-
-  \item selecting one of the rules given as argument to the tactic;
-
-  \item selecting a subgoal premise to eliminate, unifying it against
-  the first premise of the rule;
-
-  \item unifying the conclusion of the subgoal to the conclusion of
-  the rule.
-
-  \end{enumerate}
-
-  Recall that higher-order unification may produce multiple results
-  that are enumerated here.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML resolve_tac: "thm list -> int -> tactic"} \\
-  @{index_ML eresolve_tac: "thm list -> int -> tactic"} \\
-  @{index_ML dresolve_tac: "thm list -> int -> tactic"} \\
-  @{index_ML forward_tac: "thm list -> int -> tactic"} \\
-  @{index_ML biresolve_tac: "(bool * thm) list -> int -> tactic"} \\[1ex]
-  @{index_ML assume_tac: "int -> tactic"} \\
-  @{index_ML eq_assume_tac: "int -> tactic"} \\[1ex]
-  @{index_ML match_tac: "thm list -> int -> tactic"} \\
-  @{index_ML ematch_tac: "thm list -> int -> tactic"} \\
-  @{index_ML dmatch_tac: "thm list -> int -> tactic"} \\
-  @{index_ML bimatch_tac: "(bool * thm) list -> int -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML resolve_tac}~@{text "thms i"} refines the goal state
-  using the given theorems, which should normally be introduction
-  rules.  The tactic resolves a rule's conclusion with subgoal @{text
-  i}, replacing it by the corresponding versions of the rule's
-  premises.
-
-  \item @{ML eresolve_tac}~@{text "thms i"} performs elim-resolution
-  with the given theorems, which should normally be elimination rules.
-
-  Note that @{ML "eresolve_tac [asm_rl]"} is equivalent to @{ML
-  assume_tac}, which facilitates mixing of assumption steps with
-  genuine eliminations.
-
-  \item @{ML dresolve_tac}~@{text "thms i"} performs
-  destruct-resolution with the given theorems, which should normally
-  be destruction rules.  This replaces an assumption by the result of
-  applying one of the rules.
-
-  \item @{ML forward_tac} is like @{ML dresolve_tac} except that the
-  selected assumption is not deleted.  It applies a rule to an
-  assumption, adding the result as a new assumption.
-
-  \item @{ML biresolve_tac}~@{text "brls i"} refines the proof state
-  by resolution or elim-resolution on each rule, as indicated by its
-  flag.  It affects subgoal @{text "i"} of the proof state.
-
-  For each pair @{text "(flag, rule)"}, it applies resolution if the
-  flag is @{text "false"} and elim-resolution if the flag is @{text
-  "true"}.  A single tactic call handles a mixture of introduction and
-  elimination rules, which is useful to organize the search process
-  systematically in proof tools.
-
-  \item @{ML assume_tac}~@{text i} attempts to solve subgoal @{text i}
-  by assumption (modulo higher-order unification).
-
-  \item @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
-  only for immediate @{text "\<alpha>"}-convertibility instead of using
-  unification.  It succeeds (with a unique next state) if one of the
-  assumptions is equal to the subgoal's conclusion.  Since it does not
-  instantiate variables, it cannot make other subgoals unprovable.
-
-  \item @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
-  bimatch_tac} are similar to @{ML resolve_tac}, @{ML eresolve_tac},
-  @{ML dresolve_tac}, and @{ML biresolve_tac}, respectively, but do
-  not instantiate schematic variables in the goal state.%
-\footnote{Strictly speaking, matching means to treat the unknowns in the goal
-  state as constants, but these tactics merely discard unifiers that would
-  update the goal state. In rare situations (where the conclusion and 
-  goal state have flexible terms at the same position), the tactic
-  will fail even though an acceptable unifier exists.}
-  These tactics were written for a specific application within the classical reasoner.
-
-  Flexible subgoals are not updated at will, but are left alone.
-  \end{description}
-*}
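-
-text %mlex {* As a small sketch, introduction and assumption steps may
-  be chained; the HOL rule @{text "conjI"} serves for illustration
-  only: *}
-
-ML {*
-  (*hypothetical sketch: split a conjunction, close both parts by assumption*)
-  fun conj_assume_tac i =
-    resolve_tac [@{thm conjI}] i THEN assume_tac i THEN assume_tac i;
-*}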
-
-
-subsection {* Explicit instantiation within a subgoal context *}
-
-text {* The main resolution tactics (\secref{sec:resolve-assume-tac})
-  use higher-order unification, which works well in many practical
-  situations despite its daunting theoretical properties.
-  Nonetheless, there are important problem classes where unguided
-  higher-order unification is not so useful.  This typically involves
-  rules like universal elimination, existential introduction, or
-  equational substitution.  Here the unification problem involves
-  fully flexible @{text "?P ?x"} schemes, which are hard to manage
-  without further hints.
-
-  By providing a (small) rigid term for @{text "?x"} explicitly, the
-  remaining unification problem is to assign a (large) term to @{text
-  "?P"}, according to the shape of the given subgoal.  This is
-  sufficiently well-behaved in most practical situations.
-
-  \medskip Isabelle provides separate versions of the standard @{text
-  "r/e/d/f"} resolution tactics that allow to provide explicit
-  instantiations of unknowns of the given rule, wrt.\ terms that refer
-  to the implicit context of the selected subgoal.
-
-  An instantiation consists of a list of pairs of the form @{text
-  "(?x, t)"}, where @{text ?x} is a schematic variable occurring in
-  the given rule, and @{text t} is a term from the current proof
-  context, augmented by the local goal parameters of the selected
-  subgoal; cf.\ the @{text "focus"} operation described in
-  \secref{sec:variables}.
-
-  Entering the syntactic context of a subgoal is a brittle operation,
-  because its exact form is somewhat accidental, and the choice of
-  bound variable names depends on the presence of other local and
-  global names.  Explicit renaming of subgoal parameters prior to
-  explicit instantiation might help to achieve a bit more robustness.
-
-  Type instantiations may be given as well, via pairs like @{text
-  "(?'a, \<tau>)"}.  Type instantiations are distinguished from term
-  instantiations by the syntactic form of the schematic variable.
-  Types are instantiated before terms are.  Since term instantiation
-  already performs simple type-inference, explicit type
-  instantiations are seldom necessary.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML res_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
-  @{index_ML eres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
-  @{index_ML dres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
-  @{index_ML forw_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
-  @{index_ML subgoal_tac: "Proof.context -> string -> int -> tactic"} \\
-  @{index_ML thin_tac: "Proof.context -> string -> int -> tactic"} \\
-  @{index_ML rename_tac: "string list -> int -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
-  rule @{text thm} with the instantiations @{text insts}, as described
-  above, and then performs resolution on subgoal @{text i}.
-  
-  \item @{ML eres_inst_tac} is like @{ML res_inst_tac}, but performs
-  elim-resolution.
-
-  \item @{ML dres_inst_tac} is like @{ML res_inst_tac}, but performs
-  destruct-resolution.
-
-  \item @{ML forw_inst_tac} is like @{ML dres_inst_tac} except that
-  the selected assumption is not deleted.
-
-  \item @{ML subgoal_tac}~@{text "ctxt \<phi> i"} adds the proposition
-  @{text "\<phi>"} as local premise to subgoal @{text "i"}, and poses the
-  same as a new subgoal @{text "i + 1"} (in the original context).
-
-  \item @{ML thin_tac}~@{text "ctxt \<phi> i"} deletes the specified
-  premise from subgoal @{text i}.  Note that @{text \<phi>} may contain
-  schematic variables, to abbreviate the intended proposition; the
-  first matching subgoal premise will be deleted.  Removing useless
-  premises from a subgoal increases its readability and can make
-  search tactics run faster.
-
-  \item @{ML rename_tac}~@{text "names i"} renames the innermost
-  parameters of subgoal @{text i} according to the provided @{text
-  names} (which need to be distinct identifiers).
-
-  \end{description}
-
-  For historical reasons, the above instantiation tactics take
-  unparsed string arguments, which makes them hard to use in general
-  ML code.  The slightly more advanced @{ML Subgoal.FOCUS} combinator
-  of \secref{sec:struct-goals} allows one to refer to internal goal
-  structure with explicit context management.
-*}
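-
-text %mlex {* A small sketch, for illustration only: providing an
-  explicit witness for existential introduction, assuming the HOL rule
-  @{text "exI"} with unknown @{text "?x"}: *}
-
-ML {*
-  (*hypothetical sketch: instantiate ?x of exI before resolution*)
-  fun ex_witness_tac ctxt witness i =
-    res_inst_tac ctxt [(("x", 0), witness)] @{thm exI} i;
-*}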
-
-
-subsection {* Rearranging goal states *}
-
-text {* In rare situations there is a need to rearrange goal states:
-  either the overall collection of subgoals, or the local structure of
-  a subgoal.  Various administrative tactics allow one to operate on the
-  concrete presentation of these conceptual sets of formulae. *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML rotate_tac: "int -> int -> tactic"} \\
-  @{index_ML distinct_subgoals_tac: tactic} \\
-  @{index_ML flexflex_tac: tactic} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML rotate_tac}~@{text "n i"} rotates the premises of subgoal
-  @{text i} by @{text n} positions: from right to left if @{text n} is
-  positive, and from left to right if @{text n} is negative.
-
-  \item @{ML distinct_subgoals_tac} removes duplicate subgoals from a
-  proof state.  This is potentially inefficient.
-
-  \item @{ML flexflex_tac} removes all flex-flex pairs from the proof
-  state by applying the trivial unifier.  This drastic step loses
-  information.  It is already part of the Isar infrastructure for
-  facts resulting from goals, and rarely needs to be invoked manually.
-
-  Flex-flex constraints arise from difficult cases of higher-order
-  unification.  To prevent this, use @{ML res_inst_tac} to instantiate
-  some variables in a rule.  Normally flex-flex constraints can be
-  ignored; they often disappear as unknowns get instantiated.
-
-  \end{description}
-*}
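-
-text %mlex {* A trivial sketch of premise rotation, for illustration
-  only: *}
-
-ML {*
-  (*hypothetical sketch: rotate the premises of subgoal 1 by one position*)
-  val rotate1_tac = rotate_tac 1 1;
-*}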
-
-
-subsection {* Raw composition: resolution without lifting *}
-
-text {*
-  Raw composition of two rules means resolving them without prior
-  lifting or renaming of unknowns.  This low-level operation, which
-  underlies the resolution tactics, may occasionally be useful for
-  special effects.  Schematic variables are not renamed by default, so
-  beware of clashes!
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML compose_tac: "(bool * thm * int) -> int -> tactic"} \\
-  @{index_ML Drule.compose: "thm * int * thm -> thm"} \\
-  @{index_ML_op COMP: "thm * thm -> thm"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML compose_tac}~@{text "(flag, rule, m) i"} refines subgoal
-  @{text "i"} using @{text "rule"}, without lifting.  The @{text
-  "rule"} is taken to have the form @{text "\<psi>\<^sub>1 \<Longrightarrow> \<dots> \<psi>\<^sub>m \<Longrightarrow> \<psi>"}, where
-  @{text "\<psi>"} need not be atomic; thus @{text "m"} determines the
-  number of new subgoals.  If @{text "flag"} is @{text "true"} then it
-  performs elim-resolution --- it solves the first premise of @{text
-  "rule"} by assumption and deletes that assumption.
-
-  \item @{ML Drule.compose}~@{text "(thm\<^sub>1, i, thm\<^sub>2)"} uses @{text "thm\<^sub>1"},
-  regarded as an atomic formula, to solve premise @{text "i"} of
-  @{text "thm\<^sub>2"}.  Let @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} be @{text
-  "\<psi>"} and @{text "\<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>"}.  The unique @{text "s"} that
-  unifies @{text "\<psi>"} and @{text "\<phi>\<^sub>i"} yields the theorem @{text "(\<phi>\<^sub>1 \<Longrightarrow>
-  \<dots> \<phi>\<^sub>i\<^sub>-\<^sub>1 \<Longrightarrow> \<phi>\<^sub>i\<^sub>+\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>)s"}.  Multiple results are considered an
-  error (exception @{ML THM}).
-
-  \item @{text "thm\<^sub>1 COMP thm\<^sub>2"} is the same as @{text "Drule.compose
-  (thm\<^sub>1, 1, thm\<^sub>2)"}.
-
-  \end{description}
-
-  \begin{warn}
-  These low-level operations are stepping outside the structure
-  imposed by regular rule resolution.  Used without understanding of
-  the consequences, they may produce results that cause problems with
-  standard rules and tactics later on.
-  \end{warn}
-*}
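-
-text %mlex {* A minimal sketch of raw composition, assuming the HOL
-  facts @{text "refl"} and @{text "sym"} for illustration: *}
-
-ML {*
-  (*hypothetical sketch: refl solves the premise of sym, yielding "?t = ?t"*)
-  val thm = @{thm refl} COMP @{thm sym};
-*}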
-
-
-section {* Tacticals \label{sec:tacticals} *}
-
-text {* A \emph{tactical} is a functional combinator for building up
-  complex tactics from simpler ones.  Common tacticals perform
-  sequential composition, disjunctive choice, iteration, or goal
-  addressing.  Various search strategies may be expressed via
-  tacticals.
-*}
-
-
-subsection {* Combining tactics *}
-
-text {* Sequential composition and alternative choices are the most
-  basic ways to combine tactics, similarly to ``@{verbatim ","}'' and
-  ``@{verbatim "|"}'' in Isar method notation.  This corresponds to
-  @{ML_op "THEN"} and @{ML_op "ORELSE"} in ML, but there are further
-  possibilities for fine-tuning alternation of tactics such as @{ML_op
-  "APPEND"}.  Further details become visible in ML due to explicit
-  subgoal addressing.
-*}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML_op "THEN": "tactic * tactic -> tactic"} \\
-  @{index_ML_op "ORELSE": "tactic * tactic -> tactic"} \\
-  @{index_ML_op "APPEND": "tactic * tactic -> tactic"} \\
-  @{index_ML "EVERY": "tactic list -> tactic"} \\
-  @{index_ML "FIRST": "tactic list -> tactic"} \\[0.5ex]
-
-  @{index_ML_op "THEN'": "('a -> tactic) * ('a -> tactic) -> 'a -> tactic"} \\
-  @{index_ML_op "ORELSE'": "('a -> tactic) * ('a -> tactic) -> 'a -> tactic"} \\
-  @{index_ML_op "APPEND'": "('a -> tactic) * ('a -> tactic) -> 'a -> tactic"} \\
-  @{index_ML "EVERY'": "('a -> tactic) list -> 'a -> tactic"} \\
-  @{index_ML "FIRST'": "('a -> tactic) list -> 'a -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{text "tac\<^sub>1"}~@{ML_op THEN}~@{text "tac\<^sub>2"} is the sequential
-  composition of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a goal
-  state, it returns all states reachable in two steps by applying
-  @{text "tac\<^sub>1"} followed by @{text "tac\<^sub>2"}.  First, it applies @{text
-  "tac\<^sub>1"} to the goal state, getting a sequence of possible next
-  states; then, it applies @{text "tac\<^sub>2"} to each of these and
-  concatenates the results to produce again one flat sequence of
-  states.
-
-  \item @{text "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "tac\<^sub>2"} makes a choice
-  between @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a state, it
-  tries @{text "tac\<^sub>1"} and returns the result if non-empty; if @{text
-  "tac\<^sub>1"} fails then it uses @{text "tac\<^sub>2"}.  This is a deterministic
-  choice: if @{text "tac\<^sub>1"} succeeds then @{text "tac\<^sub>2"} is excluded
-  from the result.
-
-  \item @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the
-  possible results of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Unlike
-  @{ML_op "ORELSE"} there is \emph{no commitment} to either tactic, so
-  @{ML_op "APPEND"} helps to avoid incompleteness during search, at
-  the cost of potential inefficiencies.
-
-  \item @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
-  "tac\<^sub>1"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac\<^sub>n"}.
-  Note that @{ML "EVERY []"} is the same as @{ML all_tac}: it always
-  succeeds.
-
-  \item @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
-  "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op "ORELSE"}~@{text
-  "tac\<^sub>n"}.  Note that @{ML "FIRST []"} is the same as @{ML no_tac}: it
-  always fails.
-
-  \item @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for
-  tactics with explicit subgoal addressing.  So @{text
-  "(tac\<^sub>1"}~@{ML_op THEN'}~@{text "tac\<^sub>2) i"} is the same as @{text
-  "(tac\<^sub>1 i"}~@{ML_op THEN}~@{text "tac\<^sub>2 i)"}.
-
-  The other primed tacticals work analogously.
-
-  \end{description}
-*}
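-
-text %mlex {* A small sketch of lifted alternation, assuming the Pure
-  fact @{text "reflexive"} for illustration: *}
-
-ML {*
-  (*hypothetical sketch: try assumption first, then reflexivity, on a subgoal*)
-  val trivial_tac = assume_tac ORELSE' resolve_tac [@{thm reflexive}];
-*}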
-
-
-subsection {* Repetition tacticals *}
-
-text {* These tacticals provide further control over repetition of
-  tactics, beyond the stylized forms of ``@{verbatim "?"}''  and
-  ``@{verbatim "+"}'' in Isar method expressions. *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML "TRY": "tactic -> tactic"} \\
-  @{index_ML "REPEAT": "tactic -> tactic"} \\
-  @{index_ML "REPEAT1": "tactic -> tactic"} \\
-  @{index_ML "REPEAT_DETERM": "tactic -> tactic"} \\
-  @{index_ML "REPEAT_DETERM_N": "int -> tactic -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML TRY}~@{text "tac"} applies @{text "tac"} to the goal
-  state and returns the resulting sequence, if non-empty; otherwise it
-  returns the original state.  Thus, it applies @{text "tac"} at most
-  once.
-
-  Note that for tactics with subgoal addressing, the combinator can be
-  applied via functional composition: @{ML "TRY"}~@{ML_op o}~@{text
-  "tac"}.  There is no need for @{verbatim TRY'}.
-
-  \item @{ML REPEAT}~@{text "tac"} applies @{text "tac"} to the goal
-  state and, recursively, to each element of the resulting sequence.
-  The resulting sequence consists of those states that make @{text
-  "tac"} fail.  Thus, it applies @{text "tac"} as many times as
-  possible (including zero times), and allows backtracking over each
-  invocation of @{text "tac"}.  @{ML REPEAT} is more general than @{ML
-  REPEAT_DETERM}, but requires more space.
-
-  \item @{ML REPEAT1}~@{text "tac"} is like @{ML REPEAT}~@{text "tac"}
-  but it always applies @{text "tac"} at least once, failing if this
-  is impossible.
-
-  \item @{ML REPEAT_DETERM}~@{text "tac"} applies @{text "tac"} to the
-  goal state and, recursively, to the head of the resulting sequence.
-  It returns the first state to make @{text "tac"} fail.  It is
-  deterministic, discarding alternative outcomes.
-
-  \item @{ML REPEAT_DETERM_N}~@{text "n tac"} is like @{ML
-  REPEAT_DETERM}~@{text "tac"} but the number of repetitions is bound
-  by @{text "n"} (where @{ML "~1"} means @{text "\<infinity>"}).
-
-  \end{description}
-*}
-
-text %mlex {* The basic tactics and tacticals considered above follow
-  some algebraic laws:
-
-  \begin{itemize}
-
-  \item @{ML all_tac} is the identity element of the tactical @{ML_op
-  "THEN"}.
-
-  \item @{ML no_tac} is the identity element of @{ML_op "ORELSE"} and
-  @{ML_op "APPEND"}.  Also, it is a zero element for @{ML_op "THEN"},
-  which means that @{text "tac"}~@{ML_op THEN}~@{ML no_tac} is
-  equivalent to @{ML no_tac}.
-
-  \item @{ML TRY} and @{ML REPEAT} can be expressed as (recursive)
-  functions over more basic combinators (ignoring some internal
-  implementation tricks):
-
-  \end{itemize}
-*}
-
-ML {*
-  fun TRY tac = tac ORELSE all_tac;
-  fun REPEAT tac st = ((tac THEN REPEAT tac) ORELSE all_tac) st;
-*}
-
-text {* If @{text "tac"} can return multiple outcomes then so can @{ML
-  REPEAT}~@{text "tac"}.  Since @{ML REPEAT} uses @{ML_op "ORELSE"} and
-  not @{ML_op "APPEND"}, it applies @{text "tac"} as many times as
-  possible in each outcome.
-
-  \begin{warn}
-  Note the explicit abstraction over the goal state in the ML
-  definition of @{ML REPEAT}.  Recursive tacticals must be coded in
-  this awkward fashion to avoid infinite recursion of eager functional
-  evaluation in Standard ML.  The following attempt would make @{ML
-  REPEAT}~@{text "tac"} loop:
-  \end{warn}
-*}
-
-ML {*
-  (*BAD -- does not terminate!*)
-  fun REPEAT tac = (tac THEN REPEAT tac) ORELSE all_tac;
-*}
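-
-text {* As a usage sketch (again assuming the Isabelle/HOL rules
-  @{text conjI} and @{text impI}), the following miniature prover
-  repeatedly applies an introduction rule or closes a subgoal by
-  assumption, with backtracking over the individual steps: *}
-
-ML {*
-  (*apply the step tactic as often as possible; REPEAT keeps alternative
-    outcomes of every step available for backtracking*)
-  val mini_tac =
-    REPEAT (resolve_tac @{thms conjI impI} 1 ORELSE assume_tac 1);
-*}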
-
-
-subsection {* Applying tactics to subgoal ranges *}
-
-text {* Tactics with explicit subgoal addressing
-  @{ML_type "int -> tactic"} can be used together with tacticals that
-  act like ``subgoal quantifiers'': guided by the success of the body
-  tactic, a certain range of subgoals is covered.  Thus the body tactic
-  is applied to \emph{all} subgoals, \emph{some} subgoal etc.
-
-  Suppose that the goal state has @{text "n \<ge> 0"} subgoals.  Many of
-  these tacticals address subgoal ranges counting downwards from
-  @{text "n"} towards @{text "1"}.  This has the fortunate effect that
-  newly emerging subgoals are concatenated in the result, without
-  interfering with each other.  Nonetheless, there might be situations
-  where a different order is desired. *}
-
-text %mlref {*
-  \begin{mldecls}
-  @{index_ML ALLGOALS: "(int -> tactic) -> tactic"} \\
-  @{index_ML SOMEGOAL: "(int -> tactic) -> tactic"} \\
-  @{index_ML FIRSTGOAL: "(int -> tactic) -> tactic"} \\
-  @{index_ML HEADGOAL: "(int -> tactic) -> tactic"} \\
-  @{index_ML REPEAT_SOME: "(int -> tactic) -> tactic"} \\
-  @{index_ML REPEAT_FIRST: "(int -> tactic) -> tactic"} \\
-  @{index_ML RANGE: "(int -> tactic) list -> int -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML ALLGOALS}~@{text "tac"} is equivalent to @{text "tac
-  n"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac 1"}.  It
-  applies @{text tac} to all the subgoals, counting downwards.
-
-  \item @{ML SOMEGOAL}~@{text "tac"} is equivalent to @{text "tac
-  n"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac 1"}.  It
-  applies @{text "tac"} to one subgoal, counting downwards.
-
-  \item @{ML FIRSTGOAL}~@{text "tac"} is equivalent to @{text "tac
-  1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac n"}.  It
-  applies @{text "tac"} to one subgoal, counting upwards.
-
-  \item @{ML HEADGOAL}~@{text "tac"} is equivalent to @{text "tac 1"}.
-  It applies @{text "tac"} unconditionally to the first subgoal.
-
-  \item @{ML REPEAT_SOME}~@{text "tac"} applies @{text "tac"} once or
-  more to a subgoal, counting downwards.
-
-  \item @{ML REPEAT_FIRST}~@{text "tac"} applies @{text "tac"} once or
-  more to a subgoal, counting upwards.
-
-  \item @{ML RANGE}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>k] i"} is equivalent to
-  @{text "tac\<^sub>k (i + k - 1)"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op
-  THEN}~@{text "tac\<^sub>1 i"}.  It applies the given list of tactics to the
-  corresponding range of subgoals, counting downwards.
-
-  \end{description}
-*}
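-
-text {* For example, the following sketch (re-using the tactics from the
-  previous examples) addresses subgoals explicitly via these ``subgoal
-  quantifiers'': *}
-
-ML {*
-  (*apply impI to the first subgoal only, then try an assumption step
-    on every subgoal, counting downwards*)
-  val all_goals_tac =
-    HEADGOAL (resolve_tac @{thms impI}) THEN ALLGOALS (TRY o assume_tac);
-*}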
-
-
-subsection {* Control and search tacticals *}
-
-text {* A predicate on theorems @{ML_type "thm -> bool"} can test
-  whether a goal state enjoys some desirable property --- such as
-  having no subgoals.  Tactics that search for satisfactory goal
-  states are easy to express.  The main search procedures,
-  depth-first, breadth-first and best-first, are provided as
-  tacticals.  They generate the search tree by repeatedly applying a
-  given tactic.  *}
-
-
-text %mlref ""
-
-subsubsection {* Filtering a tactic's results *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML FILTER: "(thm -> bool) -> tactic -> tactic"} \\
-  @{index_ML CHANGED: "tactic -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML FILTER}~@{text "sat tac"} applies @{text "tac"} to the
-  goal state and returns a sequence consisting of those result goal
-  states that are satisfactory in the sense of @{text "sat"}.
-
-  \item @{ML CHANGED}~@{text "tac"} applies @{text "tac"} to the goal
-  state and returns precisely those states that differ from the
-  original state (according to @{ML Thm.eq_thm}).  Thus @{ML
-  CHANGED}~@{text "tac"} always has some effect on the state.
-
-  \end{description}
-*}
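-
-text {* A small sketch, building on the tactics used above: *}
-
-ML {*
-  (*succeed only if at least one introduction rule actually applies,
-    i.e. the goal state really changes*)
-  val progress_tac = CHANGED (REPEAT (resolve_tac @{thms conjI impI} 1));
-
-  (*keep only those outcomes that solve the goal completely*)
-  val solved_tac =
-    FILTER (fn st => Thm.nprems_of st = 0) (REPEAT (assume_tac 1));
-*}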
-
-
-subsubsection {* Depth-first search *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML DEPTH_FIRST: "(thm -> bool) -> tactic -> tactic"} \\
-  @{index_ML DEPTH_SOLVE: "tactic -> tactic"} \\
-  @{index_ML DEPTH_SOLVE_1: "tactic -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML DEPTH_FIRST}~@{text "sat tac"} returns the goal state if
-  @{text "sat"} returns true.  Otherwise it applies @{text "tac"},
-  then recursively searches from each element of the resulting
-  sequence.  The code uses a stack for efficiency, in effect applying
-  @{text "tac"}~@{ML_op THEN}~@{ML DEPTH_FIRST}~@{text "sat tac"} to
-  the state.
-
-  \item @{ML DEPTH_SOLVE}~@{text "tac"} uses @{ML DEPTH_FIRST} to
-  search for states having no subgoals.
-
-  \item @{ML DEPTH_SOLVE_1}~@{text "tac"} uses @{ML DEPTH_FIRST} to
-  search for states having fewer subgoals than the given state.  Thus,
-  it insists upon solving at least one subgoal.
-
-  \end{description}
-*}
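-
-text {* A sketch of a naive depth-first prover for a propositional
-  fragment of Isabelle/HOL (the rule selection is deliberately
-  simplistic): *}
-
-ML {*
-  (*search depth-first for a state without subgoals, expanding the
-    search tree by rule application or assumption; the predicate
-    has_fewer_prems is explained at the end of this section*)
-  val depth_tac =
-    DEPTH_FIRST (has_fewer_prems 1)
-      (resolve_tac @{thms conjI impI disjI1 disjI2} 1 ORELSE assume_tac 1);
-*}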
-
-
-subsubsection {* Other search strategies *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML BREADTH_FIRST: "(thm -> bool) -> tactic -> tactic"} \\
-  @{index_ML BEST_FIRST: "(thm -> bool) * (thm -> int) -> tactic -> tactic"} \\
-  @{index_ML THEN_BEST_FIRST: "tactic -> (thm -> bool) * (thm -> int) -> tactic -> tactic"} \\
-  \end{mldecls}
-
-  These search strategies will find a solution if one exists.
-  However, they do not enumerate all solutions; they terminate after
-  the first satisfactory result from @{text "tac"}.
-
-  \begin{description}
-
-  \item @{ML BREADTH_FIRST}~@{text "sat tac"} uses breadth-first
-  search to find states for which @{text "sat"} is true.  For most
-  applications, it is too slow.
-
-  \item @{ML BEST_FIRST}~@{text "(sat, dist) tac"} does a heuristic
-  search, using @{text "dist"} to estimate the distance from a
-  satisfactory state (in the sense of @{text "sat"}).  It maintains a
-  list of states ordered by distance.  It applies @{text "tac"} to the
-  head of this list; if the result contains any satisfactory states,
-  then it returns them.  Otherwise, @{ML BEST_FIRST} adds the new
-  states to the list, and continues.
-
-  The distance function is typically @{ML size_of_thm}, which computes
-  the size of the state.  The smaller the state, the fewer and simpler
-  subgoals it has.
-
-  \item @{ML THEN_BEST_FIRST}~@{text "tac\<^sub>0 (sat, dist) tac"} is like
-  @{ML BEST_FIRST}, except that the priority queue initially contains
-  the result of applying @{text "tac\<^sub>0"} to the goal state.  This
-  tactical permits separate tactics for starting the search and
-  continuing the search.
-
-  \end{description}
-*}
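-
-text {* A corresponding best-first variant of the previous sketch, using
-  the standard heuristics mentioned above: *}
-
-ML {*
-  (*search for a state without subgoals, preferring smaller states*)
-  val best_tac =
-    BEST_FIRST (has_fewer_prems 1, size_of_thm)
-      (resolve_tac @{thms conjI impI disjI1 disjI2} 1 ORELSE assume_tac 1);
-*}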
-
-
-subsubsection {* Auxiliary tacticals for searching *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML COND: "(thm -> bool) -> tactic -> tactic -> tactic"} \\
-  @{index_ML IF_UNSOLVED: "tactic -> tactic"} \\
-  @{index_ML SOLVE: "tactic -> tactic"} \\
-  @{index_ML DETERM: "tactic -> tactic"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML COND}~@{text "sat tac\<^sub>1 tac\<^sub>2"} applies @{text "tac\<^sub>1"} to
-  the goal state if it satisfies predicate @{text "sat"}, and applies
-  @{text "tac\<^sub>2"} otherwise.  It is a conditional tactical in that only
-  one of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} is applied to a goal state.
-  However, both @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} are evaluated
-  because ML uses eager evaluation.
-
-  \item @{ML IF_UNSOLVED}~@{text "tac"} applies @{text "tac"} to the
-  goal state if it has any subgoals, and simply returns the goal state
-  otherwise.  Many common tactics, such as @{ML resolve_tac}, fail if
-  applied to a goal state that has no subgoals.
-
-  \item @{ML SOLVE}~@{text "tac"} applies @{text "tac"} to the goal
-  state and then fails iff there are subgoals left.
-
-  \item @{ML DETERM}~@{text "tac"} applies @{text "tac"} to the goal
-  state and returns the head of the resulting sequence.  @{ML DETERM}
-  limits the search space by making its argument deterministic.
-
-  \end{description}
-*}
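-
-text {* A brief sketch that combines these control tacticals with the
-  tactics from the previous examples: *}
-
-ML {*
-  (*commit to the first outcome of the naive prover, but fail unless it
-    actually solves all subgoals*)
-  val commit_tac =
-    DETERM (SOLVE
-      (REPEAT (resolve_tac @{thms conjI impI} 1 ORELSE assume_tac 1)));
-*}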
-
-
-subsubsection {* Predicates and functions useful for searching *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML has_fewer_prems: "int -> thm -> bool"} \\
-  @{index_ML Thm.eq_thm: "thm * thm -> bool"} \\
-  @{index_ML Thm.eq_thm_prop: "thm * thm -> bool"} \\
-  @{index_ML size_of_thm: "thm -> int"} \\
-  \end{mldecls}
-
-  \begin{description}
-
-  \item @{ML has_fewer_prems}~@{text "n thm"} reports whether @{text
-  "thm"} has fewer than @{text "n"} premises.
-
-  \item @{ML Thm.eq_thm}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether @{text
-  "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.  Both theorems must have the
-  same conclusions, the same set of hypotheses, and the same set of sort
-  hypotheses.  Names of bound variables are ignored as usual.
-
-  \item @{ML Thm.eq_thm_prop}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether
-  the propositions of @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.
-  Names of bound variables are ignored.
-
-  \item @{ML size_of_thm}~@{text "thm"} computes the size of @{text
-  "thm"}, namely the number of variables, constants and abstractions
-  in its conclusion.  It may serve as a distance function for
-  @{ML BEST_FIRST}.
-
-  \end{description}
-*}
-
-end
--- a/src/Doc/IsarImplementation/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-FORMAT="$1"
-VARIANT="$2"
-
-"$ISABELLE_TOOL" logo Isar
-
-cp "$ISABELLE_HOME/src/Doc/iman.sty" .
-cp "$ISABELLE_HOME/src/Doc/extra.sty" .
-cp "$ISABELLE_HOME/src/Doc/isar.sty" .
-cp "$ISABELLE_HOME/src/Doc/ttbox.sty" .
-cp "$ISABELLE_HOME/src/Doc/underscore.sty" .
-cp "$ISABELLE_HOME/src/Doc/manual.bib" .
-
-"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
-
--- a/src/Doc/IsarImplementation/document/root.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-\documentclass[12pt,a4paper,fleqn]{report}
-\usepackage[T1]{fontenc}
-\usepackage{latexsym,graphicx}
-\usepackage[refpage]{nomencl}
-\usepackage{iman,extra,isar,proof}
-\usepackage[nohyphen,strings]{underscore}
-\usepackage{isabelle}
-\usepackage{isabellesym}
-\usepackage{railsetup}
-\usepackage{ttbox}
-\usepackage{supertabular}
-\usepackage{style}
-\usepackage{pdfsetup}
-
-
-\hyphenation{Isabelle}
-\hyphenation{Isar}
-
-\isadroptag{theory}
-\title{\includegraphics[scale=0.5]{isabelle_isar}
-  \\[4ex] The Isabelle/Isar Implementation}
-\author{\emph{Makarius Wenzel}  \\[3ex]
-  With Contributions by
-  Stefan Berghofer, \\
-  Florian Haftmann
-  and Larry Paulson
-}
-
-\makeindex
-
-
-\begin{document}
-
-\maketitle
-
-\begin{abstract}
-  We describe the key concepts underlying the Isabelle/Isar
-  implementation, including ML references for the most important
-  functions.  The aim is to give some insight into the overall system
-  architecture, and provide clues on implementing applications within
-  this framework.
-\end{abstract}
-
-\vspace*{2.5cm}
-\begin{quote}
-
-  {\small\em Isabelle was not designed; it evolved.  Not everyone
-    likes this idea.  Specification experts rightly abhor
-    trial-and-error programming.  They suggest that no one should
-    write a program without first writing a complete formal
-    specification. But university departments are not software houses.
-    Programs like Isabelle are not products: when they have served
-    their purpose, they are discarded.}
-
-  Lawrence C. Paulson, ``Isabelle: The Next 700 Theorem Provers''
-
-  \vspace*{1cm}
-
-  {\small\em As I did 20 years ago, I still fervently believe that the
-    only way to make software secure, reliable, and fast is to make it
-    small.  Fight features.}
-
-  Andrew S. Tanenbaum
-
-  \vspace*{1cm}
-
-  {\small\em One thing that UNIX does not need is more features. It is
-    successful in part because it has a small number of good ideas
-    that work well together. Merely adding features does not make it
-    easier for users to do things --- it just makes the manual
-    thicker. The right solution in the right place is always more
-    effective than haphazard hacking.}
-
-  Rob Pike and Brian W. Kernighan
-
-  \vspace*{1cm}
-
-  {\small\em If you look at software today, through the lens of the
-    history of engineering, it's certainly engineering of a sort--but
-    it's the kind of engineering that people without the concept of
-    the arch did. Most software today is very much like an Egyptian
-    pyramid with millions of bricks piled on top of each other, with
-    no structural integrity, but just done by brute force and
-    thousands of slaves.}
-
-  Alan Kay
-
-\end{quote}
-
-\thispagestyle{empty}\clearpage
-
-\pagenumbering{roman}
-\tableofcontents
-\listoffigures
-\clearfirst
-
-\setcounter{chapter}{-1}
-
-\input{ML.tex}
-\input{Prelim.tex}
-\input{Logic.tex}
-\input{Syntax.tex}
-\input{Tactic.tex}
-\input{Eq.tex}
-\input{Proof.tex}
-\input{Isar.tex}
-\input{Local_Theory.tex}
-\input{Integration.tex}
-
-\begingroup
-\tocentry{\bibname}
-\bibliographystyle{abbrv} \small\raggedright\frenchspacing
-\bibliography{manual}
-\endgroup
-
-\tocentry{\indexname}
-\printindex
-
-\end{document}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: t
-%%% End:
--- a/src/Doc/IsarImplementation/document/style.sty	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-%% toc
-\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
-\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
-
-%% references
-\newcommand{\secref}[1]{\S\ref{#1}}
-\newcommand{\chref}[1]{chapter~\ref{#1}}
-\newcommand{\figref}[1]{figure~\ref{#1}}
-
-%% math
-\newcommand{\text}[1]{\mbox{#1}}
-\newcommand{\isasymvartheta}{\isamath{\theta}}
-\newcommand{\isactrlvec}[1]{\emph{$\vec{#1}$}}
-\newcommand{\isactrlBG}{\isacharbackquoteopen}
-\newcommand{\isactrlEN}{\isacharbackquoteclose}
-
-\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
-
-\pagestyle{headings}
-\sloppy
-\binperiod
-
-\parindent 0pt\parskip 0.5ex
-
-\renewcommand{\isadigit}[1]{\isamath{#1}}
-
-\newenvironment{mldecls}{\par\noindent\begingroup\footnotesize\def\isanewline{\\}\begin{tabular}{l}}{\end{tabular}\smallskip\endgroup}
-
-\isafoldtag{FIXME}
-
-\isakeeptag{mlref}
-\renewcommand{\isatagmlref}{\subsection*{\makebox[0pt][r]{\fbox{ML}~~}Reference}}
-\renewcommand{\endisatagmlref}{}
-
-\isakeeptag{mlantiq}
-\renewcommand{\isatagmlantiq}{\subsection*{\makebox[0pt][r]{\fbox{ML}~~}Antiquotations}}
-\renewcommand{\endisatagmlantiq}{}
-
-\isakeeptag{mlex}
-\renewcommand{\isatagmlex}{\subsection*{\makebox[0pt][r]{\fbox{ML}~~}Examples}}
-\renewcommand{\endisatagmlex}{}
-
-\renewcommand{\isatagML}{\begingroup\isabellestyle{default}\isastyle\def\isadigit##1{##1}}
-\renewcommand{\endisatagML}{\endgroup}
-
-\newcommand{\minorcmd}[1]{{\sf #1}}
-\newcommand{\isasymtype}{\minorcmd{type}}
-\newcommand{\isasymval}{\minorcmd{val}}
-
-\newcommand{\isasymFIX}{\isakeyword{fix}}
-\newcommand{\isasymASSUME}{\isakeyword{assume}}
-\newcommand{\isasymDEFINE}{\isakeyword{define}}
-\newcommand{\isasymNOTE}{\isakeyword{note}}
-\newcommand{\isasymGUESS}{\isakeyword{guess}}
-\newcommand{\isasymOBTAIN}{\isakeyword{obtain}}
-\newcommand{\isasymTHEORY}{\isakeyword{theory}}
-\newcommand{\isasymUSES}{\isakeyword{uses}}
-\newcommand{\isasymEND}{\isakeyword{end}}
-\newcommand{\isasymCONSTS}{\isakeyword{consts}}
-\newcommand{\isasymDEFS}{\isakeyword{defs}}
-\newcommand{\isasymTHEOREM}{\isakeyword{theorem}}
-\newcommand{\isasymDEFINITION}{\isakeyword{definition}}
-
-\isabellestyle{literal}
-
-\railtermfont{\isabellestyle{tt}}
-\railnontermfont{\isabellestyle{itunderscore}}
-\railnamefont{\isabellestyle{itunderscore}}
--- a/src/Doc/IsarRef/Base.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-theory Base
-imports Pure
-begin
-
-ML_file "../antiquote_setup.ML"
-
-end
--- a/src/Doc/IsarRef/Document_Preparation.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,589 +0,0 @@
-theory Document_Preparation
-imports Base Main
-begin
-
-chapter {* Document preparation \label{ch:document-prep} *}
-
-text {* Isabelle/Isar provides a simple document preparation system
-  based on {PDF-\LaTeX}, with support for hyperlinks and bookmarks
-  within that format.  This makes it possible to produce papers, books,
-  theses etc.\ from Isabelle theory sources.
-
-  {\LaTeX} output is generated while processing a \emph{session} in
-  batch mode, as explained in the \emph{The Isabelle System Manual}
-  \cite{isabelle-sys}.  The main Isabelle tools to get started with
-  document preparation are @{tool_ref mkroot} and @{tool_ref build}.
-
-  The classic Isabelle/HOL tutorial \cite{isabelle-hol-book} also
-  explains some aspects of theory presentation.  *}
-
-
-section {* Markup commands \label{sec:markup} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "header"} & : & @{text "toplevel \<rightarrow> toplevel"} \\[0.5ex]
-    @{command_def "chapter"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "section"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "subsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "subsubsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "text"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "text_raw"} & : & @{text "local_theory \<rightarrow> local_theory"} \\[0.5ex]
-    @{command_def "sect"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "subsect"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "subsubsect"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "txt"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "txt_raw"} & : & @{text "proof \<rightarrow> proof"} \\
-  \end{matharray}
-
-  Markup commands provide a structured way to insert text into the
-  document generated from a theory.  Each markup command takes a
-  single @{syntax text} argument, which is passed as argument to a
-  corresponding {\LaTeX} macro.  The default macros provided by
-  @{file "~~/lib/texinputs/isabelle.sty"} can be redefined according
-  to the needs of the underlying document and {\LaTeX} styles.
-
-  Note that formal comments (\secref{sec:comments}) are similar to
-  markup commands, but have a different status within Isabelle/Isar
-  syntax.
-
-  @{rail \<open>
-    (@@{command chapter} | @@{command section} | @@{command subsection} |
-      @@{command subsubsection} | @@{command text}) @{syntax target}? @{syntax text}
-    ;
-    (@@{command header} | @@{command text_raw} | @@{command sect} | @@{command subsect} |
-      @@{command subsubsect} | @@{command txt} | @@{command txt_raw}) @{syntax text}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command header} provides plain text markup just preceding
-  the formal beginning of a theory.  The corresponding {\LaTeX} macro
-  is @{verbatim "\\isamarkupheader"}, which acts like @{command
-  section} by default.
-  
-  \item @{command chapter}, @{command section}, @{command subsection},
-  and @{command subsubsection} mark chapter and section headings
-  within the main theory body or local theory targets.  The
-  corresponding {\LaTeX} macros are @{verbatim "\\isamarkupchapter"},
-  @{verbatim "\\isamarkupsection"}, @{verbatim
-  "\\isamarkupsubsection"} etc.
-
-  \item @{command sect}, @{command subsect}, and @{command subsubsect}
-  mark section headings within proofs.  The corresponding {\LaTeX}
-  macros are @{verbatim "\\isamarkupsect"}, @{verbatim
-  "\\isamarkupsubsect"} etc.
-
-  \item @{command text} and @{command txt} specify paragraphs of plain
-  text.  This corresponds to a {\LaTeX} environment @{verbatim
-  "\\begin{isamarkuptext}"} @{text "\<dots>"} @{verbatim
-  "\\end{isamarkuptext}"} etc.
-
-  \item @{command text_raw} and @{command txt_raw} insert {\LaTeX}
-  source into the output, without additional markup.  Thus the full
-  range of document manipulations becomes available, at the risk of
-  messing up document output.
-
-  \end{description}
-
-  Except for @{command "text_raw"} and @{command "txt_raw"}, the text
-  passed to any of the above markup commands may refer to formal
-  entities via \emph{document antiquotations}, see also
-  \secref{sec:antiq}.  These are interpreted in the present theory or
-  proof context, or the named @{text "target"}.
-
-  \medskip The proof markup commands closely resemble those for theory
-  specifications, but have a different formal status and produce
-  different {\LaTeX} macros.  The default definitions coincide for
-  analogous commands such as @{command section} and @{command sect}.
-*}
-
-
-section {* Document Antiquotations \label{sec:antiq} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{antiquotation_def "theory"} & : & @{text antiquotation} \\
-    @{antiquotation_def "thm"} & : & @{text antiquotation} \\
-    @{antiquotation_def "lemma"} & : & @{text antiquotation} \\
-    @{antiquotation_def "prop"} & : & @{text antiquotation} \\
-    @{antiquotation_def "term"} & : & @{text antiquotation} \\
-    @{antiquotation_def term_type} & : & @{text antiquotation} \\
-    @{antiquotation_def typeof} & : & @{text antiquotation} \\
-    @{antiquotation_def const} & : & @{text antiquotation} \\
-    @{antiquotation_def abbrev} & : & @{text antiquotation} \\
-    @{antiquotation_def typ} & : & @{text antiquotation} \\
-    @{antiquotation_def type} & : & @{text antiquotation} \\
-    @{antiquotation_def class} & : & @{text antiquotation} \\
-    @{antiquotation_def "text"} & : & @{text antiquotation} \\
-    @{antiquotation_def goals} & : & @{text antiquotation} \\
-    @{antiquotation_def subgoals} & : & @{text antiquotation} \\
-    @{antiquotation_def prf} & : & @{text antiquotation} \\
-    @{antiquotation_def full_prf} & : & @{text antiquotation} \\
-    @{antiquotation_def ML} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_op} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_type} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_structure} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_functor} & : & @{text antiquotation} \\
-    @{antiquotation_def "file"} & : & @{text antiquotation} \\
-    @{antiquotation_def "url"} & : & @{text antiquotation} \\
-  \end{matharray}
-
-  The overall content of an Isabelle/Isar theory may alternate between
-  formal and informal text.  The main body consists of formal
-  specification and proof commands, interspersed with markup commands
-  (\secref{sec:markup}) or document comments (\secref{sec:comments}).
-  The argument of markup commands quotes informal text to be printed
-  in the resulting document, but may again refer to formal entities
-  via \emph{document antiquotations}.
-
-  For example, embedding of ``@{text [source=false] "@{term [show_types] \"f x = a + x\"}"}''
-  within a text block makes
-  \isa{{\isacharparenleft}f{\isasymColon}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isacharparenright}\ {\isacharparenleft}x{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}a{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharplus}\ x} appear in the final {\LaTeX} document.
-
-  Antiquotations usually spare the author tedious typing of logical
-  entities in full detail.  Even more importantly, some degree of
-  consistency-checking between the main body of formal text and its
-  informal explanation is achieved, since terms and types appearing in
-  antiquotations are checked within the current theory or proof
-  context.
-
-  %% FIXME less monolithic presentation, move to individual sections!?
-  @{rail \<open>
-    '@{' antiquotation '}'
-    ;
-    @{syntax_def antiquotation}:
-      @@{antiquotation theory} options @{syntax name} |
-      @@{antiquotation thm} options styles @{syntax thmrefs} |
-      @@{antiquotation lemma} options @{syntax prop} @'by' @{syntax method} @{syntax method}? |
-      @@{antiquotation prop} options styles @{syntax prop} |
-      @@{antiquotation term} options styles @{syntax term} |
-      @@{antiquotation (HOL) value} options styles @{syntax term} |
-      @@{antiquotation term_type} options styles @{syntax term} |
-      @@{antiquotation typeof} options styles @{syntax term} |
-      @@{antiquotation const} options @{syntax term} |
-      @@{antiquotation abbrev} options @{syntax term} |
-      @@{antiquotation typ} options @{syntax type} |
-      @@{antiquotation type} options @{syntax name} |
-      @@{antiquotation class} options @{syntax name} |
-      @@{antiquotation text} options @{syntax name}
-    ;
-    @{syntax antiquotation}:
-      @@{antiquotation goals} options |
-      @@{antiquotation subgoals} options |
-      @@{antiquotation prf} options @{syntax thmrefs} |
-      @@{antiquotation full_prf} options @{syntax thmrefs} |
-      @@{antiquotation ML} options @{syntax name} |
-      @@{antiquotation ML_op} options @{syntax name} |
-      @@{antiquotation ML_type} options @{syntax name} |
-      @@{antiquotation ML_structure} options @{syntax name} |
-      @@{antiquotation ML_functor} options @{syntax name} |
-      @@{antiquotation "file"} options @{syntax name} |
-      @@{antiquotation file_unchecked} options @{syntax name} |
-      @@{antiquotation url} options @{syntax name}
-    ;
-    options: '[' (option * ',') ']'
-    ;
-    option: @{syntax name} | @{syntax name} '=' @{syntax name}
-    ;
-    styles: '(' (style + ',') ')'
-    ;
-    style: (@{syntax name} +)
-  \<close>}
-
-  Note that the syntax of antiquotations may \emph{not} include source
-  comments @{verbatim "(*"}~@{text "\<dots>"}~@{verbatim "*)"} nor verbatim
-  text @{verbatim "{"}@{verbatim "*"}~@{text "\<dots>"}~@{verbatim
-  "*"}@{verbatim "}"}.
-
-  \begin{description}
-  
-  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
-  guaranteed to refer to a valid ancestor theory in the current
-  context.
-
-  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
-  Full fact expressions are allowed here, including attributes
-  (\secref{sec:syn-att}).
-
-  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
-  "\<phi>"}.
-
-  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
-  @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
-
-  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
-  
-  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
-  its result, see also @{command_ref (HOL) value}.
-
-  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
-  annotated with its type.
-
-  \item @{text "@{typeof t}"} prints the type of a well-typed term
-  @{text "t"}.
-
-  \item @{text "@{const c}"} prints a logical or syntactic constant
-  @{text "c"}.
-  
-  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
-  @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
-
-  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
-
-  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
-    constructor @{text "\<kappa>"}.
-
-  \item @{text "@{class c}"} prints a class @{text c}.
-
-  \item @{text "@{text s}"} prints uninterpreted source text @{text
-  s}.  This is particularly useful for printing portions of text according
-  to the Isabelle document style, without demanding well-formedness,
-  e.g.\ small pieces of terms that should not be parsed or
-  type-checked yet.
-
-  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
-  state.  This is mainly for support of tactic-emulation scripts
-  within Isar.  Presentation of goal states does not conform to the
-  idea of human-readable proof documents!
-
-  When explaining proofs in detail it is usually better to spell out
-  the reasoning via proper Isar proof commands, instead of peeking at
-  the internal machine configuration.
-  
-  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
-  does not print the main goal.
-  
-  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
-  corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
-  requires proof terms to be switched on for the current logic
-  session.
-  
-  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
-  a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
-  information omitted in the compact proof term, which is denoted by
-  ``@{text _}'' placeholders there.
-  
-  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
-  s}"}, @{text "@{ML_structure s}"}, and @{text "@{ML_functor s}"}
-  check text @{text s} as ML value, infix operator, type, structure,
-  and functor respectively.  The source is printed verbatim.
-
-  \item @{text "@{file path}"} checks that @{text "path"} refers to a
-  file (or directory) and prints it verbatim.
-
-  \item @{text "@{file_unchecked path}"} is like @{text "@{file
-  path}"}, but does not check the existence of the @{text "path"}
-  within the file-system.
-
-  \item @{text "@{url name}"} produces markup for the given URL, which
-  results in an active hyperlink within the text.
-
-  \end{description}
-*}
-
-
-subsection {* Styled antiquotations *}
-
-text {* The antiquotations @{text thm}, @{text prop} and @{text
-  term} admit an extra \emph{style} specification to modify the
-  printed result.  A style is specified by a name with a possibly
-  empty list of arguments;  multiple styles can be sequenced with
-  commas.  The following standard styles are available:
-
-  \begin{description}
-  
-  \item @{text lhs} extracts the first argument of any application
-  form with at least two arguments --- typically meta-level or
-  object-level equality, or any other binary relation.
-  
-  \item @{text rhs} is like @{text lhs}, but extracts the second
-  argument.
-  
-  \item @{text "concl"} extracts the conclusion @{text C} from a rule
-  in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
-  
-  \item @{text "prem"} @{text n} extract premise number
-  @{text "n"} from from a rule in Horn-clause
-  normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
-
-  \end{description}
-*}
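-
-text {* For example, given a hypothetical fact @{verbatim "my_eq"} stating
-  an equation, writing @{verbatim "@{thm (lhs) my_eq}"} within a text block
-  prints only its left-hand side and @{verbatim "@{thm (rhs) my_eq}"} its
-  right-hand side; likewise @{verbatim "@{thm (prem 1) my_rule}"} prints
-  the first premise of a hypothetical rule @{verbatim "my_rule"}. *}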
-
-
-subsection {* General options *}
-
-text {* The following options are available to tune the printed output
-  of antiquotations.  Note that many of these coincide with system and
-  configuration options of the same names.
-
-  \begin{description}
-
-  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
-  @{antiquotation_option_def show_sorts}~@{text "= bool"} control
-  printing of explicit type and sort constraints.
-
-  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
-  controls printing of implicit structures.
-
-  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
-  controls folding of abbreviations.
-
-  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
-  names of types and constants etc.\ to be printed in their fully
-  qualified internal form.
-
-  \item @{antiquotation_option_def names_short}~@{text "= bool"}
-  forces names of types and constants etc.\ to be printed unqualified.
-  Note that internalizing the output again in the current context may
-  well yield a different result.
-
-  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
-  determines whether the printed version of qualified names should be
-  made sufficiently long to avoid overlap with names declared further
-  back.  Set to @{text false} for more concise output.
-
-  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
-  prints terms in @{text \<eta>}-contracted form.
-
-  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
-  if the text is to be output as multi-line ``display material'',
-  rather than a small piece of text without line breaks (which is the
-  default).
-
-  In this mode the embedded entities are printed in the same style as
-  the main theory text.
-
-  \item @{antiquotation_option_def break}~@{text "= bool"} controls
-  line breaks in non-display material.
-
-  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
-  if the output should be enclosed in double quotes.
-
-  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
-  name} to the print mode to be used for presentation.  Note that the
-  standard setup for {\LaTeX} output is already present by default,
-  including the modes @{text latex} and @{text xsymbols}.
-
-  \item @{antiquotation_option_def margin}~@{text "= nat"} and
-  @{antiquotation_option_def indent}~@{text "= nat"} change the margin
-  or indentation for pretty printing of display material.
-
-  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
-  determines the maximum number of subgoals to be printed (for goal-based
-  antiquotation).
-
-  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
-  original source text of the antiquotation arguments, rather than its
-  internal representation.  Note that formal checking of
-  @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
-  enabled; use the @{antiquotation "text"} antiquotation for unchecked
-  output.
-
-  Regular @{text "term"} and @{text "typ"} antiquotations with @{text
-  "source = false"} involve a full round-trip from the original source
-  to an internalized logical entity back to a source form, according
-  to the syntax of the current context.  Thus the printed output is
-  not under direct control of the author; it may even fluctuate a bit
-  as the underlying theory is changed later on.
-
-  In contrast, @{antiquotation_option source}~@{text "= true"}
-  admits direct printing of the given source text, with the desirable
-  well-formedness check in the background, but without modification of
-  the printed text.
-
-  \end{description}
-
-  For boolean flags, ``@{text "name = true"}'' may be abbreviated as
-  ``@{text name}''.  All of the above flags are disabled by default,
-  unless changed specifically for a logic session in the corresponding
-  @{verbatim "ROOT"} file.  *}
-
-
-section {* Markup via command tags \label{sec:tags} *}
-
-text {* Each Isabelle/Isar command may be decorated by additional
-  presentation tags, to indicate some modification in the way it is
-  printed in the document.
-
-  @{rail \<open>
-    @{syntax_def tags}: ( tag * )
-    ;
-    tag: '%' (@{syntax ident} | @{syntax string})
-  \<close>}
-
-  Some tags are pre-declared for certain classes of commands, serving
-  as default markup if no tags are given in the text:
-
-  \medskip
-  \begin{tabular}{ll}
-    @{text "theory"} & theory begin/end \\
-    @{text "proof"} & all proof commands \\
-    @{text "ML"} & all commands involving ML code \\
-  \end{tabular}
-
-  \medskip The Isabelle document preparation system
-  \cite{isabelle-sys} allows tagged command regions to be presented
-  specifically, e.g.\ to fold proof texts, or drop parts of the text
-  completely.
-
-  For example ``@{command "by"}~@{text "%invisible auto"}'' causes
-  that piece of proof to be treated as @{text invisible} instead of
-  @{text "proof"} (the default), which may be shown or hidden
-  depending on the document setup.  In contrast, ``@{command
-  "by"}~@{text "%visible auto"}'' forces this text to be shown
-  invariably.
-
-  Explicit tag specifications within a proof apply to all subsequent
-  commands of the same level of nesting.  For example, ``@{command
-  "proof"}~@{text "%visible \<dots>"}~@{command "qed"}'' forces the whole
-  sub-proof to be typeset as @{text visible} (unless some of its parts
-  are tagged differently).
-
-  \medskip Command tags merely produce certain markup environments for
-  type-setting.  The meaning of these is determined by {\LaTeX}
-  macros, as defined in @{file "~~/lib/texinputs/isabelle.sty"} or
-  by the document author.  The Isabelle document preparation tools
-  also provide some high-level options to specify the meaning of
-  arbitrary tags to ``keep'', ``drop'', or ``fold'' the corresponding
-  parts of the text.  Logic sessions may also specify ``document
-  versions'', where given tags are interpreted in some particular way.
-  Again see \cite{isabelle-sys} for further details.
-*}
-
-
-section {* Railroad diagrams *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{antiquotation_def "rail"} & : & @{text antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-    'rail' (@{syntax string} | @{syntax cartouche})
-  \<close>}
-
-  The @{antiquotation rail} antiquotation makes it possible to include
-  syntax diagrams in Isabelle documents.  {\LaTeX} requires the style file
-  @{file "~~/lib/texinputs/pdfsetup.sty"}, which can be used via
-  @{verbatim "\\usepackage{pdfsetup}"} in @{verbatim "root.tex"}, for
-  example.
-
-  The rail specification language is quoted here as Isabelle @{syntax
-  string} or text @{syntax "cartouche"}; it has its own grammar given
-  below.
-
-  \begingroup
-  \def\isasymnewline{\isatext{\tt\isacharbackslash<newline>}}
-  @{rail \<open>
-  rule? + ';'
-  ;
-  rule: ((identifier | @{syntax antiquotation}) ':')? body
-  ;
-  body: concatenation + '|'
-  ;
-  concatenation: ((atom '?'?) +) (('*' | '+') atom?)?
-  ;
-  atom: '(' body? ')' | identifier |
-    '@'? (string | @{syntax antiquotation}) |
-    '\<newline>'
-  \<close>}
-  \endgroup
-
-  The lexical syntax of @{text "identifier"} coincides with that of
-  @{syntax ident} in regular Isabelle syntax, but @{text string} uses
-  single quotes instead of double quotes of the standard @{syntax
-  string} category.
-
-  Each @{text rule} defines a formal language (with optional name),
-  using a notation that is similar to EBNF or regular expressions with
-  recursion.  The meaning and visual appearance of these rail language
-  elements are illustrated by the following representative examples.
-
-  \begin{itemize}
-
-  \item Empty @{verbatim "()"}
-
-  @{rail \<open>()\<close>}
-
-  \item Nonterminal @{verbatim "A"}
-
-  @{rail \<open>A\<close>}
-
-  \item Nonterminal via Isabelle antiquotation
-  @{verbatim "@{syntax method}"}
-
-  @{rail \<open>@{syntax method}\<close>}
-
-  \item Terminal @{verbatim "'xyz'"}
-
-  @{rail \<open>'xyz'\<close>}
-
-  \item Terminal in keyword style @{verbatim "@'xyz'"}
-
-  @{rail \<open>@'xyz'\<close>}
-
-  \item Terminal via Isabelle antiquotation
-  @{verbatim "@@{method rule}"}
-
-  @{rail \<open>@@{method rule}\<close>}
-
-  \item Concatenation @{verbatim "A B C"}
-
-  @{rail \<open>A B C\<close>}
-
-  \item Newline inside concatenation
-  @{verbatim "A B C \<newline> D E F"}
-
-  @{rail \<open>A B C \<newline> D E F\<close>}
-
-  \item Variants @{verbatim "A | B | C"}
-
-  @{rail \<open>A | B | C\<close>}
-
-  \item Option @{verbatim "A ?"}
-
-  @{rail \<open>A ?\<close>}
-
-  \item Repetition @{verbatim "A *"}
-
-  @{rail \<open>A *\<close>}
-
-  \item Repetition with separator @{verbatim "A * sep"}
-
-  @{rail \<open>A * sep\<close>}
-
-  \item Strict repetition @{verbatim "A +"}
-
-  @{rail \<open>A +\<close>}
-
-  \item Strict repetition with separator @{verbatim "A + sep"}
-
-  @{rail \<open>A + sep\<close>}
-
-  \end{itemize}
-*}
-
-
-section {* Draft presentation *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "display_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command display_drafts} (@{syntax name} +)
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "display_drafts"}~@{text paths} performs simple output of a
-  given list of raw source files. Only those symbols that do not require
-  additional {\LaTeX} packages are displayed properly; everything else is left
-  verbatim.
-
-  \end{description}
-*}
-
-end
--- a/src/Doc/IsarRef/First_Order_Logic.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,520 +0,0 @@
-
-header {* Example: First-Order Logic *}
-
-theory %visible First_Order_Logic
-imports Base  (* FIXME Pure!? *)
-begin
-
-text {*
-  \noindent In order to commence a new object-logic within
-  Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
-  for individuals and @{text "o"} for object-propositions.  The latter
-  is embedded into the language of Pure propositions by means of a
-  separate judgment.
-*}
-
-typedecl i
-typedecl o
-
-judgment
-  Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
-
-text {*
-  \noindent Note that the object-logic judgment is implicit in the
-  syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
-  From the Pure perspective this means ``@{prop A} is derivable in the
-  object-logic''.
-*}
-
-
-subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
-
-text {*
-  Equality is axiomatized as a binary predicate on individuals, with
-  reflexivity as introduction, and substitution as elimination
-  principle.  Note that the latter is particularly convenient in a
-  framework like Isabelle, because syntactic congruences are
-  implicitly produced by unification of @{term "B x"} against
-  expressions containing occurrences of @{term x}.
-*}
-
-axiomatization
-  equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infix "=" 50)
-where
-  refl [intro]: "x = x" and
-  subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
-
-text {*
-  \noindent Substitution is very powerful, but also hard to control in
-  full generality.  We derive some common symmetry~/ transitivity
-  schemes of @{term equal} as particular consequences.
-*}
-
-theorem sym [sym]:
-  assumes "x = y"
-  shows "y = x"
-proof -
-  have "x = x" ..
-  with `x = y` show "y = x" ..
-qed
-
-theorem forw_subst [trans]:
-  assumes "y = x" and "B x"
-  shows "B y"
-proof -
-  from `y = x` have "x = y" ..
-  from this and `B x` show "B y" ..
-qed
-
-theorem back_subst [trans]:
-  assumes "B x" and "x = y"
-  shows "B y"
-proof -
-  from `x = y` and `B x`
-  show "B y" ..
-qed
-
-theorem trans [trans]:
-  assumes "x = y" and "y = z"
-  shows "x = z"
-proof -
-  from `y = z` and `x = y`
-  show "x = z" ..
-qed
-
-
-subsection {* Basic group theory *}
-
-text {*
-  As an example for equational reasoning we consider some bits of
-  group theory.  The subsequent locale definition postulates group
-  operations and axioms; we also derive some consequences of this
-  specification.
-*}
-
-locale group =
-  fixes prod :: "i \<Rightarrow> i \<Rightarrow> i"  (infix "\<circ>" 70)
-    and inv :: "i \<Rightarrow> i"  ("(_\<inverse>)" [1000] 999)
-    and unit :: i  ("1")
-  assumes assoc: "(x \<circ> y) \<circ> z = x \<circ> (y \<circ> z)"
-    and left_unit:  "1 \<circ> x = x"
-    and left_inv: "x\<inverse> \<circ> x = 1"
-begin
-
-theorem right_inv: "x \<circ> x\<inverse> = 1"
-proof -
-  have "x \<circ> x\<inverse> = 1 \<circ> (x \<circ> x\<inverse>)" by (rule left_unit [symmetric])
-  also have "\<dots> = (1 \<circ> x) \<circ> x\<inverse>" by (rule assoc [symmetric])
-  also have "1 = (x\<inverse>)\<inverse> \<circ> x\<inverse>" by (rule left_inv [symmetric])
-  also have "\<dots> \<circ> x = (x\<inverse>)\<inverse> \<circ> (x\<inverse> \<circ> x)" by (rule assoc)
-  also have "x\<inverse> \<circ> x = 1" by (rule left_inv)
-  also have "((x\<inverse>)\<inverse> \<circ> \<dots>) \<circ> x\<inverse> = (x\<inverse>)\<inverse> \<circ> (1 \<circ> x\<inverse>)" by (rule assoc)
-  also have "1 \<circ> x\<inverse> = x\<inverse>" by (rule left_unit)
-  also have "(x\<inverse>)\<inverse> \<circ> \<dots> = 1" by (rule left_inv)
-  finally show "x \<circ> x\<inverse> = 1" .
-qed
-
-theorem right_unit: "x \<circ> 1 = x"
-proof -
-  have "1 = x\<inverse> \<circ> x" by (rule left_inv [symmetric])
-  also have "x \<circ> \<dots> = (x \<circ> x\<inverse>) \<circ> x" by (rule assoc [symmetric])
-  also have "x \<circ> x\<inverse> = 1" by (rule right_inv)
-  also have "\<dots> \<circ> x = x" by (rule left_unit)
-  finally show "x \<circ> 1 = x" .
-qed
-
-text {*
-  \noindent Reasoning from basic axioms is often tedious.  Our proofs
-  work by producing various instances of the given rules (potentially
-  the symmetric form) using the pattern ``@{command have}~@{text
-  eq}~@{command "by"}~@{text "(rule r)"}'' and composing the chain of
-  results via @{command also}/@{command finally}.  These steps may
-  involve any of the transitivity rules declared in
-  \secref{sec:framework-ex-equal}, namely @{thm trans} in combining
-  the first two results in @{thm right_inv} and in the final steps of
-  both proofs, @{thm forw_subst} in the first combination of @{thm
-  right_unit}, and @{thm back_subst} in all other calculational steps.
-
-  Occasional substitutions in calculations are adequate, but should
-  not be over-emphasized.  The other extreme is to compose a chain by
-  plain transitivity only, with replacements always occurring in
-  topmost position.  For example:
-*}
-
-(*<*)
-theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
-proof -
-  assume [symmetric, defn]: "\<And>x y. (x \<equiv> y) \<equiv> Trueprop (x = y)"
-(*>*)
-  have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" unfolding left_inv ..
-  also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" unfolding assoc ..
-  also have "\<dots> = 1 \<circ> x" unfolding right_inv ..
-  also have "\<dots> = x" unfolding left_unit ..
-  finally have "x \<circ> 1 = x" .
-(*<*)
-qed
-(*>*)
-
-text {*
-  \noindent Here we have re-used the built-in mechanism for unfolding
-  definitions in order to normalize each equational problem.  A more
-  realistic object-logic would include proper setup for the Simplifier
-  (\secref{sec:simplifier}), the main automated tool for equational
-  reasoning in Isabelle.  Then ``@{command unfolding}~@{thm
-  left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
-  "(simp only: left_inv)"}'' etc.
-*}
-
-end
-
-
-subsection {* Propositional logic \label{sec:framework-ex-prop} *}
-
-text {*
-  We axiomatize basic connectives of propositional logic: implication,
-  disjunction, and conjunction.  The associated rules are modeled
-  after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.
-*}
-
-axiomatization
-  imp :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<longrightarrow>" 25) where
-  impI [intro]: "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B" and
-  impD [dest]: "(A \<longrightarrow> B) \<Longrightarrow> A \<Longrightarrow> B"
-
-axiomatization
-  disj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<or>" 30) where
-  disjI\<^sub>1 [intro]: "A \<Longrightarrow> A \<or> B" and
-  disjI\<^sub>2 [intro]: "B \<Longrightarrow> A \<or> B" and
-  disjE [elim]: "A \<or> B \<Longrightarrow> (A \<Longrightarrow> C) \<Longrightarrow> (B \<Longrightarrow> C) \<Longrightarrow> C"
-
-axiomatization
-  conj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<and>" 35) where
-  conjI [intro]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" and
-  conjD\<^sub>1: "A \<and> B \<Longrightarrow> A" and
-  conjD\<^sub>2: "A \<and> B \<Longrightarrow> B"
-
-text {*
-  \noindent The conjunctive destructions have the disadvantage that
-  decomposing @{prop "A \<and> B"} involves an immediate decision which
-  component should be projected.  The more convenient simultaneous
-  elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
-  follows:
-*}
-
-theorem conjE [elim]:
-  assumes "A \<and> B"
-  obtains A and B
-proof
-  from `A \<and> B` show A by (rule conjD\<^sub>1)
-  from `A \<and> B` show B by (rule conjD\<^sub>2)
-qed
-
-text {*
-  \noindent Here is an example of swapping conjuncts with a single
-  intermediate elimination step:
-*}
-
-(*<*)
-lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
-proof -
-(*>*)
-  assume "A \<and> B"
-  then obtain B and A ..
-  then have "B \<and> A" ..
-(*<*)
-qed
-(*>*)
-
-text {*
-  \noindent Note that the analogous elimination rule for disjunction
-  ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
-  the original axiomatization of @{thm disjE}.
-
-  \medskip We continue propositional logic by introducing absurdity
-  with its characteristic elimination.  Plain truth may then be
-  defined as a proposition that is trivially true.
-*}
-
-axiomatization
-  false :: o  ("\<bottom>") where
-  falseE [elim]: "\<bottom> \<Longrightarrow> A"
-
-definition
-  true :: o  ("\<top>") where
-  "\<top> \<equiv> \<bottom> \<longrightarrow> \<bottom>"
-
-theorem trueI [intro]: \<top>
-  unfolding true_def ..
-
-text {*
-  \medskip\noindent Now negation represents an implication towards
-  absurdity:
-*}
-
-definition
-  not :: "o \<Rightarrow> o"  ("\<not> _" [40] 40) where
-  "\<not> A \<equiv> A \<longrightarrow> \<bottom>"
-
-theorem notI [intro]:
-  assumes "A \<Longrightarrow> \<bottom>"
-  shows "\<not> A"
-unfolding not_def
-proof
-  assume A
-  then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
-qed
-
-theorem notE [elim]:
-  assumes "\<not> A" and A
-  shows B
-proof -
-  from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
-  from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
-  then show B ..
-qed
-
-
-subsection {* Classical logic *}
-
-text {*
-  Subsequently we state the principle of classical contradiction as a
-  local assumption.  Thus we refrain from forcing the object-logic
-  into the classical perspective.  Within that context, we may derive
-  well-known consequences of the classical principle.
-*}
-
-locale classical =
-  assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
-begin
-
-theorem double_negation:
-  assumes "\<not> \<not> C"
-  shows C
-proof (rule classical)
-  assume "\<not> C"
-  with `\<not> \<not> C` show C ..
-qed
-
-theorem tertium_non_datur: "C \<or> \<not> C"
-proof (rule double_negation)
-  show "\<not> \<not> (C \<or> \<not> C)"
-  proof
-    assume "\<not> (C \<or> \<not> C)"
-    have "\<not> C"
-    proof
-      assume C then have "C \<or> \<not> C" ..
-      with `\<not> (C \<or> \<not> C)` show \<bottom> ..
-    qed
-    then have "C \<or> \<not> C" ..
-    with `\<not> (C \<or> \<not> C)` show \<bottom> ..
-  qed
-qed
-
-text {*
-  \noindent These examples illustrate both classical reasoning and
-  non-trivial propositional proofs in general.  All three rules
-  characterize classical logic independently, but the original rule is
-  already the most convenient to use, because it leaves the conclusion
-  unchanged.  Note that @{prop "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"} fits again into our
-  format for eliminations, despite the additional twist that the
-  context refers to the main conclusion.  So we may write @{thm
-  classical} as the Isar statement ``@{text "\<OBTAINS> \<not> thesis"}''.
-  This also explains nicely how classical reasoning really works:
-  whatever the main @{text thesis} might be, we may always assume its
-  negation!
-*}
-
-end
-
-
-subsection {* Quantifiers \label{sec:framework-ex-quant} *}
-
-text {*
-  Representing quantifiers is easy, thanks to the higher-order nature
-  of the underlying framework.  According to the well-known technique
-  introduced by Church \cite{church40}, quantifiers are operators on
-  predicates, which are syntactically represented as @{text "\<lambda>"}-terms
-  of type @{typ "i \<Rightarrow> o"}.  Binder notation turns @{text "All (\<lambda>x. B
-  x)"} into @{text "\<forall>x. B x"} etc.
-*}
-
-axiomatization
-  All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) where
-  allI [intro]: "(\<And>x. B x) \<Longrightarrow> \<forall>x. B x" and
-  allD [dest]: "(\<forall>x. B x) \<Longrightarrow> B a"
-
-axiomatization
-  Ex :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<exists>" 10) where
-  exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
-  exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
-
-text {*
-  \noindent The statement of @{thm exE} corresponds to ``@{text
-  "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar.  In the
-  subsequent example we illustrate quantifier reasoning involving all
-  four rules:
-*}
-
-theorem
-  assumes "\<exists>x. \<forall>y. R x y"
-  shows "\<forall>y. \<exists>x. R x y"
-proof    -- {* @{text "\<forall>"} introduction *}
-  obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` ..    -- {* @{text "\<exists>"} elimination *}
-  fix y have "R x y" using `\<forall>y. R x y` ..    -- {* @{text "\<forall>"} destruction *}
-  then show "\<exists>x. R x y" ..    -- {* @{text "\<exists>"} introduction *}
-qed
-
-
-subsection {* Canonical reasoning patterns *}
-
-text {*
-  The main rules of first-order predicate logic from
-  \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
-  can now be summarized as follows, using the native Isar statement
-  format of \secref{sec:framework-stmt}.
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
-  @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
-
-  @{text "disjI\<^sub>1: \<ASSUMES> A \<SHOWS> A \<or> B"} \\
-  @{text "disjI\<^sub>2: \<ASSUMES> B \<SHOWS> A \<or> B"} \\
-  @{text "disjE: \<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"} \\[1ex]
-
-  @{text "conjI: \<ASSUMES> A \<AND> B \<SHOWS> A \<and> B"} \\
-  @{text "conjE: \<ASSUMES> A \<and> B \<OBTAINS> A \<AND> B"} \\[1ex]
-
-  @{text "falseE: \<ASSUMES> \<bottom> \<SHOWS> A"} \\
-  @{text "trueI: \<SHOWS> \<top>"} \\[1ex]
-
-  @{text "notI: \<ASSUMES> A \<Longrightarrow> \<bottom> \<SHOWS> \<not> A"} \\
-  @{text "notE: \<ASSUMES> \<not> A \<AND> A \<SHOWS> B"} \\[1ex]
-
-  @{text "allI: \<ASSUMES> \<And>x. B x \<SHOWS> \<forall>x. B x"} \\
-  @{text "allE: \<ASSUMES> \<forall>x. B x \<SHOWS> B a"} \\[1ex]
-
-  @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
-  @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
-  \end{tabular}
-  \medskip
-
-  \noindent This essentially provides a declarative reading of Pure
-  rules as Isar reasoning patterns: the rule statements tell what a
-  canonical proof outline should look like.  Since the above rules have
-  already been declared as @{attribute (Pure) intro}, @{attribute
-  (Pure) elim}, @{attribute (Pure) dest} --- each according to its
-  particular shape --- we can immediately write Isar proof texts as
-  follows:
-*}
-
-(*<*)
-theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
-proof -
-(*>*)
-
-  txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<longrightarrow> B"
-  proof
-    assume A
-    show B sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<longrightarrow> B" and A sorry %noproof
-  then have B ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have A sorry %noproof
-  then have "A \<or> B" ..
-
-  have B sorry %noproof
-  then have "A \<or> B" ..
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<or> B" sorry %noproof
-  then have C
-  proof
-    assume A
-    then show C sorry %noproof
-  next
-    assume B
-    then show C sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have A and B sorry %noproof
-  then have "A \<and> B" ..
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<and> B" sorry %noproof
-  then obtain A and B ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<bottom>" sorry %noproof
-  then have A ..
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<top>" ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<not> A"
-  proof
-    assume A
-    then show "\<bottom>" sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<not> A" and A sorry %noproof
-  then have B ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<forall>x. B x"
-  proof
-    fix x
-    show "B x" sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<forall>x. B x" sorry %noproof
-  then have "B a" ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<exists>x. B x"
-  proof
-    show "B a" sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<exists>x. B x" sorry %noproof
-  then obtain a where "B a" ..
-
-  txt_raw {*\end{minipage}*}
-
-(*<*)
-qed
-(*>*)
-
-text {*
-  \bigskip\noindent Of course, these proofs are merely examples.  As
-  sketched in \secref{sec:framework-subproof}, there is a fair amount
-  of flexibility in expressing Pure deductions in Isar.  Here the user
-  is asked to express himself adequately, aiming at proof texts of
-  literary quality.
-*}
-
-end %visible
--- a/src/Doc/IsarRef/Framework.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1016 +0,0 @@
-theory Framework
-imports Base Main
-begin
-
-chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
-
-text {*
-  Isabelle/Isar
-  \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
-  is intended as a generic framework for developing formal
-  mathematical documents with full proof checking.  Definitions and
-  proofs are organized as theories.  An assembly of theory sources may
-  be presented as a printed document; see also
-  \chref{ch:document-prep}.
-
-  The main objective of Isar is the design of a human-readable
-  structured proof language, which is called the ``primary proof
-  format'' in Isar terminology.  Such a primary proof language is
-  somewhere in the middle between the extremes of primitive proof
-  objects and actual natural language.  In this respect, Isar is a bit
-  more formalistic than Mizar
-  \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
-  using logical symbols for certain reasoning schemes where Mizar
-  would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
-  further comparisons of these systems.
-
-  So Isar challenges the traditional way of recording informal proofs
-  in mathematical prose, as well as the common tendency to see fully
-  formal proofs directly as objects of some logical calculus (e.g.\
-  @{text "\<lambda>"}-terms in a version of type theory).  In fact, Isar is
-  better understood as an interpreter of a simple block-structured
-  language for describing the data flow of local facts and goals,
-  interspersed with occasional invocations of proof methods.
-  Everything is reduced to logical inferences internally, but these
-  steps are somewhat marginal compared to the overall bookkeeping of
-  the interpretation process.  Thanks to careful design of the syntax
-  and semantics of Isar language elements, a formal record of Isar
-  instructions may later appear as an intelligible text to the
-  attentive reader.
-
-  The Isar proof language has emerged from careful analysis of some
-  inherent virtues of the existing logical framework of Isabelle/Pure
-  \cite{paulson-found,paulson700}, notably composition of higher-order
-  natural deduction rules, which is a generalization of Gentzen's
-  original calculus \cite{Gentzen:1935}.  The approach of generic
-  inference systems in Pure is continued by Isar towards actual proof
-  texts.
-
-  Concrete applications require another intermediate layer: an
-  object-logic.  Isabelle/HOL \cite{isa-tutorial} (simply-typed
-  set-theory) is used most of the time; Isabelle/ZF
-  \cite{isabelle-ZF} is less extensively developed, although it would
-  probably fit better for classical mathematics.
-
-  \medskip In order to illustrate natural deduction in Isar, we shall
-  refer to the background theory and library of Isabelle/HOL.  This
-  includes common notions of predicate logic, naive set-theory etc.\
-  using fairly standard mathematical notation.  From the perspective
-  of generic natural deduction there is nothing special about the
-  logical connectives of HOL (@{text "\<and>"}, @{text "\<or>"}, @{text "\<forall>"},
-  @{text "\<exists>"}, etc.), only the resulting reasoning principles are
-  relevant to the user.  There are similar rules available for
-  set-theory operators (@{text "\<inter>"}, @{text "\<union>"}, @{text "\<Inter>"}, @{text
-  "\<Union>"}, etc.), or any other theory developed in the library (lattice
-  theory, topology etc.).
-
-  Subsequently we briefly review fragments of Isar proof texts
-  corresponding directly to such general deduction schemes.  The
-  examples shall refer to set-theory, to minimize the danger of
-  understanding connectives of predicate logic as something special.
-
-  \medskip The following deduction performs @{text "\<inter>"}-introduction,
-  working forwards from assumptions towards the conclusion.  We give
-  both the Isar text, and depict the primitive rule involved, as
-  determined by unification of the problem against rules that are
-  declared in the library context.
-*}
-
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> A" and "x \<in> B"
-    then have "x \<in> A \<inter> B" ..
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
-
-text {*
-  \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
-*}
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Note that @{command assume} augments the proof
-  context, @{command then} indicates that the current fact shall be
-  used in the next step, and @{command have} states an intermediate
-  goal.  The two dots ``@{command ".."}'' refer to a complete proof of
-  this claim, using the indicated facts and a canonical rule from the
-  context.  We could have been more explicit here by spelling out the
-  final proof step via the @{command "by"} command:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> A" and "x \<in> B"
-    then have "x \<in> A \<inter> B" by (rule IntI)
-(*<*)
-end
-(*>*)
-
-text {*
-  \noindent The format of the @{text "\<inter>"}-introduction rule represents
-  the most basic inference, which proceeds from given premises to a
-  conclusion, without any nested proof context involved.
-
-  The next example performs backwards introduction on @{term "\<Inter>\<A>"},
-  the intersection of all sets within a given set.  This requires a
-  nested proof of set membership within a local context, where @{term
-  A} is an arbitrary-but-fixed member of the collection:
-*}
-
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    have "x \<in> \<Inter>\<A>"
-    proof
-      fix A
-      assume "A \<in> \<A>"
-      show "x \<in> A" sorry %noproof
-    qed
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
-
-text {*
-  \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
-*}
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent This Isar reasoning pattern again refers to the
-  primitive rule depicted above.  The system determines it in the
-  ``@{command proof}'' step, which could have been spelt out more
-  explicitly as ``@{command proof}~@{text "(rule InterI)"}''.  Note
-  that the rule involves both a local parameter @{term "A"} and an
-  assumption @{prop "A \<in> \<A>"} in the nested reasoning.  This kind of
-  compound rule typically demands a genuine sub-proof in Isar, working
-  backwards rather than forwards as seen before.  In the proof body we
-  encounter the @{command fix}-@{command assume}-@{command show}
-  outline of nested sub-proofs that is typical for Isar.  The final
-  @{command show} is like @{command have} followed by an additional
-  refinement of the enclosing claim, using the rule derived from the
-  proof body.
-
-  \medskip The next example involves @{term "\<Union>\<A>"}, which can be
-  characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
-  \<in> A \<and> A \<in> \<A>"}.  The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
-  not mention @{text "\<exists>"} and @{text "\<and>"} at all, but allows us to obtain
-  a local @{term "A"} directly such that @{prop "x \<in> A"} and @{prop "A
-  \<in> \<A>"} hold.  This corresponds to the following Isar proof and
-  inference rule, respectively:
-*}
-
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> \<Union>\<A>"
-    then have C
-    proof
-      fix A
-      assume "x \<in> A" and "A \<in> \<A>"
-      show C sorry %noproof
-    qed
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
-
-text {*
-  \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
-*}
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Although the Isar proof follows the natural
-  deduction rule closely, the text does not read as naturally as
-  anticipated.  There is a double occurrence of an arbitrary
-  conclusion @{prop "C"}, which represents the final result, but is
-  irrelevant for now.  This issue arises for any elimination rule
-  involving local parameters.  Isar provides the derived language
-  element @{command obtain}, which is able to perform the same
-  elimination proof more conveniently:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> \<Union>\<A>"
-    then obtain A where "x \<in> A" and "A \<in> \<A>" ..
-(*<*)
-end
-(*>*)
-
-text {*
-  \noindent Here we avoid mentioning the final conclusion @{prop "C"}
-  and return to plain forward reasoning.  The rule involved in the
-  ``@{command ".."}'' proof is the same as before.
-*}
-
-
-section {* The Pure framework \label{sec:framework-pure} *}
-
-text {*
-  The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
-  fragment of higher-order logic \cite{church40}.  In type-theoretic
-  parlance, there are three levels of @{text "\<lambda>"}-calculus with
-  corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
-
-  \medskip
-  \begin{tabular}{ll}
-  @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
-  @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
-  @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
-  \end{tabular}
-  \medskip
-
-  \noindent Here only the types of syntactic terms, and the
-  propositions of proof terms have been shown.  The @{text
-  "\<lambda>"}-structure of proofs can be recorded as an optional feature of
-  the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
-  the formal system can never depend on them due to \emph{proof
-  irrelevance}.
-
-  On top of this most primitive layer of proofs, Pure implements a
-  generic calculus for nested natural deduction rules, similar to
-  \cite{Schroeder-Heister:1984}.  Here object-logic inferences are
-  internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
-  Combining such rule statements may involve higher-order unification
-  \cite{paulson-natural}.
-*}
-
-
-subsection {* Primitive inferences *}
-
-text {*
-  Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
-  \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
-  implicit thanks to type-inference; terms of type @{text "prop"} are
-  called propositions.  Logical statements are composed via @{text "\<And>x
-  :: \<alpha>. B(x)"} and @{text "A \<Longrightarrow> B"}.  Primitive reasoning operates on
-  judgments of the form @{text "\<Gamma> \<turnstile> \<phi>"}, with standard introduction
-  and elimination rules for @{text "\<And>"} and @{text "\<Longrightarrow>"} that refer to
-  fixed parameters @{text "x\<^sub>1, \<dots>, x\<^sub>m"} and hypotheses
-  @{text "A\<^sub>1, \<dots>, A\<^sub>n"} from the context @{text "\<Gamma>"};
-  the corresponding proof terms are left implicit.  The subsequent
-  inference rules define @{text "\<Gamma> \<turnstile> \<phi>"} inductively, relative to a
-  collection of axioms:
-
-  \[
-  \infer{@{text "\<turnstile> A"}}{(@{text "A"} \text{~axiom})}
-  \qquad
-  \infer{@{text "A \<turnstile> A"}}{}
-  \]
-
-  \[
-  \infer{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}{@{text "\<Gamma> \<turnstile> B(x)"} & @{text "x \<notin> \<Gamma>"}}
-  \qquad
-  \infer{@{text "\<Gamma> \<turnstile> B(a)"}}{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}
-  \]
-
-  \[
-  \infer{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
-  \qquad
-  \infer{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
-  \]
-
-  Furthermore, Pure provides a built-in equality @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow>
-  prop"} with axioms for reflexivity, substitution, extensionality,
-  and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
-
-  \medskip An object-logic introduces another layer on top of Pure,
-  e.g.\ with types @{text "i"} for individuals and @{text "o"} for
-  propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
-  (implicit) derivability judgment and connectives like @{text "\<and> :: o
-  \<Rightarrow> o \<Rightarrow> o"} or @{text "\<forall> :: (i \<Rightarrow> o) \<Rightarrow> o"}, and axioms for object-level
-  rules such as @{text "conjI: A \<Longrightarrow> B \<Longrightarrow> A \<and> B"} or @{text "allI: (\<And>x. B
-  x) \<Longrightarrow> \<forall>x. B x"}.  Derived object rules are represented as theorems of
-  Pure.  After the initial object-logic setup, further axiomatizations
-  are usually avoided; plain definitions and derived principles are
-  used exclusively.
-*}
-
-
-subsection {* Reasoning with rules \label{sec:framework-resolution} *}
-
-text {*
-  Primitive inferences mostly serve foundational purposes.  The main
-  reasoning mechanisms of Pure operate on nested natural deduction
-  rules expressed as formulae, using @{text "\<And>"} to bind local
-  parameters and @{text "\<Longrightarrow>"} to express entailment.  Multiple
-  parameters and premises are represented by repeating these
-  connectives in a right-associative manner.
-
-  Since @{text "\<And>"} and @{text "\<Longrightarrow>"} commute thanks to the theorem
-  @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}, we may assume w.l.o.g.\
-  that rule statements always observe the normal form where
-  quantifiers are pulled in front of implications at each level of
-  nesting.  This means that any Pure proposition may be presented as a
-  \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
-  form @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. H\<^sub>1 \<Longrightarrow> \<dots> H\<^sub>n \<Longrightarrow>
-  A"} for @{text "m, n \<ge> 0"}, and @{text "A"} atomic, and @{text
-  "H\<^sub>1, \<dots>, H\<^sub>n"} being recursively of the same format.
-  Following the convention that outermost quantifiers are implicit,
-  Horn clauses @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"} are a special
-  case of this.
-
-  For example, the @{text "\<inter>"}-introduction rule encountered before is
-  represented as a Pure theorem as follows:
-  \[
-  @{text "IntI:"}~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
-  \]
-
-  \noindent This is a plain Horn clause, since no further nesting on
-  the left is involved.  The general @{text "\<Inter>"}-introduction
-  corresponds to a Hereditary Harrop Formula with one additional level
-  of nesting:
-  \[
-  @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
-  \]
-
-  \medskip Goals are also represented as rules: @{text "A\<^sub>1 \<Longrightarrow>
-  \<dots> A\<^sub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^sub>1, \<dots>,
-  A\<^sub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
-  goal is finished.  To allow @{text "C"} to be a rule statement
-  itself, we introduce the protective marker @{text "# :: prop \<Rightarrow>
-  prop"}, which is defined as identity and hidden from the user.  We
-  initialize and finish goal states as follows:
-
-  \[
-  \begin{array}{c@ {\qquad}c}
-  \infer[(@{inference_def init})]{@{text "C \<Longrightarrow> #C"}}{} &
-  \infer[(@{inference_def finish})]{@{text C}}{@{text "#C"}}
-  \end{array}
-  \]
-
-  \noindent Goal states are refined in intermediate proof steps until
-  a finished form is achieved.  Here the two main reasoning principles
-  are @{inference resolution}, for back-chaining a rule against a
-  sub-goal (replacing it by zero or more sub-goals), and @{inference
-  assumption}, for solving a sub-goal (finding a short-circuit with
-  local assumptions).  Below @{text "\<^vec>x"} stands for @{text
-  "x\<^sub>1, \<dots>, x\<^sub>n"} (@{text "n \<ge> 0"}).
-
-  \[
-  \infer[(@{inference_def resolution})]
-  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
-  {\begin{tabular}{rl}
-    @{text "rule:"} &
-    @{text "\<^vec>A \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
-    @{text "goal:"} &
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "goal unifier:"} &
-    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
-   \end{tabular}}
-  \]
-
-  \medskip
-
-  \[
-  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
-  {\begin{tabular}{rl}
-    @{text "goal:"} &
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "assm unifier:"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text "H\<^sub>i"})} \\
-   \end{tabular}}
-  \]
-
-  The following trace illustrates goal-oriented reasoning in
-  Isabelle/Pure:
-
-  {\footnotesize
-  \medskip
-  \begin{tabular}{r@ {\quad}l}
-  @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
-  @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
-  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> B)"} \\
-  @{text "(A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(assumption)"} \\
-  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> A)"} \\
-  @{text "#\<dots>"} & @{text "(assumption)"} \\
-  @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
-  \end{tabular}
-  \medskip
-  }
-
-  Compositions of @{inference assumption} after @{inference
-  resolution} occur quite often, typically in elimination steps.
-  Traditional Isabelle tactics accommodate this by a combined
-  @{inference_def elim_resolution} principle.  In contrast, Isar uses
-  a slightly more refined combination, where the assumptions to be
-  closed are marked explicitly, using again the protective marker
-  @{text "#"}:
-
-  \[
-  \infer[(@{inference refinement})]
-  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>G' (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
-  {\begin{tabular}{rl}
-    @{text "sub\<hyphen>proof:"} &
-    @{text "\<^vec>G \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
-    @{text "goal:"} &
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "goal unifier:"} &
-    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
-    @{text "assm unifiers:"} &
-    @{text "(\<lambda>\<^vec>x. G\<^sub>j (\<^vec>a \<^vec>x))\<vartheta> = #H\<^sub>i\<vartheta>"} \\
-    & \quad (for each marked @{text "G\<^sub>j"} some @{text "#H\<^sub>i"}) \\
-   \end{tabular}}
-  \]
-
-  \noindent Here the @{text "sub\<hyphen>proof"} rule stems from the
-  main @{command fix}-@{command assume}-@{command show} outline of
-  Isar (cf.\ \secref{sec:framework-subproof}): each assumption
-  indicated in the text results in a marked premise @{text "G"} above.
-  The marking enforces resolution against one of the sub-goal's
-  premises.  Consequently, @{command fix}-@{command assume}-@{command
-  show} makes it possible to fit the result of a sub-proof quite robustly into a
-  pending sub-goal, while maintaining a good measure of flexibility.
-*}
-
-
-section {* The Isar proof language \label{sec:framework-isar} *}
-
-text {*
-  Structured proofs are presented as high-level expressions for
-  composing entities of Pure (propositions, facts, and goals).  The
-  Isar proof language allows reasoning to be organized within the
-  underlying rule calculus of Pure, but Isar is not another logical
-  calculus!
-
-  Isar is an exercise in sound minimalism.  Approximately half of the
-  language is introduced as primitive, the rest defined as derived
-  concepts.  The following grammar describes the core language
-  (category @{text "proof"}), which is embedded into theory
-  specification elements such as @{command theorem}; see also
-  \secref{sec:framework-stmt} for the separate category @{text
-  "statement"}.
-
-  \medskip
-  \begin{tabular}{rcl}
-    @{text "theory\<hyphen>stmt"} & = & @{command "theorem"}~@{text "statement proof  |"}~~@{command "definition"}~@{text "\<dots>  |  \<dots>"} \\[1ex]
-
-    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\[1ex]
-
-    @{text prfx} & = & @{command "using"}~@{text "facts"} \\
-    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
-
-    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
-    & @{text "|"} & @{command "next"} \\
-    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
-    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
-    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
-    & @{text "|"} & @{command assume}~@{text "\<guillemotleft>inference\<guillemotright> name: props"} \\
-    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
-    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
-    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
-  \end{tabular}
-
-  \medskip Simultaneous propositions or facts may be separated by the
-  @{keyword "and"} keyword.
-
-  \medskip The syntax for terms and propositions is inherited from
-  Pure (and the object-logic).  A @{text "pattern"} is a @{text
-  "term"} with schematic variables, to be bound by higher-order
-  matching.
-
-  \medskip Facts may be referenced by name or proposition.  For
-  example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
-  becomes available both as @{text "a"} and
-  \isacharbackquoteopen@{text "A"}\isacharbackquoteclose.  Moreover,
-  fact expressions may involve attributes that modify either the
-  theorem or the background context.  For example, the expression
-  ``@{text "a [OF b]"}'' refers to the composition of two facts
-  according to the @{inference resolution} inference of
-  \secref{sec:framework-resolution}, while ``@{text "a [intro]"}''
-  declares a fact as introduction rule in the context.
-
-  The special fact called ``@{fact this}'' always refers to the last
-  result, as produced by @{command note}, @{command assume}, @{command
-  have}, or @{command show}.  Since @{command note} occurs
-  frequently together with @{command then} we provide some
-  abbreviations:
-
-  \medskip
-  \begin{tabular}{rcl}
-    @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
-    @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
-  \end{tabular}
-  \medskip
-
-  The @{text "method"} category is essentially a parameter and may be
-  populated later.  Methods use the facts indicated by @{command
-  "then"} or @{command using}, and then operate on the goal state.
-  Some basic methods are predefined: ``@{method "-"}'' leaves the goal
-  unchanged, ``@{method this}'' applies the facts as rules to the
-  goal, ``@{method (Pure) "rule"}'' applies the facts to another rule and the
-  result to the goal (both ``@{method this}'' and ``@{method (Pure) rule}''
-  refer to @{inference resolution} of
-  \secref{sec:framework-resolution}).  The secondary arguments to
-  ``@{method (Pure) rule}'' may be specified explicitly as in ``@{text "(rule
-  a)"}'', or picked from the context.  In the latter case, the system
-  first tries rules declared as @{attribute (Pure) elim} or
-  @{attribute (Pure) dest}, followed by those declared as @{attribute
-  (Pure) intro}.
-
-  The default method for @{command proof} is ``@{method (Pure) rule}''
-  (arguments picked from the context), for @{command qed} it is
-  ``@{method "-"}''.  Further abbreviations for terminal proof steps
-  are ``@{command "by"}~@{text "method\<^sub>1 method\<^sub>2"}'' for
-  ``@{command proof}~@{text "method\<^sub>1"}~@{command qed}~@{text
-  "method\<^sub>2"}'', and ``@{command ".."}'' for ``@{command
-  "by"}~@{method (Pure) rule}, and ``@{command "."}'' for ``@{command
-  "by"}~@{method this}''.  The @{command unfolding} element operates
-  directly on the current facts and goal by applying equalities.
-
-  \medskip Block structure can be indicated explicitly by ``@{command
-  "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
-  already involves implicit nesting.  In any case, @{command next}
-  jumps into the next section of a block, i.e.\ it acts like closing
-  an implicit block scope and opening another one; there is no direct
-  correspondence to subgoals here.
-
-  The remaining elements @{command fix} and @{command assume} build up
-  a local context (see \secref{sec:framework-context}), while
-  @{command show} refines a pending sub-goal by the rule resulting
-  from a nested sub-proof (see \secref{sec:framework-subproof}).
-  Further derived concepts will support calculational reasoning (see
-  \secref{sec:framework-calc}).
-*}
-
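-text {*
-  \medskip The following minimal sketch (with purely hypothetical
-  facts @{text A} and @{text B}) illustrates @{command from} and
-  @{command with} together with the terminal step ``@{command ".."}'':
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  assume a: A and b: B
-  from a and b have "A \<and> B" ..
-(*<*)
-next
-(*>*)
-  assume a: A
-  assume B
-  with a have "A \<and> B" ..
-(*<*)
-end
-(*>*)
-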
-
-subsection {* Context elements \label{sec:framework-context} *}
-
-text {*
-  In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
-  essentially acts like a proof context.  Isar elaborates this idea
-  towards a higher-level notion, with additional information for
-  type-inference, term abbreviations, local facts, hypotheses etc.
-
-  The element @{command fix}~@{text "x :: \<alpha>"} declares a local
-  parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
-  results exported from the context, @{text "x"} may become anything.
-  The @{command assume}~@{text "\<guillemotleft>inference\<guillemotright>"} element provides a
-  general interface to hypotheses: ``@{command assume}~@{text
-  "\<guillemotleft>inference\<guillemotright> A"}'' produces @{text "A \<turnstile> A"} locally, while the
-  included inference tells how to discharge @{text A} from results
-  @{text "A \<turnstile> B"} later on.  There is no user-syntax for @{text
-  "\<guillemotleft>inference\<guillemotright>"}, i.e.\ it may only occur internally when derived
-  commands are defined in ML.
-
-  At the user-level, the default inference for @{command assume} is
-  @{inference discharge} as given below.  The additional variants
-  @{command presume} and @{command def} are defined as follows:
-
-  \medskip
-  \begin{tabular}{rcl}
-    @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<hyphen>discharge\<guillemotright> A"} \\
-    @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
-  \end{tabular}
-  \medskip
-
-  \[
-  \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
-  \]
-  \[
-  \infer[(@{inference_def "weak\<hyphen>discharge"})]{@{text "\<strut>\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
-  \]
-  \[
-  \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
-  \]
-
-  \medskip Note that @{inference discharge} and @{inference
-  "weak\<hyphen>discharge"} differ in the marker for @{prop A}, which is
-  relevant when the result of a @{command fix}-@{command
-  assume}-@{command show} outline is composed with a pending goal,
-  cf.\ \secref{sec:framework-subproof}.
-
-  The most interesting derived context element in Isar is @{command
-  obtain} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
-  elimination steps in a purely forward manner.  The @{command obtain}
-  command takes a specification of parameters @{text "\<^vec>x"} and
-  assumptions @{text "\<^vec>A"} to be added to the context, together
-  with a proof of a case rule stating that this extension is
-  conservative (i.e.\ may be removed from closed results later on):
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x  \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
-  \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis\<rangle>"} \\
-  \quad @{command proof}~@{method "-"} \\
-  \qquad @{command fix}~@{text thesis} \\
-  \qquad @{command assume}~@{text "[intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis"} \\
-  \qquad @{command show}~@{text thesis}~@{command using}~@{text "\<langle>facts\<rangle> \<langle>proof\<rangle>"} \\
-  \quad @{command qed} \\
-  \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
-  \end{tabular}
-  \medskip
-
-  \[
-  \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
-    \begin{tabular}{rl}
-    @{text "case:"} &
-    @{text "\<Gamma> \<turnstile> \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\[0.2ex]
-    @{text "result:"} &
-    @{text "\<Gamma> \<union> \<^vec>A \<^vec>y \<turnstile> B"} \\[0.2ex]
-    \end{tabular}}
-  \]
-
-  \noindent Here the name ``@{text thesis}'' is a specific convention
-  for an arbitrary-but-fixed proposition; in the primitive natural
-  deduction rules shown before we have occasionally used @{text C}.
-  The whole statement of ``@{command obtain}~@{text x}~@{keyword
-  "where"}~@{text "A x"}'' may be read as a claim that @{text "A x"}
-  may be assumed for some arbitrary-but-fixed @{text "x"}.  Also note
-  that ``@{command obtain}~@{text "A \<AND> B"}'' without parameters
-  is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
-  latter involves multiple sub-goals.
-
-  \medskip The subsequent Isar proof texts explain all context
-  elements introduced above using the formal proof language itself.
-  After finishing a local proof within a block, we indicate the
-  exported result via @{command note}.
-*}
-
-(*<*)
-theorem True
-proof
-(*>*)
-  txt_raw {* \begin{minipage}[t]{0.45\textwidth} *}
-  {
-    fix x
-    have "B x" sorry %noproof
-  }
-  note `\<And>x. B x`
-  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
-  {
-    assume A
-    have B sorry %noproof
-  }
-  note `A \<Longrightarrow> B`
-  txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
-  {
-    def x \<equiv> a
-    have "B x" sorry %noproof
-  }
-  note `B a`
-  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
-  {
-    obtain x where "A x" sorry %noproof
-    have B sorry %noproof
-  }
-  note `B`
-  txt_raw {* \end{minipage} *}
-(*<*)
-qed
-(*>*)
-
-text {*
-  \bigskip\noindent This illustrates the meaning of Isar context
-  elements without goals getting in between.
-*}
-
-subsection {* Structured statements \label{sec:framework-stmt} *}
-
-text {*
-  The category @{text "statement"} of top-level theorem specifications
-  is defined as follows:
-
-  \medskip
-  \begin{tabular}{rcl}
-  @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
-  & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
-
-  @{text "context"} & @{text "\<equiv>"} & @{text "\<FIXES> vars \<AND> \<dots>"} \\
-  & @{text "|"} & @{text "\<ASSUMES> name: props \<AND> \<dots>"} \\
-
-  @{text "conclusion"} & @{text "\<equiv>"} & @{text "\<SHOWS> name: props \<AND> \<dots>"} \\
-  & @{text "|"} & @{text "\<OBTAINS> vars \<AND> \<dots> \<WHERE> name: props \<AND> \<dots>"} \\
-  & & \quad @{text "\<BBAR> \<dots>"} \\
-  \end{tabular}
-
-  \medskip\noindent A simple @{text "statement"} consists of named
-  propositions.  The full form admits local context elements followed
-  by the actual conclusions, such as ``@{keyword "fixes"}~@{text
-  x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
-  x"}''.  The final result emerges as a Pure rule after discharging
-  the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
-
-  The @{keyword "obtains"} variant is another abbreviation defined
-  below; unlike @{command obtain} (cf.\
-  \secref{sec:framework-context}) there may be several ``cases''
-  separated by ``@{text "\<BBAR>"}'', each consisting of several
-  parameters (@{text "vars"}) and several premises (@{text "props"}).
-  This specifies multi-branch elimination rules.
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x   \<BBAR>   \<dots>   \<equiv>"} \\[0.5ex]
-  \quad @{text "\<FIXES> thesis"} \\
-  \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis  \<AND>  \<dots>"} \\
-  \quad @{text "\<SHOWS> thesis"} \\
-  \end{tabular}
-  \medskip
-
-  Presenting structured statements in such an ``open'' format usually
-  simplifies the subsequent proof, because the outer structure of the
-  problem is already laid out directly.  E.g.\ consider the following
-  canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
-  respectively:
-*}
-
-text_raw {*\begin{minipage}{0.5\textwidth}*}
-
-theorem
-  fixes x and y
-  assumes "A x" and "B y"
-  shows "C x y"
-proof -
-  from `A x` and `B y`
-  show "C x y" sorry %noproof
-qed
-
-text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
-
-theorem
-  obtains x and y
-  where "A x" and "B y"
-proof -
-  have "A a" and "B b" sorry %noproof
-  then show thesis ..
-qed
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
-  x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
-  y"}\isacharbackquoteclose\ are referenced immediately; there is no
-  need to decompose the logical rule structure again.  In the second
-  proof the final ``@{command then}~@{command show}~@{text
-  thesis}~@{command ".."}''  involves the local rule case @{text "\<And>x
-  y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
-  "a"} and @{text "b"} produced in the body.
-*}
-
-
-subsection {* Structured proof refinement \label{sec:framework-subproof} *}
-
-text {*
-  By breaking up the grammar for the Isar proof language, we may
-  understand a proof text as a linear sequence of individual proof
-  commands.  These are interpreted as transitions of the Isar virtual
-  machine (Isar/VM), which operates on a block-structured
-  configuration in single steps.  This allows users to write proof
-  texts in an incremental manner, and inspect intermediate
-  configurations for debugging.
-
-  The basic idea is analogous to evaluating algebraic expressions on a
-  stack machine: @{text "(a + b) \<cdot> c"} then corresponds to a sequence
-  of single transitions for each symbol @{text "(, a, +, b, ), \<cdot>, c"}.
-  In Isar the algebraic values are facts or goals, and the operations
-  are inferences.
-
-  \medskip The Isar/VM state maintains a stack of nodes; each node
-  contains the local proof context, the linguistic mode, and a pending
-  goal (optional).  The mode determines the type of transition that
-  may be performed next; it essentially alternates between forward and
-  backward reasoning, with an intermediate stage for chained facts
-  (see \figref{fig:isar-vm}).
-
-  \begin{figure}[htb]
-  \begin{center}
-  \includegraphics[width=0.8\textwidth]{isar-vm}
-  \end{center}
-  \caption{Isar/VM modes}\label{fig:isar-vm}
-  \end{figure}
-
-  For example, in @{text "state"} mode Isar acts like a mathematical
-  scratch-pad, accepting declarations like @{command fix}, @{command
-  assume}, and claims like @{command have}, @{command show}.  A goal
-  statement changes the mode to @{text "prove"}, which means that we
-  may now refine the problem via @{command unfolding} or @{command
-  proof}.  Then we are again in @{text "state"} mode of a proof body,
-  which may issue @{command show} statements to solve pending
-  sub-goals.  A concluding @{command qed} will return to the original
-  @{text "state"} mode one level upwards.  The subsequent Isar/VM
-  trace indicates block structure, linguistic mode, goal state, and
-  inferences:
-*}
-
-text_raw {* \begingroup\footnotesize *}
-(*<*)notepad begin
-(*>*)
-  txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
-  have "A \<longrightarrow> B"
-  proof
-    assume A
-    show B
-      sorry %noproof
-  qed
-  txt_raw {* \end{minipage}\quad
-\begin{minipage}[t]{0.06\textwidth}
-@{text "begin"} \\
-\\
-\\
-@{text "begin"} \\
-@{text "end"} \\
-@{text "end"} \\
-\end{minipage}
-\begin{minipage}[t]{0.08\textwidth}
-@{text "prove"} \\
-@{text "state"} \\
-@{text "state"} \\
-@{text "prove"} \\
-@{text "state"} \\
-@{text "state"} \\
-\end{minipage}\begin{minipage}[t]{0.35\textwidth}
-@{text "(A \<longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
-@{text "(A \<Longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
-\\
-\\
-@{text "#(A \<longrightarrow> B)"} \\
-@{text "A \<longrightarrow> B"} \\
-\end{minipage}\begin{minipage}[t]{0.4\textwidth}
-@{text "(init)"} \\
-@{text "(resolution impI)"} \\
-\\
-\\
-@{text "(refinement #A \<Longrightarrow> B)"} \\
-@{text "(finish)"} \\
-\end{minipage} *}
-(*<*)
-end
-(*>*)
-text_raw {* \endgroup *}
-
-text {*
-  \noindent Here the @{inference refinement} inference from
-  \secref{sec:framework-resolution} mediates composition of Isar
-  sub-proofs nicely.  Observe that this principle incorporates some
-  degree of freedom in proof composition.  In particular, the proof
-  body allows parameters and assumptions to be re-ordered, or commuted
-  according to Hereditary Harrop Form.  Moreover, context elements
-  that are not used in a sub-proof may be omitted altogether.  For
-  example:
-*}
-
-text_raw {*\begin{minipage}{0.5\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix x and y
-    assume "A x" and "B y"
-    show "C x y" sorry %noproof
-  qed
-
-txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
-
-(*<*)
-next
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix x assume "A x"
-    fix y assume "B y"
-    show "C x y" sorry %noproof
-  qed
-
-txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
-
-(*<*)
-next
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix y assume "B y"
-    fix x assume "A x"
-    show "C x y" sorry
-  qed
-
-txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
-(*<*)
-next
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix y assume "B y"
-    fix x
-    show "C x y" sorry
-  qed
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Such ``peephole optimizations'' of Isar texts are
-  practically important to improve readability, by rearranging
-  context elements according to the natural flow of reasoning in the
-  body, while still observing the overall scoping rules.
-
-  \medskip This illustrates the basic idea of structured proof
-  processing in Isar.  The main mechanisms are based on natural
-  deduction rule composition within the Pure framework.  In
-  particular, there are no direct operations on goal states within the
-  proof body.  Moreover, there is no hidden automated reasoning
-  involved, just plain unification.
-*}
-
-
-subsection {* Calculational reasoning \label{sec:framework-calc} *}
-
-text {*
-  The existing Isar infrastructure is sufficiently flexible to support
-  calculational reasoning (chains of transitivity steps) as a derived
-  concept.  The generic proof elements introduced below depend on
-  rules declared as @{attribute trans} in the context.  It is left to
-  the object-logic to provide a suitable rule collection for mixed
-  relations of @{text "="}, @{text "<"}, @{text "\<le>"}, @{text "\<subset>"},
-  @{text "\<subseteq>"} etc.  Due to the flexibility of rule composition
-  (\secref{sec:framework-resolution}), substitution of equals by
-  equals is covered as well, even substitution of inequalities
-  involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
-  and \cite{Bauer-Wenzel:2001}.
-
-  The generic calculational mechanism is based on the observation that
-  rules such as @{text "trans:"}~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"}
-  proceed from the premises towards the conclusion in a deterministic
-  fashion.  Thus we may reason in forward mode, feeding intermediate
-  results into rules selected from the context.  The course of
-  reasoning is organized by maintaining a secondary fact called
-  ``@{fact calculation}'', apart from the primary ``@{fact this}''
-  already provided by the Isar primitives.  In the definitions below,
-  @{attribute OF} refers to @{inference resolution}
-  (\secref{sec:framework-resolution}) with multiple rule arguments,
-  and @{text "trans"} represents to a suitable rule from the context:
-
-  \begin{matharray}{rcl}
-    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
-    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
-    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\
-  \end{matharray}
-
-  \noindent The start of a calculation is determined implicitly in the
-  text: here @{command also} sets @{fact calculation} to the current
-  result; any subsequent occurrence will update @{fact calculation} by
-  combination with the next result and a transitivity rule.  The
-  calculational sequence is concluded via @{command finally}, where
-  the final result is exposed for use in a concluding claim.
-
-  Here is a canonical proof pattern, using @{command have} to
-  establish the intermediate results:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  have "a = b" sorry
-  also have "\<dots> = c" sorry
-  also have "\<dots> = d" sorry
-  finally have "a = d" .
-(*<*)
-end
-(*>*)
-
-text {*
-  \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
-  provided by the Isabelle/Isar syntax layer: it statically refers to
-  the right-hand side argument of the previous statement given in the
-  text.  Thus it happens to coincide with relevant sub-expressions in
-  the calculational chain, but the exact correspondence depends
-  on the transitivity rules involved.
-
-  \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
-  transitivities with only one premise.  Isar maintains a separate
-  rule collection declared via the @{attribute sym} attribute, to be
-  used in fact expressions ``@{text "a [symmetric]"}'', or single-step
-  proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
-  have}~@{text "y = x"}~@{command ".."}''.
-*}
-
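-text {*
-  \noindent For instance, the latter single-step pattern reads as
-  follows in a minimal sketch (with hypothetical terms @{text x} and
-  @{text y}):
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  assume "x = y"
-  then have "y = x" ..
-(*<*)
-end
-(*>*)
-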
-end
\ No newline at end of file
--- a/src/Doc/IsarRef/Generic.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2016 +0,0 @@
-theory Generic
-imports Base Main
-begin
-
-chapter {* Generic tools and packages \label{ch:gen-tools} *}
-
-section {* Configuration options \label{sec:config} *}
-
-text {* Isabelle/Pure maintains a record of named configuration
-  options within the theory or proof context, with values of type
-  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
-  string}.  Tools may declare options in ML, and then refer to these
-  values (relative to the context).  Thus global reference variables
-  are easily avoided.  The user may change the value of a
-  configuration option by means of an associated attribute of the same
-  name.  This form of context declaration works particularly well with
-  commands such as @{command "declare"} or @{command "using"} like
-  this:
-*}
-
-declare [[show_main_goal = false]]
-
-notepad
-begin
-  note [[show_main_goal = true]]
-end
-
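-text {*
-  \noindent Since the value of a @{ML_type bool} option defaults to
-  @{text true} when omitted, the declaration inside the proof body
-  above could equally be written as follows:
-*}
-
-notepad
-begin
-  note [[show_main_goal]]
-end
-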
-text {* For historical reasons, some tools cannot take the full proof
-  context into account and merely refer to the background theory.
-  This is accommodated by configuration options being declared as
-  ``global'', which may not be changed within a local context.
-
-  \begin{matharray}{rcll}
-    @{command_def "print_options"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "print_options"} prints the available configuration
-  options, with names, types, and current values.
-  
-  \item @{text "name = value"} as an attribute expression modifies the
-  named option, with the syntax of the value depending on the option's
-  type.  For @{ML_type bool} the default value is @{text true}.  Any
-  attempt to change a global option in a local context is ignored.
-
-  \end{description}
-*}
-
-
-section {* Basic proof tools *}
-
-subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def unfold} & : & @{text method} \\
-    @{method_def fold} & : & @{text method} \\
-    @{method_def insert} & : & @{text method} \\[0.5ex]
-    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def intro} & : & @{text method} \\
-    @{method_def elim} & : & @{text method} \\
-    @{method_def succeed} & : & @{text method} \\
-    @{method_def fail} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
-    ;
-    (@@{method erule} | @@{method drule} | @@{method frule})
-      ('(' @{syntax nat} ')')? @{syntax thmrefs}
-    ;
-    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
-  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
-  all goals; any chained facts provided are inserted into the goal and
-  subject to rewriting as well.
-
-  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
-  into all goals of the proof state.  Note that current facts
-  indicated for forward chaining are ignored.
-
-  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
-  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
-  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
-  method (see \secref{sec:pure-meth-att}), but apply rules by
-  elim-resolution, destruct-resolution, and forward-resolution,
-  respectively \cite{isabelle-implementation}.  The optional natural
-  number argument (default 0) specifies additional assumption steps to
-  be performed here.
-
-  Note that these methods are improper ones, mainly serving for
-  experimentation and tactic script emulation.  Different modes of
-  basic rule application are usually expressed in Isar at the proof
-  language level, rather than via implicit proof state manipulations.
-  For example, a proper single-step elimination would be done using
-  the plain @{method rule} method, with forward chaining of current
-  facts.
-
-  \item @{method intro} and @{method elim} repeatedly refine some goal
-  by intro- or elim-resolution, after having inserted any chained
-  facts.  Exactly the rules given as arguments are taken into account;
-  this allows fine-tuned decomposition of a proof problem, in contrast
-  to common automated tools.
-
-  \item @{method succeed} yields a single (unchanged) result; it is
-  the identity of the ``@{text ","}'' method combinator (cf.\
-  \secref{sec:proof-meth}).
-
-  \item @{method fail} yields an empty result sequence; it is the
-  identity of the ``@{text "|"}'' method combinator (cf.\
-  \secref{sec:proof-meth}).
-
-  \end{description}
-
-  \begin{matharray}{rcl}
-    @{attribute_def tagged} & : & @{text attribute} \\
-    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
-    @{attribute_def THEN} & : & @{text attribute} \\
-    @{attribute_def unfolded} & : & @{text attribute} \\
-    @{attribute_def folded} & : & @{text attribute} \\
-    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
-    @{attribute_def rotated} & : & @{text attribute} \\
-    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
-    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute tagged} @{syntax name} @{syntax name}
-    ;
-    @@{attribute untagged} @{syntax name}
-    ;
-    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
-    ;
-    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
-    ;
-    @@{attribute rotated} @{syntax int}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute tagged}~@{text "name value"} and @{attribute
-  untagged}~@{text name} add and remove \emph{tags} of some theorem.
-  Tags may be any list of string pairs that serve as formal comment.
-  The first string is considered the tag name, the second its value.
-  Note that @{attribute untagged} removes any tags of the same name.
-
-  \item @{attribute THEN}~@{text a} composes rules by resolution; it
-  resolves with the first premise of @{text a} (an alternative
-  position may also be specified).  See also @{ML_op "RS"} in
-  \cite{isabelle-implementation}.
-  
-  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
-  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
-  definitions throughout a rule.
-
-  \item @{attribute abs_def} turns an equation of the form @{prop "f x
-  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
-  simp} or @{method unfold} steps always expand it.  This also works
-  for object-logic equality.
-
-  \item @{attribute rotated}~@{text n} rotates the premises of a
-  theorem by @{text n} (default 1).
-
-  \item @{attribute (Pure) elim_format} turns a destruction rule into
-  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
-  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
-  
-  Note that the Classical Reasoner (\secref{sec:classical}) provides
-  its own version of this operation.
-
-  \item @{attribute no_vars} replaces schematic variables by free
-  ones; this is mainly for tuning output of pretty printed theorems.
-
-  \end{description}
-*}
-
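-text {*
-  \medskip The following minimal sketch applies @{attribute rotated}
-  to a purely hypothetical local fact:
-*}
-
-notepad
-begin
-  assume a: "A \<Longrightarrow> B \<Longrightarrow> C"
-  note a [rotated]    -- {* now @{text "B \<Longrightarrow> A \<Longrightarrow> C"} *}
-end
-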
-
-subsection {* Low-level equational reasoning *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def subst} & : & @{text method} \\
-    @{method_def hypsubst} & : & @{text method} \\
-    @{method_def split} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thmref}
-    ;
-    @@{method split} @{syntax thmrefs}
-  \<close>}
-
-  These methods provide low-level facilities for equational reasoning
-  that are intended for specialized applications only.  Normally,
-  single step calculations would be performed in a structured text
-  (see also \secref{sec:calculation}), while the Simplifier methods
-  provide the canonical way for automated normalization (see
-  \secref{sec:simplifier}).
-
-  \begin{description}
-
-  \item @{method subst}~@{text eq} performs a single substitution step
-  using rule @{text eq}, which may be either a meta or object
-  equality.
-
-  \item @{method subst}~@{text "(asm) eq"} substitutes in an
-  assumption.
-
-  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
-  substitutions in the conclusion. The numbers @{text i} to @{text j}
-  indicate the positions to substitute at.  Positions are ordered from
-  the top of the term tree moving down from left to right. For
-  example, in @{text "(a + b) + (c + d)"} there are three positions
-  where commutativity of @{text "+"} is applicable: 1 refers to @{text
-  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.
-
-  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
-  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
-  assume all substitutions are performed simultaneously.  Otherwise
-  the behaviour of @{text subst} is not specified.
-
-  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
-  substitutions in the assumptions. The positions refer to the
-  assumptions in order from left to right.  For example, given in a
-  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
-  commutativity of @{text "+"} is the subterm @{text "a + b"} and
-  position 2 is the subterm @{text "c + d"}.
-
-  \item @{method hypsubst} performs substitution using some
-  assumption; this only works for equations of the form @{text "x =
-  t"} where @{text x} is a free or bound variable.
-
-  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
-  splitting using the given rules.  Splitting is performed in the
-  conclusion or some assumption of the subgoal, depending on the
-  structure of the rule.
-  
-  Note that the @{method simp} method already involves repeated
-  application of split rules as declared in the current context, using
-  @{attribute split}, for example.
-
-  \end{description}
-*}
-
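-text {*
-  \medskip The following minimal sketch (with a hypothetical local
-  equation) performs a single @{method subst} step and closes the
-  trivial remaining goal by reflexivity:
-*}
-
-notepad
-begin
-  assume eq: "x = y"
-  have "f x = f y"
-    by (subst eq) (rule refl)
-end
-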
-
-subsection {* Further tactic emulations \label{sec:tactics} *}
-
-text {*
-  The following improper proof methods emulate traditional tactics.
-  These admit direct access to the goal state, which is normally
-  considered harmful!  In particular, this may involve both numbered
-  goal addressing (default 1), and dynamic instantiation within the
-  scope of some subgoal.
-
-  \begin{warn}
-    Dynamic instantiations refer to universally quantified parameters
-    of a subgoal (the dynamic context) rather than fixed variables and
-    term abbreviations of a (static) Isar context.
-  \end{warn}
-
-  Tactic emulation methods, unlike their ML counterparts, admit
-  simultaneous instantiation from both dynamic and static contexts.
-  If names occur in both contexts, goal parameters hide locally fixed
-  variables.  Likewise, schematic variables refer to term
-  abbreviations, if present in the static context.  Otherwise the
-  schematic variable is left to be solved by unification with certain
-  parts of the subgoal.
-
-  Note that the tactic emulation proof methods in Isabelle/Isar are
-  consistently named @{text foo_tac}.  Note also that variable names
-  occurring on left hand sides of instantiations must be preceded by a
-  question mark if they coincide with a keyword or contain dots.  This
-  is consistent with the attribute @{attribute "where"} (see
-  \secref{sec:pure-meth-att}).
-
-  \begin{matharray}{rcl}
-    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
-      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \<newline>
-    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
-    ;
-    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
-    ;
-    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
-    ;
-    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
-    ;
-    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
-    ;
-
-    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
-  \<close>}
-
-\begin{description}
-
-  \item @{method rule_tac} etc. do resolution of rules with explicit
-  instantiation.  This works the same way as the ML tactics @{ML
-  res_inst_tac} etc. (see \cite{isabelle-implementation}).
-
-  Multiple rules may only be given if there is no instantiation; then
-  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
-  \cite{isabelle-implementation}).
-
-  \item @{method cut_tac} inserts facts into the proof state as
-  assumption of a subgoal; instantiations may be given as well.  Note
-  that the scope of schematic variables is spread over the main goal
-  statement and rule premises are turned into new subgoals.  This is
-  in contrast to the regular method @{method insert} which inserts
-  closed rule statements.
-
-  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
-  from a subgoal.  Note that @{text \<phi>} may contain schematic
-  variables, to abbreviate the intended proposition; the first
-  matching subgoal premise will be deleted.  Removing useless premises
-  from a subgoal increases its readability and can make search tactics
-  run faster.
-
-  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
-  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
-  as new subgoals (in the original context).
-
-  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
-  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
-  \emph{suffix} of variables.
-
-  \item @{method rotate_tac}~@{text n} rotates the premises of a
-  subgoal by @{text n} positions: from right to left if @{text n} is
-  positive, and from left to right if @{text n} is negative; the
-  default value is 1.
-
-  \item @{method tactic}~@{text "text"} produces a proof method from
-  any ML text of type @{ML_type tactic}.  Apart from the usual ML
-  environment and the current proof context, the ML code may refer to
-  the locally bound value @{ML_text facts}, which indicates any
-  current facts used for forward-chaining.
-
-  \item @{method raw_tactic} is similar to @{method tactic}, but
-  presents the goal state in its raw internal form, where simultaneous
-  subgoals appear as conjunction of the logical framework instead of
-  the usual split into several subgoals.  While this feature is useful
-  for debugging of complex method definitions, it should never appear
-  in production theories.
-
-  \end{description}
-*}
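-
-text {* For example, the following small sketch uses @{method rule_tac}
-  with an explicit instantiation: the standard HOL rule @{thm [source]
-  exI} is applied with the witness @{text 0} supplied for its schematic
-  variable @{text "?x"}: *}
-
-lemma "\<exists>x::nat. x = x"
-  apply (rule_tac x = "0" in exI)
-  apply (rule refl)
-  done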
-
-
-section {* The Simplifier \label{sec:simplifier} *}
-
-text {* The Simplifier performs conditional and unconditional
-  rewriting and uses contextual information: rule declarations in the
-  background theory or local proof context are taken into account, as
-  well as chained facts and subgoal premises (``local assumptions'').
-  There are several general hooks that allow to modify the
-  simplification strategy, or incorporate other proof tools that solve
-  sub-problems, produce rewrite rules on demand etc.
-
-  The rewriting strategy is always strictly bottom up, except for
-  congruence rules, which are applied while descending into a term.
-  Conditions in conditional rewrite rules are solved recursively
-  before the rewrite rule is applied.
-
-  The default Simplifier setup of major object logics (HOL, HOLCF,
-  FOL, ZF) makes the Simplifier ready for immediate use, without
-  engaging into the internal structures.  Thus it serves as
-  general-purpose proof tool with the main focus on equational
-  reasoning, and a bit more than that.
-*}
-
-
-subsection {* Simplification methods \label{sec:simp-meth} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def simp} & : & @{text method} \\
-    @{method_def simp_all} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
-    ;
-
-    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
-    ;
-    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | 'add' | 'del') |
-      'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method simp} invokes the Simplifier on the first subgoal,
-  after inserting chained facts as additional goal premises; further
-  rule declarations may be included via @{text "(simp add: facts)"}.
-  The proof method fails if the subgoal remains unchanged after
-  simplification.
-
-  Note that the original goal premises and chained facts are subject
-  to simplification themselves, while declarations via @{text
-  "add"}/@{text "del"} merely follow the policies of the object-logic
-  to extract rewrite rules from theorems, without further
-  simplification.  This may lead to slightly different behavior in
-  either case, which might be required precisely like that in some
-  boundary situations to perform the intended simplification step!
-
-  \medskip The @{text only} modifier first removes all other rewrite
-  rules, looper tactics (including split rules), congruence rules, and
-  then behaves like @{text add}.  Implicit solvers remain, which means
-  that trivial rules like reflexivity or introduction of @{text
-  "True"} are available to solve the simplified subgoals, but also
-  non-trivial tools like linear arithmetic in HOL.  The latter may
-  lead to some surprise of the meaning of ``only'' in Isabelle/HOL
-  compared to English!
-
-  \medskip The @{text split} modifiers add or delete rules for the
-  Splitter (see also \secref{sec:simp-strategies} on the looper).
-  This works only if the Simplifier method has been properly set up to
-  include the Splitter (all major object logics such as HOL, HOLCF, FOL,
-  ZF do this already).
-
-  There is also a separate @{method_ref split} method available for
-  single-step case splitting.  The effect of repeatedly applying
-  @{text "(split thms)"} can be imitated by ``@{text "(simp only:
-  split: thms)"}''.
-
-  \medskip The @{text cong} modifiers add or delete Simplifier
-  congruence rules (see also \secref{sec:simp-rules}); the default is
-  to add.
-
-  \item @{method simp_all} is similar to @{method simp}, but acts on
-  all goals, working backwards from the last to the first one as usual
-  in Isabelle.\footnote{The order is irrelevant for goals without
-  schematic variables, so simplification might actually be performed
-  in parallel here.}
-
-  Chained facts are inserted into all subgoals, before the
-  simplification process starts.  Further rule declarations are the
-  same as for @{method simp}.
-
-  The proof method fails if all subgoals remain unchanged after
-  simplification.
-
-  \end{description}
-
-  By default the Simplifier methods above take local assumptions fully
-  into account, using equational assumptions in the subsequent
-  normalization process, or simplifying assumptions themselves.
-  Further options allow to fine-tune the behavior of the Simplifier
-  in this respect, corresponding to a variety of ML tactics as
-  follows.\footnote{Unlike the corresponding Isar proof methods, the
-  ML tactics do not insist in changing the goal state.}
-
-  \begin{center}
-  \small
-  \begin{supertabular}{|l|l|p{0.3\textwidth}|}
-  \hline
-  Isar method & ML tactic & behavior \\\hline
-
-  @{text "(simp (no_asm))"} & @{ML simp_tac} & assumptions are ignored
-  completely \\\hline
-
-  @{text "(simp (no_asm_simp))"} & @{ML asm_simp_tac} & assumptions
-  are used in the simplification of the conclusion but are not
-  themselves simplified \\\hline
-
-  @{text "(simp (no_asm_use))"} & @{ML full_simp_tac} & assumptions
-  are simplified but are not used in the simplification of each other
-  or the conclusion \\\hline
-
-  @{text "(simp)"} & @{ML asm_full_simp_tac} & assumptions are used in
-  the simplification of the conclusion and to simplify other
-  assumptions \\\hline
-
-  @{text "(simp (asm_lr))"} & @{ML asm_lr_simp_tac} & compatibility
-  mode: an assumption is only used for simplifying assumptions which
-  are to the right of it \\\hline
-
-  \end{supertabular}
-  \end{center}
-*}
-
-
-subsubsection {* Examples *}
-
-text {* We consider basic algebraic simplifications in Isabelle/HOL.
-  The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
-  a good candidate to be solved by a single call of @{method simp}:
-*}
-
-lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops
-
-text {* The above attempt \emph{fails}, because @{term "0"} and @{term
-  "op +"} in the HOL library are declared as generic type class
-  operations, without stating any algebraic laws yet.  More specific
-  types are required to get access to certain standard simplifications
-  of the theory context, e.g.\ like this: *}
-
-lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
-lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
-lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp
-
-text {*
-  \medskip In many cases, assumptions of a subgoal are also needed in
-  the simplification process.  For example:
-*}
-
-lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
-lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
-lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp
-
-text {* As seen above, local assumptions that shall contribute to
-  simplification need to be part of the subgoal already, or indicated
-  explicitly for use by the subsequent method invocation.  Either too
-  little or too much information can make simplification fail, for
-  different reasons.
-
-  In the next example the malicious assumption @{prop "\<And>x::nat. f x =
-  g (f (g x))"} does not contribute to solve the problem, but makes
-  the default @{method simp} method loop: the rewrite rule @{text "f
-  ?x \<equiv> g (f (g ?x))"} extracted from the assumption does not
-  terminate.  The Simplifier notices certain simple forms of
-  nontermination, but not this one.  The problem can be solved
-  nonetheless, by ignoring assumptions via special options as
-  explained before:
-*}
-
-lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
-  by (simp (no_asm))
-
-text {* The latter form is typical for long unstructured proof
-  scripts, where the control over the goal content is limited.  In
-  structured proofs it is usually better to avoid pushing too many
-  facts into the goal state in the first place.  Assumptions in the
-  Isar proof context do not intrude into the reasoning unless used
-  explicitly.  This is illustrated for a toplevel statement and a
-  local proof body as follows:
-*}
-
-lemma
-  assumes "\<And>x::nat. f x = g (f (g x))"
-  shows "f 0 = f 0 + 0" by simp
-
-notepad
-begin
-  assume "\<And>x::nat. f x = g (f (g x))"
-  have "f 0 = f 0 + 0" by simp
-end
-
-text {* \medskip Because assumptions may simplify each other, there
-  can be very subtle cases of nontermination. For example, the regular
-  @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
-  \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
-  \[
-  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto}
-  @{text "P (f y)"} \stackrel{@{text "y \<equiv> x"}}{\longmapsto}
-  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto} \cdots
-  \]
-  whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
-  Q"} terminates (without solving the goal):
-*}
-
-lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
-  apply simp
-  oops
-
-text {* See also \secref{sec:simp-config} for options to enable
-  Simplifier trace mode, which often helps to diagnose problems with
-  rewrite systems.
-*}
-
-
-subsection {* Declaring rules \label{sec:simp-rules} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def simp} & : & @{text attribute} \\
-    @{attribute_def split} & : & @{text attribute} \\
-    @{attribute_def cong} & : & @{text attribute} \\
-    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{attribute simp} | @@{attribute split} | @@{attribute cong})
-      (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute simp} declares rewrite rules, by adding or
-  deleting them from the simpset within the theory or proof context.
-  Rewrite rules are theorems expressing some form of equality, for
-  example:
-
-  @{text "Suc ?m + ?n = ?m + Suc ?n"} \\
-  @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
-  @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}
-
-  \smallskip
-  Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
-  also permitted; the conditions can be arbitrary formulas.
-
-  \medskip Internally, all rewrite rules are translated into Pure
-  equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
-  simpset contains a function for extracting equalities from arbitrary
-  theorems, which is usually installed when the object-logic is
-  configured initially. For example, @{text "\<not> ?x \<in> {}"} could be
-  turned into @{text "?x \<in> {} \<equiv> False"}. Theorems that are declared as
-  @{attribute simp} and local assumptions within a goal are treated
-  uniformly in this respect.
-
-  The Simplifier accepts the following formats for the @{text "lhs"}
-  term:
-
-  \begin{enumerate}
-
-  \item First-order patterns, considering the sublanguage of
-  application of constant operators to variable operands, without
-  @{text "\<lambda>"}-abstractions or functional variables.
-  For example:
-
-  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
-  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
-
-  \item Higher-order patterns in the sense of \cite{nipkow-patterns}.
-  These are terms in @{text "\<beta>"}-normal form (this will always be the
-  case unless you have done something strange) where each occurrence
-  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
-  @{text "x\<^sub>i"} are distinct bound variables.
-
-  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
-  or its symmetric form, since the @{text "rhs"} is also a
-  higher-order pattern.
-
-  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
-  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
-  variables are treated like quasi-constant term material.
-
-  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
-  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
-  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
-  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
-  be replaced by adding new variables and conditions like this: @{text
-  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
-  rewrite rule of the second category since conditions can be
-  arbitrary terms.
-
-  \end{enumerate}
-
-  \item @{attribute split} declares case split rules.
-
-  \item @{attribute cong} declares congruence rules to the Simplifier
-  context.
-
-  Congruence rules are equalities of the form @{text [display]
-  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}
-
-  This controls the simplification of the arguments of @{text f}.  For
-  example, some arguments can be simplified under additional
-  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
-  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}
-
-  Given this rule, the simplifier assumes @{text "?Q\<^sub>1"} and extracts
-  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
-  assumptions are effective for rewriting formulae such as @{text "x =
-  0 \<longrightarrow> y + x = y"}.
-
-  %FIXME
-  %The local assumptions are also provided as theorems to the solver;
-  %see \secref{sec:simp-solver} below.
-
-  \medskip The following congruence rule for bounded quantifiers also
-  supplies contextual information --- about the bound variable:
-  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
-    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}
-
-  \medskip This congruence rule for conditional expressions can
-  supply contextual information for simplifying the arms:
-  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
-    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}
-
-  A congruence rule can also \emph{prevent} simplification of some
-  arguments.  Here is an alternative congruence rule for conditional
-  expressions that conforms to non-strict functional evaluation:
-  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}
-
-  Only the first argument is simplified; the others remain unchanged.
-  This can make simplification much faster, but may require an extra
-  case split over the condition @{text "?q"} to prove the goal.
-
-  \item @{command "print_simpset"} prints the collection of rules
-  declared to the Simplifier, which is also known as ``simpset''
-  internally.
-
-  For historical reasons, simpsets may occur independently from the
-  current context, but are conceptually dependent on it.  When the
-  Simplifier is invoked via one of its main entry points in the Isar
-  source language (as proof method \secref{sec:simp-meth} or rule
-  attribute \secref{sec:simp-rules}), its simpset is derived from the
-  current proof context, and carries a back-reference to that for
-  other tools that might get invoked internally (e.g.\ simplification
-  procedures \secref{sec:simproc}).  A mismatch of the context of the
-  simpset and the context of the problem being simplified may lead to
-  unexpected results.
-
-  \end{description}
-
-  The implicit simpset of the theory context is propagated
-  monotonically through the theory hierarchy: forming a new theory,
-  the union of the simpsets of its imports is taken as starting
-  point.  Also note that definitional packages like @{command
-  "datatype"}, @{command "primrec"}, @{command "fun"} routinely
-  declare Simplifier rules to the target context, while plain
-  @{command "definition"} is an exception in \emph{not} declaring
-  anything.
-
-  \medskip It is up to the user to manipulate the current simpset further
-  by explicitly adding or deleting theorems as simplification rules,
-  or installing other tools via simplification procedures
-  (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
-  that obviously simplify, like @{text "?n + 0 \<equiv> ?n"}, are good
-  candidates for the implicit simpset, unless a special
-  non-normalizing behavior of certain operations is intended.  More
-  specific rules (such as distributive laws, which duplicate subterms)
-  should be added only for specific proof steps.  Conversely,
-  sometimes a rule needs to be deleted just for some part of a proof.
-  The need of frequent additions or deletions may indicate a poorly
-  designed simpset.
-
-  \begin{warn}
-  The union of simpsets from theory imports (as described above) is
-  not always a good starting point for the new theory.  If some
-  ancestors have deleted simplification rules because they are no
-  longer wanted, while others have left those rules in, then the union
-  will contain the unwanted rules, which then have to be deleted again
-  in the theory body.
-  \end{warn}
-*}
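-
-text {* As a small illustration (using plain Isabelle/HOL only), a local
-  assumption may be declared as @{attribute simp} on the spot, so that
-  it participates in subsequent Simplifier invocations: *}
-
-notepad
-begin
-  fix f :: "nat \<Rightarrow> nat"
-  assume f0 [simp]: "f 0 = 0"
-  have "f (f 0) = 0" by simp
-end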
-
-
-subsection {* Ordered rewriting with permutative rules *}
-
-text {* A rewrite rule is \emph{permutative} if the left-hand side and
-  right-hand side are equal up to renaming of variables.  The most
-  common permutative rule is commutativity: @{text "?x + ?y = ?y +
-  ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
-  ?y"} in arithmetic and @{text "insert ?x (insert ?y ?A) = insert ?y
-  (insert ?x ?A)"} for sets.  Such rules are common enough to merit
-  special attention.
-
-  Because ordinary rewriting loops given such rules, the Simplifier
-  employs a special strategy, called \emph{ordered rewriting}.
-  Permutative rules are detected and only applied if the rewriting
-  step decreases the redex wrt.\ a given term ordering.  For example,
-  commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then
-  stops, because the redex cannot be decreased further in the sense of
-  the term ordering.
-
-  The default is lexicographic ordering of term structure, but this
-  could also be changed locally for special applications via
-  @{index_ML Simplifier.set_termless} in Isabelle/ML.
-
-  \medskip Permutative rewrite rules are declared to the Simplifier
-  just like other rewrite rules.  Their special status is recognized
-  automatically, and their application is guarded by the term ordering
-  accordingly. *}
-
-
-subsubsection {* Rewriting with AC operators *}
-
-text {* Ordered rewriting is particularly effective in the case of
-  associative-commutative operators.  (Associativity by itself is not
-  permutative.)  When dealing with an AC-operator @{text "f"}, keep
-  the following points in mind:
-
-  \begin{itemize}
-
-  \item The associative law must always be oriented from left to
-  right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
-  orientation, if used with commutativity, leads to looping in
-  conjunction with the standard term order.
-
-  \item To complete your set of rewrite rules, you must add not just
-  associativity (A) and commutativity (C) but also a derived rule
-  \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.
-
-  \end{itemize}
-
-  Ordered rewriting with the combination of A, C, and LC sorts a term
-  lexicographically --- the rewriting engine imitates bubble-sort.
-*}
-
-locale AC_example =
-  fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
-  assumes assoc: "(x \<bullet> y) \<bullet> z = x \<bullet> (y \<bullet> z)"
-  assumes commute: "x \<bullet> y = y \<bullet> x"
-begin
-
-lemma left_commute: "x \<bullet> (y \<bullet> z) = y \<bullet> (x \<bullet> z)"
-proof -
-  have "(x \<bullet> y) \<bullet> z = (y \<bullet> x) \<bullet> z" by (simp only: commute)
-  then show ?thesis by (simp only: assoc)
-qed
-
-lemmas AC_rules = assoc commute left_commute
-
-text {* Thus the Simplifier is able to establish equalities with
-  arbitrary permutations of subterms, by normalizing to a common
-  standard form.  For example: *}
-
-lemma "(b \<bullet> c) \<bullet> a = xxx"
-  apply (simp only: AC_rules)
-  txt {* @{subgoals} *}
-  oops
-
-lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
-lemma "(b \<bullet> c) \<bullet> a = c \<bullet> (b \<bullet> a)" by (simp only: AC_rules)
-lemma "(b \<bullet> c) \<bullet> a = (c \<bullet> b) \<bullet> a" by (simp only: AC_rules)
-
-end
-
-text {* Martin and Nipkow \cite{martin-nipkow} discuss the theory and
-  give many examples; other algebraic structures are amenable to
-  ordered rewriting, such as boolean rings.  The Boyer-Moore theorem
-  prover \cite{bm88book} also employs ordered rewriting.
-*}
-
-
-subsubsection {* Re-orienting equalities *}
-
-text {* Another application of ordered rewriting uses the derived rule
-  @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
-  reverse equations.
-
-  This is occasionally useful to re-orient local assumptions according
-  to the term ordering, when other built-in mechanisms of
-  reorientation and mutual simplification fail to apply.  *}
-
-
-subsection {* Configuration options \label{sec:simp-config} *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
-    @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
-    @{attribute_def simp_debug} & : & @{text attribute} & default @{text false} \\
-  \end{tabular}
-  \medskip
-
-  These configuration options control further aspects of the Simplifier.
-  See also \secref{sec:config}.
-
-  \begin{description}
-
-  \item @{attribute simp_depth_limit} limits the number of recursive
-  invocations of the Simplifier during conditional rewriting.
-
-  \item @{attribute simp_trace} makes the Simplifier output internal
-  operations.  This includes rewrite steps, but also bookkeeping like
-  modifications of the simpset.
-
-  \item @{attribute simp_trace_depth_limit} limits the effect of
-  @{attribute simp_trace} to the given depth of recursive Simplifier
-  invocations (when solving conditions of rewrite rules).
-
-  \item @{attribute simp_debug} makes the Simplifier output some extra
-  information about internal operations.  This includes any attempted
-  invocation of simplification procedures.
-
-  \end{description}
-*}
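-
-text {* For example, tracing can be enabled for a single Simplifier
-  invocation via a local declaration; this is a small sketch on plain
-  @{typ nat} arithmetic: *}
-
-lemma fixes x :: nat shows "0 + x = x"
-  using [[simp_trace]] by simp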
-
-
-subsection {* Simplification procedures \label{sec:simproc} *}
-
-text {* Simplification procedures are ML functions that produce proven
-  rewrite rules on demand.  They are associated with higher-order
-  patterns that approximate the left-hand sides of equations.  The
-  Simplifier first matches the current redex against one of the LHS
-  patterns; if this succeeds, the corresponding ML function is
-  invoked, passing the Simplifier context and redex term.  Thus rules
-  may be specifically fashioned for particular situations, resulting
-  in a more powerful mechanism than term rewriting by a fixed set of
-  rules.
-
-  Any successful result needs to be a (possibly conditional) rewrite
-  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
-  rule will be applied just as any ordinary rewrite rule.  It is
-  expected to be already in \emph{internal form}, bypassing the
-  automatic preprocessing of object-level equivalences.
-
-  \begin{matharray}{rcl}
-    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    simproc & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
-      @{syntax text} \<newline> (@'identifier' (@{syntax nameref}+))?
-    ;
-
-    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "simproc_setup"} defines a named simplification
-  procedure that is invoked by the Simplifier whenever any of the
-  given term patterns match the current redex.  The implementation,
-  which is provided as ML source text, needs to be of type @{ML_type
-  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
-  cterm} represents the current redex @{text r} and the result is
-  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
-  generalized version), or @{ML NONE} to indicate failure.  The
-  @{ML_type simpset} argument holds the full context of the current
-  Simplifier invocation, including the actual Isar proof context.  The
-  @{ML_type morphism} informs about the difference of the original
-  compilation context wrt.\ the one of the actual application later
-  on.  The optional @{keyword "identifier"} specifies theorems that
-  represent the logical content of the abstract theory of this
-  simproc.
-
-  Morphisms and identifiers are only relevant for simprocs that are
-  defined within a local target context, e.g.\ in a locale.
-
-  \item @{text "simproc add: name"} and @{text "simproc del: name"}
-  add or delete named simprocs to the current Simplifier context.  The
-  default is to add a simproc.  Note that @{command "simproc_setup"}
-  already adds the new simproc to the subsequent context.
-
-  \end{description}
-*}
-
-
-subsubsection {* Example *}
-
-text {* The following simplification procedure for @{thm
-  [source=false, show_types] unit_eq} in HOL performs fine-grained
-  control over rule application, beyond higher-order pattern matching.
-  Declaring @{thm unit_eq} as @{attribute simp} directly would make
-  the simplifier loop!  Note that a version of this simplification
-  procedure is already active in Isabelle/HOL.  *}
-
-simproc_setup unit ("x::unit") = {*
-  fn _ => fn _ => fn ct =>
-    if HOLogic.is_unit (term_of ct) then NONE
-    else SOME (mk_meta_eq @{thm unit_eq})
-*}
-
-text {* Since the Simplifier applies simplification procedures
-  frequently, it is important to make the failure check in ML
-  reasonably fast. *}
-
-
-subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
-
-text {* The core term-rewriting engine of the Simplifier is normally
-  used in combination with some add-on components that modify the
-  strategy and allow to integrate other non-Simplifier proof tools.
-  These may be reconfigured in ML as explained below.  Even if the
-  default strategies of object-logics like Isabelle/HOL are used
-  unchanged, it helps to understand how the standard Simplifier
-  strategies work. *}
-
-
-subsubsection {* The subgoaler *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
-  Proof.context -> Proof.context"} \\
-  @{index_ML Simplifier.prems_of: "Proof.context -> thm list"} \\
-  \end{mldecls}
-
-  The subgoaler is the tactic used to solve subgoals arising out of
-  conditional rewrite rules or congruence rules.  The default should
-  be simplification itself.  In rare situations, this strategy may
-  need to be changed.  For example, if the premise of a conditional
-  rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
-  ?m < ?n"}, the default strategy could loop.  % FIXME !??
-
-  \begin{description}
-
-  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
-  subgoaler of the context to @{text "tac"}.  The tactic will
-  be applied to the context of the running Simplifier instance.
-
-  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
-  set of premises from the context.  This may be non-empty only if
-  the Simplifier has been told to utilize local assumptions in the
-  first place (cf.\ the options in \secref{sec:simp-meth}).
-
-  \end{description}
-
-  As an example, consider the following alternative subgoaler:
-*}
-
-ML {*
-  fun subgoaler_tac ctxt =
-    assume_tac ORELSE'
-    resolve_tac (Simplifier.prems_of ctxt) ORELSE'
-    asm_simp_tac ctxt
-*}
-
-text {* This tactic first tries to solve the subgoal by assumption or
-  by resolving with one of the premises, calling simplification
-  only if that fails. *}
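-
-text {* As a sketch, this alternative subgoaler could be installed into
-  a Simplifier context as follows (the helper name is arbitrary): *}
-
-ML {*
-  (*install the alternative subgoaler of the example above*)
-  fun set_my_subgoaler ctxt =
-    Simplifier.set_subgoaler subgoaler_tac ctxt;
-*}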
-
-
-subsubsection {* The solver *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML_type solver} \\
-  @{index_ML Simplifier.mk_solver: "string ->
-  (Proof.context -> int -> tactic) -> solver"} \\
-  @{index_ML_op setSolver: "Proof.context * solver -> Proof.context"} \\
-  @{index_ML_op addSolver: "Proof.context * solver -> Proof.context"} \\
-  @{index_ML_op setSSolver: "Proof.context * solver -> Proof.context"} \\
-  @{index_ML_op addSSolver: "Proof.context * solver -> Proof.context"} \\
-  \end{mldecls}
-
-  A solver is a tactic that attempts to solve a subgoal after
-  simplification.  Its core functionality is to prove trivial subgoals
-  such as @{prop "True"} and @{text "t = t"}, but object-logics might
-  be more ambitious.  For example, Isabelle/HOL performs a restricted
-  version of linear arithmetic here.
-
-  Solvers are packaged up in abstract type @{ML_type solver}, with
-  @{ML Simplifier.mk_solver} as the only operation to create a solver.
-
-  \medskip Rewriting does not instantiate unknowns.  For example,
-  rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
-  instantiating @{text "?A"}.  The solver, however, is an arbitrary
-  tactic and may instantiate unknowns as it pleases.  This is the only
-  way the Simplifier can handle a conditional rewrite rule whose
-  condition contains extra variables.  When a simplification tactic is
-  to be combined with other provers, especially with the Classical
-  Reasoner, it is important whether it can be considered safe or not.
-  For this reason a simpset contains two solvers: safe and unsafe.
-
-  The standard simplification strategy solely uses the unsafe solver,
-  which is appropriate in most cases.  For special applications where
-  the simplification process is not allowed to instantiate unknowns
-  within the goal, simplification starts with the safe solver, but may
-  still apply the ordinary unsafe one in nested simplifications for
-  conditional rules or congruences. Note that in this way the overall
-  tactic is not totally safe: it may instantiate unknowns that appear
-  also in other subgoals.
-
-  \begin{description}
-
-  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
-  "tac"} into a solver; the @{text "name"} is only attached as a
-  comment and has no further significance.
-
-  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
-  the safe solver of @{text "ctxt"}.
-
-  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
-  additional safe solver; it will be tried after the solvers which had
-  already been present in @{text "ctxt"}.
-
-  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
-  unsafe solver of @{text "ctxt"}.
-
-  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
-  additional unsafe solver; it will be tried after the solvers which
-  had already been present in @{text "ctxt"}.
-
-  \end{description}
-
-  \medskip The solver tactic is invoked with the context of the
-  running Simplifier.  Further operations
-  may be used to retrieve relevant information, such as the list of
-  local Simplifier premises via @{ML Simplifier.prems_of} --- this
-  list may be non-empty only if the Simplifier runs in a mode that
-  utilizes local assumptions (see also \secref{sec:simp-meth}).  The
-  solver is also presented the full goal including its assumptions in
-  any case.  Thus it can use these (e.g.\ by calling @{ML
-  assume_tac}), even if the Simplifier proper happens to ignore local
-  premises at the moment.
-
-  \medskip As explained before, the subgoaler is also used to solve
-  the premises of congruence rules.  These are usually of the form
-  @{text "s = ?x"}, where @{text "s"} needs to be simplified and
-  @{text "?x"} needs to be instantiated with the result.  Typically,
-  the subgoaler will invoke the Simplifier at some point, which will
-  eventually call the solver.  For this reason, solver tactics must be
-  prepared to solve goals of the form @{text "t = ?x"}, usually by
-  reflexivity.  In particular, reflexivity should be tried before any
-  of the fancy automated proof tools.
-
-  It may even happen that due to simplification the subgoal is no
-  longer an equality.  For example, @{text "False \<longleftrightarrow> ?Q"} could be
-  rewritten to @{text "\<not> ?Q"}.  To cover this case, the solver could
-  try resolving with the theorem @{text "\<not> False"} of the
-  object-logic.
-
-  \medskip
-
-  \begin{warn}
-  If a premise of a congruence rule cannot be proved, then the
-  congruence is ignored.  This should only happen if the rule is
-  \emph{conditional} --- that is, contains premises not of the form
-  @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
-  or possibly the subgoaler or solver, is faulty.
-  \end{warn}
-*}
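-
-text {* As a minimal sketch (analogous to the subgoaler example above),
-  the following wraps plain proof-by-assumption as a solver and
-  provides a helper to install it as an additional unsafe solver; the
-  string argument is only a comment, and the names are arbitrary: *}
-
-ML {*
-  (*a solver that merely attempts proof by assumption*)
-  val assumption_solver =
-    Simplifier.mk_solver "assumption" (fn _ => assume_tac);
-
-  (*install it as additional unsafe solver of a context*)
-  fun add_assumption_solver ctxt = ctxt addSolver assumption_solver;
-*}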
-
-
-subsubsection {* The looper *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML_op setloop: "Proof.context *
-  (Proof.context -> int -> tactic) -> Proof.context"} \\
-  @{index_ML_op addloop: "Proof.context *
-  (string * (Proof.context -> int -> tactic))
-  -> Proof.context"} \\
-  @{index_ML_op delloop: "Proof.context * string -> Proof.context"} \\
-  @{index_ML Splitter.add_split: "thm -> Proof.context -> Proof.context"} \\
-  @{index_ML Splitter.del_split: "thm -> Proof.context -> Proof.context"} \\
-  \end{mldecls}
-
-  The looper is a list of tactics that are applied after
-  simplification, in case the solver failed to solve the simplified
-  goal.  If the looper succeeds, the simplification process is started
-  all over again.  Each of the subgoals generated by the looper is
-  attacked in turn, in reverse order.
-
-  A typical looper is \emph{case splitting}: the expansion of a
-  conditional.  Another possibility is to apply an elimination rule on
-  the assumptions.  More adventurous loopers could start an induction.
-
-  \begin{description}
-
-  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
-  looper tactic of @{text "ctxt"}.
-
-  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
-  additional looper tactic with name @{text "name"}, which is
-  significant for managing the collection of loopers.  The tactic will
-  be tried after the looper tactics that had already been present in
-  @{text "ctxt"}.
-
-  \item @{text "ctxt delloop name"} deletes the looper tactic that was
-  associated with @{text "name"} from @{text "ctxt"}.
-
-  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
-  for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
-
-  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
-  tactic corresponding to @{text thm} from the looper tactics of
-  @{text "ctxt"}.
-
-  \end{description}
-
-  The splitter replaces applications of a given function; the
-  right-hand side of the replacement can be anything.  For example,
-  here is a splitting rule for conditional expressions:
-
-  @{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
-
-  Another example is the elimination operator for Cartesian products
-  (which happens to be called @{text split} in Isabelle/HOL):
-
-  @{text [display] "?P (split ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (?f a b))"}
-
-  For technical reasons, there is a distinction between case splitting
-  in the conclusion and in the premises of a subgoal.  The former is
-  done by @{ML Splitter.split_tac} with rules like @{thm [source]
-  split_if} or @{thm [source] option.split}, which do not split the
-  subgoal, while the latter is done by @{ML Splitter.split_asm_tac}
-  with rules like @{thm [source] split_if_asm} or @{thm [source]
-  option.split_asm}, which split the subgoal.  The function @{ML
-  Splitter.add_split} automatically takes care of which tactic to
-  call, analyzing the form of the rules given as argument; it is the
-  same operation behind the @{text "split"} attribute or method modifier
-  syntax in the Isar source language.
-
-  Case splits should be allowed only when necessary; they are
-  expensive and hard to control.  Case-splitting on if-expressions in
-  the conclusion is usually beneficial, so it is enabled by default in
-  Isabelle/HOL and Isabelle/FOL/ZF.
-
-  \begin{warn}
-  With @{ML Splitter.split_asm_tac} as looper component, the
-  Simplifier may split subgoals!  This might cause unexpected problems
-  in tactic expressions that silently assume 0 or 1 subgoals after
-  simplification.
-  \end{warn}
-*}
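-
-text {* As a small sketch, the split rule @{thm [source] option.split}
-  mentioned above could also be added to a context at the ML level,
-  which corresponds to the @{text "split"} declaration attribute (the
-  helper name is arbitrary): *}
-
-ML {*
-  (*extend a Simplifier context by an additional split rule*)
-  fun add_option_split ctxt =
-    Splitter.add_split @{thm option.split} ctxt;
-*}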
-
-
-subsection {* Forward simplification \label{sec:simp-forward} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def simplified} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute simplified} opt? @{syntax thmrefs}?
-    ;
-
-    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
-  \<close>}
-
-  \begin{description}
-  
-  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
-  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
-  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
-  The result is fully simplified by default, including assumptions and
-  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
-  the same way as for the @{text simp} method.
-
-  Note that forward simplification restricts the simplifier to its
-  most basic operation of term rewriting; solver and looper tactics
-  (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
-  @{attribute simplified} attribute should be only rarely required
-  under normal circumstances.
-
-  \end{description}
-*}
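-
-text {* For example, the following small sketch derives a simplified
-  variant of a locally assumed fact, using only standard Isabelle/HOL
-  simplification; the result @{text "a'"} has the form
-  @{text "\<forall>x. P x"}: *}
-
-notepad
-begin
-  fix P :: "nat \<Rightarrow> bool"
-  assume a: "\<forall>x. x + 0 = x \<longrightarrow> P x"
-  note a' = a [simplified]
-end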
-
-
-section {* The Classical Reasoner \label{sec:classical} *}
-
-subsection {* Basic concepts *}
-
-text {* Although Isabelle is generic, many users will be working in
-  some extension of classical first-order logic.  Isabelle/ZF is built
-  upon theory FOL, while Isabelle/HOL conceptually contains
-  first-order logic as a fragment.  Theorem-proving in predicate logic
-  is undecidable, but many automated strategies have been developed to
-  assist in this task.
-
-  Isabelle's classical reasoner is a generic package that accepts
-  certain information about a logic and delivers a suite of automatic
-  proof tools, based on rules that are classified and declared in the
-  context.  These proof procedures are slow and simplistic compared
-  with high-end automated theorem provers, but they can save
-  considerable time and effort in practice.  They can prove theorems
-  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
-  milliseconds (including full proof reconstruction): *}
-
-lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
-  by blast
-
-lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
-  by blast
-
-text {* The proof tools are generic.  They are not restricted to
-  first-order logic, and have been heavily used in the development of
-  the Isabelle/HOL library and applications.  The tactics can be
-  traced, and their components can be called directly; in this manner,
-  any proof can be viewed interactively.  *}
-
-
-subsubsection {* The sequent calculus *}
-
-text {* Isabelle supports natural deduction, which is easy to use for
-  interactive proof.  But natural deduction does not easily lend
-  itself to automation, and has a bias towards intuitionism.  For
-  certain proofs in classical logic, it can not be called natural.
-  The \emph{sequent calculus}, a generalization of natural deduction,
-  is easier to automate.
-
-  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
-  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
-  logic, sequents can equivalently be made from lists or multisets of
-  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
-  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
-  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
-  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
-  sequent is \textbf{basic} if its left and right sides have a common
-  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
-  valid.
-
-  Sequent rules are classified as \textbf{right} or \textbf{left},
-  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
-  Rules that operate on the right side are analogous to natural
-  deduction's introduction rules, and left rules are analogous to
-  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
-  is the rule
-  \[
-  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
-  \]
-  Applying the rule backwards, this breaks down some implication on
-  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
-  the sets of formulae that are unaffected by the inference.  The
-  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
-  single rule
-  \[
-  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
-  \]
-  This breaks down some disjunction on the right side, replacing it by
-  both disjuncts.  Thus, the sequent calculus is a kind of
-  multiple-conclusion logic.
-
-  To illustrate the use of multiple formulae on the right, let us
-  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
-  backwards, we reduce this formula to a basic sequent:
-  \[
-  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
-    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
-      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
-        {@{text "P, Q \<turnstile> Q, P"}}}}
-  \]
-
-  This example is typical of the sequent calculus: start with the
-  desired theorem and apply rules backwards in a fairly arbitrary
-  manner.  This yields a surprisingly effective proof procedure.
-  Quantifiers add only a few complications, since Isabelle handles
-  parameters and schematic variables.  See \cite[Chapter
-  10]{paulson-ml2} for further discussion.  *}
-
-
-subsubsection {* Simulating sequents by natural deduction *}
-
-text {* Isabelle can represent sequents directly, as in the
-  object-logic LK.  But natural deduction is easier to work with, and
-  most object-logics employ it.  Fortunately, we can simulate the
-  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
-  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
-  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
-  Elim-resolution plays a key role in simulating sequent proofs.
-
-  We can easily handle reasoning on the left.  Elim-resolution with
-  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
-  a similar effect as the corresponding sequent rules.  For the other
-  connectives, we use sequent-style elimination rules instead of
-  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
-  But note that the rule @{text "(\<not>L)"} has no effect under our
-  representation of sequents!
-  \[
-  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
-  \]
-
-  What about reasoning on the right?  Introduction rules can only
-  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
-  other right-side formulae are represented as negated assumptions,
-  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
-  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
-  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
-
-  To ensure that swaps occur only when necessary, each introduction
-  rule is converted into a swapped form: it is resolved with the
-  second premise of @{text "(swap)"}.  The swapped form of @{text
-  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
-  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
-
-  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
-  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
-
-  Swapped introduction rules are applied using elim-resolution, which
-  deletes the negated formula.  Our representation of sequents also
-  requires the use of ordinary introduction rules.  If we had no
-  regard for readability of intermediate goal states, we could treat
-  the right side more uniformly by representing sequents as @{text
-  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
-*}
-
-
-subsubsection {* Extra rules for the sequent calculus *}
-
-text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
-  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
-  In addition, we need rules to embody the classical equivalence
-  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
-  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
-  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
-
-  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
-  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
-
-  Quantifier replication also requires special rules.  In classical
-  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
-  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
-  \[
-  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
-  \qquad
-  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
-  \]
-  Thus both kinds of quantifier may be replicated.  Theorems requiring
-  multiple uses of a universal formula are easy to invent; consider
-  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
-  @{text "n > 1"}.  Natural examples of the multiple use of an
-  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
-  \<longrightarrow> P y"}.
-
-  Forgoing quantifier replication loses completeness, but gains
-  decidability, since the search space becomes finite.  Many useful
-  theorems can be proved without replication, and the search generally
-  delivers its verdict in a reasonable time.  To adopt this approach,
-  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
-  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
-  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
-  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
-
-  Elim-resolution with this rule will delete the universal formula
-  after a single use.  To replicate universal quantifiers, replace the
-  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
-
-  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
-  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
-
-  All introduction rules mentioned above are also useful in swapped
-  form.
-
-  Replication makes the search space infinite; we must apply the rules
-  with care.  The classical reasoner distinguishes between safe and
-  unsafe rules, applying the latter only when there is no alternative.
-  Depth-first search may well go down a blind alley; best-first search
-  is better behaved in an infinite search space.  However, quantifier
-  replication is too expensive to prove any but the simplest theorems.
-*}
-
-
-subsection {* Rule declarations *}
-
-text {* The proof tools of the Classical Reasoner depend on
-  collections of rules declared in the context, which are classified
-  as introduction, elimination or destruction and as \emph{safe} or
-  \emph{unsafe}.  In general, safe rules can be attempted blindly,
-  while unsafe rules must be used with care.  A safe rule must never
-  reduce a provable goal to an unprovable set of subgoals.
-
-  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
-  \<or> Q"} to @{text "P"}, which might turn out as premature choice of an
-  unprovable subgoal.  Any rule is unsafe whose premises contain new
-  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
-  unsafe, since it is applied via elim-resolution, which discards the
-  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
-  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
-  unsafe for similar reasons.  The quantifier duplication rule @{text
-  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
-  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
-  looping.  In classical first-order logic, all rules are safe except
-  those mentioned above.
-
-  The safe~/ unsafe distinction is vague, and may be regarded merely
-  as a way of giving some rules priority over others.  One could argue
-  that @{text "(\<or>E)"} is unsafe, because repeated application of it
-  could generate exponentially many subgoals.  Induction rules are
-  unsafe because inductive proofs are difficult to set up
-  automatically.  Any inference is unsafe that instantiates an unknown
-  in the proof state --- thus matching must be used, rather than
-  unification.  Even proof by assumption is unsafe if it instantiates
-  unknowns shared with other subgoals.
-
-  \begin{matharray}{rcl}
-    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def intro} & : & @{text attribute} \\
-    @{attribute_def elim} & : & @{text attribute} \\
-    @{attribute_def dest} & : & @{text attribute} \\
-    @{attribute_def rule} & : & @{text attribute} \\
-    @{attribute_def iff} & : & @{text attribute} \\
-    @{attribute_def swapped} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
-    ;
-    @@{attribute rule} 'del'
-    ;
-    @@{attribute iff} (((() | 'add') '?'?) | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "print_claset"} prints the collection of rules
-  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
-  within the context.
-
-  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
-  declare introduction, elimination, and destruction rules,
-  respectively.  By default, rules are considered as \emph{unsafe}
-  (i.e.\ not applied blindly without backtracking), while ``@{text
-  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
-  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
-  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
-  of the @{method rule} method).  The optional natural number
-  specifies an explicit weight argument, which is ignored by the
-  automated reasoning tools, but determines the search order of single
-  rule steps.
-
-  Introduction rules are those that can be applied using ordinary
-  resolution.  Their swapped forms are generated internally, which
-  will be applied using elim-resolution.  Elimination rules are
-  applied using elim-resolution.  Rules are sorted by the number of
-  new subgoals they will yield; rules that generate the fewest
-  subgoals will be tried first.  Otherwise, later declarations take
-  precedence over earlier ones.
-
-  Rules already present in the context with the same classification
-  are ignored.  A warning is printed if the rule has already been
-  added with some other classification, but the rule is added anyway
-  as requested.
-
-  \item @{attribute rule}~@{text del} deletes all occurrences of a
-  rule from the classical context, regardless of its classification as
-  introduction~/ elimination~/ destruction and safe~/ unsafe.
-
-  \item @{attribute iff} declares logical equivalences to the
-  Simplifier and the Classical reasoner at the same time.
-  Non-conditional rules result in a safe introduction and elimination
-  pair; conditional ones are considered unsafe.  Rules with negative
-  conclusion are automatically inverted (using @{text "\<not>"}-elimination
-  internally).
-
-  The ``@{text "?"}'' version of @{attribute iff} declares rules to
-  the Isabelle/Pure context only, and omits the Simplifier
-  declaration.
-
-  \item @{attribute swapped} turns an introduction rule into an
-  elimination, by resolving with the classical swap principle @{text
-  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
-  illustrative purposes: the Classical Reasoner already swaps rules
-  internally as explained above.
-
-  \end{description}
-*}
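-
-text {* For example, a logical equivalence may be given to the
-  Simplifier and the Classical Reasoner simultaneously via @{attribute
-  iff} --- a minimal sketch (the lemma name is arbitrary): *}
-
-lemma conj_absorb_sketch [iff]: "(A \<and> A) \<longleftrightarrow> A"
-  by blast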
-
-
-subsection {* Structured methods *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def rule} & : & @{text method} \\
-    @{method_def contradiction} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method rule} @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{method rule} as offered by the Classical Reasoner is a
-  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
-  versions work the same, but the classical version observes the
-  classical rule context in addition to that of Isabelle/Pure.
-
-  Common object logics (HOL, ZF, etc.) declare a rich collection of
-  classical rules (even if these would qualify as intuitionistic
-  ones), but only a few declarations to the rule context of
-  Isabelle/Pure (\secref{sec:pure-meth-att}).
-
-  \item @{method contradiction} solves some goal by contradiction,
-  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
-  facts, which are guaranteed to participate, may appear in either
-  order.
-
-  \end{description}
-*}
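-
-text {* A minimal sketch of both methods (the lemma names are
-  arbitrary): the default rule step ``@{text ".."}'' selects a suitable
-  introduction rule from the classical context, and @{method
-  contradiction} closes the goal from the contradictory chained
-  facts. *}
-
-lemma disj_sketch: "A \<Longrightarrow> A \<or> B"
-proof -
-  assume A
-  then show "A \<or> B" ..
-qed
-
-lemma contradiction_sketch:
-  assumes "\<not> A" and A
-  shows C
-  using assms by contradiction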
-
-
-subsection {* Fully automated methods *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def blast} & : & @{text method} \\
-    @{method_def auto} & : & @{text method} \\
-    @{method_def force} & : & @{text method} \\
-    @{method_def fast} & : & @{text method} \\
-    @{method_def slow} & : & @{text method} \\
-    @{method_def best} & : & @{text method} \\
-    @{method_def fastforce} & : & @{text method} \\
-    @{method_def slowsimp} & : & @{text method} \\
-    @{method_def bestsimp} & : & @{text method} \\
-    @{method_def deepen} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
-    ;
-    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
-    ;
-    @@{method force} (@{syntax clasimpmod} * )
-    ;
-    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
-    ;
-    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
-      (@{syntax clasimpmod} * )
-    ;
-    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
-    ;
-    @{syntax_def clamod}:
-      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
-    ;
-    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
-      ('cong' | 'split') (() | 'add' | 'del') |
-      'iff' (((() | 'add') '?'?) | 'del') |
-      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method blast} is a separate classical tableau prover that
-  uses the same classical rule declarations as explained before.
-
-  Proof search is coded directly in ML using special data structures.
-  A successful proof is then reconstructed using regular Isabelle
-  inferences.  It is faster and more powerful than the other classical
-  reasoning tools, but has major limitations too.
-
-  \begin{itemize}
-
-  \item It does not use the classical wrapper tacticals, such as the
-  integration with the Simplifier of @{method fastforce}.
-
-  \item It does not perform higher-order unification, as needed by the
-  rule @{thm [source=false] rangeI} in HOL.  There are often
-  alternatives to such rules, for example @{thm [source=false]
-  range_eqI}.
-
-  \item Function variables may only be applied to parameters of the
-  subgoal.  (This restriction arises because the prover does not use
-  higher-order unification.)  If other function variables are present
-  then the prover will fail with the message \texttt{Function Var's
-  argument not a bound variable}.
-
-  \item Its proof strategy is more general than @{method fast} but can
-  be slower.  If @{method blast} fails or seems to be running forever,
-  try @{method fast} and the other proof tools described below.
-
-  \end{itemize}
-
-  The optional integer argument specifies a bound for the number of
-  unsafe steps used in a proof.  By default, @{method blast} starts
-  with a bound of 0 and increases it successively to 20.  In contrast,
-  @{text "(blast lim)"} tries to prove the goal using a search bound
-  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
-  be made much faster by supplying the successful search bound to this
-  proof method instead.
-
-  \item @{method auto} combines classical reasoning with
-  simplification.  It is intended for situations where there are a lot
-  of mostly trivial subgoals; it proves all the easy ones, leaving the
-  ones it cannot prove.  Occasionally, attempting to prove the hard
-  ones may take a long time.
-
-  The optional depth arguments in @{text "(auto m n)"} refer to its
-  builtin classical reasoning procedures: @{text m} (default 4) is for
-  @{method blast}, which is tried first, and @{text n} (default 2) is
-  for a slower but more general alternative that also takes wrappers
-  into account.
-
-  \item @{method force} is intended to prove the first subgoal
-  completely, using many fancy proof tools and performing a rather
-  exhaustive search.  As a result, proof attempts may take rather long
-  or diverge easily.
-
-  \item @{method fast}, @{method best}, @{method slow} attempt to
-  prove the first subgoal using sequent-style reasoning as explained
-  before.  Unlike @{method blast}, they construct proofs directly in
-  Isabelle.
-
-  There is a difference in search strategy and back-tracking: @{method
-  fast} uses depth-first search and @{method best} uses best-first
-  search (guided by a heuristic function: normally the total size of
-  the proof state).
-
-  Method @{method slow} is like @{method fast}, but conducts a broader
-  search: it may, when backtracking from a failed proof attempt, undo
-  even the step of proving a subgoal by assumption.
-
-  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
-  are like @{method fast}, @{method slow}, @{method best},
-  respectively, but use the Simplifier as additional wrapper. The name
-  @{method fastforce} reflects the behaviour of this popular method
-  better without requiring an understanding of its implementation.
-
-  \item @{method deepen} works by exhaustive search up to a certain
-  depth.  The start depth is 4 (unless specified explicitly), and the
-  depth is increased iteratively up to 10.  Unsafe rules are modified
-  to preserve the formula they act on, so that it can be used repeatedly.
-  This method can prove more goals than @{method fast}, but is much
-  slower, for example if the assumptions have many universal
-  quantifiers.
-
-  \end{description}
-
-  All of the above methods support additional modifiers of the context
-  of classical (and simplifier) rules, but the ones related to the
-  Simplifier are explicitly prefixed by @{text simp} here.  The
-  semantics of these ad-hoc rule declarations is analogous to the
-  attributes given before.  Facts provided by forward chaining are
-  inserted into the goal before commencing proof search.
-*}
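-
-text {* Two minimal sketches: plain @{method blast} for pure predicate
-  logic, and @{method blast} with an additional (unsafe) introduction
-  rule supplied as a modifier. *}
-
-lemma "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f (f a))"
-  by blast
-
-lemma "x \<le> y \<Longrightarrow> y \<le> z \<Longrightarrow> z \<le> w \<Longrightarrow> x \<le> (w::nat)"
-  by (blast intro: order_trans)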
-
-
-subsection {* Partially automated methods *}
-
-text {* These proof methods may help in situations when the
-  fully-automated tools fail.  The result is a simpler subgoal that
-  can be tackled by other means, such as by manual instantiation of
-  quantifiers.
-
-  \begin{matharray}{rcl}
-    @{method_def safe} & : & @{text method} \\
-    @{method_def clarify} & : & @{text method} \\
-    @{method_def clarsimp} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
-    ;
-    @@{method clarsimp} (@{syntax clasimpmod} * )
-  \<close>}
-
-  \begin{description}
-
-  \item @{method safe} repeatedly performs safe steps on all subgoals.
-  It is deterministic, with at most one outcome.
-
-  \item @{method clarify} performs a series of safe steps without
-  splitting subgoals; see also @{method clarify_step}.
-
-  \item @{method clarsimp} acts like @{method clarify}, but also does
-  simplification.  Note that if the Simplifier context includes a
-  splitter for the premises, the subgoal may still be split.
-
-  \end{description}
-*}
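-
-text {* A minimal sketch: @{method clarify} strips the safe outer
-  structure (implication, existential, conjunction of the assumption)
-  without splitting the conclusion; the remaining subgoal is then
-  closed by other means. *}
-
-lemma "(\<exists>x. P x \<and> Q x) \<longrightarrow> (\<exists>x. P x) \<and> (\<exists>x. Q x)"
-  apply clarify
-  apply blast
-  done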
-
-
-subsection {* Single-step tactics *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def safe_step} & : & @{text method} \\
-    @{method_def inst_step} & : & @{text method} \\
-    @{method_def step} & : & @{text method} \\
-    @{method_def slow_step} & : & @{text method} \\
-    @{method_def clarify_step} & : &  @{text method} \\
-  \end{matharray}
-
-  These are the primitive tactics behind the automated proof methods
-  of the Classical Reasoner.  By calling them yourself, you can
-  execute these procedures one step at a time.
-
-  \begin{description}
-
-  \item @{method safe_step} performs a safe step on the first subgoal.
-  The safe wrapper tacticals are applied to a tactic that may include
-  proof by assumption or Modus Ponens (taking care not to instantiate
-  unknowns), or substitution.
-
-  \item @{method inst_step} is like @{method safe_step}, but allows
-  unknowns to be instantiated.
-
-  \item @{method step} is the basic step of the proof procedure, it
-  operates on the first subgoal.  The unsafe wrapper tacticals are
-  applied to a tactic that tries @{method safe}, @{method inst_step},
-  or applies an unsafe rule from the context.
-
-  \item @{method slow_step} resembles @{method step}, but allows
-  backtracking between using safe rules with instantiation (@{method
-  inst_step}) and using unsafe rules.  The resulting search space is
-  larger.
-
-  \item @{method clarify_step} performs a safe step on the first
-  subgoal; no splitting step is applied.  For example, the subgoal
-  @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
-  Modus Ponens, etc., may be performed provided they do not
-  instantiate unknowns.  Assumptions of the form @{text "x = t"} may
-  be eliminated.  The safe wrapper tactical is applied.
-
-  \end{description}
-*}
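-
-text {* A minimal sketch, tracing the automated search one step at a
-  time: the first @{method safe_step} introduces the implication, the
-  second one eliminates the conjunction, and the remaining goal is
-  closed by assumption. *}
-
-lemma "A \<and> B \<longrightarrow> A"
-  apply safe_step
-  apply safe_step
-  apply assumption
-  done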
-
-
-subsection {* Modifying the search step *}
-
-text {*
-  \begin{mldecls}
-    @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
-    @{index_ML_op addSWrapper: "Proof.context *
-  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
-    @{index_ML_op addSbefore: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op addSafter: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op delSWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
-    @{index_ML_op addWrapper: "Proof.context *
-  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
-    @{index_ML_op addbefore: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op addafter: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op delWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
-    @{index_ML addSss: "Proof.context -> Proof.context"} \\
-    @{index_ML addss: "Proof.context -> Proof.context"} \\
-  \end{mldecls}
-
-  The proof strategy of the Classical Reasoner is simple.  Perform as
-  many safe inferences as possible; or else, apply certain safe rules,
-  allowing instantiation of unknowns; or else, apply an unsafe rule.
-  The tactics also eliminate assumptions of the form @{text "x = t"}
-  by substitution if they have been set up to do so.  They may perform
-  a form of Modus Ponens: if there are assumptions @{text "P \<longrightarrow> Q"} and
-  @{text "P"}, then replace @{text "P \<longrightarrow> Q"} by @{text "Q"}.
-
-  The classical reasoning tools --- except @{method blast} --- allow
-  this basic proof strategy to be modified by applying two lists of
-  arbitrary \emph{wrapper tacticals} to it.  The first wrapper list,
-  which is considered to contain safe wrappers only, affects @{method
-  safe_step} and all the tactics that call it.  The second one, which
-  may contain unsafe wrappers, affects the unsafe parts of @{method
-  step}, @{method slow_step}, and the tactics that call them.  A
-  wrapper transforms each step of the search, for example by
-  attempting other tactics before or after the original step tactic.
-  All members of a wrapper list are applied in turn to the respective
-  step tactic.
-
-  Initially the two wrapper lists are empty, which means no
-  modification of the step tactics. Safe and unsafe wrappers are added
-  to a claset with the functions given below, supplying them with
-  wrapper names.  These names may be used to selectively delete
-  wrappers.
-
-  \begin{description}
-
-  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
-  which should yield a safe tactic, to modify the existing safe step
-  tactic.
-
-  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
-  safe wrapper, such that it is tried \emph{before} each safe step of
-  the search.
-
-  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
-  safe wrapper, such that it is tried when a safe step of the search
-  would fail.
-
-  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
-  the given name.
-
-  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
-  modify the existing (unsafe) step tactic.
-
-  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
-  unsafe wrapper, such that its result is concatenated
-  \emph{before} the result of each unsafe step.
-
-  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
-  unsafe wrapper, such that its result is concatenated \emph{after}
-  the result of each unsafe step.
-
-  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
-  the given name.
-
-  \item @{text "addSss"} adds the simpset of the context to its
-  classical set. The assumptions and goal will be simplified, in a
-  rather safe way, after each safe step of the search.
-
-  \item @{text "addss"} adds the simpset of the context to its
-  classical set. The assumptions and goal will be simplified before
-  each unsafe step of the search.
-
-  \end{description}
-*}
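-
-text {* For example, a purely illustrative wrapper that attempts plain
-  proof by assumption before each safe step could be added and removed
-  again as follows (a minimal sketch of the ML interface; the wrapper
-  name is arbitrary and this is not part of the default setup): *}
-
-ML {*
-  val extended_ctxt =
-    @{context} addSbefore ("sketch_assume", fn _ => assume_tac);
-
-  val restored_ctxt =
-    extended_ctxt delSWrapper "sketch_assume";
-*}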
-
-
-section {* Object-logic setup \label{sec:object-logic} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{method_def atomize} & : & @{text method} \\
-    @{attribute_def atomize} & : & @{text attribute} \\
-    @{attribute_def rule_format} & : & @{text attribute} \\
-    @{attribute_def rulify} & : & @{text attribute} \\
-  \end{matharray}
-
-  The very starting point for any Isabelle object-logic is a ``truth
-  judgment'' that links object-level statements to the meta-logic
-  (with its minimal language of @{text prop} that covers universal
-  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).
-
-  Common object-logics are sufficiently expressive to internalize rule
-  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
-  language.  This is useful in certain situations where a rule needs
-  to be viewed as an atomic statement from the meta-level perspective,
-  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.
-
-  From the following language elements, only the @{method atomize}
-  method and @{attribute rule_format} attribute are occasionally
-  required by end-users; the rest is for those who need to set up their
-  own object-logic.  In the latter case existing formulations of
-  Isabelle/FOL or Isabelle/HOL may be taken as realistic examples.
-
-  Generic tools may refer to the information provided by object-logic
-  declarations internally.
-
-  @{rail \<open>
-    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
-    ;
-    @@{attribute atomize} ('(' 'full' ')')?
-    ;
-    @@{attribute rule_format} ('(' 'noasm' ')')?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
-  @{text c} as the truth judgment of the current object-logic.  Its
-  type @{text \<sigma>} should specify a coercion of the category of
-  object-level propositions to @{text prop} of the Pure meta-logic;
-  the mixfix annotation @{text "(mx)"} would typically just link the
-  object language (internally of syntactic category @{text logic})
-  with that of @{text prop}.  Only one @{command "judgment"}
-  declaration may be given in any theory development.
-  
-  \item @{method atomize} (as a method) rewrites any non-atomic
-  premises of a sub-goal, using the meta-level equations declared via
-  @{attribute atomize} (as an attribute) beforehand.  As a result,
-  heavily nested goals become amenable to fundamental operations such
-  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
-  "(full)"}'' option here means to turn the whole subgoal into an
-  object-statement (if possible), including the outermost parameters
-  and assumptions as well.
-
-  A typical collection of @{attribute atomize} rules for a particular
-  object-logic would provide an internalization for each of the
-  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
-  Meta-level conjunction should be covered as well (this is
-  particularly important for locales, see \secref{sec:locale}).
-
-  \item @{attribute rule_format} rewrites a theorem by the equalities
-  declared as @{attribute rulify} rules in the current object-logic.
-  By default, the result is fully normalized, including assumptions
-  and conclusions at any depth.  The @{text "(no_asm)"} option
-  restricts the transformation to the conclusion of a rule.
-
-  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
-  rule_format} is to replace (bounded) universal quantification
-  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
-  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
-
-  \end{description}
-*}
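-
-text {* For example, @{attribute rule_format} turns an object-level
-  statement into the corresponding meta-level rule --- a minimal
-  sketch (the lemma name is arbitrary): *}
-
-lemma sketch_rule [rule_format]: "\<forall>x. P x \<longrightarrow> P x \<and> P x"
-  by blast
-
-text {* The resulting fact @{text sketch_rule} has the form
-  @{text "P ?x \<Longrightarrow> P ?x \<and> P ?x"}. *}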
-
-
-section {* Tracing higher-order unification *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
-    @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
-    @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
-    @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
-  \end{tabular}
-  \medskip
-
-  Higher-order unification works well in most practical situations,
-  but sometimes needs extra care to identify problems.  These tracing
-  options may help.
-
-  \begin{description}
-
-  \item @{attribute unify_trace_simp} controls tracing of the
-  simplification phase of higher-order unification.
-
-  \item @{attribute unify_trace_types} controls warnings of
-  incompleteness, when unification is not considering all possible
-  instantiations of schematic type variables.
-
-  \item @{attribute unify_trace_bound} determines the depth at which
-  unification starts to print tracing information; 0 for full tracing.
-  At the default value, tracing information is almost never printed in
-  practice.
-
-  \item @{attribute unify_search_bound} prevents unification from
-  searching past the given depth.  Because of this bound, higher-order
-  unification cannot return an infinite sequence, though it can return
-  an exponentially long one.  The search rarely approaches the default
-  value in practice.  If the search is cut off, unification prints a
-  warning ``Unification bound exceeded''.
-
-  \end{description}
-
-  \begin{warn}
-  Options for unification cannot be modified in a local context.  Only
-  the global theory content is taken into account.
-  \end{warn}
-*}
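-
-text {* For example, the search bound could be raised for a theory
-  with particularly hard higher-order unification problems --- a
-  minimal sketch: *}
-
-declare [[unify_search_bound = 100]]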
-
-end
--- a/src/Doc/IsarRef/HOL_Specific.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2677 +0,0 @@
-theory HOL_Specific
-imports Base Main "~~/src/HOL/Library/Old_Recdef" "~~/src/Tools/Adhoc_Overloading"
-begin
-
-chapter {* Higher-Order Logic *}
-
-text {* Isabelle/HOL is based on Higher-Order Logic, a polymorphic
-  version of Church's Simple Theory of Types.  HOL can be best
-  understood as a simply-typed version of classical set theory.  The
-  logic was first implemented in Gordon's HOL system
-  \cite{mgordon-hol}.  It extends Church's original logic
-  \cite{church40} by explicit type variables (naive polymorphism) and
-  a sound axiomatization scheme for new types based on subsets of
-  existing types.
-
-  Andrews's book \cite{andrews86} is a full description of the
-  original Church-style higher-order logic, with proofs of correctness
-  and completeness wrt.\ certain set-theoretic interpretations.  The
-  particular extensions of Gordon-style HOL are explained semantically
-  in two chapters of the 1993 HOL book \cite{pitts93}.
-
-  Experience with HOL over decades has demonstrated that higher-order
-  logic is widely applicable in many areas of mathematics and computer
-  science.  In a sense, Higher-Order Logic is simpler than First-Order
-  Logic, because there are fewer restrictions and special cases.  Note
-  that HOL is \emph{weaker} than FOL with axioms for ZF set theory,
-  which is traditionally considered the standard foundation of regular
-  mathematics, but for most applications this does not matter.  If you
-  prefer ML to Lisp, you will probably prefer HOL to ZF.
-
-  \medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
-  functional programming.  Function application is curried.  To apply
-  the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
-  arguments @{text a} and @{text b} in HOL, you simply write @{text "f
-  a b"} (as in ML or Haskell).  There is no ``apply'' operator; the
-  existing application of the Pure @{text "\<lambda>"}-calculus is re-used.
-  Note that in HOL @{text "f (a, b)"} means ``@{text "f"} applied to
-  the pair @{text "(a, b)"} (which is notation for @{text "Pair a
-  b"}).  The latter typically introduces extra formal efforts that can
-  be avoided by currying functions by default.  Explicit tuples are as
-  infrequent in HOL formalizations as in good ML or Haskell programs.
-
-  \medskip Isabelle/HOL has a distinct feel, compared to other
-  object-logics like Isabelle/ZF.  It identifies object-level types
-  with meta-level types, taking advantage of the default
-  type-inference mechanism of Isabelle/Pure.  HOL fully identifies
-  object-level functions with meta-level functions, with native
-  abstraction and application.
-
-  These identifications allow Isabelle to support HOL particularly
-  nicely, but they also mean that HOL requires some sophistication
-  from the user.  In particular, it requires an understanding of
-  Hindley-Milner type-inference with type-classes, which are both used
-  extensively in the standard libraries and applications.  Beginners can set
-  @{attribute show_types} or even @{attribute show_sorts} to get more
-  explicit information about the result of type-inference.  *}
-
-
-chapter {* Derived specification elements *}
-
-section {* Inductive and coinductive definitions \label{sec:hol-inductive} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "coinductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "coinductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "print_inductives"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def (HOL) mono} & : & @{text attribute} \\
-  \end{matharray}
-
-  An \emph{inductive definition} specifies the least predicate or set
-  @{text R} closed under given rules: applying a rule to elements of
-  @{text R} yields a result within @{text R}.  For example, a
-  structural operational semantics is an inductive definition of an
-  evaluation relation.
-
-  Dually, a \emph{coinductive definition} specifies the greatest
-  predicate or set @{text R} that is consistent with given rules:
-  every element of @{text R} can be seen as arising by applying a rule
-  to elements of @{text R}.  An important example is using
-  bisimulation relations to formalise equivalence of processes and
-  infinite data structures.
-
-  Both inductive and coinductive definitions are based on the
-  Knaster-Tarski fixed-point theorem for complete lattices.  The
-  collection of introduction rules given by the user determines a
-  functor on subsets of set-theoretic relations.  The required
-  monotonicity of the recursion scheme is proven as a prerequisite to
-  the fixed-point definition and the resulting consequences.  This
-  works by pushing inclusion through logical connectives and any other
-  operator that might be wrapped around recursive occurrences of the
-  defined relation: there must be a monotonicity theorem of the form
-  @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for each premise @{text "\<M> R t"} in an
-  introduction rule.  The default rule declarations of Isabelle/HOL
-  already take care of most common situations.
-
-  @{rail \<open>
-    (@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
-      @@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
-    @{syntax target}? \<newline>
-    @{syntax "fixes"} (@'for' @{syntax "fixes"})? (@'where' clauses)? \<newline>
-    (@'monos' @{syntax thmrefs})?
-    ;
-    clauses: (@{syntax thmdecl}? @{syntax prop} + '|')
-    ;
-    @@{attribute (HOL) mono} (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "inductive"} and @{command (HOL)
-  "coinductive"} define (co)inductive predicates from the introduction
-  rules.
-
-  The propositions given as @{text "clauses"} in the @{keyword
-  "where"} part are either rules of the usual @{text "\<And>/\<Longrightarrow>"} format
-  (with arbitrary nesting), or equalities using @{text "\<equiv>"}.  The
-  latter specifies extra-logical abbreviations in the sense of
-  @{command_ref abbreviation}.  Introducing abstract syntax
-  simultaneously with the actual introduction rules is occasionally
-  useful for complex specifications.
-
-  The optional @{keyword "for"} part contains a list of parameters of
-  the (co)inductive predicates that remain fixed throughout the
-  definition, in contrast to arguments of the relation that may vary
-  in each occurrence within the given @{text "clauses"}.
-
-  The optional @{keyword "monos"} declaration contains additional
-  \emph{monotonicity theorems}, which are required for each operator
-  applied to a recursive set in the introduction rules.
-
-  \item @{command (HOL) "inductive_set"} and @{command (HOL)
-  "coinductive_set"} are wrappers for to the previous commands for
-  native HOL predicates.  This allows to define (co)inductive sets,
-  where multiple arguments are simulated via tuples.
-
-  \item @{command "print_inductives"} prints (co)inductive definitions and
-  monotonicity rules.
-
-  \item @{attribute (HOL) mono} declares monotonicity rules in the
-  context.  These rules are involved in the automated monotonicity
-  proof of the above inductive and coinductive definitions.
-
-  \end{description}
-*}
-
-
-subsection {* Derived rules *}
-
-text {* A (co)inductive definition of @{text R} provides the following
-  main theorems:
-
-  \begin{description}
-
-  \item @{text R.intros} is the list of introduction rules as proven
-  theorems, for the recursive predicates (or sets).  The rules are
-  also available individually, using the names given them in the
-  theory file;
-
-  \item @{text R.cases} is the case analysis (or elimination) rule;
-
-  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
-  rule;
-
-  \item @{text R.simps} is the equation unrolling the fixpoint of the
-  predicate one step.
-
-  \end{description}
-
-  When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are
-  defined simultaneously, the list of introduction rules is called
-  @{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the case analysis rules are
-  called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
-  of mutual induction rules is called @{text
-  "R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
-*}
-
-
-subsection {* Monotonicity theorems *}
-
-text {* The context maintains a default set of theorems that are used
-  in monotonicity proofs.  New rules can be declared via the
-  @{attribute (HOL) mono} attribute.  See the main Isabelle/HOL
-  sources for some examples.  The general format of such monotonicity
-  theorems is as follows:
-
-  \begin{itemize}
-
-  \item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
-  monotonicity of inductive definitions whose introduction rules have
-  premises involving terms such as @{text "\<M> R t"}.
-
-  \item Monotonicity theorems for logical operators, which are of the
-  general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}.  For example, in
-  the case of the operator @{text "\<or>"}, the corresponding theorem is
-  \[
-  \infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
-  \]
-
-  \item De Morgan style equations for reasoning about the ``polarity''
-  of expressions, e.g.
-  \[
-  @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
-  @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
-  \]
-
-  \item Equations for reducing complex operators to more primitive
-  ones whose monotonicity can easily be proved, e.g.
-  \[
-  @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
-  @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
-  \]
-
-  \end{itemize}
-*}
-
-subsubsection {* Examples *}
-
-text {* The finite powerset operator can be defined inductively like this: *}
-
-inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
-where
-  empty: "{} \<in> Fin A"
-| insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
-
-text {* The accessible part of a relation is defined as follows: *}
-
-inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
-  for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infix "\<prec>" 50)
-where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
-
-text {* Common logical connectives can be easily characterized as
-non-recursive inductive definitions with parameters, but without
-arguments. *}
-
-inductive AND for A B :: bool
-where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
-
-inductive OR for A B :: bool
-where "A \<Longrightarrow> OR A B"
-  | "B \<Longrightarrow> OR A B"
-
-inductive EXISTS for B :: "'a \<Rightarrow> bool"
-where "B a \<Longrightarrow> EXISTS B"
-
-text {* Here the @{text "cases"} or @{text "induct"} rules produced by
-  the @{command inductive} package coincide with the expected
-  elimination rules for Natural Deduction.  Already in the original
-  article by Gerhard Gentzen \cite{Gentzen:1935} there is a hint that
-  each connective can be characterized by its introductions, and the
-  elimination can be constructed systematically. *}
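-
-text {* A minimal sketch of the derived rules in action: the rule
-  @{text "Fin.induct"} from the first example above supports proofs by
-  rule induction. *}
-
-lemma
-  assumes "B \<in> Fin A"
-  shows "B \<subseteq> A"
-  using assms by (induct rule: Fin.induct) auto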
-
-
-section {* Recursive functions \label{sec:recursion} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "function"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def (HOL) "termination"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def (HOL) "fun_cases"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) primrec} @{syntax target}? @{syntax "fixes"} @'where' equations
-    ;
-    (@@{command (HOL) fun} | @@{command (HOL) function}) @{syntax target}? functionopts?
-      @{syntax "fixes"} \<newline> @'where' equations
-    ;
-
-    equations: (@{syntax thmdecl}? @{syntax prop} + '|')
-    ;
-    functionopts: '(' (('sequential' | 'domintros') + ',') ')'
-    ;
-    @@{command (HOL) termination} @{syntax term}?
-    ;
-    @@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "primrec"} defines primitive recursive
-  functions over datatypes (see also @{command_ref (HOL) datatype} and
-  @{command_ref (HOL) rep_datatype}).  The given @{text equations}
-  specify reduction rules that are produced by instantiating the
-  generic combinator for primitive recursion that is available for
-  each datatype.
-
-  Each equation needs to be of the form:
-
-  @{text [display] "f x\<^sub>1 \<dots> x\<^sub>m (C y\<^sub>1 \<dots> y\<^sub>k) z\<^sub>1 \<dots> z\<^sub>n = rhs"}
-
-  such that @{text C} is a datatype constructor, @{text rhs} contains
-  only the free variables on the left-hand side (or from the context),
-  and all recursive occurrences of @{text "f"} in @{text "rhs"} are of
-  the form @{text "f \<dots> y\<^sub>i \<dots>"} for some @{text i}.  At most one
-  reduction rule for each constructor can be given.  The order does
-  not matter.  For missing constructors, the function is defined to
-  return a default value, but this equation is made difficult to
-  access for users.
-
-  The reduction rules are declared as @{attribute simp} by default,
-  which enables standard proof methods like @{method simp} and
-  @{method auto} to normalize expressions of @{text "f"} applied to
-  datatype constructions, by simulating symbolic computation via
-  rewriting.
-
-  \item @{command (HOL) "function"} defines functions by general
-  wellfounded recursion. A detailed description with examples can be
-  found in \cite{isabelle-function}. The function is specified by a
-  set of (possibly conditional) recursive equations with arbitrary
-  pattern matching. The command generates proof obligations for the
-  completeness and the compatibility of patterns.
-
-  The defined function is considered partial, and the resulting
-  simplification rules (named @{text "f.psimps"}) and induction rule
-  (named @{text "f.pinduct"}) are guarded by a generated domain
-  predicate @{text "f_dom"}. The @{command (HOL) "termination"}
-  command can then be used to establish that the function is total.
-
-  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command
-  (HOL) "function"}~@{text "(sequential)"}, followed by automated
-  proof attempts regarding pattern matching and termination.  See
-  \cite{isabelle-function} for further details.
-
-  \item @{command (HOL) "termination"}~@{text f} commences a
-  termination proof for the previously defined function @{text f}.  If
-  this is omitted, the command refers to the most recent function
-  definition.  After the proof is closed, the recursive equations and
-  the induction principle are established.
-
-  \item @{command (HOL) "fun_cases"} generates specialized elimination
-  rules for function equations. It expects one or more function equations
-  and produces rules that eliminate the given equalities, following the cases
-  given in the function definition.
-  \end{description}
-
-  Recursive definitions introduced by the @{command (HOL) "function"}
-  command accommodate reasoning by induction (cf.\ @{method induct}):
-  rule @{text "f.induct"} refers to a specific induction rule, with
-  parameters named according to the user-specified equations. Cases
-  are numbered starting from 1.  For @{command (HOL) "primrec"}, the
-  induction principle coincides with structural recursion on the
-  datatype where the recursion is carried out.
-
-  The equations provided by these packages may be referred to later as
-  theorem list @{text "f.simps"}, where @{text f} is the (collective)
-  name of the functions defined.  Individual equations may be named
-  explicitly as well.
-
-  The @{command (HOL) "function"} command accepts the following
-  options.
-
-  \begin{description}
-
-  \item @{text sequential} enables a preprocessor which disambiguates
-  overlapping patterns by making them mutually disjoint.  Earlier
-  equations take precedence over later ones.  This allows the
-  specification to be given in a format very similar to functional
-  programming.
-  Note that the resulting simplification and induction rules
-  correspond to the transformed specification, not the one given
-  originally. This usually means that each equation given by the user
-  may result in several theorems.  Also note that this automatic
-  transformation only works for ML-style datatype patterns.
-
-  \item @{text domintros} enables the automated generation of
-  introduction rules for the domain predicate. While mostly not
-  needed, they can be helpful in some proofs about partial functions.
-
-  \end{description}
-*}
-
-subsubsection {* Example: evaluation of expressions *}
-
-text {* Subsequently, we define mutual datatypes for arithmetic and
-  boolean expressions, and use @{command primrec} for evaluation
-  functions that follow the same recursive structure. *}
-
-datatype 'a aexp =
-    IF "'a bexp"  "'a aexp"  "'a aexp"
-  | Sum "'a aexp"  "'a aexp"
-  | Diff "'a aexp"  "'a aexp"
-  | Var 'a
-  | Num nat
-and 'a bexp =
-    Less "'a aexp"  "'a aexp"
-  | And "'a bexp"  "'a bexp"
-  | Neg "'a bexp"
-
-
-text {* \medskip Evaluation of arithmetic and boolean expressions *}
-
-primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
-  and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
-where
-  "evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)"
-| "evala env (Sum a1 a2) = evala env a1 + evala env a2"
-| "evala env (Diff a1 a2) = evala env a1 - evala env a2"
-| "evala env (Var v) = env v"
-| "evala env (Num n) = n"
-| "evalb env (Less a1 a2) = (evala env a1 < evala env a2)"
-| "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
-| "evalb env (Neg b) = (\<not> evalb env b)"
-
-text {* Since the value of an expression depends on the value of its
-  variables, the functions @{const evala} and @{const evalb} take an
-  additional parameter, an \emph{environment} that maps variables to
-  their values.
-
-  \medskip Substitution on expressions can be defined similarly.  The
-  mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
-  parameter is lifted canonically on the types @{typ "'a aexp"} and
-  @{typ "'a bexp"}, respectively.
-*}
-
-primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
-  and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
-where
-  "substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)"
-| "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)"
-| "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)"
-| "substa f (Var v) = f v"
-| "substa f (Num n) = Num n"
-| "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)"
-| "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
-| "substb f (Neg b) = Neg (substb f b)"
-
-text {* In textbooks about semantics one often finds substitution
-  theorems, which express the relationship between substitution and
-  evaluation.  For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
-  such a theorem by mutual induction, followed by simplification.
-*}
-
-lemma subst_one:
-  "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
-  "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
-  by (induct a and b) simp_all
-
-lemma subst_all:
-  "evala env (substa s a) = evala (\<lambda>x. evala env (s x)) a"
-  "evalb env (substb s b) = evalb (\<lambda>x. evala env (s x)) b"
-  by (induct a and b) simp_all
-
-
-subsubsection {* Example: a substitution function for terms *}
-
-text {* Functions on datatypes with nested recursion are also defined
-  by mutual primitive recursion. *}
-
-datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
-
-text {* A substitution function on type @{typ "('a, 'b) term"} can be
-  defined as follows, by working simultaneously on @{typ "('a, 'b)
-  term list"}: *}
-
-primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
-  subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
-where
-  "subst_term f (Var a) = f a"
-| "subst_term f (App b ts) = App b (subst_term_list f ts)"
-| "subst_term_list f [] = []"
-| "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
-
-text {* The recursion scheme follows the structure of the unfolded
-  definition of type @{typ "('a, 'b) term"}.  To prove properties of this
-  substitution function, mutual induction is needed:
-*}
-
-lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
-  "subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
-  by (induct t and ts) simp_all
-
-
-subsubsection {* Example: a map function for infinitely branching trees *}
-
-text {* Defining functions on infinitely branching datatypes by
-  primitive recursion is just as easy.
-*}
-
-datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
-
-primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
-where
-  "map_tree f (Atom a) = Atom (f a)"
-| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
-
-text {* Note that all occurrences of functions such as @{text ts}
-  above must be applied to an argument.  In particular, @{term
-  "map_tree f \<circ> ts"} is not allowed here. *}
-
-text {* Here is a simple composition lemma for @{term map_tree}: *}
-
-lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
-  by (induct t) simp_all
-
-
-subsection {* Proof methods related to recursive definitions *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) pat_completeness} & : & @{text method} \\
-    @{method_def (HOL) relation} & : & @{text method} \\
-    @{method_def (HOL) lexicographic_order} & : & @{text method} \\
-    @{method_def (HOL) size_change} & : & @{text method} \\
-    @{method_def (HOL) induction_schema} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) relation} @{syntax term}
-    ;
-    @@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
-    ;
-    @@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) )
-    ;
-    @@{method (HOL) induction_schema}
-    ;
-    orders: ( 'max' | 'min' | 'ms' ) *
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) pat_completeness} is a specialized method to
-  solve goals regarding the completeness of pattern matching, as
-  required by the @{command (HOL) "function"} package (cf.\
-  \cite{isabelle-function}).
-
-  \item @{method (HOL) relation}~@{text R} introduces a termination
-  proof using the relation @{text R}.  The resulting proof state will
-  contain goals expressing that @{text R} is wellfounded, and that the
-  arguments of recursive calls decrease with respect to @{text R}.
-  Usually, this method is used as the initial proof step of manual
-  termination proofs.
-
-  \item @{method (HOL) "lexicographic_order"} attempts a fully
-  automated termination proof by searching for a lexicographic
-  combination of size measures on the arguments of the function. The
-  method accepts the same arguments as the @{method auto} method,
-  which it uses internally to prove local descents.  The @{syntax
-  clasimpmod} modifiers are accepted (as for @{method auto}).
-
-  In case of failure, extensive information is printed, which can help
-  to analyse the situation (cf.\ \cite{isabelle-function}).
-
-  \item @{method (HOL) "size_change"} also works on termination goals,
-  using a variation of the size-change principle, together with a
-  graph decomposition technique (see \cite{krauss_phd} for details).
-  Three kinds of orders are used internally: @{text max}, @{text min},
-  and @{text ms} (multiset), which is only available when the theory
-  @{text Multiset} is loaded. When no order kinds are given, they are
-  tried in order. The search for a termination proof uses SAT solving
-  internally.
-
-  For local descent proofs, the @{syntax clasimpmod} modifiers are
-  accepted (as for @{method auto}).
-
-  \item @{method (HOL) induction_schema} derives user-specified
-  induction rules from well-founded induction and completeness of
-  patterns. This factors out some operations that are done internally
-  by the function package and makes them available separately. See
-  @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
-
-  \end{description}
-*}
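-
-text {* A minimal sketch using the classic Ackermann function (the
-  constant name is arbitrary): the proof obligations for pattern
-  completeness and compatibility are discharged by @{method (HOL)
-  pat_completeness} and @{method auto}, and termination by @{method
-  (HOL) lexicographic_order}. *}
-
-function ack :: "nat \<Rightarrow> nat \<Rightarrow> nat"
-where
-  "ack 0 n = Suc n"
-| "ack (Suc m) 0 = ack m 1"
-| "ack (Suc m) (Suc n) = ack m (ack (Suc m) n)"
-  by pat_completeness auto
-
-termination
-  by lexicographic_order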
-
-
-subsection {* Functions with explicit partiality *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) partial_function} @{syntax target}?
-      '(' @{syntax nameref} ')' @{syntax "fixes"} \<newline>
-      @'where' @{syntax thmdecl}? @{syntax prop}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
-  recursive functions based on fixpoints in complete partial
-  orders. No termination proof is required from the user or
-  constructed internally. Instead, the possibility of non-termination
-  is modelled explicitly in the result type, which contains an
-  explicit bottom element.
-
-  Pattern matching and mutual recursion are currently not supported.
-  Thus, the specification consists of a single function described by a
-  single recursive equation.
-
-  There are no fixed syntactic restrictions on the body of the
-  function, but the induced functional must be provably monotonic
-  wrt.\ the underlying order.  The monotonicity proof is performed
-  internally, and the definition is rejected when it fails. The proof
-  can be influenced by declaring hints using the
-  @{attribute (HOL) partial_function_mono} attribute.
-
-  The mandatory @{text mode} argument specifies the mode of operation
-  of the command, which directly corresponds to a complete partial
-  order on the result type. By default, the following modes are
-  defined:
-
-  \begin{description}
-
-  \item @{text option} defines functions that map into the @{type
-  option} type. Here, the value @{term None} is used to model a
-  non-terminating computation. Monotonicity requires that if @{term
-  None} is returned by a recursive call, then the overall result must
-  also be @{term None}. This is best achieved through the use of the
-  monadic operator @{const "Option.bind"}.
-
-  \item @{text tailrec} defines functions with an arbitrary result
-  type and uses the slightly degenerate partial order where @{term
-  "undefined"} is the bottom element.  Now, monotonicity requires that
-  if @{term undefined} is returned by a recursive call, then the
-  overall result must also be @{term undefined}. In practice, this is
-  only satisfied when each recursive call is a tail call, whose result
-  is directly returned. Thus, this mode of operation allows the
-  definition of arbitrary tail-recursive functions.
-
-  \end{description}
-
-  Experienced users may define new modes by instantiating the locale
-  @{const "partial_function_definitions"} appropriately.
-
-  \item @{attribute (HOL) partial_function_mono} declares rules for
-  use in the internal monotonicity proofs of partial function
-  definitions.
-
-  \end{description}
-
-*}
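-
-text {* A minimal sketch in @{text tailrec} mode (the constant name is
-  arbitrary); no termination proof is required, even though termination
-  of this function is a famous open problem: *}
-
-partial_function (tailrec) collatz :: "nat \<Rightarrow> bool"
-where
-  "collatz n = (if n \<le> 1 then True
-    else if n mod 2 = 0 then collatz (n div 2)
-    else collatz (3 * n + 1))"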
-
-
-subsection {* Old-style recursive function definitions (TFL) *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory)"} \\
-    @{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  The old TFL commands @{command (HOL) "recdef"} and @{command (HOL)
-  "recdef_tc"} for defining recursive are mostly obsolete; @{command
-  (HOL) "function"} or @{command (HOL) "fun"} should be used instead.
-
-  @{rail \<open>
-    @@{command (HOL) recdef} ('(' @'permissive' ')')? \<newline>
-      @{syntax name} @{syntax term} (@{syntax prop} +) hints?
-    ;
-    @@{command (HOL) recdef_tc} @{syntax thmdecl}? tc
-    ;
-    hints: '(' @'hints' ( recdefmod * ) ')'
-    ;
-    recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
-      (() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
-    ;
-    tc: @{syntax nameref} ('(' @{syntax nat} ')')?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "recdef"} defines general well-founded
-  recursive functions (using the TFL package), see also
-  \cite{isabelle-HOL}.  The ``@{text "(permissive)"}'' option tells
-  TFL to recover from failed proof attempts, returning unfinished
-  results.  The @{text recdef_simp}, @{text recdef_cong}, and @{text
-  recdef_wf} hints refer to auxiliary rules to be used in the internal
-  automated proof process of TFL.  Additional @{syntax clasimpmod}
-  declarations may be given to tune the context of the Simplifier
-  (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
-  \secref{sec:classical}).
-
-  \item @{command (HOL) "recdef_tc"}~@{text "c (i)"} recommences the
-  proof for leftover termination condition number @{text i} (default
-  1) as generated by a @{command (HOL) "recdef"} definition of
-  constant @{text c}.
-
-  Note that in most cases, @{command (HOL) "recdef"} is able to finish
-  its internal proofs without manual intervention.
-
-  \end{description}
-
-  \medskip Hints for @{command (HOL) "recdef"} may be also declared
-  globally, using the following attributes.
-
-  \begin{matharray}{rcl}
-    @{attribute_def (HOL) recdef_simp} & : & @{text attribute} \\
-    @{attribute_def (HOL) recdef_cong} & : & @{text attribute} \\
-    @{attribute_def (HOL) recdef_wf} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
-      @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
-  \<close>}
-*}
-
-
-section {* Datatypes \label{sec:hol-datatype} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) datatype} (spec + @'and')
-    ;
-    @@{command (HOL) rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
-    ;
-
-    spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
-    ;
-    cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "datatype"} defines inductive datatypes in
-  HOL.
-
-  \item @{command (HOL) "rep_datatype"} represents existing types as
-  datatypes.
-
-  For foundational reasons, some basic types such as @{typ nat}, @{typ
-  "'a \<times> 'b"}, @{typ "'a + 'b"}, @{typ bool} and @{typ unit} are
-  introduced by more primitive means using @{command_ref typedef}.  To
-  recover the rich infrastructure of @{command datatype} (e.g.\ rules
-  for @{method cases} and @{method induct} and the primitive recursion
-  combinators), such types may be represented as actual datatypes
-  later.  This is done by specifying the constructors of the desired
-  type, and giving a proof of the induction rule, distinctness and
-  injectivity of constructors.
-
-  For example, see @{file "~~/src/HOL/Sum_Type.thy"} for the
-  representation of the primitive sum type as a fully-featured datatype.
-
-  \end{description}
-
-  The generated rules for @{method induct} and @{method cases} provide
-  case names according to the given constructors, while parameters are
-  named after the types (see also \secref{sec:cases-induct}).
-
-  See \cite{isabelle-HOL} for more details on datatypes, but beware of
-  the old-style theory syntax being used there!  Apart from proper
-  proof methods for case-analysis and induction, there are also
-  emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL)
-  induct_tac} available, see \secref{sec:hol-induct-tac}; these allow
-  direct reference to the internal structure of subgoals (including
-  internally bound parameters).
-*}
-
-
-subsubsection {* Examples *}
-
-text {* We define a type of finite sequences, with slightly different
-  names than the existing @{typ "'a list"} that is already in @{theory
-  Main}: *}
-
-datatype 'a seq = Empty | Seq 'a "'a seq"
-
-text {* We can now prove a simple lemma by structural induction: *}
-
-lemma "Seq x xs \<noteq> xs"
-proof (induct xs arbitrary: x)
-  case Empty
-  txt {* This case can be proved using the simplifier: the freeness
-    properties of the datatype are already declared as @{attribute
-    simp} rules. *}
-  show "Seq x Empty \<noteq> Empty"
-    by simp
-next
-  case (Seq y ys)
-  txt {* The step case is proved similarly. *}
-  show "Seq x (Seq y ys) \<noteq> Seq y ys"
-    using `Seq y ys \<noteq> ys` by simp
-qed
-
-text {* Here is a more succinct version of the same proof: *}
-
-lemma "Seq x xs \<noteq> xs"
-  by (induct xs arbitrary: x) simp_all
-
-
-section {* Records \label{sec:hol-record} *}
-
-text {*
-  In principle, records merely generalize the concept of tuples, where
-  components may be addressed by labels instead of just position.  The
-  logical infrastructure of records in Isabelle/HOL is slightly more
-  advanced, though, supporting truly extensible record schemes.  This
-  admits operations that are polymorphic with respect to record
-  extension, yielding ``object-oriented'' effects like (single)
-  inheritance.  See also \cite{NaraschewskiW-TPHOLs98} for more
-  details on object-oriented verification and record subtyping in HOL.
-*}
-
-
-subsection {* Basic concepts *}
-
-text {*
-  Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
-  at the level of terms and types.  The notation is as follows:
-
-  \begin{center}
-  \begin{tabular}{l|l|l}
-    & record terms & record types \\ \hline
-    fixed & @{text "\<lparr>x = a, y = b\<rparr>"} & @{text "\<lparr>x :: A, y :: B\<rparr>"} \\
-    schematic & @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} &
-      @{text "\<lparr>x :: A, y :: B, \<dots> :: M\<rparr>"} \\
-  \end{tabular}
-  \end{center}
-
-  \noindent The ASCII representation of @{text "\<lparr>x = a\<rparr>"} is @{text
-  "(| x = a |)"}.
-
-  A fixed record @{text "\<lparr>x = a, y = b\<rparr>"} has field @{text x} of value
-  @{text a} and field @{text y} of value @{text b}.  The corresponding
-  type is @{text "\<lparr>x :: A, y :: B\<rparr>"}, assuming that @{text "a :: A"}
-  and @{text "b :: B"}.
-
-  A record scheme like @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} contains fields
-  @{text x} and @{text y} as before, but also possibly further fields
-  as indicated by the ``@{text "\<dots>"}'' notation (which is actually part
-  of the syntax).  The improper field ``@{text "\<dots>"}'' of a record
-  scheme is called the \emph{more part}.  Logically it is just a free
-  variable, which is occasionally referred to as ``row variable'' in
-  the literature.  The more part of a record scheme may be
-  instantiated by zero or more further components.  For example, the
-  previous scheme may get instantiated to @{text "\<lparr>x = a, y = b, z =
-  c, \<dots> = m'\<rparr>"}, where @{text m'} refers to a different more part.
-  Fixed records are special instances of record schemes, where
-  ``@{text "\<dots>"}'' is properly terminated by the @{text "() :: unit"}
-  element.  In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
-  for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
-
-  \medskip Two key observations make extensible records in a simply
-  typed language like HOL work out:
-
-  \begin{enumerate}
-
-  \item the more part is internalized, as a free term or type
-  variable,
-
-  \item field names are externalized, they cannot be accessed within
-  the logic as first-class values.
-
-  \end{enumerate}
-
-  \medskip In Isabelle/HOL record types have to be defined explicitly,
-  fixing their field names and types, and their (optional) parent
-  record.  Afterwards, records may be formed using the above syntax, while
-  obeying the canonical order of fields as given by their declaration.
-  The record package provides several standard operations like
-  selectors and updates.  The common setup for various generic proof
-  tools enable succinct reasoning patterns.  See also the Isabelle/HOL
-  tutorial \cite{isabelle-hol-book} for further instructions on using
-  records in practice.
-*}
-
-
-subsection {* Record specifications *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) record} @{syntax typespec_sorts} '=' \<newline>
-      (@{syntax type} '+')? (constdecl +)
-    ;
-    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
-  \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
-  derived from the optional parent record @{text "\<tau>"} by adding new
-  field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
-
-  The type variables of @{text "\<tau>"} and @{text "\<sigma>\<^sub>i"} need to be
-  covered by the (distinct) parameters @{text "\<alpha>\<^sub>1, \<dots>,
-  \<alpha>\<^sub>m"}.  Type constructor @{text t} has to be new, while @{text
-  \<tau>} needs to specify an instance of an existing record type.  At
-  least one new field @{text "c\<^sub>i"} has to be specified.
-  Basically, field names need to belong to a unique record.  This is
-  not a real restriction in practice, since fields are qualified by
-  the record name internally.
-
-  The parent record specification @{text \<tau>} is optional; if omitted
-  @{text t} becomes a root record.  The hierarchy of all records
-  declared within a theory context forms a forest structure, i.e.\ a
-  set of trees starting with a root record each.  There is no way to
-  merge multiple parent records!
-
-  For convenience, @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is made a
-  type abbreviation for the fixed record type @{text "\<lparr>c\<^sub>1 ::
-  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n\<rparr>"}, likewise is @{text
-  "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
-  @{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
-  \<zeta>\<rparr>"}.
-
-  \end{description}
-*}
-
-
-subsection {* Record operations *}
-
-text {*
-  Any record definition of the form presented above produces certain
-  standard operations.  Selectors and updates are provided for any
-  field, including the improper one ``@{text more}''.  There are also
-  cumulative record constructor functions.  To simplify the
-  presentation below, we assume for now that @{text "(\<alpha>\<^sub>1, \<dots>,
-  \<alpha>\<^sub>m) t"} is a root record with fields @{text "c\<^sub>1 ::
-  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
-
-  \medskip \textbf{Selectors} and \textbf{updates} are available for
-  any field (including ``@{text more}''):
-
-  \begin{matharray}{lll}
-    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
-    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
-  \end{matharray}
-
-  There is special syntax for application of updates: @{text "r\<lparr>x :=
-  a\<rparr>"} abbreviates term @{text "x_update a r"}.  Further notation for
-  repeated updates is also available: @{text "r\<lparr>x := a\<rparr>\<lparr>y := b\<rparr>\<lparr>z :=
-  c\<rparr>"} may be written @{text "r\<lparr>x := a, y := b, z := c\<rparr>"}.  Note that
-  because of postfix notation the order of fields shown here is
-  reversed compared to the actual term.  Since repeated updates are just
-  function applications, fields may be freely permuted in @{text "\<lparr>x
-  := a, y := b, z := c\<rparr>"}, as far as logical equality is concerned.
-  Thus commutativity of independent updates can be proven within the
-  logic for any two fields, but not as a general theorem.
-
-  \medskip The \textbf{make} operation provides a cumulative record
-  constructor function:
-
-  \begin{matharray}{lll}
-    @{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-  \end{matharray}
-
-  \medskip We now reconsider the case of non-root records, which are
-  derived from some parent.  In general, the latter may depend on
-  another parent as well, resulting in a list of \emph{ancestor
-  records}.  Appending the lists of fields of all ancestors results in
-  a certain field prefix.  The record package automatically takes care
-  of this by lifting operations over this context of ancestor fields.
-  Assuming that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} has ancestor
-  fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"},
-  the above record operations will get the following types:
-
-  \medskip
-  \begin{tabular}{lll}
-    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
-    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
-      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow>
-      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
-    @{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
-      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-  \end{tabular}
-  \medskip
-
-  \noindent Some further operations address the extension aspect of a
-  derived record scheme specifically: @{text "t.fields"} produces a
-  record fragment consisting of exactly the new fields introduced here
-  (the result may serve as a more part elsewhere); @{text "t.extend"}
-  takes a fixed record and adds a given more part; @{text
-  "t.truncate"} restricts a record scheme to a fixed record.
-
-  \medskip
-  \begin{tabular}{lll}
-    @{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-    @{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
-      \<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
-    @{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-  \end{tabular}
-  \medskip
-
-  \noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
-  for root records.
-*}
-
-
-subsection {* Derived rules and proof tools *}
-
-text {*
-  The record package proves several results internally, declaring
-  these facts to appropriate proof tools.  This enables users to
-  reason about record structures quite conveniently.  Assume that
-  @{text t} is a record type as specified above.
-
-  \begin{enumerate}
-
-  \item Standard conversions for selectors or updates applied to
-  record constructor terms are made part of the default Simplifier
-  context; thus proofs by reduction of basic operations merely require
-  the @{method simp} method without further arguments.  These rules
-  are available as @{text "t.simps"}, too.
-
-  \item Selectors applied to updated records are automatically reduced
-  by an internal simplification procedure, which is also part of the
-  standard Simplifier setup.
-
-  \item Inject equations of a form analogous to @{prop "(x, y) = (x',
-  y') \<equiv> x = x' \<and> y = y'"} are declared to the Simplifier and Classical
-  Reasoner as @{attribute iff} rules.  These rules are available as
-  @{text "t.iffs"}.
-
-  \item The introduction rule for record equality analogous to @{text
-  "x r = x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier,
-  and to the basic rule context as ``@{attribute intro}@{text "?"}''.
-  The rule is called @{text "t.equality"}.
-
-  \item Representations of arbitrary record expressions as canonical
-  constructor terms are provided both in @{method cases} and @{method
-  induct} format (cf.\ the generic proof methods of the same name,
-  \secref{sec:cases-induct}).  Several variations are available, for
-  fixed records, record schemes, more parts etc.
-
-  The generic proof methods are sufficiently smart to pick the most
-  sensible rule according to the type of the indicated record
-  expression: users just need to apply something like ``@{text "(cases
-  r)"}'' to a certain proof problem.
-
-  \item The derived record operations @{text "t.make"}, @{text
-  "t.fields"}, @{text "t.extend"}, @{text "t.truncate"} are \emph{not}
-  treated automatically, but usually need to be expanded by hand,
-  using the collective fact @{text "t.defs"}.
-
-  \end{enumerate}
-*}
-
-
-subsubsection {* Examples *}
-
-text {* See @{file "~~/src/HOL/ex/Records.thy"}, for example. *}
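-
-text {* The following is a minimal sketch (the record and field names
-  are invented for illustration only): a root record, a record
-  extension, and a simple fact about selectors and updates that is
-  proved by the default Simplifier setup of the record package. *}
-
-record point2d =
-  xcoord :: int
-  ycoord :: int
-
-record cpoint = point2d +
-  colour :: nat
-
-lemma "xcoord (\<lparr>xcoord = a, ycoord = b\<rparr>\<lparr>ycoord := c\<rparr>) = a"
-  by simp
-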
-
-section {* Typedef axiomatization \label{sec:hol-typedef} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  A Gordon/HOL-style type definition is a certain axiom scheme that
-  identifies a new type with a subset of an existing type.  More
-  precisely, the new type is defined by exhibiting an existing type
-  @{text \<tau>}, a set @{text "A :: \<tau> set"}, and a theorem that proves
-  @{prop "\<exists>x. x \<in> A"}.  Thus @{text A} is a non-empty subset of @{text
-  \<tau>}, and the new type denotes this subset.  New functions are
-  postulated that establish an isomorphism between the new type and
-  the subset.  In general, the type @{text \<tau>} may involve type
-  variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} which means that the type definition
-  produces a type constructor @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} depending on
-  those type arguments.
-
-  The axiomatization can be considered a ``definition'' in the sense
-  of the particular set-theoretic interpretation of HOL
-  \cite{pitts93}, where the universe of types is required to be
-  downwards-closed wrt.\ arbitrary non-empty subsets.  Thus genuinely
-  new types introduced by @{command "typedef"} stay within the range
-  of HOL models by construction.  Note that @{command_ref
-  type_synonym} from Isabelle/Pure merely introduces syntactic
-  abbreviations, without any logical significance.
-
-  @{rail \<open>
-    @@{command (HOL) typedef} abs_type '=' rep_set
-    ;
-    abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
-    ;
-    rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"}
-  axiomatizes a type definition in the background theory of the
-  current context, depending on a non-emptiness result of the set
-  @{text A} that needs to be proven here.  The set @{text A} may
-  contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} as specified on the LHS,
-  but no term variables.
-
-  Even though this is a local theory specification, the newly introduced type
-  constructor cannot depend on parameters or assumptions of the
-  context: this is structurally impossible in HOL.  In contrast, the
-  non-emptiness proof may use local assumptions in unusual situations,
-  which could result in different interpretations in target contexts:
-  the meaning of the bijection between the representing set @{text A}
-  and the new type @{text t} may then change in different application
-  contexts.
-
-  For @{command (HOL) "typedef"}~@{text "t = A"} the newly introduced
-  type @{text t} is accompanied by a pair of morphisms to relate it to
-  the representing set over the old type.  By default, the injection
-  from type to set is called @{text Rep_t} and its inverse @{text
-  Abs_t}; an explicit @{keyword (HOL) "morphisms"} specification
-  allows alternative names to be provided.
-
-  The core axiomatization uses the locale predicate @{const
-  type_definition} as defined in Isabelle/HOL.  Various basic
-  consequences of that are instantiated accordingly, re-using the
-  locale facts with names derived from the new type constructor.  Thus
-  the generic @{thm type_definition.Rep} is turned into the specific
-  @{text "Rep_t"}, for example.
-
-  Theorems @{thm type_definition.Rep}, @{thm
-  type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse}
-  provide the most basic characterization as a corresponding
-  injection/surjection pair (in both directions).  The derived rules
-  @{thm type_definition.Rep_inject} and @{thm
-  type_definition.Abs_inject} provide a more convenient version of
-  injectivity, suitable for automated proof tools (e.g.\ in
-  declarations involving @{attribute simp} or @{attribute iff}).
-  Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm
-  type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/
-  @{thm type_definition.Abs_induct} provide alternative views on
-  surjectivity.  These rules are already declared as set or type rules
-  for the generic @{method cases} and @{method induct} methods,
-  respectively.
-
-  \end{description}
-
-  \begin{warn}
-  If you introduce a new type axiomatically, i.e.\ via @{command_ref
-  typedecl} and @{command_ref axiomatization}, the minimum requirement
-  is that it has a non-empty model, to avoid immediate collapse of the
-  HOL logic.  Moreover, one needs to demonstrate that the
-  interpretation of such free-form axiomatizations can coexist with
-  that of the regular @{command_def typedef} scheme, and any extension
-  that other people might have introduced elsewhere.
-  \end{warn}
-*}
-
-subsubsection {* Examples *}
-
-text {* Type definitions permit the introduction of abstract data
-  types in a safe way, namely by providing models based on already
-  existing types.  Given some abstract axiomatic description @{text P}
-  of a type, this involves two steps:
-
-  \begin{enumerate}
-
-  \item Find an appropriate type @{text \<tau>} and subset @{text A} which
-  has the desired properties @{text P}, and make a type definition
-  based on this representation.
-
-  \item Prove that @{text P} holds for the new type by lifting @{text P}
-  from the representation.
-
-  \end{enumerate}
-
-  You can later forget about the representation and work solely in
-  terms of the abstract properties @{text P}.
-
-  \medskip The following trivial example pulls a three-element type
-  into existence within the formal logical environment of HOL. *}
-
-typedef three = "{(True, True), (True, False), (False, True)}"
-  by blast
-
-definition "One = Abs_three (True, True)"
-definition "Two = Abs_three (True, False)"
-definition "Three = Abs_three (False, True)"
-
-lemma three_distinct: "One \<noteq> Two"  "One \<noteq> Three"  "Two \<noteq> Three"
-  by (simp_all add: One_def Two_def Three_def Abs_three_inject)
-
-lemma three_cases:
-  fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
-  by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject)
-
-text {* Note that such trivial constructions are better done with
-  derived specification mechanisms such as @{command datatype}: *}
-
-datatype three' = One' | Two' | Three'
-
-text {* This avoids re-doing basic definitions and proofs from the
-  primitive @{command typedef} above. *}
-
-
-
-section {* Functorial structure of types *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "functor"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "functor"}~@{text "prefix: m"} allows to
-  prove and register properties about the functorial structure of type
-  constructors.  These properties then can be used by other packages
-  to deal with those type constructors in certain type constructions.
-  Characteristic theorems are noted in the current local theory.  By
-  default, they are prefixed with the base name of the type
-  constructor, an explicit prefix can be given alternatively.
-
-  The given term @{text "m"} is considered as the \emph{mapper} for the
-  corresponding type constructor and must conform to the following
-  type pattern:
-
-  \begin{matharray}{lll}
-    @{text "m"} & @{text "::"} &
-      @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>k \<Rightarrow> (\<^vec>\<alpha>\<^sub>n) t \<Rightarrow> (\<^vec>\<beta>\<^sub>n) t"} \\
-  \end{matharray}
-
-  \noindent where @{text t} is the type constructor, @{text
-  "\<^vec>\<alpha>\<^sub>n"} and @{text "\<^vec>\<beta>\<^sub>n"} are distinct
-  type variables free in the local theory and @{text "\<sigma>\<^sub>1"},
-  \ldots, @{text "\<sigma>\<^sub>k"} is a subsequence of @{text "\<alpha>\<^sub>1 \<Rightarrow>
-  \<beta>\<^sub>1"}, @{text "\<beta>\<^sub>1 \<Rightarrow> \<alpha>\<^sub>1"}, \ldots,
-  @{text "\<alpha>\<^sub>n \<Rightarrow> \<beta>\<^sub>n"}, @{text "\<beta>\<^sub>n \<Rightarrow>
-  \<alpha>\<^sub>n"}.
-
-  \end{description}
-*}
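-
-
-subsubsection {* Example *}
-
-text {* The following sketch equips the sequence type @{typ "'a seq"}
-  defined in \secref{sec:hol-datatype} with a functorial structure.
-  The constant @{text map_seq} and the auxiliary lemma names are
-  invented for illustration only. *}
-
-primrec map_seq :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a seq \<Rightarrow> 'b seq"
-where
-  "map_seq f Empty = Empty"
-| "map_seq f (Seq x xs) = Seq (f x) (map_seq f xs)"
-
-lemma map_seq_comp: "map_seq f (map_seq g xs) = map_seq (\<lambda>x. f (g x)) xs"
-  by (induct xs) simp_all
-
-lemma map_seq_id: "map_seq (\<lambda>x. x) xs = xs"
-  by (induct xs) simp_all
-
-functor map_seq
-  by (simp_all add: fun_eq_iff o_def id_def map_seq_comp map_seq_id)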
-
-
-section {* Quotient types *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
-    @{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
-    @{command_def (HOL) "print_quotmapsQ3"} & : & @{text "context \<rightarrow>"}\\
-    @{command_def (HOL) "print_quotientsQ3"} & : & @{text "context \<rightarrow>"}\\
-    @{command_def (HOL) "print_quotconsts"} & : & @{text "context \<rightarrow>"}\\
-    @{method_def (HOL) "lifting"} & : & @{text method} \\
-    @{method_def (HOL) "lifting_setup"} & : & @{text method} \\
-    @{method_def (HOL) "descending"} & : & @{text method} \\
-    @{method_def (HOL) "descending_setup"} & : & @{text method} \\
-    @{method_def (HOL) "partiality_descending"} & : & @{text method} \\
-    @{method_def (HOL) "partiality_descending_setup"} & : & @{text method} \\
-    @{method_def (HOL) "regularize"} & : & @{text method} \\
-    @{method_def (HOL) "injection"} & : & @{text method} \\
-    @{method_def (HOL) "cleaning"} & : & @{text method} \\
-    @{attribute_def (HOL) "quot_thm"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_lifted"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_respect"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_preserve"} & : & @{text attribute} \\
-  \end{matharray}
-
-  The quotient package defines a new quotient type given a raw type
-  and a partial equivalence relation.  The package also historically
-  includes automation for transporting definitions and theorems, but
-  most of this automation has been superseded by the Lifting and
-  Transfer packages.  The user should consider using these two newer
-  packages for lifting definitions and transporting theorems.
-
-  @{rail \<open>
-    @@{command (HOL) quotient_type} (spec)
-    ;
-    spec: @{syntax typespec} @{syntax mixfix}? '=' \<newline>
-     @{syntax type} '/' ('partial' ':')? @{syntax term} \<newline>
-     (@'morphisms' @{syntax name} @{syntax name})? (@'parametric' @{syntax thmref})?
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \<newline>
-    @{syntax term} 'is' @{syntax term}
-    ;
-    constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
-  \<close>}
-
-  @{rail \<open>
-    @@{method (HOL) lifting} @{syntax thmrefs}?
-    ;
-    @@{method (HOL) lifting_setup} @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "quotient_type"} defines a new quotient type @{text \<tau>}. The
-  injection from a quotient type to a raw type is called @{text
-  rep_\<tau>}, its inverse @{text abs_\<tau>}, unless an explicit @{keyword (HOL)
-  "morphisms"} specification provides alternative names. @{command
-  (HOL) "quotient_type"} requires the user to prove that the relation
-  is an equivalence relation (predicate @{text equivp}), unless the
-  user explicitly specifies @{text partial}, in which case the
-  obligation is @{text part_equivp}.  A quotient defined with @{text
-  partial} is weaker in the sense that fewer properties can be proved
-  automatically.
-
-  The command internally proves a Quotient theorem and sets up the Lifting
-  package by the command @{command (HOL) setup_lifting}. Thus the
-  Lifting and Transfer packages can also be used with quotient types
-  defined by @{command (HOL) "quotient_type"} without any extra
-  set-up. The parametricity theorem for the equivalence relation R can
-  be provided as an extra argument of the command and is passed to the
-  corresponding internal call of @{command (HOL) setup_lifting}.  This
-  theorem allows the Lifting package to generate a stronger transfer
-  rule for equality.
-  
-  \end{description}
-
-  Most of the rest of the package has been superseded by the Lifting and
-  Transfer packages. The user should consider using these two newer packages for
-  lifting definitions and transporting theorems.
-
-  \begin{description}  
-
-  \item @{command (HOL) "quotient_definition"} defines a constant on
-  the quotient type.
-
-  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map
-  functions.
-
-  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
-
-  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
-
-  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
-    match the current goal with the given raw theorem to be lifted,
-    producing three new subgoals: a regularization, an injection and a
-    cleaning subgoal. @{method (HOL) "lifting"} tries to apply
-    heuristics for automatically solving these three subgoals and
-    leaves to the user only the subgoals not solved by the heuristics,
-    whereas @{method (HOL) "lifting_setup"} leaves all three
-    subgoals unsolved.
-
-  \item @{method (HOL) "descending"} and @{method (HOL)
-    "descending_setup"} try to guess a raw statement that would lift
-    to the current subgoal. Such a statement is assumed as a new subgoal
-    and @{method (HOL) "descending"} continues in the same way as
-    @{method (HOL) "lifting"} does: it tries to solve the arising
-    regularization, injection and cleaning subgoals, whereas the
-    analogous method @{method (HOL)
-    "descending_setup"} leaves all four subgoals unsolved.
-
-  \item @{method (HOL) "partiality_descending"} finds the regularized
-    theorem that would lift to the current subgoal, lifts it and
-    leaves it as a subgoal. This method can be used with partial
-    equivalence quotients where the non-regularized statements would
-    not be true. @{method (HOL) "partiality_descending_setup"} leaves
-    the injection and cleaning subgoals unchanged.
-
-  \item @{method (HOL) "regularize"} applies the regularization
-    heuristics to the current subgoal.
-
-  \item @{method (HOL) "injection"} applies the injection heuristics
-    to the current goal using the stored quotient respectfulness
-    theorems.
-
-  \item @{method (HOL) "cleaning"} applies the injection cleaning
-    heuristics to the current subgoal using the stored quotient
-    preservation theorems.
-
-  \item @{attribute (HOL) quot_lifted} tries to
-    automatically transport the theorem to the quotient type.
-    The attribute uses all the defined quotient types and quotient
-    constants, often producing undesired results or theorems that
-    cannot be lifted.
-
-  \item @{attribute (HOL) quot_respect} and @{attribute (HOL)
-    quot_preserve} declare a theorem as a respectfulness
-    and a preservation theorem, respectively.  These are stored in the
-    local theory store and used by the @{method (HOL) "injection"}
-    and @{method (HOL) "cleaning"} methods respectively.
-
-  \item @{attribute (HOL) quot_thm} declares that a certain theorem
-    is a quotient extension theorem. Quotient extension theorems
-    allow for quotienting inside container types. Given a polymorphic
-    type that serves as a container, a map function defined for this
-    container using @{command (HOL) "functor"} and a relation
-    map defined for the container type, the quotient extension
-    theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3
-    (rel_map R) (map Abs) (map Rep)"}. Quotient extension theorems
-    are stored in a database and are used in all the steps of lifting
-    theorems.
-
-  \end{description}
-*}
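-
-
-subsubsection {* Example *}
-
-text {* The following sketch defines a quotient of lists by the ``same
-  length'' relation; the type name is invented for illustration only.
-  The proof obligation is the @{text equivp} property of the relation. *}
-
-quotient_type 'a len_quot = "'a list" / "\<lambda>xs ys. length xs = length ys"
-  by (auto intro!: equivpI reflpI sympI transpI)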
-
-
-section {* Definition by specification \label{sec:hol-specification} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) specification} '(' (decl +) ')' \<newline>
-      (@{syntax thmdecl}? @{syntax prop} +)
-    ;
-    decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
-  goal stating the existence of terms with the properties specified to
-  hold for the constants given in @{text decls}.  After finishing the
-  proof, the theory will be augmented with definitions for the given
-  constants, as well as with theorems stating the properties for these
-  constants.
-
-  @{text decl} declares a constant to be defined by the
-  specification given.  The definition for the constant @{text c} is
-  bound to the name @{text c_def} unless a theorem name is given in
-  the declaration.  Overloaded constants should be declared as such.
-
-  \end{description}
-*}
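-
-
-subsubsection {* Example *}
-
-text {* The following sketch (the constant and fact names are invented
-  for illustration only) specifies an idempotent function on natural
-  numbers; the identity function serves as witness in the proof. *}
-
-consts my_pick :: "nat \<Rightarrow> nat"
-
-specification (my_pick)
-  my_pick_idem: "\<forall>n. my_pick (my_pick n) = my_pick n"
-  by (rule exI [of _ id]) simp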
-
-
-section {* Adhoc overloading of constants *}
-
-text {*
-  \begin{tabular}{rcll}
-  @{command_def "adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  @{command_def "no_adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  @{attribute_def "show_variants"} & : & @{text "attribute"} & default @{text false} \\
-  \end{tabular}
-
-  \medskip
-
-  Adhoc overloading allows a constant to be overloaded depending on
-  its type. Typically this involves the introduction of an
-  uninterpreted constant (used for input and output) and the addition
-  of some variants (used internally). For examples see
-  @{file "~~/src/HOL/ex/Adhoc_Overloading_Examples.thy"} and
-  @{file "~~/src/HOL/Library/Monad_Syntax.thy"}.
-
-  @{rail \<open>
-    (@@{command adhoc_overloading} | @@{command no_adhoc_overloading})
-      (@{syntax nameref} (@{syntax term} + ) + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
-  associates variants with an existing constant.
-
-  \item @{command "no_adhoc_overloading"} is similar to
-  @{command "adhoc_overloading"}, but removes the specified variants
-  from the present context.
-  
-  \item @{attribute "show_variants"} controls printing of variants
-  of overloaded constants. If enabled, the internally used variants
-  are printed instead of their respective overloaded constants. This
-  is occasionally useful to check whether the system agrees with a
-  user's expectations about derived variants.
-
-  \end{description}
-*}
-
-chapter {* Proof tools *}
-
-section {* Adhoc tuples *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute (HOL) split_format} ('(' 'complete' ')')?
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
-  arguments in function applications to be represented canonically
-  according to their tuple type structure.
-
-  Note that this operation tends to invent funny names for new local
-  parameters introduced.
-
-  \end{description}
-*}
-
-
-section {* Transfer package *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "transfer"} & : & @{text method} \\
-    @{method_def (HOL) "transfer'"} & : & @{text method} \\
-    @{method_def (HOL) "transfer_prover"} & : & @{text method} \\
-    @{attribute_def (HOL) "Transfer.transferred"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "untransferred"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "transfer_rule"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "transfer_domain_rule"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_eq"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_domain"} & : & @{text attribute} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{method (HOL) "transfer"} method replaces the current subgoal
-    with a logically equivalent one that uses different types and
-    constants. The replacement of types and constants is guided by the
-    database of transfer rules. Goals are generalized over all free
-    variables by default; this is necessary for variables whose types
-    change, but can be overridden for specific variables with e.g.
-    @{text "transfer fixing: x y z"}.
-
-  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL)
-    transfer} that allows replacing a subgoal with one that is
-    logically stronger (rather than equivalent). For example, a
-    subgoal involving equality on a quotient type could be replaced
-    with a subgoal involving equality (instead of the corresponding
-    equivalence relation) on the underlying raw type.
-
-  \item @{method (HOL) "transfer_prover"} method assists with proving
-    a transfer rule for a new constant, provided the constant is
-    defined in terms of other constants that already have transfer
-    rules. It should be applied after unfolding the constant
-    definitions.
-
-  \item @{attribute (HOL) "untransferred"} proves the same equivalent theorem
-     as @{method (HOL) "transfer"} internally does.
-
-  \item @{attribute (HOL) Transfer.transferred} works in the direction
-    opposite to @{method (HOL) "transfer'"}. E.g., given the transfer
-    relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding transfer rules and the theorem
-    @{text "\<forall>x::int \<in> {0..}. x < x + 1"}, the attribute would prove
-    @{text "\<forall>n::nat. n < n + 1"}. The attribute is still in an
-    experimental phase of development.
-
-  \item @{attribute (HOL) "transfer_rule"} attribute maintains a
-    collection of transfer rules, which relate constants at two
-    different types. Typical transfer rules may relate different type
-    instances of the same polymorphic constant, or they may relate an
-    operation on a raw type to a corresponding operation on an
-    abstract type (quotient or subtype). For example:
-
-    @{text "((A ===> B) ===> list_all2 A ===> list_all2 B) map map"}\\
-    @{text "(cr_int ===> cr_int ===> cr_int) (\<lambda>(x,y) (u,v). (x+u, y+v)) plus"}
-
-    Lemmas involving predicates on relations can also be registered
-    using the same attribute. For example:
-
-    @{text "bi_unique A \<Longrightarrow> (list_all2 A ===> op =) distinct distinct"}\\
-    @{text "\<lbrakk>bi_unique A; bi_unique B\<rbrakk> \<Longrightarrow> bi_unique (rel_prod A B)"}
-
-  \item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a collection
-    of rules, which specify a domain of a transfer relation by a predicate.
-    E.g., given the transfer relation @{text "ZN x n \<equiv> (x = int n)"}, 
-    one can register the following transfer domain rule: 
-    @{text "Domainp ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce
-    more readable transferred goals, e.g., when quantifiers are transferred.
-
-  \item @{attribute (HOL) relator_eq} collects identity laws
-    for relators of various type constructors, e.g. @{text "list_all2
-    (op =) = (op =)"}. The @{method (HOL) transfer} method uses these
-    lemmas to infer transfer rules for non-polymorphic constants on
-    the fly.
-
-  \item @{attribute (HOL) "relator_domain"} collects rules
-    describing domains of relators by predicators. E.g., @{text "Domainp A = P \<Longrightarrow>
-    Domainp (list_all2 A) = (list_all P)"}. This allows the package to lift transfer
-    domain rules through type constructors.
-
-  \end{description}
-
-  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
-*}
-
-
-section {* Lifting package *}
-
-text {*
-  The Lifting package allows users to lift terms of the raw type to the abstract type, which is 
-  a necessary step in building a library for an abstract type. Lifting defines a new constant 
-  by combining coercion functions (Abs and Rep) with the raw term. It also proves an appropriate 
-  transfer rule for the Transfer package and, if possible, an equation for the code generator.
-
-  The Lifting package provides two main commands: @{command (HOL) "setup_lifting"} for initializing 
-  the package to work with a new type, and @{command (HOL) "lift_definition"} for lifting constants. 
-  The Lifting package works with all four kinds of type abstraction: type copies, subtypes, 
-  total quotients and partial quotients.
-
-  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "setup_lifting"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
-    @{command_def (HOL) "lift_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
-    @{command_def (HOL) "lifting_forget"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
-    @{command_def (HOL) "lifting_update"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
-    @{command_def (HOL) "print_quot_maps"} & : & @{text "context \<rightarrow>"}\\
-    @{command_def (HOL) "print_quotients"} & : & @{text "context \<rightarrow>"}\\
-    @{attribute_def (HOL) "quot_map"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "invariant_commute"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "reflexivity_rule"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_mono"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_distr"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_del"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "lifting_restore"} & : & @{text attribute} \\   
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) setup_lifting} ('(' 'no_code' ')')? \<newline>
-      @{syntax thmref} @{syntax thmref}? (@'parametric' @{syntax thmref})?;
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) lift_definition} @{syntax name} '::' @{syntax type}  @{syntax mixfix}? \<newline>
-      'is' @{syntax term} (@'parametric' @{syntax thmref})?;
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) lifting_forget} @{syntax nameref};
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) lifting_update} @{syntax nameref};
-  \<close>}
-
-  @{rail \<open>
-    @@{attribute (HOL) lifting_restore} @{syntax thmref} (@{syntax thmref} @{syntax thmref})?;
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package
-    to work with a user-defined type. 
-    The command supports two modes. The first one is a low-level mode when 
-    the user must provide as a first
-    argument of @{command (HOL) "setup_lifting"} a
-    quotient theorem @{text "Quotient R Abs Rep T"}. The
-    package configures a transfer rule for equality, a domain transfer
-    rules and sets up the @{command_def (HOL) "lift_definition"}
-    command to work with the abstract type. An optional theorem @{text "reflp R"}, which certifies that 
-    the equivalence relation R is total,
-    can be provided as a second argument. This allows the package to generate stronger transfer
-    rules. And finally, the parametricity theorem for R can be provided as a third argument.
-    This allows the package to generate a stronger transfer rule for equality.
-
-    Users generally will not prove the @{text Quotient} theorem manually for 
-    new types, as special commands exist to automate the process.
-    
-    When a new subtype is defined by @{command (HOL) typedef}, @{command (HOL) "setup_lifting"}
-    can be used in its
-    second mode, where only the type_definition theorem @{text "type_definition Rep Abs A"}
-    is used as an argument of the command. The command internally proves the corresponding
-    Quotient theorem and registers it with @{command (HOL) setup_lifting} using its first mode.
-
-    For quotients, the command @{command (HOL) quotient_type} can be used. The command defines 
-    a new quotient type and, similarly to the previous case, the corresponding Quotient theorem is proved
-    and registered by @{command (HOL) setup_lifting}.
-    
-    The command @{command (HOL) "setup_lifting"} also sets up the code generator
-    for the new type. Later on, when a new constant is defined by @{command (HOL) "lift_definition"},
-    the Lifting package proves and registers a code equation (if there is one) for the new constant.
-    If the option @{text "no_code"} is specified, the Lifting package does not set up the code
-    generator and as a consequence no code equations involving an abstract type are registered
-    by @{command (HOL) "lift_definition"}.
-
-  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL) "is"} @{text t}
-    defines a new function @{text f} with an abstract type @{text \<tau>}
-    in terms of a corresponding operation @{text t} on a
-    representation type. More formally, if @{text "t :: \<sigma>"}, then
-    the command builds a term @{text "F"} as a corresponding combination of abstraction
-    and representation functions such that @{text "F :: \<sigma> \<Rightarrow> \<tau>" } and
-    defines @{text f} as @{text "f \<equiv> F t"}.
-    The term @{text t} does not necessarily have to be a constant; it can be any term.
-
-    The command opens a proof environment and the user must discharge 
-    a respectfulness proof obligation. For a type copy, i.e., a typedef with @{text
-    UNIV}, the obligation is discharged automatically. The proof goal is
-    presented in a user-friendly, readable form. A respectfulness
-    theorem in the standard format @{text f.rsp} and a transfer rule
-    @{text f.transfer} for the Transfer package are generated by the
-    package.
-
-    The user can specify a parametricity theorem for @{text t} after the keyword 
-    @{keyword "parametric"}, which allows the command
-    to generate a parametric transfer rule for @{text f}.
-
-    For each constant defined through trivial quotients (type copies or
-    subtypes) @{text f.rep_eq} is generated. The equation is a code certificate
-    that defines @{text f} using the representation function.
-
-    For each constant, the equation @{text f.abs_eq} is generated. The equation is unconditional
-    for total quotients. The equation defines @{text f} using
-    the abstraction function.
-
-    Integration with [@{attribute code} abstract]: For subtypes (e.g.,
-    corresponding to a datatype invariant, such as dlist), @{command
-    (HOL) "lift_definition"} uses a code certificate theorem
-    @{text f.rep_eq} as a code equation.
-
-    Integration with [@{attribute code} equation]: For total quotients, @{command
-    (HOL) "lift_definition"} uses @{text f.abs_eq} as a code equation.
-
-  \item @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
-    serve to store and delete the set-up of
-    the Lifting package and the corresponding transfer rules defined by this package.
-    This is useful for hiding the type construction details of an abstract type
-    when the construction is finished, while still allowing additions to this construction
-    when this later becomes necessary.
-
-    Whenever the Lifting package is set up with a new abstract type @{text "\<tau>"} by
-    @{command (HOL) "setup_lifting"}, the package defines a new bundle
-    that is called @{text "\<tau>.lifting"}. This bundle already includes set-up for the Lifting package. 
-    The new transfer rules
-    introduced by @{command (HOL) "lift_definition"} can be stored in the bundle by
-    the command @{command (HOL) "lifting_update"} @{text "\<tau>.lifting"}.
-
-    The command @{command (HOL) "lifting_forget"} @{text "\<tau>.lifting"} deletes set-up of the Lifting 
-    package
-    for @{text \<tau>} and deletes all the transfer rules that were introduced
-    by @{command (HOL) "lift_definition"} using @{text \<tau>} as an abstract type.
-
-    The set-up stored in a bundle can be reintroduced by the Isar commands for including a bundle
-    (@{command "include"}, @{keyword "includes"} and @{command "including"}).
-
-  \item @{command (HOL) "print_quot_maps"} prints stored quotient map
-    theorems.
-
-  \item @{command (HOL) "print_quotients"} prints stored quotient
-    theorems.
-
-  \item @{attribute (HOL) quot_map} registers a quotient map
-    theorem. E.g., @{text "Quotient R Abs Rep T \<Longrightarrow> 
-    Quotient (list_all2 R) (map Abs) (map Rep) (list_all2 T)"}. 
-    For examples see @{file
-    "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files
-    in the same directory.
-
-  \item @{attribute (HOL) invariant_commute} registers a theorem that
-    shows a relationship between the constant @{text
-    Lifting.invariant} (used for internal encoding of proper subtypes)
-    and a relator.  Such theorems allow the package to hide @{text
-    Lifting.invariant} from the user in a user-readable form of a
-    respectfulness theorem. For examples see @{file
-    "~~/src/HOL/List.thy"} or Lifting_*.thy files in the same directory.
-
-  \item @{attribute (HOL) reflexivity_rule} registers a theorem that shows
-    that a relator respects left-totality and left-uniqueness. For examples
-    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files 
-    in the same directory.
-    The property is used in a reflexivity prover, which is used for discharging respectfulness
-    theorems for type copies and also for discharging assumptions of abstraction function equations.
-
-  \item @{attribute (HOL) "relator_mono"} registers a property describing a monotonicity of a relator.
-    E.g., @{text "A \<le> B \<Longrightarrow> list_all2 A \<le> list_all2 B"}. For examples 
-    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
-    or Lifting_*.thy files in the same directory.
-    This property is needed for proving a stronger transfer rule in @{command (HOL) "lift_definition"}
-    when a parametricity theorem for the raw term is specified.
-
-  \item @{attribute (HOL) "relator_distr"} registers a property describing a distributivity
-    of the relation composition and a relator. E.g., 
-    @{text "list_all2 R \<circ>\<circ> list_all2 S = list_all2 (R \<circ>\<circ> S)"}. 
-    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
-    when a parametricity theorem for the raw term is specified.
-    When this equality does not hold unconditionally (e.g., for the function type), the user can specified
-    each direction separately and also register multiple theorems with different set of assumptions.
-    This attribute can be used only after the monotonicity property was already registered by
-    @{attribute (HOL) "relator_mono"}. For examples 
-    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
-    or Lifting_*.thy files in the same directory.
-
-  \item @{attribute (HOL) quot_del} deletes the corresponding Quotient theorem
-    from the Lifting infrastructure and thus de-registers the corresponding quotient.
-    This effectively prevents @{command (HOL) lift_definition} from
-    doing any lifting for the corresponding type. This attribute is mainly intended for low-level
-    manipulation of the set-up of the Lifting package; @{command (HOL) lifting_forget} is
-    preferred for normal usage.
-
-  \item @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def pcr_cr_eq_thm"} 
-    registers the Quotient theorem @{text Quotient_thm} in the Lifting infrastructure 
-    and thus sets up lifting for an abstract type @{text \<tau>} (that is defined by @{text Quotient_thm}).
-    Optional theorems @{text pcr_def} and @{text pcr_cr_eq_thm} can be specified to register 
-    the parametrized
-    correspondence relation for @{text \<tau>}. E.g., for @{text "'a dlist"}, @{text pcr_def} is
-    @{text "pcr_dlist A \<equiv> list_all2 A \<circ>\<circ> cr_dlist"} and @{text pcr_cr_eq_thm} is 
-    @{text "pcr_dlist op= = op="}.
-    This attribute is mainly intended for low-level
-    manipulation of the set-up of the Lifting package; using the bundle @{text \<tau>.lifting}
-    together with the commands @{command (HOL) lifting_forget} and @{command (HOL) lifting_update} is
-    preferred for normal usage.
-
-  \end{description}
-*}
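-
-
-subsubsection {* Example *}
-
-text {* The following sketch (the type and constant names are invented
-  for illustration only) sets up the Lifting package for a subtype of
-  the natural numbers and lifts multiplication to it.  The proof
-  obligation of @{command (HOL) "lift_definition"} expresses that
-  multiplication preserves the defining invariant. *}
-
-typedef nonzero = "{n::nat. n > 0}"
-  by (rule exI [of _ 1]) simp
-
-setup_lifting type_definition_nonzero
-
-lift_definition mult_nonzero :: "nonzero \<Rightarrow> nonzero \<Rightarrow> nonzero"
-  is "op *"
-  by simp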
-
-
-section {* Coercive subtyping *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def (HOL) coercion} & : & @{text attribute} \\
-    @{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
-    @{attribute_def (HOL) coercion_map} & : & @{text attribute} \\
-  \end{matharray}
-
-  Coercive subtyping allows the user to omit explicit type
-  conversions, also called \emph{coercions}.  Type inference will add
-  them as necessary when parsing a term. See
-  \cite{traytel-berghofer-nipkow-2011} for details.
-
-  @{rail \<open>
-    @@{attribute (HOL) coercion} (@{syntax term})?
-    ;
-    @@{attribute (HOL) coercion_map} (@{syntax term})?
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
-  coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
-  @{text "\<sigma>\<^sub>2"} are type constructors without arguments.  Coercions are
-  composed by the inference algorithm if needed.  Note that the type
-  inference algorithm is complete only if the registered coercions
-  form a lattice.
-
-  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
-  new map function to lift coercions through type constructors. The
-  function @{text "map"} must conform to the following type pattern
-
-  \begin{matharray}{lll}
-    @{text "map"} & @{text "::"} &
-      @{text "f\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> f\<^sub>n \<Rightarrow> (\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t \<Rightarrow> (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t"} \\
-  \end{matharray}
-
-  where @{text "t"} is a type constructor and @{text "f\<^sub>i"} is of type
-  @{text "\<alpha>\<^sub>i \<Rightarrow> \<beta>\<^sub>i"} or @{text "\<beta>\<^sub>i \<Rightarrow> \<alpha>\<^sub>i"}.  Registering a map function
-  overwrites any existing map function for this particular type
-  constructor.
-
-  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
-  inference algorithm.
-
-  \end{description}
-*}
-
-
-section {* Arithmetic proof support *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) arith} & : & @{text method} \\
-    @{attribute_def (HOL) arith} & : & @{text attribute} \\
-    @{attribute_def (HOL) arith_split} & : & @{text attribute} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{method (HOL) arith} decides linear arithmetic problems (on
-  types @{text nat}, @{text int}, @{text real}).  Any current facts
-  are inserted into the goal before running the procedure.
-
-  \item @{attribute (HOL) arith} declares facts that are supplied to
-  the arithmetic provers implicitly.
-
-  \item @{attribute (HOL) arith_split} declares case split
-  rules to be expanded before @{method (HOL) arith} is invoked.
-
-  \end{description}
-
-  Note that a simpler (but faster) arithmetic prover is already
-  invoked by the Simplifier.
-*}
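-
-
-subsubsection {* Example *}
-
-text {* A small illustrative goal in linear arithmetic over type @{typ nat}: *}
-
-lemma "\<lbrakk>(x::nat) < y; y < z\<rbrakk> \<Longrightarrow> x + 1 < z"
-  by arith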
-
-
-section {* Intuitionistic proof search *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) iprover} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) iprover} (@{syntax rulemod} *)
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) iprover} performs intuitionistic proof search,
-  depending on specifically declared rules from the context, or given
-  as explicit arguments.  Chained facts are inserted into the goal
-  before commencing proof search.
-
-  Rules need to be classified as @{attribute (Pure) intro},
-  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
-  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
-  applied aggressively (without considering back-tracking later).
-  Rules declared with ``@{text "?"}'' are ignored in proof search (the
-  single-step @{method (Pure) rule} method still observes these).  An
-  explicit weight annotation may be given as well; otherwise the
-  number of rule premises will be taken into account here.
-
-  \end{description}
-*}
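-
-
-subsubsection {* Example *}
-
-text {* A small propositional example, solved purely by the rules
-  declared in the context: *}
-
-lemma "A \<and> B \<longrightarrow> B \<and> A"
-  by iprover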
-
-
-section {* Model Elimination and Resolution *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "meson"} & : & @{text method} \\
-    @{method_def (HOL) "metis"} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) meson} @{syntax thmrefs}?
-    ;
-    @@{method (HOL) metis}
-      ('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
-      @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) meson} implements Loveland's model elimination
-  procedure \cite{loveland-78}.  See @{file
-  "~~/src/HOL/ex/Meson_Test.thy"} for examples.
-
-  \item @{method (HOL) metis} combines ordered resolution and ordered
-  paramodulation to find first-order (or mildly higher-order) proofs.
-  The first optional argument specifies a type encoding; see the
-  Sledgehammer manual \cite{isabelle-sledgehammer} for details.  The
-  directory @{file "~~/src/HOL/Metis_Examples"} contains several small
-  theories developed to a large extent using @{method (HOL) metis}.
-
-  \end{description}
-*}
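-
-
-subsubsection {* Examples *}
-
-text {* Two small illustrative first-order problems, one for each method: *}
-
-lemma "(\<exists>x. \<forall>y. P x y) \<Longrightarrow> (\<forall>y. \<exists>x. P x y)"
-  by meson
-
-lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> P a \<Longrightarrow> Q a"
-  by metis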
-
-
-section {* Algebraic reasoning via Gr\"obner bases *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "algebra"} & : & @{text method} \\
-    @{attribute_def (HOL) algebra} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) algebra}
-      ('add' ':' @{syntax thmrefs})?
-      ('del' ':' @{syntax thmrefs})?
-    ;
-    @@{attribute (HOL) algebra} (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) algebra} performs algebraic reasoning via
-  Gr\"obner bases, see also \cite{Chaieb-Wenzel:2007} and
-  \cite[\S3.2]{Chaieb-thesis}. The method deals with two main
-  classes of problems:
-
-  \begin{enumerate}
-
-  \item Universal problems over multivariate polynomials in a
-  (semi)-ring/field/idom; the capabilities of the method are augmented
-  according to properties of these structures. For this problem class
-  the method is only complete for algebraically closed fields, since
-  the underlying method is based on Hilbert's Nullstellensatz, where
-  the equivalence only holds for algebraically closed fields.
-
-  The problems can contain equations @{text "p = 0"} or inequations
-  @{text "q \<noteq> 0"} anywhere within a universal problem statement.
-
-  \item All-exists problems of the following restricted (but useful)
-  form:
-
-  @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
-    e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
-    (\<exists>y\<^sub>1 \<dots> y\<^sub>k.
-      p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
-      \<dots> \<and>
-      p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
-
-  Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
-  polynomials only in the variables mentioned as arguments.
-
-  \end{enumerate}
-
-  The proof method is preceded by a simplification step, which may be
-  modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}.
-  This acts like declarations for the Simplifier
-  (\secref{sec:simplifier}) on a private simpset for this tool.
-
-  \item @{attribute algebra} (as attribute) manages the default
-  collection of pre-simplification rules of the above proof method.
-
-  \end{description}
-*}
-
-
-subsubsection {* Example *}
-
-text {* The subsequent example is from geometry: collinearity is
-  invariant under rotation.  *}
-
-type_synonym point = "int \<times> int"
-
-fun collinear :: "point \<Rightarrow> point \<Rightarrow> point \<Rightarrow> bool" where
-  "collinear (Ax, Ay) (Bx, By) (Cx, Cy) \<longleftrightarrow>
-    (Ax - Bx) * (By - Cy) = (Ay - By) * (Bx - Cx)"
-
-lemma collinear_inv_rotation:
-  assumes "collinear (Ax, Ay) (Bx, By) (Cx, Cy)" and "c\<^sup>2 + s\<^sup>2 = 1"
-  shows "collinear (Ax * c - Ay * s, Ay * c + Ax * s)
-    (Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)"
-  using assms by (algebra add: collinear.simps)
-
-text {*
- See also @{file "~~/src/HOL/ex/Groebner_Examples.thy"}.
-*}
-
-
-section {* Coherent Logic *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "coherent"} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) coherent} @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) coherent} solves problems of \emph{Coherent
-  Logic} \cite{Bezem-Coquand:2005}, which covers applications in
-  confluence theory, lattice theory and projective geometry.  See
-  @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
-
-  \end{description}
-*}
-
-
-section {* Proving propositions *}
-
-text {*
-  In addition to the standard proof methods, a number of diagnostic
-  tools search for proofs and provide an Isar proof snippet on success.
-  These tools are available via the following commands.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "solve_direct"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "try"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "try0"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "sledgehammer_params"} & : & @{text "theory \<rightarrow> theory"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) try}
-    ;
-
-    @@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thmrefs} ) + ) ?
-      @{syntax nat}?
-    ;
-
-    @@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}?
-    ;
-
-    @@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? )
-    ;
-    args: ( @{syntax name} '=' value + ',' )
-    ;
-    facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
-  \<close>} % FIXME check args "value"
-
-  \begin{description}
-
-  \item @{command (HOL) "solve_direct"} checks whether the current
-  subgoals can be solved directly by an existing theorem. Duplicate
-  lemmas can be detected in this way.
-
-  \item @{command (HOL) "try0"} attempts to prove a subgoal
-  using a combination of standard proof methods (@{method auto},
-  @{method simp}, @{method blast}, etc.).  Additional facts supplied
-  via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
-  "dest:"} are passed to the appropriate proof methods.
-
-  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
-  using a combination of provers and disprovers (@{command (HOL)
-  "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
-  "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
-  "nitpick"}).
-
-  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
-  using external automatic provers (resolution provers and SMT
-  solvers). See the Sledgehammer manual \cite{isabelle-sledgehammer}
-  for details.
-
-  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
-  "sledgehammer"} configuration options persistently.
-
-  \end{description}
-*}
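-
-text {* For illustration, typical invocations within a pending proof
-  might read as follows; the fact names @{text "f\<^sub>1"} and @{text "f\<^sub>2"}
-  are only placeholders.
-
-  @{text [display] "try0 simp: f\<^sub>1 intro: f\<^sub>2"}
-
-  @{text [display] "sledgehammer [timeout = 30] (add: f\<^sub>1 f\<^sub>2)"}
-*}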
-
-
-section {* Checking and refuting propositions *}
-
-text {*
-  Identifying incorrect propositions usually involves evaluation of
-  particular assignments and systematic counterexample search.  This
-  is supported by the following commands.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "value"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "values"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "quickcheck"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "nitpick"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "quickcheck_params"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "nitpick_params"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "quickcheck_generator"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "find_unused_assms"} & : & @{text "context \<rightarrow>"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
-    ;
-
-    @@{command (HOL) values} modes? @{syntax nat}? @{syntax term}
-    ;
-
-    (@@{command (HOL) quickcheck} | @@{command (HOL) nitpick})
-      ( '[' args ']' )? @{syntax nat}?
-    ;
-
-    (@@{command (HOL) quickcheck_params} |
-      @@{command (HOL) nitpick_params}) ( '[' args ']' )?
-    ;
-
-    @@{command (HOL) quickcheck_generator} @{syntax nameref} \<newline>
-      'operations:' ( @{syntax term} +)
-    ;
-
-    @@{command (HOL) find_unused_assms} @{syntax name}?
-    ;
-    modes: '(' (@{syntax name} +) ')'
-    ;
-    args: ( @{syntax name} '=' value + ',' )
-  \<close>} % FIXME check "value"
-
-  \begin{description}
-
-  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
-  term; optionally @{text modes} can be specified, which are appended
-  to the current print mode; see \secref{sec:print-modes}.
-  Internally, the evaluation is performed by registered evaluators,
-  which are invoked sequentially until a result is returned.
-  Alternatively a specific evaluator can be selected using square
-  brackets; typical evaluators use the current set of code equations
-  to normalize and include @{text simp} for fully symbolic evaluation
-  using the simplifier, @{text nbe} for \emph{normalization by
-  evaluation}, and @{text code} for code generation in SML.
-
-  \item @{command (HOL) "values"}~@{text t} enumerates a set
-  comprehension by evaluation and prints its values up to the given
-  number of solutions; optionally @{text modes} can be specified,
-  which are appended to the current print mode; see
-  \secref{sec:print-modes}.
-
-  \item @{command (HOL) "quickcheck"} tests the current goal for
-  counterexamples using a series of assignments for its free
-  variables; by default the first subgoal is tested, another can be
-  selected explicitly using an optional goal index.  Assignments can
-  be chosen by exhausting the search space up to a given size, by using a
-  fixed number of random assignments in the search space, or by exploring
-  the search space symbolically using narrowing.  By default,
-  quickcheck uses exhaustive testing.  A number of configuration
-  options are supported for @{command (HOL) "quickcheck"}, notably:
-
-    \begin{description}
-
-    \item[@{text tester}] specifies which testing approach to apply.
-    There are three testers, @{text exhaustive}, @{text random}, and
-    @{text narrowing}.  An unknown configuration option is treated as
-    an argument to tester, making @{text "tester ="} optional.  When
-    multiple testers are given, these are applied in parallel.  If no
-    tester is specified, quickcheck uses the testers that are set
-    active, i.e., those for which the configuration options @{attribute
-    quickcheck_exhaustive_active}, @{attribute
-    quickcheck_random_active}, or @{attribute
-    quickcheck_narrowing_active} are set to true.
-
-    \item[@{text size}] specifies the maximum size of the search space
-    for assignment values.
-
-    \item[@{text genuine_only}] sets quickcheck to return only genuine
-    counterexamples, but no potentially spurious counterexamples due
-    to underspecified functions.
-
-    \item[@{text abort_potential}] sets quickcheck to abort once it
-    finds a potentially spurious counterexample and not to continue
-    searching for a further genuine counterexample.
-    For this option to be effective, the @{text genuine_only} option
-    must be set to false.
-
-    \item[@{text eval}] takes a term or a list of terms and evaluates
-    these terms under the variable assignment found by quickcheck.
-    This option is currently only supported by the default
-    (exhaustive) tester.
-
-    \item[@{text iterations}] sets how many sets of assignments are
-    generated for each particular size.
-
-    \item[@{text no_assms}] specifies whether assumptions in
-    structured proofs should be ignored.
-
-    \item[@{text locale}] specifies how to process conjectures in
-    a locale context, i.e., they can be interpreted or expanded.
-    The option is a whitespace-separated list of the two words
-    @{text interpret} and @{text expand}. The list determines the
-    order in which they are employed. The default setting is to first use
-    interpretations and then test the expanded conjecture.
-    The option is only provided as an attribute declaration, not
-    as a parameter to the command.
-
-    \item[@{text timeout}] sets the time limit in seconds.
-
-    \item[@{text default_type}] sets the type(s) generally used to
-    instantiate type variables.
-
-    \item[@{text report}] if set, quickcheck reports how many tests
-    fulfilled the preconditions.
-
-    \item[@{text use_subtype}] if set, quickcheck automatically lifts
-    conjectures to registered subtypes if possible, and tests the
-    lifted conjecture.
-
-    \item[@{text quiet}] if set, quickcheck does not output anything
-    while testing.
-
-    \item[@{text verbose}] if set, quickcheck informs about the current
-    size and cardinality while testing.
-
-    \item[@{text expect}] can be used to check if the user's
-    expectation was met (@{text no_expectation}, @{text
-    no_counterexample}, or @{text counterexample}).
-
-    \end{description}
-
-  These options can be given within square brackets.
-
-  Using the following type classes, the testers generate values and convert
-  them back into Isabelle terms for displaying counterexamples.
-    \begin{description}
-    \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
-      and @{class full_exhaustive} implement the testing. They take a 
-      testing function as a parameter, which takes a value of type @{typ "'a"}
-      and optionally produces a counterexample, and a size parameter for the test values.
-      In @{class full_exhaustive}, the testing function parameter additionally 
-      expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
-      of the tested value.
-
-      The canonical implementation for @{text exhaustive} testers calls the given
-      testing function on all values up to the given size and stops as soon
-      as a counterexample is found.
-
-    \item[@{text random}] The operation @{const Quickcheck_Random.random}
-      of the type class @{class random} generates a pseudo-random
-      value of the given size and a lazy term reconstruction of the value
-      in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
-      is defined in theory @{theory Random}.
-      
-    \item[@{text narrowing}] implements Haskell's Lazy Smallcheck~\cite{runciman-naylor-lindblad}
-      using the type classes @{class narrowing} and @{class partial_term_of}.
-      Variables in the current goal are initially represented as symbolic variables.
-      If the execution of the goal tries to evaluate one of them, the test engine
-      replaces it with refinements provided by @{const narrowing}.
-      Narrowing views every value as a sum-of-products which is expressed using the operations
-      @{const Quickcheck_Narrowing.cons} (embedding a value),
-      @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
-      The refinement should enable further evaluation of the goal.
-
-      For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
-      can be recursively defined as
-      @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
-                (Quickcheck_Narrowing.apply
-                  (Quickcheck_Narrowing.apply
-                    (Quickcheck_Narrowing.cons (op #))
-                    narrowing)
-                  narrowing)"}.
-      If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty
-      list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively
-      refined if needed.
-
-      To reconstruct counterexamples, the operation @{const partial_term_of} transforms
-      @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
-      The deep representation models symbolic variables as
-      @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
-      @{const Code_Evaluation.Free}, and refined values as
-      @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
-      denotes the index in the sum of refinements. In the above example for lists,
-      @{term "0"} corresponds to @{term "[]"} and @{term "1"}
-      to @{term "op #"}.
-
-      The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
-      such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
-      but it does not ensure consistency with @{const narrowing}.
-    \end{description}
-
-  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
-  "quickcheck"} configuration options persistently.
-
-  \item @{command (HOL) "quickcheck_generator"} creates random and
-  exhaustive value generators for a given type and operations.  It
-  generates values by using the operations as if they were
-  constructors of that type.
-
-  \item @{command (HOL) "nitpick"} tests the current goal for
-  counterexamples using a reduction to first-order relational
-  logic. See the Nitpick manual \cite{isabelle-nitpick} for details.
-
-  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
-  "nitpick"} configuration options persistently.
-
-  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
-  assumptions in theorems using quickcheck.
-  It takes the theory name to be checked for superfluous assumptions as
-  optional argument. If not provided, it checks the current theory.
-  Options to the internal quickcheck invocations can be changed with
-  common configuration declarations.
-
-  \end{description}
-*}
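-
-text {* For illustration, evaluating a closed term and refuting a wrong
-  conjecture by counterexample search might read as follows. *}
-
-value "rev [1::nat, 2, 3]"
-
-lemma "rev xs = xs"
-  quickcheck [expect = counterexample]
-  oops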
-
-
-section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *}
-
-text {*
-  The following tools of Isabelle/HOL support case analysis and
-  induction in unstructured tactic scripts; see also
-  \secref{sec:cases-induct} for proper Isar versions of similar ideas.
-
-  \begin{matharray}{rcl}
-    @{method_def (HOL) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (HOL) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (HOL) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
-    @{command_def (HOL) "inductive_cases"}@{text "\<^sup>*"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
-    ;
-    @@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
-    ;
-    @@{method (HOL) ind_cases} (@{syntax prop}+) (@'for' (@{syntax name}+))?
-    ;
-    @@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
-    ;
-    rule: 'rule' ':' @{syntax thmref}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit
-  to reason about inductive types.  Rules are selected according to
-  the declarations by the @{attribute cases} and @{attribute induct}
-  attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
-  datatype} package already takes care of this.
-
-  These unstructured tactics feature both goal addressing and dynamic
-  instantiation.  Note that named rule cases are \emph{not} provided
-  as would be by the proper @{method cases} and @{method induct} proof
-  methods (see \secref{sec:cases-induct}).  Unlike the @{method
-  induct} method, @{method induct_tac} does not handle structured rule
-  statements, only the compact object-logic conclusion of the subgoal
-  being addressed.
-
-  \item @{method (HOL) ind_cases} and @{command (HOL)
-  "inductive_cases"} provide an interface to the internal @{ML_text
-  mk_cases} operation.  Rules are simplified in an unrestricted
-  forward manner.
-
-  While @{method (HOL) ind_cases} is a proof method to apply the
-  result immediately as elimination rules, @{command (HOL)
-  "inductive_cases"} provides case split theorems at the theory level
-  for later use.  The @{keyword "for"} argument of the @{method (HOL)
-  ind_cases} method allows to specify a list of variables that should
-  be generalized before applying the resulting rule.
-
-  \end{description}
-*}
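-
-text {* For illustration, small unstructured scripts using these
-  tactics might read as follows; both goals are deliberately trivial. *}
-
-lemma "(n::nat) = 0 \<or> (\<exists>m. n = Suc m)"
-  apply (case_tac n)
-  apply auto
-  done
-
-lemma "xs @ [] = xs"
-  apply (induct_tac xs)
-  apply simp_all
-  done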
-
-
-chapter {* Executable code *}
-
-text {* For validation purposes, it is often useful to \emph{execute}
-  specifications.  In principle, execution could be simulated by
-  Isabelle's inference kernel, i.e. by a combination of resolution and
-  simplification.  Unfortunately, this approach is rather inefficient.
-  A more efficient way of executing specifications is to translate
-  them into a functional programming language such as ML.
-
-  Isabelle provides a generic framework to support code generation
-  from executable specifications.  Isabelle/HOL instantiates these
-  mechanisms in a way that is amenable to end-user applications.  Code
-  can be generated for functional programs (including overloading
-  using type classes) targeting SML \cite{SML}, OCaml \cite{OCaml},
-  Haskell \cite{haskell-revised-report} and Scala
-  \cite{scala-overview-tech-report}.  Conceptually, code generation is
-  split up into three steps: \emph{selection} of code theorems,
-  \emph{translation} into an abstract executable view and
-  \emph{serialization} to a specific \emph{target language}.
-  Inductive specifications can be executed using the predicate
-  compiler which operates within HOL.  See \cite{isabelle-codegen} for
-  an introduction.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "export_code"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def (HOL) code} & : & @{text attribute} \\
-    @{command_def (HOL) "code_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "print_codesetup"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def (HOL) code_unfold} & : & @{text attribute} \\
-    @{attribute_def (HOL) code_post} & : & @{text attribute} \\
-    @{attribute_def (HOL) code_abbrev} & : & @{text attribute} \\
-    @{command_def (HOL) "print_codeproc"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "code_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "code_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "code_reserved"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_printing"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_identifier"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_monad"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_reflect"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_pred"} & : & @{text "theory \<rightarrow> proof(prove)"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) export_code} ( @'open' ) ? ( constexpr + ) \<newline>
-       ( ( @'in' target ( @'module_name' @{syntax string} ) ? \<newline>
-        ( @'file' @{syntax string} ) ? ( '(' args ')' ) ?) + ) ?
-    ;
-
-    const: @{syntax term}
-    ;
-
-    constexpr: ( const | 'name._' | '_' )
-    ;
-
-    typeconstructor: @{syntax nameref}
-    ;
-
-    class: @{syntax nameref}
-    ;
-
-    target: 'SML' | 'OCaml' | 'Haskell' | 'Scala' | 'Eval'
-    ;
-
-    @@{attribute (HOL) code} ( 'del' | 'equation' | 'abstype' | 'abstract'
-      | 'drop:' ( const + ) | 'abort:' ( const + ) )?
-    ;
-
-    @@{command (HOL) code_datatype} ( const + )
-    ;
-
-    @@{attribute (HOL) code_unfold} ( 'del' ) ?
-    ;
-
-    @@{attribute (HOL) code_post} ( 'del' ) ?
-    ;
-
-    @@{attribute (HOL) code_abbrev}
-    ;
-
-    @@{command (HOL) code_thms} ( constexpr + ) ?
-    ;
-
-    @@{command (HOL) code_deps} ( constexpr + ) ?
-    ;
-
-    @@{command (HOL) code_reserved} target ( @{syntax string} + )
-    ;
-
-    symbol_const: ( @'constant' const )
-    ;
-
-    symbol_typeconstructor: ( @'type_constructor' typeconstructor )
-    ;
-
-    symbol_class: ( @'type_class' class )
-    ;
-
-    symbol_class_relation: ( @'class_relation' class ( '<' | '\<subseteq>' ) class )
-    ;
-
-    symbol_class_instance: ( @'class_instance' typeconstructor @'::' class )
-    ;
-
-    symbol_module: ( @'code_module' name )
-    ;
-
-    syntax: @{syntax string} | ( @'infix' | @'infixl' | @'infixr' ) @{syntax nat} @{syntax string}
-    ;
-
-    printing_const: symbol_const ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' syntax ? + @'and' )
-    ;
-
-    printing_typeconstructor: symbol_typeconstructor ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' syntax ? + @'and' )
-    ;
-
-    printing_class: symbol_class ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' @{syntax string} ? + @'and' )
-    ;
-
-    printing_class_relation: symbol_class_relation ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' @{syntax string} ? + @'and' )
-    ;
-
-    printing_class_instance: symbol_class_instance ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' '-' ? + @'and' )
-    ;
-
-    printing_module: symbol_module ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' ( @{syntax string} ( @'attach' ( const + ) ) ? ) ? + @'and' )
-    ;
-
-    @@{command (HOL) code_printing} ( ( printing_const | printing_typeconstructor
-      | printing_class | printing_class_relation | printing_class_instance
-      | printing_module ) + '|' )
-    ;
-
-    @@{command (HOL) code_identifier} ( ( symbol_const | symbol_typeconstructor
-      | symbol_class | symbol_class_relation | symbol_class_instance
-      | symbol_module ) ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' @{syntax string} ? + @'and' ) + '|' )
-    ;
-
-    @@{command (HOL) code_monad} const const target
-    ;
-
-    @@{command (HOL) code_reflect} @{syntax string} \<newline>
-      ( @'datatypes' ( @{syntax string} '=' ( '_' | ( @{syntax string} + '|' ) + @'and' ) ) ) ? \<newline>
-      ( @'functions' ( @{syntax string} + ) ) ? ( @'file' @{syntax string} ) ?
-    ;
-
-    @@{command (HOL) code_pred} \<newline> ('(' @'modes' ':' modedecl ')')? \<newline> const
-    ;
-
-    modedecl: (modes | ((const ':' modes) \<newline>
-        (@'and' ((const ':' modes @'and') +))?))
-    ;
-
-    modes: mode @'as' const
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "export_code"} generates code for a given list
-  of constants in the specified target language(s).  If no
-  serialization instruction is given, only abstract code is generated
-  internally.
-
-  Constants may be specified by giving them literally, referring to
-  all executable constants within a certain theory by giving @{text
-  "name._"}, or referring to \emph{all} executable constants currently
-  available by giving @{text "_"}.
-
-  By default, exported identifiers are minimized per module.  This
-  can be suppressed by prepending @{keyword "open"} before the list
-  of constants.
-
-  By default, for each involved theory one corresponding name space
-  module is generated.  Alternatively, a module name may be specified
-  after the @{keyword "module_name"} keyword; then \emph{all} code is
-  placed in this module.
-
-  For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification
-  refers to a single file; for \emph{Haskell}, it refers to a whole
-  directory, where code is generated in multiple files reflecting the
-  module hierarchy.  Omitting the file specification denotes standard
-  output.
-
-  Serializers take an optional list of arguments in parentheses.
-  For \emph{Haskell} a module name prefix may be given using the
-  ``@{text "root:"}'' argument; ``@{text string_classes}'' adds a
-  ``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate
-  datatype declaration.
-
-  \item @{attribute (HOL) code} declares code equations for code
-  generation.  Variant @{text "code equation"} declares a conventional
-  equation as code equation.  Variants @{text "code abstype"} and
-  @{text "code abstract"} declare abstract datatype certificates or
-  code equations on abstract datatype representations respectively.
-  Vanilla @{text "code"} falls back to @{text "code equation"}
-  or @{text "code abstype"} depending on the syntactic shape
-  of the underlying equation.  Variant @{text "code del"}
-  deselects a code equation for code generation.
-
-  Variants @{text "code drop:"} and @{text "code abort:"} take
-  a list of constant as arguments and drop all code equations declared
-  for them.  In the case of {text abort}, these constants then are
-  are not required to have a definition by means of code equations;
-  if needed these are implemented by program abort (exception) instead.
-
-  Usually packages introducing code equations provide a reasonable
-  default setup for selection.  
-
-  \item @{command (HOL) "code_datatype"} specifies a constructor set
-  for a logical type.
-
-  \item @{command (HOL) "print_codesetup"} gives an overview on
-  selected code equations and code generator datatypes.
-
-  \item @{attribute (HOL) code_unfold} declares (or with option
-  ``@{text "del"}'' removes) theorems which during preprocessing
-  are applied as rewrite rules to any code equation or evaluation
-  input.
-
-  \item @{attribute (HOL) code_post} declares (or with option ``@{text
-  "del"}'' removes) theorems which are applied as rewrite rules to any
-  result of an evaluation.
-
-  \item @{attribute (HOL) code_abbrev} declares equations which are
-  applied as rewrite rules to any result of an evaluation and
-  symmetrically during preprocessing to any code equation or evaluation
-  input.
-
-  \item @{command (HOL) "print_codeproc"} prints the setup of the code
-  generator preprocessor.
-
-  \item @{command (HOL) "code_thms"} prints a list of theorems
-  representing the corresponding program containing all given
-  constants after preprocessing.
-
-  \item @{command (HOL) "code_deps"} visualizes dependencies of
-  theorems representing the corresponding program containing all given
-  constants after preprocessing.
-
-  \item @{command (HOL) "code_reserved"} declares a list of names as
-  reserved for a given target, preventing them from being shadowed by any
-  generated code.
-
-  \item @{command (HOL) "code_printing"} associates a series of symbols
-  (constants, type constructors, classes, class relations, instances,
-  module names) with target-specific serializations; omitting a serialization
-  deletes an existing serialization.
-
-  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism
-  to generate monadic code for Haskell.
-
-  \item @{command (HOL) "code_identifier"} associates a a series of symbols
-  (constants, type constructors, classes, class relations, instances,
-  module names) with target-specific hints how these symbols shall be named.
-  These hints gain precedence over names for symbols with no hints at all.
-  Conflicting hints are subject to name disambiguation.
-  \emph{Warning:} It is at the discretion
-  of the user to ensure that name prefixes of identifiers in compound
-  statements like type classes or datatypes are still the same.
-
-  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
-  argument compiles code into the system runtime environment and
-  modifies the code generator setup such that future invocations of system
-  runtime code generation referring to one of the ``@{text
-  "datatypes"}'' or ``@{text "functions"}'' entities use these
-  precompiled entities.  With a ``@{text "file"}'' argument, the
-  corresponding code is generated into that specified file without
-  modifying the code generator setup.
-
-  \item @{command (HOL) "code_pred"} creates code equations for a
-    predicate given a set of introduction rules. Optional mode
-    annotations determine which arguments are supposed to be input or
-    output. If alternative introduction rules are declared, one must
-    prove a corresponding elimination rule.
-
-  \end{description}
-*}
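-
-text {* For illustration, a minimal invocation that exports the list
-  reversal function to SML and inspects the underlying code equations
-  might read as follows. *}
-
-export_code rev in SML module_name Example
-
-code_thms rev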
-
-end
--- a/src/Doc/IsarRef/Inner_Syntax.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1709 +0,0 @@
-theory Inner_Syntax
-imports Base Main
-begin
-
-chapter {* Inner syntax --- the term language \label{ch:inner-syntax} *}
-
-text {* The inner syntax of Isabelle provides concrete notation for
-  the main entities of the logical framework, notably @{text
-  "\<lambda>"}-terms with types and type classes.  Applications may either
-  extend existing syntactic categories by additional notation, or
-  define new sub-languages that are linked to the standard term
-  language via some explicit markers.  For example @{verbatim
-  FOO}~@{text "foo"} could embed the syntax corresponding for some
-  user-defined nonterminal @{text "foo"} --- within the bounds of the
-  given lexical syntax of Isabelle/Pure.
-
-  The most basic way to specify concrete syntax for logical entities
-  works via mixfix annotations (\secref{sec:mixfix}), which may be
-  usually given as part of the original declaration or via explicit
-  notation commands later on (\secref{sec:notation}).  This already
-  covers many needs of concrete syntax without having to understand
-  the full complexity of inner syntax layers.
-
-  Further details of the syntax engine involve the classical
-  distinction of lexical language versus context-free grammar (see
-  \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax
-  transformations} (see \secref{sec:syntax-transformations}).
-*}
-
-
-section {* Printing logical entities *}
-
-subsection {* Diagnostic commands \label{sec:print-diag} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "typ"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "term"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "prop"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "thm"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "full_prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_state"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-  \end{matharray}
-
-  These diagnostic commands assist interactive development by printing
-  internal logical entities in a human-readable fashion.
-
-  @{rail \<open>
-    @@{command typ} @{syntax modes}? @{syntax type} ('::' @{syntax sort})?
-    ;
-    @@{command term} @{syntax modes}? @{syntax term}
-    ;
-    @@{command prop} @{syntax modes}? @{syntax prop}
-    ;
-    @@{command thm} @{syntax modes}? @{syntax thmrefs}
-    ;
-    ( @@{command prf} | @@{command full_prf} ) @{syntax modes}? @{syntax thmrefs}?
-    ;
-    @@{command print_state} @{syntax modes}?
-    ;
-    @{syntax_def modes}: '(' (@{syntax name} + ) ')'
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
-  according to the current context.
-
-  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
-  determine the most general way to make @{text "\<tau>"} conform to sort
-  @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
-  belongs to that sort.  Dummy type parameters ``@{text "_"}''
-  (underscore) are assigned to fresh type variables with most general
-  sorts, according to the principles of type-inference.
-
-  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
-  read, type-check and print terms or propositions according to the
-  current theory or proof context; the inferred type of @{text t} is
-  output as well.  Note that these commands are also useful in
-  inspecting the current environment of term abbreviations.
-
-  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
-  theorems from the current theory or proof context.  Note that any
-  attributes included in the theorem specifications are applied to a
-  temporary context derived from the current theory or proof; the
-  result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
-  \<dots>, a\<^sub>n"} do not have any permanent effect.
-
-  \item @{command "prf"} displays the (compact) proof term of the
-  current proof state (if present), or of the given theorems. Note
-  that this requires proof terms to be switched on for the current
-  object logic (see the ``Proof terms'' section of the Isabelle
-  reference manual for information on how to do this).
-
-  \item @{command "full_prf"} is like @{command "prf"}, but displays
-  the full proof term, i.e.\ also displays information omitted in the
-  compact proof term, which is denoted by ``@{text _}'' placeholders
-  there.
-
-  \item @{command "print_state"} prints the current proof state (if
-  present), including current facts and goals.
-
-  \end{description}
-
-  All of the diagnostic commands above admit a list of @{text modes}
-  to be specified, which is appended to the current print mode; see
-  also \secref{sec:print-modes}.  Thus the output behavior may be
-  modified according to particular print mode features.  For example,
-  @{command "print_state"}~@{text "(latex xsymbols)"} prints the
-  current proof state with mathematical symbols and special characters
-  represented in {\LaTeX} source, according to the Isabelle style
-  \cite{isabelle-sys}.
-
-  Note that antiquotations (cf.\ \secref{sec:antiq}) provide a more
-  systematic way to include formal items into the printed text
-  document.
-*}
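-
-text {* For illustration, a few of these diagnostic commands in action: *}
-
-typ "'a \<Rightarrow> 'a list"
-
-term "rev (rev xs)"
-
-thm conjI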
-
-
-subsection {* Details of printed content *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def show_markup} & : & @{text attribute} \\
-    @{attribute_def show_types} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_sorts} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_consts} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_abbrevs} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def show_brackets} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def names_long} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def names_short} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def names_unique} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def eta_contract} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def goals_limit} & : & @{text attribute} & default @{text 10} \\
-    @{attribute_def show_main_goal} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_hyps} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_tags} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_question_marks} & : & @{text attribute} & default @{text true} \\
-  \end{tabular}
-  \medskip
-
-  These configuration options control the detail of information that
-  is displayed for types, terms, theorems, goals etc.  See also
-  \secref{sec:config}.
-
-  \begin{description}
-
-  \item @{attribute show_markup} controls direct inlining of markup
-  into the printed representation of formal entities --- notably type
-  and sort constraints.  This enables Prover IDE users to retrieve
-  that information via tooltips or popups while hovering with the
-  mouse over the output window, for example.  Consequently, this
-  option is enabled by default for Isabelle/jEdit, but disabled for
-  TTY and Proof~General~/Emacs where document markup would not work.
-
-  \item @{attribute show_types} and @{attribute show_sorts} control
-  printing of type constraints for term variables, and sort
-  constraints for type variables.  By default, neither of these are
-  shown in output.  If @{attribute show_sorts} is enabled, types are
-  always shown as well.  In Isabelle/jEdit, manual setting of these
-  options is normally not required thanks to @{attribute show_markup}
-  above.
-
-  Note that displaying types and sorts may explain why a polymorphic
-  inference rule fails to resolve with some goal, or why a rewrite
-  rule does not apply as expected.
-
-  \item @{attribute show_consts} controls printing of types of
-  constants when displaying a goal state.
-
-  Note that the output can be enormous, because polymorphic constants
-  often occur at several different type instances.
-
-  \item @{attribute show_abbrevs} controls folding of constant
-  abbreviations.
-
-  \item @{attribute show_brackets} controls bracketing in pretty
-  printed output.  If enabled, all sub-expressions of the pretty
-  printing tree will be parenthesized, even if this produces malformed
-  term syntax!  This crude way of showing the internal structure of
-  pretty printed entities may occasionally help to diagnose problems
-  with operator priorities, for example.
-
-  \item @{attribute names_long}, @{attribute names_short}, and
-  @{attribute names_unique} control the way of printing fully
-  qualified internal names in external form.  See also
-  \secref{sec:antiq} for the document antiquotation options of the
-  same names.
-
-  \item @{attribute eta_contract} controls @{text "\<eta>"}-contracted
-  printing of terms.
-
-  The @{text \<eta>}-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"},
-  provided @{text x} is not free in @{text f}.  It asserts
-  \emph{extensionality} of functions: @{prop "f \<equiv> g"} if @{prop "f x \<equiv>
-  g x"} for all @{text x}.  Higher-order unification frequently puts
-  terms into a fully @{text \<eta>}-expanded form.  For example, if @{text
-  F} has type @{text "(\<tau> \<Rightarrow> \<tau>) \<Rightarrow> \<tau>"} then its expanded form is @{term
-  "\<lambda>h. F (\<lambda>x. h x)"}.
-
-  Enabling @{attribute eta_contract} makes Isabelle perform @{text
-  \<eta>}-contractions before printing, so that @{term "\<lambda>h. F (\<lambda>x. h x)"}
-  appears simply as @{text F}.
-
-  Note that the distinction between a term and its @{text \<eta>}-expanded
-  form occasionally matters.  While higher-order resolution and
-  rewriting operate modulo @{text "\<alpha>\<beta>\<eta>"}-conversion, some other tools
-  might look at terms more discretely.
-
-  \item @{attribute goals_limit} controls the maximum number of
-  subgoals to be printed.
-
-  \item @{attribute show_main_goal} controls whether the main result
-  to be proven should be displayed.  This information might be
-  relevant for schematic goals, to inspect the current claim that has
-  been synthesized so far.
-
-  \item @{attribute show_hyps} controls printing of implicit
-  hypotheses of local facts.  Normally, only those hypotheses are
-  displayed that are \emph{not} covered by the assumptions of the
-  current context: this situation indicates a fault in some tool being
-  used.
-
-  By enabling @{attribute show_hyps}, output of \emph{all} hypotheses
-  can be enforced, which is occasionally useful for diagnostic
-  purposes.
-
-  \item @{attribute show_tags} controls printing of extra annotations
-  within theorems, such as internal position information, or the case
-  names being attached by the attribute @{attribute case_names}.
-
-  Note that the @{attribute tagged} and @{attribute untagged}
-  attributes provide low-level access to the collection of tags
-  associated with a theorem.
-
-  \item @{attribute show_question_marks} controls printing of question
-  marks for schematic variables, such as @{text ?x}.  Only the leading
-  question mark is affected, the remaining text is unchanged
-  (including proper markup for schematic variables that might be
-  relevant for user interfaces).
-
-  \end{description}
-*}
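-
-text {* For illustration, such options are usually adjusted via
-  configuration declarations like the following. *}
-
-declare [[show_types = true, show_sorts = false]]
-
-declare [[show_types = false]]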
-
-
-subsection {* Alternative print modes \label{sec:print-modes} *}
-
-text {*
-  \begin{mldecls}
-    @{index_ML print_mode_value: "unit -> string list"} \\
-    @{index_ML Print_Mode.with_modes: "string list -> ('a -> 'b) -> 'a -> 'b"} \\
-  \end{mldecls}
-
-  The \emph{print mode} facility allows to modify various operations
-  for printing.  Commands like @{command typ}, @{command term},
-  @{command thm} (see \secref{sec:print-diag}) take additional print
-  modes as optional argument.  The underlying ML operations are as
-  follows.
-
-  \begin{description}
-
-  \item @{ML "print_mode_value ()"} yields the list of currently
-  active print mode names.  This should be understood as symbolic
-  representation of certain individual features for printing (with
-  precedence from left to right).
-
-  \item @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
-  @{text "f x"} in an execution context where the print mode is
-  prepended by the given @{text "modes"}.  This provides a thread-safe
-  way to augment print modes.  It is also monotonic in the set of mode
-  names: it retains the default print mode that certain
-  user-interfaces might have installed for their proper functioning!
-
-  \end{description}
-
-  \begin{warn}
-  The old global reference @{ML print_mode} should never be used
-  directly in applications.  Its main reason for being publicly
-  accessible is to support historic versions of Proof~General.
-  \end{warn}
-
-  \medskip The pretty printer for inner syntax maintains alternative
-  mixfix productions for any print mode name invented by the user, say
-  in commands like @{command notation} or @{command abbreviation}.
-  Mode names can be arbitrary, but the following ones have a specific
-  meaning by convention:
-
-  \begin{itemize}
-
-  \item @{verbatim "\"\""} (the empty string): default mode;
-  implicitly active as last element in the list of modes.
-
-  \item @{verbatim input}: dummy print mode that is never active; may
-  be used to specify notation that is only available for input.
-
-  \item @{verbatim internal}: dummy print mode that is never active;
-  used internally in Isabelle/Pure.
-
-  \item @{verbatim xsymbols}: enable proper mathematical symbols
-  instead of ASCII art.\footnote{This traditional mode name stems from
-  the ``X-Symbol'' package for old versions of Proof~General with XEmacs,
-  although that package has been superseded by Unicode in recent
-  years.}
-
-  \item @{verbatim HTML}: additional mode that is active in HTML
-  presentation of Isabelle theory sources; allows to provide
-  alternative output notation.
-
-  \item @{verbatim latex}: additional mode that is active in {\LaTeX}
-  document preparation of Isabelle theory sources; allows to provide
-  alternative output notation.
-
-  \end{itemize}
-*}
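-
-text {* For illustration, a term may be printed under additional print
-  modes from Isabelle/ML as follows (a minimal sketch). *}
-
-ML {*
-  Print_Mode.with_modes ["xsymbols"]
-    (fn () => writeln (Syntax.string_of_term @{context} @{term "x \<and> y"})) ()
-*}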
-
-
-subsection {* Printing limits *}
-
-text {*
-  \begin{mldecls}
-    @{index_ML Pretty.margin_default: "int Unsynchronized.ref"} \\
-  \end{mldecls}
-
-  \begin{tabular}{rcll}
-    @{attribute_def ML_print_depth} & : & @{text attribute} & default 10 \\ %FIXME move?
-  \end{tabular}
-
-  \begin{description}
-
-  \item @{ML Pretty.margin_default} indicates the global default for
-  the right margin of the built-in pretty printer, with initial value
-  76.  Note that user-interfaces typically control margins
-  automatically when resizing windows, or even bypass the formatting
-  engine of Isabelle/ML altogether and do it within the front end via
-  Isabelle/Scala.
-
-  \item @{attribute ML_print_depth} limits the printing depth of the
-  ML toplevel pretty printer; the precise effect depends on the ML
-  compiler and run-time system.  Typically the limit should be less
-  than 10.  Bigger values such as 100--1000 are useful for debugging.
-
-  \end{description}
-*}
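-
-text {* For illustration, these limits can be adjusted as follows;
-  this is only a sketch and rarely needed when working in
-  Isabelle/jEdit. *}
-
-ML {* Pretty.margin_default := 100 *}
-
-declare [[ML_print_depth = 100]]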
-
-
-section {* Mixfix annotations \label{sec:mixfix} *}
-
-text {* Mixfix annotations specify concrete \emph{inner syntax} of
-  Isabelle types and terms.  Locally fixed parameters in toplevel
-  theorem statements, locale and class specifications also admit
-  mixfix annotations in a fairly uniform manner.  A mixfix annotation
-  describes the concrete syntax, the translation to abstract
-  syntax, and the pretty printing.  Special case annotations provide a
-  simple means of specifying infix operators and binders.
-
-  Isabelle mixfix syntax is inspired by {\OBJ} \cite{OBJ}.  It allows
-  to specify any context-free priority grammar, which is more general
-  than the fixity declarations of ML and Prolog.
-
-  @{rail \<open>
-    @{syntax_def mixfix}: '('
-      @{syntax template} prios? @{syntax nat}? |
-      (@'infix' | @'infixl' | @'infixr') @{syntax template} @{syntax nat} |
-      @'binder' @{syntax template} prios? @{syntax nat} |
-      @'structure'
-    ')'
-    ;
-    template: string
-    ;
-    prios: '[' (@{syntax nat} + ',') ']'
-  \<close>}
-
-  The string given as @{text template} may include literal text,
-  spacing, blocks, and arguments (denoted by ``@{text _}''); the
-  special symbol ``@{verbatim "\<index>"}'' (printed as ``@{text "\<index>"}'')
-  represents an index argument that specifies an implicit @{keyword
-  "structure"} reference (see also \secref{sec:locale}).  Only locally
-  fixed variables may be declared as @{keyword "structure"}.
-
-  Infix and binder declarations provide common abbreviations for
-  particular mixfix declarations.  So in practice, mixfix templates
-  mostly degenerate to literal text for concrete syntax, such as
-  ``@{verbatim "++"}'' for an infix symbol.  *}
-
-
-subsection {* The general mixfix form *}
-
-text {* In full generality, mixfix declarations work as follows.
-  Suppose a constant @{text "c :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} is annotated by
-  @{text "(mixfix [p\<^sub>1, \<dots>, p\<^sub>n] p)"}, where @{text "mixfix"} is a string
-  @{text "d\<^sub>0 _ d\<^sub>1 _ \<dots> _ d\<^sub>n"} consisting of delimiters that surround
-  argument positions as indicated by underscores.
-
-  Altogether this determines a production for a context-free priority
-  grammar, where for each argument @{text "i"} the syntactic category
-  is determined by @{text "\<tau>\<^sub>i"} (with priority @{text "p\<^sub>i"}), and the
-  result category is determined from @{text "\<tau>"} (with priority @{text
-  "p"}).  Priority specifications are optional, with default 0 for
-  arguments and 1000 for the result.\footnote{Omitting priorities is
-  prone to syntactic ambiguities unless the delimiter tokens determine
-  fully bracketed notation, as in @{text "if _ then _ else _ fi"}.}
-
-  Since @{text "\<tau>"} may be again a function type, the constant
-  type scheme may have more argument positions than the mixfix
-  pattern.  Printing a nested application @{text "c t\<^sub>1 \<dots> t\<^sub>m"} for
-  @{text "m > n"} works by attaching concrete notation only to the
-  innermost part, essentially by printing @{text "(c t\<^sub>1 \<dots> t\<^sub>n) \<dots> t\<^sub>m"}
-  instead.  If a term has fewer arguments than specified in the mixfix
-  template, the concrete syntax is ignored.
-
-  \medskip A mixfix template may also contain additional directives
-  for pretty printing, notably spaces, blocks, and breaks.  The
-  general template format is a sequence over any of the following
-  entities.
-
-  \begin{description}
-
-  \item @{text "d"} is a delimiter, namely a non-empty sequence of
-  characters other than the following special characters:
-
-  \smallskip
-  \begin{tabular}{ll}
-    @{verbatim "'"} & single quote \\
-    @{verbatim "_"} & underscore \\
-    @{text "\<index>"} & index symbol \\
-    @{verbatim "("} & open parenthesis \\
-    @{verbatim ")"} & close parenthesis \\
-    @{verbatim "/"} & slash \\
-  \end{tabular}
-  \medskip
-
-  \item @{verbatim "'"} escapes the special meaning of these
-  meta-characters, producing a literal version of the following
-  character, unless that is a blank.
-
-  A single quote followed by a blank separates delimiters, without
-  affecting printing, but input tokens may have additional white space
-  here.
-
-  \item @{verbatim "_"} is an argument position, which stands for a
-  certain syntactic category in the underlying grammar.
-
-  \item @{text "\<index>"} is an indexed argument position; this is the place
-  where implicit structure arguments can be attached.
-
-  \item @{text "s"} is a non-empty sequence of spaces for printing.
-  This and the following specifications do not affect parsing at all.
-
-  \item @{verbatim "("}@{text n} opens a pretty printing block.  The
-  optional number specifies how much indentation to add when a line
-  break occurs within the block.  If the parenthesis is not followed
-  by digits, the indentation defaults to 0.  A block specified via
-  @{verbatim "(00"} is unbreakable.
-
-  \item @{verbatim ")"} closes a pretty printing block.
-
-  \item @{verbatim "//"} forces a line break.
-
-  \item @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
-  stands for the string of spaces (zero or more) right after the
-  slash.  These spaces are printed if the break is \emph{not} taken.
-
-  \end{description}
-
-  The general idea of pretty printing with blocks and breaks is also
-  described in \cite{paulson-ml2}; it goes back to \cite{Oppen:1980}.
-*}
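-
-text {* For illustration, a hypothetical fully-bracketed conditional
-  with pretty-printing blocks and breaks might be declared like this,
-  mimicking the mixfix of the predefined @{text "if then else"}. *}
-
-(* hypothetical constant, for illustration only *)
-definition If2 :: "bool \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a"  ("(if2 (_)/ then (_)/ else (_))" [0, 0, 10] 10)
-  where "If2 b x y = (if b then x else y)"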
-
-
-subsection {* Infixes *}
-
-text {* Infix operators are specified by convenient short forms that
-  abbreviate general mixfix annotations as follows:
-
-  \begin{center}
-  \begin{tabular}{lll}
-
-  @{verbatim "("}@{keyword_def "infix"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
-  & @{text "\<mapsto>"} &
-  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
-  @{verbatim "("}@{keyword_def "infixl"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
-  & @{text "\<mapsto>"} &
-  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
-  @{verbatim "("}@{keyword_def "infixr"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
-  & @{text "\<mapsto>"} &
-  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
-
-  \end{tabular}
-  \end{center}
-
-  The mixfix template @{verbatim "\"(_ "}@{text sy}@{verbatim "/ _)\""}
-  specifies two argument positions; the delimiter is preceded by a
-  space and followed by a space or line break; the entire phrase is a
-  pretty printing block.
-
-  The alternative notation @{verbatim "op"}~@{text sy} is introduced
-  in addition.  Thus any infix operator may be written in prefix form
-  (as in ML), independently of the number of arguments in the term.
-*}
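-
-text {* For illustration, a hypothetical infix operator might be
-  introduced like this. *}
-
-(* hypothetical constant, for illustration only *)
-definition myadd :: "nat \<Rightarrow> nat \<Rightarrow> nat"  (infixl "\<oplus>" 65)
-  where "a \<oplus> b = a + b"
-
-term "x \<oplus> y \<oplus> z"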
-
-
-subsection {* Binders *}
-
-text {* A \emph{binder} is a variable-binding construct such as a
-  quantifier.  The idea to formalize @{text "\<forall>x. b"} as @{text "All
-  (\<lambda>x. b)"} for @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow> bool"} already goes back
-  to \cite{church40}.  Isabelle declarations of certain higher-order
-  operators may be annotated with @{keyword_def "binder"} annotations
-  as follows:
-
-  \begin{center}
-  @{text "c :: "}@{verbatim "\""}@{text "(\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2) \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  ("}@{keyword "binder"}@{verbatim " \""}@{text "sy"}@{verbatim "\" ["}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
-  \end{center}
-
-  This introduces concrete binder syntax @{text "sy x. b"}, where
-  @{text x} is a bound variable of type @{text "\<tau>\<^sub>1"}, the body @{text
-  b} has type @{text "\<tau>\<^sub>2"} and the whole term has type @{text "\<tau>\<^sub>3"}.
-  The optional integer @{text p} specifies the syntactic priority of
-  the body; the default is @{text "q"}, which is also the priority of
-  the whole construct.
-
-  Internally, the binder syntax is expanded to something like this:
-  \begin{center}
-  @{text "c_binder :: "}@{verbatim "\""}@{text "idts \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  (\"(3"}@{text sy}@{verbatim "_./ _)\" [0, "}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
-  \end{center}
-
-  Here @{syntax (inner) idts} is the nonterminal symbol for a list of
-  identifiers with optional type constraints (see also
-  \secref{sec:pure-grammar}).  The mixfix template @{verbatim
-  "\"(3"}@{text sy}@{verbatim "_./ _)\""} defines argument positions
-  for the bound identifiers and the body, separated by a dot with
-  optional line break; the entire phrase is a pretty printing block of
-  indentation level 3.  Note that there is no extra space after @{text
-  "sy"}, so it needs to be included user specification if the binder
-  syntax ends with a token that may be continued by an identifier
-  token at the start of @{syntax (inner) idts}.
-
-  Furthermore, a syntax translation transforms @{text "c_binder x\<^sub>1
-  \<dots> x\<^sub>n b"} into iterated application @{text "c (\<lambda>x\<^sub>1. \<dots> c (\<lambda>x\<^sub>n. b)\<dots>)"}.
-  This works in both directions, for parsing and printing.  *}
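-
-text {* For illustration, a hypothetical binder for existential
-  quantification over natural numbers might be introduced like this. *}
-
-(* hypothetical constant, for illustration only *)
-definition Exists_nat :: "(nat \<Rightarrow> bool) \<Rightarrow> bool"  (binder "SOMENAT " 10)
-  where "Exists_nat P \<longleftrightarrow> (\<exists>n. P n)"
-
-term "SOMENAT n. n > 0"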
-
-
-section {* Explicit notation \label{sec:notation} *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "no_type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "no_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "write"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-  \end{matharray}
-
-  Commands that introduce new logical entities (terms or types)
-  usually allow to provide mixfix annotations on the spot, which is
-  convenient for default notation.  Nonetheless, the syntax may be
-  modified later on by declarations for explicit notation.  This
-  allows to add or delete mixfix annotations of existing logical
-  entities within the current context.
-
-  @{rail \<open>
-    (@@{command type_notation} | @@{command no_type_notation}) @{syntax target}?
-      @{syntax mode}? \<newline> (@{syntax nameref} @{syntax mixfix} + @'and')
-    ;
-    (@@{command notation} | @@{command no_notation}) @{syntax target}? @{syntax mode}? \<newline>
-      (@{syntax nameref} @{syntax mixfix} + @'and')
-    ;
-    @@{command write} @{syntax mode}? (@{syntax nameref} @{syntax mixfix} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "type_notation"}~@{text "c (mx)"} associates mixfix
-  syntax with an existing type constructor.  The arity of the
-  constructor is retrieved from the context.
-
-  \item @{command "no_type_notation"} is similar to @{command
-  "type_notation"}, but removes the specified syntax annotation from
-  the present context.
-
-  \item @{command "notation"}~@{text "c (mx)"} associates mixfix
-  syntax with an existing constant or fixed variable.  The type
-  declaration of the given entity is retrieved from the context.
-
-  \item @{command "no_notation"} is similar to @{command "notation"},
-  but removes the specified syntax annotation from the present
-  context.
-
-  \item @{command "write"} is similar to @{command "notation"}, but
-  works within an Isar proof body.
-
-  \end{description}
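-
-  For example, infix notation for an existing constant @{text "myop"}
-  (a hypothetical name) could be added and removed again like this:
-  \begin{ttbox}
-  notation myop  (infixl "++" 65)
-  no_notation myop  (infixl "++" 65)
-  \end{ttbox}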
-*}
-
-
-section {* The Pure syntax \label{sec:pure-syntax} *}
-
-subsection {* Lexical matters \label{sec:inner-lex} *}
-
-text {* The inner lexical syntax vaguely resembles the outer one
-  (\secref{sec:outer-lex}), but some details are different.  There are
-  two main categories of inner syntax tokens:
-
-  \begin{enumerate}
-
-  \item \emph{delimiters} --- the literal tokens occurring in
-  productions of the given priority grammar (cf.\
-  \secref{sec:priority-grammar});
-
-  \item \emph{named tokens} --- various categories of identifiers etc.
-
-  \end{enumerate}
-
-  Delimiters override named tokens and may thus render certain
-  identifiers inaccessible.  Sometimes the logical context admits
-  alternative ways to refer to the same entity, potentially via
-  qualified names.
-
-  \medskip The categories for named tokens are defined once and for
-  all as follows, reusing some categories of the outer token syntax
-  (\secref{sec:outer-lex}).
-
-  \begin{center}
-  \begin{supertabular}{rcl}
-    @{syntax_def (inner) id} & = & @{syntax_ref ident} \\
-    @{syntax_def (inner) longid} & = & @{syntax_ref longident} \\
-    @{syntax_def (inner) var} & = & @{syntax_ref var} \\
-    @{syntax_def (inner) tid} & = & @{syntax_ref typefree} \\
-    @{syntax_def (inner) tvar} & = & @{syntax_ref typevar} \\
-    @{syntax_def (inner) num_token} & = & @{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat} \\
-    @{syntax_def (inner) float_token} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
-    @{syntax_def (inner) xnum_token} & = & @{verbatim "#"}@{syntax_ref nat}@{text "  |  "}@{verbatim "#-"}@{syntax_ref nat} \\
-    @{syntax_def (inner) str_token} & = & @{verbatim "''"} @{text "\<dots>"} @{verbatim "''"} \\
-    @{syntax_def (inner) string_token} & = & @{verbatim "\""} @{text "\<dots>"} @{verbatim "\""} \\
-    @{syntax_def (inner) cartouche} & = & @{verbatim "\<open>"} @{text "\<dots>"} @{verbatim "\<close>"} \\
-  \end{supertabular}
-  \end{center}
-
-  The token categories @{syntax (inner) num_token}, @{syntax (inner)
-  float_token}, @{syntax (inner) xnum_token}, @{syntax (inner)
-  str_token}, @{syntax (inner) string_token}, and @{syntax (inner)
-  cartouche} are not used in Pure. Object-logics may implement
-  numerals and string literals by adding appropriate syntax
-  declarations, together with some translation functions (e.g.\ see
-  @{file "~~/src/HOL/Tools/string_syntax.ML"}).
-
-  The derived categories @{syntax_def (inner) num_const}, @{syntax_def
-  (inner) float_const}, and @{syntax_def (inner) xnum_const} provide
-  robust access to the respective tokens: the syntax tree holds a
-  syntactic constant instead of a free variable.
-*}
-
-
-subsection {* Priority grammars \label{sec:priority-grammar} *}
-
-text {* A context-free grammar consists of a set of \emph{terminal
-  symbols}, a set of \emph{nonterminal symbols} and a set of
-  \emph{productions}.  Productions have the form @{text "A = \<gamma>"},
-  where @{text A} is a nonterminal and @{text \<gamma>} is a string of
-  terminals and nonterminals.  One designated nonterminal is called
-  the \emph{root symbol}.  The language defined by the grammar
-  consists of all strings of terminals that can be derived from the
-  root symbol by applying productions as rewrite rules.
-
-  The standard Isabelle parser for inner syntax uses a \emph{priority
-  grammar}.  Each nonterminal is decorated by an integer priority:
-  @{text "A\<^sup>(\<^sup>p\<^sup>)"}.  In a derivation, @{text "A\<^sup>(\<^sup>p\<^sup>)"} may be rewritten
-  using a production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} only if @{text "p \<le> q"}.  Any
-  priority grammar can be translated into a normal context-free
-  grammar by introducing new nonterminals and productions.
-
-  \medskip Formally, a set of context free productions @{text G}
-  induces a derivation relation @{text "\<longrightarrow>\<^sub>G"} as follows.  Let @{text
-  \<alpha>} and @{text \<beta>} denote strings of terminal or nonterminal symbols.
-  Then @{text "\<alpha> A\<^sup>(\<^sup>p\<^sup>) \<beta> \<longrightarrow>\<^sub>G \<alpha> \<gamma> \<beta>"} holds if and only if @{text G}
-  contains some production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} for @{text "p \<le> q"}.
-
-  \medskip The following grammar for arithmetic expressions
-  demonstrates how binding power and associativity of operators can be
-  enforced by priorities.
-
-  \begin{center}
-  \begin{tabular}{rclr}
-  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim "("} @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim ")"} \\
-  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim 0} \\
-  @{text "A\<^sup>(\<^sup>0\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} \\
-  @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} \\
-  @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "="} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} \\
-  \end{tabular}
-  \end{center}
-  The choice of priorities determines that @{verbatim "-"} binds
-  tighter than @{verbatim "*"}, which binds tighter than @{verbatim
-  "+"}.  Furthermore @{verbatim "+"} associates to the left and
-  @{verbatim "*"} to the right.
-
-  \medskip For clarity, grammars obey these conventions:
-  \begin{itemize}
-
-  \item All priorities must lie between 0 and 1000.
-
-  \item Priority 0 on the right-hand side and priority 1000 on the
-  left-hand side may be omitted.
-
-  \item The production @{text "A\<^sup>(\<^sup>p\<^sup>) = \<alpha>"} is written as @{text "A = \<alpha>
-  (p)"}, i.e.\ the priority of the left-hand side actually appears in
-  a column on the far right.
-
-  \item Alternatives are separated by @{text "|"}.
-
-  \item Repetition is indicated by dots @{text "(\<dots>)"} in an informal
-  but obvious way.
-
-  \end{itemize}
-
-  Using these conventions, the example grammar specification above
-  takes the form:
-  \begin{center}
-  \begin{tabular}{rclc}
-    @{text A} & @{text "="} & @{verbatim "("} @{text A} @{verbatim ")"} \\
-              & @{text "|"} & @{verbatim 0} & \qquad\qquad \\
-              & @{text "|"} & @{text A} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} & @{text "(0)"} \\
-              & @{text "|"} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
-              & @{text "|"} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
-  \end{tabular}
-  \end{center}
-*}
-
-
-subsection {* The Pure grammar \label{sec:pure-grammar} *}
-
-text {* The priority grammar of the @{text "Pure"} theory is defined
-  approximately like this:
-
-  \begin{center}
-  \begin{supertabular}{rclr}
-
-  @{syntax_def (inner) any} & = & @{text "prop  |  logic"} \\\\
-
-  @{syntax_def (inner) prop} & = & @{verbatim "("} @{text prop} @{verbatim ")"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
-    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "=="} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(2)"} \\
-    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{text "\<equiv>"} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(2)"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "&&&"} @{text "prop\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{verbatim "[|"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{verbatim "|]"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{text "\<lbrakk>"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{text "\<rbrakk>"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{verbatim "!!"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
-    & @{text "|"} & @{text "\<And>"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim OFCLASS} @{verbatim "("} @{text type} @{verbatim ","} @{text logic} @{verbatim ")"} \\
-    & @{text "|"} & @{verbatim SORT_CONSTRAINT} @{verbatim "("} @{text type} @{verbatim ")"} \\
-    & @{text "|"} & @{verbatim TERM} @{text logic} \\
-    & @{text "|"} & @{verbatim PROP} @{text aprop} \\\\
-
-  @{syntax_def (inner) aprop} & = & @{verbatim "("} @{text aprop} @{verbatim ")"} \\
-    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
-    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
-    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
-    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\\\
-
-  @{syntax_def (inner) logic} & = & @{verbatim "("} @{text logic} @{verbatim ")"} \\
-    & @{text "|"} & @{text "logic\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
-    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
-    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
-    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
-    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\
-    & @{text "|"} & @{text "\<struct> index\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} \\
-    & @{text "|"} & @{verbatim "%"} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
-    & @{text "|"} & @{text \<lambda>} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
-    & @{text "|"} & @{verbatim op} @{verbatim "=="}@{text "  |  "}@{verbatim op} @{text "\<equiv>"}@{text "  |  "}@{verbatim op} @{verbatim "&&&"} \\
-    & @{text "|"} & @{verbatim op} @{verbatim "==>"}@{text "  |  "}@{verbatim op} @{text "\<Longrightarrow>"} \\
-    & @{text "|"} & @{verbatim TYPE} @{verbatim "("} @{text type} @{verbatim ")"} \\\\
-
-  @{syntax_def (inner) idt} & = & @{verbatim "("} @{text idt} @{verbatim ")"}@{text "  |  id  |  "}@{verbatim "_"} \\
-    & @{text "|"} & @{text id} @{verbatim "::"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim "_"} @{verbatim "::"} @{text type} & @{text "(0)"} \\\\
-
-  @{syntax_def (inner) index} & = & @{verbatim "\<^bsub>"} @{text "logic\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "\<^esub>"}@{text "  |  |  \<index>"} \\\\
-
-  @{syntax_def (inner) idts} & = & @{text "idt  |  idt\<^sup>(\<^sup>1\<^sup>) idts"} & @{text "(0)"} \\\\
-
-  @{syntax_def (inner) pttrn} & = & @{text idt} \\\\
-
-  @{syntax_def (inner) pttrns} & = & @{text "pttrn  |  pttrn\<^sup>(\<^sup>1\<^sup>) pttrns"} & @{text "(0)"} \\\\
-
-  @{syntax_def (inner) type} & = & @{verbatim "("} @{text type} @{verbatim ")"} \\
-    & @{text "|"} & @{text "tid  |  tvar  |  "}@{verbatim "_"} \\
-    & @{text "|"} & @{text "tid"} @{verbatim "::"} @{text "sort  |  tvar  "}@{verbatim "::"} @{text "sort  |  "}@{verbatim "_"} @{verbatim "::"} @{text "sort"} \\
-    & @{text "|"} & @{text "type_name  |  type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) type_name"} \\
-    & @{text "|"} & @{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text type_name} \\
-    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
-  @{syntax_def (inner) type_name} & = & @{text "id  |  longid"} \\\\
-
-  @{syntax_def (inner) sort} & = & @{syntax class_name}~@{text "  |  "}@{verbatim "{}"} \\
-    & @{text "|"} & @{verbatim "{"} @{syntax class_name} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{syntax class_name} @{verbatim "}"} \\
-  @{syntax_def (inner) class_name} & = & @{text "id  |  longid"} \\
-  \end{supertabular}
-  \end{center}
-
-  \medskip Here literal terminals are printed @{verbatim "verbatim"};
-  see also \secref{sec:inner-lex} for further token categories of the
-  inner syntax.  The meaning of the nonterminals defined by the above
-  grammar is as follows:
-
-  \begin{description}
-
-  \item @{syntax_ref (inner) any} denotes any term.
-
-  \item @{syntax_ref (inner) prop} denotes meta-level propositions,
-  which are terms of type @{typ prop}.  The syntax of such formulae of
-  the meta-logic is carefully distinguished from usual conventions for
-  object-logics.  In particular, plain @{text "\<lambda>"}-term notation is
-  \emph{not} recognized as @{syntax (inner) prop}.
-
-  \item @{syntax_ref (inner) aprop} denotes atomic propositions, which
-  are embedded into regular @{syntax (inner) prop} by means of an
-  explicit @{verbatim PROP} token.
-
-  Terms of type @{typ prop} with non-constant head, e.g.\ a plain
-  variable, are printed in this form.  Constants that yield type @{typ
-  prop} are expected to provide their own concrete syntax; otherwise
-  the printed version will appear like @{syntax (inner) logic} and
-  cannot be parsed again as @{syntax (inner) prop}.
-
-  \item @{syntax_ref (inner) logic} denotes arbitrary terms of a
-  logical type, excluding type @{typ prop}.  This is the main
-  syntactic category of object-logic entities, covering plain @{text
-  \<lambda>}-term notation (variables, abstraction, application), plus
-  anything defined by the user.
-
-  When specifying notation for logical entities, all logical types
-  (excluding @{typ prop}) are \emph{collapsed} to this single category
-  of @{syntax (inner) logic}.
-
-  \item @{syntax_ref (inner) index} denotes an optional index term for
-  indexed syntax.  If omitted, it refers to the first @{keyword_ref
-  "structure"} variable in the context.  The special dummy ``@{text
-  "\<index>"}'' serves as pattern variable in mixfix annotations that
-  introduce indexed notation.
-
-  \item @{syntax_ref (inner) idt} denotes identifiers, possibly
-  constrained by types.
-
-  \item @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
-  (inner) idt}.  This is the most basic category for variables in
-  iterated binders, such as @{text "\<lambda>"} or @{text "\<And>"}.
-
-  \item @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
-  denote patterns for abstraction, cases bindings etc.  In Pure, these
-  categories start as a mere copy of @{syntax (inner) idt} and
-  @{syntax (inner) idts}, respectively.  Object-logics may add
-  additional productions for binding forms.
-
-  \item @{syntax_ref (inner) type} denotes types of the meta-logic.
-
-  \item @{syntax_ref (inner) sort} denotes meta-level sorts.
-
-  \end{description}
-
-  Here are some further explanations of certain syntax features.
-
-  \begin{itemize}
-
-  \item In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
-  parsed as @{text "x :: (nat y)"}, treating @{text y} like a type
-  constructor applied to @{text nat}.  To avoid this interpretation,
-  write @{text "(x :: nat) y"} with explicit parentheses.
-
-  \item Similarly, @{text "x :: nat y :: nat"} is parsed as @{text "x ::
-  (nat y :: nat)"}.  The correct form is @{text "(x :: nat) (y ::
-  nat)"}, or @{text "(x :: nat) y :: nat"} if @{text y} is last in the
-  sequence of identifiers.
-
-  \item Type constraints for terms bind very weakly.  For example,
-  @{text "x < y :: nat"} is normally parsed as @{text "(x < y) ::
-  nat"}, unless @{text "<"} has a very low priority, in which case the
-  input is likely to be ambiguous.  The correct form is @{text "x < (y
-  :: nat)"}.
-
-  \item Constraints may be either written with two literal colons
-  ``@{verbatim "::"}'' or the double-colon symbol @{verbatim "\<Colon>"},
-  which actually looks exactly the same in some {\LaTeX} styles.
-
-  \item Dummy variables (written as underscore) may occur in different
-  roles.
-
-  \begin{description}
-
-  \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
-  anonymous inference parameter, which is filled-in according to the
-  most general type produced by the type-checking phase.
-
-  \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where
-  the body does not refer to the binding introduced here, as in the
-  term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
-  "\<lambda>x y. x"}.
-
-  \item A free ``@{text "_"}'' refers to an implicit outer binding.
-  Higher-level definitional packages usually allow forms like @{text "f x _
-  = x"}.
-
-  \item A schematic ``@{text "_"}'' (within a term pattern, see
-  \secref{sec:term-decls}) refers to an anonymous variable that is
-  implicitly abstracted over its context of locally bound variables.
-  For example, this allows pattern matching of @{text "{x. f x = g
-  x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by
-  using both bound and schematic dummies.
-
-  \end{description}
-
-  \item The three literal dots ``@{verbatim "..."}'' may also be
-  written as the ellipsis symbol @{verbatim "\<dots>"}.  In both cases this
-  refers to a special schematic variable, which is bound in the
-  context.  This special term abbreviation works nicely with
-  calculational reasoning (\secref{sec:calculation}).
-
-  \item @{verbatim CONST} ensures that the given identifier is treated
-  as constant term, and passed through the parse tree in fully
-  internalized form.  This is particularly relevant for translation
-  rules (\secref{sec:syn-trans}), notably on the RHS.
-
-  \item @{verbatim XCONST} is similar to @{verbatim CONST}, but
-  retains the constant name as given.  This is only relevant to
-  translation rules (\secref{sec:syn-trans}), notably on the LHS.
-
-  \end{itemize}
-*}
-
-
-subsection {* Inspecting the syntax *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "print_syntax"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{command "print_syntax"} prints the inner syntax of the
-  current context.  The output can be quite large; the most important
-  sections are explained below.
-
-  \begin{description}
-
-  \item @{text "lexicon"} lists the delimiters of the inner token
-  language; see \secref{sec:inner-lex}.
-
-  \item @{text "prods"} lists the productions of the underlying
-  priority grammar; see \secref{sec:priority-grammar}.
-
-  The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
-  "A[p]"}; delimiters are quoted.  Many productions have an extra
-  @{text "\<dots> => name"}.  These names later become the heads of parse
-  trees; they also guide the pretty printer.
-
-  Productions without such parse tree names are called \emph{copy
-  productions}.  Their right-hand side must have exactly one
-  nonterminal symbol (or named token).  The parser does not create a
-  new parse tree node for copy productions, but simply returns the
-  parse tree of the right-hand symbol.
-
-  If the right-hand side of a copy production consists of a single
-  nonterminal without any delimiters, then it is called a \emph{chain
-  production}.  Chain productions act as abbreviations: conceptually,
-  they are removed from the grammar by adding new productions.
-  Priority information attached to chain productions is ignored; only
-  the dummy value @{text "-1"} is displayed.
-
-  \item @{text "print modes"} lists the alternative print modes
-  provided by this grammar; see \secref{sec:print-modes}.
-
-  \item @{text "parse_rules"} and @{text "print_rules"} relate to
-  syntax translations (macros); see \secref{sec:syn-trans}.
-
-  \item @{text "parse_ast_translation"} and @{text
-  "print_ast_translation"} list sets of constants that invoke
-  translation functions for abstract syntax trees, which are only
-  required in very special situations; see \secref{sec:tr-funs}.
-
-  \item @{text "parse_translation"} and @{text "print_translation"}
-  list the sets of constants that invoke regular translation
-  functions; see \secref{sec:tr-funs}.
-
-  \end{description}
-
-  \end{description}
-*}
-
-
-subsection {* Ambiguity of parsed expressions *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def syntax_ambiguity_warning} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def syntax_ambiguity_limit} & : & @{text attribute} & default @{text 10} \\
-  \end{tabular}
-
-  Depending on the grammar and the given input, parsing may be
-  ambiguous.  Isabelle lets the Earley parser enumerate all possible
-  parse trees, and then tries to make the best out of the situation.
-  Terms that cannot be type-checked are filtered out, which often
-  leads to a unique result in the end.  Unlike regular type
-  reconstruction, which is applied to the whole collection of input
-  terms simultaneously, the filtering stage only treats each given
-  term in isolation.  Filtering is also not attempted for individual
-  types or raw ASTs (as required for @{command translations}).
-
-  Certain warning or error messages are printed, depending on the
-  situation and the given configuration options.  Parsing ultimately
-  fails if multiple results remain after the filtering phase.
-
-  \begin{description}
-
-  \item @{attribute syntax_ambiguity_warning} controls output of
-  explicit warning messages about syntax ambiguity.
-
-  \item @{attribute syntax_ambiguity_limit} determines the number of
-  resulting parse trees that are shown as part of the printed message
-  in case of an ambiguity.
-
-  \end{description}
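-
-  For example, the limit may be adjusted locally like this (a small
-  sketch using the standard configuration attribute syntax):
-  \begin{ttbox}
-  declare [[syntax_ambiguity_limit = 5]]
-  \end{ttbox}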
-*}
-
-
-section {* Syntax transformations \label{sec:syntax-transformations} *}
-
-text {* The inner syntax engine of Isabelle provides separate
-  mechanisms to transform parse trees either via rewrite systems on
-  first-order ASTs (\secref{sec:syn-trans}), or ML functions on ASTs
-  or syntactic @{text "\<lambda>"}-terms (\secref{sec:tr-funs}).  This works
-  both for parsing and printing, as outlined in
-  \figref{fig:parse-print}.
-
-  \begin{figure}[htbp]
-  \begin{center}
-  \begin{tabular}{cl}
-  string          & \\
-  @{text "\<down>"}     & lexer + parser \\
-  parse tree      & \\
-  @{text "\<down>"}     & parse AST translation \\
-  AST             & \\
-  @{text "\<down>"}     & AST rewriting (macros) \\
-  AST             & \\
-  @{text "\<down>"}     & parse translation \\
-  --- pre-term ---    & \\
-  @{text "\<down>"}     & print translation \\
-  AST             & \\
-  @{text "\<down>"}     & AST rewriting (macros) \\
-  AST             & \\
-  @{text "\<down>"}     & print AST translation \\
-  string          &
-  \end{tabular}
-  \end{center}
-  \caption{Parsing and printing with translations}\label{fig:parse-print}
-  \end{figure}
-
-  These intermediate syntax tree formats eventually lead to a pre-term
-  with all names and binding scopes resolved, but most type
-  information still missing.  Explicit type constraints might be given by
-  the user, or implicit position information by the system --- both
-  need to be passed through carefully by syntax transformations.
-
-  Pre-terms are further processed by the so-called \emph{check} and
-  \emph{uncheck} phases that are intertwined with type-inference (see
-  also \cite{isabelle-implementation}).  The latter allows operating
-  on higher-order abstract syntax with proper binding and type
-  information already available.
-
-  As a rule of thumb, anything that manipulates bindings of variables
-  or constants needs to be implemented as a syntax transformation (see
-  below).  Anything else is better done via check/uncheck: a prominent
-  example application is the @{command abbreviation} concept of
-  Isabelle/Pure. *}
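-
-text {* For example, folding and unfolding of abbreviations happens
-  during the check/uncheck phases rather than in the syntax layer
-  proper.  A small sketch in Isabelle/HOL notation (the name @{text
-  "conj3"} is purely illustrative):
-  \begin{ttbox}
-  abbreviation conj3 :: "bool => bool => bool => bool"
-    where "conj3 A B C == A & B & C"
-  \end{ttbox}
-*}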
-
-
-subsection {* Abstract syntax trees \label{sec:ast} *}
-
-text {* The ML datatype @{ML_type Ast.ast} explicitly represents the
-  intermediate AST format that is used for syntax rewriting
-  (\secref{sec:syn-trans}).  It is defined in ML as follows:
-  \begin{ttbox}
-  datatype ast =
-    Constant of string |
-    Variable of string |
-    Appl of ast list
-  \end{ttbox}
-
-  An AST is either an atom (constant or variable) or a list of (at
-  least two) subtrees.  Occasional diagnostic output of ASTs uses
-  notation that resembles the S-expressions of LISP.  Constant atoms are
-  shown as quoted strings, variable atoms as non-quoted strings and
-  applications as a parenthesized list of subtrees.  For example, the
-  AST
-  @{ML [display] "Ast.Appl
-  [Ast.Constant \"_abs\", Ast.Variable \"x\", Ast.Variable \"t\"]"}
-  is pretty-printed as @{verbatim "(\"_abs\" x t)"}.  Note that
-  @{verbatim "()"} and @{verbatim "(x)"} are excluded as ASTs, because
-  they have too few subtrees.
-
-  \medskip AST application is merely a pro-forma mechanism to indicate
-  certain syntactic structures.  Thus @{verbatim "(c a b)"} could mean
-  either term application or type application, depending on the
-  syntactic context.
-
-  Nested application like @{verbatim "((\"_abs\" x t) u)"} is also
-  possible, but ASTs are definitely first-order: the syntax constant
-  @{verbatim "\"_abs\""} does not bind the @{verbatim x} in any way.
-  Proper bindings are introduced in later stages of the term syntax,
-  where @{verbatim "(\"_abs\" x t)"} becomes an @{ML Abs} node and
-  occurrences of @{verbatim x} in @{verbatim t} are replaced by bound
-  variables (represented as de-Bruijn indices).
-*}
-
-
-subsubsection {* AST constants versus variables *}
-
-text {* Depending on the situation --- input syntax, output syntax,
-  translation patterns --- the distinction of atomic ASTs as @{ML
-  Ast.Constant} versus @{ML Ast.Variable} serves slightly different
-  purposes.
-
-  Input syntax of a term such as @{text "f a b = c"} does not yet
-  indicate the scopes of atomic entities @{text "f, a, b, c"}: they
-  could be global constants or local variables, even bound ones
-  depending on the context of the term.  @{ML Ast.Variable} leaves
-  this choice still open: later syntax layers (or translation
-  functions) may capture such a variable to determine its role
-  specifically, to make it a constant, bound variable, free variable
-  etc.  In contrast, syntax translations that introduce already known
-  constants would rather do it via @{ML Ast.Constant} to prevent
-  accidental re-interpretation later on.
-
-  Output syntax turns term constants into @{ML Ast.Constant} and
-  variables (free or schematic) into @{ML Ast.Variable}.  This
-  information is precise when printing fully formal @{text "\<lambda>"}-terms.
-
-  \medskip AST translation patterns (\secref{sec:syn-trans}) that
-  represent terms cannot distinguish constants and variables
-  syntactically.  Explicit indication of @{text "CONST c"} inside the
-  term language is required, unless @{text "c"} is known as special
-  \emph{syntax constant} (see also @{command syntax}).  It is also
-  possible to use @{command syntax} declarations (without mixfix
-  annotation) to enforce that certain unqualified names are always
-  treated as constant within the syntax machinery.
-
-  The situation is simpler for ASTs that represent types or sorts,
-  since the concrete syntax already distinguishes type variables from
-  type constants (constructors).  So @{text "('a, 'b) foo"}
-  corresponds to an AST application of some constant for @{text foo}
-  and variable arguments for @{text "'a"} and @{text "'b"}.  Note that
-  the postfix application is merely a feature of the concrete syntax,
-  while in the AST the constructor occurs in head position.  *}
-
-
-subsubsection {* Authentic syntax names *}
-
-text {* Naming constant entities within ASTs is another delicate
-  issue.  Unqualified names are resolved in the name space tables in
-  the last stage of parsing, after all translations have been applied.
-  Since syntax transformations do not know about this later name
-  resolution, there can be surprises in boundary cases.
-
-  \emph{Authentic syntax names} for @{ML Ast.Constant} avoid this
-  problem: the fully-qualified constant name with a special prefix for
-  its formal category (@{text "class"}, @{text "type"}, @{text
-  "const"}, @{text "fixed"}) represents the information faithfully
-  within the untyped AST format.  Accidental overlap with free or
-  bound variables is excluded as well.  Authentic syntax names work
-  implicitly in the following situations:
-
-  \begin{itemize}
-
-  \item Input of term constants (or fixed variables) that are
-  introduced by concrete syntax via @{command notation}: the
-  correspondence of a particular grammar production to some known term
-  entity is preserved.
-
-  \item Input of type constants (constructors) and type classes ---
-  thanks to explicit syntactic distinction independently of the
-  context.
-
-  \item Output of term constants, type constants, type classes ---
-  this information is already available from the internal term to be
-  printed.
-
-  \end{itemize}
-
-  In other words, syntax transformations that operate on input terms
-  written as prefix applications are difficult to make robust.
-  Luckily, this case rarely occurs in practice, because syntax forms
-  to be translated usually correspond to some concrete notation. *}
-
-
-subsection {* Raw syntax and translations \label{sec:syn-trans} *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{command_def "nonterminal"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "syntax"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "no_syntax"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "translations"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "no_translations"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{attribute_def syntax_ast_trace} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def syntax_ast_stats} & : & @{text attribute} & default @{text false} \\
-  \end{tabular}
-
-  Unlike mixfix notation for existing formal entities
-  (\secref{sec:notation}), raw syntax declarations provide full access
-  to the priority grammar of the inner syntax, without any sanity
-  checks.  This includes additional syntactic categories (via
-  @{command nonterminal}) and free-form grammar productions (via
-  @{command syntax}).  Additional syntax translations (or macros, via
-  @{command translations}) are required to turn resulting parse trees
-  into proper representations of formal entities again.
-
-  @{rail \<open>
-    @@{command nonterminal} (@{syntax name} + @'and')
-    ;
-    (@@{command syntax} | @@{command no_syntax}) @{syntax mode}? (constdecl +)
-    ;
-    (@@{command translations} | @@{command no_translations})
-      (transpat ('==' | '=>' | '<=' | '\<rightleftharpoons>' | '\<rightharpoonup>' | '\<leftharpoondown>') transpat +)
-    ;
-
-    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
-    ;
-    mode: ('(' ( @{syntax name} | @'output' | @{syntax name} @'output' ) ')')
-    ;
-    transpat: ('(' @{syntax nameref} ')')? @{syntax string}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "nonterminal"}~@{text c} declares a type
-  constructor @{text c} (without arguments) to act as purely syntactic
-  type: a nonterminal symbol of the inner syntax.
-
-  \item @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
-  priority grammar and the pretty printer table for the given print
-  mode (default @{verbatim "\"\""}). An optional keyword @{keyword_ref
-  "output"} means that only the pretty printer table is affected.
-
-  Following \secref{sec:mixfix}, the mixfix annotation @{text "mx =
-  template ps q"} together with type @{text "\<sigma> = \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} and
-  specify a grammar production.  The @{text template} contains
-  delimiter tokens that surround @{text "n"} argument positions
-  (@{verbatim "_"}).  The latter correspond to nonterminal symbols
-  @{text "A\<^sub>i"} derived from the argument types @{text "\<tau>\<^sub>i"} as
-  follows:
-  \begin{itemize}
-
-  \item @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
-
-  \item @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
-  constructor @{text "\<kappa> \<noteq> prop"}
-
-  \item @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
-
-  \item @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
-  (syntactic type constructor)
-
-  \end{itemize}
-
-  Each @{text "A\<^sub>i"} is decorated by priority @{text "p\<^sub>i"} from the
-  given list @{text "ps"}; misssing priorities default to 0.
-
-  The resulting nonterminal of the production is determined similarly
-  from type @{text "\<tau>"}, with priority @{text "q"} and default 1000.
-
-  \medskip Parsing via this production produces parse trees @{text
-  "t\<^sub>1, \<dots>, t\<^sub>n"} for the argument slots.  The resulting parse tree is
-  composed as @{text "c t\<^sub>1 \<dots> t\<^sub>n"}, by using the syntax constant @{text
-  "c"} of the syntax declaration.
-
-  Such syntactic constants are invented on the spot, without formal
-  check wrt.\ existing declarations.  It is conventional to use plain
-  identifiers prefixed by a single underscore (e.g.\ @{text
-  "_foobar"}).  Names should be chosen with care, to avoid clashes
-  with other syntax declarations.
-
-  \medskip The special case of a copy production is specified by @{text
-  "c = "}@{verbatim "\"\""} (empty string).  It means that the
-  resulting parse tree @{text "t"} is copied directly, without any
-  further decoration.
-
-  \item @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
-  declarations (and translations) resulting from @{text decls}, which
-  are interpreted in the same manner as for @{command "syntax"} above.
-
-  \item @{command "translations"}~@{text rules} specifies syntactic
-  translation rules (i.e.\ macros) as first-order rewrite rules on
-  ASTs (\secref{sec:ast}).  The theory context maintains two
-  independent lists of translation rules: parse rules (@{verbatim "=>"}
-  or @{text "\<rightharpoonup>"}) and print rules (@{verbatim "<="} or @{text "\<leftharpoondown>"}).
-  For convenience, both can be specified simultaneously as parse~/
-  print rules (@{verbatim "=="} or @{text "\<rightleftharpoons>"}).
-
-  Translation patterns may be prefixed by the syntactic category to be
-  used for parsing; the default is @{text logic} which means that
-  regular term syntax is used.  Both sides of the syntax translation
-  rule undergo parsing and parse AST translations
-  (\secref{sec:tr-funs}), in order to perform some fundamental
-  normalization like @{text "\<lambda>x y. b \<leadsto> \<lambda>x. \<lambda>y. b"}, but other AST
-  translation rules are \emph{not} applied recursively here.
-
-  When processing AST patterns, the inner syntax lexer runs in a
-  different mode that allows identifiers to start with underscore.
-  This accommodates the usual naming convention for auxiliary syntax
-  constants --- those that do not have a logical counterpart --- by
-  allowing arbitrary AST applications to be specified within the term
-  syntax, independently of the corresponding concrete syntax.
-
-  Atomic ASTs are distinguished as @{ML Ast.Constant} versus @{ML
-  Ast.Variable} as follows: a qualified name or syntax constant
-  declared via @{command syntax}, or parse tree head of concrete
-  notation becomes @{ML Ast.Constant}, anything else @{ML
-  Ast.Variable}.  Note that @{text CONST} and @{text XCONST} within
-  the term language (\secref{sec:pure-grammar}) allow to enforce
-  treatment as constants.
-
-  AST rewrite rules @{text "(lhs, rhs)"} need to obey the following
-  side-conditions:
-
-  \begin{itemize}
-
-  \item Rules must be left linear: @{text "lhs"} must not contain
-  repeated variables.\footnote{The deeper reason for this is that AST
-  equality is not well-defined: different occurrences of the ``same''
-  AST could be decorated differently by accidental type-constraints or
-  source position information, for example.}
-
-  \item Every variable in @{text "rhs"} must also occur in @{text
-  "lhs"}.
-
-  \end{itemize}
-
-  \item @{command "no_translations"}~@{text rules} removes syntactic
-  translation rules, which are interpreted in the same manner as for
-  @{command "translations"} above.
-
-  \item @{attribute syntax_ast_trace} and @{attribute
-  syntax_ast_stats} control diagnostic output in the AST normalization
-  process, when translation rules are applied to concrete input or
-  output.
-
-  \end{description}
-
-  Raw syntax and translations provide somewhat lower-level access to
-  the grammar and the form of resulting parse trees.  It is often
-  possible to avoid this untyped macro mechanism, and use type-safe
-  @{command abbreviation} or @{command notation} instead.  Some
-  important situations where @{command syntax} and @{command
-  translations} are really needed are as follows:
-
-  \begin{itemize}
-
-  \item Iterated replacement via recursive @{command translations}.
-  For example, consider list enumeration @{term "[a, b, c, d]"} as
-  defined in theory @{theory List} in Isabelle/HOL; see also the
-  sketch below.
-
-  \item Change of binding status of variables: anything beyond the
-  built-in @{keyword "binder"} mixfix annotation requires explicit
-  syntax translations.  For example, consider list filter
-  comprehension @{term "[x \<leftarrow> xs . P]"} as defined in theory @{theory
-  List} in Isabelle/HOL.
-
-  \end{itemize}
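-
-  The first situation can be sketched as follows (a simplified
-  variant of the list enumeration syntax of theory @{theory List},
-  reusing the pre-existing @{text "args"} nonterminal for
-  comma-separated argument sequences):
-  \begin{ttbox}
-  syntax
-    "_list" :: "args => 'a list"    ("[(_)]")
-  translations
-    "[x, xs]" == "x#[xs]"
-    "[x]" == "x#[]"
-  \end{ttbox}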
-*}
-
-subsubsection {* Applying translation rules *}
-
-text {* As a term is being parsed or printed, an AST is generated as
-  an intermediate form according to \figref{fig:parse-print}.  The AST
-  is normalized by applying translation rules in the manner of a
-  first-order term rewriting system.  We first examine how a single
-  rule is applied.
-
-  Let @{text "t"} be the abstract syntax tree to be normalized and
-  @{text "(lhs, rhs)"} some translation rule.  A subtree @{text "u"}
-  of @{text "t"} is called \emph{redex} if it is an instance of @{text
-  "lhs"}; in this case the pattern @{text "lhs"} is said to match the
-  object @{text "u"}.  A redex matched by @{text "lhs"} may be
-  replaced by the corresponding instance of @{text "rhs"}, thus
-  \emph{rewriting} the AST @{text "t"}.  Matching requires some notion
-  of \emph{place-holders} in rule patterns: @{ML Ast.Variable} serves
-  this purpose.
-
-  More precisely, the matching of the object @{text "u"} against the
-  pattern @{text "lhs"} is performed as follows:
-
-  \begin{itemize}
-
-  \item Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
-  Ast.Constant}~@{text "x"} are matched by pattern @{ML
-  Ast.Constant}~@{text "x"}.  Thus all atomic ASTs in the object are
-  treated as (potential) constants, and a successful match makes them
-  actual constants even before name space resolution (see also
-  \secref{sec:ast}).
-
-  \item Object @{text "u"} is matched by pattern @{ML
-  Ast.Variable}~@{text "x"}, binding @{text "x"} to @{text "u"}.
-
-  \item Object @{ML Ast.Appl}~@{text "us"} is matched by @{ML
-  Ast.Appl}~@{text "ts"} if @{text "us"} and @{text "ts"} have the
-  same length and each corresponding subtree matches.
-
-  \item In every other case, matching fails.
-
-  \end{itemize}
-
-  A successful match yields a substitution that is applied to @{text
-  "rhs"}, generating the instance that replaces @{text "u"}.
-
-  Normalizing an AST involves repeatedly applying translation rules
-  until none are applicable.  This works yoyo-like: top-down,
-  bottom-up, top-down, etc.  At each subtree position, rules are
-  chosen in order of appearance in the theory definitions.
-
-  The configuration options @{attribute syntax_ast_trace} and
-  @{attribute syntax_ast_stats} might help to understand this process
-  and diagnose problems.
-
-  \begin{warn}
-  If syntax translation rules work incorrectly, the output of
-  @{command_ref print_syntax} with its \emph{rules} sections reveals the
-  actual internal forms of AST patterns, without potentially confusing
-  concrete syntax.  Recall that AST constants appear as quoted strings
-  and variables without quotes.
-  \end{warn}
-
-  \begin{warn}
-  If @{attribute_ref eta_contract} is set to @{text "true"}, terms
-  will be @{text "\<eta>"}-contracted \emph{before} the AST rewriter sees
-  them.  Thus some abstraction nodes needed for print rules to match
-  may vanish.  For example, @{text "Ball A (\<lambda>x. P x)"} would contract
-  to @{text "Ball A P"} and the standard print rule would fail to
-  apply.  This problem can be avoided by hand-written ML translation
-  functions (see also \secref{sec:tr-funs}), which is in fact the same
-  mechanism used in built-in @{keyword "binder"} declarations.
-  \end{warn}
-*}
-
-
-subsection {* Syntax translation functions \label{sec:tr-funs} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "parse_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "parse_translation"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "print_translation"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "typed_print_translation"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "print_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{ML_antiquotation_def "class_syntax"} & : & @{text "ML antiquotation"} \\
-    @{ML_antiquotation_def "type_syntax"} & : & @{text "ML antiquotation"} \\
-    @{ML_antiquotation_def "const_syntax"} & : & @{text "ML antiquotation"} \\
-    @{ML_antiquotation_def "syntax_const"} & : & @{text "ML antiquotation"} \\
-  \end{matharray}
-
-  Syntax translation functions written in ML admit almost arbitrary
-  manipulations of inner syntax, at the expense of some complexity and
-  obscurity in the implementation.
-
-  @{rail \<open>
-  ( @@{command parse_ast_translation} | @@{command parse_translation} |
-    @@{command print_translation} | @@{command typed_print_translation} |
-    @@{command print_ast_translation}) @{syntax text}
-  ;
-  (@@{ML_antiquotation class_syntax} |
-   @@{ML_antiquotation type_syntax} |
-   @@{ML_antiquotation const_syntax} |
-   @@{ML_antiquotation syntax_const}) name
-  \<close>}
-
-  \begin{description}
-
-  \item @{command parse_translation} etc. declare syntax translation
-  functions to the theory.  Any of these commands have a single
-  @{syntax text} argument that refers to an ML expression of
-  appropriate type as follows:
-
-  \medskip
-  {\footnotesize
-  \begin{tabular}{l}
-  @{command parse_ast_translation} : \\
-  \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
-  @{command parse_translation} : \\
-  \quad @{ML_type "(string * (Proof.context -> term list -> term)) list"} \\
-  @{command print_translation} : \\
-  \quad @{ML_type "(string * (Proof.context -> term list -> term)) list"} \\
-  @{command typed_print_translation} : \\
-  \quad @{ML_type "(string * (Proof.context -> typ -> term list -> term)) list"} \\
-  @{command print_ast_translation} : \\
-  \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
-  \end{tabular}}
-  \medskip
-
-  The argument list consists of @{text "(c, tr)"} pairs, where @{text
-  "c"} is the syntax name of the formal entity involved, and @{text
-  "tr"} a function that translates a syntax form @{text "c args"} into
-  @{text "tr ctxt args"} (depending on the context).  The Isabelle/ML
-  naming convention for parse translations is @{text "c_tr"} and for
-  print translations @{text "c_tr'"}.
-
-  The @{command_ref print_syntax} command displays the sets of names
-  associated with the translation functions of a theory under @{text
-  "parse_ast_translation"} etc.
-
-  \item @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
-  @{text "@{const_syntax c}"} inline the authentic syntax name of the
-  given formal entities into the ML source.  This is the
-  fully-qualified logical name prefixed by a special marker to
-  indicate its kind: thus different logical name spaces are properly
-  distinguished within parse trees.
-
-  \item @{text "@{const_syntax c}"} inlines the name @{text "c"} of
-  the given syntax constant, having checked that it has been declared
-  via some @{command syntax} commands within the theory context.  Note
-  that the usual naming convention makes syntax constants start with
-  underscore, to reduce the chance of accidental clashes with other
-  names occurring in parse trees (unqualified constants etc.).
-
-  \end{description}
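-
-  For example, a parse translation for a hypothetical syntax constant
-  @{text "_pair"} (previously declared via @{command syntax}) might be
-  given by the following argument, a minimal sketch that produces an
-  application of the constant @{text "Pair"}:
-  \begin{ttbox}
-  [("_pair", fn ctxt => fn [t, u] =>
-      Syntax.const "Pair" $ t $ u)]
-  \end{ttbox}
-  In practice, the literal strings would be replaced by the @{text
-  "@{syntax_const}"} and @{text "@{const_syntax}"} antiquotations
-  described above, in order to refer to authentic internal names.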
-*}
-
-
-subsubsection {* The translation strategy *}
-
-text {* The different kinds of translation functions are invoked during
-  the transformations between parse trees, ASTs and syntactic terms
-  (cf.\ \figref{fig:parse-print}).  Whenever a combination of the form
-  @{text "c x\<^sub>1 \<dots> x\<^sub>n"} is encountered, and a translation function
-  @{text "f"} of appropriate kind is declared for @{text "c"}, the
-  result is produced by evaluation of @{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} in ML.
-
-  For AST translations, the arguments @{text "x\<^sub>1, \<dots>, x\<^sub>n"} are ASTs.  A
-  combination has the form @{ML "Ast.Constant"}~@{text "c"} or @{ML
-  "Ast.Appl"}~@{text "["}@{ML Ast.Constant}~@{text "c, x\<^sub>1, \<dots>, x\<^sub>n]"}.
-  For term translations, the arguments are terms and a combination has
-  the form @{ML Const}~@{text "(c, \<tau>)"} or @{ML Const}~@{text "(c, \<tau>)
-  $ x\<^sub>1 $ \<dots> $ x\<^sub>n"}.  Terms allow more sophisticated transformations
-  than ASTs do, typically involving abstractions and bound
-  variables. \emph{Typed} print translations may even peek at the type
-  @{text "\<tau>"} of the constant they are invoked on, although some
-  information might have been suppressed for term output already.
-
-  Regardless of whether they act on ASTs or terms, translation
-  functions called during the parsing process differ from those for
-  printing in their overall behaviour:
-
-  \begin{description}
-
-  \item [Parse translations] are applied bottom-up.  The arguments are
-  already in translated form.  The translations must not fail;
-  exceptions trigger an error message.  There may be at most one
-  function associated with any syntactic name.
-
-  \item [Print translations] are applied top-down.  They are supplied
-  with arguments that are partly still in internal form.  The result
-  again undergoes translation; therefore a print translation should
-  not introduce as head the very constant that invoked it.  The
-  function may raise exception @{ML Match} to indicate failure; in
-  this event it has no effect.  Multiple functions associated with
-  some syntactic name are tried in the order of declaration in the
-  theory.
-
-  \end{description}
-
-  Only constant atoms --- constructor @{ML Ast.Constant} for ASTs and
-  @{ML Const} for terms --- can invoke translation functions.  This
-  means that parse translations can only be associated with parse tree
-  heads of concrete syntax, or syntactic constants introduced via
-  other translations.  For plain identifiers within the term language,
-  the status of constant versus variable is not yet known during
-  parsing.  This is in contrast to print translations, where constants
-  are explicitly known from the given term in its fully internal form.
-*}
-
-
-subsection {* Built-in syntax transformations *}
-
-text {*
-  Here are some further details of the main syntax transformation
-  phases of \figref{fig:parse-print}.
-*}
-
-
-subsubsection {* Transforming parse trees to ASTs *}
-
-text {* The parse tree is the raw output of the parser.  It is
-  transformed into an AST according to some basic scheme that may be
-  augmented by AST translation functions as explained in
-  \secref{sec:tr-funs}.
-
-  The parse tree is constructed by nesting the right-hand sides of the
-  productions used to recognize the input.  Such parse trees are
-  simply lists of tokens and constituent parse trees, the latter
-  representing the nonterminals of the productions.  Ignoring AST
-  translation functions, parse trees are transformed to ASTs by
-  stripping out delimiters and copy productions, while retaining some
-  source position information from input tokens.
-
-  The Pure syntax provides predefined AST translations to make the
-  basic @{text "\<lambda>"}-term structure more apparent within the
-  (first-order) AST representation, and thus facilitate the use of
-  @{command translations} (see also \secref{sec:syn-trans}).  This
-  covers ordinary term application, type application, nested
-  abstraction, iterated meta implications and function types.  The
-  effect on some representative input strings is illustrated as
-  follows:
-
-  \begin{center}
-  \begin{tabular}{ll}
-  input source & AST \\
-  \hline
-  @{text "f x y z"} & @{verbatim "(f x y z)"} \\
-  @{text "'a ty"} & @{verbatim "(ty 'a)"} \\
-  @{text "('a, 'b)ty"} & @{verbatim "(ty 'a 'b)"} \\
-  @{text "\<lambda>x y z. t"} & @{verbatim "(\"_abs\" x (\"_abs\" y (\"_abs\" z t)))"} \\
-  @{text "\<lambda>x :: 'a. t"} & @{verbatim "(\"_abs\" (\"_constrain\" x 'a) t)"} \\
-  @{text "\<lbrakk>P; Q; R\<rbrakk> \<Longrightarrow> S"} & @{verbatim "(\"==>\" P (\"==>\" Q (\"==>\" R S)))"} \\
-   @{text "['a, 'b, 'c] \<Rightarrow> 'd"} & @{verbatim "(\"fun\" 'a (\"fun\" 'b (\"fun\" 'c 'd)))"} \\
-  \end{tabular}
-  \end{center}
-
-  Note that type and sort constraints may occur in further places ---
-  translations need to be ready to cope with them.  The built-in
-  syntax transformation from parse trees to ASTs inserts additional
-  constraints that represent source positions.
-*}
-
-
-subsubsection {* Transforming ASTs to terms *}
-
-text {* After application of macros (\secref{sec:syn-trans}), the AST
-  is transformed into a term.  This term still lacks proper type
-  information, but it might contain some constraints consisting of
-  applications with head @{verbatim "_constrain"}, where the second
-  argument is a type encoded as a pre-term within the syntax.  Type
-  inference later introduces correct types, or indicates type errors
-  in the input.
-
-  Ignoring parse translations, ASTs are transformed to terms by
-  mapping AST constants to term constants, AST variables to term
-  variables or constants (according to the name space), and AST
-  applications to iterated term applications.
-
-  The outcome is still a first-order term.  Proper abstractions and
-  bound variables are introduced by parse translations associated with
-  certain syntax constants.  Thus @{verbatim "(_abs x x)"} eventually
-  becomes a de-Bruijn term @{verbatim "Abs (\"x\", _, Bound 0)"}.
-*}
-
-
-subsubsection {* Printing of terms *}
-
-text {* The output phase is essentially the inverse of the input
-  phase.  Terms are translated via abstract syntax trees into
-  pretty-printed text.
-
-  Ignoring print translations, the transformation maps term constants,
-  variables and applications to the corresponding constructs on ASTs.
-  Abstractions are mapped to applications of the special constant
-  @{verbatim "_abs"} as seen before.  Type constraints are represented
-  via special @{verbatim "_constrain"} forms, according to various
-  policies of type annotation determined elsewhere.  Sort constraints
-  of type variables are handled in a similar fashion.
-
-  After application of macros (\secref{sec:syn-trans}), the AST is
-  finally pretty-printed.  The built-in print AST translations reverse
-  the corresponding parse AST translations.
-
-  \medskip For the actual printing process, the priority grammar
-  (\secref{sec:priority-grammar}) plays a vital role: productions are
-  used as templates for pretty printing, with argument slots stemming
-  from nonterminals, and syntactic sugar stemming from literal tokens.
-
-  Each AST application with constant head @{text "c"} and arguments
-  @{text "t\<^sub>1"}, \dots, @{text "t\<^sub>n"} (for @{text "n = 0"} the AST is
-  just the constant @{text "c"} itself) is printed according to the
-  first grammar production of result name @{text "c"}.  The required
-  syntax priority of the argument slot is given by its nonterminal
-  @{text "A\<^sup>(\<^sup>p\<^sup>)"}.  The argument @{text "t\<^sub>i"} that corresponds to the
-  position of @{text "A\<^sup>(\<^sup>p\<^sup>)"} is printed recursively, and then put in
-  parentheses \emph{if} its priority @{text "p"} requires this.  The
-  resulting output is concatenated with the syntactic sugar according
-  to the grammar production.
-
-  If an AST application @{text "(c x\<^sub>1 \<dots> x\<^sub>m)"} has more arguments than
-  the corresponding production, it is first split into @{text "((c x\<^sub>1
-  \<dots> x\<^sub>n) x\<^sub>n\<^sub>+\<^sub>1 \<dots> x\<^sub>m)"} and then printed recursively as above.
-
-  Applications with too few arguments or with non-constant head or
-  without a corresponding production are printed in prefix-form like
-  @{text "f t\<^sub>1 \<dots> t\<^sub>n"} for terms.
-
-  Multiple productions associated with some name @{text "c"} are tried
-  in order of appearance within the grammar.  An occurrence of some
-  AST variable @{text "x"} is printed as @{text "x"} outright.
-
-  \medskip White space is \emph{not} inserted automatically.  If
-  blanks (or breaks) are required to separate tokens, they need to be
-  specified in the mixfix declaration (\secref{sec:mixfix}).
-*}
-
-end
--- a/src/Doc/IsarRef/ML_Tactic.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,177 +0,0 @@
-theory ML_Tactic
-imports Base Main
-begin
-
-chapter {* ML tactic expressions *}
-
-text {*
-  Isar proof methods closely resemble traditional tactics when used
-  in unstructured sequences of @{command "apply"} commands.
-  Isabelle/Isar provides emulations for all major ML tactics of
-  classic Isabelle --- mostly for the sake of easy porting of existing
-  developments, as actual Isar proof texts would demand much less
-  diversity of proof methods.
-
-  Unlike tactic expressions in ML, Isar proof methods provide proper
-  concrete syntax for additional arguments, options, modifiers etc.
-  Thus a typical method text is usually more concise than the
-  corresponding ML tactic.  Furthermore, the Isar versions of classic
-  Isabelle tactics often cover several variant forms by a single
-  method with separate options to tune the behavior.  For example,
-  method @{method simp} replaces all of @{ML simp_tac}~/ @{ML
-  asm_simp_tac}~/ @{ML full_simp_tac}~/ @{ML asm_full_simp_tac}; there
-  is also concrete syntax for augmenting the Simplifier context (the
-  current ``simpset'') in a convenient way.
-*}
-
-
-section {* Resolution tactics *}
-
-text {*
-  Classic Isabelle provides several variant forms of tactics for
-  single-step rule applications (based on higher-order resolution).
-  The space of resolution tactics has the following main dimensions.
-
-  \begin{enumerate}
-
-  \item The ``mode'' of resolution: intro, elim, destruct, or forward
-  (e.g.\ @{ML resolve_tac}, @{ML eresolve_tac}, @{ML dresolve_tac},
-  @{ML forward_tac}).
-
-  \item Optional explicit instantiation (e.g.\ @{ML resolve_tac} vs.\
-  @{ML res_inst_tac}).
-
-  \item Abbreviations for singleton arguments (e.g.\ @{ML resolve_tac}
-  vs.\ @{ML rtac}).
-
-  \end{enumerate}
-
-  Basically, the set of Isar tactic emulations @{method rule_tac},
-  @{method erule_tac}, @{method drule_tac}, @{method frule_tac} (see
-  \secref{sec:tactics}) would be sufficient to cover the four modes,
-  either with or without instantiation, and either with single or
-  multiple arguments.  In most cases, however, it is more convenient to
-  use the plain @{method_ref (Pure) rule} method, or any of its
-  ``improper'' variants @{method erule}, @{method drule}, @{method
-  frule}.  Note that explicit goal addressing is only supported by the
-  actual @{method rule_tac} version.
-
-  With this in mind, plain resolution tactics correspond to Isar
-  methods as follows.
-
-  \medskip
-  \begin{tabular}{lll}
-    @{ML rtac}~@{text "a 1"} & & @{text "rule a"} \\
-    @{ML resolve_tac}~@{text "[a\<^sub>1, \<dots>] 1"} & & @{text "rule a\<^sub>1 \<dots>"} \\
-    @{ML res_inst_tac}~@{text "ctxt [(x\<^sub>1, t\<^sub>1), \<dots>] a 1"} & &
-    @{text "rule_tac x\<^sub>1 = t\<^sub>1 \<AND> \<dots> \<IN> a"} \\[0.5ex]
-    @{ML rtac}~@{text "a i"} & & @{text "rule_tac [i] a"} \\
-    @{ML resolve_tac}~@{text "[a\<^sub>1, \<dots>] i"} & & @{text "rule_tac [i] a\<^sub>1 \<dots>"} \\
-    @{ML res_inst_tac}~@{text "ctxt [(x\<^sub>1, t\<^sub>1), \<dots>] a i"} & &
-    @{text "rule_tac [i] x\<^sub>1 = t\<^sub>1 \<AND> \<dots> \<IN> a"} \\
-  \end{tabular}
-  \medskip
-
-  Note that explicit goal addressing may usually be avoided by
-  changing the order of subgoals with @{command "defer"} or @{command
-  "prefer"} (see \secref{sec:tactic-commands}).
-*}
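-
-text {* For illustration only, here is a contrived goal solved via the
-  emulated @{method rule_tac} method with explicit instantiation, in
-  the spirit of ML @{ML res_inst_tac}; the particular rule and
-  instance are arbitrary.
-*}
-
-lemma "\<exists>x::nat. x = x"
-  apply (rule_tac x = "0 :: nat" in exI)
-  apply (rule refl)
-  done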
-
-
-section {* Simplifier tactics *}
-
-text {* The main Simplifier tactics @{ML simp_tac} and variants are
-  all covered by the @{method simp} and @{method simp_all} methods
-  (see \secref{sec:simplifier}).  Note that there is no individual
-  goal addressing available: simplification acts either on the first
-  goal (@{method simp}) or on all goals (@{method simp_all}).
-
-  \medskip
-  \begin{tabular}{lll}
-    @{ML asm_full_simp_tac}~@{text "@{context} 1"} & & @{method simp} \\
-    @{ML ALLGOALS}~(@{ML asm_full_simp_tac}~@{text "@{context}"}) & & @{method simp_all} \\[0.5ex]
-    @{ML simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(no_asm)"} \\
-    @{ML asm_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(no_asm_simp)"} \\
-    @{ML full_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(no_asm_use)"} \\
-    @{ML asm_lr_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(asm_lr)"} \\
-  \end{tabular}
-  \medskip
-*}
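-
-text {* For illustration only, two trivial goals that contrast the
-  default behaviour (assumptions are used and simplified) with the
-  @{text "(no_asm)"} variant; the concrete statements are arbitrary.
-*}
-
-lemma "x = (0::nat) \<Longrightarrow> x + y = y"
-  apply simp  (* cf. ML asm_full_simp_tac *)
-  done
-
-lemma "0 + y = (y::nat)"
-  apply (simp (no_asm))  (* cf. ML simp_tac *)
-  done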
-
-
-section {* Classical Reasoner tactics *}
-
-text {* The Classical Reasoner provides a rather large number of
-  variations of automated tactics, such as @{ML blast_tac}, @{ML
-  fast_tac}, @{ML clarify_tac} etc.  The corresponding Isar methods
-  usually share the same base name, such as @{method blast}, @{method
-  fast}, @{method clarify} etc.\ (see \secref{sec:classical}).  *}
-
-
-section {* Miscellaneous tactics *}
-
-text {*
-  There are a few additional tactics defined in various theories of
-  Isabelle/HOL, some of them also in Isabelle/FOL or Isabelle/ZF.
-  The most common of these may be ported to Isar as follows.
-
-  \medskip
-  \begin{tabular}{lll}
-    @{ML stac}~@{text "a 1"} & & @{text "subst a"} \\
-    @{ML hyp_subst_tac}~@{text 1} & & @{text hypsubst} \\
-    @{ML split_all_tac}~@{text 1} & & @{text "simp (no_asm_simp) only: split_tupled_all"} \\
-      & @{text "\<approx>"} & @{text "simp only: split_tupled_all"} \\
-      & @{text "\<lless>"} & @{text "clarify"} \\
-  \end{tabular}
-*}
-
-
-section {* Tacticals *}
-
-text {*
-  Classic Isabelle provides a huge number of tacticals for combination
-  and modification of existing tactics.  This has been greatly reduced
-  in Isar, providing the bare minimum of combinators only: ``@{text
-  ","}'' (sequential composition), ``@{text "|"}'' (alternative
-  choices), ``@{text "?"}'' (try), ``@{text "+"}'' (repeat at least
-  once).  These are usually sufficient in practice; if all else fails,
-  arbitrary ML tactic code may be invoked via the @{method tactic}
-  method (see \secref{sec:tactics}).
-
-  \medskip Common ML tacticals may be expressed directly in Isar as
-  follows:
-
-  \medskip
-  \begin{tabular}{lll}
-    @{text "tac\<^sub>1"}~@{ML_text THEN}~@{text "tac\<^sub>2"} & & @{text "meth\<^sub>1, meth\<^sub>2"} \\
-    @{text "tac\<^sub>1"}~@{ML_text ORELSE}~@{text "tac\<^sub>2"} & & @{text "meth\<^sub>1 | meth\<^sub>2"} \\
-    @{ML TRY}~@{text tac} & & @{text "meth?"} \\
-    @{ML REPEAT1}~@{text tac} & & @{text "meth+"} \\
-    @{ML REPEAT}~@{text tac} & & @{text "(meth+)?"} \\
-    @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>]"} & & @{text "meth\<^sub>1, \<dots>"} \\
-    @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>]"} & & @{text "meth\<^sub>1 | \<dots>"} \\
-  \end{tabular}
-  \medskip
-
-  \medskip @{ML CHANGED} (see \cite{isabelle-implementation}) is
-  usually not required in Isar, since most basic proof methods already
-  fail unless there is an actual change in the goal state.
-  Nevertheless, ``@{text "?"}''  (try) may be used to accept
-  \emph{unchanged} results as well.
-
-  \medskip @{ML ALLGOALS}, @{ML SOMEGOAL} etc.\ (see
-  \cite{isabelle-implementation}) are not available in Isar, since
-  there is no direct goal addressing.  Nevertheless, some basic
-  methods address all goals internally, notably @{method simp_all}
-  (see \secref{sec:simplifier}).  Also note that @{ML ALLGOALS} can
-  often be replaced by ``@{text "+"}'' (repeat at least once), although
-  this usually has a different operational behavior: subgoals are
-  solved in a different order.
-
-  \medskip Iterated resolution, such as
-  @{ML_text "REPEAT (FIRSTGOAL (resolve_tac ...))"}, is usually better
-  expressed using the @{method intro} and @{method elim} methods of
-  Isar (see \secref{sec:classical}).
-*}
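-
-text {* For illustration only, a contrived goal where ``@{text ","}''
-  and ``@{text "+"}'' play the roles of @{ML_text THEN} and @{ML_text
-  REPEAT1}; the particular rules involved are arbitrary.
-*}
-
-lemma "A \<and> B \<longrightarrow> B \<and> A"
-  apply (rule impI, erule conjE, rule conjI)
-  apply assumption+
-  done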
-
-end
--- a/src/Doc/IsarRef/Misc.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,150 +0,0 @@
-theory Misc
-imports Base Main
-begin
-
-chapter {* Other commands *}
-
-section {* Inspecting the context *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "print_theory"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_methods"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_attributes"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_theorems"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "find_theorems"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "find_consts"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "thm_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "unused_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_facts"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_binds"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{command print_theory} | @@{command print_theorems} | @@{command print_facts}) ('!'?)
-    ;
-
-    @@{command find_theorems} ('(' @{syntax nat}? 'with_dups'? ')')? \<newline> (thmcriterion * )
-    ;
-    thmcriterion: ('-'?) ('name' ':' @{syntax nameref} | 'intro' | 'elim' | 'dest' |
-      'solves' | 'simp' ':' @{syntax term} | @{syntax term})
-    ;
-    @@{command find_consts} (constcriterion * )
-    ;
-    constcriterion: ('-'?)
-      ('name' ':' @{syntax nameref} | 'strict' ':' @{syntax type} | @{syntax type})
-    ;
-    @@{command thm_deps} @{syntax thmrefs}
-    ;
-    @@{command unused_thms} ((@{syntax name} +) '-' (@{syntax name} * ))?
-  \<close>}
-
-  These commands print certain parts of the theory and proof context.
-  Note that there are further such commands available, e.g.\ for the
-  set of rules declared for simplification.
-
-  \begin{description}
-  
-  \item @{command "print_theory"} prints the main logical content of
-  the background theory; the ``@{text "!"}'' option indicates extra
-  verbosity.
-
-  \item @{command "print_methods"} prints all proof methods
-  available in the current theory context.
-  
-  \item @{command "print_attributes"} prints all attributes
-  available in the current theory context.
-  
-  \item @{command "print_theorems"} prints theorems of the background
-  theory resulting from the last command; the ``@{text "!"}'' option
-  indicates extra verbosity.
-  
-  \item @{command "print_facts"} prints all local facts of the
-  current context, both named and unnamed ones; the ``@{text "!"}''
-  option indicates extra verbosity.
-  
-  \item @{command "print_binds"} prints all term abbreviations
-  present in the context.
-
-  \item @{command "find_theorems"}~@{text criteria} retrieves facts
-  from the theory or proof context matching all of the given search
-  criteria.  The criterion @{text "name: p"} selects all theorems
-  whose fully qualified name matches pattern @{text p}, which may
-  contain ``@{text "*"}'' wildcards.  The criteria @{text intro},
-  @{text elim}, and @{text dest} select theorems that match the
-  current goal as introduction, elimination or destruction rules,
-  respectively.  The criterion @{text "solves"} returns all rules
-  that would directly solve the current goal.  The criterion
-  @{text "simp: t"} selects all rewrite rules whose left-hand side
-  matches the given term.  The criterion term @{text t} selects all
-  theorems that contain the pattern @{text t} -- as usual, patterns
-  may contain occurrences of the dummy ``@{text _}'', schematic
-  variables, and type constraints.
-  
-  Criteria can be preceded by ``@{text "-"}'' to select theorems that
-  do \emph{not} match. Note that giving the empty list of criteria
-  yields \emph{all} currently known facts.  An optional limit for the
-  number of printed facts may be given; the default is 40.  By
-  default, duplicates are removed from the search result. Use
-  @{text with_dups} to display duplicates.
-
-  \item @{command "find_consts"}~@{text criteria} prints all constants
-  whose type meets all of the given criteria. The criterion @{text
-  "strict: ty"} is met by any type that matches the type pattern
-  @{text ty}.  Patterns may contain both the dummy type ``@{text _}''
-  and sort constraints. The criterion @{text ty} is similar, but it
-  also matches against subtypes. The criterion @{text "name: p"} and
-  the prefix ``@{text "-"}'' function as described for @{command
-  "find_theorems"}.
-
-  \item @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
-  visualizes dependencies of facts, using Isabelle's graph browser
-  tool (see also \cite{isabelle-sys}).
-
-  \item @{command "unused_thms"}~@{text "A\<^sub>1 \<dots> A\<^sub>m - B\<^sub>1 \<dots> B\<^sub>n"}
-  displays all theorems that are proved in theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}
-  or their parents but not in @{text "A\<^sub>1 \<dots> A\<^sub>m"} or their parents and
-  that are never used.
-  If @{text n} is @{text 0}, the end of the range of theories
-  defaults to the current theory. If no range is specified,
-  only the unused theorems in the current theory are displayed.
-  
-  \end{description}
-*}
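-
-subsubsection {* Examples *}
-
-text {* For illustration only, some plausible queries; the patterns
-  are arbitrary and the results depend on the ambient theory
-  context. *}
-
-find_theorems "_ \<and> _" name: "conj"
-find_consts "_ list \<Rightarrow> _ list"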
-
-
-section {* System commands *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "cd"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-    @{command_def "pwd"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-    @{command_def "use_thy"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{command cd} | @@{command use_thy}) @{syntax name}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "cd"}~@{text path} changes the current directory
-  of the Isabelle process.
-
-  \item @{command "pwd"} prints the current working directory.
-
-  \item @{command "use_thy"}~@{text A} preloads theory @{text A}.
-  These system commands are scarcely used when working interactively,
-  since loading of theories is done automatically as required.
-
-  \end{description}
-
-  %FIXME proper place (!?)
-  Isabelle file specifications may contain path variables (e.g.\
-  @{verbatim "$ISABELLE_HOME"}) that are expanded accordingly.  Note
-  that @{verbatim "~"} abbreviates @{verbatim "$USER_HOME"}, and
-  @{verbatim "~~"} abbreviates @{verbatim "$ISABELLE_HOME"}.  The
-  general syntax for path specifications follows POSIX conventions.
-*}
-
-end
--- a/src/Doc/IsarRef/Outer_Syntax.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,466 +0,0 @@
-theory Outer_Syntax
-imports Base Main
-begin
-
-chapter {* Outer syntax --- the theory language \label{ch:outer-syntax} *}
-
-text {*
-  The rather generic framework of Isabelle/Isar syntax emerges from
-  three main syntactic categories: \emph{commands} of the top-level
-  Isar engine (covering theory and proof elements), \emph{methods} for
-  general goal refinements (analogous to traditional ``tactics''), and
-  \emph{attributes} for operations on facts (within a certain
-  context).  Subsequently we give a reference of basic syntactic
-  entities underlying Isabelle/Isar syntax in a bottom-up manner.
-  Concrete theory and proof language elements will be introduced later
-  on.
-
-  \medskip In order to get started with writing well-formed
-  Isabelle/Isar documents, the most important aspect to be noted is
-  the difference of \emph{inner} versus \emph{outer} syntax.  Inner
-  syntax is that of Isabelle types and terms of the logic, while outer
-  syntax is that of Isabelle/Isar theory sources (specifications and
-  proofs).  As a general rule, inner syntax entities may occur only as
-  \emph{atomic entities} within outer syntax.  For example, the string
-  @{verbatim "\"x + y\""} and identifier @{verbatim z} are legal term
-  specifications within a theory, while @{verbatim "x + y"} without
-  quotes is not.
-
-  Printed theory documents usually omit quotes to gain readability
-  (this is a matter of {\LaTeX} macro setup, say via @{verbatim
-  "\\isabellestyle"}, see also \cite{isabelle-sys}).  Experienced
-  users of Isabelle/Isar may easily reconstruct the lost technical
-  information, while mere readers need not care about quotes at all.
-
-  \medskip Isabelle/Isar input may contain any number of input
-  termination characters ``@{verbatim ";"}'' (semicolon) to separate
-  commands explicitly.  This is particularly useful in interactive
-  shell sessions to make clear where the current command is intended
-  to end.  Otherwise, the interpreter loop will continue to issue a
-  secondary prompt ``@{verbatim "#"}'' until an end-of-command is
-  clearly recognized from the input syntax, e.g.\ upon encountering
-  the next command keyword.
-
-  More advanced interfaces such as Isabelle/jEdit \cite{Wenzel:2012}
-  and Proof~General \cite{proofgeneral} do not require explicit
-  semicolons: command spans are determined by inspecting the content
-  of the editor buffer.  In the printed presentation of Isabelle/Isar
-  documents semicolons are omitted altogether for readability.
-
-  \begin{warn}
-    Proof~General requires certain syntax classification tables in
-    order to achieve properly synchronized interaction with the
-    Isabelle/Isar process.  These tables need to be consistent with
-    the Isabelle version and particular logic image to be used in a
-    running session (common object-logics may well change the outer
-    syntax).  The standard setup should work correctly with any of the
-    ``official'' logic images derived from Isabelle/HOL (including
-    HOLCF etc.).  Users of alternative logics may need to tell
-    Proof~General explicitly, e.g.\ by giving an option @{verbatim "-k ZF"}
-    (in conjunction with @{verbatim "-l ZF"}, to specify the default
-    logic image).  Note that option @{verbatim "-L"} does both of
-    these at the same time.
-  \end{warn}
-*}
-
-
-section {* Commands *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "print_commands"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-    @{command_def "help"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command help} (@{syntax name} * )
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "print_commands"} prints all outer syntax keywords
-  and commands.
-
-  \item @{command "help"}~@{text "pats"} retrieves outer syntax
-  commands according to the specified name patterns.
-
-  \end{description}
-*}
-
-
-subsubsection {* Examples *}
-
-text {* Some common diagnostic commands are retrieved like this
-  (according to usual naming conventions): *}
-
-help "print"
-help "find"
-
-
-section {* Lexical matters \label{sec:outer-lex} *}
-
-text {* The outer lexical syntax consists of three main categories of
-  syntax tokens:
-
-  \begin{enumerate}
-
-  \item \emph{major keywords} --- the command names that are available
-  in the present logic session;
-
-  \item \emph{minor keywords} --- additional literal tokens required
-  by the syntax of commands;
-
-  \item \emph{named tokens} --- various categories of identifiers etc.
-
-  \end{enumerate}
-
-  Major keywords and minor keywords are guaranteed to be disjoint.
-  This helps user-interfaces to determine the overall structure of a
-  theory text, without knowing the full details of command syntax.
-  Internally, there is some additional information about the kind of
-  major keywords, which approximates the command type (theory command,
-  proof command etc.).
-
-  Keywords override named tokens.  For example, the presence of a
-  command called @{verbatim term} inhibits the identifier @{verbatim
-  term}, but the string @{verbatim "\"term\""} can be used instead.
-  By convention, the outer syntax always allows quoted strings in
-  addition to identifiers, wherever a named entity is expected.
-
-  When tokenizing a given input sequence, the lexer repeatedly takes
-  the longest prefix of the input that forms a valid token.  Spaces,
-  tabs, newlines and formfeeds between tokens serve as explicit
-  separators.
-
-  \medskip The categories for named tokens are defined once and for
-  all as follows.
-
-  \begin{center}
-  \begin{supertabular}{rcl}
-    @{syntax_def ident} & = & @{text "letter (subscript\<^sup>? quasiletter)\<^sup>*"} \\
-    @{syntax_def longident} & = & @{text "ident("}@{verbatim "."}@{text "ident)\<^sup>+"} \\
-    @{syntax_def symident} & = & @{text "sym\<^sup>+  |  "}@{verbatim "\\"}@{verbatim "<"}@{text ident}@{verbatim ">"} \\
-    @{syntax_def nat} & = & @{text "digit\<^sup>+"} \\
-    @{syntax_def float} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
-    @{syntax_def var} & = & @{verbatim "?"}@{text "ident  |  "}@{verbatim "?"}@{text ident}@{verbatim "."}@{text nat} \\
-    @{syntax_def typefree} & = & @{verbatim "'"}@{text ident} \\
-    @{syntax_def typevar} & = & @{verbatim "?"}@{text "typefree  |  "}@{verbatim "?"}@{text typefree}@{verbatim "."}@{text nat} \\
-    @{syntax_def string} & = & @{verbatim "\""} @{text "\<dots>"} @{verbatim "\""} \\
-    @{syntax_def altstring} & = & @{verbatim "`"} @{text "\<dots>"} @{verbatim "`"} \\
-    @{syntax_def cartouche} & = & @{verbatim "\<open>"} @{text "\<dots>"} @{verbatim "\<close>"} \\
-    @{syntax_def verbatim} & = & @{verbatim "{*"} @{text "\<dots>"} @{verbatim "*"}@{verbatim "}"} \\[1ex]
-
-    @{text letter} & = & @{text "latin  |  "}@{verbatim "\\"}@{verbatim "<"}@{text latin}@{verbatim ">"}@{text "  |  "}@{verbatim "\\"}@{verbatim "<"}@{text "latin latin"}@{verbatim ">"}@{text "  |  greek"} \\
-    @{text subscript} & = & @{verbatim "\<^sub>"} \\
-    @{text quasiletter} & = & @{text "letter  |  digit  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "'"} \\
-    @{text latin} & = & @{verbatim a}@{text "  | \<dots> |  "}@{verbatim z}@{text "  |  "}@{verbatim A}@{text "  |  \<dots> |  "}@{verbatim Z} \\
-    @{text digit} & = & @{verbatim "0"}@{text "  |  \<dots> |  "}@{verbatim "9"} \\
-    @{text sym} & = & @{verbatim "!"}@{text "  |  "}@{verbatim "#"}@{text "  |  "}@{verbatim "$"}@{text "  |  "}@{verbatim "%"}@{text "  |  "}@{verbatim "&"}@{text "  |  "}@{verbatim "*"}@{text "  |  "}@{verbatim "+"}@{text "  |  "}@{verbatim "-"}@{text "  |  "}@{verbatim "/"}@{text "  |"} \\
-    & & @{verbatim "<"}@{text "  |  "}@{verbatim "="}@{text "  |  "}@{verbatim ">"}@{text "  |  "}@{verbatim "?"}@{text "  |  "}@{verbatim "@"}@{text "  |  "}@{verbatim "^"}@{text "  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "|"}@{text "  |  "}@{verbatim "~"} \\
-    @{text greek} & = & @{verbatim "\<alpha>"}@{text "  |  "}@{verbatim "\<beta>"}@{text "  |  "}@{verbatim "\<gamma>"}@{text "  |  "}@{verbatim "\<delta>"}@{text "  |"} \\
-          &   & @{verbatim "\<epsilon>"}@{text "  |  "}@{verbatim "\<zeta>"}@{text "  |  "}@{verbatim "\<eta>"}@{text "  |  "}@{verbatim "\<theta>"}@{text "  |"} \\
-          &   & @{verbatim "\<iota>"}@{text "  |  "}@{verbatim "\<kappa>"}@{text "  |  "}@{verbatim "\<mu>"}@{text "  |  "}@{verbatim "\<nu>"}@{text "  |"} \\
-          &   & @{verbatim "\<xi>"}@{text "  |  "}@{verbatim "\<pi>"}@{text "  |  "}@{verbatim "\<rho>"}@{text "  |  "}@{verbatim "\<sigma>"}@{text "  |  "}@{verbatim "\<tau>"}@{text "  |"} \\
-          &   & @{verbatim "\<upsilon>"}@{text "  |  "}@{verbatim "\<phi>"}@{text "  |  "}@{verbatim "\<chi>"}@{text "  |  "}@{verbatim "\<psi>"}@{text "  |"} \\
-          &   & @{verbatim "\<omega>"}@{text "  |  "}@{verbatim "\<Gamma>"}@{text "  |  "}@{verbatim "\<Delta>"}@{text "  |  "}@{verbatim "\<Theta>"}@{text "  |"} \\
-          &   & @{verbatim "\<Lambda>"}@{text "  |  "}@{verbatim "\<Xi>"}@{text "  |  "}@{verbatim "\<Pi>"}@{text "  |  "}@{verbatim "\<Sigma>"}@{text "  |"} \\
-          &   & @{verbatim "\<Upsilon>"}@{text "  |  "}@{verbatim "\<Phi>"}@{text "  |  "}@{verbatim "\<Psi>"}@{text "  |  "}@{verbatim "\<Omega>"} \\
-  \end{supertabular}
-  \end{center}
-
-  A @{syntax_ref var} or @{syntax_ref typevar} describes an unknown,
-  which is internally a pair of base name and index (ML type @{ML_type
-  indexname}).  These components are either separated by a dot as in
-  @{text "?x.1"} or @{text "?x7.3"} or run together as in @{text
-  "?x1"}.  The latter form is possible if the base name does not end
-  with digits.  If the index is 0, it may be dropped altogether:
-  @{text "?x"} and @{text "?x0"} and @{text "?x.0"} all refer to the
-  same unknown, with basename @{text "x"} and index 0.
-
-  The syntax of @{syntax_ref string} admits any characters, including
-  newlines; ``@{verbatim "\""}'' (double-quote) and ``@{verbatim
-  "\\"}'' (backslash) need to be escaped by a backslash; arbitrary
-  character codes may be specified as ``@{verbatim "\\"}@{text ddd}'',
-  with three decimal digits.  Alternative strings according to
-  @{syntax_ref altstring} are analogous, using single back-quotes
-  instead.
-
-  The body of @{syntax_ref verbatim} may consist of any text not
-  containing ``@{verbatim "*"}@{verbatim "}"}''; this allows
-  convenient inclusion of quotes without further escapes.  There is no
-  way to escape ``@{verbatim "*"}@{verbatim "}"}''.  If the quoted
-  text is {\LaTeX} source, one may usually add some blank or comment
-  to avoid the critical character sequence.
-
-  A @{syntax_ref cartouche} consists of arbitrary text, with properly
-  balanced blocks of ``@{verbatim "\<open>"}~@{text "\<dots>"}~@{verbatim
-  "\<close>"}''.  Note that the rendering of cartouche delimiters is
-  usually like this: ``@{text "\<open> \<dots> \<close>"}''.
-
-  Source comments take the form @{verbatim "(*"}~@{text
-  "\<dots>"}~@{verbatim "*)"} and may be nested, although the user-interface
-  might prevent this.  Note that this form indicates source comments
-  only, which are stripped after lexical analysis of the input.  The
-  Isar syntax also provides proper \emph{document comments} that are
-  considered as part of the text (see \secref{sec:comments}).
-
-  Common mathematical symbols such as @{text \<forall>} are represented in
-  Isabelle as @{verbatim \<forall>}.  There are infinitely many Isabelle
-  symbols like this, although proper presentation is left to front-end
-  tools such as {\LaTeX}, Proof~General, or Isabelle/jEdit.  A list of
-  predefined Isabelle symbols that work well with these tools is given
-  in \appref{app:symbols}.  Note that @{verbatim "\<lambda>"} does not belong
-  to the @{text letter} category, since it is already used differently
-  in the Pure term language.  *}
-
-
-section {* Common syntax entities *}
-
-text {*
-  We now introduce several basic syntactic entities, such as names,
-  terms, and theorem specifications, which are factored out of the
-  actual Isar language elements to be described later.
-*}
-
-
-subsection {* Names *}
-
-text {* Entity @{syntax name} usually refers to any name of types,
-  constants, theorems etc.\ that are to be \emph{declared} or
-  \emph{defined} (so qualified identifiers are excluded here).  Quoted
-  strings provide an escape for non-identifier names or those ruled
-  out by outer syntax keywords (e.g.\ quoted @{verbatim "\"let\""}).
-  Already existing objects are usually referenced by @{syntax
-  nameref}.
-
-  @{rail \<open>
-    @{syntax_def name}: @{syntax ident} | @{syntax symident} |
-      @{syntax string} | @{syntax nat}
-    ;
-    @{syntax_def parname}: '(' @{syntax name} ')'
-    ;
-    @{syntax_def nameref}: @{syntax name} | @{syntax longident}
-  \<close>}
-*}
-
-
-subsection {* Numbers *}
-
-text {* The outer lexical syntax (\secref{sec:outer-lex}) admits
-  natural numbers and floating point numbers.  These are combined as
-  @{syntax int} and @{syntax real} as follows.
-
-  @{rail \<open>
-    @{syntax_def int}: @{syntax nat} | '-' @{syntax nat}
-    ;
-    @{syntax_def real}: @{syntax float} | @{syntax int}
-  \<close>}
-
-  Note that there is an overlap with the category @{syntax name},
-  which also includes @{syntax nat}.
-*}
-
-
-subsection {* Comments \label{sec:comments} *}
-
-text {* Large chunks of plain @{syntax text} are usually given
-  @{syntax verbatim}, i.e.\ enclosed in @{verbatim "{"}@{verbatim
-  "*"}~@{text "\<dots>"}~@{verbatim "*"}@{verbatim "}"}.  For convenience,
-  any of the smaller text units conforming to @{syntax nameref} are
-  admitted as well.  A marginal @{syntax comment} is of the form
-  @{verbatim "--"}~@{syntax text}.  Any number of these may occur
-  within Isabelle/Isar commands.
-
-  @{rail \<open>
-    @{syntax_def text}: @{syntax verbatim} | @{syntax nameref}
-    ;
-    @{syntax_def comment}: '--' @{syntax text}
-  \<close>}
-*}
-
-
-subsection {* Type classes, sorts and arities *}
-
-text {*
-  Classes are specified by plain names.  Sorts have a very simple
-  inner syntax, which is either a single class name @{text c} or a
-  list @{text "{c\<^sub>1, \<dots>, c\<^sub>n}"} referring to the
-  intersection of these classes.  The syntax of type arities is given
-  directly at the outer level.
-
-  @{rail \<open>
-    @{syntax_def classdecl}: @{syntax name} (('<' | '\<subseteq>') (@{syntax nameref} + ','))?
-    ;
-    @{syntax_def sort}: @{syntax nameref}
-    ;
-    @{syntax_def arity}: ('(' (@{syntax sort} + ',') ')')? @{syntax sort}
-  \<close>}
-*}
-
-
-subsection {* Types and terms \label{sec:types-terms} *}
-
-text {*
-  The actual inner Isabelle syntax, that of types and terms of the
-  logic, is far too sophisticated to be modelled explicitly
-  at the outer theory level.  Basically, any such entity has to be
-  quoted to turn it into a single token (the parsing and type-checking
-  is performed internally later).  For convenience, a slightly more
-  liberal convention is adopted: quotes may be omitted for any type or
-  term that is already atomic at the outer level.  For example, one
-  may just write @{verbatim x} instead of quoted @{verbatim "\"x\""}.
-  Note that symbolic identifiers (e.g.\ @{verbatim "++"} or @{text
-  "\<forall>"}) are available as well, provided these have not been superseded
-  by commands or other keywords already (such as @{verbatim "="} or
-  @{verbatim "+"}).
-
-  @{rail \<open>
-    @{syntax_def type}: @{syntax nameref} | @{syntax typefree} |
-      @{syntax typevar}
-    ;
-    @{syntax_def term}: @{syntax nameref} | @{syntax var}
-    ;
-    @{syntax_def prop}: @{syntax term}
-  \<close>}
-
-  Positional instantiations are indicated by giving a sequence of
-  terms, or the placeholder ``@{text _}'' (underscore), which means to
-  skip a position.
-
-  @{rail \<open>
-    @{syntax_def inst}: '_' | @{syntax term}
-    ;
-    @{syntax_def insts}: (@{syntax inst} *)
-  \<close>}
-
-  Type declarations and definitions usually refer to @{syntax
-  typespec} on the left-hand side.  This models basic type constructor
-  application at the outer syntax level.  Note that only plain postfix
-  notation is available here, but no infixes.
-
-  @{rail \<open>
-    @{syntax_def typespec}:
-      (() | @{syntax typefree} | '(' ( @{syntax typefree} + ',' ) ')') @{syntax name}
-    ;
-    @{syntax_def typespec_sorts}:
-      (() | (@{syntax typefree} ('::' @{syntax sort})?) |
-        '(' ( (@{syntax typefree} ('::' @{syntax sort})?) + ',' ) ')') @{syntax name}
-  \<close>}
-*}
-
-
-subsection {* Term patterns and declarations \label{sec:term-decls} *}
-
-text {* Wherever explicit propositions (or term fragments) occur in a
-  proof text, casual binding of schematic term variables may be
-  specified via patterns of the form ``@{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"}''.
-  This works both for @{syntax term} and @{syntax prop}.
-
-  @{rail \<open>
-    @{syntax_def term_pat}: '(' (@'is' @{syntax term} +) ')'
-    ;
-    @{syntax_def prop_pat}: '(' (@'is' @{syntax prop} +) ')'
-  \<close>}
-
-  \medskip Declarations of local variables @{text "x :: \<tau>"} and
-  logical propositions @{text "a : \<phi>"} represent different views on
-  the same principle of introducing a local scope.  In practice, one
-  may usually omit the typing of @{syntax vars} (due to
-  type-inference), and the naming of propositions (due to implicit
-  references of current facts).  In any case, Isar proof elements
-  usually allow multiple such items to be introduced simultaneously.
-
-  @{rail \<open>
-    @{syntax_def vars}: (@{syntax name} +) ('::' @{syntax type})?
-    ;
-    @{syntax_def props}: @{syntax thmdecl}? (@{syntax prop} @{syntax prop_pat}? +)
-  \<close>}
-
-  The treatment of multiple declarations corresponds to the
-  complementary focus of @{syntax vars} versus @{syntax props}.  In
-  ``@{text "x\<^sub>1 \<dots> x\<^sub>n :: \<tau>"}'' the typing refers to all variables, while
-  in @{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} the naming refers to all propositions
-  collectively.  Isar language elements that refer to @{syntax vars}
-  or @{syntax props} typically admit separate typings or namings via
-  another level of iteration, with explicit @{keyword_ref "and"}
-  separators; e.g.\ see @{command "fix"} and @{command "assume"} in
-  \secref{sec:proof-context}.
-*}
-
-
-subsection {* Attributes and theorems \label{sec:syn-att} *}
-
-text {* Attributes have their own ``semi-inner'' syntax, in the sense
-  that input conforming to @{syntax args} below is parsed by the
-  attribute a second time.  The attribute argument specifications may
-  be any sequence of atomic entities (identifiers, strings etc.), or
-  properly bracketed argument lists.  Below @{syntax atom} refers to
-  any atomic entity, including any @{syntax keyword} conforming to
-  @{syntax symident}.
-
-  @{rail \<open>
-    @{syntax_def atom}: @{syntax nameref} | @{syntax typefree} |
-      @{syntax typevar} | @{syntax var} | @{syntax nat} | @{syntax float} |
-      @{syntax keyword} | @{syntax cartouche}
-    ;
-    arg: @{syntax atom} | '(' @{syntax args} ')' | '[' @{syntax args} ']'
-    ;
-    @{syntax_def args}: arg *
-    ;
-    @{syntax_def attributes}: '[' (@{syntax nameref} @{syntax args} * ',') ']'
-  \<close>}
-
-  Theorem specifications come in several flavors: @{syntax axmdecl}
-  and @{syntax thmdecl} usually refer to axioms, assumptions or
-  results of goal statements, while @{syntax thmdef} collects lists of
-  existing theorems.  Existing theorems are given by @{syntax thmref}
-  and @{syntax thmrefs}, the former requires an actual singleton
-  result.
-
-  There are three forms of theorem references:
-  \begin{enumerate}
-  
-  \item named facts @{text "a"},
-
-  \item selections from named facts @{text "a(i)"} or @{text "a(j - k)"},
-
-  \item literal fact propositions using @{syntax_ref altstring} syntax
-  @{verbatim "`"}@{text "\<phi>"}@{verbatim "`"} (see also method
-  @{method_ref fact}).
-
-  \end{enumerate}
-
-  Any kind of theorem specification may include lists of attributes
-  both on the left and right hand sides; attributes are applied to any
-  immediately preceding fact.  If names are omitted, the theorems are
-  not stored within the theorem database of the theory or proof
-  context, but any given attributes are applied nonetheless.
-
-  An extra pair of brackets around attributes (like ``@{text
-  "[[simproc a]]"}'') abbreviates a theorem reference involving an
-  internal dummy fact, which will be ignored later on.  So only the
-  effect of the attribute on the background context will persist.
-  This form of in-place declarations is particularly useful with
-  commands like @{command "declare"} and @{command "using"}.
-
-  @{rail \<open>
-    @{syntax_def axmdecl}: @{syntax name} @{syntax attributes}? ':'
-    ;
-    @{syntax_def thmdecl}: thmbind ':'
-    ;
-    @{syntax_def thmdef}: thmbind '='
-    ;
-    @{syntax_def thmref}:
-      (@{syntax nameref} selection? | @{syntax altstring}) @{syntax attributes}? |
-      '[' @{syntax attributes} ']'
-    ;
-    @{syntax_def thmrefs}: @{syntax thmref} +
-    ;
-
-    thmbind: @{syntax name} @{syntax attributes} | @{syntax name} | @{syntax attributes}
-    ;
-    selection: '(' ((@{syntax nat} | @{syntax nat} '-' @{syntax nat}?) + ',') ')'
-  \<close>}
-*}
-
-end
--- a/src/Doc/IsarRef/Preface.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-theory Preface
-imports Base Main
-begin
-
-chapter {* Preface *}
-
-text {*
-  The \emph{Isabelle} system essentially provides a generic
-  infrastructure for building deductive systems (programmed in
-  Standard ML), with a special focus on interactive theorem proving in
-  higher-order logics.  Many years ago, even end-users would refer to
-  certain ML functions (goal commands, tactics, tacticals etc.) to
-  pursue their everyday theorem proving tasks.
-  
-  In contrast, \emph{Isar} provides an interpreted language environment
-  of its own, which has been specifically tailored for the needs of
-  theory and proof development.  Compared to raw ML, the Isabelle/Isar
-  top-level provides a more robust and comfortable development
-  platform, with proper support for theory development graphs, managed
-  transactions with unlimited undo etc.
-
-  In its pioneering times, the Isabelle/Isar version of the
-  \emph{Proof~General} user interface
-  \cite{proofgeneral,Aspinall:TACAS:2000} has contributed to the
-  success of interactive theory and proof development in this
-  advanced theorem proving environment, even though it was somewhat
-  biased towards old-style proof scripts.  The more recent
-  Isabelle/jEdit Prover IDE \cite{Wenzel:2012} emphasizes the
-  document-oriented approach of Isabelle/Isar again more explicitly.
-
-  \medskip Apart from the technical advances over bare-bones ML
-  programming, the main purpose of the Isar language is to provide a
-  conceptually different view on machine-checked proofs
-  \cite{Wenzel:1999:TPHOL,Wenzel-PhD}.  \emph{Isar} stands for
-  \emph{Intelligible semi-automated reasoning}.  Drawing from both the
-  traditions of informal mathematical proof texts and high-level
-  programming languages, Isar offers a versatile environment for
-  structured formal proof documents.  Thus properly written Isar
-  proofs become accessible to a broader audience than unstructured
-  tactic scripts (which typically only provide operational information
-  for the machine).  Writing human-readable proof texts certainly
-  requires some additional efforts by the writer to achieve a good
-  presentation, both of formal and informal parts of the text.  On the
-  other hand, human-readable formal texts gain some value in their own
-  right, independently of the mechanical proof-checking process.
-
-  Despite its grand design of structured proof texts, Isar is able to
-  assimilate the old tactical style as an ``improper'' sub-language.
-  This provides an easy upgrade path for existing tactic scripts, as
-  well as some means for interactive experimentation and debugging of
-  structured proofs.  Isabelle/Isar supports a broad range of proof
-  styles, both readable and unreadable ones.
-
-  \medskip The generic Isabelle/Isar framework (see
-  \chref{ch:isar-framework}) works reasonably well for any Isabelle
-  object-logic that conforms to the natural deduction view of the
-  Isabelle/Pure framework.  Specific language elements introduced by
-  Isabelle/HOL are described in \partref{part:hol}.  Although the main
-  language elements are already provided by the Isabelle/Pure
-  framework, examples given in the generic parts will usually refer to
-  Isabelle/HOL.
-
-  \medskip Isar commands may be either \emph{proper} document
-  constructors, or \emph{improper commands}.  Some proof methods and
-  attributes introduced later are classified as improper as well.
-  Improper Isar language elements, which are marked by ``@{text
-  "\<^sup>*"}'' in the subsequent chapters; they are often helpful
-  when developing proof documents, but their use is discouraged for
-  the final human-readable outcome.  Typical examples are diagnostic
-  commands that print terms or theorems according to the current
-  context; other commands emulate old-style tactical theorem proving.
-*}
-
-end
--- a/src/Doc/IsarRef/Proof.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1551 +0,0 @@
-theory Proof
-imports Base Main
-begin
-
-chapter {* Proofs \label{ch:proofs} *}
-
-text {*
-  Proof commands perform transitions of Isar/VM machine
-  configurations, which are block-structured, consisting of a stack of
-  nodes with three main components: logical proof context, current
-  facts, and open goals.  Isar/VM transitions are typed according to
-  the following three different modes of operation:
-
-  \begin{description}
-
-  \item @{text "proof(prove)"} means that a new goal has just been
-  stated that is now to be \emph{proven}; the next command may refine
-  it by some proof method, and enter a sub-proof to establish the
-  actual result.
-
-  \item @{text "proof(state)"} is like a nested theory mode: the
-  context may be augmented by \emph{stating} additional assumptions,
-  intermediate results etc.
-
-  \item @{text "proof(chain)"} is intermediate between @{text
-  "proof(state)"} and @{text "proof(prove)"}: existing facts (i.e.\
-  the contents of the special ``@{fact_ref this}'' register) have been
-  just picked up in order to be used when refining the goal claimed
-  next.
-
-  \end{description}
-
-  The proof mode indicator may be understood as an instruction to the
-  writer, telling what kind of operation may be performed next.  The
-  corresponding typings of proof commands restrict the shape of
-  well-formed proof texts to particular command sequences.  So dynamic
-  arrangements of commands eventually turn out as static texts of a
-  certain structure.
-
-  \Appref{ap:refcard} gives a simplified grammar of the (extensible)
-  language emerging that way from the different types of proof
-  commands.  The main ideas of the overall Isar framework are
-  explained in \chref{ch:isar-framework}.
-*}
-
-
-section {* Proof structure *}
-
-subsection {* Formal notepad *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "notepad"} & : & @{text "local_theory \<rightarrow> proof(state)"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command notepad} @'begin'
-    ;
-    @@{command end}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "notepad"}~@{keyword "begin"} opens a proof state
-  without any goal statement.  This allows to experiment with Isar,
-  without producing any persistent result.
-
-  The notepad can be closed by @{command "end"} or discontinued by
-  @{command "oops"}.
-
-  \end{description}
-*}
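-
-text {* For illustration only, a minimal notepad that establishes a
-  trivial fact and leaves no trace in the enclosing theory. *}
-
-notepad
-begin
-  have "True" by (rule TrueI)
-end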
-
-
-subsection {* Blocks *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "next"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "{"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "}"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-  \end{matharray}
-
-  While Isar is inherently block-structured, opening and closing
-  blocks is mostly handled rather casually, with little explicit
-  user-intervention.  Any local goal statement automatically opens
-  \emph{two} internal blocks, which are closed again when concluding
-  the sub-proof (by @{command "qed"} etc.).  Sections of different
-  context within a sub-proof may be switched via @{command "next"},
-  which is just a single block-close followed by block-open again.
-  The effect of @{command "next"} is to reset the local proof context;
-  there is no goal focus involved here!
-
-  For slightly more advanced applications, there are explicit block
-  parentheses as well.  These typically achieve a stronger forward
-  style of reasoning.
-
-  \begin{description}
-
-  \item @{command "next"} switches to a fresh block within a
-  sub-proof, resetting the local context to the initial one.
-
-  \item @{command "{"} and @{command "}"} explicitly open and close
-  blocks.  Any current facts pass through ``@{command "{"}''
-  unchanged, while ``@{command "}"}'' causes any result to be
-  \emph{exported} into the enclosing context.  Thus fixed variables
-  are generalized, assumptions discharged, and local definitions
-  unfolded (cf.\ \secref{sec:proof-context}).  There is no difference
-  of @{command "assume"} and @{command "presume"} in this mode of
-  forward reasoning --- in contrast to plain backward reasoning with
-  the result exported at @{command "show"} time.
-
-  \end{description}
-*}
-
-
-subsection {* Omitting proofs *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "oops"} & : & @{text "proof \<rightarrow> local_theory | theory"} \\
-  \end{matharray}
-
-  The @{command "oops"} command discontinues the current proof
-  attempt, while considering the partial proof text as properly
-  processed.  This is conceptually quite different from ``faking''
-  actual proofs via @{command_ref "sorry"} (see
-  \secref{sec:proof-steps}): @{command "oops"} does not observe the
-  proof structure at all, but goes back right to the theory level.
-  Furthermore, @{command "oops"} does not produce any result theorem
-  --- there is no intended claim to be able to complete the proof
-  in any way.
-
-  A typical application of @{command "oops"} is to explain Isar proofs
-  \emph{within} the system itself, in conjunction with the document
-  preparation tools of Isabelle described in \chref{ch:document-prep}.
-  Thus partial or even wrong proof attempts can be discussed in a
-  logically sound manner.  Note that the Isabelle {\LaTeX} macros can
-  be easily adapted to print something like ``@{text "\<dots>"}'' instead of
-  the keyword ``@{command "oops"}''.
-*}
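-
-text {* For illustration only, an arbitrary (and unprovable) claim
-  that is typeset as part of the text and then abandoned. *}
-
-lemma "x = y"
-  oops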
-
-
-section {* Statements *}
-
-subsection {* Context elements \label{sec:proof-context} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "fix"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "assume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "presume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "def"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-  \end{matharray}
-
-  The logical proof context consists of fixed variables and
-  assumptions.  The former closely correspond to Skolem constants, or
-  meta-level universal quantification as provided by the Isabelle/Pure
-  logical framework.  Introducing some \emph{arbitrary, but fixed}
-  variable via ``@{command "fix"}~@{text x}'' results in a local value
-  that may be used in the subsequent proof as any other variable or
-  constant.  Furthermore, any result @{text "\<turnstile> \<phi>[x]"} exported from
-  the context will be universally closed wrt.\ @{text x} at the
-  outermost level: @{text "\<turnstile> \<And>x. \<phi>[x]"} (this is expressed in normal
-  form using Isabelle's meta-variables).
-
-  Similarly, introducing some assumption @{text \<chi>} has two effects.
-  On the one hand, a local theorem is created that may be used as a
-  fact in subsequent proof steps.  On the other hand, any result
-  @{text "\<chi> \<turnstile> \<phi>"} exported from the context becomes conditional wrt.\
-  the assumption: @{text "\<turnstile> \<chi> \<Longrightarrow> \<phi>"}.  Thus, solving an enclosing goal
-  using such a result would basically introduce a new subgoal stemming
-  from the assumption.  How this situation is handled depends on the
-  version of assumption command used: while @{command "assume"}
-  insists on solving the subgoal by unification with some premise of
-  the goal, @{command "presume"} leaves the subgoal unchanged in order
-  to be proved later by the user.
-
-  Local definitions, introduced by ``@{command "def"}~@{text "x \<equiv>
-  t"}'', are achieved by combining ``@{command "fix"}~@{text x}'' with
-  another version of assumption that causes any hypothetical equation
-  @{text "x \<equiv> t"} to be eliminated by the reflexivity rule.  Thus,
-  exporting some result @{text "x \<equiv> t \<turnstile> \<phi>[x]"} yields @{text "\<turnstile>
-  \<phi>[t]"}.
-
-  @{rail \<open>
-    @@{command fix} (@{syntax vars} + @'and')
-    ;
-    (@@{command assume} | @@{command presume}) (@{syntax props} + @'and')
-    ;
-    @@{command def} (def + @'and')
-    ;
-    def: @{syntax thmdecl}? \<newline>
-      @{syntax name} ('==' | '\<equiv>') @{syntax term} @{syntax term_pat}?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "fix"}~@{text x} introduces a local variable @{text
-  x} that is \emph{arbitrary, but fixed.}
-  
-  \item @{command "assume"}~@{text "a: \<phi>"} and @{command
-  "presume"}~@{text "a: \<phi>"} introduce a local fact @{text "\<phi> \<turnstile> \<phi>"} by
-  assumption.  Subsequent results applied to an enclosing goal (e.g.\
-  by @{command_ref "show"}) are handled as follows: @{command
-  "assume"} expects to be able to unify with existing premises in the
-  goal, while @{command "presume"} leaves @{text \<phi>} as new subgoals.
-  
-  Several lists of assumptions may be given (separated by
-  @{keyword_ref "and"}); the resulting list of current facts consists
-  of all of these concatenated.
-  
-  \item @{command "def"}~@{text "x \<equiv> t"} introduces a local
-  (non-polymorphic) definition.  In results exported from the context,
-  @{text x} is replaced by @{text t}.  Basically, ``@{command
-  "def"}~@{text "x \<equiv> t"}'' abbreviates ``@{command "fix"}~@{text
-  x}~@{command "assume"}~@{text "x \<equiv> t"}'', with the resulting
-  hypothetical equation solved by reflexivity.
-  
-  The default name for the definitional equation is @{text x_def}.
-  Several simultaneous definitions may be given at the same time.
-
-  \end{description}
-
-  The special name @{fact_ref prems} refers to all assumptions of the
-  current context as a list of theorems.  This feature should be used
-  with great care!  It is better avoided in final proof texts.
-*}
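-
-text {* For illustration only, a small notepad sketch that combines
-  @{command "fix"}, @{command "assume"} and @{command "def"}; the
-  particular facts are arbitrary. *}
-
-notepad
-begin
-  fix x :: nat
-  assume a: "x = 0"
-  def y \<equiv> "Suc x"
-  from a have "y = Suc 0" by (simp add: y_def)
-end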
-
-
-subsection {* Term abbreviations \label{sec:term-abbrev} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "let"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{keyword_def "is"} & : & syntax \\
-  \end{matharray}
-
-  Abbreviations may be either bound by explicit @{command
-  "let"}~@{text "p \<equiv> t"} statements, or by annotating assumptions or
-  goal statements with a list of patterns ``@{text "(\<IS> p\<^sub>1 \<dots>
-  p\<^sub>n)"}''.  In both cases, higher-order matching is invoked to
-  bind extra-logical term variables, which may be either named
-  schematic variables of the form @{text ?x}, or nameless dummies
-  ``@{variable _}'' (underscore). Note that in the @{command "let"}
-  form the patterns occur on the left-hand side, while the @{keyword
-  "is"} patterns are in postfix position.
-
-  Polymorphism of term bindings is handled in Hindley-Milner style,
-  similar to ML.  Type variables referring to local assumptions or
-  open goal statements are \emph{fixed}, while those of finished
-  results or bound by @{command "let"} may occur in \emph{arbitrary}
-  instances later.  Even though actual polymorphism should be rarely
-  used in practice, this mechanism is essential to achieve proper
-  incremental type-inference, as the user proceeds to build up the
-  Isar proof text from left to right.
-
-  \medskip Term abbreviations are quite different from local
-  definitions as introduced via @{command "def"} (see
-  \secref{sec:proof-context}).  The latter are visible within the
-  logic as actual equations, while abbreviations disappear during the
-  input process just after type checking.  Also note that @{command
-  "def"} does not support polymorphism.
-
-  @{rail \<open>
-    @@{command let} ((@{syntax term} + @'and') '=' @{syntax term} + @'and')
-  \<close>}
-
-  The syntax of @{keyword "is"} patterns follows @{syntax term_pat} or
-  @{syntax prop_pat} (see \secref{sec:term-decls}).
-
-  \begin{description}
-
-  \item @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
-  text variables in patterns @{text "p\<^sub>1, \<dots>, p\<^sub>n"} by simultaneous
-  higher-order matching against terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"}.
-
-  \item @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
-  matches @{text "p\<^sub>1, \<dots>, p\<^sub>n"} against the preceding statement.  Also
-  note that @{keyword "is"} is not a separate command, but part of
-  others (such as @{command "assume"}, @{command "have"} etc.).
-
-  \end{description}
-
-  Some \emph{implicit} term abbreviations\index{term abbreviations}
-  for goals and facts are available as well.  For any open goal,
-  @{variable_ref thesis} refers to its object-level statement,
-  abstracted over any meta-level parameters (if present).  Likewise,
-  @{variable_ref this} is bound for fact statements resulting from
-  assumptions or finished goals.  In case @{variable this} refers to
-  an object-logic statement that is an application @{text "f t"}, then
-  @{text t} is bound to the special text variable ``@{variable "\<dots>"}''
-  (three dots).  The canonical application of this convenience are
-  calculational proofs (see \secref{sec:calculation}).
-*}
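-
-text {* For illustration only, a small notepad sketch of term
-  abbreviations bound via @{command "let"} and via an @{keyword "is"}
-  pattern; the statements themselves are arbitrary. *}
-
-notepad
-begin
-  let ?lhs = "Suc 0"
-  have "?lhs < Suc ?lhs" by simp
-
-  assume "A \<and> B" (is "?a \<and> ?b")
-  then have ?a by (rule conjunct1)
-end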
-
-
-subsection {* Facts and forward chaining \label{sec:proof-facts} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "note"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "then"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
-    @{command_def "from"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
-    @{command_def "with"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
-    @{command_def "using"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
-    @{command_def "unfolding"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  New facts are established either by assumption or proof of local
-  statements.  Any fact will usually be involved in further proofs,
-  either as explicit arguments of proof methods, or when forward
-  chaining towards the next goal via @{command "then"} (and variants);
-  @{command "from"} and @{command "with"} are composite forms
-  involving @{command "note"}.  The @{command "using"} element
-  augments the collection of used facts \emph{after} a goal has been
-  stated.  Note that the special theorem name @{fact_ref this} refers
-  to the most recently established facts, but only \emph{before}
-  issuing a follow-up claim.
-
-  @{rail \<open>
-    @@{command note} (@{syntax thmdef}? @{syntax thmrefs} + @'and')
-    ;
-    (@@{command from} | @@{command with} | @@{command using} | @@{command unfolding})
-      (@{syntax thmrefs} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "note"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"} recalls existing facts
-  @{text "b\<^sub>1, \<dots>, b\<^sub>n"}, binding the result as @{text a}.  Note that
-  attributes may be involved as well, both on the left and right hand
-  sides.
-
-  \item @{command "then"} indicates forward chaining by the current
-  facts in order to establish the goal to be claimed next.  The
-  initial proof method invoked to refine that will be offered the
-  facts to do ``anything appropriate'' (see also
-  \secref{sec:proof-steps}).  For example, method @{method (Pure) rule}
-  (see \secref{sec:pure-meth-att}) would typically do an elimination
-  rather than an introduction.  Automatic methods usually insert the
-  facts into the goal state before operation.  This provides a simple
-  scheme to control relevance of facts in automated proof search.
-  
-  \item @{command "from"}~@{text b} abbreviates ``@{command
-  "note"}~@{text b}~@{command "then"}''; thus @{command "then"} is
-  equivalent to ``@{command "from"}~@{text this}''.
-  
-  \item @{command "with"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} abbreviates ``@{command
-  "from"}~@{text "b\<^sub>1 \<dots> b\<^sub>n \<AND> this"}''; thus the forward chaining
-  is from earlier facts together with the current ones.
-  
-  \item @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} augments the facts being
-  currently indicated for use by a subsequent refinement step (such as
-  @{command_ref "apply"} or @{command_ref "proof"}).
-  
-  \item @{command "unfolding"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is structurally
-  similar to @{command "using"}, but unfolds definitional equations
-  @{text "b\<^sub>1, \<dots>, b\<^sub>n"} throughout the goal state and facts.
-
-  \end{description}
-
-  Forward chaining with an empty list of theorems is the same as not
-  chaining at all.  Thus ``@{command "from"}~@{text nothing}'' has no
-  effect apart from entering @{text "proof(chain)"} mode, since
-  @{fact_ref nothing} is bound to the empty list of theorems.
-
-  Basic proof methods (such as @{method_ref (Pure) rule}) expect multiple
-  facts to be given in their proper order, corresponding to a prefix
-  of the premises of the rule involved.  Note that positions may be
-  easily skipped using something like @{command "from"}~@{text "_
-  \<AND> a \<AND> b"}, for example.  This involves the trivial rule
-  @{text "PROP \<psi> \<Longrightarrow> PROP \<psi>"}, which is bound in Isabelle/Pure as
-  ``@{fact_ref "_"}'' (underscore).
-
-  Automated methods (such as @{method simp} or @{method auto}) just
-  insert any given facts before their usual operation.  Depending on
-  the kind of procedure involved, the order of facts is less
-  significant here.
-*}
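-
-text {* For illustration only, a small notepad sketch of forward
-  chaining via @{command "from"} and @{command "with"}; the facts
-  involved are arbitrary. *}
-
-notepad
-begin
-  assume a: "A" and b: "A \<Longrightarrow> B"
-  from a have "B" by (rule b)
-  with a have "A \<and> B" by (rule conjI)
-end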
-
-
-subsection {* Goals \label{sec:goals} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "lemma"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "theorem"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "corollary"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "schematic_lemma"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "schematic_theorem"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "schematic_corollary"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "have"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
-    @{command_def "show"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
-    @{command_def "hence"} & : & @{text "proof(state) \<rightarrow> proof(prove)"} \\
-    @{command_def "thus"} & : & @{text "proof(state) \<rightarrow> proof(prove)"} \\
-    @{command_def "print_statement"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  From a theory context, proof mode is entered by an initial goal
-  command such as @{command "lemma"}, @{command "theorem"}, or
-  @{command "corollary"}.  Within a proof, new claims may be
-  introduced locally as well; four variants are available here to
-  indicate whether forward chaining of facts should be performed
-  initially (via @{command_ref "then"}), and whether the final result
-  is meant to solve some pending goal.
-
-  Goals may consist of multiple statements, resulting in a list of
-  facts eventually.  A pending multi-goal is internally represented as
-  a meta-level conjunction (@{text "&&&"}), which is usually
-  split into the corresponding number of sub-goals prior to an initial
-  method application, via @{command_ref "proof"}
-  (\secref{sec:proof-steps}) or @{command_ref "apply"}
-  (\secref{sec:tactic-commands}).  The @{method_ref induct} method
-  covered in \secref{sec:cases-induct} acts on multiple claims
-  simultaneously.
-
-  Claims at the theory level may be either in short or long form.  A
-  short goal merely consists of several simultaneous propositions
-  (often just one).  A long goal includes an explicit context
-  specification for the subsequent conclusion, involving local
-  parameters and assumptions.  Here the role of each part of the
-  statement is explicitly marked by separate keywords (see also
-  \secref{sec:locale}); the local assumptions being introduced here
-  are available as @{fact_ref assms} in the proof.  Moreover, there
-  are two kinds of conclusions: @{element_def "shows"} states several
-  simultaneous propositions (essentially a big conjunction), while
-  @{element_def "obtains"} claims several simultaneous simultaneous
-  contexts of (essentially a big disjunction of eliminated parameters
-  and assumptions, cf.\ \secref{sec:obtain}).
-
-  @{rail \<open>
-    (@@{command lemma} | @@{command theorem} | @@{command corollary} |
-     @@{command schematic_lemma} | @@{command schematic_theorem} |
-     @@{command schematic_corollary}) @{syntax target}? (goal | longgoal)
-    ;
-    (@@{command have} | @@{command show} | @@{command hence} | @@{command thus}) goal
-    ;
-    @@{command print_statement} @{syntax modes}? @{syntax thmrefs}
-    ;
-  
-    goal: (@{syntax props} + @'and')
-    ;
-    longgoal: @{syntax thmdecl}? (@{syntax_ref "includes"}?) (@{syntax context_elem} * ) conclusion
-    ;
-    conclusion: @'shows' goal | @'obtains' (@{syntax parname}? case + '|')
-    ;
-    case: (@{syntax vars} + @'and') @'where' (@{syntax props} + @'and')
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "lemma"}~@{text "a: \<phi>"} enters proof mode with
-  @{text \<phi>} as main goal, eventually resulting in some fact @{text "\<turnstile>
-  \<phi>"} to be put back into the target context.  An additional @{syntax
-  context} specification may build up an initial proof context for the
-  subsequent claim; this includes local definitions and syntax as
-  well, see also @{syntax "includes"} in \secref{sec:bundle} and
-  @{syntax context_elem} in \secref{sec:locale}.
-  
-  \item @{command "theorem"}~@{text "a: \<phi>"} and @{command
-  "corollary"}~@{text "a: \<phi>"} are essentially the same as @{command
-  "lemma"}~@{text "a: \<phi>"}, but the facts are internally marked as
-  being of a different kind.  This discrimination acts like a formal
-  comment.
-
-  \item @{command "schematic_lemma"}, @{command "schematic_theorem"},
-  @{command "schematic_corollary"} are similar to @{command "lemma"},
-  @{command "theorem"}, @{command "corollary"}, respectively but allow
-  the statement to contain unbound schematic variables.
-
-  Under normal circumstances, an Isar proof text needs to specify
-  claims explicitly.  Schematic goals are more like goals in Prolog,
-  where certain results are synthesized in the course of reasoning.
-  With schematic statements, the inherent compositionality of Isar
-  proofs is lost, which also impacts performance, because proof
-  checking is forced into sequential mode.
-  
-  \item @{command "have"}~@{text "a: \<phi>"} claims a local goal,
-  eventually resulting in a fact within the current logical context.
-  This operation is completely independent of any pending sub-goals of
-  an enclosing goal statement, so @{command "have"} may be freely
-  used for experimental exploration of potential results within a
-  proof body.
-  
-  \item @{command "show"}~@{text "a: \<phi>"} is like @{command
-  "have"}~@{text "a: \<phi>"} plus a second stage to refine some pending
-  sub-goal for each one of the finished results, after having been
-  exported into the corresponding context (at the head of the
-  sub-proof of this @{command "show"} command).
-  
-  To accommodate interactive debugging, resulting rules are printed
-  before being applied internally.  Even more, interactive execution
-  of @{command "show"} predicts potential failure and displays the
-  resulting error as a warning beforehand.  Watch out for the
-  following message:
-
-  %FIXME proper antiquotation
-  \begin{ttbox}
-  Problem! Local statement will fail to solve any pending goal
-  \end{ttbox}
-  
-  \item @{command "hence"} abbreviates ``@{command "then"}~@{command
-  "have"}'', i.e.\ claims a local goal to be proven by forward
-  chaining the current facts.  Note that @{command "hence"} is also
-  equivalent to ``@{command "from"}~@{text this}~@{command "have"}''.
-  
-  \item @{command "thus"} abbreviates ``@{command "then"}~@{command
-  "show"}''.  Note that @{command "thus"} is also equivalent to
-  ``@{command "from"}~@{text this}~@{command "show"}''.
-  
-  \item @{command "print_statement"}~@{text a} prints facts from the
-  current theory or proof context in long statement form, according to
-  the syntax for @{command "lemma"} given above.
-
-  \end{description}
-
-  Any goal statement causes some term abbreviations (such as
-  @{variable_ref "?thesis"}) to be bound automatically, see also
-  \secref{sec:term-abbrev}.
-
-  The optional case names of @{element_ref "obtains"} have a twofold
-  meaning: (1) during the proof of this claim they refer to the local
-  context introductions, (2) the resulting rule is annotated
-  accordingly to support symbolic case splits when used with the
-  @{method_ref cases} method (cf.\ \secref{sec:cases-induct}).
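-
-  For illustration, a hypothetical long statement with explicit
-  context elements might read as follows (with placeholder predicates
-  @{text A} and @{text B}, and the proof left open via @{command
-  "sorry"}):
-
-  \begin{ttbox}
-  lemma
-    fixes n :: nat
-    assumes a: "A n"
-    shows "B n"
-  proof -
-    from a show "B n" sorry
-  qed
-  \end{ttbox}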
-*}
-
-
-section {* Refinement steps *}
-
-subsection {* Proof method expressions \label{sec:proof-meth} *}
-
-text {* Proof methods are either basic ones, or expressions composed
-  of methods via ``@{verbatim ","}'' (sequential composition),
-  ``@{verbatim "|"}'' (alternative choices), ``@{verbatim "?"}''
-  (try), ``@{verbatim "+"}'' (repeat at least once), ``@{verbatim
-  "["}@{text n}@{verbatim "]"}'' (restriction to first @{text n}
-  sub-goals, with default @{text "n = 1"}).  In practice, proof
-  methods are usually just a comma separated list of @{syntax
-  nameref}~@{syntax args} specifications.  Note that parentheses may
-  be dropped for single method specifications (with no arguments).
-
-  @{rail \<open>
-    @{syntax_def method}:
-      (@{syntax nameref} | '(' methods ')') (() | '?' | '+' | '[' @{syntax nat}? ']')
-    ;
-    methods: (@{syntax nameref} @{syntax args} | @{syntax method}) + (',' | '|')
-  \<close>}
-
-  Proper Isar proof methods do \emph{not} admit arbitrary goal
-  addressing, but refer either to the first sub-goal or all sub-goals
-  uniformly.  The goal restriction operator ``@{text "[n]"}''
-  evaluates a method expression within a sandbox consisting of the
-  first @{text n} sub-goals (which need to exist).  For example, the
-  method ``@{text "simp_all[3]"}'' simplifies the first three
-  sub-goals, while ``@{text "(rule foo, simp_all)[]"}'' simplifies all
-  new goals that emerge from applying rule @{text "foo"} to the
-  originally first one.
-
-  Improper methods, notably tactic emulations, offer a separate
-  low-level goal addressing scheme as explicit argument to the
-  individual tactic being involved.  Here ``@{text "[!]"}'' refers to
-  all goals, and ``@{text "[n-]"}'' to all goals starting from @{text
-  "n"}.
-
-  @{rail \<open>
-    @{syntax_def goal_spec}:
-      '[' (@{syntax nat} '-' @{syntax nat} | @{syntax nat} '-' | @{syntax nat} | '!' ) ']'
-  \<close>}
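-
-  For example, a hypothetical composed expression used in a tactic
-  script might read as follows (the rule name @{text foo} is a
-  placeholder): after applying rule @{text foo}, the alternative of
-  @{method simp} or @{method blast} is repeated at least once.
-
-  \begin{ttbox}
-  apply (rule foo, (simp | blast)+)
-  \end{ttbox}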
-*}
-
-
-subsection {* Initial and terminal proof steps \label{sec:proof-steps} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "proof"} & : & @{text "proof(prove) \<rightarrow> proof(state)"} \\
-    @{command_def "qed"} & : & @{text "proof(state) \<rightarrow> proof(state) | local_theory | theory"} \\
-    @{command_def "by"} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
-    @{command_def ".."} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
-    @{command_def "."} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
-    @{command_def "sorry"} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
-  \end{matharray}
-
-  Arbitrary goal refinement via tactics is considered harmful.
-  Structured proof composition in Isar admits proof methods to be
-  invoked in two places only.
-
-  \begin{enumerate}
-
-  \item An \emph{initial} refinement step @{command_ref
-  "proof"}~@{text "m\<^sub>1"} reduces a newly stated goal to a number
-  of sub-goals that are to be solved later.  Facts are passed to
-  @{text "m\<^sub>1"} for forward chaining, if so indicated by @{text
-  "proof(chain)"} mode.
-  
-  \item A \emph{terminal} conclusion step @{command_ref "qed"}~@{text
-  "m\<^sub>2"} is intended to solve remaining goals.  No facts are
-  passed to @{text "m\<^sub>2"}.
-
-  \end{enumerate}
-
-  The only other (proper) way to affect pending goals in a proof body
-  is by @{command_ref "show"}, which involves an explicit statement of
-  what is to be solved eventually.  Thus we avoid the fundamental
-  problem of unstructured tactic scripts that consist of numerous
-  consecutive goal transformations, with invisible effects.
-
-  \medskip As a general rule of thumb for good proof style, initial
-  proof methods should either solve the goal completely, or constitute
-  some well-understood reduction to new sub-goals.  Arbitrary
-  automatic proof tools that are prone to leave a large number of badly
-  structured sub-goals are no help in continuing the proof document in
-  an intelligible manner.
-
-  Unless given explicitly by the user, the default initial method is
-  @{method_ref (Pure) rule} (or its classical variant @{method_ref
-  rule}), which applies a single standard elimination or introduction
-  rule according to the topmost symbol involved.  There is no separate
-  default terminal method.  Any remaining goals are always solved by
-  assumption in the very last step.
-
-  @{rail \<open>
-    @@{command proof} method?
-    ;
-    @@{command qed} method?
-    ;
-    @@{command "by"} method method?
-    ;
-    (@@{command "."} | @@{command ".."} | @@{command sorry})
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "proof"}~@{text "m\<^sub>1"} refines the goal by proof
-  method @{text "m\<^sub>1"}; facts for forward chaining are passed if so
-  indicated by @{text "proof(chain)"} mode.
-  
-  \item @{command "qed"}~@{text "m\<^sub>2"} refines any remaining goals by
-  proof method @{text "m\<^sub>2"} and concludes the sub-proof by assumption.
-  If the goal had been @{text "show"} (or @{text "thus"}), some
-  pending sub-goal is solved as well by the rule resulting from the
-  result \emph{exported} into the enclosing goal context.  Thus @{text
-  "qed"} may fail for two reasons: either @{text "m\<^sub>2"} fails, or the
-  resulting rule does not fit to any pending goal\footnote{This
-  includes any additional ``strong'' assumptions as introduced by
-  @{command "assume"}.} of the enclosing context.  Debugging such a
-  situation might involve temporarily changing @{command "show"} into
-  @{command "have"}, or weakening the local context by replacing
-  occurrences of @{command "assume"} by @{command "presume"}.
-  
-  \item @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal
-  proof}\index{proof!terminal}; it abbreviates @{command
-  "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"}, but with
-  backtracking across both methods.  Debugging an unsuccessful
-  @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} command can be done by expanding its
-  definition; in many cases @{command "proof"}~@{text "m\<^sub>1"} (or even
-  @{text "apply"}~@{text "m\<^sub>1"}) is already sufficient to see the
-  problem.
-
-  \item ``@{command ".."}'' is a \emph{default
-  proof}\index{proof!default}; it abbreviates @{command "by"}~@{text
-  "rule"}.
-
-  \item ``@{command "."}'' is a \emph{trivial
-  proof}\index{proof!trivial}; it abbreviates @{command "by"}~@{text
-  "this"}.
-  
-  \item @{command "sorry"} is a \emph{fake proof}\index{proof!fake}
-  pretending to solve the pending claim without further ado.  This
-  only works in interactive development, or if the @{attribute
-  quick_and_dirty} option is enabled.  Facts emerging from fake
-  proofs are not the real thing.  Internally, the derivation object is
-  tainted by an oracle invocation, which may be inspected via the
-  theorem status \cite{isabelle-implementation}.
-  
-  The most important application of @{command "sorry"} is to support
-  experimentation and top-down proof development.
-
-  \end{description}
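-
-  A minimal example of this scheme, assuming the HOL rule @{text
-  impI} for introduction of implication, might read:
-
-  \begin{ttbox}
-  have "A --> A"
-  proof (rule impI)
-    assume "A"
-    then show "A" .
-  qed
-  \end{ttbox}
-
-  Since the remaining goal after the initial step is solved by
-  assumption anyway, the same claim could also be finished by the
-  terminal proof ``@{command "by"}~@{text "(rule impI)"}''.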
-*}
-
-
-subsection {* Fundamental methods and attributes \label{sec:pure-meth-att} *}
-
-text {*
-  The following proof methods and attributes refer to basic logical
-  operations of Isar.  Further methods and attributes are provided by
-  several generic and object-logic specific tools and packages (see
-  \chref{ch:gen-tools} and \partref{part:hol}).
-
-  \begin{matharray}{rcl}
-    @{command_def "print_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\[0.5ex]
-    @{method_def "-"} & : & @{text method} \\
-    @{method_def "fact"} & : & @{text method} \\
-    @{method_def "assumption"} & : & @{text method} \\
-    @{method_def "this"} & : & @{text method} \\
-    @{method_def (Pure) "rule"} & : & @{text method} \\
-    @{attribute_def (Pure) "intro"} & : & @{text attribute} \\
-    @{attribute_def (Pure) "elim"} & : & @{text attribute} \\
-    @{attribute_def (Pure) "dest"} & : & @{text attribute} \\
-    @{attribute_def (Pure) "rule"} & : & @{text attribute} \\[0.5ex]
-    @{attribute_def "OF"} & : & @{text attribute} \\
-    @{attribute_def "of"} & : & @{text attribute} \\
-    @{attribute_def "where"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method fact} @{syntax thmrefs}?
-    ;
-    @@{method (Pure) rule} @{syntax thmrefs}?
-    ;
-    rulemod: ('intro' | 'elim' | 'dest')
-      ((('!' | () | '?') @{syntax nat}?) | 'del') ':' @{syntax thmrefs}
-    ;
-    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest})
-      ('!' | () | '?') @{syntax nat}?
-    ;
-    @@{attribute (Pure) rule} 'del'
-    ;
-    @@{attribute OF} @{syntax thmrefs}
-    ;
-    @@{attribute of} @{syntax insts} ('concl' ':' @{syntax insts})? \<newline>
-      (@'for' (@{syntax vars} + @'and'))?
-    ;
-    @@{attribute "where"}
-      ((@{syntax name} | @{syntax var} | @{syntax typefree} | @{syntax typevar}) '='
-      (@{syntax type} | @{syntax term}) * @'and') \<newline>
-      (@'for' (@{syntax vars} + @'and'))?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "print_rules"} prints rules declared via attributes
-  @{attribute (Pure) intro}, @{attribute (Pure) elim}, @{attribute
-  (Pure) dest} of Isabelle/Pure.
-
-  See also the analogous @{command "print_claset"} command for similar
-  rule declarations of the classical reasoner
-  (\secref{sec:classical}).
-
-  \item ``@{method "-"}'' (minus) does nothing but insert the forward
-  chaining facts as premises into the goal.  Note that command
-  @{command_ref "proof"} without any method actually performs a single
-  reduction step using the @{method_ref (Pure) rule} method; thus a plain
-  \emph{do-nothing} proof step would be ``@{command "proof"}~@{text
-  "-"}'' rather than @{command "proof"} alone.
-  
-  \item @{method "fact"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} composes some fact from
-  @{text "a\<^sub>1, \<dots>, a\<^sub>n"} (or implicitly from the current proof context)
-  modulo unification of schematic type and term variables.  The rule
-  structure is not taken into account, i.e.\ meta-level implication is
-  considered atomic.  This is the same principle underlying literal
-  facts (cf.\ \secref{sec:syn-att}): ``@{command "have"}~@{text
-  "\<phi>"}~@{command "by"}~@{text fact}'' is equivalent to ``@{command
-  "note"}~@{verbatim "`"}@{text \<phi>}@{verbatim "`"}'' provided that
-  @{text "\<turnstile> \<phi>"} is an instance of some known @{text "\<turnstile> \<phi>"} in the
-  proof context.
-  
-  \item @{method assumption} solves some goal by a single assumption
-  step.  All given facts are guaranteed to participate in the
-  refinement; this means there may be only 0 or 1 in the first place.
-  Recall that @{command "qed"} (\secref{sec:proof-steps}) already
-  concludes any remaining sub-goals by assumption, so structured
-  proofs usually need not quote the @{method assumption} method at
-  all.
-  
-  \item @{method this} applies all of the current facts directly as
-  rules.  Recall that ``@{command "."}'' (dot) abbreviates ``@{command
-  "by"}~@{text this}''.
-  
-  \item @{method (Pure) rule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some rule given as
-  argument in backward manner; facts are used to reduce the rule
-  before applying it to the goal.  Thus @{method (Pure) rule} without facts
-  is plain introduction, while with facts it becomes elimination.
-  
-  When no arguments are given, the @{method (Pure) rule} method tries to pick
-  appropriate rules automatically, as declared in the current context
-  using the @{attribute (Pure) intro}, @{attribute (Pure) elim},
-  @{attribute (Pure) dest} attributes (see below).  This is the
-  default behavior of @{command "proof"} and ``@{command ".."}'' 
-  (double-dot) steps (see \secref{sec:proof-steps}).
-  
-  \item @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
-  @{attribute (Pure) dest} declare introduction, elimination, and
-  destruct rules, to be used with method @{method (Pure) rule}, and similar
-  tools.  Note that the latter will ignore rules declared with
-  ``@{text "?"}'', while ``@{text "!"}''  are used most aggressively.
-  
-  The classical reasoner (see \secref{sec:classical}) introduces its
-  own variants of these attributes; use qualified names to access the
-  present versions of Isabelle/Pure, i.e.\ @{attribute (Pure)
-  "Pure.intro"}.
-  
-  \item @{attribute (Pure) rule}~@{text del} undeclares introduction,
-  elimination, or destruct rules.
-
-  \item @{attribute OF}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some theorem to all
-  of the given rules @{text "a\<^sub>1, \<dots>, a\<^sub>n"} in canonical right-to-left
-  order, which means that premises stemming from the @{text "a\<^sub>i"}
-  emerge in parallel in the result, without interfering with each
-  other.  In many practical situations, the @{text "a\<^sub>i"} do not have
-  premises themselves, so @{text "rule [OF a\<^sub>1 \<dots> a\<^sub>n]"} can actually be
-  read as functional application (modulo unification).
-
-  Argument positions may be effectively skipped by using ``@{text _}''
-  (underscore), which refers to the propositional identity rule in the
-  Pure theory.
-  
-  \item @{attribute of}~@{text "t\<^sub>1 \<dots> t\<^sub>n"} performs positional
-  instantiation of term variables.  The terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"} are
-  substituted for any schematic variables occurring in a theorem from
-  left to right; ``@{text _}'' (underscore) indicates to skip a
-  position.  Arguments following a ``@{text "concl:"}'' specification
-  refer to positions of the conclusion of a rule.
-
-  An optional context of local variables @{text "\<FOR> x\<^sub>1 \<dots> x\<^sub>m"} may
-  be specified: the instantiated theorem is exported, and these
-  variables become schematic (usually with some shifting of indices).
-  
-  \item @{attribute "where"}~@{text "x\<^sub>1 = t\<^sub>1 \<AND> \<dots> x\<^sub>n = t\<^sub>n"}
-  performs named instantiation of schematic type and term variables
-  occurring in a theorem.  Schematic variables have to be specified on
-  the left-hand side (e.g.\ @{text "?x1.3"}).  The question mark may
-  be omitted if the variable name is a plain identifier without index.
-  As type instantiations are inferred from term instantiations,
-  explicit type instantiations are seldom necessary.
-
-  An optional context of local variables @{text "\<FOR> x\<^sub>1 \<dots> x\<^sub>m"} may
-  be specified as for @{attribute "of"} above.
-
-  \end{description}
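-
-  For example, assuming the HOL rule @{text conjI} and hypothetical
-  facts @{text "a: A"} and @{text "b: B"} in the context, the
-  following attribute applications produce instances of that rule:
-
-  \begin{ttbox}
-  thm conjI [OF a b]
-  thm conjI [of A B]
-  thm conjI [where Q = B]
-  \end{ttbox}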
-*}
-
-
-subsection {* Emulating tactic scripts \label{sec:tactic-commands} *}
-
-text {*
-  Isar provides separate commands to accommodate tactic-style
-  proof scripts within the same system.  While being outside the
-  orthodox Isar proof language, these might come in handy for
-  interactive exploration and debugging, or even actual tactical proof
-  within new-style theories (to benefit from document preparation, for
-  example).  See also \secref{sec:tactics} for actual tactics that
-  have been encapsulated as proof methods.  Proper proof methods may
-  be used in scripts, too.
-
-  \begin{matharray}{rcl}
-    @{command_def "apply"}@{text "\<^sup>*"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
-    @{command_def "apply_end"}@{text "\<^sup>*"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "done"}@{text "\<^sup>*"} & : & @{text "proof(prove) \<rightarrow> proof(state) | local_theory | theory"} \\
-    @{command_def "defer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "prefer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "back"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow> proof"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    ( @@{command apply} | @@{command apply_end} ) @{syntax method}
-    ;
-    @@{command defer} @{syntax nat}?
-    ;
-    @@{command prefer} @{syntax nat}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "apply"}~@{text m} applies proof method @{text m} in
-  initial position, but unlike @{command "proof"} it retains ``@{text
-  "proof(prove)"}'' mode.  Thus consecutive method applications may be
-  given just as in tactic scripts.
-  
-  Facts are passed to @{text m} as indicated by the goal's
-  forward-chain mode, and are \emph{consumed} afterwards.  Thus any
-  further @{command "apply"} command would always work in a purely
-  backward manner.
-  
-  \item @{command "apply_end"}~@{text "m"} applies proof method @{text
-  m} as if in terminal position.  Basically, this simulates a
-  multi-step tactic script for @{command "qed"}, but may be given
-  anywhere within the proof body.
-  
-  No facts are passed to @{text m} here.  Furthermore, the static
-  context is that of the enclosing goal (as for actual @{command
-  "qed"}).  Thus the proof method may not refer to any assumptions
-  introduced in the current body, for example.
-  
-  \item @{command "done"} completes a proof script, provided that the
-  current goal state is solved completely.  Note that actual
-  structured proof commands (e.g.\ ``@{command "."}'' or @{command
-  "sorry"}) may be used to conclude proof scripts as well.
-
-  \item @{command "defer"}~@{text n} and @{command "prefer"}~@{text n}
-  shuffle the list of pending goals: @{command "defer"} puts off
-  sub-goal @{text n} to the end of the list (@{text "n = 1"} by
-  default), while @{command "prefer"} brings sub-goal @{text n} to the
-  front.
-  
-  \item @{command "back"} does back-tracking over the result sequence
-  of the latest proof command.  Any proof command may return multiple
-  results, and this command explores the possibilities step-by-step.
-  It is mainly useful for experimentation and interactive exploration,
-  and should be avoided in finished proofs.
-  
-  \end{description}
-
-  Any proper Isar proof method may be used with tactic script commands
-  such as @{command "apply"}.  A few additional emulations of actual
-  tactics are provided as well; these would never be used in actual
-  structured proofs, of course.
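-
-  A small self-contained script in HOL, using the standard rules
-  @{text impI} and @{text conjE}, might read:
-
-  \begin{ttbox}
-  lemma "A & B --> B"
-    apply (rule impI)
-    apply (erule conjE)
-    apply assumption
-    done
-  \end{ttbox}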
-*}
-
-
-subsection {* Defining proof methods *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "method_setup"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command method_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "method_setup"}~@{text "name = text description"}
-  defines a proof method in the current theory.  The given @{text
-  "text"} has to be an ML expression of type
-  @{ML_type "(Proof.context -> Proof.method) context_parser"}, cf.\
-  basic parsers defined in structure @{ML_structure Args} and @{ML_structure
-  Attrib}.  There are also combinators like @{ML METHOD} and @{ML
-  SIMPLE_METHOD} to turn certain tactic forms into official proof
-  methods; the primed versions refer to tactics with explicit goal
-  addressing.
-
-  Here are some example method definitions:
-
-  \end{description}
-*}
-
-  method_setup my_method1 = {*
-    Scan.succeed (K (SIMPLE_METHOD' (fn i: int => no_tac)))
-  *}  "my first method (without any arguments)"
-
-  method_setup my_method2 = {*
-    Scan.succeed (fn ctxt: Proof.context =>
-      SIMPLE_METHOD' (fn i: int => no_tac))
-  *}  "my second method (with context)"
-
-  method_setup my_method3 = {*
-    Attrib.thms >> (fn thms: thm list => fn ctxt: Proof.context =>
-      SIMPLE_METHOD' (fn i: int => no_tac))
-  *}  "my third method (with theorem arguments and context)"
-
-
-section {* Generalized elimination \label{sec:obtain} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "obtain"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
-    @{command_def "guess"}@{text "\<^sup>*"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  Generalized elimination means that additional elements with certain
-  properties may be introduced in the current context, by virtue of a
-  locally proven ``soundness statement''.  Technically speaking, the
-  @{command "obtain"} language element is like a declaration of
-  @{command "fix"} and @{command "assume"} (see also see
-  \secref{sec:proof-context}), together with a soundness proof of its
-  additional claim.  According to the nature of existential reasoning,
-  assumptions get eliminated from any result exported from the context
-  later, provided that the corresponding parameters do \emph{not}
-  occur in the conclusion.
-
-  @{rail \<open>
-    @@{command obtain} @{syntax parname}? (@{syntax vars} + @'and')
-      @'where' (@{syntax props} + @'and')
-    ;
-    @@{command guess} (@{syntax vars} + @'and')
-  \<close>}
-
-  The derived Isar command @{command "obtain"} is defined as follows
-  (where @{text "b\<^sub>1, \<dots>, b\<^sub>k"} shall refer to (optional)
-  facts indicated for forward chaining).
-  \begin{matharray}{l}
-    @{text "\<langle>using b\<^sub>1 \<dots> b\<^sub>k\<rangle>"}~~@{command "obtain"}~@{text "x\<^sub>1 \<dots> x\<^sub>m \<WHERE> a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n  \<langle>proof\<rangle> \<equiv>"} \\[1ex]
-    \quad @{command "have"}~@{text "\<And>thesis. (\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
-    \quad @{command "proof"}~@{method succeed} \\
-    \qquad @{command "fix"}~@{text thesis} \\
-    \qquad @{command "assume"}~@{text "that [Pure.intro?]: \<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis"} \\
-    \qquad @{command "then"}~@{command "show"}~@{text thesis} \\
-    \quad\qquad @{command "apply"}~@{text -} \\
-    \quad\qquad @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>k  \<langle>proof\<rangle>"} \\
-    \quad @{command "qed"} \\
-    \quad @{command "fix"}~@{text "x\<^sub>1 \<dots> x\<^sub>m"}~@{command "assume"}@{text "\<^sup>* a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} \\
-  \end{matharray}
-
-  Typically, the soundness proof is relatively straightforward, often
-  just by canonical automated tools such as ``@{command "by"}~@{text
-  simp}'' or ``@{command "by"}~@{text blast}''.  Accordingly, the
-  ``@{text that}'' reduction above is declared as simplification and
-  introduction rule.
-
-  In a sense, @{command "obtain"} represents at the level of Isar
-  proofs what would be meta-logical existential quantifiers and
-  conjunctions.  This concept has a broad range of useful
-  applications, ranging from plain elimination (or introduction) of
-  object-level existentials and conjunctions, to elimination over
-  results of symbolic evaluation of recursive definitions, for
-  example.  Also note that @{command "obtain"} without parameters acts
-  much like @{command "have"}, where the result is treated as a
-  genuine assumption.
-
-  An alternative name to be used instead of ``@{text that}'' above may
-  be given in parentheses.
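-
-  A canonical example eliminates an object-level existential
-  quantifier of HOL (written @{text "EX"} in ASCII notation):
-
-  \begin{ttbox}
-  assume "EX x. P x"
-  then obtain x where "P x" ..
-  \end{ttbox}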
-
-  \medskip The improper variant @{command "guess"} is similar to
-  @{command "obtain"}, but derives the obtained statement from the
-  course of reasoning!  The proof starts with a fixed goal @{text
-  thesis}.  The subsequent proof may refine this to anything of the
-  form @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots>
-  \<phi>\<^sub>n \<Longrightarrow> thesis"}, but must not introduce new subgoals.  The
-  final goal state is then used as reduction rule for the obtain
-  scheme described above.  Obtained parameters @{text "x\<^sub>1, \<dots>,
-  x\<^sub>m"} are marked as internal by default, which prevents the
-  proof context from being polluted by ad-hoc variables.  The variable
-  names and type constraints given as arguments for @{command "guess"}
-  specify a prefix of obtained parameters explicitly in the text.
-
-  It is important to note that the facts introduced by @{command
-  "obtain"} and @{command "guess"} may not be polymorphic: any
-  type-variables occurring here are fixed in the present context!
-*}
-
-
-section {* Calculational reasoning \label{sec:calculation} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "also"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "finally"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
-    @{command_def "moreover"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "ultimately"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
-    @{command_def "print_trans_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute trans} & : & @{text attribute} \\
-    @{attribute sym} & : & @{text attribute} \\
-    @{attribute symmetric} & : & @{text attribute} \\
-  \end{matharray}
-
-  Calculational proof is forward reasoning with implicit application
-  of transitivity rules (such as those of @{text "="}, @{text "\<le>"},
-  @{text "<"}).  Isabelle/Isar maintains an auxiliary fact register
-  @{fact_ref calculation} for accumulating results obtained by
-  transitivity composed with the current result.  Command @{command
-  "also"} updates @{fact calculation} involving @{fact this}, while
-  @{command "finally"} exhibits the final @{fact calculation} by
-  forward chaining towards the next goal statement.  Both commands
-  require valid current facts, i.e.\ may occur only after commands
-  that produce theorems such as @{command "assume"}, @{command
-  "note"}, or some finished proof of @{command "have"}, @{command
-  "show"} etc.  The @{command "moreover"} and @{command "ultimately"}
-  commands are similar to @{command "also"} and @{command "finally"},
-  but only collect further results in @{fact calculation} without
-  applying any rules yet.
-
-  Also note that the implicit term abbreviation ``@{text "\<dots>"}'' has
-  its canonical application with calculational proofs.  It refers to
-  the argument of the preceding statement. (The argument of a curried
-  infix expression happens to be its right-hand side.)
-
-  Isabelle/Isar calculations are implicitly subject to block structure
-  in the sense that new threads of calculational reasoning are
-  commenced for any new block (as opened by a local goal, for
-  example).  This means that, apart from being able to nest
-  calculations, there is no separate \emph{begin-calculation} command
-  required.
-
-  \medskip The Isar calculation proof commands may be defined as
-  follows:\footnote{We suppress internal bookkeeping such as proper
-  handling of block-structure.}
-
-  \begin{matharray}{rcl}
-    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
-    @{command "also"}@{text "\<^sub>n+1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
-    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\[0.5ex]
-    @{command "moreover"} & \equiv & @{command "note"}~@{text "calculation = calculation this"} \\
-    @{command "ultimately"} & \equiv & @{command "moreover"}~@{command "from"}~@{text calculation} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{command also} | @@{command finally}) ('(' @{syntax thmrefs} ')')?
-    ;
-    @@{attribute trans} (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "also"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains the auxiliary
-  @{fact calculation} register as follows.  The first occurrence of
-  @{command "also"} in some calculational thread initializes @{fact
-  calculation} by @{fact this}. Any subsequent @{command "also"} on
-  the same level of block-structure updates @{fact calculation} by
-  some transitivity rule applied to @{fact calculation} and @{fact
-  this} (in that order).  Transitivity rules are picked from the
-  current context, unless alternative rules are given as explicit
-  arguments.
-
-  \item @{command "finally"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintaining @{fact
-  calculation} in the same way as @{command "also"}, and concludes the
-  current calculational thread.  The final result is exhibited as fact
-  for forward chaining towards the next goal. Basically, @{command
-  "finally"} just abbreviates @{command "also"}~@{command
-  "from"}~@{fact calculation}.  Typical idioms for concluding
-  calculational proofs are ``@{command "finally"}~@{command
-  "show"}~@{text ?thesis}~@{command "."}'' and ``@{command
-  "finally"}~@{command "have"}~@{text \<phi>}~@{command "."}''.
-
-  \item @{command "moreover"} and @{command "ultimately"} are
-  analogous to @{command "also"} and @{command "finally"}, but collect
-  results only, without applying rules.
-
-  \item @{command "print_trans_rules"} prints the list of transitivity
-  rules (for calculational commands @{command "also"} and @{command
-  "finally"}) and symmetry rules (for the @{attribute symmetric}
-  operation and single-step elimination patterns) of the current
-  context.
-
-  \item @{attribute trans} declares theorems as transitivity rules.
-
-  \item @{attribute sym} declares symmetry rules, as well as
-  @{attribute "Pure.elim"}@{text "?"} rules.
-
-  \item @{attribute symmetric} resolves a theorem with some rule
-  declared as @{attribute sym} in the current context.  For example,
-  ``@{command "assume"}~@{text "[symmetric]: x = y"}'' produces a
-  swapped fact derived from that assumption.
-
-  In structured proof texts it is often more appropriate to use an
-  explicit single-step elimination proof, such as ``@{command
-  "assume"}~@{text "x = y"}~@{command "then"}~@{command "have"}~@{text
-  "y = x"}~@{command ".."}''.
-
-  \end{description}
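-
-  A minimal calculational proof over natural number arithmetic (where
-  each step happens to be provable by @{method simp}) might read:
-
-  \begin{ttbox}
-  have "(2::nat) + 2 = 4" by simp
-  also have "... = 2 * 2" by simp
-  finally have "(2::nat) + 2 = 2 * 2" .
-  \end{ttbox}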
-*}
-
-
-section {* Proof by cases and induction \label{sec:cases-induct} *}
-
-subsection {* Rule contexts *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "case"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "print_cases"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def case_names} & : & @{text attribute} \\
-    @{attribute_def case_conclusion} & : & @{text attribute} \\
-    @{attribute_def params} & : & @{text attribute} \\
-    @{attribute_def consumes} & : & @{text attribute} \\
-  \end{matharray}
-
-  The puristic way to build up Isar proof contexts is by explicit
-  language elements like @{command "fix"}, @{command "assume"},
-  @{command "let"} (see \secref{sec:proof-context}).  This is adequate
-  for plain natural deduction, but easily becomes unwieldy in concrete
-  verification tasks, which typically involve big induction rules with
-  several cases.
-
-  The @{command "case"} command provides a shorthand to refer to a
-  local context symbolically: certain proof methods provide an
-  environment of named ``cases'' of the form @{text "c: x\<^sub>1, \<dots>,
-  x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>n"}; the effect of ``@{command
-  "case"}~@{text c}'' is then equivalent to ``@{command "fix"}~@{text
-  "x\<^sub>1 \<dots> x\<^sub>m"}~@{command "assume"}~@{text "c: \<phi>\<^sub>1 \<dots>
-  \<phi>\<^sub>n"}''.  Term bindings may be covered as well, notably
-  @{variable ?case} for the main conclusion.
-
-  By default, the ``terminology'' @{text "x\<^sub>1, \<dots>, x\<^sub>m"} of
-  a case value is marked as hidden, i.e.\ there is no way to refer to
-  such parameters in the subsequent proof text.  After all, original
-  rule parameters stem from somewhere outside of the current proof
-  text.  By using the explicit form ``@{command "case"}~@{text "(c
-  y\<^sub>1 \<dots> y\<^sub>m)"}'' instead, the proof author is able to
-  choose local names that fit nicely into the current context.
-
-  \medskip It is important to note that proper use of @{command
-  "case"} does not provide means to peek at the current goal state,
-  which is not directly observable in Isar!  Nonetheless, goal
-  refinement commands do provide named cases @{text "goal\<^sub>i"}
-  for each subgoal @{text "i = 1, \<dots>, n"} of the resulting goal state.
-  Using this extra feature requires great care, because some bits of
-  the internal tactical machinery intrude the proof text.  In
-  particular, parameter names stemming from the left-overs of automated
-  reasoning tools are usually quite unpredictable.
-
-  Under normal circumstances, the text of cases emerges from standard
-  elimination or induction rules, which in turn are derived from
-  previous theory specifications in a canonical way (say from
-  @{command "inductive"} definitions).
-
-  \medskip Proper cases are only available if both the proof method
-  and the rules involved support this.  By using appropriate
-  attributes, case names, conclusions, and parameters may be also
-  declared by hand.  Thus variant versions of rules that have been
-  derived manually become ready to use in advanced case analysis
-  later.
-
-  @{rail \<open>
-    @@{command case} (caseref | '(' caseref (('_' | @{syntax name}) *) ')')
-    ;
-    caseref: nameref attributes?
-    ;
-
-    @@{attribute case_names} ((@{syntax name} ( '[' (('_' | @{syntax name}) +) ']' ) ? ) +)
-    ;
-    @@{attribute case_conclusion} @{syntax name} (@{syntax name} * )
-    ;
-    @@{attribute params} ((@{syntax name} * ) + @'and')
-    ;
-    @@{attribute consumes} @{syntax int}?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "case"}~@{text "(c x\<^sub>1 \<dots> x\<^sub>m)"} invokes a named local
-  context @{text "c: x\<^sub>1, \<dots>, x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>m"}, as provided by an
-  appropriate proof method (such as @{method_ref cases} and
-  @{method_ref induct}).  The command ``@{command "case"}~@{text "(c
-  x\<^sub>1 \<dots> x\<^sub>m)"}'' abbreviates ``@{command "fix"}~@{text "x\<^sub>1 \<dots>
-  x\<^sub>m"}~@{command "assume"}~@{text "c: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}''.
-
-  \item @{command "print_cases"} prints all local contexts of the
-  current state, using Isar proof language notation.
-  
-  \item @{attribute case_names}~@{text "c\<^sub>1 \<dots> c\<^sub>k"} declares names for
-  the local contexts of premises of a theorem; @{text "c\<^sub>1, \<dots>, c\<^sub>k"}
-  refers to the \emph{prefix} of the list of premises. Each of the
-  cases @{text "c\<^sub>i"} can be of the form @{text "c[h\<^sub>1 \<dots> h\<^sub>n]"} where
-  the @{text "h\<^sub>1 \<dots> h\<^sub>n"} are the names of the hypotheses in case @{text "c\<^sub>i"}
-  from left to right.
-  
-  \item @{attribute case_conclusion}~@{text "c d\<^sub>1 \<dots> d\<^sub>k"} declares
-  names for the conclusions of a named premise @{text c}; here @{text
-  "d\<^sub>1, \<dots>, d\<^sub>k"} refers to the prefix of arguments of a logical formula
-  built by nesting a binary connective (e.g.\ @{text "\<or>"}).
-  
-  Note that proof methods such as @{method induct} and @{method
-  coinduct} already provide a default name for the conclusion as a
-  whole.  The need to name subformulas only arises with cases that
-  split into several sub-cases, as in common co-induction rules.
-
-  \item @{attribute params}~@{text "p\<^sub>1 \<dots> p\<^sub>m \<AND> \<dots> q\<^sub>1 \<dots> q\<^sub>n"} renames
-  the innermost parameters of premises @{text "1, \<dots>, n"} of some
-  theorem.  An empty list of names may be given to skip positions,
-  leaving the present parameters unchanged.
-  
-  Note that the default usage of case rules does \emph{not} directly
-  expose parameters to the proof context.
-  
-  \item @{attribute consumes}~@{text n} declares the number of ``major
-  premises'' of a rule, i.e.\ the number of facts to be consumed when
-  it is applied by an appropriate proof method.  The default value of
-  @{attribute consumes} is @{text "n = 1"}, which is appropriate for
-  the usual kind of cases and induction rules for inductive sets (cf.\
-  \secref{sec:hol-inductive}).  Rules without any @{attribute
-  consumes} declaration given are treated as if @{attribute
-  consumes}~@{text 0} had been specified.
-
-  A negative @{text n} is interpreted relatively to the total number
-  of premises of the rule in the target context.  Thus its absolute
-  value specifies the remaining number of premises, after subtracting
-  the prefix of major premises as indicated above. This form of
-  declaration has the technical advantage of being stable under more
-  morphisms, notably those that export the result from a nested
-  @{command_ref context} with additional assumptions.
-
-  Note that explicit @{attribute consumes} declarations are only
-  rarely needed; this is already taken care of automatically by the
-  higher-level @{attribute cases}, @{attribute induct}, and
-  @{attribute coinduct} declarations.
-
-  \end{description}
-*}
-
-
-subsection {* Proof methods *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def cases} & : & @{text method} \\
-    @{method_def induct} & : & @{text method} \\
-    @{method_def induction} & : & @{text method} \\
-    @{method_def coinduct} & : & @{text method} \\
-  \end{matharray}
-
-  The @{method cases}, @{method induct}, @{method induction},
-  and @{method coinduct}
-  methods provide a uniform interface to common proof techniques over
-  datatypes, inductive predicates (or sets), recursive functions etc.
-  The corresponding rules may be specified and instantiated in a
-  casual manner.  Furthermore, these methods provide named local
-  contexts that may be invoked via the @{command "case"} proof command
-  within the subsequent proof text.  This accommodates compact proof
-  texts even when reasoning about large specifications.
-
-  The @{method induct} method also provides some additional
-  infrastructure in order to be applicable to structured statements
-  (either using explicit meta-level connectives, or including facts
-  and parameters separately).  This avoids cumbersome encoding of
-  ``strengthened'' inductive statements within the object-logic.
-
-  Method @{method induction} differs from @{method induct} only in
-  the names of the facts in the local context invoked by the @{command "case"}
-  command.
-
-  @{rail \<open>
-    @@{method cases} ('(' 'no_simp' ')')? \<newline>
-      (@{syntax insts} * @'and') rule?
-    ;
-    (@@{method induct} | @@{method induction})
-      ('(' 'no_simp' ')')? (definsts * @'and') \<newline> arbitrary? taking? rule?
-    ;
-    @@{method coinduct} @{syntax insts} taking rule?
-    ;
-
-    rule: ('type' | 'pred' | 'set') ':' (@{syntax nameref} +) | 'rule' ':' (@{syntax thmref} +)
-    ;
-    definst: @{syntax name} ('==' | '\<equiv>') @{syntax term} | '(' @{syntax term} ')' | @{syntax inst}
-    ;
-    definsts: ( definst * )
-    ;
-    arbitrary: 'arbitrary' ':' ((@{syntax term} * ) @'and' +)
-    ;
-    taking: 'taking' ':' @{syntax insts}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method cases}~@{text "insts R"} applies method @{method
-  rule} with an appropriate case distinction theorem, instantiated to
-  the subjects @{text insts}.  Symbolic case names are bound according
-  to the rule's local contexts.
-
-  The rule is determined as follows, according to the facts and
-  arguments passed to the @{method cases} method:
-
-  \medskip
-  \begin{tabular}{llll}
-    facts           &                 & arguments   & rule \\\hline
-                    & @{method cases} &             & classical case split \\
-                    & @{method cases} & @{text t}   & datatype exhaustion (type of @{text t}) \\
-    @{text "\<turnstile> A t"} & @{method cases} & @{text "\<dots>"} & inductive predicate/set elimination (of @{text A}) \\
-    @{text "\<dots>"}     & @{method cases} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
-  \end{tabular}
-  \medskip
-
-  Several instantiations may be given, referring to the \emph{suffix}
-  of premises of the case rule; within each premise, the \emph{prefix}
-  of variables is instantiated.  In most situations, only a single
-  term needs to be specified; this refers to the first variable of the
-  last premise (it is usually the same for all cases).  The @{text
-  "(no_simp)"} option can be used to disable pre-simplification of
-  cases (see the description of @{method induct} below for details).
-
-  \item @{method induct}~@{text "insts R"} and
-  @{method induction}~@{text "insts R"} are analogous to the
-  @{method cases} method, but refer to induction rules, which are
-  determined as follows:
-
-  \medskip
-  \begin{tabular}{llll}
-    facts           &                  & arguments            & rule \\\hline
-                    & @{method induct} & @{text "P x"}        & datatype induction (type of @{text x}) \\
-    @{text "\<turnstile> A x"} & @{method induct} & @{text "\<dots>"}          & predicate/set induction (of @{text A}) \\
-    @{text "\<dots>"}     & @{method induct} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
-  \end{tabular}
-  \medskip
-  
-  Several instantiations may be given, each referring to some part of
-  a mutual inductive definition or datatype --- only related partial
-  induction rules may be used together, though.  Any of the lists of
-  terms @{text "P, x, \<dots>"} refers to the \emph{suffix} of variables
-  present in the induction rule.  This enables the writer to specify
-  only induction variables, or both predicates and variables, for
-  example.
-
-  Instantiations may be definitional: equations @{text "x \<equiv> t"}
-  introduce local definitions, which are inserted into the claim and
-  discharged after applying the induction rule.  Equalities reappear
-  in the inductive cases, but have been transformed according to the
-  induction principle being involved here.  In order to achieve
-  practically useful induction hypotheses, some variables occurring in
-  @{text t} need to be fixed (see below).  Instantiations of the form
-  @{text t}, where @{text t} is not a variable, are taken as a
-  shorthand for \mbox{@{text "x \<equiv> t"}}, where @{text x} is a fresh
-  variable. If this is not intended, @{text t} has to be enclosed in
-  parentheses.  By default, the equalities generated by definitional
-  instantiations are pre-simplified using a specific set of rules,
-  usually consisting of distinctness and injectivity theorems for
-  datatypes. This pre-simplification may cause some of the parameters
-  of an inductive case to disappear, or may even completely delete
-  some of the inductive cases, if one of the equalities occurring in
-  their premises can be simplified to @{text False}.  The @{text
-  "(no_simp)"} option can be used to disable pre-simplification.
-  Additional rules to be used in pre-simplification can be declared
-  using the @{attribute_def induct_simp} attribute.
-
-  The optional ``@{text "arbitrary: x\<^sub>1 \<dots> x\<^sub>m"}''
-  specification generalizes variables @{text "x\<^sub>1, \<dots>,
-  x\<^sub>m"} of the original goal before applying induction.  One can
-  separate variables by ``@{text "and"}'' to generalize them in other
-  goals than the first. Thus induction hypotheses may become
-  sufficiently general to get the proof through.  Together with
-  definitional instantiations, one may effectively perform induction
-  over expressions of a certain structure.
-  
-  The optional ``@{text "taking: t\<^sub>1 \<dots> t\<^sub>n"}''
-  specification provides additional instantiations of a prefix of
-  pending variables in the rule.  Such schematic induction rules
-  rarely occur in practice, though.
-
-  \item @{method coinduct}~@{text "inst R"} is analogous to the
-  @{method induct} method, but refers to coinduction rules, which are
-  determined as follows:
-
-  \medskip
-  \begin{tabular}{llll}
-    goal          &                    & arguments & rule \\\hline
-                  & @{method coinduct} & @{text x} & type coinduction (type of @{text x}) \\
-    @{text "A x"} & @{method coinduct} & @{text "\<dots>"} & predicate/set coinduction (of @{text A}) \\
-    @{text "\<dots>"}   & @{method coinduct} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
-  \end{tabular}
-  
-  Coinduction is the dual of induction.  Induction essentially
-  eliminates @{text "A x"} towards a generic result @{text "P x"},
-  while coinduction introduces @{text "A x"} starting with @{text "B
-  x"}, for a suitable ``bisimulation'' @{text B}.  The cases of a
-  coinduct rule are typically named after the predicates or sets being
-  covered, while the conclusions consist of several alternatives being
-  named after the individual destructor patterns.
-  
-  The given instantiation refers to the \emph{suffix} of variables
-  occurring in the rule's major premise, or conclusion if unavailable.
-  An additional ``@{text "taking: t\<^sub>1 \<dots> t\<^sub>n"}''
-  specification may be required in order to specify the bisimulation
-  to be used in the coinduction step.
-
-  \end{description}
-
-  The above methods produce named local contexts, as determined by the
-  instantiated rule as given in the text.  Beyond that, the @{method
-  induct} and @{method coinduct} methods guess further instantiations
-  from the goal specification itself.  Any persisting unresolved
-  schematic variables of the resulting rule will render the
-  corresponding case invalid.  The term binding @{variable ?case} for
-  the conclusion will be provided with each case, provided that term
-  is fully specified.
-
-  The @{command "print_cases"} command prints all named cases present
-  in the current proof state.
-
-  \medskip Despite the additional infrastructure, both @{method cases}
-  and @{method coinduct} merely apply a certain rule, after
-  instantiation, while conforming to the usual way of monotonic
-  natural deduction: the context of a structured statement @{text
-  "\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<dots>"}
-  reappears unchanged after the case split.
-
-  The @{method induct} method is fundamentally different in this
-  respect: the meta-level structure is passed through the
-  ``recursive'' course involved in the induction.  Thus the original
-  statement is basically replaced by separate copies, corresponding to
-  the induction hypotheses and conclusion; the original goal context
-  is no longer available.  Thus local assumptions, fixed parameters
-  and definitions effectively participate in the inductive rephrasing
-  of the original statement.
-
-  In @{method induct} proofs, local assumptions introduced by cases are split
-  into two different kinds: @{text hyps} stemming from the rule and
-  @{text prems} from the goal statement.  This is reflected in the
-  extracted cases accordingly, so invoking ``@{command "case"}~@{text
-  c}'' will provide separate facts @{text c.hyps} and @{text c.prems},
-  as well as fact @{text c} to hold the all-inclusive list.
-
-  In @{method induction} proofs, local assumptions introduced by cases are
-  split into three different kinds: @{text IH}, the induction hypotheses,
-  @{text hyps}, the remaining hypotheses stemming from the rule, and
-  @{text prems}, the assumptions from the goal statement. The names are
-  @{text c.IH}, @{text c.hyps} and @{text c.prems}, as above.
-
-
-  \medskip Facts presented to either method are consumed according to
-  the number of ``major premises'' of the rule involved, which is
-  usually 0 for plain cases and induction rules of datatypes etc.\ and
-  1 for rules of inductive predicates or sets and the like.  The
-  remaining facts are inserted into the goal verbatim before the
-  actual @{text cases}, @{text induct}, or @{text coinduct} rule is
-  applied.
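-
-  A typical structured induction proof over natural numbers, with the
-  modest claim @{text "n + 0 = n"} whose cases each happen to be
-  provable by @{method simp}, might read:
-
-  \begin{ttbox}
-  lemma "n + 0 = (n::nat)"
-  proof (induct n)
-    case 0
-    show ?case by simp
-  next
-    case (Suc n)
-    then show ?case by simp
-  qed
-  \end{ttbox}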
-*}
-
-
-subsection {* Declaring rules *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "print_induct_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def cases} & : & @{text attribute} \\
-    @{attribute_def induct} & : & @{text attribute} \\
-    @{attribute_def coinduct} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute cases} spec
-    ;
-    @@{attribute induct} spec
-    ;
-    @@{attribute coinduct} spec
-    ;
-
-    spec: (('type' | 'pred' | 'set') ':' @{syntax nameref}) | 'del'
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "print_induct_rules"} prints cases and induct rules
-  for predicates (or sets) and types of the current context.
-
-  \item @{attribute cases}, @{attribute induct}, and @{attribute
-  coinduct} (as attributes) declare rules for reasoning about
-  (co)inductive predicates (or sets) and types, using the
-  corresponding methods of the same name.  Certain definitional
-  packages of object-logics usually declare emerging cases and
-  induction rules as expected, so users rarely need to intervene.
-
-  Rules may be deleted via the @{text "del"} specification, which
-  covers all of the @{text "type"}/@{text "pred"}/@{text "set"}
-  sub-categories simultaneously.  For example, @{attribute
-  cases}~@{text del} removes any @{attribute cases} rules declared for
-  some type, predicate, or set.
-  
-  Manual rule declarations usually refer to the @{attribute
-  case_names} and @{attribute params} attributes to adjust names of
-  cases and parameters of a rule; the @{attribute consumes}
-  declaration is taken care of automatically: @{attribute
-  consumes}~@{text 0} is specified for ``type'' rules and @{attribute
-  consumes}~@{text 1} for ``predicate'' / ``set'' rules.
-
-  \end{description}
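-
-  For illustration, an exhaustion rule could be declared manually as
-  follows (this particular declaration is redundant, since the
-  datatype package already provides it):
-
-  \begin{ttbox}
-  declare nat.exhaust [cases type: nat]
-  \end{ttbox}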
-*}
-
-end
--- a/src/Doc/IsarRef/Quick_Reference.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,231 +0,0 @@
-theory Quick_Reference
-imports Base Main
-begin
-
-chapter {* Isabelle/Isar quick reference \label{ap:refcard} *}
-
-section {* Proof commands *}
-
-subsection {* Primitives and basic syntax *}
-
-text {*
-  \begin{tabular}{ll}
-    @{command "fix"}~@{text x} & augment context by @{text "\<And>x. \<box>"} \\
-    @{command "assume"}~@{text "a: \<phi>"} & augment context by @{text "\<phi> \<Longrightarrow> \<box>"} \\
-    @{command "then"} & indicate forward chaining of facts \\
-    @{command "have"}~@{text "a: \<phi>"} & prove local result \\
-    @{command "show"}~@{text "a: \<phi>"} & prove local result, refining some goal \\
-    @{command "using"}~@{text a} & indicate use of additional facts \\
-    @{command "unfolding"}~@{text a} & unfold definitional equations \\
-    @{command "proof"}~@{text "m\<^sub>1"}~\dots~@{command "qed"}~@{text "m\<^sub>2"} & indicate proof structure and refinements \\
-    @{command "{"}~@{text "\<dots>"}~@{command "}"} & indicate explicit blocks \\
-    @{command "next"} & switch blocks \\
-    @{command "note"}~@{text "a = b"} & reconsider facts \\
-    @{command "let"}~@{text "p = t"} & abbreviate terms by higher-order matching \\
-    @{command "write"}~@{text "c  (mx)"} & declare local mixfix syntax \\
-  \end{tabular}
-
-  \medskip
-
-  \begin{tabular}{rcl}
-    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\
-    & @{text "|"} & @{text "prfx\<^sup>*"}~@{command "done"} \\
-    @{text prfx} & = & @{command "apply"}~@{text method} \\
-    & @{text "|"} & @{command "using"}~@{text "facts"} \\
-    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
-    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
-    & @{text "|"} & @{command "next"} \\
-    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
-    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
-    & @{text "|"} & @{command "write"}~@{text "name (mixfix)"} \\
-    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
-    & @{text "|"} & @{command "assume"}~@{text "name: props"} \\
-    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
-    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
-    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
-  \end{tabular}
-*}
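-
-text {* A tiny structured proof assembled from these primitives (the
-  statement is deliberately trivial): *}
-
-lemma "A \<longrightarrow> A"
-proof
-  assume a: "A"
-  from a show "A" .
-qed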
-
-
-subsection {* Abbreviations and synonyms *}
-
-text {*
-  \begin{tabular}{rcl}
-    @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} & @{text "\<equiv>"} &
-      @{command "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"} \\
-    @{command ".."} & @{text "\<equiv>"} & @{command "by"}~@{text rule} \\
-    @{command "."} & @{text "\<equiv>"} & @{command "by"}~@{text this} \\
-    @{command "hence"} & @{text "\<equiv>"} & @{command "then"}~@{command "have"} \\
-    @{command "thus"} & @{text "\<equiv>"} & @{command "then"}~@{command "show"} \\
-    @{command "from"}~@{text a} & @{text "\<equiv>"} & @{command "note"}~@{text a}~@{command "then"} \\
-    @{command "with"}~@{text a} & @{text "\<equiv>"} & @{command "from"}~@{text "a \<AND> this"} \\
-    @{command "from"}~@{text this} & @{text "\<equiv>"} & @{command "then"} \\
-    @{command "from"}~@{text this}~@{command "have"} & @{text "\<equiv>"} & @{command "hence"} \\
-    @{command "from"}~@{text this}~@{command "show"} & @{text "\<equiv>"} & @{command "thus"} \\
-  \end{tabular}
-*}
-
-
-subsection {* Derived elements *}
-
-text {*
-  \begin{tabular}{rcl}
-    @{command "also"}@{text "\<^sub>0"} & @{text "\<approx>"} &
-      @{command "note"}~@{text "calculation = this"} \\
-    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & @{text "\<approx>"} &
-      @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\
-    @{command "finally"} & @{text "\<approx>"} &
-      @{command "also"}~@{command "from"}~@{text calculation} \\[0.5ex]
-    @{command "moreover"} & @{text "\<approx>"} &
-      @{command "note"}~@{text "calculation = calculation this"} \\
-    @{command "ultimately"} & @{text "\<approx>"} &
-      @{command "moreover"}~@{command "from"}~@{text calculation} \\[0.5ex]
-    @{command "presume"}~@{text "a: \<phi>"} & @{text "\<approx>"} &
-      @{command "assume"}~@{text "a: \<phi>"} \\
-    @{command "def"}~@{text "a: x \<equiv> t"} & @{text "\<approx>"} &
-      @{command "fix"}~@{text x}~@{command "assume"}~@{text "a: x \<equiv> t"} \\
-    @{command "obtain"}~@{text "x \<WHERE> a: \<phi>"} & @{text "\<approx>"} &
-      @{text "\<dots>"}~@{command "fix"}~@{text x}~@{command "assume"}~@{text "a: \<phi>"} \\
-    @{command "case"}~@{text c} & @{text "\<approx>"} &
-      @{command "fix"}~@{text x}~@{command "assume"}~@{text "c: \<phi>"} \\
-    @{command "sorry"} & @{text "\<approx>"} &
-      @{command "by"}~@{text cheating} \\
-  \end{tabular}
-*}
-
-
-subsection {* Diagnostic commands *}
-
-text {*
-  \begin{tabular}{ll}
-    @{command "print_state"} & print current state \\
-    @{command "thm"}~@{text a} & print fact \\
-    @{command "prop"}~@{text \<phi>} & print proposition \\
-    @{command "term"}~@{text t} & print term \\
-    @{command "typ"}~@{text \<tau>} & print type \\
-  \end{tabular}
-*}
-
-
-section {* Proof methods *}
-
-text {*
-  \begin{tabular}{ll}
-    \multicolumn{2}{l}{\textbf{Single steps (forward-chaining facts)}} \\[0.5ex]
-    @{method assumption} & apply some assumption \\
-    @{method this} & apply current facts \\
-    @{method rule}~@{text a} & apply some rule  \\
-    @{method rule} & apply standard rule (default for @{command "proof"}) \\
-    @{method contradiction} & apply @{text "\<not>"} elimination rule (any order) \\
-    @{method cases}~@{text t} & case analysis (provides cases) \\
-    @{method induct}~@{text x} & proof by induction (provides cases) \\[2ex]
-
-    \multicolumn{2}{l}{\textbf{Repeated steps (inserting facts)}} \\[0.5ex]
-    @{method "-"} & no rules \\
-    @{method intro}~@{text a} & introduction rules \\
-    @{method intro_classes} & class introduction rules \\
-    @{method elim}~@{text a} & elimination rules \\
-    @{method unfold}~@{text a} & definitional rewrite rules \\[2ex]
-
-    \multicolumn{2}{l}{\textbf{Automated proof tools (inserting facts)}} \\[0.5ex]
-    @{method iprover} & intuitionistic proof search \\
-    @{method blast}, @{method fast} & Classical Reasoner \\
-    @{method simp}, @{method simp_all} & Simplifier (+ Splitter) \\
-    @{method auto}, @{method force} & Simplifier + Classical Reasoner \\
-    @{method arith} & Arithmetic procedures \\
-  \end{tabular}
-*}
-
-
-section {* Attributes *}
-
-text {*
-  \begin{tabular}{ll}
-    \multicolumn{2}{l}{\textbf{Rules}} \\[0.5ex]
-    @{attribute OF}~@{text a} & rule resolved with facts (skipping ``@{text _}'') \\
-    @{attribute of}~@{text t} & rule instantiated with terms (skipping ``@{text _}'') \\
-    @{attribute "where"}~@{text "x = t"} & rule instantiated with terms, by variable name \\
-    @{attribute symmetric} & resolution with symmetry rule \\
-    @{attribute THEN}~@{text b} & resolution with another rule \\
-    @{attribute rule_format} & result put into standard rule format \\
-    @{attribute elim_format} & destruct rule turned into elimination rule format \\[1ex]
-
-    \multicolumn{2}{l}{\textbf{Declarations}} \\[0.5ex]
-    @{attribute simp} & Simplifier rule \\
-    @{attribute intro}, @{attribute elim}, @{attribute dest} & Pure or Classical Reasoner rule \\
-    @{attribute iff} & Simplifier + Classical Reasoner rule \\
-    @{attribute split} & case split rule \\
-    @{attribute trans} & transitivity rule \\
-    @{attribute sym} & symmetry rule \\
-  \end{tabular}
-*}
-
-
-section {* Rule declarations and methods *}
-
-text {*
-  \begin{tabular}{l|lllll}
-      & @{method rule} & @{method iprover} & @{method blast} & @{method simp} & @{method auto} \\
-      &                &                   & @{method fast} & @{method simp_all} & @{method force} \\
-    \hline
-    @{attribute Pure.elim}@{text "!"} @{attribute Pure.intro}@{text "!"}
-      & @{text "\<times>"}    & @{text "\<times>"} \\
-    @{attribute Pure.elim} @{attribute Pure.intro}
-      & @{text "\<times>"}    & @{text "\<times>"} \\
-    @{attribute elim}@{text "!"} @{attribute intro}@{text "!"}
-      & @{text "\<times>"}    &                    & @{text "\<times>"}          &                     & @{text "\<times>"} \\
-    @{attribute elim} @{attribute intro}
-      & @{text "\<times>"}    &                    & @{text "\<times>"}          &                     & @{text "\<times>"} \\
-    @{attribute iff}
-      & @{text "\<times>"}    &                    & @{text "\<times>"}          & @{text "\<times>"}         & @{text "\<times>"} \\
-    @{attribute iff}@{text "?"}
-      & @{text "\<times>"} \\
-    @{attribute elim}@{text "?"} @{attribute intro}@{text "?"}
-      & @{text "\<times>"} \\
-    @{attribute simp}
-      &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
-    @{attribute cong}
-      &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
-    @{attribute split}
-      &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
-  \end{tabular}
-*}
-
-
-section {* Emulating tactic scripts *}
-
-subsection {* Commands *}
-
-text {*
-  \begin{tabular}{ll}
-    @{command "apply"}~@{text m} & apply proof method at initial position \\
-    @{command "apply_end"}~@{text m} & apply proof method near terminal position \\
-    @{command "done"} & complete proof \\
-    @{command "defer"}~@{text n} & move subgoal to end \\
-    @{command "prefer"}~@{text n} & move subgoal to beginning \\
-    @{command "back"} & backtrack last command \\
-  \end{tabular}
-*}
-
-
-subsection {* Methods *}
-
-text {*
-  \begin{tabular}{ll}
-    @{method rule_tac}~@{text insts} & resolution (with instantiation) \\
-    @{method erule_tac}~@{text insts} & elim-resolution (with instantiation) \\
-    @{method drule_tac}~@{text insts} & destruct-resolution (with instantiation) \\
-    @{method frule_tac}~@{text insts} & forward-resolution (with instantiation) \\
-    @{method cut_tac}~@{text insts} & insert facts (with instantiation) \\
-    @{method thin_tac}~@{text \<phi>} & delete assumptions \\
-    @{method subgoal_tac}~@{text \<phi>} & new claims \\
-    @{method rename_tac}~@{text x} & rename innermost goal parameters \\
-    @{method rotate_tac}~@{text n} & rotate assumptions of goal \\
-    @{method tactic}~@{text "text"} & arbitrary ML tactic \\
-    @{method case_tac}~@{text t} & exhaustion (datatypes) \\
-    @{method induct_tac}~@{text x} & induction (datatypes) \\
-    @{method ind_cases}~@{text t} & exhaustion + simplification (inductive predicates) \\
-  \end{tabular}
-*}
-
-end
--- a/src/Doc/IsarRef/Spec.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1347 +0,0 @@
-theory Spec
-imports Base Main
-begin
-
-chapter {* Specifications *}
-
-text {*
-  The Isabelle/Isar theory format integrates specifications and
-  proofs, supporting interactive development with unlimited undo
-  operation.  There is an integrated document preparation system (see
-  \chref{ch:document-prep}), for typesetting formal developments
-  together with informal text.  The resulting hyper-linked PDF
-  documents can be used both for WWW presentation and printed copies.
-
-  The Isar proof language (see \chref{ch:proofs}) is embedded into the
-  theory language as a proper sub-language.  Proof mode is entered by
-  stating some @{command theorem} or @{command lemma} at the theory
-  level, and left again with the final conclusion (e.g.\ via @{command
-  qed}).  Some theory specification mechanisms also require a proof,
-  such as @{command typedef} in HOL, which demands non-emptiness of
-  the representing sets.
-*}
-
-
-section {* Defining theories \label{sec:begin-thy} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "theory"} & : & @{text "toplevel \<rightarrow> theory"} \\
-    @{command_def (global) "end"} & : & @{text "theory \<rightarrow> toplevel"} \\
-  \end{matharray}
-
-  Isabelle/Isar theories are defined via theory files, which may
-  contain both specifications and proofs; occasionally definitional
-  mechanisms also require some explicit proof.  The theory body may be
-  sub-structured by means of \emph{local theory targets}, such as
-  @{command "locale"} and @{command "class"}.
-
-  The first proper command of a theory is @{command "theory"}, which
-  indicates imports of previous theories and optional dependencies on
-  other source files (usually in ML).  Just preceding the initial
-  @{command "theory"} command there may be an optional @{command
-  "header"} declaration, which is only relevant to document
-  preparation: see also the other section markup commands in
-  \secref{sec:markup}.
-
-  A theory is concluded by a final @{command (global) "end"} command,
-  one that does not belong to a local theory target.  No further
-  commands may follow such a global @{command (global) "end"},
-  although some user-interfaces might pretend that trailing input is
-  admissible.
-
-  @{rail \<open>
-    @@{command theory} @{syntax name} imports keywords? \<newline> @'begin'
-    ;
-    imports: @'imports' (@{syntax name} +)
-    ;
-    keywords: @'keywords' (keyword_decls + @'and')
-    ;
-    keyword_decls: (@{syntax string} +)
-      ('::' @{syntax name} @{syntax tags})? ('==' @{syntax name})?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "theory"}~@{text "A \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"}
-  starts a new theory @{text A} based on the merge of existing
-  theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}.  Due to the possibility to import more
-  than one ancestor, the resulting theory structure of an Isabelle
-  session forms a directed acyclic graph (DAG).  Isabelle takes care
-  that sources contributing to the development graph are always
-  up-to-date: changed files are automatically rechecked whenever a
-  theory header specification is processed.
-
-  The optional @{keyword_def "keywords"} specification declares outer
-  syntax (\chref{ch:outer-syntax}) that is introduced in this theory
-  later on (rare in end-user applications).  Both minor keywords and
-  major keywords of the Isar command language need to be specified, in
-  order to make parsing of proof documents work properly.  Command
-  keywords need to be classified according to their structural role in
-  the formal text.  Examples may be seen in Isabelle/HOL sources
-  itself, such as @{keyword "keywords"}~@{verbatim "\"typedef\""}
-  @{text ":: thy_goal"} or @{keyword "keywords"}~@{verbatim
-  "\"datatype\""} @{text ":: thy_decl"} for theory-level declarations
-  with and without proof, respectively.  Additional @{syntax tags}
-  provide defaults for document preparation (\secref{sec:tags}).
-
-  It is possible to specify an alternative completion via @{text "==
-  text"}, while the default is the corresponding keyword name.
-  
-  \item @{command (global) "end"} concludes the current theory
-  definition.  Note that some other commands, e.g.\ local theory
-  targets @{command locale} or @{command class} may involve a
-  @{keyword "begin"} that needs to be matched by @{command (local)
-  "end"}, according to the usual rules for nested blocks.
-
-  \end{description}
-*}
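-
-text {* For instance, a theory header in this form might read
-  @{verbatim "theory A imports B C begin"} \<dots> @{verbatim "end"},
-  where @{text A}, @{text B}, @{text C} are purely illustrative names. *}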
-
-
-section {* Local theory targets \label{sec:target} *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "context"} & : & @{text "theory \<rightarrow> local_theory"} \\
-    @{command_def (local) "end"} & : & @{text "local_theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  A local theory target is a context managed separately within the
-  enclosing theory.  Contexts may introduce parameters (fixed
-  variables) and assumptions (hypotheses).  Definitions and theorems
-  depending on the context may be added incrementally later on.
-
-  \emph{Named contexts} refer to locales (cf.\ \secref{sec:locale}) or
-  type classes (cf.\ \secref{sec:class}); the name ``@{text "-"}''
-  signifies the global theory context.
-
-  \emph{Unnamed contexts} may introduce additional parameters and
-  assumptions, and results produced in the context are generalized
-  accordingly.  Such auxiliary contexts may be nested within other
-  targets, like @{command "locale"}, @{command "class"}, @{command
-  "instantiation"}, @{command "overloading"}.
-
-  @{rail \<open>
-    @@{command context} @{syntax nameref} @'begin'
-    ;
-    @@{command context} @{syntax_ref "includes"}? (@{syntax context_elem} * ) @'begin'
-    ;
-    @{syntax_def target}: '(' @'in' @{syntax nameref} ')'
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "context"}~@{text "c \<BEGIN>"} opens a named
-  context, by recommencing an existing locale or class @{text c}.
-  Note that locale and class definitions allow including the
-  @{keyword "begin"} keyword as well, in order to continue the local
-  theory immediately after the initial specification.
-
-  \item @{command "context"}~@{text "bundles elements \<BEGIN>"} opens
-  an unnamed context, by extending the enclosing global or local
-  theory target by the given declaration bundles (\secref{sec:bundle})
-  and context elements (@{text "\<FIXES>"}, @{text "\<ASSUMES>"}
-  etc.).  This means any results stemming from definitions and proofs
-  in the extended context will be exported into the enclosing target
-  by lifting over extra parameters and premises.
-  
-  \item @{command (local) "end"} concludes the current local theory,
-  according to the nesting of contexts.  Note that a global @{command
-  (global) "end"} has a different meaning: it concludes the theory
-  itself (\secref{sec:begin-thy}).
-  
-  \item @{text "("}@{keyword_def "in"}~@{text "c)"} given after any
-  local theory command specifies an immediate target, e.g.\
-  ``@{command "definition"}~@{text "(\<IN> c) \<dots>"}'' or ``@{command
-  "theorem"}~@{text "(\<IN> c) \<dots>"}''.  This works both in a local or
-  global theory context; the current target context will be suspended
-  for this command only.  Note that ``@{text "(\<IN> -)"}'' will
-  always produce a global result independently of the current target
-  context.
-
-  \end{description}
-
-  The exact meaning of results produced within a local theory context
-  depends on the underlying target infrastructure (locale, type class
-  etc.).  The general idea is as follows, considering a context named
-  @{text c} with parameter @{text x} and assumption @{text "A[x]"}.
-  
-  Definitions are exported by introducing a global version with
-  additional arguments; a syntactic abbreviation links the long form
-  with the abstract version of the target context.  For example,
-  @{text "a \<equiv> t[x]"} becomes @{text "c.a ?x \<equiv> t[?x]"} at the theory
-  level (for arbitrary @{text "?x"}), together with a local
-  abbreviation @{text "c \<equiv> c.a x"} in the target context (for the
-  fixed parameter @{text x}).
-
-  Theorems are exported by discharging the assumptions and
-  generalizing the parameters of the context.  For example, @{text "a:
-  B[x]"} becomes @{text "c.a: A[?x] \<Longrightarrow> B[?x]"}, again for arbitrary
-  @{text "?x"}.
-
-  \medskip The Isabelle/HOL library contains numerous applications of
-  locales and classes, e.g.\ see @{file "~~/src/HOL/Algebra"}.  An
-  example of an unnamed auxiliary context is given in @{file
-  "~~/src/HOL/Isar_Examples/Group_Context.thy"}.  *}
-
-
-section {* Bundled declarations \label{sec:bundle} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "bundle"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "print_bundles"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
-    @{command_def "include"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-    @{command_def "including"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
-    @{keyword_def "includes"} & : & syntax \\
-  \end{matharray}
-
-  The outer syntax of fact expressions (\secref{sec:syn-att}) involves
-  theorems and attributes, which are evaluated in the context and
-  applied to it.  Attributes may declare theorems to the context, as
-  in @{text "this_rule [intro] that_rule [elim]"} for example.
-  Configuration options (\secref{sec:config}) are special declaration
-  attributes that operate on the context without a theorem, as in
-  @{text "[[show_types = false]]"} for example.
-
-  Expressions of this form may be defined as \emph{bundled
-  declarations} in the context, and included in other situations later
-  on.  Including declaration bundles augments a local context casually
-  without logical dependencies, which is in contrast to locales and
-  locale interpretation (\secref{sec:locale}).
-
-  @{rail \<open>
-    @@{command bundle} @{syntax target}? \<newline>
-    @{syntax name} '=' @{syntax thmrefs} (@'for' (@{syntax vars} + @'and'))?
-    ;
-    (@@{command include} | @@{command including}) (@{syntax nameref}+)
-    ;
-    @{syntax_def "includes"}: @'includes' (@{syntax nameref}+)
-  \<close>}
-
-  \begin{description}
-
-  \item @{command bundle}~@{text "b = decls"} defines a bundle of
-  declarations in the current context.  The RHS is similar to the one
-  of the @{command declare} command.  Bundles defined in local theory
-  targets are subject to transformations via morphisms, when moved
-  into different application contexts; this works analogously to any
-  other local theory specification.
-
-  \item @{command print_bundles} prints the named bundles that are
-  available in the current context.
-
-  \item @{command include}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} includes the declarations
-  from the given bundles into the current proof body context.  This is
-  analogous to @{command "note"} (\secref{sec:proof-facts}) with the
-  expanded bundles.
-
-  \item @{command including} is similar to @{command include}, but
-  works in proof refinement (backward mode).  This is analogous to
-  @{command "using"} (\secref{sec:proof-facts}) with the expanded
-  bundles.
-
-  \item @{keyword "includes"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is similar to
-  @{command include}, but works in situations where a specification
-  context is constructed, notably for @{command context} and long
-  statements of @{command theorem} etc.
-
-  \end{description}
-
-  Here is an artificial example of bundling various configuration
-  options: *}
-
-bundle trace = [[simp_trace, linarith_trace, metis_trace, smt_trace]]
-
-lemma "x = x"
-  including trace by metis
-
-
-section {* Basic specification elements \label{sec:basic-spec} *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "axiomatization"} & : & @{text "theory \<rightarrow> theory"} & (axiomatic!) \\
-    @{command_def "definition"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{attribute_def "defn"} & : & @{text attribute} \\
-    @{command_def "print_defn_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
-    @{command_def "abbreviation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "print_abbrevs"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
-  \end{matharray}
-
-  These specification mechanisms provide a slightly more abstract view
-  than the underlying primitives of @{command "consts"}, @{command
-  "defs"} (see \secref{sec:consts}), and raw axioms.  In particular,
-  type-inference covers the whole specification as usual.
-
-  @{rail \<open>
-    @@{command axiomatization} @{syntax "fixes"}? (@'where' specs)?
-    ;
-    @@{command definition} @{syntax target}? \<newline>
-      (decl @'where')? @{syntax thmdecl}? @{syntax prop}
-    ;
-    @@{command abbreviation} @{syntax target}? @{syntax mode}? \<newline>
-      (decl @'where')? @{syntax prop}
-    ;
-
-    @{syntax_def "fixes"}: ((@{syntax name} ('::' @{syntax type})?
-      @{syntax mixfix}? | @{syntax vars}) + @'and')
-    ;
-    specs: (@{syntax thmdecl}? @{syntax props} + @'and')
-    ;
-    decl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "axiomatization"}~@{text "c\<^sub>1 \<dots> c\<^sub>m \<WHERE> \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
-  introduces several constants simultaneously and states axiomatic
-  properties for these.  The constants are marked as being specified
-  once and for all, which prevents additional specifications being
-  issued later on.
-  
-  Note that axiomatic specifications are only appropriate when
-  declaring a new logical system; axiomatic specifications are
-  restricted to global theory contexts.  Normal applications should
-  only use definitional mechanisms!
-
-  \item @{command "definition"}~@{text "c \<WHERE> eq"} produces an
-  internal definition @{text "c \<equiv> t"} according to the specification
-  given as @{text eq}, which is then turned into a proven fact.  The
-  given proposition may deviate from internal meta-level equality
-  according to the rewrite rules declared as @{attribute defn} by the
-  object-logic.  This usually covers object-level equality @{text "x =
-  y"} and equivalence @{text "A \<leftrightarrow> B"}.  End-users normally need not
-  change the @{attribute defn} setup.
-  
-  Definitions may be presented with explicit arguments on the LHS, as
-  well as additional conditions, e.g.\ @{text "f x y = t"} instead of
-  @{text "f \<equiv> \<lambda>x y. t"} and @{text "y \<noteq> 0 \<Longrightarrow> g x y = u"} instead of an
-  unrestricted @{text "g \<equiv> \<lambda>x y. u"}.
-
-  \item @{command "print_defn_rules"} prints the definitional rewrite rules
-  declared via @{attribute defn} in the current context.
-
-  \item @{command "abbreviation"}~@{text "c \<WHERE> eq"} introduces a
-  syntactic constant which is associated with a certain term according
-  to the meta-level equality @{text eq}.
-  
-  Abbreviations participate in the usual type-inference process, but
-  are expanded before the logic ever sees them.  Pretty printing of
-  terms involves higher-order rewriting with rules stemming from
-  reverted abbreviations.  This needs some care to avoid overlapping
-  or looping syntactic replacements!
-  
-  The optional @{text mode} specification restricts output to a
-  particular print mode; using ``@{text input}'' here achieves the
-  effect of one-way abbreviations.  The mode may also include an
-  ``@{keyword "output"}'' qualifier that affects the concrete syntax
-  declared for abbreviations, cf.\ @{command "syntax"} in
-  \secref{sec:syn-trans}.
-  
-  \item @{command "print_abbrevs"} prints all constant abbreviations
-  of the current context.
-  
-  \end{description}
-*}
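-
-text {* Here is a small illustration of both mechanisms; the constant
-  names are invented for this sketch: *}
-
-definition ident :: "'a \<Rightarrow> 'a" where "ident x = x"
-
-abbreviation twice :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a" where "twice f x \<equiv> f (f x)"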
-
-
-section {* Generic declarations *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "declaration"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "syntax_declaration"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "declare"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  Arbitrary operations on the background context may be wrapped-up as
-  generic declaration elements.  Since the underlying concept of local
-  theories may be subject to later re-interpretation, there is an
-  additional dependency on a morphism that captures the difference
-  between the original declaration context and the application context
-  encountered later on.  A fact declaration is an important special
-  case: it consists of a theorem which is applied to the context by
-  means of an attribute.
-
-  @{rail \<open>
-    (@@{command declaration} | @@{command syntax_declaration})
-      ('(' @'pervasive' ')')? \<newline> @{syntax target}? @{syntax text}
-    ;
-    @@{command declare} @{syntax target}? (@{syntax thmrefs} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "declaration"}~@{text d} adds the declaration
-  function @{text d} of ML type @{ML_type declaration} to the current
-  local theory under construction.  In later application contexts, the
-  function is transformed according to the morphisms being involved in
-  the interpretation hierarchy.
-
-  If the @{text "(pervasive)"} option is given, the corresponding
-  declaration is applied to all possible contexts involved, including
-  the global background theory.
-
-  \item @{command "syntax_declaration"} is similar to @{command
-  "declaration"}, but is meant to affect only ``syntactic'' tools by
-  convention (such as notation and type-checking information).
-
-  \item @{command "declare"}~@{text thms} declares theorems to the
-  current local theory context.  No theorem binding is involved here,
-  unlike @{command "theorems"} or @{command "lemmas"} (cf.\
-  \secref{sec:theorems}), so @{command "declare"} only has the effect
-  of applying attributes as included in the theorem specification.
-
-  \end{description}
-*}
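-
-text {* Continuing the hypothetical @{text ident} example of
-  \secref{sec:basic-spec}, its definitional equation could be declared
-  as a default simp rule of the current context: *}
-
-declare ident_def [simp]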
-
-
-section {* Locales \label{sec:locale} *}
-
-text {*
-  A locale is a functor that maps parameters (including implicit type
-  parameters) and a specification to a list of declarations.  The
-  syntax of locales is modeled after the Isar proof context commands
-  (cf.\ \secref{sec:proof-context}).
-
-  Locale hierarchies are supported by maintaining a graph of
-  dependencies between locale instances in the global theory.
-  Dependencies may be introduced through import (where a locale is
-  defined as sublocale of the imported instances) or by proving that
-  an existing locale is a sublocale of one or several locale
-  instances.
-
-  A locale may be opened with the purpose of appending to its list of
-  declarations (cf.\ \secref{sec:target}).  When opening a locale,
-  declarations from all dependencies are collected and are presented
-  as a local theory.  In this process, which is called \emph{roundup},
-  redundant locale instances are omitted.  A locale instance is
-  redundant if it is subsumed by an instance encountered earlier.  A
-  more detailed description of this process is available elsewhere
-  \cite{Ballarin2014}.
-*}
-
-
-subsection {* Locale expressions \label{sec:locale-expr} *}
-
-text {*
-  A \emph{locale expression} denotes a context composed of instances
-  of existing locales.  The context consists of the declaration
-  elements from the locale instances.  Redundant locale instances are
-  omitted according to roundup.
-
-  @{rail \<open>
-    @{syntax_def locale_expr}: (instance + '+') (@'for' (@{syntax "fixes"} + @'and'))?
-    ;
-    instance: (qualifier ':')? @{syntax nameref} (pos_insts | named_insts)
-    ;
-    qualifier: @{syntax name} ('?' | '!')?
-    ;
-    pos_insts: ('_' | @{syntax term})*
-    ;
-    named_insts: @'where' (@{syntax name} '=' @{syntax term} + @'and')
-  \<close>}
-
-  A locale instance consists of a reference to a locale and either
-  positional or named parameter instantiations.  Identical
-  instantiations (that is, those that instantiate a parameter by itself)
-  may be omitted.  The notation `@{text "_"}' makes it possible to omit the
-  instantiation for a parameter inside a positional instantiation.
-
-  Terms in instantiations are from the context the locale expression
-  is declared in.  Local names may be added to this context with the
-  optional @{keyword "for"} clause.  This is useful for shadowing names
-  bound in outer contexts, and for declaring syntax.  In addition,
-  syntax declarations from one instance are effective when parsing
-  subsequent instances of the same expression.
-
-  Instances have an optional qualifier which applies to names in
-  declarations.  Names include local definitions and theorem names.
-  If present, the qualifier itself is either optional
-  (``\texttt{?}''), which means that it may be omitted on input of the
-  qualified name, or mandatory (``\texttt{!}'').  If neither
-  ``\texttt{?}'' nor ``\texttt{!}'' is present, the command's default
-  is used.  For @{command "interpretation"} and @{command "interpret"}
-  the default is ``mandatory'', for @{command "locale"} and @{command
-  "sublocale"} the default is ``optional''.  Qualifiers play no role
-  in determining whether one locale instance subsumes another.
-*}
-
-
-subsection {* Locale declarations *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "locale"} & : & @{text "theory \<rightarrow> local_theory"} \\
-    @{command_def "print_locale"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_locales"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "locale_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{method_def intro_locales} & : & @{text method} \\
-    @{method_def unfold_locales} & : & @{text method} \\
-  \end{matharray}
-
-  \indexisarelem{fixes}\indexisarelem{constrains}\indexisarelem{assumes}
-  \indexisarelem{defines}\indexisarelem{notes}
-  @{rail \<open>
-    @@{command locale} @{syntax name} ('=' @{syntax locale})? @'begin'?
-    ;
-    @@{command print_locale} '!'? @{syntax nameref}
-    ;
-    @{syntax_def locale}: @{syntax context_elem}+ |
-      @{syntax locale_expr} ('+' (@{syntax context_elem}+))?
-    ;
-    @{syntax_def context_elem}:
-      @'fixes' (@{syntax "fixes"} + @'and') |
-      @'constrains' (@{syntax name} '::' @{syntax type} + @'and') |
-      @'assumes' (@{syntax props} + @'and') |
-      @'defines' (@{syntax thmdecl}? @{syntax prop} @{syntax prop_pat}? + @'and') |
-      @'notes' (@{syntax thmdef}? @{syntax thmrefs} + @'and')
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "locale"}~@{text "loc = import + body"} defines a
-  new locale @{text loc} as a context consisting of a certain view of
-  existing locales (@{text import}) plus some additional elements
-  (@{text body}).  Both @{text import} and @{text body} are optional;
-  the degenerate form @{command "locale"}~@{text loc} defines an empty
-  locale, which may still be useful to collect declarations of facts
-  later on.  Type-inference on locale expressions automatically takes
-  care of the most general typing that the combined context elements
-  may acquire.
-
-  The @{text import} consists of a locale expression; see
-  \secref{sec:proof-context} above.  Its @{keyword "for"} clause defines
-  the parameters of @{text import}.  These are parameters of
-  the defined locale.  Locale parameters whose instantiation is
-  omitted automatically extend the (possibly empty) @{keyword "for"}
-  clause: they are inserted at its beginning.  This means that these
-  parameters may be referred to from within the expression and also in
-  the subsequent context elements and provides a notational
-  convenience for the inheritance of parameters in locale
-  declarations.
-
-  The @{text body} consists of context elements.
-
-  \begin{description}
-
-  \item @{element "fixes"}~@{text "x :: \<tau> (mx)"} declares a local
-  parameter of type @{text \<tau>} and mixfix annotation @{text mx} (both
-  are optional).  The special syntax declaration ``@{text
-  "("}@{keyword_ref "structure"}@{text ")"}'' means that @{text x} may
-  be referenced implicitly in this context.
-
-  \item @{element "constrains"}~@{text "x :: \<tau>"} introduces a type
-  constraint @{text \<tau>} on the local parameter @{text x}.  This
-  element is deprecated.  The type constraint should be introduced in
-  the @{keyword "for"} clause or the relevant @{element "fixes"} element.
-
-  \item @{element "assumes"}~@{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
-  introduces local premises, similar to @{command "assume"} within a
-  proof (cf.\ \secref{sec:proof-context}).
-
-  \item @{element "defines"}~@{text "a: x \<equiv> t"} defines a previously
-  declared parameter.  This is similar to @{command "def"} within a
-  proof (cf.\ \secref{sec:proof-context}), but @{element "defines"}
-  takes an equational proposition instead of a variable-term pair.  The
-  left-hand side of the equation may have additional arguments, e.g.\
-  ``@{element "defines"}~@{text "f x\<^sub>1 \<dots> x\<^sub>n \<equiv> t"}''.
-
-  \item @{element "notes"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}
-  reconsiders facts within a local context.  Most notably, this may
-  include arbitrary declarations in any attribute specifications
-  included here, e.g.\ a local @{attribute simp} rule.
-
-  \end{description}
-
-  Both @{element "assumes"} and @{element "defines"} elements
-  contribute to the locale specification.  When defining an operation
-  derived from the parameters, @{command "definition"}
-  (\secref{sec:basic-spec}) is usually more appropriate.
-  
-  Note that ``@{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"}'' patterns given
-  in the syntax of @{element "assumes"} and @{element "defines"} above
-  are illegal in locale definitions.  In the long goal format of
-  \secref{sec:goals}, term bindings may be included as expected,
-  though.
-  
-  \medskip Locale specifications are ``closed up'' by
-  turning the given text into a predicate definition @{text
-  loc_axioms} and deriving the original assumptions as local lemmas
-  (modulo local definitions).  The predicate statement covers only the
-  newly specified assumptions, omitting the content of included locale
-  expressions.  The full cumulative view is only provided on export,
-  involving another predicate @{text loc} that refers to the complete
-  specification text.
-  
-  In any case, the predicate arguments are those locale parameters
-  that actually occur in the respective piece of text.  These
-  predicates operate at the meta-level in principle, but the locale
-  package attempts to internalize statements according to the
-  object-logic setup (e.g.\ replacing @{text \<And>} by @{text \<forall>}, and
-  @{text "\<Longrightarrow>"} by @{text "\<longrightarrow>"} in HOL; see also
-  \secref{sec:object-logic}).  Separate introduction rules @{text
-  loc_axioms.intro} and @{text loc.intro} are provided as well.
-  
-  \item @{command "print_locale"}~@{text "locale"} prints the
-  contents of the named locale.  The command omits @{element "notes"}
-  elements by default.  Use @{command "print_locale"}@{text "!"} to
-  have them included.
-
-  \item @{command "print_locales"} prints the names of all locales
-  of the current theory.
-
-  \item @{command "locale_deps"} visualizes all locales and their
-  relations as a Hasse diagram. This includes locales defined as type
-  classes (\secref{sec:class}).  See also @{command
-  "print_dependencies"} below.
-
-  \item @{method intro_locales} and @{method unfold_locales}
-  repeatedly expand all introduction rules of locale predicates of the
-  theory.  While @{method intro_locales} only applies the @{text
-  loc.intro} introduction rules and therefore does not descend to
-  assumptions, @{method unfold_locales} is more aggressive and applies
-  @{text loc_axioms.intro} as well.  Both methods are aware of locale
-  specifications entailed by the context, both from target statements,
-  and from interpretations (see below).  New goals that are entailed
-  by the current context are discharged automatically.
-
-  \end{description}
-*}
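-
-text {* A minimal locale in this style, with one parameter and one
-  assumption (all names are invented for this sketch): *}
-
-locale sketch_semigroup =
-  fixes prod :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infixl "\<otimes>" 70)
-  assumes assoc: "(x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"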
-
-
-subsection {* Locale interpretation *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "interpretation"} & : & @{text "theory | local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "interpret"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
-    @{command_def "sublocale"} & : & @{text "theory | local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def "print_dependencies"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_interps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  Locales may be instantiated, and the resulting instantiated
-  declarations added to the current context.  This requires proof (of
-  the instantiated specification) and is called \emph{locale
-  interpretation}.  Interpretation is possible in locales (@{command
-  "sublocale"}), global and local theories (@{command
-  "interpretation"}) and also within proof bodies (@{command
-  "interpret"}).
-
-  @{rail \<open>
-    @@{command interpretation} @{syntax locale_expr} equations?
-    ;
-    @@{command interpret} @{syntax locale_expr} equations?
-    ;
-    @@{command sublocale} (@{syntax nameref} ('<' | '\<subseteq>'))? @{syntax locale_expr} \<newline>
-      equations?
-    ;
-    @@{command print_dependencies} '!'? @{syntax locale_expr}
-    ;
-    @@{command print_interps} @{syntax nameref}
-    ;
-
-    equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "interpretation"}~@{text "expr \<WHERE> eqns"}
-  interprets @{text expr} in a global or local theory.  The command
-  generates proof obligations for the instantiated specifications.
-  Once these are discharged by the user, instantiated declarations (in
-  particular, facts) are added to the theory in a post-processing
-  phase.
-
-  The command is aware of interpretations that are already active.
-  Post-processing is achieved through a variant of roundup that takes
-  the interpretations of the current global or local theory into
-  account.  In order to simplify the proof obligations according to
-  existing interpretations use methods @{method intro_locales} or
-  @{method unfold_locales}.
-
-  When adding declarations to locales, interpreted versions of these
-  declarations are added to the global theory for all interpretations
-  in the global theory as well. That is, interpretations in global
-  theories dynamically participate in any declarations added to
-  locales.
-
-  In contrast, the lifetime of an interpretation in a local theory is
-  limited to the current context block.  At the closing @{command end}
-  of the block the interpretation and its declarations disappear.
-  This enables establishing facts based on interpretations without
-  creating permanent links to the interpreted locale instances, as
-  would be the case with @{command sublocale}.
-  While @{command "interpretation"}~@{text "(\<IN> c)
-  \<dots>"} is technically possible, it is not useful since its result is
-  discarded immediately.
-
-  Free variables in the interpreted expression are allowed.  They are
-  turned into schematic variables in the generated declarations.  In
-  order to use a free variable whose name is already bound in the
-  context --- for example, because a constant of that name exists ---
-  add it to the @{keyword "for"} clause.
-
-  The equations @{text eqns} yield \emph{rewrite morphisms}, which are
-  unfolded during post-processing and are useful for interpreting
-  concepts introduced through definitions.  The equations must be
-  proved.
-
-  \item @{command "interpret"}~@{text "expr \<WHERE> eqns"} interprets
-  @{text expr} in the proof context and is otherwise similar to
-  interpretation in local theories.  Note that for @{command
-  "interpret"} the @{text eqns} should be
-  explicitly universally quantified.
-
-  \item @{command "sublocale"}~@{text "name \<subseteq> expr \<WHERE>
-  eqns"}
-  interprets @{text expr} in the locale @{text name}.  A proof that
-  the specification of @{text name} implies the specification of
-  @{text expr} is required.  As in the localized version of the
-  theorem command, the proof is in the context of @{text name}.  After
-  the proof obligation has been discharged, the locale hierarchy is
-  changed as if @{text name} imported @{text expr} (hence the name
-  @{command "sublocale"}).  When the context of @{text name} is
-  subsequently entered, traversing the locale hierarchy will involve
-  the locale instances of @{text expr}, and their declarations will be
-  added to the context.  This makes @{command "sublocale"}
-  dynamic: extensions of a locale that is instantiated in @{text expr}
-  may take place after the @{command "sublocale"} declaration and
-  still become available in the context.  Circular @{command
-  "sublocale"} declarations are allowed as long as they do not lead to
-  infinite chains.
-
-  If interpretations of @{text name} exist in the current global
-  theory, the command adds interpretations for @{text expr} as well,
-  with the same qualifier, although only for fragments of @{text expr}
-  that are not interpreted in the theory already.
-
-  The equations @{text eqns} amend the morphism through
-  which @{text expr} is interpreted.  This enables mapping definitions
-  from the interpreted locales to entities of @{text name} and can
-  help break infinite chains induced by circular @{command
-  "sublocale"} declarations.
-
-  In a named context block the @{command sublocale} command may also
-  be used, but the locale argument must be omitted.  The command then
-  refers to the locale (or class) target of the context block.
-
-  \item @{command "print_dependencies"}~@{text "expr"} is useful for
-  understanding the effect of an interpretation of @{text "expr"} in
-  the current context.  It lists all locale instances for which
-  interpretations would be added to the current context.  Variant
-  @{command "print_dependencies"}@{text "!"} does not generalize
-  parameters and assumes an empty context --- that is, it prints all
-  locale instances that would be considered for interpretation.  The
-  latter is useful for understanding the dependencies of a locale
-  expression.
-
-  \item @{command "print_interps"}~@{text "locale"} lists all
-  interpretations of @{text "locale"} in the current theory or proof
-  context, including those due to a combination of an @{command
-  "interpretation"} or @{command "interpret"} and one or several
-  @{command "sublocale"} declarations.
-
-  \end{description}
-
-  \begin{warn}
-    If a global theory inherits declarations (body elements) for a
-    locale from one parent and an interpretation of that locale from
-    another parent, the interpretation will not be applied to the
-    declarations.
-  \end{warn}
-
-  \begin{warn}
-    Since attributes are applied to interpreted theorems,
-    interpretation may modify the context of common proof tools, e.g.\
-    the Simplifier or Classical Reasoner.  As the behavior of such
-    tools is \emph{not} stable under interpretation morphisms, manual
-    declarations might have to be added to the target context of the
-    interpretation to revert such declarations.
-  \end{warn}
-
-  \begin{warn}
-    An interpretation in a local theory or proof context may subsume previous
-    interpretations.  This happens if the same specification fragment
-    is interpreted twice and the instantiation of the second
-    interpretation is more general than the instantiation of the
-    first.  The locale package does not attempt to remove subsumed
-    interpretations.
-  \end{warn}
-*}
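-
-text {* Continuing the hypothetical @{text sketch_semigroup} locale
-  from above, an interpretation for list append might read as follows;
-  the proof obligation is just associativity of @{text "op @"}: *}
-
-interpretation append_sketch: sketch_semigroup "op @"
-  by unfold_locales (rule append_assoc)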
-
-
-section {* Classes \label{sec:class} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "class"} & : & @{text "theory \<rightarrow> local_theory"} \\
-    @{command_def "instantiation"} & : & @{text "theory \<rightarrow> local_theory"} \\
-    @{command_def "instance"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command "instance"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-    @{command_def "subclass"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "print_classes"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "class_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{method_def intro_classes} & : & @{text method} \\
-  \end{matharray}
-
-  A class is a particular locale with \emph{exactly one} type variable
-  @{text \<alpha>}.  Beyond the underlying locale, a corresponding type class
-  is established which is interpreted logically as axiomatic type
-  class \cite{Wenzel:1997:TPHOL} whose logical content consists of the
-  assumptions of the locale.  Thus, classes provide the full
-  generality of locales combined with the convenience of type classes
-  (notably type-inference).  See \cite{isabelle-classes} for a short
-  tutorial.
-
-  @{rail \<open>
-    @@{command class} class_spec @'begin'?
-    ;
-    class_spec: @{syntax name} '='
-      ((@{syntax nameref} '+' (@{syntax context_elem}+)) |
-        @{syntax nameref} | (@{syntax context_elem}+))      
-    ;
-    @@{command instantiation} (@{syntax nameref} + @'and') '::' @{syntax arity} @'begin'
-    ;
-    @@{command instance} (() | (@{syntax nameref} + @'and') '::' @{syntax arity} |
-      @{syntax nameref} ('<' | '\<subseteq>') @{syntax nameref} )
-    ;
-    @@{command subclass} @{syntax target}? @{syntax nameref}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "class"}~@{text "c = superclasses + body"} defines
-  a new class @{text c}, inheriting from @{text superclasses}.  This
-  introduces a locale @{text c} with import of all locales @{text
-  superclasses}.
-
-  Any @{element "fixes"} in @{text body} are lifted to the global
-  theory level (\emph{class operations} @{text "f\<^sub>1, \<dots>,
-  f\<^sub>n"} of class @{text c}), mapping the local type parameter
-  @{text \<alpha>} to a schematic type variable @{text "?\<alpha> :: c"}.
-
-  Likewise, @{element "assumes"} in @{text body} are also lifted,
-  mapping each local parameter @{text "f :: \<tau>[\<alpha>]"} to its
-  corresponding global constant @{text "f :: \<tau>[?\<alpha> :: c]"}.  The
-  corresponding introduction rule is provided as @{text
-  c_class_axioms.intro}.  This rule should be rarely needed directly
-  --- the @{method intro_classes} method takes care of the details of
-  class membership proofs.
-
-  \item @{command "instantiation"}~@{text "t :: (s\<^sub>1, \<dots>, s\<^sub>n)s
-  \<BEGIN>"} opens a target (cf.\ \secref{sec:target}) which
-  allows specifying class operations @{text "f\<^sub>1, \<dots>, f\<^sub>n"} corresponding
-  to sort @{text s} at the particular type instance @{text "(\<alpha>\<^sub>1 :: s\<^sub>1,
-  \<dots>, \<alpha>\<^sub>n :: s\<^sub>n) t"}.  A plain @{command "instance"} command in the
-  target body poses a goal stating these type arities.  The target is
-  concluded by an @{command_ref (local) "end"} command.
-
-  Note that a list of simultaneous type constructors may be given;
-  this corresponds nicely to mutually recursive type definitions, e.g.\
-  in Isabelle/HOL.
-
-  \item @{command "instance"} in an instantiation target body sets
-  up a goal stating the type arities claimed at the opening @{command
-  "instantiation"}.  The proof would usually proceed by @{method
-  intro_classes}, and then establish the characteristic theorems of
-  the type classes involved.  After finishing the proof, the
-  background theory will be augmented by the proven type arities.
-
-  On the theory level, @{command "instance"}~@{text "t :: (s\<^sub>1, \<dots>,
-  s\<^sub>n)s"} provides a convenient way to instantiate a type class with no
-  need to specify operations: one can continue with the
-  instantiation proof immediately.
-
-  \item @{command "subclass"}~@{text c} in a class context for class
-  @{text d} sets up a goal stating that class @{text c} is logically
-  contained in class @{text d}.  After finishing the proof, class
-  @{text d} is proven to be a subclass of @{text c} and the locale @{text
-  c} is interpreted into @{text d} simultaneously.
-
-  A weakened form of this is available through a further variant of
-  @{command instance}:  @{command instance}~@{text "c\<^sub>1 \<subseteq> c\<^sub>2"} opens
-  a proof that class @{text "c\<^sub>2"} implies @{text "c\<^sub>1"} without reference
-  to the underlying locales; this is useful if the properties needed to
-  prove the logical connection are not sufficient on the locale level
-  but only on the theory level.
-
-  \item @{command "print_classes"} prints all classes in the current
-  theory.
-
-  \item @{command "class_deps"} visualizes all classes and their
-  subclass relations as a Hasse diagram.
-
-  \item @{method intro_classes} repeatedly expands all class
-  introduction rules of this theory.  Note that this method usually
-  need not be named explicitly, as it is already included in the
-  default proof step (e.g.\ of @{command "proof"}).  In particular,
-  instantiation of trivial (syntactic) classes may be performed by a
-  single ``@{command ".."}'' proof step.
-
-  \end{description}
-*}
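-
-text {* A tiny syntactic class together with an instantiation (all
-  names are invented for this sketch); since the class has no
-  assumptions, the instance proof is a single ``@{command ".."}'': *}
-
-class pointed =
-  fixes point :: "'a"
-
-instantiation nat :: pointed
-begin
-
-definition "point = (0 :: nat)"
-
-instance ..
-
-end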
-
-
-subsection {* The class target *}
-
-text {*
-  %FIXME check
-
-  A named context may refer to a locale (cf.\ \secref{sec:target}).
-  If this locale is also a class @{text c}, apart from the common
-  locale target behaviour the following happens.
-
-  \begin{itemize}
-
-  \item Local constant declarations @{text "g[\<alpha>]"} referring to the
-  local type parameter @{text \<alpha>} and local parameters @{text "f[\<alpha>]"}
-  are accompanied by theory-level constants @{text "g[?\<alpha> :: c]"}
-  referring to theory-level class operations @{text "f[?\<alpha> :: c]"}.
-
-  \item Local theorem bindings are lifted as are assumptions.
-
-  \item Local syntax refers to local operations @{text "g[\<alpha>]"} and
-  global operations @{text "g[?\<alpha> :: c]"} uniformly.  Type inference
-  resolves ambiguities.  In rare cases, manual type annotations are
-  needed.
-  
-  \end{itemize}
-*}
-
-
-subsection {* Co-regularity of type classes and arities *}
-
-text {* The class relation together with the collection of
-  type-constructor arities must obey the principle of
-  \emph{co-regularity} as defined below.
-
-  \medskip For the subsequent formulation of co-regularity we assume
-  that the class relation is closed by transitivity and reflexivity.
-  Moreover the collection of arities @{text "t :: (\<^vec>s)c"} is
-  completed such that @{text "t :: (\<^vec>s)c"} and @{text "c \<subseteq> c'"}
-  implies @{text "t :: (\<^vec>s)c'"} for all such declarations.
-
-  Treating sorts as finite sets of classes (meaning the intersection),
-  the class relation @{text "c\<^sub>1 \<subseteq> c\<^sub>2"} is extended to sorts as
-  follows:
-  \[
-    @{text "s\<^sub>1 \<subseteq> s\<^sub>2 \<equiv> \<forall>c\<^sub>2 \<in> s\<^sub>2. \<exists>c\<^sub>1 \<in> s\<^sub>1. c\<^sub>1 \<subseteq> c\<^sub>2"}
-  \]
-
-  This relation on sorts is further extended to tuples of sorts (of
-  the same length) in the component-wise way.
-
-  \smallskip Co-regularity of the class relation together with the
-  arities relation means:
-  \[
-    @{text "t :: (\<^vec>s\<^sub>1)c\<^sub>1 \<Longrightarrow> t :: (\<^vec>s\<^sub>2)c\<^sub>2 \<Longrightarrow> c\<^sub>1 \<subseteq> c\<^sub>2 \<Longrightarrow> \<^vec>s\<^sub>1 \<subseteq> \<^vec>s\<^sub>2"}
-  \]
-  \noindent for all such arities.  In other words, whenever the result
-  classes of some type-constructor arities are related, then the
-  argument sorts need to be related in the same way.
-
-  \medskip Co-regularity is a very fundamental property of the
-  order-sorted algebra of types.  For example, it entails principal
-  types and most general unifiers, e.g.\ see \cite{nipkow-prehofer}.
-*}
-
-
-section {* Unrestricted overloading *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "overloading"} & : & @{text "theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  Isabelle/Pure's definitional schemes support certain forms of
-  overloading (see \secref{sec:consts}).  Overloading means that a
-  constant being declared as @{text "c :: \<alpha> decl"} may be
-  defined separately on type instances
-  @{text "c :: (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t decl"}
-  for each type constructor @{text t}.  On most occasions
-  overloading will be used in a Haskell-like fashion together with
-  type classes by means of @{command "instantiation"} (see
-  \secref{sec:class}).  Sometimes low-level overloading is desirable.
-  The @{command "overloading"} target provides a convenient view for
-  end-users.
-
-  @{rail \<open>
-    @@{command overloading} ( spec + ) @'begin'
-    ;
-    spec: @{syntax name} ( '==' | '\<equiv>' ) @{syntax term} ( '(' @'unchecked' ')' )?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "overloading"}~@{text "x\<^sub>1 \<equiv> c\<^sub>1 :: \<tau>\<^sub>1 \<AND> \<dots> x\<^sub>n \<equiv> c\<^sub>n :: \<tau>\<^sub>n \<BEGIN>"}
-  opens a theory target (cf.\ \secref{sec:target}) which allows
-  specifying constants with overloaded definitions.  These are identified
-  by an explicitly given mapping from variable names @{text "x\<^sub>i"} to
-  constants @{text "c\<^sub>i"} at particular type instances.  The
-  definitions themselves are established using common specification
-  tools, using the names @{text "x\<^sub>i"} as references to the
-  corresponding constants.  The target is concluded by @{command
-  (local) "end"}.
-
-  A @{text "(unchecked)"} option disables global dependency checks for
-  the corresponding definition, which is occasionally useful for
-  exotic overloading (see \secref{sec:consts} for a precise description).
-  It is at the discretion of the user to avoid
-  malformed theory specifications!
-
-  \end{description}
-*}
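-
-text {* A minimal sketch of low-level overloading; the constant and all
-  names are invented for illustration: *}
-
-consts pick :: "'a \<Rightarrow> 'a"
-
-overloading
-  pick_nat \<equiv> "pick :: nat \<Rightarrow> nat"
-begin
-
-definition pick_nat :: "nat \<Rightarrow> nat" where "pick_nat n = n"
-
-end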
-
-
-section {* Incorporating ML code \label{sec:ML} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "SML_file"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "ML_file"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "ML"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "ML_prf"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "ML_val"} & : & @{text "any \<rightarrow>"} \\
-    @{command_def "ML_command"} & : & @{text "any \<rightarrow>"} \\
-    @{command_def "setup"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "local_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "attribute_setup"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{command SML_file} | @@{command ML_file}) @{syntax name}
-    ;
-    (@@{command ML} | @@{command ML_prf} | @@{command ML_val} |
-      @@{command ML_command} | @@{command setup} | @@{command local_setup}) @{syntax text}
-    ;
-    @@{command attribute_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "SML_file"}~@{text "name"} reads and evaluates the
-  given Standard ML file.  Top-level SML bindings are stored within
-  the theory context; the initial environment is restricted to the
-  Standard ML implementation of Poly/ML, without the many add-ons of
-  Isabelle/ML.  Multiple @{command "SML_file"} commands may be used to
-  build larger Standard ML projects, independently of the regular
-  Isabelle/ML environment.
-
-  \item @{command "ML_file"}~@{text "name"} reads and evaluates the
-  given ML file.  The current theory context is passed down to the ML
-  toplevel and may be modified, using @{ML "Context.>>"} or derived ML
-  commands.  Top-level ML bindings are stored within the (global or
-  local) theory context.
-  
-  \item @{command "ML"}~@{text "text"} is similar to @{command
-  "ML_file"}, but evaluates directly the given @{text "text"}.
-  Top-level ML bindings are stored within the (global or local) theory
-  context.
-
-  \item @{command "ML_prf"} is analogous to @{command "ML"} but works
-  within a proof context.  Top-level ML bindings are stored within the
-  proof context in a purely sequential fashion, disregarding the
-  nested proof structure.  ML bindings introduced by @{command
-  "ML_prf"} are discarded at the end of the proof.
-
-  \item @{command "ML_val"} and @{command "ML_command"} are diagnostic
-  versions of @{command "ML"}, which means that the context may not be
-  updated.  @{command "ML_val"} echoes the bindings produced at the ML
-  toplevel, but @{command "ML_command"} is silent.
-  
-  \item @{command "setup"}~@{text "text"} changes the current theory
-  context by applying @{text "text"}, which refers to an ML expression
-  of type @{ML_type "theory -> theory"}.  This enables to initialize
-  any object-logic specific tools and packages written in ML, for
-  example.
-
-  \item @{command "local_setup"} is similar to @{command "setup"} for
-  a local theory context, and an ML expression of type @{ML_type
-  "local_theory -> local_theory"}.  This allows to
-  invoke local theory specification packages without going through
-  concrete outer syntax, for example.
-
-  \item @{command "attribute_setup"}~@{text "name = text description"}
-  defines an attribute in the current theory.  The given @{text
-  "text"} has to be an ML expression of type
-  @{ML_type "attribute context_parser"}, cf.\ basic parsers defined in
-  structure @{ML_structure Args} and @{ML_structure Attrib}.
-
-  In principle, attributes can operate both on a given theorem and the
-  implicit context, although in practice only one is modified and the
-  other serves as parameter.  Here are examples for these two cases:
-
-  \end{description}
-*}
-
-  attribute_setup my_rule = {*
-    Attrib.thms >> (fn ths =>
-      Thm.rule_attribute
-        (fn context: Context.generic => fn th: thm =>
-          let val th' = th OF ths
-          in th' end)) *}
-
-  attribute_setup my_declaration = {*
-    Attrib.thms >> (fn ths =>
-      Thm.declaration_attribute
-        (fn th: thm => fn context: Context.generic =>
-          let val context' = context
-          in context' end)) *}
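-
-text {*
-  Two further, deliberately trivial sketches: a diagnostic @{command
-  "ML_val"} evaluation and a @{command "setup"} that applies the identity
-  transformation to the background theory:
-*}
-
-  ML_val {* 21 + 21  (* echoes the resulting ML binding *) *}
-
-  setup {* fn thy => thy  (* identity: leaves the theory unchanged *) *}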
-
-
-section {* Primitive specification elements *}
-
-subsection {* Sorts *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "default_sort"} & : & @{text "local_theory \<rightarrow> local_theory"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command default_sort} @{syntax sort}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "default_sort"}~@{text s} makes sort @{text s} the
-  new default sort for any type variable that is given explicitly in
-  the text, but lacks a sort constraint (wrt.\ the current context).
-  Type variables generated by type inference are not affected.
-
-  Usually the default sort is only changed when defining a new
-  object-logic.  For example, the default sort in Isabelle/HOL is
-  @{class type}, the class of all HOL types.
-
-  When merging theories, the default sorts of the parents are
-  logically intersected, i.e.\ the representations as lists of classes
-  are joined.
-
-  \end{description}
-*}
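-
-text {*
-  For example, the (redundant) declaration below re-installs the default
-  sort of Isabelle/HOL explicitly:
-*}
-
-  default_sort type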
-
-
-subsection {* Types and type abbreviations \label{sec:types-pure} *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "type_synonym"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "typedecl"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command type_synonym} (@{syntax typespec} '=' @{syntax type} @{syntax mixfix}?)
-    ;
-    @@{command typedecl} @{syntax typespec} @{syntax mixfix}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "type_synonym"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>"}
-  introduces a \emph{type synonym} @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} for the
-  existing type @{text "\<tau>"}.  Unlike actual type definitions, such as
-  those available in Isabelle/HOL, type synonyms are merely
-  syntactic abbreviations without any logical significance.
-  Internally, type synonyms are fully expanded.
-  
-  \item @{command "typedecl"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} declares a new
-  type constructor @{text t}.  If the object-logic defines a base sort
-  @{text s}, then the constructor is declared to operate on that, via
-  the axiomatic type-class instance @{text "t :: (s, \<dots>, s)s"}.
-
-  \end{description}
-*}
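-
-text {*
-  A small sketch with hypothetical names:
-*}
-
-  type_synonym ('a, 'b) assoc_list = "('a \<times> 'b) list"  (* mere abbreviation *)
-
-  typedecl point  (* new opaque type constructor *)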
-
-
-subsection {* Constants and definitions \label{sec:consts} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "consts"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "defs"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  Definitions essentially express abbreviations within the logic.  The
-  simplest form of a definition is @{text "c :: \<sigma> \<equiv> t"}, where @{text
-  c} is a newly declared constant.  Isabelle also allows derived forms
-  where the arguments of @{text c} appear on the left, abbreviating a
-  prefix of @{text \<lambda>}-abstractions, e.g.\ @{text "c \<equiv> \<lambda>x y. t"} may be
-  written more conveniently as @{text "c x y \<equiv> t"}.  Moreover,
-  definitions may be weakened by adding arbitrary pre-conditions:
-  @{text "A \<Longrightarrow> c x y \<equiv> t"}.
-
-  \medskip The built-in well-formedness conditions for definitional
-  specifications are:
-
-  \begin{itemize}
-
-  \item Arguments (on the left-hand side) must be distinct variables.
-
-  \item All variables on the right-hand side must also appear on the
-  left-hand side.
-
-  \item All type variables on the right-hand side must also appear on
-  the left-hand side; this prohibits @{text "0 :: nat \<equiv> length ([] ::
-  \<alpha> list)"} for example.
-
-  \item The definition must not be recursive.  Most object-logics
-  provide definitional principles that can be used to express
-  recursion safely.
-
-  \end{itemize}
-
-  The right-hand side of overloaded definitions may mention overloaded constants
-  recursively at type instances corresponding to the immediate
-  argument types @{text "\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n"}.  Incomplete
-  specification patterns impose global constraints on all occurrences,
-  e.g.\ @{text "d :: \<alpha> \<times> \<alpha>"} on the left-hand side means that all
-  corresponding occurrences on some right-hand side need to be an
-  instance of this; the general @{text "d :: \<alpha> \<times> \<beta>"} will be disallowed.
-
-  @{rail \<open>
-    @@{command consts} ((@{syntax name} '::' @{syntax type} @{syntax mixfix}?) +)
-    ;
-    @@{command defs} opt? (@{syntax axmdecl} @{syntax prop} +)
-    ;
-    opt: '(' @'unchecked'? @'overloaded'? ')'
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "consts"}~@{text "c :: \<sigma>"} declares constant @{text
-  c} to have any instance of type scheme @{text \<sigma>}.  The optional
-  mixfix annotations may attach concrete syntax to the constants
-  declared.
-  
-  \item @{command "defs"}~@{text "name: eqn"} introduces @{text eqn}
-  as a definitional axiom for some existing constant.
-  
-  The @{text "(unchecked)"} option disables global dependency checks
-  for this definition, which is occasionally useful for exotic
-  overloading.  It is at the discretion of the user to avoid malformed
-  theory specifications!
-  
-  The @{text "(overloaded)"} option declares definitions to be
-  potentially overloaded.  Unless this option is given, a warning
-  message is issued for any definitional equation with a more
-  specific type than that of the corresponding constant declaration.
-  
-  \end{description}
-*}
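-
-text {*
-  A minimal sketch; the constant @{text "pick"} and the fact name
-  @{text "pick_def"} are hypothetical:
-*}
-
-  consts pick :: "'a set \<Rightarrow> 'a"
-
-  defs pick_def: "pick A \<equiv> (SOME x. x \<in> A)"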
-
-
-section {* Naming existing theorems \label{sec:theorems} *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "lemmas"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "theorems"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{command lemmas} | @@{command theorems}) @{syntax target}? \<newline>
-      (@{syntax thmdef}? @{syntax thmrefs} + @'and')
-      (@'for' (@{syntax vars} + @'and'))?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "lemmas"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}~@{keyword_def
-  "for"}~@{text "x\<^sub>1 \<dots> x\<^sub>m"} evaluates given facts (with attributes) in
-  the current context, which may be augmented by local variables.
-  Results are standardized before being stored, i.e.\ schematic
-  variables are renamed to enforce index @{text "0"} uniformly.
-
-  \item @{command "theorems"} is the same as @{command "lemmas"}, but
-  marks the resulting facts as being of a different kind.
-
-  \end{description}
-*}
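-
-text {*
-  For example, the following merely collects two existing rules under a
-  new (hypothetical) name:
-*}
-
-  lemmas conj_rules = conjI conjE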
-
-
-section {* Oracles *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "oracle"} & : & @{text "theory \<rightarrow> theory"} & (axiomatic!) \\
-  \end{matharray}
-
-  Oracles allow Isabelle to take advantage of external reasoners such
-  as arithmetic decision procedures, model checkers, fast tautology
-  checkers or computer algebra systems.  Invoked as an oracle, an
-  external reasoner can create arbitrary Isabelle theorems.
-
-  It is the responsibility of the user to ensure that the external
-  reasoner is as trustworthy as the application requires.  Another
-  typical source of errors is the linkup between Isabelle and the
-  external tool, not just its concrete implementation, but also the
-  required translation between two different logical environments.
-
-  Isabelle merely guarantees well-formedness of the propositions being
-  asserted, and records within the internal derivation object how
-  presumed theorems depend on unproven suppositions.
-
-  @{rail \<open>
-    @@{command oracle} @{syntax name} '=' @{syntax text}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "oracle"}~@{text "name = text"} turns the given ML
-  expression @{text "text"} of type @{ML_text "'a -> cterm"} into an
-  ML function of type @{ML_text "'a -> thm"}, which is bound to the
-  global identifier @{ML_text name}.  This acts like an infinitary
-  specification of axioms!  Invoking the oracle only works within the
-  scope of the resulting theory.
-
-  \end{description}
-
-  See @{file "~~/src/HOL/ex/Iff_Oracle.thy"} for a worked example of
-  defining a new primitive rule as oracle, and turning it into a proof
-  method.
-*}
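-
-text {*
-  The following deliberately unsound sketch (with hypothetical name
-  @{text "claim"}) would turn any given proposition into a theorem,
-  illustrating why oracles have to be trusted:
-*}
-
-  oracle claim = {* fn (ct: cterm) => ct  (* accept the proposition unchecked *) *}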
-
-
-section {* Name spaces *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "hide_class"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "hide_type"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "hide_const"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "hide_fact"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    ( @{command hide_class} | @{command hide_type} |
-      @{command hide_const} | @{command hide_fact} ) ('(' @'open' ')')? (@{syntax nameref} + )
-  \<close>}
-
-  Isabelle organizes any kind of name declarations (of types,
-  constants, theorems etc.) in separate hierarchically structured name
-  spaces.  Normally the user does not have to control the behavior of
-  name spaces by hand, yet the following commands provide some way to
-  do so.
-
-  \begin{description}
-
-  \item @{command "hide_class"}~@{text names} fully removes class
-  declarations from a given name space; with the @{text "(open)"}
-  option, only the base name is hidden.
-
-  Note that hiding name space accesses has no impact on logical
-  declarations --- they remain valid internally.  Entities that are no
-  longer accessible to the user are printed with the special qualifier
-  ``@{text "??"}'' prefixed to the full internal name.
-
-  \item @{command "hide_type"}, @{command "hide_const"}, and @{command
-  "hide_fact"} are similar to @{command "hide_class"}, but hide types,
-  constants, and facts, respectively.
-  
-  \end{description}
-*}
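-
-text {*
-  For example, the following hides the base name of the library constant
-  @{text "id"}; it remains accessible via its qualified name @{text "Fun.id"}:
-*}
-
-  hide_const (open) id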
-
-end
--- a/src/Doc/IsarRef/Symbols.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-theory Symbols
-imports Base Main
-begin
-
-chapter {* Predefined Isabelle symbols \label{app:symbols} *}
-
-text {*
-  Isabelle supports an infinite number of non-ASCII symbols, which are
-  represented in source text as @{verbatim "\\"}@{verbatim "<"}@{text
-  name}@{verbatim ">"} (where @{text name} may be any identifier).  It
-  is left to front-end tools how to present these symbols to the user.
-  The collection of predefined standard symbols given below is
-  available by default for Isabelle document output, due to
-  appropriate definitions of @{verbatim "\\"}@{verbatim isasym}@{text
-  name} for each @{verbatim "\\"}@{verbatim "<"}@{text name}@{verbatim
-  ">"} in the @{verbatim isabellesym.sty} file.  Most of these symbols
-  are displayed properly in Isabelle/jEdit and in {\LaTeX} documents generated from Isabelle.
-
-  Moreover, any single symbol (or ASCII character) may be prefixed by
-  @{verbatim "\<^sup>"} for superscript and @{verbatim "\<^sub>"} for subscript,
-  such as @{verbatim "A\<^sup>\<star>"} for @{text "A\<^sup>\<star>"} and @{verbatim "A\<^sub>1"} for
-  @{text "A\<^sub>1"}.  Sub- and superscripts that span a region of text can
-  be marked up with @{verbatim "\<^bsub>"}@{text "\<dots>"}@{verbatim
-  "\<^esub>"} and @{verbatim "\<^bsup>"}@{text "\<dots>"}@{verbatim "\<^esup>"}
-  respectively, but note that there are limitations in the typographic
-  rendering quality of this form.  Furthermore, all ASCII characters
-  and most other symbols may be printed in bold by prefixing
-  @{verbatim "\<^bold>"} such as @{verbatim "\<^bold>\<alpha>"} for @{text "\<^bold>\<alpha>"}.  Note that
-  @{verbatim "\<^sup>"}, @{verbatim "\<^sub>"}, @{verbatim "\<^bold>"} cannot be combined.
-
-  Further details of Isabelle document preparation are covered in
-  \chref{ch:document-prep}.
-
-  \begin{center}
-  \begin{isabellebody}
-  \input{syms}  
-  \end{isabellebody}
-  \end{center}
-*}
-
-end
\ No newline at end of file
--- a/src/Doc/IsarRef/Synopsis.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1108 +0,0 @@
-theory Synopsis
-imports Base Main
-begin
-
-chapter {* Synopsis *}
-
-section {* Notepad *}
-
-text {*
-  An Isar proof body serves as mathematical notepad to compose logical
-  content, consisting of types, terms, facts.
-*}
-
-
-subsection {* Types and terms *}
-
-notepad
-begin
-  txt {* Locally fixed entities: *}
-  fix x   -- {* local constant, without any type information yet *}
-  fix x :: 'a  -- {* variant with explicit type-constraint for subsequent use *}
-
-  fix a b
-  assume "a = b"  -- {* type assignment at first occurrence in concrete term *}
-
-  txt {* Definitions (non-polymorphic): *}
-  def x \<equiv> "t::'a"
-
-  txt {* Abbreviations (polymorphic): *}
-  let ?f = "\<lambda>x. x"
-  term "?f ?f"
-
-  txt {* Notation: *}
-  write x  ("***")
-end
-
-
-subsection {* Facts *}
-
-text {*
-  A fact is a simultaneous list of theorems.
-*}
-
-
-subsubsection {* Producing facts *}
-
-notepad
-begin
-
-  txt {* Via assumption (``lambda''): *}
-  assume a: A
-
-  txt {* Via proof (``let''): *}
-  have b: B sorry
-
-  txt {* Via abbreviation (``let''): *}
-  note c = a b
-
-end
-
-
-subsubsection {* Referencing facts *}
-
-notepad
-begin
-  txt {* Via explicit name: *}
-  assume a: A
-  note a
-
-  txt {* Via implicit name: *}
-  assume A
-  note this
-
-  txt {* Via literal proposition (unification with results from the proof text): *}
-  assume A
-  note `A`
-
-  assume "\<And>x. B x"
-  note `B a`
-  note `B b`
-end
-
-
-subsubsection {* Manipulating facts *}
-
-notepad
-begin
-  txt {* Instantiation: *}
-  assume a: "\<And>x. B x"
-  note a
-  note a [of b]
-  note a [where x = b]
-
-  txt {* Backchaining: *}
-  assume 1: A
-  assume 2: "A \<Longrightarrow> C"
-  note 2 [OF 1]
-  note 1 [THEN 2]
-
-  txt {* Symmetric results: *}
-  assume "x = y"
-  note this [symmetric]
-
-  assume "x \<noteq> y"
-  note this [symmetric]
-
-  txt {* Adhoc-simplification (take care!): *}
-  assume "P ([] @ xs)"
-  note this [simplified]
-end
-
-
-subsubsection {* Projections *}
-
-text {*
-  Isar facts consist of multiple theorems.  There is notation to project
-  interval ranges.
-*}
-
-notepad
-begin
-  assume stuff: A B C D
-  note stuff(1)
-  note stuff(2-3)
-  note stuff(2-)
-end
-
-
-subsubsection {* Naming conventions *}
-
-text {*
-  \begin{itemize}
-
-  \item Lower-case identifiers are usually preferred.
-
-  \item Facts can be named after the main term within the proposition.
-
-  \item Facts should \emph{not} be named after the command that
-  introduced them (@{command "assume"}, @{command "have"}).  This is
-  misleading and hard to maintain.
-
-  \item Natural numbers can be used as ``meaningless'' names (more
-  appropriate than @{text "a1"}, @{text "a2"} etc.)
-
-  \item Symbolic identifiers are supported (e.g. @{text "*"}, @{text
-  "**"}, @{text "***"}).
-
-  \end{itemize}
-*}
-
-
-subsection {* Block structure *}
-
-text {*
-  The formal notepad is block structured.  The fact produced by the last
-  entry of a block is exported into the outer context.
-*}
-
-notepad
-begin
-  {
-    have a: A sorry
-    have b: B sorry
-    note a b
-  }
-  note this
-  note `A`
-  note `B`
-end
-
-text {* Explicit blocks as well as implicit blocks of nested goal
-  statements (e.g.\ @{command have}) automatically introduce one extra
-  pair of parentheses in reserve.  The @{command next} command makes it
-  possible to ``jump'' between these sub-blocks. *}
-
-notepad
-begin
-
-  {
-    have a: A sorry
-  next
-    have b: B
-    proof -
-      show B sorry
-    next
-      have c: C sorry
-    next
-      have d: D sorry
-    qed
-  }
-
-  txt {* Alternative version with explicit parentheses everywhere: *}
-
-  {
-    {
-      have a: A sorry
-    }
-    {
-      have b: B
-      proof -
-        {
-          show B sorry
-        }
-        {
-          have c: C sorry
-        }
-        {
-          have d: D sorry
-        }
-      qed
-    }
-  }
-
-end
-
-
-section {* Calculational reasoning \label{sec:calculations-synopsis} *}
-
-text {*
-  For example, see @{file "~~/src/HOL/Isar_Examples/Group.thy"}.
-*}
-
-
-subsection {* Special names in Isar proofs *}
-
-text {*
-  \begin{itemize}
-
-  \item term @{text "?thesis"} --- the main conclusion of the
-  innermost pending claim
-
-  \item term @{text "\<dots>"} --- the argument of the last explicitly
-    stated result (for infix application this is the right-hand side)
-
-  \item fact @{text "this"} --- the last result produced in the text
-
-  \end{itemize}
-*}
-
-notepad
-begin
-  have "x = y"
-  proof -
-    term ?thesis
-    show ?thesis sorry
-    term ?thesis  -- {* static! *}
-  qed
-  term "\<dots>"
-  thm this
-end
-
-text {* Calculational reasoning maintains the special fact called
-  ``@{text calculation}'' in the background.  Certain language
-  elements combine primary @{text this} with secondary @{text
-  calculation}. *}
-
-
-subsection {* Transitive chains *}
-
-text {* The idea is to combine @{text this} and @{text calculation}
-  via typical @{text trans} rules (see also @{command
-  print_trans_rules}): *}
-
-thm trans
-thm less_trans
-thm less_le_trans
-
-notepad
-begin
-  txt {* Plain bottom-up calculation: *}
-  have "a = b" sorry
-  also
-  have "b = c" sorry
-  also
-  have "c = d" sorry
-  finally
-  have "a = d" .
-
-  txt {* Variant using the @{text "\<dots>"} abbreviation: *}
-  have "a = b" sorry
-  also
-  have "\<dots> = c" sorry
-  also
-  have "\<dots> = d" sorry
-  finally
-  have "a = d" .
-
-  txt {* Top-down version with explicit claim at the head: *}
-  have "a = d"
-  proof -
-    have "a = b" sorry
-    also
-    have "\<dots> = c" sorry
-    also
-    have "\<dots> = d" sorry
-    finally
-    show ?thesis .
-  qed
-next
-  txt {* Mixed inequalities (require suitable base type): *}
-  fix a b c d :: nat
-
-  have "a < b" sorry
-  also
-  have "b \<le> c" sorry
-  also
-  have "c = d" sorry
-  finally
-  have "a < d" .
-end
-
-
-subsubsection {* Notes *}
-
-text {*
-  \begin{itemize}
-
-  \item The notion of @{text trans} rule is very general due to the
-  flexibility of Isabelle/Pure rule composition.
-
-  \item User applications may declare their own rules, with some care
-  about the operational details of higher-order unification.
-
-  \end{itemize}
-*}
-
-
-subsection {* Degenerate calculations and bigstep reasoning *}
-
-text {* The idea is to append @{text this} to @{text calculation},
-  without rule composition.  *}
-
-notepad
-begin
-  txt {* A vacuous proof: *}
-  have A sorry
-  moreover
-  have B sorry
-  moreover
-  have C sorry
-  ultimately
-  have A and B and C .
-next
-  txt {* Slightly more content (trivial bigstep reasoning): *}
-  have A sorry
-  moreover
-  have B sorry
-  moreover
-  have C sorry
-  ultimately
-  have "A \<and> B \<and> C" by blast
-next
-  txt {* More ambitious bigstep reasoning involving structured results: *}
-  have "A \<or> B \<or> C" sorry
-  moreover
-  { assume A have R sorry }
-  moreover
-  { assume B have R sorry }
-  moreover
-  { assume C have R sorry }
-  ultimately
-  have R by blast  -- {* ``big-bang integration'' of proof blocks (occasionally fragile) *}
-end
-
-
-section {* Induction *}
-
-subsection {* Induction as Natural Deduction *}
-
-text {* In principle, induction is just a special case of Natural
-  Deduction (see also \secref{sec:natural-deduction-synopsis}).  For
-  example: *}
-
-thm nat.induct
-print_statement nat.induct
-
-notepad
-begin
-  fix n :: nat
-  have "P n"
-  proof (rule nat.induct)  -- {* fragile rule application! *}
-    show "P 0" sorry
-  next
-    fix n :: nat
-    assume "P n"
-    show "P (Suc n)" sorry
-  qed
-end
-
-text {*
-  In practice, much more proof infrastructure is required.
-
-  The proof method @{method induct} provides:
-  \begin{itemize}
-
-  \item implicit rule selection and robust instantiation
-
-  \item context elements via symbolic case names
-
-  \item support for rule-structured induction statements, with local
-    parameters, premises, etc.
-
-  \end{itemize}
-*}
-
-notepad
-begin
-  fix n :: nat
-  have "P n"
-  proof (induct n)
-    case 0
-    show ?case sorry
-  next
-    case (Suc n)
-    from Suc.hyps show ?case sorry
-  qed
-end
-
-
-subsubsection {* Example *}
-
-text {*
-  The subsequent example combines the following proof patterns:
-  \begin{itemize}
-
-  \item outermost induction (over the datatype structure of natural
-  numbers), to decompose the proof problem in top-down manner
-
-  \item calculational reasoning (\secref{sec:calculations-synopsis})
-  to compose the result in each case
-
-  \item solving local claims within the calculation by simplification
-
-  \end{itemize}
-*}
-
-lemma
-  fixes n :: nat
-  shows "(\<Sum>i=0..n. i) = n * (n + 1) div 2"
-proof (induct n)
-  case 0
-  have "(\<Sum>i=0..0. i) = (0::nat)" by simp
-  also have "\<dots> = 0 * (0 + 1) div 2" by simp
-  finally show ?case .
-next
-  case (Suc n)
-  have "(\<Sum>i=0..Suc n. i) = (\<Sum>i=0..n. i) + (n + 1)" by simp
-  also have "\<dots> = n * (n + 1) div 2 + (n + 1)" by (simp add: Suc.hyps)
-  also have "\<dots> = (n * (n + 1) + 2 * (n + 1)) div 2" by simp
-  also have "\<dots> = (Suc n * (Suc n + 1)) div 2" by simp
-  finally show ?case .
-qed
-
-text {* This demonstrates how induction proofs can be done without
-  having to consider the raw Natural Deduction structure. *}
-
-
-subsection {* Induction with local parameters and premises *}
-
-text {* Idea: Pure rule statements are passed through the induction
-  rule.  This achieves convenient proof patterns, thanks to some
-  internal trickery in the @{method induct} method.
-
-  Important: Using compact HOL formulae with @{text "\<forall>/\<longrightarrow>"} is a
-  well-known anti-pattern! It would produce useless formal noise.
-*}
-
-notepad
-begin
-  fix n :: nat
-  fix P :: "nat \<Rightarrow> bool"
-  fix Q :: "'a \<Rightarrow> nat \<Rightarrow> bool"
-
-  have "P n"
-  proof (induct n)
-    case 0
-    show "P 0" sorry
-  next
-    case (Suc n)
-    from `P n` show "P (Suc n)" sorry
-  qed
-
-  have "A n \<Longrightarrow> P n"
-  proof (induct n)
-    case 0
-    from `A 0` show "P 0" sorry
-  next
-    case (Suc n)
-    from `A n \<Longrightarrow> P n`
-      and `A (Suc n)` show "P (Suc n)" sorry
-  qed
-
-  have "\<And>x. Q x n"
-  proof (induct n)
-    case 0
-    show "Q x 0" sorry
-  next
-    case (Suc n)
-    from `\<And>x. Q x n` show "Q x (Suc n)" sorry
-    txt {* Local quantification admits arbitrary instances: *}
-    note `Q a n` and `Q b n`
-  qed
-end
-
-
-subsection {* Implicit induction context *}
-
-text {* The @{method induct} method can isolate local parameters and
-  premises directly from the given statement.  This is convenient in
-  practical applications, but requires some understanding of what is
-  going on internally (as explained above).  *}
-
-notepad
-begin
-  fix n :: nat
-  fix Q :: "'a \<Rightarrow> nat \<Rightarrow> bool"
-
-  fix x :: 'a
-  assume "A x n"
-  then have "Q x n"
-  proof (induct n arbitrary: x)
-    case 0
-    from `A x 0` show "Q x 0" sorry
-  next
-    case (Suc n)
-    from `\<And>x. A x n \<Longrightarrow> Q x n`  -- {* arbitrary instances can be produced here *}
-      and `A x (Suc n)` show "Q x (Suc n)" sorry
-  qed
-end
-
-
-subsection {* Advanced induction with term definitions *}
-
-text {* Induction over subexpressions of a certain shape is delicate
-  to formalize.  The Isar @{method induct} method provides
-  infrastructure for this.
-
-  Idea: sub-expressions of the problem are turned into a defined
-  induction variable, often accompanied by fixing auxiliary
-  parameters in the original expression.  *}
-
-notepad
-begin
-  fix a :: "'a \<Rightarrow> nat"
-  fix A :: "nat \<Rightarrow> bool"
-
-  assume "A (a x)"
-  then have "P (a x)"
-  proof (induct "a x" arbitrary: x)
-    case 0
-    note prem = `A (a x)`
-      and defn = `0 = a x`
-    show "P (a x)" sorry
-  next
-    case (Suc n)
-    note hyp = `\<And>x. n = a x \<Longrightarrow> A (a x) \<Longrightarrow> P (a x)`
-      and prem = `A (a x)`
-      and defn = `Suc n = a x`
-    show "P (a x)" sorry
-  qed
-end
-
-
-section {* Natural Deduction \label{sec:natural-deduction-synopsis} *}
-
-subsection {* Rule statements *}
-
-text {*
-  Isabelle/Pure ``theorems'' are always natural deduction rules,
-  which sometimes happen to consist of a conclusion only.
-
-  The framework connectives @{text "\<And>"} and @{text "\<Longrightarrow>"} indicate the
-  rule structure declaratively.  For example: *}
-
-thm conjI
-thm impI
-thm nat.induct
-
-text {*
-  The object-logic is embedded into the Pure framework via an implicit
-  derivability judgment @{term "Trueprop :: bool \<Rightarrow> prop"}.
-
-  Thus any HOL formula appears atomic to the Pure framework, while
-  the rule structure outlines the corresponding proof pattern.
-
-  This can be made explicit as follows:
-*}
-
-notepad
-begin
-  write Trueprop  ("Tr")
-
-  thm conjI
-  thm impI
-  thm nat.induct
-end
-
-text {*
-  Isar provides first-class notation for rule statements as follows.
-*}
-
-print_statement conjI
-print_statement impI
-print_statement nat.induct
-
-
-subsubsection {* Examples *}
-
-text {*
-  Introductions and eliminations of some standard connectives of
-  the object-logic can be written as rule statements as follows.  (The
-  proof ``@{command "by"}~@{method blast}'' serves as sanity check.)
-*}
-
-lemma "(P \<Longrightarrow> False) \<Longrightarrow> \<not> P" by blast
-lemma "\<not> P \<Longrightarrow> P \<Longrightarrow> Q" by blast
-
-lemma "P \<Longrightarrow> Q \<Longrightarrow> P \<and> Q" by blast
-lemma "P \<and> Q \<Longrightarrow> (P \<Longrightarrow> Q \<Longrightarrow> R) \<Longrightarrow> R" by blast
-
-lemma "P \<Longrightarrow> P \<or> Q" by blast
-lemma "Q \<Longrightarrow> P \<or> Q" by blast
-lemma "P \<or> Q \<Longrightarrow> (P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R" by blast
-
-lemma "(\<And>x. P x) \<Longrightarrow> (\<forall>x. P x)" by blast
-lemma "(\<forall>x. P x) \<Longrightarrow> P x" by blast
-
-lemma "P x \<Longrightarrow> (\<exists>x. P x)" by blast
-lemma "(\<exists>x. P x) \<Longrightarrow> (\<And>x. P x \<Longrightarrow> R) \<Longrightarrow> R" by blast
-
-lemma "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B" by blast
-lemma "x \<in> A \<inter> B \<Longrightarrow> (x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
-
-lemma "x \<in> A \<Longrightarrow> x \<in> A \<union> B" by blast
-lemma "x \<in> B \<Longrightarrow> x \<in> A \<union> B" by blast
-lemma "x \<in> A \<union> B \<Longrightarrow> (x \<in> A \<Longrightarrow> R) \<Longrightarrow> (x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
-
-
-subsection {* Isar context elements *}
-
-text {* We derive some results out of the blue, using Isar context
-  elements and some explicit blocks.  This illustrates their meaning
-  wrt.\ Pure connectives, without goal states getting in the way.  *}
-
-notepad
-begin
-  {
-    fix x
-    have "B x" sorry
-  }
-  have "\<And>x. B x" by fact
-
-next
-
-  {
-    assume A
-    have B sorry
-  }
-  have "A \<Longrightarrow> B" by fact
-
-next
-
-  {
-    def x \<equiv> t
-    have "B x" sorry
-  }
-  have "B t" by fact
-
-next
-
-  {
-    obtain x :: 'a where "B x" sorry
-    have C sorry
-  }
-  have C by fact
-
-end
-
-
-subsection {* Pure rule composition *}
-
-text {*
-  The Pure framework provides means for:
-
-  \begin{itemize}
-
-    \item backward-chaining of rules by @{inference resolution}
-
-    \item closing of branches by @{inference assumption}
-
-  \end{itemize}
-
-  Both principles involve higher-order unification of @{text \<lambda>}-terms
-  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller).  *}
-
-notepad
-begin
-  assume a: A and b: B
-  thm conjI
-  thm conjI [of A B]  -- "instantiation"
-  thm conjI [of A B, OF a b]  -- "instantiation and composition"
-  thm conjI [OF a b]  -- "composition via unification (trivial)"
-  thm conjI [OF `A` `B`]
-
-  thm conjI [OF disjI1]
-end
-
-text {* Note: Low-level rule composition is tedious and leads to
-  unreadable~/ unmaintainable expressions in the text.  *}
-
-
-subsection {* Structured backward reasoning *}
-
-text {* Idea: Canonical proof decomposition via @{command fix}~/
-  @{command assume}~/ @{command show}, where the body produces a
-  natural deduction rule to refine some goal.  *}
-
-notepad
-begin
-  fix A B :: "'a \<Rightarrow> bool"
-
-  have "\<And>x. A x \<Longrightarrow> B x"
-  proof -
-    fix x
-    assume "A x"
-    show "B x" sorry
-  qed
-
-  have "\<And>x. A x \<Longrightarrow> B x"
-  proof -
-    {
-      fix x
-      assume "A x"
-      show "B x" sorry
-    } -- "implicit block structure made explicit"
-    note `\<And>x. A x \<Longrightarrow> B x`
-      -- "side exit for the resulting rule"
-  qed
-end
-
-
-subsection {* Structured rule application *}
-
-text {*
-  Idea: Previous facts and new claims are composed with a rule from
-  the context (or background library).
-*}
-
-notepad
-begin
-  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  -- {* simple rule (Horn clause) *}
-
-  have A sorry  -- "prefix of facts via outer sub-proof"
-  then have C
-  proof (rule r1)
-    show B sorry  -- "remaining rule premises via inner sub-proof"
-  qed
-
-  have C
-  proof (rule r1)
-    show A sorry
-    show B sorry
-  qed
-
-  have A and B sorry
-  then have C
-  proof (rule r1)
-  qed
-
-  have A and B sorry
-  then have C by (rule r1)
-
-next
-
-  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  -- {* nested rule *}
-
-  have A sorry
-  then have C
-  proof (rule r2)
-    fix x
-    assume "B1 x"
-    show "B2 x" sorry
-  qed
-
-  txt {* The compound rule premise @{prop "\<And>x. B1 x \<Longrightarrow> B2 x"} is better
-    addressed via @{command fix}~/ @{command assume}~/ @{command show}
-    in the nested proof body.  *}
-end
-
-
-subsection {* Example: predicate logic *}
-
-text {*
-  Using the above principles, standard introduction and elimination proofs
-  of predicate logic connectives of HOL work as follows.
-*}
-
-notepad
-begin
-  have "A \<longrightarrow> B" and A sorry
-  then have B ..
-
-  have A sorry
-  then have "A \<or> B" ..
-
-  have B sorry
-  then have "A \<or> B" ..
-
-  have "A \<or> B" sorry
-  then have C
-  proof
-    assume A
-    then show C sorry
-  next
-    assume B
-    then show C sorry
-  qed
-
-  have A and B sorry
-  then have "A \<and> B" ..
-
-  have "A \<and> B" sorry
-  then have A ..
-
-  have "A \<and> B" sorry
-  then have B ..
-
-  have False sorry
-  then have A ..
-
-  have True ..
-
-  have "\<not> A"
-  proof
-    assume A
-    then show False sorry
-  qed
-
-  have "\<not> A" and A sorry
-  then have B ..
-
-  have "\<forall>x. P x"
-  proof
-    fix x
-    show "P x" sorry
-  qed
-
-  have "\<forall>x. P x" sorry
-  then have "P a" ..
-
-  have "\<exists>x. P x"
-  proof
-    show "P a" sorry
-  qed
-
-  have "\<exists>x. P x" sorry
-  then have C
-  proof
-    fix a
-    assume "P a"
-    show C sorry
-  qed
-
-  txt {* Less awkward version using @{command obtain}: *}
-  have "\<exists>x. P x" sorry
-  then obtain a where "P a" ..
-end
-
-text {* Further variations to illustrate Isar sub-proofs involving
-  @{command show}: *}
-
-notepad
-begin
-  have "A \<and> B"
-  proof  -- {* two strictly isolated subproofs *}
-    show A sorry
-  next
-    show B sorry
-  qed
-
-  have "A \<and> B"
-  proof  -- {* one simultaneous sub-proof *}
-    show A and B sorry
-  qed
-
-  have "A \<and> B"
-  proof  -- {* two subproofs in the same context *}
-    show A sorry
-    show B sorry
-  qed
-
-  have "A \<and> B"
-  proof  -- {* swapped order *}
-    show B sorry
-    show A sorry
-  qed
-
-  have "A \<and> B"
-  proof  -- {* sequential subproofs *}
-    show A sorry
-    show B using `A` sorry
-  qed
-end
-
-
-subsubsection {* Example: set-theoretic operators *}
-
-text {* There is nothing special about logical connectives (@{text
-  "\<and>"}, @{text "\<or>"}, @{text "\<forall>"}, @{text "\<exists>"} etc.).  Operators from
-  set-theory or lattice-theory work analogously.  It is only a matter
-  of rule declarations in the library; rules can also be specified
-  explicitly.
-*}
-
-notepad
-begin
-  have "x \<in> A" and "x \<in> B" sorry
-  then have "x \<in> A \<inter> B" ..
-
-  have "x \<in> A" sorry
-  then have "x \<in> A \<union> B" ..
-
-  have "x \<in> B" sorry
-  then have "x \<in> A \<union> B" ..
-
-  have "x \<in> A \<union> B" sorry
-  then have C
-  proof
-    assume "x \<in> A"
-    then show C sorry
-  next
-    assume "x \<in> B"
-    then show C sorry
-  qed
-
-next
-  have "x \<in> \<Inter>A"
-  proof
-    fix a
-    assume "a \<in> A"
-    show "x \<in> a" sorry
-  qed
-
-  have "x \<in> \<Inter>A" sorry
-  then have "x \<in> a"
-  proof
-    show "a \<in> A" sorry
-  qed
-
-  have "a \<in> A" and "x \<in> a" sorry
-  then have "x \<in> \<Union>A" ..
-
-  have "x \<in> \<Union>A" sorry
-  then obtain a where "a \<in> A" and "x \<in> a" ..
-end
-
-
-section {* Generalized elimination and cases *}
-
-subsection {* General elimination rules *}
-
-text {*
-  The general format of elimination rules is illustrated by the
-  following typical representatives:
-*}
-
-thm exE     -- {* local parameter *}
-thm conjE   -- {* local premises *}
-thm disjE   -- {* split into cases *}
-
-text {*
-  Combining these characteristics leads to the following general scheme
-  for elimination rules with cases:
-
-  \begin{itemize}
-
-  \item prefix of assumptions (or ``major premises'')
-
-  \item one or more cases that make it possible to establish the main conclusion
-    in an augmented context
-
-  \end{itemize}
-*}
-
-notepad
-begin
-  assume r:
-    "A1 \<Longrightarrow> A2 \<Longrightarrow>  (* assumptions *)
-      (\<And>x y. B1 x y \<Longrightarrow> C1 x y \<Longrightarrow> R) \<Longrightarrow>  (* case 1 *)
-      (\<And>x y. B2 x y \<Longrightarrow> C2 x y \<Longrightarrow> R) \<Longrightarrow>  (* case 2 *)
-      R  (* main conclusion *)"
-
-  have A1 and A2 sorry
-  then have R
-  proof (rule r)
-    fix x y
-    assume "B1 x y" and "C1 x y"
-    show ?thesis sorry
-  next
-    fix x y
-    assume "B2 x y" and "C2 x y"
-    show ?thesis sorry
-  qed
-end
-
-text {* Here @{text "?thesis"} is used to refer to the unchanged goal
-  statement.  *}
-
-
-subsection {* Rules with cases *}
-
-text {*
-  Applying an elimination rule to some goal leaves that goal unchanged,
-  but allows the context to be augmented in the sub-proof of each case.
-
-  Isar provides some infrastructure to support this:
-
-  \begin{itemize}
-
-  \item native language elements to state eliminations
-
-  \item symbolic case names
-
-  \item method @{method cases} to recover this structure in a
-  sub-proof
-
-  \end{itemize}
-*}
-
-print_statement exE
-print_statement conjE
-print_statement disjE
-
-lemma
-  assumes A1 and A2  -- {* assumptions *}
-  obtains
-    (case1)  x y where "B1 x y" and "C1 x y"
-  | (case2)  x y where "B2 x y" and "C2 x y"
-  sorry
-
-
-subsubsection {* Example *}
-
-lemma tertium_non_datur:
-  obtains
-    (T)  A
-  | (F)  "\<not> A"
-  by blast
-
-notepad
-begin
-  fix x y :: 'a
-  have C
-  proof (cases "x = y" rule: tertium_non_datur)
-    case T
-    from `x = y` show ?thesis sorry
-  next
-    case F
-    from `x \<noteq> y` show ?thesis sorry
-  qed
-end
-
-
-subsubsection {* Example *}
-
-text {*
-  Isabelle/HOL specification mechanisms (datatype, inductive, etc.)
-  provide suitable derived cases rules.
-*}
-
-datatype foo = Foo | Bar foo
-
-notepad
-begin
-  fix x :: foo
-  have C
-  proof (cases x)
-    case Foo
-    from `x = Foo` show ?thesis sorry
-  next
-    case (Bar a)
-    from `x = Bar a` show ?thesis sorry
-  qed
-end
-
-
-subsection {* Obtaining local contexts *}
-
-text {* A single ``case'' branch may be inlined into Isar proof text
-  via @{command obtain}.  This proves @{prop "(\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow>
-  thesis"} on the spot, and augments the context afterwards.  *}
-
-notepad
-begin
-  fix B :: "'a \<Rightarrow> bool"
-
-  obtain x where "B x" sorry
-  note `B x`
-
-  txt {* Conclusions from this context may not mention @{term x} again! *}
-  {
-    obtain x where "B x" sorry
-    from `B x` have C sorry
-  }
-  note `C`
-end
-
-end
--- a/src/Doc/IsarRef/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-FORMAT="$1"
-VARIANT="$2"
-
-"$ISABELLE_TOOL" logo Isar
-
-cp "$ISABELLE_HOME/src/Doc/iman.sty" .
-cp "$ISABELLE_HOME/src/Doc/extra.sty" .
-cp "$ISABELLE_HOME/src/Doc/isar.sty" .
-cp "$ISABELLE_HOME/src/Doc/ttbox.sty" .
-cp "$ISABELLE_HOME/src/Doc/underscore.sty" .
-cp "$ISABELLE_HOME/src/Doc/manual.bib" .
-
-./showsymbols "$ISABELLE_HOME/lib/texinputs/isabellesym.sty" > syms.tex
-
-"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
-
Binary file src/Doc/IsarRef/document/isar-vm.pdf has changed
--- a/src/Doc/IsarRef/document/isar-vm.svg	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,460 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="543.02673"
-   height="215.66071"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.46"
-   version="1.0"
-   sodipodi:docname="isar-vm.svg"
-   inkscape:output_extension="org.inkscape.output.svg.inkscape">
-  <defs
-     id="defs4">
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path4130"
-         d="M 5.77,0 L -2.88,5 L -2.88,-5 L 5.77,0 z"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
-         transform="scale(0.4,0.4)" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Mend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="Arrow1Mend"
-       style="overflow:visible">
-      <path
-         id="path3993"
-         d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
-         transform="matrix(-0.4,0,0,-0.4,-4,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="Arrow1Lend"
-       style="overflow:visible">
-      <path
-         id="path3207"
-         d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lstart"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="Arrow1Lstart"
-       style="overflow:visible">
-      <path
-         id="path3204"
-         d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
-         transform="matrix(0.8,0,0,0.8,10,0)" />
-    </marker>
-    <inkscape:perspective
-       sodipodi:type="inkscape:persp3d"
-       inkscape:vp_x="0 : 526.18109 : 1"
-       inkscape:vp_y="0 : 1000 : 0"
-       inkscape:vp_z="744.09448 : 526.18109 : 1"
-       inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
-       id="perspective10" />
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10"
-     guidetolerance="10"
-     objecttolerance="10"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4142136"
-     inkscape:cx="305.44602"
-     inkscape:cy="38.897723"
-     inkscape:document-units="mm"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:snap-global="true"
-     units="mm"
-     inkscape:window-width="1226"
-     inkscape:window-height="951"
-     inkscape:window-x="28"
-     inkscape:window-y="47">
-    <inkscape:grid
-       type="xygrid"
-       id="grid2383"
-       visible="true"
-       enabled="true"
-       units="mm"
-       spacingx="2.5mm"
-       spacingy="2.5mm"
-       empspacing="2" />
-  </sodipodi:namedview>
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1"
-     transform="translate(-44.641342,-76.87234)">
-    <g
-       id="g3448"
-       transform="translate(70.838012,79.725562)">
-      <rect
-         ry="17.67767"
-         y="131.52507"
-         x="212.09882"
-         height="53.149605"
-         width="70.866142"
-         id="rect3407"
-         style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
-      <text
-         sodipodi:linespacing="100%"
-         id="text3409"
-         y="164.06471"
-         x="223.50845"
-         style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-         xml:space="preserve"><tspan
-           style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-           y="164.06471"
-           x="223.50845"
-           id="tspan3411"
-           sodipodi:role="line">chain</tspan></text>
-    </g>
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99921262;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 424.72469,236.82544 L 356.83209,236.82544 L 356.83209,236.82544"
-       id="path3458" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99921268;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 282.35183,236.82544 L 215.11403,236.82544 L 215.11403,236.82544"
-       id="path4771" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;marker-start:none;marker-mid:none;marker-end:url(#TriangleOutM);stroke-opacity:1"
-       d="M 424.69726,192.5341 L 215.13005,192.5341"
-       id="path4773" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
-       d="M 211.98429,148.24276 L 422.13162,148.24276"
-       id="path6883" />
-    <g
-       id="g3443"
-       transform="translate(70.866146,78.725567)">
-      <rect
-         ry="17.67767"
-         y="42.942394"
-         x="70.366531"
-         height="141.73228"
-         width="70.866142"
-         id="rect2586"
-         style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
-      <text
-         sodipodi:linespacing="100%"
-         id="text3370"
-         y="116.62494"
-         x="79.682419"
-         style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-         xml:space="preserve"><tspan
-           style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-           y="116.62494"
-           x="79.682419"
-           id="tspan3372"
-           sodipodi:role="line">prove</tspan></text>
-    </g>
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
-       d="M 176.66575,92.035445 L 176.66575,118.61025"
-       id="path7412" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path9011"
-       sodipodi:cx="119.58662"
-       sodipodi:cy="266.74686"
-       sodipodi:rx="93.01181"
-       sodipodi:ry="53.149605"
-       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
-       transform="matrix(0.2378166,0,0,-0.2269133,90.621413,253.06251)"
-       sodipodi:start="0.29223018"
-       sodipodi:end="5.9921036"
-       sodipodi:open="true" />
-    <g
-       id="g3453"
-       transform="translate(70.866151,78.725565)">
-      <rect
-         ry="17.67767"
-         y="42.942394"
-         x="353.83112"
-         height="141.73228"
-         width="70.866142"
-         id="rect3381"
-         style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
-      <text
-         sodipodi:linespacing="100%"
-         id="text3383"
-         y="119.31244"
-         x="365.98294"
-         style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-         xml:space="preserve"><tspan
-           style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-           y="119.31244"
-           x="365.98294"
-           sodipodi:role="line"
-           id="tspan3387">state</tspan></text>
-    </g>
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
-       d="M 460.13031,263.40024 L 460.13031,289.97505"
-       id="path7941" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path10594"
-       sodipodi:cx="119.58662"
-       sodipodi:cy="266.74686"
-       sodipodi:rx="93.01181"
-       sodipodi:ry="53.149605"
-       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
-       transform="matrix(-0.2378166,0,0,0.2269133,546.17466,132.00569)"
-       sodipodi:start="0.29223018"
-       sodipodi:end="5.9921036"
-       sodipodi:open="true" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:bevel;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path12210"
-       sodipodi:cx="119.58662"
-       sodipodi:cy="266.74686"
-       sodipodi:rx="93.01181"
-       sodipodi:ry="53.149605"
-       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
-       transform="matrix(-0.2378166,0,0,0.2269133,546.17465,87.714359)"
-       sodipodi:start="0.29223018"
-       sodipodi:end="5.9921036"
-       sodipodi:open="true" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-start:none;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path12212"
-       sodipodi:cx="119.58662"
-       sodipodi:cy="266.74686"
-       sodipodi:rx="93.01181"
-       sodipodi:ry="53.149605"
-       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
-       transform="matrix(-0.2378166,0,0,0.2269133,546.17465,176.29703)"
-       sodipodi:start="0.29223018"
-       sodipodi:end="5.9921036"
-       sodipodi:open="true" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path12214"
-       sodipodi:cx="119.58662"
-       sodipodi:cy="266.74686"
-       sodipodi:rx="93.01181"
-       sodipodi:ry="53.149605"
-       d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
-       transform="matrix(0,0.2378166,0.2269133,0,399.60191,71.056696)"
-       sodipodi:start="0.29223018"
-       sodipodi:end="5.9921036"
-       sodipodi:open="true" />
-    <text
-       xml:space="preserve"
-       style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="173.49998"
-       y="97.094513"
-       id="text19307"
-       sodipodi:linespacing="100%"
-       transform="translate(17.216929,6.5104864)"><tspan
-         sodipodi:role="line"
-         id="tspan19309"
-         x="173.49998"
-         y="97.094513" /></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="185.52402"
-       y="110.07987"
-       id="text19311"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19313"
-         x="185.52402"
-         y="110.07987">theorem</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="389.99997"
-       y="11.594519"
-       id="text19315"
-       sodipodi:linespacing="100%"
-       transform="translate(17.216929,6.5104864)"><tspan
-         sodipodi:role="line"
-         id="tspan19317"
-         x="389.99997"
-         y="11.594519" /></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="468.98859"
-       y="280.47543"
-       id="text19319"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19321"
-         x="468.98859"
-         y="280.47543">qed</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="549.06946"
-       y="239.58423"
-       id="text19323"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19325"
-         x="549.06946"
-         y="239.58423">qed</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="549.39172"
-       y="191.26213"
-       id="text19327"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19329"
-         x="549.39172"
-         y="191.26213">fix</tspan><tspan
-         sodipodi:role="line"
-         x="549.39172"
-         y="201.26213"
-         id="tspan19331">assume</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="548.71301"
-       y="146.97079"
-       id="text19333"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19335"
-         x="548.71301"
-         y="146.97079">{ }</tspan><tspan
-         sodipodi:role="line"
-         x="548.71301"
-         y="156.97079"
-         id="tspan19337">next</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="477.84686"
-       y="98.264297"
-       id="text19339"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         x="477.84686"
-         y="98.264297"
-         id="tspan19343">note</tspan><tspan
-         sodipodi:role="line"
-         x="477.84686"
-         y="108.2643"
-         id="tspan19358">let</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="43.791733"
-       y="190.29289"
-       id="text19345"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19347"
-         x="43.791733"
-         y="190.29289">using</tspan><tspan
-         sodipodi:role="line"
-         x="43.791733"
-         y="200.29289"
-         id="tspan19349">unfolding</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="378.65891"
-       y="230.52518"
-       id="text19360"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19362"
-         x="378.65891"
-         y="230.52518">then</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:150%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="233.98795"
-       y="233.05347"
-       id="text19364"
-       sodipodi:linespacing="150%"><tspan
-         sodipodi:role="line"
-         x="233.98795"
-         y="233.05347"
-         id="tspan19368">have</tspan><tspan
-         sodipodi:role="line"
-         x="233.98795"
-         y="248.05347"
-         id="tspan19370">show</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:150%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="305.89636"
-       y="188.76213"
-       id="text19374"
-       sodipodi:linespacing="150%"><tspan
-         sodipodi:role="line"
-         x="305.89636"
-         y="188.76213"
-         id="tspan19376">have</tspan><tspan
-         sodipodi:role="line"
-         x="305.89636"
-         y="203.76213"
-         id="tspan19378">show</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
-       x="303.82324"
-       y="141.07895"
-       id="text19380"
-       sodipodi:linespacing="100%"><tspan
-         sodipodi:role="line"
-         id="tspan19382"
-         x="303.82324"
-         y="141.07895">proof</tspan></text>
-  </g>
-</svg>
--- a/src/Doc/IsarRef/document/root.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-\documentclass[12pt,a4paper,fleqn]{report}
-\usepackage[T1]{fontenc}
-\usepackage{amssymb}
-\usepackage{eurosym}
-\usepackage[english]{babel}
-\usepackage[only,bigsqcap]{stmaryrd}
-\usepackage{textcomp}
-\usepackage{latexsym}
-\usepackage{graphicx}
-\let\intorig=\int  %iman.sty redefines \int
-\usepackage{iman,extra,isar,proof}
-\usepackage[nohyphen,strings]{underscore}
-\usepackage{isabelle}
-\usepackage{isabellesym}
-\usepackage{railsetup}
-\usepackage{ttbox}
-\usepackage{supertabular}
-\usepackage{style}
-\usepackage{pdfsetup}
-
-\hyphenation{Isabelle}
-\hyphenation{Isar}
-
-\isadroptag{theory}
-\title{\includegraphics[scale=0.5]{isabelle_isar} \\[4ex] The Isabelle/Isar Reference Manual}
-\author{\emph{Makarius Wenzel} \\[3ex]
-  With Contributions by
-  Clemens Ballarin,
-  Stefan Berghofer, \\
-  Jasmin Blanchette,
-  Timothy Bourke,
-  Lukas Bulwahn, \\
-  Amine Chaieb,
-  Lucas Dixon,
-  Florian Haftmann, \\
-  Brian Huffman,
-  Gerwin Klein,
-  Alexander Krauss, \\
-  Ond\v{r}ej Kun\v{c}ar,
-  Andreas Lochbihler,
-  Tobias Nipkow, \\
-  Lars Noschinski,
-  David von Oheimb,
-  Larry Paulson, \\
-  Sebastian Skalberg,
-  Christian Sternagel
-}
-
-\makeindex
-
-\chardef\charbackquote=`\`
-\newcommand{\backquote}{\mbox{\tt\charbackquote}}
-
-
-\begin{document}
-
-\maketitle 
-
-\pagenumbering{roman}
-{\def\isamarkupchapter#1{\chapter*{#1}}\input{Preface.tex}}
-\tableofcontents
-\clearfirst
-
-\part{Basic Concepts}
-\input{Synopsis.tex}
-\input{Framework.tex}
-\input{First_Order_Logic.tex}
-\part{General Language Elements}
-\input{Outer_Syntax.tex}
-\input{Document_Preparation.tex}
-\input{Spec.tex}
-\input{Proof.tex}
-\input{Inner_Syntax.tex}
-\input{Misc.tex}
-\input{Generic.tex}
-\part{Isabelle/HOL}\label{part:hol}
-\input{HOL_Specific.tex}
-
-\part{Appendix}
-\appendix
-\input{Quick_Reference.tex}
-\let\int\intorig
-\input{Symbols.tex}
-\input{ML_Tactic.tex}
-
-\begingroup
-  \tocentry{\bibname}
-  \bibliographystyle{abbrv} \small\raggedright\frenchspacing
-  \bibliography{manual}
-\endgroup
-
-\tocentry{\indexname}
-\printindex
-
-\end{document}
--- a/src/Doc/IsarRef/document/showsymbols	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-#!/usr/bin/env perl
-
-print "\\begin{supertabular}{ll\@{\\qquad}ll}\n";
-
-$eol = "&";
-
-while (<ARGV>) {
-    if (m/^\\newcommand\{\\isasym([A-Za-z]+)\}/) {
-       print "\\verb,\\<$1>, & {\\isasym$1} $eol\n";
-#       print "\\verb,\\<$1>, & \\isactrlbold{\\isasym$1}~{\\isasym$1} $eol\n";
-#       print "\\verb,\\<$1>, & B\\isactrlsup{\\isasym$1} $eol\n";
-#       print "\\verb,\\<$1>, & B\\isactrlsub{\\isasym$1} $eol\n";
-        if ("$eol" eq "&") {
-            $eol = "\\\\";
-        } else {
-            $eol = "&";
-        }
-    }
-}
-
-if ("$eol" eq "\\\\") {
-    print "$eol\n";
-}
-
-print "\\end{supertabular}\n";
-
--- a/src/Doc/IsarRef/document/style.sty	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-%% toc
-\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
-\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
-
-\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
-
-%% references
-\newcommand{\partref}[1]{part~\ref{#1}}
-\newcommand{\secref}[1]{\S\ref{#1}}
-\newcommand{\chref}[1]{chapter~\ref{#1}}
-\newcommand{\Chref}[1]{Chapter~\ref{#1}}
-\newcommand{\appref}[1]{appendix~\ref{#1}}
-\newcommand{\Appref}[1]{Appendix~\ref{#1}}
-\newcommand{\figref}[1]{figure~\ref{#1}}
-\newcommand{\Figref}[1]{Figure~\ref{#1}}
-
-%% Isar
-\newcommand{\isasymBBAR}{{\,\newdimen{\tmpheight}\settoheight\tmpheight{\isacharbar}\rule{1pt}{\tmpheight}\,}}
-\isafoldtag{noproof}\def\isafoldnoproof{~\isafold{proof}}
-\newcommand{\isadigitreset}{\def\isadigit##1{##1}}
-\renewcommand{\isacommand}[1]{\isakeyword{\isadigitreset#1}}
-
-%% ML
-\newenvironment{mldecls}{\par\noindent\begingroup\def\isanewline{\\}\begin{tabular}{ll}}{\end{tabular}\medskip\endgroup}
-
-\renewcommand{\isatagML}{\begingroup\isabellestyle{default}\isastyle\isadigitreset}
-\renewcommand{\endisatagML}{\endgroup}
-
-%% math
-\newcommand{\isasymstrut}{\isamath{\mathstrut}}
-\newcommand{\isasymvartheta}{\isamath{\,\theta}}
-\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
-\renewcommand{\isadigit}[1]{\isamath{#1}}
-\newcommand{\text}[1]{\mbox{#1}}
-
-%% global style options
-\pagestyle{headings}
-\sloppy
-
-\parindent 0pt\parskip 0.5ex
-
-\isabellestyle{literalunderscore}
-
-\newcommand{\isasymdash}{\isatext{\mbox{-}}}
-
-\railtermfont{\isabellestyle{tt}}
-\railnontermfont{\isabellestyle{literalunderscore}}
-\railnamefont{\isabellestyle{literalunderscore}}
--- a/src/Doc/JEdit/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ b/src/Doc/JEdit/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -7,7 +7,7 @@
 
 "$ISABELLE_TOOL" logo jEdit
 
-cp "$ISABELLE_HOME/src/Doc/IsarRef/document/style.sty" .
+cp "$ISABELLE_HOME/src/Doc/Isar-Ref/document/style.sty" .
 cp "$ISABELLE_HOME/src/Doc/iman.sty" .
 cp "$ISABELLE_HOME/src/Doc/extra.sty" .
 cp "$ISABELLE_HOME/src/Doc/isar.sty" .
--- a/src/Doc/LaTeXsugar/Sugar.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,542 +0,0 @@
-(*<*)
-theory Sugar
-imports "~~/src/HOL/Library/LaTeXsugar" "~~/src/HOL/Library/OptionalSugar"
-begin
-(*>*)
-text{*
-\section{Introduction}
-
-This document is for those Isabelle users who have mastered
-the art of mixing \LaTeX\ text and Isabelle theories and never want to
-typeset a theorem by hand anymore because they have experienced the
-bliss of writing \verb!@!\verb!{thm[display]setsum_cartesian_product[no_vars]}!
-and seeing Isabelle typeset it for them:
-@{thm[display,eta_contract=false] setsum_cartesian_product[no_vars]}
-No typos, no omissions, no sweat.
-If you have not experienced that joy, read Chapter 4, \emph{Presenting
-Theories}, \cite{LNCS2283} first.
-
-If you have mastered the art of Isabelle's \emph{antiquotations},
-i.e.\ things like the above \verb!@!\verb!{thm...}!, beware: in your vanity
-you may be tempted to think that all readers of the stunning ps or pdf
-documents you can now produce at the drop of a hat will be struck with
-awe at the beauty unfolding in front of their eyes. Until one day you
-come across that very critical of readers known as the ``common referee''.
-He has the nasty habit of refusing to understand unfamiliar notation
-like Isabelle's infamous @{text"\<lbrakk> \<rbrakk> \<Longrightarrow>"} no matter how many times you
-explain it in your paper. Even worse, he thinks that using @{text"\<lbrakk>
-\<rbrakk>"} for anything other than denotational semantics is a cardinal sin
-that must be punished by instant rejection.
-
-
-This document shows you how to make Isabelle and \LaTeX\ cooperate to
-produce ordinary looking mathematics that hides the fact that it was
-typeset by a machine. You merely need to load the right files:
-\begin{itemize}
-\item Import theory \texttt{LaTeXsugar} in the header of your own
-theory.  You may also want bits of \texttt{OptionalSugar}, which you can
-copy selectively into your own theory or import as a whole.  Both
-theories live in \texttt{HOL/Library} and are found automatically.
-
-\item Should you need additional \LaTeX\ packages (the text will tell
-you so), you include them at the beginning of your \LaTeX\ document,
-typically in \texttt{root.tex}. For a start, you should
-\verb!\usepackage{amssymb}! --- otherwise typesetting
-@{prop[source]"\<not>(\<exists>x. P x)"} will fail because the AMS symbol
-@{text"\<nexists>"} is missing.
-\end{itemize}
-
-
-\section{HOL syntax}
-
-\subsection{Logic}
-
-The formula @{prop[source]"\<not>(\<exists>x. P x)"} is typeset as @{prop"~(EX x. P x)"}.
-
-The predefined constructs @{text"if"}, @{text"let"} and
-@{text"case"} are set in sans serif font to distinguish them from
-other functions. This improves readability:
-\begin{itemize}
-\item @{term"if b then e\<^sub>1 else e\<^sub>2"} instead of @{text"if b then e\<^sub>1 else e\<^sub>2"}.
-\item @{term"let x = e\<^sub>1 in e\<^sub>2"} instead of @{text"let x = e\<^sub>1 in e\<^sub>2"}.
-\item @{term"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"} instead of\\
-      @{text"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"}.
-\end{itemize}
-
-\subsection{Sets}
-
-Although set syntax in HOL is already close to
-standard, we provide a few further improvements:
-\begin{itemize}
-\item @{term"{x. P}"} instead of @{text"{x. P}"}.
-\item @{term"{}"} instead of @{text"{}"}, where
- @{term"{}"} is also input syntax.
-\item @{term"insert a (insert b (insert c M))"} instead of @{text"insert a (insert b (insert c M))"}.
-\end{itemize}
-
-
-\subsection{Lists}
-
-If lists are used heavily, the following notations increase readability:
-\begin{itemize}
-\item @{term"x # xs"} instead of @{text"x # xs"},
-      where @{term"x # xs"} is also input syntax.
-If you prefer more space around the $\cdot$ you have to redefine
-\verb!\isasymcdot! in \LaTeX:
-\verb!\renewcommand{\isasymcdot}{\isamath{\,\cdot\,}}!
-
-\item @{term"length xs"} instead of @{text"length xs"}.
-\item @{term"nth xs n"} instead of @{text"nth xs n"},
-      the $n$th element of @{text xs}.
-
-\item Human readers are good at converting automatically from lists to
-sets. Hence \texttt{OptionalSugar} contains syntax for suppressing the
-conversion function @{const set}: for example, @{prop[source]"x \<in> set xs"}
-becomes @{prop"x \<in> set xs"}.
-
-\item The @{text"@"} operation associates implicitly to the right,
-which leads to unpleasant line breaks if the term is too long for one
-line. To avoid this, \texttt{OptionalSugar} contains syntax to group
-@{text"@"}-terms to the left before printing, which leads to better
-line breaking behaviour:
-@{term[display]"term\<^sub>0 @ term\<^sub>1 @ term\<^sub>2 @ term\<^sub>3 @ term\<^sub>4 @ term\<^sub>5 @ term\<^sub>6 @ term\<^sub>7 @ term\<^sub>8 @ term\<^sub>9 @ term\<^sub>1\<^sub>0"}
-
-\end{itemize}
-
-
-\subsection{Numbers}
-
-Coercions between numeric types are alien to mathematicians who
-consider, for example, @{typ nat} as a subset of @{typ int}.
-\texttt{OptionalSugar} contains syntax for suppressing numeric coercions such
-as @{const int} @{text"::"} @{typ"nat \<Rightarrow> int"}. For example,
-@{term[source]"int 5"} is printed as @{term "int 5"}. Embeddings of types
-@{typ nat}, @{typ int}, @{typ real} are covered; non-injective coercions such
-as @{const nat} @{text"::"} @{typ"int \<Rightarrow> nat"} are not and should not be
-hidden.
-
-
-\section{Printing theorems}
-
-\subsection{Question marks}
-
-If you print anything containing schematic variables, especially
-theorems, the variables are prefixed with a question mark:
-\verb!@!\verb!{thm conjI}! results in @{thm conjI}. Most of the time
-you would rather not see the question marks. There is an attribute
-\verb!no_vars! that you can attach to the theorem that turns its
-schematic variables into ordinary free variables: \verb!@!\verb!{thm conjI[no_vars]}!
-results in @{thm conjI[no_vars]}.
-
-This \verb!no_vars! business can become a bit tedious.
-If you would rather never see question marks, simply put
-\begin{quote}
-\verb!options [show_question_marks = false]!
-\end{quote}
-into the relevant \texttt{ROOT} file, just before the \texttt{theories} for that session.
-The rest of this document is produced with this flag set to \texttt{false}.
-
-Hint: Setting \verb!show_question_marks! to \texttt{false} only
-suppresses question marks; variables that end in digits,
-e.g. @{text"x1"}, are still printed with a trailing @{text".0"},
-e.g. @{text"x1.0"}, their internal index. This can be avoided by
-turning the last digit into a subscript: write \verb!x\<^sub>1! and
-obtain the much nicer @{text"x\<^sub>1"}. *}
-
-(*<*)declare [[show_question_marks = false]](*>*)
-
-subsection {*Qualified names*}
-
-text{* If there are multiple declarations of the same name, Isabelle prints
-the qualified name, for example @{text "T.length"}, where @{text T} is the
-theory it is defined in, to distinguish it from the predefined @{const[source]
-"List.length"}. In case there is no danger of confusion, you can insist on
-short names (no qualifiers) by setting the \verb!names_short!
-configuration option in the context.
-
-
-\subsection {Variable names\label{sec:varnames}}
-
-It sometimes happens that you want to change the name of a
-variable in a theorem before printing it. This can easily be achieved
-with the help of Isabelle's instantiation attribute \texttt{where}:
-@{thm conjI[where P = \<phi> and Q = \<psi>]} is the result of
-\begin{quote}
-\verb!@!\verb!{thm conjI[where P = \<phi> and Q = \<psi>]}!
-\end{quote}
-To support the ``\_''-notation for irrelevant variables,
-the constant \texttt{DUMMY} has been introduced:
-@{thm fst_conv[of _ DUMMY]} is produced by
-\begin{quote}
-\verb!@!\verb!{thm fst_conv[of _ DUMMY]}!
-\end{quote}
-Variables that are bound by quantifiers or lambdas cannot be renamed
-like this. Instead, the attribute \texttt{rename\_abs} does the
-job. It expects a list of names or underscores, similar to the
-\texttt{of} attribute:
-\begin{quote}
-\verb!@!\verb!{thm split_paired_All[rename_abs _ l r]}!
-\end{quote}
-produces @{thm split_paired_All[rename_abs _ l r]}.
-
-
-\subsection{Inference rules}
-
-To print theorems as inference rules you need to include Didier
-R\'emy's \texttt{mathpartir} package~\cite{mathpartir}
-for typesetting inference rules in your \LaTeX\ file.
-
-Writing \verb!@!\verb!{thm[mode=Rule] conjI}! produces
-@{thm[mode=Rule] conjI}, even in the middle of a sentence.
-If you prefer your inference rule on a separate line, maybe with a name,
-\begin{center}
-@{thm[mode=Rule] conjI} {\sc conjI}
-\end{center}
-is produced by
-\begin{quote}
-\verb!\begin{center}!\\
-\verb!@!\verb!{thm[mode=Rule] conjI} {\sc conjI}!\\
-\verb!\end{center}!
-\end{quote}
-It is not recommended to use the standard \texttt{display} option
-together with \texttt{Rule} because centering does not work and because
-the line breaking mechanisms of \texttt{display} and \texttt{mathpartir} can
-clash.
-
-Of course you can display multiple rules in this fashion:
-\begin{quote}
-\verb!\begin{center}!\\
-\verb!@!\verb!{thm[mode=Rule] conjI} {\sc conjI} \\[1ex]!\\
-\verb!@!\verb!{thm[mode=Rule] disjI1} {\sc disjI$_1$} \qquad!\\
-\verb!@!\verb!{thm[mode=Rule] disjI2} {\sc disjI$_2$}!\\
-\verb!\end{center}!
-\end{quote}
-yields
-\begin{center}\small
-@{thm[mode=Rule] conjI} {\sc conjI} \\[1ex]
-@{thm[mode=Rule] disjI1} {\sc disjI$_1$} \qquad
-@{thm[mode=Rule] disjI2} {\sc disjI$_2$}
-\end{center}
-
-The \texttt{mathpartir} package copes well if there are too many
-premises for one line:
-\begin{center}
-@{prop[mode=Rule] "\<lbrakk> A \<longrightarrow> B; B \<longrightarrow> C; C \<longrightarrow> D; D \<longrightarrow> E; E \<longrightarrow> F; F \<longrightarrow> G;
- G \<longrightarrow> H; H \<longrightarrow> I; I \<longrightarrow> J; J \<longrightarrow> K \<rbrakk> \<Longrightarrow> A \<longrightarrow> K"}
-\end{center}
-
-Limitations: 1. Premises and conclusion must each fit on a single
-line.  2. Premises that are @{text"\<Longrightarrow>"}-implications are again
-displayed with a horizontal line, which looks at least unusual.
-
-
-If you print theorems without premises, no rule is produced by the
-\texttt{Rule} print mode. However, you can use \texttt{Axiom} instead:
-\begin{quote}
-\verb!\begin{center}!\\
-\verb!@!\verb!{thm[mode=Axiom] refl} {\sc refl}! \\
-\verb!\end{center}!
-\end{quote}
-yields
-\begin{center}
-@{thm[mode=Axiom] refl} {\sc refl} 
-\end{center}
-
-
-\subsection{Displays and font sizes}
-
-When displaying theorems with the \texttt{display} option, for example as in
-\verb!@!\verb!{thm[display] refl}! @{thm[display] refl} the theorem is
-set in small font. It uses the \LaTeX-macro \verb!\isastyle!,
-which is also the style that regular theory text is set in, e.g. *}
-
-lemma "t = t"
-(*<*)oops(*>*)
-
-text{* \noindent Otherwise \verb!\isastyleminor! is used,
-which does not modify the font size (assuming you stick to the default
-\verb!\isabellestyle{it}! in \texttt{root.tex}). If you prefer
-normal font size throughout your text, include
-\begin{quote}
-\verb!\renewcommand{\isastyle}{\isastyleminor}!
-\end{quote}
-in \texttt{root.tex}. On the other hand, if you like the small font,
-just put \verb!\isastyle! in front of the text in question,
-e.g.\ at the start of one of the center-environments above.
-
-The advantage of the display option is that you can display a whole
-list of theorems in one go. For example,
-\verb!@!\verb!{thm[display] append.simps}!
-generates @{thm[display] append.simps}
-
-
-\subsection{If-then}
-
-If you prefer a fake ``natural language'' style you can produce
-the body of
-\newtheorem{theorem}{Theorem}
-\begin{theorem}
-@{thm[mode=IfThen] le_trans}
-\end{theorem}
-by typing
-\begin{quote}
-\verb!@!\verb!{thm[mode=IfThen] le_trans}!
-\end{quote}
-
-In order to prevent odd line breaks, the premises are put into boxes.
-At times this is too drastic:
-\begin{theorem}
-@{prop[mode=IfThen] "longpremise \<Longrightarrow> longerpremise \<Longrightarrow> P(f(f(f(f(f(f(f(f(f(x)))))))))) \<Longrightarrow> longestpremise \<Longrightarrow> conclusion"}
-\end{theorem}
-In that case you should use \texttt{IfThenNoBox} instead of
-\texttt{IfThen}:
-\begin{theorem}
-@{prop[mode=IfThenNoBox] "longpremise \<Longrightarrow> longerpremise \<Longrightarrow> P(f(f(f(f(f(f(f(f(f(x)))))))))) \<Longrightarrow> longestpremise \<Longrightarrow> conclusion"}
-\end{theorem}
-
-
-\subsection{Doing it yourself\label{sec:yourself}}
-
-If for some reason you want or need to present theorems your
-own way, you can extract the premises and the conclusion explicitly
-and combine them as you like:
-\begin{itemize}
-\item \verb!@!\verb!{thm (prem 1)! $thm$\verb!}!
-prints premise 1 of $thm$.
-\item \verb!@!\verb!{thm (concl)! $thm$\verb!}!
-prints the conclusion of $thm$.
-\end{itemize}
-For example, ``from @{thm (prem 2) conjI} and
-@{thm (prem 1) conjI} we conclude @{thm (concl) conjI}''
-is produced by
-\begin{quote}
-\verb!from !\verb!@!\verb!{thm (prem 2) conjI}! \verb!and !\verb!@!\verb!{thm (prem 1) conjI}!\\
-\verb!we conclude !\verb!@!\verb!{thm (concl) conjI}!
-\end{quote}
-Thus you can rearrange or hide premises and typeset the theorem as you like.
-Styles like \verb!(prem 1)! are a general mechanism explained
-in \S\ref{sec:styles}.
-
-
-\subsection{Patterns}
-
-
-In \S\ref{sec:varnames} we showed how to create patterns containing ``@{term DUMMY}''.
-You can drive this game even further and extend the syntax of let
-bindings such that certain functions like @{term fst}, @{term hd}, 
-etc.\ are printed as patterns. \texttt{OptionalSugar} provides the following:
-
-\begin{center}
-\begin{tabular}{l@ {~~produced by~~}l}
-@{term "let x = fst p in t"} & \verb!@!\verb!{term "let x = fst p in t"}!\\
-@{term "let x = snd p in t"} & \verb!@!\verb!{term "let x = snd p in t"}!\\
-@{term "let x = hd xs in t"} & \verb!@!\verb!{term "let x = hd xs in t"}!\\
-@{term "let x = tl xs in t"} & \verb!@!\verb!{term "let x = tl xs in t"}!\\
-@{term "let x = the y in t"} & \verb!@!\verb!{term "let x = the y in t"}!\\
-\end{tabular}
-\end{center}
-
-
-\section {Styles\label{sec:styles}}
-
-The \verb!thm! antiquotation works nicely for single theorems, but
-sets of equations as used in definitions are more difficult to
-typeset nicely: people tend to prefer aligned @{text "="} signs.
-
-To deal with such cases where it is desirable to dive into the structure
-of terms and theorems, Isabelle offers antiquotations featuring ``styles'':
-
-\begin{quote}
-\verb!@!\verb!{thm (style) thm}!\\
-\verb!@!\verb!{prop (style) thm}!\\
-\verb!@!\verb!{term (style) term}!\\
-\verb!@!\verb!{term_type (style) term}!\\
-\verb!@!\verb!{typeof (style) term}!\\
-\end{quote}
-
- A ``style'' is a transformation of a term. There are predefined
- styles, namely \verb!lhs! and \verb!rhs!, \verb!prem! with one argument, and \verb!concl!.
-For example, the output
-\begin{center}
-\begin{tabular}{l@ {~~@{text "="}~~}l}
-@{thm (lhs) append_Nil} & @{thm (rhs) append_Nil}\\
-@{thm (lhs) append_Cons} & @{thm (rhs) append_Cons}
-\end{tabular}
-\end{center}
-is produced by the following code:
-\begin{quote}
-  \verb!\begin{center}!\\
-  \verb!\begin{tabular}{l@ {~~!\verb!@!\verb!{text "="}~~}l}!\\
-  \verb!@!\verb!{thm (lhs) append_Nil} & @!\verb!{thm (rhs) append_Nil}\\!\\
-  \verb!@!\verb!{thm (lhs) append_Cons} & @!\verb!{thm (rhs) append_Cons}!\\
-  \verb!\end{tabular}!\\
-  \verb!\end{center}!
-\end{quote}
-Note the space between \verb!@! and \verb!{! in the tabular argument.
-It prevents Isabelle from interpreting \verb!@ {~~...~~}! 
-as an antiquotation. The styles \verb!lhs! and \verb!rhs!
-extract the left hand side (or right hand side respectively) from the
-conclusion of propositions consisting of a binary operator
-(e.~g.~@{text "="}, @{text "\<equiv>"}, @{text "<"}).
-
-Likewise, \verb!concl! may be used as a style to show just the
-conclusion of a proposition. For example, take \verb!hd_Cons_tl!:
-\begin{center}
-  @{thm hd_Cons_tl}
-\end{center}
-To print just the conclusion,
-\begin{center}
-  @{thm (concl) hd_Cons_tl}
-\end{center}
-type
-\begin{quote}
-  \verb!\begin{center}!\\
-  \verb!@!\verb!{thm (concl) hd_Cons_tl}!\\
-  \verb!\end{center}!
-\end{quote}
-Beware that any options must be placed \emph{before} the style, as in this example.
-
-Further use cases can be found in \S\ref{sec:yourself}.
-If you are not afraid of ML, you may also define your own styles.
-Have a look at module @{ML_structure Term_Style}.
-
-
-\section {Proofs}
-
-Full proofs, even if written in beautiful Isar style, are
-likely to be too long and detailed to be included in conference
-papers, but some key lemmas might be of interest.
-It is usually easiest to put them in figures like the one in Fig.\
-\ref{fig:proof}. This was achieved with the \isakeyword{text\_raw} command:
-*}
-text_raw {*
-  \begin{figure}
-  \begin{center}\begin{minipage}{0.6\textwidth}  
-  \isastyleminor\isamarkuptrue
-*}
-lemma True
-proof -
-  -- "pretty trivial"
-  show True by force
-qed
-text_raw {*    
-  \end{minipage}\end{center}
-  \caption{Example proof in a figure.}\label{fig:proof}
-  \end{figure}
-*}
-text {*
-
-\begin{quote}
-\small
-\verb!text_raw {!\verb!*!\\
-\verb!  \begin{figure}!\\
-\verb!  \begin{center}\begin{minipage}{0.6\textwidth}!\\
-\verb!  \isastyleminor\isamarkuptrue!\\
-\verb!*!\verb!}!\\
-\verb!lemma True!\\
-\verb!proof -!\\
-\verb!  -- "pretty trivial"!\\
-\verb!  show True by force!\\
-\verb!qed!\\
-\verb!text_raw {!\verb!*!\\
-\verb!  \end{minipage}\end{center}!\\
-\verb!  \caption{Example proof in a figure.}\label{fig:proof}!\\
-\verb!  \end{figure}!\\
-\verb!*!\verb!}!
-\end{quote}
-
-Other theory text, e.g.\ definitions, can be put in figures, too.
-
-\section{Theory snippets}
-
-This section describes how to include snippets of a theory text in some other \LaTeX\ document.
-The typical scenario is that the description of your theory is not part of the theory text but
-a separate document that antiquotes bits of the theory. This works well for terms and theorems
-but there are no antiquotations, for example, for function definitions or proofs. Even if there are antiquotations,
-the output is usually a reformatted (by Isabelle) version of the input and may not look like
-you wanted it to look. Here is how to include a snippet of theory text (in \LaTeX\ form) in some
-other \LaTeX\ document, in 4 easy steps. Beware that these snippets are not processed by
-any antiquotation mechanism: the resulting \LaTeX\ text is more or less exactly what you wrote
-in the theory, without any added sugar.
-
-\subsection{Theory markup}
-
-Include some markers at the beginning and the end of the theory snippet you want to cut out.
-You have to place the following lines before and after the snippet, where snippets must always be
-consecutive lines of theory text:
-\begin{quote}
-\verb!\text_raw{!\verb!*\snip{!\emph{snippetname}\verb!}{1}{2}{%*!\verb!}!\\
-\emph{theory text}\\
-\verb!\text_raw{!\verb!*!\verb!}%endsnip*!\verb!}!
-\end{quote}
-where \emph{snippetname} should be a unique name for the snippet. The numbers \texttt{1}
-and \texttt{2} are explained in a moment.
-
-\subsection{Generate the \texttt{.tex} file}
-
-Run your theory \texttt{T} with the \texttt{isabelle} \texttt{build} tool
-to generate the \LaTeX-file \texttt{T.tex} which is needed for the next step,
-extraction of marked snippets.
-You may also want to process \texttt{T.tex} to generate a pdf document.
-This requires a definition of \texttt{\char`\\snip}:
-\begin{verbatim}
-\newcommand{\repeatisanl}[1]
-  {\ifnum#1=0\else\isanewline\repeatisanl{\numexpr#1-1}\fi}
-\newcommand{\snip}[4]{\repeatisanl#2#4\repeatisanl#3}
-\end{verbatim}
-Parameters 2 and 3 of \texttt{\char`\\snip} are numbers (the \texttt{1}
-and \texttt{2} above) and determine how many newlines are inserted before and after the snippet.
-Unfortunately \texttt{text\_raw} eats up all preceding and following newlines
-and they have to be inserted again in this manner. Otherwise the document generated from \texttt{T.tex}
-will look ugly around the snippets. It can take some iterations to get the number of required
-newlines exactly right.
-
-\subsection{Extract marked snippets}
-\label{subsec:extract}
-
-Extract the marked bits of text with a shell-level script, e.g.
-\begin{quote}
-\verb!sed -n '/\\snip{/,/endsnip/p' T.tex > !\emph{snippets}\verb!.tex!
-\end{quote}
-File \emph{snippets}\texttt{.tex} (the name is arbitrary) now contains a sequence of blocks like this
-\begin{quote}
-\verb!\snip{!\emph{snippetname}\verb!}{1}{2}{%!\\
-\emph{theory text}\\
-\verb!}%endsnip!
-\end{quote}
-
-\subsection{Including snippets}
-
-In the preamble of the document where the snippets are to be used you define \texttt{\char`\\snip}
-and input \emph{snippets}\texttt{.tex}:
-\begin{verbatim}
-\newcommand{\snip}[4]
-  {\expandafter\newcommand\csname #1\endcsname{#4}}
-\input{snippets}
-\end{verbatim}
-This definition of \texttt{\char`\\snip} simply has the effect of defining for each snippet
-\emph{snippetname} a \LaTeX\ command \texttt{\char`\\}\emph{snippetname}
-that produces the corresponding snippet text. In the body of your document you can display that text
-like this:
-\begin{quote}
-\verb!\begin{isabelle}!\\
-\texttt{\char`\\}\emph{snippetname}\\
-\verb!\end{isabelle}!
-\end{quote}
-The \texttt{isabelle} environment is the one defined in the standard file
-\texttt{isabelle.sty} which most likely you are loading anyway.
-
-
-\section{Antiquotation}
-
-You want to show a constant and its type? Instead of going
-\verb!@!\verb!{const myconst}! \verb!@!\verb!{text "::"}! \verb!@!\verb!{typeof myconst}!,
-you can just write \verb!@!\verb!{const_typ myconst}! using the new antiquotation
-\texttt{const\_typ} defined in \texttt{LaTeXsugar}. For example,
-\verb!@!\verb!{const_typ length}! produces @{const_typ length}.
-
-*}
-
-(*<*)
-end
-(*>*)
--- a/src/Doc/LaTeXsugar/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-FORMAT="$1"
-VARIANT="$2"
-
-"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
-
--- a/src/Doc/LaTeXsugar/document/mathpartir.sty	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,388 +0,0 @@
-%  Mathpartir --- Math Paragraph for Typesetting Inference Rules
-%
-%  Copyright (C) 2001, 2002, 2003 Didier Rémy
-%
-%  Author         : Didier Remy 
-%  Version        : 1.1.1
-%  Bug Reports    : to author
-%  Web Site       : http://pauillac.inria.fr/~remy/latex/
-% 
-%  WhizzyTeX is free software; you can redistribute it and/or modify
-%  it under the terms of the GNU General Public License as published by
-%  the Free Software Foundation; either version 2, or (at your option)
-%  any later version.
-%  
-%  Mathpartir is distributed in the hope that it will be useful,
-%  but WITHOUT ANY WARRANTY; without even the implied warranty of
-%  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-%  GNU General Public License for more details 
-%  (http://pauillac.inria.fr/~remy/license/GPL).
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%  File mathpartir.sty (LaTeX macros)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesPackage{mathpartir}
-    [2003/07/10 version 1.1.1 Math Paragraph for Typesetting Inference Rules]
-
-%%
-
-%% Identification
-%% Preliminary declarations
-
-\RequirePackage {keyval}
-
-%% Options
-%% More declarations
-
-%% PART I: Typesetting maths in paragraph mode
-
-\newdimen \mpr@tmpdim
-
-% To ensure hevea \hva compatibility, \hva should expand to nothing
-% in mathpar or in inferrule
-\let \mpr@hva \empty
-
-%% normal paragraph parameters, should rather be taken dynamically
-\def \mpr@savepar {%
-  \edef \MathparNormalpar
-     {\noexpand \lineskiplimit \the\lineskiplimit
-      \noexpand \lineskip \the\lineskip}%
-  }
-
-\def \mpr@rulelineskip {\lineskiplimit=0.3em\lineskip=0.2em plus 0.1em}
-\def \mpr@lesslineskip {\lineskiplimit=0.6em\lineskip=0.5em plus 0.2em}
-\def \mpr@lineskip  {\lineskiplimit=1.2em\lineskip=1.2em plus 0.2em}
-\let \MathparLineskip \mpr@lineskip
-\def \mpr@paroptions {\MathparLineskip}
-\let \mpr@prebindings \relax
-
-\newskip \mpr@andskip \mpr@andskip 2em plus 0.5fil minus 0.5em
-
-\def \mpr@goodbreakand
-   {\hskip -\mpr@andskip  \penalty -1000\hskip \mpr@andskip}
-\def \mpr@and {\hskip \mpr@andskip}
-\def \mpr@andcr {\penalty 50\mpr@and}
-\def \mpr@cr {\penalty -10000\mpr@and}
-\def \mpr@eqno #1{\mpr@andcr #1\hskip 0em plus -1fil \penalty 10}
-
-\def \mpr@bindings {%
-  \let \and \mpr@andcr
-  \let \par \mpr@andcr
-  \let \\\mpr@cr
-  \let \eqno \mpr@eqno
-  \let \hva \mpr@hva
-  } 
-\let \MathparBindings \mpr@bindings
-
-% \@ifundefined {ignorespacesafterend}
-%    {\def \ignorespacesafterend {\aftergroup \ignorespaces}
-
-\newenvironment{mathpar}[1][]
-  {$$\mpr@savepar \parskip 0em \hsize \linewidth \centering
-     \vbox \bgroup \mpr@prebindings \mpr@paroptions #1\ifmmode $\else
-     \noindent $\displaystyle\fi
-     \MathparBindings}
-  {\unskip \ifmmode $\fi\egroup $$\ignorespacesafterend}
-
-% \def \math@mathpar #1{\setbox0 \hbox {$\displaystyle #1$}\ifnum
-%     \wd0 < \hsize  $$\box0$$\else \bmathpar #1\emathpar \fi}
-
-%%% HOV BOXES
-
-\def \mathvbox@ #1{\hbox \bgroup \mpr@normallineskip 
-  \vbox \bgroup \tabskip 0em \let \\ \cr
-  \halign \bgroup \hfil $##$\hfil\cr #1\crcr \egroup \egroup
-  \egroup}
-
-\def \mathhvbox@ #1{\setbox0 \hbox {\let \\\qquad $#1$}\ifnum \wd0 < \hsize
-      \box0\else \mathvbox {#1}\fi}
-
-
-%% Part II -- operations on lists
-
-\newtoks \mpr@lista
-\newtoks \mpr@listb
-
-\long \def\mpr@cons #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
-{#2}\edef #2{\the \mpr@lista \the \mpr@listb}}
-
-\long \def\mpr@snoc #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
-{#2}\edef #2{\the \mpr@listb\the\mpr@lista}}
-
-\long \def \mpr@concat#1=#2\mpr@to#3{\mpr@lista \expandafter {#2}\mpr@listb
-\expandafter {#3}\edef #1{\the \mpr@listb\the\mpr@lista}}
-
-\def \mpr@head #1\mpr@to #2{\expandafter \mpr@head@ #1\mpr@head@ #1#2}
-\long \def \mpr@head@ #1#2\mpr@head@ #3#4{\def #4{#1}\def#3{#2}}
-
-\def \mpr@flatten #1\mpr@to #2{\expandafter \mpr@flatten@ #1\mpr@flatten@ #1#2}
-\long \def \mpr@flatten@ \\#1\\#2\mpr@flatten@ #3#4{\def #4{#1}\def #3{\\#2}}
-
-\def \mpr@makelist #1\mpr@to #2{\def \mpr@all {#1}%
-   \mpr@lista {\\}\mpr@listb \expandafter {\mpr@all}\edef \mpr@all {\the
-   \mpr@lista \the \mpr@listb \the \mpr@lista}\let #2\empty 
-   \def \mpr@stripof ##1##2\mpr@stripend{\def \mpr@stripped{##2}}\loop
-     \mpr@flatten \mpr@all \mpr@to \mpr@one
-     \expandafter \mpr@snoc \mpr@one \mpr@to #2\expandafter \mpr@stripof
-     \mpr@all \mpr@stripend  
-     \ifx \mpr@stripped \empty \let \mpr@isempty 0\else \let \mpr@isempty 1\fi
-     \ifx 1\mpr@isempty
-   \repeat
-}
-
-%% Part III -- Type inference rules
-
-\def \mpr@rev #1\mpr@to #2{\let \mpr@tmp \empty
-   \def \\##1{\mpr@cons ##1\mpr@to \mpr@tmp}#1\let #2\mpr@tmp}
-
-\newif \if@premisse
-\newbox \mpr@hlist
-\newbox \mpr@vlist
-\newif \ifmpr@center \mpr@centertrue
-\def \mpr@htovlist {%
-   \setbox \mpr@hlist
-      \hbox {\strut
-             \ifmpr@center \hskip -0.5\wd\mpr@hlist\fi
-             \unhbox \mpr@hlist}%
-   \setbox \mpr@vlist
-      \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
-             \else \unvbox \mpr@vlist \box \mpr@hlist
-             \fi}%
-}
-% OLD version
-% \def \mpr@htovlist {%
-%    \setbox \mpr@hlist
-%       \hbox {\strut \hskip -0.5\wd\mpr@hlist \unhbox \mpr@hlist}%
-%    \setbox \mpr@vlist
-%       \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
-%              \else \unvbox \mpr@vlist \box \mpr@hlist
-%              \fi}%
-% }
-
-\def \mpr@sep{2em}
-\def \mpr@blank { }
-\def \mpr@hovbox #1#2{\hbox
-  \bgroup
-  \ifx #1T\@premissetrue
-  \else \ifx #1B\@premissefalse
-  \else
-     \PackageError{mathpartir}
-       {Premisse orientation should either be P or B}
-       {Fatal error in Package}%
-  \fi \fi
-  \def \@test {#2}\ifx \@test \mpr@blank\else
-  \setbox \mpr@hlist \hbox {}%
-  \setbox \mpr@vlist \vbox {}%
-  \if@premisse \let \snoc \mpr@cons \else \let \snoc \mpr@snoc \fi
-  \let \@hvlist \empty \let \@rev \empty
-  \mpr@tmpdim 0em
-  \expandafter \mpr@makelist #2\mpr@to \mpr@flat
-  \if@premisse \mpr@rev \mpr@flat \mpr@to \@rev \else \let \@rev \mpr@flat \fi
-  \def \\##1{%
-     \def \@test {##1}\ifx \@test \empty
-        \mpr@htovlist
-        \mpr@tmpdim 0em %%% last bug fix not extensively checked
-     \else
-      \setbox0 \hbox{$\displaystyle {##1}$}\relax
-      \advance \mpr@tmpdim by \wd0
-      %\mpr@tmpdim 1.02\mpr@tmpdim
-      \ifnum \mpr@tmpdim < \hsize
-         \ifnum \wd\mpr@hlist > 0
-           \if@premisse
-             \setbox \mpr@hlist 
-                \hbox {\unhbox0 \hskip \mpr@sep \unhbox \mpr@hlist}%
-           \else
-             \setbox \mpr@hlist
-                \hbox {\unhbox \mpr@hlist  \hskip \mpr@sep \unhbox0}%
-           \fi
-         \else 
-         \setbox \mpr@hlist \hbox {\unhbox0}%
-         \fi
-      \else
-         \ifnum \wd \mpr@hlist > 0
-            \mpr@htovlist 
-            \mpr@tmpdim \wd0
-         \fi
-         \setbox \mpr@hlist \hbox {\unhbox0}%
-      \fi
-      \advance \mpr@tmpdim by \mpr@sep
-   \fi
-   }%
-   \@rev
-   \mpr@htovlist
-   \ifmpr@center \hskip \wd\mpr@vlist\fi \box \mpr@vlist
-   \fi
-   \egroup
-}
-
-%%% INFERENCE RULES
-
-\@ifundefined{@@over}{%
-    \let\@@over\over % fallback if amsmath is not loaded
-    \let\@@overwithdelims\overwithdelims
-    \let\@@atop\atop \let\@@atopwithdelims\atopwithdelims
-    \let\@@above\above \let\@@abovewithdelims\abovewithdelims
-  }{}
-
-
-\def \mpr@@fraction #1#2{\hbox {\advance \hsize by -0.5em
-    $\displaystyle {#1\@@over #2}$}}
-\let \mpr@fraction \mpr@@fraction
-\def \mpr@@reduce #1#2{\hbox
-    {$\lower 0.01pt \mpr@@fraction {#1}{#2}\mkern -15mu\rightarrow$}}
-\def \mpr@@rewrite #1#2#3{\hbox
-    {$\lower 0.01pt \mpr@@fraction {#2}{#3}\mkern -8mu#1$}}
-\def \mpr@infercenter #1{\vcenter {\mpr@hovbox{T}{#1}}}
-
-\def \mpr@empty {}
-\def \mpr@inferrule
-  {\bgroup
-     \ifnum \linewidth<\hsize \hsize \linewidth\fi
-     \mpr@rulelineskip
-     \let \and \qquad
-     \let \hva \mpr@hva
-     \let \@rulename \mpr@empty
-     \let \@rule@options \mpr@empty
-     \mpr@inferrule@}
-\newcommand {\mpr@inferrule@}[3][]
-  {\everymath={\displaystyle}%       
-   \def \@test {#2}\ifx \empty \@test
-      \setbox0 \hbox {$\vcenter {\mpr@hovbox{B}{#3}}$}%
-   \else 
-   \def \@test {#3}\ifx \empty \@test
-      \setbox0 \hbox {$\vcenter {\mpr@hovbox{T}{#2}}$}%
-   \else
-   \setbox0 \mpr@fraction {\mpr@hovbox{T}{#2}}{\mpr@hovbox{B}{#3}}%
-   \fi \fi
-   \def \@test {#1}\ifx \@test\empty \box0
-   \else \vbox 
-%%% Suggestion de Francois pour les etiquettes longues
-%%%   {\hbox to \wd0 {\RefTirName {#1}\hfil}\box0}\fi
-      {\hbox {\RefTirName {#1}}\box0}\fi
-   \egroup}
-
-\def \mpr@vdotfil #1{\vbox to #1{\leaders \hbox{$\cdot$} \vfil}}
-
-% There are two forms
-% \inferrule [label]{premisses}{conclusions}
-% or
-% \inferrule* [options]{premisses}{conclusions}
-%
-% Premisses and conclusions are lists of elements separated by \\
-% Each \\ produces a break, attempting horizontal breaks if possible, 
-% and  vertical breaks if needed. 
-% 
-% An empty element obtained by \\\\ produces a vertical break in all cases. 
-%
-% The former rule is aligned on the fraction bar. 
-% The optional label appears on top of the rule
-% The second form to be used in a derivation tree is aligned on the last
-% line of its conclusion
-% 
-% The second form can be parameterized, using the key=val interface. The
-% following keys are recognized:
-%       
-%  width                set the width of the rule to val
-%  narrower             set the width of the rule to val\hsize
-%  before               execute val at the beginning/left
-%  lab                  put a label [Val] on top of the rule
-%  lskip                add negative skip on the right
-%  left                 put a left label [Val]
-%  Left                 put a left label [Val],  ignoring its width 
-%  right                put a right label [Val]
-%  Right                put a right label [Val], ignoring its width
-%  leftskip             skip negative space on the left-hand side
-%  rightskip            skip negative space on the right-hand side
-%  vdots                lift the rule by val and fill vertical space with dots
-%  after                execute val at the end/right
-%  
-%  Note that most options must come in this order to avoid strange
-%  typesetting (in particular leftskip must precede left and Left and
-%  rightskip must follow Right or right; vdots must come last
-%  or be only followed by rightskip).
-%  
-
-\define@key {mprset}{flushleft}[]{\mpr@centerfalse}
-\define@key {mprset}{center}[]{\mpr@centertrue}
-\def \mprset #1{\setkeys{mprset}{#1}}
-
-\newbox \mpr@right
-\define@key {mpr}{flushleft}[]{\mpr@centerfalse}
-\define@key {mpr}{center}[]{\mpr@centertrue}
-\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
-     \advance \hsize by -\wd0\box0}
-\define@key {mpr}{width}{\hsize #1}
-\define@key {mpr}{sep}{\def\mpr@sep{#1}}
-\define@key {mpr}{before}{#1}
-\define@key {mpr}{lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
-\define@key {mpr}{Lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
-\define@key {mpr}{narrower}{\hsize #1\hsize}
-\define@key {mpr}{leftskip}{\hskip -#1}
-\define@key {mpr}{reduce}[]{\let \mpr@fraction \mpr@@reduce}
-\define@key {mpr}{rightskip}
-  {\setbox \mpr@right \hbox {\unhbox \mpr@right \hskip -#1}}
-\define@key {mpr}{LEFT}{\setbox0 \hbox {$#1$}\relax
-     \advance \hsize by -\wd0\box0}
-\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
-     \advance \hsize by -\wd0\box0}
-\define@key {mpr}{Left}{\llap{$\TirName {#1}\;$}}
-\define@key {mpr}{right}
-  {\setbox0 \hbox {$\;\TirName {#1}$}\relax \advance \hsize by -\wd0
-   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
-\define@key {mpr}{RIGHT}
-  {\setbox0 \hbox {$#1$}\relax \advance \hsize by -\wd0
-   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
-\define@key {mpr}{Right}
-  {\setbox \mpr@right \hbox {\unhbox \mpr@right \rlap {$\;\TirName {#1}$}}}
-\define@key {mpr}{vdots}{\def \mpr@vdots {\@@atop \mpr@vdotfil{#1}}}
-\define@key {mpr}{after}{\edef \mpr@after {\mpr@after #1}}
-
-\newdimen \rule@dimen
-\newcommand \mpr@inferstar@ [3][]{\setbox0
-  \hbox {\let \mpr@rulename \mpr@empty \let \mpr@vdots \relax
-         \setbox \mpr@right \hbox{}%
-         $\setkeys{mpr}{#1}%
-          \ifx \mpr@rulename \mpr@empty \mpr@inferrule {#2}{#3}\else
-          \mpr@inferrule [{\mpr@rulename}]{#2}{#3}\fi
-          \box \mpr@right \mpr@vdots$}
-  \setbox1 \hbox {\strut}
-  \rule@dimen \dp0 \advance \rule@dimen by -\dp1
-  \raise \rule@dimen \box0}
-
-\def \mpr@infer {\@ifnextchar *{\mpr@inferstar}{\mpr@inferrule}}
-\newcommand \mpr@err@skipargs[3][]{}
-\def \mpr@inferstar*{\ifmmode 
-    \let \@do \mpr@inferstar@
-  \else 
-    \let \@do \mpr@err@skipargs
-    \PackageError {mathpartir}
-      {\string\inferrule* can only be used in math mode}{}%
-  \fi \@do}
-
-
-%%% Exports
-
-% Environment mathpar
-
-\let \inferrule \mpr@infer
-
-% make a short name \infer if not already defined
-\@ifundefined {infer}{\let \infer \mpr@infer}{}
-
-\def \tir@name #1{\hbox {\small \sc #1}}
-\let \TirName \tir@name
-\let \RefTirName \tir@name
-
-%%% Other Exports
-
-% \let \listcons \mpr@cons
-% \let \listsnoc \mpr@snoc
-% \let \listhead \mpr@head
-% \let \listmake \mpr@makelist
-
-
-
-
-\endinput
--- a/src/Doc/LaTeXsugar/document/root.bib	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-@book{LNCS2283,author={Tobias Nipkow and Lawrence Paulson and Markus Wenzel},
-title="Isabelle/HOL --- A Proof Assistant for Higher-Order Logic",
-publisher=Springer,series=LNCS,volume=2283,year=2002,
-note={\url{http://www.in.tum.de/~nipkow/LNCS2283/}}}
-
-@misc{mathpartir,author={Didier R\'emy},title={mathpartir},
-note={\url{http://cristal.inria.fr/~remy/latex/}}}
-
-@misc{tar,author={Gerwin Klein and Norbert Schirmer and Tobias Nipkow},
-title={{LaTeX} sugar theories and support files}, 
-note={\url{http://isabelle.in.tum.de/sugar.tar.gz}}}
-
--- a/src/Doc/LaTeXsugar/document/root.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-\documentclass[11pt,a4paper]{article}
-\usepackage{isabelle,isabellesym}
-
-\usepackage{amssymb}
-
-\usepackage{mathpartir}
-
-% this should be the last package used
-\usepackage{pdfsetup}
-
-% urls in roman style, theory text in math-similar italics
-\urlstyle{rm}
-\isabellestyle{it}
-
-\hyphenation{Isa-belle}
-\begin{document}
-
-\title{\LaTeX\ Sugar for Isabelle Documents}
-\author{Florian Haftmann, Gerwin Klein, Tobias Nipkow, Norbert Schirmer}
-\maketitle
-
-\begin{abstract}
-This document shows how to typeset mathematics in Isabelle-based
-documents in a style close to that in ordinary computer science papers.
-\end{abstract}
-
-%\tableofcontents
-
-% generated text of all theories
-\input{Sugar.tex}
-
-% optional bibliography
-\bibliographystyle{abbrv}
-\bibliography{root}
-
-\end{document}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: t
-%%% End:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/FOL_examples.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,34 @@
+header{*Examples of Classical Reasoning*}
+
+theory FOL_examples imports "~~/src/FOL/FOL" begin
+
+lemma "EX y. ALL x. P(y)-->P(x)"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule exCI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule allI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule impI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule allE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+txt{*see below for @{text allI} combined with @{text swap}*}
+apply (erule allI [THEN [2] swap])
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule impI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule notE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption
+done
+
+text {*
+@{thm[display] allI [THEN [2] swap]}
+*}
+
+lemma "EX y. ALL x. P(y)-->P(x)"
+by blast
+
+end
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/IFOL_examples.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,58 @@
+header{*Examples of Intuitionistic Reasoning*}
+
+theory IFOL_examples imports "~~/src/FOL/IFOL" begin
+
+text{*Quantifier example from the book Logic and Computation*}
+lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule impI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule allI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule exI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule exE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule allE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+txt{*Now @{text "apply assumption"} fails*}
+oops
+
+text{*Trying again, with the same first two steps*}
+lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule impI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule allI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule exE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule exI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule allE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+done
+
+lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
+by (tactic {*IntPr.fast_tac @{context} 1*})
+
+text{*Example of Dyckhoff's method*}
+lemma "~ ~ ((P-->Q) | (Q-->P))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (unfold not_def)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule impI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule disj_impE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule imp_impE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+ apply (erule imp_impE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+apply (erule FalseE)+
+done
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/If.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,85 @@
+(*  Title:      Doc/Logics-ZF/If.thy
+    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
+    Copyright   1991  University of Cambridge
+
+First-Order Logic: the 'if' example.
+*)
+
+theory If imports "~~/src/FOL/FOL" begin
+
+definition "if" :: "[o,o,o]=>o" where
+  "if(P,Q,R) == P&Q | ~P&R"
+
+lemma ifI:
+    "[| P ==> Q; ~P ==> R |] ==> if(P,Q,R)"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (simp add: if_def)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply blast
+done
+
+lemma ifE:
+   "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~P; R |] ==> S |] ==> S"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (simp add: if_def)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply blast
+done
+
+lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule iffI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule ifE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule ifE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule ifI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule ifI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+oops
+
+text{*Trying again from the beginning in order to use @{text blast}*}
+declare ifI [intro!]
+declare ifE [elim!]
+
+lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
+by blast
+
+
+lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+by blast
+
+text{*Trying again from the beginning in order to prove from the definitions*}
+lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (simp add: if_def)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply blast
+done
+
+
+text{*An invalid formula.  High-level rules permit a simpler diagnosis*}
+lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply auto
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+(*The next step will fail unless subgoals remain*)
+apply (tactic all_tac)
+oops
+
+text{*Trying again from the beginning in order to prove from the definitions*}
+lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (simp add: if_def)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (auto) 
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+(*The next step will fail unless subgoals remain*)
+apply (tactic all_tac)
+oops
+
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/ZF_Isar.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,138 @@
+theory ZF_Isar
+imports Main
+begin
+
+(*<*)
+ML_file "../antiquote_setup.ML"
+(*>*)
+
+chapter {* Some Isar language elements *}
+
+section {* Type checking *}
+
+text {*
+  The ZF logic is essentially untyped, so the concept of ``type
+  checking'' is performed as logical reasoning about set-membership
+  statements.  A special method assists users in this task; a version
+  of this is already declared as a ``solver'' in the standard
+  Simplifier setup.
+
+  \begin{matharray}{rcl}
+    @{command_def (ZF) "print_tcset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{method_def (ZF) typecheck} & : & @{text method} \\
+    @{attribute_def (ZF) TC} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{attribute (ZF) TC} (() | 'add' | 'del')
+  \<close>}
+
+  \begin{description}
+  
+  \item @{command (ZF) "print_tcset"} prints the collection of
+  typechecking rules of the current context.
+  
+  \item @{method (ZF) typecheck} attempts to solve any pending
+  type-checking problems in subgoals.
+  
+  \item @{attribute (ZF) TC} adds or deletes type-checking rules from
+  the context.
+
+  \end{description}
+*}
+
+
+section {* (Co)Inductive sets and datatypes *}
+
+subsection {* Set definitions *}
+
+text {*
+  In ZF everything is a set.  The generic inductive package also
+  provides a specific view for ``datatype'' specifications.
+  Coinductive definitions are available in both cases, too.
+
+  \begin{matharray}{rcl}
+    @{command_def (ZF) "inductive"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (ZF) "coinductive"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (ZF) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (ZF) "codatatype"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{command (ZF) inductive} | @@{command (ZF) coinductive}) domains intros hints
+    ;
+
+    domains: @'domains' (@{syntax term} + '+') ('<=' | '\<subseteq>') @{syntax term}
+    ;
+    intros: @'intros' (@{syntax thmdecl}? @{syntax prop} +)
+    ;
+    hints: @{syntax (ZF) "monos"}? condefs? \<newline>
+      @{syntax (ZF) typeintros}? @{syntax (ZF) typeelims}?
+    ;
+    @{syntax_def (ZF) "monos"}: @'monos' @{syntax thmrefs}
+    ;
+    condefs: @'con_defs' @{syntax thmrefs}
+    ;
+    @{syntax_def (ZF) typeintros}: @'type_intros' @{syntax thmrefs}
+    ;
+    @{syntax_def (ZF) typeelims}: @'type_elims' @{syntax thmrefs}
+  \<close>}
+
+  In the following syntax specification @{text "monos"}, @{text
+  typeintros}, and @{text typeelims} are the same as above.
+
+  @{rail \<open>
+    (@@{command (ZF) datatype} | @@{command (ZF) codatatype}) domain? (dtspec + @'and') hints
+    ;
+
+    domain: ('<=' | '\<subseteq>') @{syntax term}
+    ;
+    dtspec: @{syntax term} '=' (con + '|')
+    ;
+    con: @{syntax name} ('(' (@{syntax term} ',' +) ')')?
+    ;
+    hints: @{syntax (ZF) "monos"}? @{syntax (ZF) typeintros}? @{syntax (ZF) typeelims}?
+  \<close>}
+
+  See \cite{isabelle-ZF} for further information on inductive
+  definitions in ZF, but note that this covers the old-style theory
+  format.
+*}
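+
+text {*
+  A minimal sketch of a @{command (ZF) "datatype"} specification follows;
+  the names @{text mylist}, @{text MyNil} and @{text MyCons} are ad hoc
+  and merely mimic the library's lists:
+*}
+
+(*ad hoc names, for illustration only*)
+consts mylist :: "i => i"
+
+datatype "mylist(A)" =
+  MyNil | MyCons ("a \<in> A", "l \<in> mylist(A)")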
+
+
+subsection {* Primitive recursive functions *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (ZF) "primrec"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    @@{command (ZF) primrec} (@{syntax thmdecl}? @{syntax prop} +)
+  \<close>}
+*}
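+
+text {*
+  Continuing the illustrative @{text mylist} sketch above, a length
+  function might be defined by primitive recursion as follows:
+*}
+
+(*illustrative primitive recursion over the ad hoc mylist datatype*)
+consts mylength :: "i => i"
+primrec
+  "mylength(MyNil) = 0"
+  "mylength(MyCons(a,l)) = succ(mylength(l))"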
+
+
+subsection {* Cases and induction: emulating tactic scripts *}
+
+text {*
+  The following important tactical tools of Isabelle/ZF have been
+  ported to Isar.  These should not be used in proper proof texts.
+
+  \begin{matharray}{rcl}
+    @{method_def (ZF) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def (ZF) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def (ZF) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
+    @{command_def (ZF) "inductive_cases"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail \<open>
+    (@@{method (ZF) case_tac} | @@{method (ZF) induct_tac}) @{syntax goal_spec}? @{syntax name}
+    ;
+    @@{method (ZF) ind_cases} (@{syntax prop} +)
+    ;
+    @@{command (ZF) inductive_cases} (@{syntax thmdecl}? (@{syntax prop} +) + @'and')
+  \<close>}
+*}
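+
+text {*
+  For instance, @{command (ZF) "inductive_cases"} derives an elimination
+  rule for a particular constructor pattern of the illustrative
+  @{text mylist} datatype above:
+*}
+
+(*illustrative elimination rule for the ad hoc mylist datatype*)
+inductive_cases MyCons_in_mylist: "MyCons(a,l) \<in> mylist(A)"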
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/ZF_examples.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,202 @@
+header{*Examples of Reasoning in ZF Set Theory*}
+
+theory ZF_examples imports Main_ZFC begin
+
+subsection {* Binary Trees *}
+
+consts
+  bt :: "i => i"
+
+datatype "bt(A)" =
+  Lf | Br ("a \<in> A", "t1 \<in> bt(A)", "t2 \<in> bt(A)")
+
+declare bt.intros [simp]
+
+text{*Induction via tactic emulation*}
+lemma Br_neq_left [rule_format]: "l \<in> bt(A) ==> \<forall>x r. Br(x, l, r) \<noteq> l"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+  apply (induct_tac l)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+  apply auto
+  done
+
+(*
+  apply (Inductive.case_tac l)
+  apply (tactic {*exhaust_tac "l" 1*})
+*)
+
+text{*The new induction method, which I don't understand*}
+lemma Br_neq_left': "l \<in> bt(A) ==> (!!x r. Br(x, l, r) \<noteq> l)"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+  apply (induct set: bt)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+  apply auto
+  done
+
+lemma Br_iff: "Br(a,l,r) = Br(a',l',r') <-> a=a' & l=l' & r=r'"
+  -- "Proving a freeness theorem."
+  by (blast elim!: bt.free_elims)
+
+inductive_cases Br_in_bt: "Br(a,l,r) \<in> bt(A)"
+  -- "An elimination rule, for type-checking."
+
+text {*
+@{thm[display] Br_in_bt[no_vars]}
+*};
+
+subsection{*Primitive recursion*}
+
+consts  n_nodes :: "i => i"
+primrec
+  "n_nodes(Lf) = 0"
+  "n_nodes(Br(a,l,r)) = succ(n_nodes(l) #+ n_nodes(r))"
+
+lemma n_nodes_type [simp]: "t \<in> bt(A) ==> n_nodes(t) \<in> nat"
+  by (induct_tac t, auto) 
+
+consts  n_nodes_aux :: "i => i"
+primrec
+  "n_nodes_aux(Lf) = (\<lambda>k \<in> nat. k)"
+  "n_nodes_aux(Br(a,l,r)) = 
+      (\<lambda>k \<in> nat. n_nodes_aux(r) `  (n_nodes_aux(l) ` succ(k)))"
+
+lemma n_nodes_aux_eq [rule_format]:
+     "t \<in> bt(A) ==> \<forall>k \<in> nat. n_nodes_aux(t)`k = n_nodes(t) #+ k"
+  by (induct_tac t, simp_all) 
+
+definition n_nodes_tail :: "i => i" where
+   "n_nodes_tail(t) == n_nodes_aux(t) ` 0"
+
+lemma "t \<in> bt(A) ==> n_nodes_tail(t) = n_nodes(t)"
+ by (simp add: n_nodes_tail_def n_nodes_aux_eq) 
+
+
+subsection {*Inductive definitions*}
+
+consts  Fin       :: "i=>i"
+inductive
+  domains   "Fin(A)" \<subseteq> "Pow(A)"
+  intros
+    emptyI:  "0 \<in> Fin(A)"
+    consI:   "[| a \<in> A;  b \<in> Fin(A) |] ==> cons(a,b) \<in> Fin(A)"
+  type_intros  empty_subsetI cons_subsetI PowI
+  type_elims   PowD [elim_format]
+
+
+consts  acc :: "i => i"
+inductive
+  domains "acc(r)" \<subseteq> "field(r)"
+  intros
+    vimage:  "[| r-``{a}: Pow(acc(r)); a \<in> field(r) |] ==> a \<in> acc(r)"
+  monos      Pow_mono
+
+
+consts
+  llist  :: "i=>i";
+
+codatatype
+  "llist(A)" = LNil | LCons ("a \<in> A", "l \<in> llist(A)")
+
+
+(*Coinductive definition of equality*)
+consts
+  lleq :: "i=>i"
+
+(*Previously used <*> in the domain and variant pairs as elements.  But
+  standard pairs work just as well.  To use variant pairs, one must prefix
+  q/Q to the Sigma, Pair and converse rules.*)
+coinductive
+  domains "lleq(A)" \<subseteq> "llist(A) * llist(A)"
+  intros
+    LNil:  "<LNil, LNil> \<in> lleq(A)"
+    LCons: "[| a \<in> A; <l,l'> \<in> lleq(A) |] 
+            ==> <LCons(a,l), LCons(a,l')> \<in> lleq(A)"
+  type_intros  llist.intros
+
+
+subsection{*Powerset example*}
+
+lemma Pow_mono: "A\<subseteq>B  ==>  Pow(A) \<subseteq> Pow(B)"
+apply (rule subsetI)
+apply (rule PowI)
+apply (drule PowD)
+apply (erule subset_trans, assumption)
+done
+
+lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule equalityI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule Int_greatest)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule Int_lower1 [THEN Pow_mono])
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule Int_lower2 [THEN Pow_mono])
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule subsetI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule IntE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule PowI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (drule PowD)+
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule Int_greatest)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (assumption+)
+done
+
+text{*Trying again from the beginning in order to use @{text blast}*}
+lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
+by blast
+
+
+lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule subsetI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule UnionE)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule UnionI)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule subsetD)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+done
+
+text{*A more abstract version of the same proof*}
+
+lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule Union_least)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule Union_upper)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (erule subsetD, assumption)
+done
+
+
+lemma "[| a \<in> A;  f \<in> A->B;  g \<in> C->D;  A \<inter> C = 0 |] ==> (f \<union> g)`a = f`a"
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule apply_equality)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule UnI1)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule apply_Pair)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (rule fun_disjoint_Un)
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+  --{* @{subgoals[display,indent=0,margin=65]} *}
+apply assumption 
+done
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/document/FOL.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,855 @@
+%!TEX root = logics-ZF.tex
+\chapter{First-Order Logic}
+\index{first-order logic|(}
+
+Isabelle implements Gentzen's natural deduction systems {\sc nj} and {\sc
+  nk}.  Intuitionistic first-order logic is defined first, as theory
+\thydx{IFOL}.  Classical logic, theory \thydx{FOL}, is
+obtained by adding the double negation rule.  Basic proof procedures are
+provided.  The intuitionistic prover works with derived rules to simplify
+implications in the assumptions.  Classical~\texttt{FOL} employs Isabelle's
+classical reasoner, which simulates a sequent calculus.
+
+\section{Syntax and rules of inference}
+The logic is many-sorted, using Isabelle's type classes.  The class of
+first-order terms is called \cldx{term} and is a subclass of \isa{logic}.
+No types of individuals are provided, but extensions can define types such
+as \isa{nat::term} and type constructors such as \isa{list::(term)term}
+(see the examples directory, \texttt{FOL/ex}).  Below, the type variable
+$\alpha$ ranges over class \isa{term}; the equality symbol and quantifiers
+are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
+belongs to class~\cldx{logic}.  Figure~\ref{fol-syntax} gives the syntax.
+Note that $a$\verb|~=|$b$ is translated to $\neg(a=b)$.
+
+Figure~\ref{fol-rules} shows the inference rules with their names.
+Negation is defined in the usual way for intuitionistic logic; $\neg P$
+abbreviates $P\imp\bot$.  The biconditional~($\bimp$) is defined through
+$\conj$ and~$\imp$; introduction and elimination rules are derived for it.
+
+The unique existence quantifier, $\exists!x.P(x)$, is defined in terms
+of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
+quantifications.  For instance, $\exists!x\;y.P(x,y)$ abbreviates
+$\exists!x. \exists!y.P(x,y)$; note that this does not mean that there
+exists a unique pair $(x,y)$ satisfying~$P(x,y)$.
+
+Some intuitionistic derived rules are shown in
+Fig.\ts\ref{fol-int-derived}, again with their names.  These include
+rules for the defined symbols $\neg$, $\bimp$ and $\exists!$.  Natural
+deduction typically involves a combination of forward and backward
+reasoning, particularly with the destruction rules $(\conj E)$,
+$({\imp}E)$, and~$(\forall E)$.  Isabelle's backward style handles these
+rules badly, so sequent-style rules are derived to eliminate conjunctions,
+implications, and universal quantifiers.  Used with elim-resolution,
+\tdx{allE} eliminates a universal quantifier while \tdx{all_dupE}
+re-inserts the quantified formula for later use.  The rules
+\isa{conj\_impE}, etc., support the intuitionistic proof procedure
+(see~\S\ref{fol-int-prover}).
+
+See the on-line theory library for complete listings of the rules and
+derived rules.
+
+\begin{figure} 
+\begin{center}
+\begin{tabular}{rrr} 
+  \it name      &\it meta-type  & \it description \\ 
+  \cdx{Trueprop}& $o\To prop$           & coercion to $prop$\\
+  \cdx{Not}     & $o\To o$              & negation ($\neg$) \\
+  \cdx{True}    & $o$                   & tautology ($\top$) \\
+  \cdx{False}   & $o$                   & absurdity ($\bot$)
+\end{tabular}
+\end{center}
+\subcaption{Constants}
+
+\begin{center}
+\begin{tabular}{llrrr} 
+  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
+  \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 & 
+        universal quantifier ($\forall$) \\
+  \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 & 
+        existential quantifier ($\exists$) \\
+  \isa{EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
+        unique existence ($\exists!$)
+\end{tabular}
+\index{*"E"X"! symbol}
+\end{center}
+\subcaption{Binders} 
+
+\begin{center}
+\index{*"= symbol}
+\index{&@{\tt\&} symbol}
+\index{"!@{\tt\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working
+\index{*"-"-"> symbol}
+\index{*"<"-"> symbol}
+\begin{tabular}{rrrr} 
+  \it symbol    & \it meta-type         & \it priority & \it description \\ 
+  \tt =         & $[\alpha,\alpha]\To o$ & Left 50 & equality ($=$) \\
+  \tt \&        & $[o,o]\To o$          & Right 35 & conjunction ($\conj$) \\
+  \tt |         & $[o,o]\To o$          & Right 30 & disjunction ($\disj$) \\
+  \tt -->       & $[o,o]\To o$          & Right 25 & implication ($\imp$) \\
+  \tt <->       & $[o,o]\To o$          & Right 25 & biconditional ($\bimp$) 
+\end{tabular}
+\end{center}
+\subcaption{Infixes}
+
+\dquotes
+\[\begin{array}{rcl}
+ formula & = & \hbox{expression of type~$o$} \\
+         & | & term " = " term \quad| \quad term " \ttilde= " term \\
+         & | & "\ttilde\ " formula \\
+         & | & formula " \& " formula \\
+         & | & formula " | " formula \\
+         & | & formula " --> " formula \\
+         & | & formula " <-> " formula \\
+         & | & "ALL~" id~id^* " . " formula \\
+         & | & "EX~~" id~id^* " . " formula \\
+         & | & "EX!~" id~id^* " . " formula
+  \end{array}
+\]
+\subcaption{Grammar}
+\caption{Syntax of \texttt{FOL}} \label{fol-syntax}
+\end{figure}
+
+
+\begin{figure} 
+\begin{ttbox}
+\tdx{refl}        a=a
+\tdx{subst}       [| a=b;  P(a) |] ==> P(b)
+\subcaption{Equality rules}
+
+\tdx{conjI}       [| P;  Q |] ==> P&Q
+\tdx{conjunct1}   P&Q ==> P
+\tdx{conjunct2}   P&Q ==> Q
+
+\tdx{disjI1}      P ==> P|Q
+\tdx{disjI2}      Q ==> P|Q
+\tdx{disjE}       [| P|Q;  P ==> R;  Q ==> R |] ==> R
+
+\tdx{impI}        (P ==> Q) ==> P-->Q
+\tdx{mp}          [| P-->Q;  P |] ==> Q
+
+\tdx{FalseE}      False ==> P
+\subcaption{Propositional rules}
+
+\tdx{allI}        (!!x. P(x))  ==> (ALL x.P(x))
+\tdx{spec}        (ALL x.P(x)) ==> P(x)
+
+\tdx{exI}         P(x) ==> (EX x.P(x))
+\tdx{exE}         [| EX x.P(x);  !!x. P(x) ==> R |] ==> R
+\subcaption{Quantifier rules}
+
+\tdx{True_def}    True        == False-->False
+\tdx{not_def}     ~P          == P-->False
+\tdx{iff_def}     P<->Q       == (P-->Q) & (Q-->P)
+\tdx{ex1_def}     EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)
+\subcaption{Definitions}
+\end{ttbox}
+
+\caption{Rules of intuitionistic logic} \label{fol-rules}
+\end{figure}
+
+
+\begin{figure} 
+\begin{ttbox}
+\tdx{sym}       a=b ==> b=a
+\tdx{trans}     [| a=b;  b=c |] ==> a=c
+\tdx{ssubst}    [| b=a;  P(a) |] ==> P(b)
+\subcaption{Derived equality rules}
+
+\tdx{TrueI}     True
+
+\tdx{notI}      (P ==> False) ==> ~P
+\tdx{notE}      [| ~P;  P |] ==> R
+
+\tdx{iffI}      [| P ==> Q;  Q ==> P |] ==> P<->Q
+\tdx{iffE}      [| P <-> Q;  [| P-->Q; Q-->P |] ==> R |] ==> R
+\tdx{iffD1}     [| P <-> Q;  P |] ==> Q            
+\tdx{iffD2}     [| P <-> Q;  Q |] ==> P
+
+\tdx{ex1I}      [| P(a);  !!x. P(x) ==> x=a |]  ==>  EX! x. P(x)
+\tdx{ex1E}      [| EX! x.P(x);  !!x.[| P(x);  ALL y. P(y) --> y=x |] ==> R 
+          |] ==> R
+\subcaption{Derived rules for \(\top\), \(\neg\), \(\bimp\) and \(\exists!\)}
+
+\tdx{conjE}     [| P&Q;  [| P; Q |] ==> R |] ==> R
+\tdx{impE}      [| P-->Q;  P;  Q ==> R |] ==> R
+\tdx{allE}      [| ALL x.P(x);  P(x) ==> R |] ==> R
+\tdx{all_dupE}  [| ALL x.P(x);  [| P(x); ALL x.P(x) |] ==> R |] ==> R
+\subcaption{Sequent-style elimination rules}
+
+\tdx{conj_impE} [| (P&Q)-->S;  P-->(Q-->S) ==> R |] ==> R
+\tdx{disj_impE} [| (P|Q)-->S;  [| P-->S; Q-->S |] ==> R |] ==> R
+\tdx{imp_impE}  [| (P-->Q)-->S;  [| P; Q-->S |] ==> Q;  S ==> R |] ==> R
+\tdx{not_impE}  [| ~P --> S;  P ==> False;  S ==> R |] ==> R
+\tdx{iff_impE}  [| (P<->Q)-->S; [| P; Q-->S |] ==> Q; [| Q; P-->S |] ==> P;
+             S ==> R |] ==> R
+\tdx{all_impE}  [| (ALL x.P(x))-->S;  !!x.P(x);  S ==> R |] ==> R
+\tdx{ex_impE}   [| (EX x.P(x))-->S;  P(a)-->S ==> R |] ==> R
+\end{ttbox}
+\subcaption{Intuitionistic simplification of implication}
+\caption{Derived rules for intuitionistic logic} \label{fol-int-derived}
+\end{figure}
+
+
+\section{Generic packages}
+FOL instantiates most of Isabelle's generic packages.
+\begin{itemize}
+\item 
+It instantiates the simplifier, which is invoked through the method 
+\isa{simp}.  Both equality ($=$) and the biconditional
+($\bimp$) may be used for rewriting.  
+
+\item 
+It instantiates the classical reasoner, which is invoked mainly through the 
+methods \isa{blast} and \isa{auto}.  See~\S\ref{fol-cla-prover}
+for details. 
+%
+%\item FOL provides the tactic \ttindex{hyp_subst_tac}, which substitutes for
+%  an equality throughout a subgoal and its hypotheses.  This tactic uses FOL's
+%  general substitution rule.
+\end{itemize}
+
+\begin{warn}\index{simplification!of conjunctions}%
+  Simplifying $a=b\conj P(a)$ to $a=b\conj P(b)$ is often advantageous.  The
+  left part of a conjunction helps in simplifying the right part.  This effect
+  is not enabled by default, because it can be slow.  It can be obtained by
+  including the theorem \isa{conj_cong}\index{*conj_cong (rule)}%
+  as a congruence rule in
+  simplification, \isa{simp cong:\ conj\_cong}.
+\end{warn}
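+
+For example, with this congruence rule the simplifier can use the left
+conjunct while rewriting the right one; the following lemma (a minimal
+sketch) is then proved by a single simplifier call:
+\begin{isabelle}
+% illustrative lemma (sketch)
+\isacommand{lemma}\ "a=b\ \&\ P(a)\ <->\ a=b\ \&\ P(b)"\isanewline
+\isacommand{by}\ (simp\ cong:\ conj\_cong)
+\end{isabelle}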
+
+
+\section{Intuitionistic proof procedures} \label{fol-int-prover}
+Implication elimination (the rules~\isa{mp} and~\isa{impE}) poses
+difficulties for automated proof.  In intuitionistic logic, the assumption
+$P\imp Q$ cannot be treated like $\neg P\disj Q$.  Given $P\imp Q$, we may
+use~$Q$ provided we can prove~$P$; the proof of~$P$ may require repeated
+use of $P\imp Q$.  If the proof of~$P$ fails then the whole branch of the
+proof must be abandoned.  Thus intuitionistic propositional logic requires
+backtracking.  
+
+For an elementary example, consider the intuitionistic proof of $Q$ from
+$P\imp Q$ and $(P\imp Q)\imp P$.  The implication $P\imp Q$ is needed
+twice:
+\[ \infer[({\imp}E)]{Q}{P\imp Q &
+       \infer[({\imp}E)]{P}{(P\imp Q)\imp P & P\imp Q}} 
+\]
+The theorem prover for intuitionistic logic does not use~\isa{impE}.\@
+Instead, it simplifies implications using derived rules
+(Fig.\ts\ref{fol-int-derived}).  It reduces the antecedents of implications
+to atoms and then uses Modus Ponens: from $P\imp Q$ and~$P$ deduce~$Q$.
+The rules \tdx{conj_impE} and \tdx{disj_impE} are 
+straightforward: $(P\conj Q)\imp S$ is equivalent to $P\imp (Q\imp S)$, and
+$(P\disj Q)\imp S$ is equivalent to the conjunction of $P\imp S$ and $Q\imp
+S$.  The other \ldots\isa{\_impE} rules are unsafe; the method requires
+backtracking.  All the rules are derived in the same simple manner.
+
+Dyckhoff has independently discovered similar rules, and (more importantly)
+has demonstrated their completeness for propositional
+logic~\cite{dyckhoff}.  However, the tactics given below are not complete
+for first-order logic because they discard universally quantified
+assumptions after a single use. These are \ML{} functions, and are listed
+below with their \ML{} type:
+\begin{ttbox} 
+mp_tac              : int -> tactic
+eq_mp_tac           : int -> tactic
+IntPr.safe_step_tac : int -> tactic
+IntPr.safe_tac      :        tactic
+IntPr.inst_step_tac : int -> tactic
+IntPr.step_tac      : int -> tactic
+IntPr.fast_tac      : int -> tactic
+IntPr.best_tac      : int -> tactic
+\end{ttbox}
+Most of these belong to the \ML{} structure \texttt{IntPr} and resemble the
+tactics of Isabelle's classical reasoner.  There are no corresponding
+Isar methods, but you can use the 
+\isa{tactic} method to call \ML{} tactics in an Isar script:
+\begin{isabelle}
+\isacommand{apply}\ (tactic\ {* IntPr.fast\_tac 1*})
+\end{isabelle}
+Here is a description of each tactic:
+
+\begin{ttdescription}
+\item[\ttindexbold{mp_tac} {\it i}] 
+attempts to use \tdx{notE} or \tdx{impE} within the assumptions in
+subgoal $i$.  For each assumption of the form $\neg P$ or $P\imp Q$, it
+searches for another assumption unifiable with~$P$.  By
+contradiction with $\neg P$ it can solve the subgoal completely; by Modus
+Ponens it can replace the assumption $P\imp Q$ by $Q$.  The tactic can
+produce multiple outcomes, enumerating all suitable pairs of assumptions.
+
+\item[\ttindexbold{eq_mp_tac} {\it i}] 
+is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
+is safe.
+
+\item[\ttindexbold{IntPr.safe_step_tac} $i$] performs a safe step on
+subgoal~$i$.  This may include proof by assumption or Modus Ponens (taking
+care not to instantiate unknowns), or \texttt{hyp_subst_tac}. 
+
+\item[\ttindexbold{IntPr.safe_tac}] repeatedly performs safe steps on all 
+subgoals.  It is deterministic, with at most one outcome.
+
+\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like \texttt{safe_step_tac},
+but allows unknowns to be instantiated.
+
+\item[\ttindexbold{IntPr.step_tac} $i$] tries \texttt{safe_tac} or {\tt
+    inst_step_tac}, or applies an unsafe rule.  This is the basic step of
+  the intuitionistic proof procedure.
+
+\item[\ttindexbold{IntPr.fast_tac} $i$] applies \texttt{step_tac}, using
+depth-first search, to solve subgoal~$i$.
+
+\item[\ttindexbold{IntPr.best_tac} $i$] applies \texttt{step_tac}, using
+best-first search (guided by the size of the proof state) to solve subgoal~$i$.
+\end{ttdescription}
+Here are some of the theorems that \texttt{IntPr.fast_tac} proves
+automatically.  The latter three date from {\it Principia Mathematica}
+(*11.53, *11.55, *11.61)~\cite{principia}.
+\begin{ttbox}
+~~P & ~~(P --> Q) --> ~~Q
+(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))
+(EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))
+(EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))
+\end{ttbox}
+
+
+
+\begin{figure} 
+\begin{ttbox}
+\tdx{excluded_middle}    ~P | P
+
+\tdx{disjCI}    (~Q ==> P) ==> P|Q
+\tdx{exCI}      (ALL x. ~P(x) ==> P(a)) ==> EX x.P(x)
+\tdx{impCE}     [| P-->Q; ~P ==> R; Q ==> R |] ==> R
+\tdx{iffCE}     [| P<->Q;  [| P; Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
+\tdx{notnotD}   ~~P ==> P
+\tdx{swap}      ~P ==> (~Q ==> P) ==> Q
+\end{ttbox}
+\caption{Derived rules for classical logic} \label{fol-cla-derived}
+\end{figure}
+
+
+\section{Classical proof procedures} \label{fol-cla-prover}
+The classical theory, \thydx{FOL}, consists of intuitionistic logic plus
+the rule
+$$ \vcenter{\infer{P}{\infer*{P}{[\neg P]}}} \eqno(classical) $$
+\noindent
+Natural deduction in classical logic is not really all that natural.  FOL
+derives classical introduction rules for $\disj$ and~$\exists$, as well as
+classical elimination rules for~$\imp$ and~$\bimp$, and the swap rule (see
+Fig.\ts\ref{fol-cla-derived}).
+
+The classical reasoner is installed.  The most useful methods are
+\isa{blast} (pure classical reasoning) and \isa{auto} (classical reasoning
+combined with simplification), but the full range of
+methods is available, including \isa{clarify},
+\isa{fast}, \isa{best} and \isa{force}. 
+ See the 
+\iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
+        {Chap.\ts\ref{chap:classical}} 
+and the \emph{Tutorial}~\cite{isa-tutorial}
+for more discussion of classical proof methods.
+
+
+\section{An intuitionistic example}
+Here is a session similar to one in the book {\em Logic and Computation}
+\cite[pages~222--3]{paulson87}. It illustrates the treatment of
+quantifier reasoning, which is different in Isabelle than it is in
+{\sc lcf}-based theorem provers such as {\sc hol}.  
+
+The theory header specifies that we are working in intuitionistic
+logic by designating \isa{IFOL} rather than \isa{FOL} as the parent
+theory:
+\begin{isabelle}
+\isacommand{theory}\ IFOL\_examples\ \isacommand{imports}\ IFOL\isanewline
+\isacommand{begin}
+\end{isabelle}
+The proof begins by entering the goal, then applying the rule $({\imp}I)$.
+\begin{isabelle}
+\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
+\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
+\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
+\isanewline
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ \isasymforall x.\ \isasymexists y.\ Q(x,\ y)
+\end{isabelle}
+Isabelle's output is shown as it would appear using Proof General.
+In this example, we shall never have more than one subgoal.  Applying
+$({\imp}I)$ replaces~\isa{\isasymlongrightarrow}
+by~\isa{\isasymLongrightarrow}, so that
+\(\ex{y}\all{x}Q(x,y)\) becomes an assumption.  We have the choice of
+$({\exists}E)$ and $({\forall}I)$; let us try the latter.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ allI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)\hfill\((*)\)
+\end{isabelle}
+Applying $({\forall}I)$ replaces the \isa{\isasymforall
+x} (in ASCII, \isa{ALL~x}) by \isa{\isasymAnd x}
+(or \isa{!!x}), replacing FOL's universal quantifier by Isabelle's 
+meta universal quantifier.  The bound variable is a {\bf parameter} of
+the subgoal.  We now must choose between $({\exists}I)$ and
+$({\exists}E)$.  What happens if the wrong rule is chosen?
+\begin{isabelle}
+\isacommand{apply}\ (rule\ exI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ Q(x,\ ?y2(x))
+\end{isabelle}
+The new subgoal~1 contains the function variable \isa{?y2}.  Instantiating
+\isa{?y2} can replace~\isa{?y2(x)} by a term containing~\isa{x}, even
+though~\isa{x} is a bound variable.  Now we analyse the assumption
+\(\exists y.\forall x. Q(x,y)\) using elimination rules:
+\begin{isabelle}
+\isacommand{apply}\ (erule\ exE)\isanewline
+\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\ \isasymLongrightarrow \ Q(x,\ ?y2(x))
+\end{isabelle}
+Applying $(\exists E)$ has produced the parameter \isa{y} and stripped the
+existential quantifier from the assumption.  But the subgoal is unprovable:
+there is no way to unify \isa{?y2(x)} with the bound variable~\isa{y}.
+Using Proof General, we can return to the critical point, marked
+$(*)$ above.  This time we apply $({\exists}E)$:
+\begin{isabelle}
+\isacommand{apply}\ (erule\ exE)\isanewline
+\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)
+\end{isabelle}
+We now have two parameters and no scheme variables.  Applying
+$({\exists}I)$ and $({\forall}E)$ produces two scheme variables, which are
+applied to those parameters.  Parameters should be produced early, as this
+example demonstrates.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ exI)\isanewline
+\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ Q(x,\ ?y3(x,\ y))
+\isanewline
+\isacommand{apply}\ (erule\ allE)\isanewline
+\ 1.\ \isasymAnd x\ y.\ Q(?x4(x,\ y),\ y)\ \isasymLongrightarrow \
+Q(x,\ ?y3(x,\ y))
+\end{isabelle}
+The subgoal has variables \isa{?y3} and \isa{?x4} applied to both
+parameters.  The obvious projection functions unify \isa{?x4(x,y)} with~\isa{
+x} and \isa{?y3(x,y)} with~\isa{y}.
+\begin{isabelle}
+\isacommand{apply}\ assumption\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+The theorem was proved in six method invocations, not counting the
+abandoned ones.  But proof checking is tedious, and the \ML{} tactic
+\ttindex{IntPr.fast_tac} can prove the theorem in one step.
+\begin{isabelle}
+\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
+\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
+\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
+\isanewline
+\isacommand{by} (tactic \ttlbrace*IntPr.fast_tac 1*\ttrbrace)\isanewline
+No\ subgoals!
+\end{isabelle}
+
+
+\section{An example of intuitionistic negation}
+The following example demonstrates the specialized forms of implication
+elimination.  Even propositional formulae can be difficult to prove from
+the basic rules; the specialized rules help considerably.  
+
+Propositional examples are easy to invent.  As Dummett notes~\cite[page
+28]{dummett}, $\neg P$ is classically provable if and only if it is
+intuitionistically provable;  therefore, $P$ is classically provable if and
+only if $\neg\neg P$ is intuitionistically provable.%
+\footnote{This remark holds only for propositional logic, not if $P$ is
+  allowed to contain quantifiers.}
+%
+Proving $\neg\neg P$ intuitionistically is
+much harder than proving~$P$ classically.
+
+Our example is the double negation of the classical tautology $(P\imp
+Q)\disj (Q\imp P)$.  The first step is to apply the
+\isa{unfold} method, which expands
+negations to implications using the definition $\neg P\equiv P\imp\bot$
+and allows use of the special implication rules.
+\begin{isabelle}
+\isacommand{lemma}\ "\isachartilde \ \isachartilde \ ((P-->Q)\ |\ (Q-->P))"\isanewline
+\ 1.\ \isasymnot \ \isasymnot \ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P))
+\isanewline
+\isacommand{apply}\ (unfold\ not\_def)\isanewline
+\ 1.\ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False)\ \isasymlongrightarrow \ False%
+\end{isabelle}
+The next step is a trivial use of $(\imp I)$.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ (P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\ \isasymLongrightarrow \ False%
+\end{isabelle}
+By $(\imp E)$ it would suffice to prove $(P\imp Q)\disj (Q\imp P)$, but
+that formula is not a theorem of intuitionistic logic.  Instead, we
+apply the specialized implication rule \tdx{disj_impE}.  It splits the
+assumption into two assumptions, one for each disjunct.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ disj\_impE)\isanewline
+\ 1.\ \isasymlbrakk (P\ \isasymlongrightarrow \ Q)\ \isasymlongrightarrow \ False;\ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \
+False
+\end{isabelle}
+We cannot hope to prove $P\imp Q$ or $Q\imp P$ separately, but
+their negations are inconsistent.  Applying \tdx{imp_impE} breaks down
+the assumption $\neg(P\imp Q)$, asking to show~$Q$ while providing new
+assumptions~$P$ and~$\neg Q$.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ imp\_impE)\isanewline
+\ 1.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ P;\ Q\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \
+False
+\end{isabelle}
+Subgoal~2 holds trivially; let us ignore it and continue working on
+subgoal~1.  Thanks to the assumption~$P$, we could prove $Q\imp P$;
+applying \tdx{imp_impE} is simpler.
+\begin{isabelle}
+\ \isacommand{apply}\ (erule\ imp\_impE)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ Q;\ P\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ P\isanewline
+\ 2.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 3.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ False%
+\end{isabelle}
+The three subgoals are all trivial.
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\
+False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\
+\isasymlongrightarrow \ False;\ False\isasymrbrakk \
+\isasymLongrightarrow \ False%
+\isanewline
+\isacommand{apply}\ (erule\ FalseE)+\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+This proof is also trivial for the \ML{} tactic \isa{IntPr.fast_tac}.
+
+
+\section{A classical example} \label{fol-cla-example}
+To illustrate classical logic, we shall prove the theorem
+$\ex{y}\all{x}P(y)\imp P(x)$.  Informally, the theorem can be proved as
+follows.  Choose~$y$ such that~$\neg P(y)$, if such exists; otherwise
+$\all{x}P(x)$ is true.  Either way the theorem holds.  First, we must
+work in a theory based on classical logic, the theory \isa{FOL}:
+\begin{isabelle}
+\isacommand{theory}\ FOL\_examples\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}
+\end{isabelle}
+
+The formal proof does not conform in any obvious way to the sketch given
+above.  Its key step is its first rule, \tdx{exCI}, a classical
+version of~$(\exists I)$ that allows multiple instantiation of the
+quantifier.
+\begin{isabelle}
+\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
+\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
+\isanewline
+\isacommand{apply}\ (rule\ exCI)\isanewline
+\ 1.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ \isasymforall x.\ P(?a)\ \isasymlongrightarrow \ P(x)
+\end{isabelle}
+We can either exhibit a term \isa{?a} to satisfy the conclusion of
+subgoal~1, or produce a contradiction from the assumption.  The next
+steps are routine.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ allI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ P(?a)\ \isasymlongrightarrow \ P(x)
+\isanewline
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x));\ P(?a)\isasymrbrakk \ \isasymLongrightarrow \ P(x)
+\end{isabelle}
+By the duality between $\exists$ and~$\forall$, applying~$(\forall E)$
+is equivalent to applying~$(\exists I)$ again.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ allE)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk P(?a);\ \isasymnot \ (\isasymforall xa.\ P(?y3(x))\ \isasymlongrightarrow \ P(xa))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
+\end{isabelle}
+In classical logic, a negated assumption is equivalent to a conclusion.  To
+get this effect, we create a swapped version of $(\forall I)$ and apply it
+using \ttindex{erule}.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ allI\ [THEN\ [2]\ swap])\isanewline
+\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x)\isasymrbrakk \ \isasymLongrightarrow \ P(?y3(x))\ \isasymlongrightarrow \ P(xa)
+\end{isabelle}
+The operand of \isa{erule} above denotes the following theorem:
+\begin{isabelle}
+\ \ \ \ \isasymlbrakk \isasymnot \ (\isasymforall x.\ ?P1(x));\
+\isasymAnd x.\ \isasymnot \ ?P\ \isasymLongrightarrow \
+?P1(x)\isasymrbrakk \
+\isasymLongrightarrow \ ?P%
+\end{isabelle}
+The previous conclusion, \isa{P(x)}, has become a negated assumption.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(xa)
+\end{isabelle}
+The subgoal has three assumptions.  We produce a contradiction between the
+\index{assumptions!contradictory} assumptions~\isa{\isasymnot P(x)}
+and~\isa{P(?y3(x))}.   The proof never instantiates the
+unknown~\isa{?a}.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ notE)\isanewline
+\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
+\isanewline
+\isacommand{apply}\ assumption\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+The civilised way to prove this theorem is with the
+\methdx{blast} method, which automatically uses the classical form
+of the rule~$(\exists I)$:
+\begin{isabelle}
+\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
+\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
+\isanewline
+\isacommand{by}\ blast\isanewline
+No\ subgoals!
+\end{isabelle}
+If this theorem seems counterintuitive, then perhaps you are an
+intuitionist.  In constructive logic, proving $\ex{y}\all{x}P(y)\imp P(x)$
+requires exhibiting a particular term~$t$ such that $\all{x}P(t)\imp P(x)$,
+which we cannot do without further knowledge about~$P$.
+
+
+\section{Derived rules and the classical tactics}
+Classical first-order logic can be extended with the propositional
+connective $if(P,Q,R)$, where 
+$$ if(P,Q,R) \equiv P\conj Q \disj \neg P \conj R. \eqno(if) $$
+Theorems about $if$ can be proved by treating this as an abbreviation,
+replacing $if(P,Q,R)$ by $P\conj Q \disj \neg P \conj R$ in subgoals.  But
+this duplicates~$P$, causing an exponential blowup and an unreadable
+formula.  Introducing further abbreviations makes the problem worse.
+
+Natural deduction demands rules that introduce and eliminate $if(P,Q,R)$
+directly, without reference to its definition.  The simple identity
+\[ if(P,Q,R) \,\bimp\, (P\imp Q)\conj (\neg P\imp R) \]
+suggests that the
+$if$-introduction rule should be
+\[ \infer[({if}\,I)]{if(P,Q,R)}{\infer*{Q}{[P]}  &  \infer*{R}{[\neg P]}} \]
+The $if$-elimination rule reflects the definition of $if(P,Q,R)$ and the
+elimination rules for~$\disj$ and~$\conj$.
+\[ \infer[({if}\,E)]{S}{if(P,Q,R) & \infer*{S}{[P,Q]}
+                                  & \infer*{S}{[\neg P,R]}} 
+\]
+Having made these plans, we get down to work with Isabelle.  The theory of
+classical logic, \texttt{FOL}, is extended with the constant
+$if::[o,o,o]\To o$.  The axiom \tdx{if_def} asserts the
+equation~$(if)$.
+\begin{isabelle}
+\isacommand{theory}\ If\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}\isanewline
+\isacommand{constdefs}\isanewline
+\ \ if\ ::\ "[o,o,o]=>o"\isanewline
+\ \ \ "if(P,Q,R)\ ==\ P\&Q\ |\ \isachartilde P\&R"
+\end{isabelle}
+We create the file \texttt{If.thy} containing these declarations.  (This file
+is in directory \texttt{FOL/ex} of the Isabelle distribution.)  Typing
+\begin{isabelle}
+use_thy "If";  
+\end{isabelle}
+loads that theory and sets it to be the current context.
+
+
+\subsection{Deriving the introduction rule}
+
+The derivations of the introduction and elimination rules demonstrate the
+methods for rewriting with definitions.  Classical reasoning is required,
+so we use \isa{blast}.
+
+The introduction rule, given the premises $P\Imp Q$ and $\neg P\Imp R$,
+concludes $if(P,Q,R)$.  We propose this lemma and immediately simplify
+using \isa{if\_def} to expand the definition of \isa{if} in the
+subgoal.
+\begin{isabelle}
+\isacommand{lemma}\ ifI: "[|\ P\ ==>\ Q;\ \isachartilde P\ ==>\ R\
+|]\ ==>\ if(P,Q,R)"\isanewline
+\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ Q,\ R)
+\isanewline
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \
+R
+\end{isabelle}
+The rule's premises, although expressed using meta-level implication
+(\isa{\isasymLongrightarrow}) are passed as ordinary implications to
+\methdx{blast}.  
+\begin{isabelle}
+\isacommand{apply}\ blast\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+
+
+\subsection{Deriving the elimination rule}
+The elimination rule has three premises, two of which are themselves rules.
+The conclusion is simply $S$.
+\begin{isabelle}
+\isacommand{lemma}\ ifE:\isanewline
+\ \ \ "[|\ if(P,Q,R);\ \ [|P;\ Q|]\ ==>\ S;\ [|\isachartilde P;\ R|]\ ==>\ S\ |]\ ==>\ S"\isanewline
+\ 1.\ \isasymlbrakk if(P,\ Q,\ R);\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
+\isanewline
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ \isasymlbrakk P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R;\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
+\end{isabelle}
+The proof script is the same as before: \isa{simp} followed by
+\isa{blast}:
+\begin{isabelle}
+\isacommand{apply}\ blast\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+
+
+\subsection{Using the derived rules}
+Our new derived rules, \tdx{ifI} and~\tdx{ifE}, permit natural
+proofs of theorems such as the following:
+\begin{eqnarray*}
+    if(P, if(Q,A,B), if(Q,C,D)) & \bimp & if(Q,if(P,A,C),if(P,B,D)) \\
+    if(if(P,Q,R), A, B)         & \bimp & if(P,if(Q,A,B),if(R,A,B))
+\end{eqnarray*}
+Proofs also require the classical reasoning rules and the $\bimp$
+introduction rule (called~\tdx{iffI}: do not confuse with~\isa{ifI}). 
+
+To display the $if$-rules in action, let us analyse a proof step by step.
+\begin{isabelle}
+\isacommand{lemma}\ if\_commute:\isanewline
+\ \ \ \ \ "if(P,\ if(Q,A,B),\
+if(Q,C,D))\ <->\ if(Q,\ if(P,A,C),\ if(P,B,D))"\isanewline
+\isacommand{apply}\ (rule\ iffI)\isanewline
+\ 1.\ if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 1.\ }if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 2.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 2.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\end{isabelle}
+The $if$-elimination rule can be applied twice in succession.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ ifE)\isanewline
+\ 1.\ \isasymlbrakk P;\ if(Q,\ A,\ B)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 2.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 3.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 3.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\isanewline
+\isacommand{apply}\ (erule\ ifE)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q;\ A\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 2.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 3.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 4.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 4.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\end{isabelle}
+%
+In the first two subgoals, all assumptions have been reduced to atoms.  Now
+$if$-introduction can be applied.  Observe how the $if$-rules break down
+occurrences of $if$ when they become the outermost connective.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ ifI)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ A,\ C)\isanewline
+\ 2.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
+\ 3.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 4.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 5.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 5.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\isanewline
+\isacommand{apply}\ (rule\ ifI)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ P\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
+\ 2.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \ C\isanewline
+\ 3.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
+\ 4.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 5.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 6.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 6.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\end{isabelle}
+Where do we stand?  The first subgoal holds by assumption; the second and
+third, by contradiction.  This is getting tedious.  We could use the classical
+reasoner, but first let us extend the default claset with the derived rules
+for~$if$.
+\begin{isabelle}
+\isacommand{declare}\ ifI\ [intro!]\isanewline
+\isacommand{declare}\ ifE\ [elim!]
+\end{isabelle}
+With these declarations, we could have proved this theorem with a single
+call to \isa{blast}.  Here is another example:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
+\isanewline
+\isacommand{by}\ blast
+\end{isabelle}
+
+
+\subsection{Derived rules versus definitions}
+Dispensing with the derived rules, we can treat $if$ as an
+abbreviation, and let \ttindex{blast_tac} prove the expanded formula.  Let
+us redo the previous proof:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
+\end{isabelle}
+This time, we simply unfold using the definition of $if$:
+\begin{isabelle}
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
+\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ A\ \isasymor \ \isasymnot \ R\ \isasymand \ B)
+\end{isabelle}
+We are left with a subgoal in pure first-order logic, and it falls to
+\isa{blast}:
+\begin{isabelle}
+\isacommand{apply}\ blast\isanewline
+No\ subgoals!
+\end{isabelle}
+Expanding definitions reduces the extended logic to the base logic.  This
+approach has its merits, but it can be slow.  In these examples, proofs
+using the derived rules for~\isa{if} run about six
+times faster  than proofs using just the rules of first-order logic.
+
+Expanding definitions can also make it harder to diagnose errors. 
+Suppose we are having difficulties in proving some goal.  If by expanding
+definitions we have made it unreadable, then we have little hope of
+diagnosing the problem.
+
+Attempts at program verification often yield invalid assertions.
+Let us try to prove one:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
+A))
+\end{isabelle}
+Calling \isa{blast} yields an uninformative failure message. We can
+get a closer look at the situation by applying \methdx{auto}.
+\begin{isabelle}
+\isacommand{apply}\ auto\isanewline
+\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
+\ 2.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
+\ 3.\ \isasymlbrakk B;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
+\ 4.\ \isasymlbrakk \isasymnot \ R;\ A;\ \isasymnot \ B;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \
+False
+\end{isabelle}
+Subgoal~1 is unprovable and yields a countermodel: $P$ and~$B$ are false
+while~$R$ and~$A$ are true.  This truth assignment reduces the main goal to
+$true\bimp false$, which is of course invalid.
+
+We can repeat this analysis by expanding definitions, using just the rules of
+first-order logic:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
+A))
+\isanewline
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
+\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ B\ \isasymor \ \isasymnot \ R\ \isasymand \ A)
+\end{isabelle}
+Again \isa{blast} would fail, so we try \methdx{auto}:
+\begin{isabelle}
+\isacommand{apply}\ (auto)\ \isanewline
+\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
+\ 2.\ \isasymlbrakk A;\ \isasymnot \ P;\ R;\ \isasymnot \ B\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 3.\ \isasymlbrakk B;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 4.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ A;\ \isasymnot \ R;\ Q\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 5.\ \isasymlbrakk B;\ \isasymnot \ Q;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 6.\ \isasymlbrakk B;\ \isasymnot \ A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 7.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 8.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ Q%
+\end{isabelle}
+Subgoal~1 yields the same countermodel as before.  But each proof step has
+taken six times as long, and the final result contains twice as many subgoals.
+
+Expanding your definitions usually makes proofs more difficult.  This is
+why the classical prover has been designed to accept derived rules.
+
+\index{first-order logic|)}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/document/ZF.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,2717 @@
+\chapter{Zermelo-Fraenkel Set Theory}
+\index{set theory|(}
+
+The theory~\thydx{ZF} implements Zermelo-Fraenkel set
+theory~\cite{halmos60,suppes72} as an extension of~\texttt{FOL}, classical
+first-order logic.  The theory includes a collection of derived natural
+deduction rules, for use with Isabelle's classical reasoner.  Some
+of it is based on the work of No\"el~\cite{noel}.
+
+A tremendous amount of set theory has been formally developed, including the
+basic properties of relations, functions, ordinals and cardinals.  Significant
+results have been proved, such as the Schr\"oder-Bernstein Theorem, the
+Wellordering Theorem and a version of Ramsey's Theorem.  \texttt{ZF} provides
+both the integers and the natural numbers.  General methods have been
+developed for solving recursion equations over monotonic functors; these have
+been applied to yield constructions of lists, trees, infinite lists, etc.
+
+\texttt{ZF} has a flexible package for handling inductive definitions,
+such as inference systems, and datatype definitions, such as lists and
+trees.  Moreover it handles coinductive definitions, such as
+bisimulation relations, and codatatype definitions, such as streams.  It
+provides a streamlined syntax for defining primitive recursive functions over
+datatypes. 
+
+Published articles~\cite{paulson-set-I,paulson-set-II} describe \texttt{ZF}
+less formally than this chapter.  Isabelle employs a novel treatment of
+non-well-founded data structures within the standard {\sc zf} axioms including
+the Axiom of Foundation~\cite{paulson-mscs}.
+
+
+\section{Which version of axiomatic set theory?}
+The two main axiom systems for set theory are Bernays-G\"odel~({\sc bg})
+and Zermelo-Fraenkel~({\sc zf}).  Resolution theorem provers can use {\sc
+  bg} because it is finite~\cite{boyer86,quaife92}.  {\sc zf} does not
+have a finite axiom system because of its Axiom Scheme of Replacement.
+This makes it awkward to use with many theorem provers, since instances
+of the axiom scheme have to be invoked explicitly.  Since Isabelle has no
+difficulty with axiom schemes, we may adopt either axiom system.
+
+These two theories differ in their treatment of {\bf classes}, which are
+collections that are `too big' to be sets.  The class of all sets,~$V$,
+cannot be a set without admitting Russell's Paradox.  In {\sc bg}, both
+classes and sets are individuals; $x\in V$ expresses that $x$ is a set.  In
+{\sc zf}, all variables denote sets; classes are identified with unary
+predicates.  The two systems define essentially the same sets and classes,
+with similar properties.  In particular, a class cannot belong to another
+class (let alone a set).
+
+Modern set theorists tend to prefer {\sc zf} because they are mainly concerned
+with sets, rather than classes.  {\sc bg} requires tiresome proofs that various
+collections are sets; for instance, showing $x\in\{x\}$ requires showing that
+$x$ is a set.
+
+
+\begin{figure} \small
+\begin{center}
+\begin{tabular}{rrr} 
+  \it name      &\it meta-type  & \it description \\ 
+  \cdx{Let}     & $[\alpha,\alpha\To\beta]\To\beta$ & let binder\\
+  \cdx{0}       & $i$           & empty set\\
+  \cdx{cons}    & $[i,i]\To i$  & finite set constructor\\
+  \cdx{Upair}   & $[i,i]\To i$  & unordered pairing\\
+  \cdx{Pair}    & $[i,i]\To i$  & ordered pairing\\
+  \cdx{Inf}     & $i$   & infinite set\\
+  \cdx{Pow}     & $i\To i$      & powerset\\
+  \cdx{Union} \cdx{Inter} & $i\To i$    & set union/intersection \\
+  \cdx{split}   & $[[i,i]\To i, i] \To i$ & generalized projection\\
+  \cdx{fst} \cdx{snd}   & $i\To i$      & projections\\
+  \cdx{converse}& $i\To i$      & converse of a relation\\
+  \cdx{succ}    & $i\To i$      & successor\\
+  \cdx{Collect} & $[i,i\To o]\To i$     & separation\\
+  \cdx{Replace} & $[i, [i,i]\To o] \To i$       & replacement\\
+  \cdx{PrimReplace} & $[i, [i,i]\To o] \To i$   & primitive replacement\\
+  \cdx{RepFun}  & $[i, i\To i] \To i$   & functional replacement\\
+  \cdx{Pi} \cdx{Sigma}  & $[i,i\To i]\To i$     & general product/sum\\
+  \cdx{domain}  & $i\To i$      & domain of a relation\\
+  \cdx{range}   & $i\To i$      & range of a relation\\
+  \cdx{field}   & $i\To i$      & field of a relation\\
+  \cdx{Lambda}  & $[i, i\To i]\To i$    & $\lambda$-abstraction\\
+  \cdx{restrict}& $[i, i] \To i$        & restriction of a function\\
+  \cdx{The}     & $[i\To o]\To i$       & definite description\\
+  \cdx{if}      & $[o,i,i]\To i$        & conditional\\
+  \cdx{Ball} \cdx{Bex}  & $[i, i\To o]\To o$    & bounded quantifiers
+\end{tabular}
+\end{center}
+\subcaption{Constants}
+
+\begin{center}
+\index{*"`"` symbol}
+\index{*"-"`"` symbol}
+\index{*"` symbol}\index{function applications}
+\index{*"- symbol}
+\index{*": symbol}
+\index{*"<"= symbol}
+\begin{tabular}{rrrr} 
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \tt ``        & $[i,i]\To i$  &  Left 90      & image \\
+  \tt -``       & $[i,i]\To i$  &  Left 90      & inverse image \\
+  \tt `         & $[i,i]\To i$  &  Left 90      & application \\
+  \sdx{Int}     & $[i,i]\To i$  &  Left 70      & intersection ($\int$) \\
+  \sdx{Un}      & $[i,i]\To i$  &  Left 65      & union ($\un$) \\
+  \tt -         & $[i,i]\To i$  &  Left 65      & set difference ($-$) \\[1ex]
+  \tt:          & $[i,i]\To o$  &  Left 50      & membership ($\in$) \\
+  \tt <=        & $[i,i]\To o$  &  Left 50      & subset ($\subseteq$) 
+\end{tabular}
+\end{center}
+\subcaption{Infixes}
+\caption{Constants of ZF} \label{zf-constants}
+\end{figure} 
+
+
+\section{The syntax of set theory}
+The language of set theory, as studied by logicians, has no constants.  The
+traditional axioms merely assert the existence of empty sets, unions,
+powersets, etc.; this would be intolerable for practical reasoning.  The
+Isabelle theory declares constants for primitive sets.  It also extends
+\texttt{FOL} with additional syntax for finite sets, ordered pairs,
+comprehension, general union/intersection, general sums/products, and
+bounded quantifiers.  In most other respects, Isabelle implements precisely
+Zermelo-Fraenkel set theory.
+
+Figure~\ref{zf-constants} lists the constants and infixes of~ZF, while
+Figure~\ref{zf-trans} presents the syntax translations.  Finally,
+Figure~\ref{zf-syntax} presents the full grammar for set theory, including the
+constructs of FOL.
+
+Local abbreviations can be introduced by a \isa{let} construct whose
+syntax appears in Fig.\ts\ref{zf-syntax}.  Internally it is translated into
+the constant~\cdx{Let}.  It can be expanded by rewriting with its
+definition, \tdx{Let_def}.
+
+Apart from \isa{let}, set theory does not use polymorphism.  All terms in
+ZF have type~\tydx{i}, which is the type of individuals and has
+class~\cldx{term}.  The type of first-order formulae, remember, 
+is~\tydx{o}.
+
+Infix operators include binary union and intersection ($A\un B$ and
+$A\int B$), set difference ($A-B$), and the subset and membership
+relations.  Note that $a$\verb|~:|$b$ is translated to $\lnot(a\in b)$,
+which is equivalent to  $a\notin b$.  The
+union and intersection operators ($\bigcup A$ and $\bigcap A$) form the
+union or intersection of a set of sets; $\bigcup A$ means the same as
+$\bigcup@{x\in A}x$.  Of these operators, only $\bigcup A$ is primitive.
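+
+For instance, using the derived rules presented later in this chapter
+(Figures~\ref{zf-lemmas1} and~\ref{zf-lemmas3}), elementary facts about
+these operators ought to be provable automatically.  A small sketch in
+Isar:
+\begin{ttbox}\isastyleminor
+lemma "C \isasymin A ==> C \isasymsubseteq Union(A)"
+by blast
+\end{ttbox}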
+
+The constant \cdx{Upair} constructs unordered pairs; thus \isa{Upair($A$,$B$)} denotes the set~$\{A,B\}$ and
+\isa{Upair($A$,$A$)} denotes the singleton~$\{A\}$.  General union is
+used to define binary union.  The Isabelle version goes on to define
+the constant
+\cdx{cons}:
+\begin{eqnarray*}
+   A\cup B              & \equiv &       \bigcup(\isa{Upair}(A,B)) \\
+   \isa{cons}(a,B)      & \equiv &        \isa{Upair}(a,a) \un B
+\end{eqnarray*}
+The $\{a@1, \ldots\}$ notation abbreviates finite sets constructed in the
+obvious manner using~\isa{cons} and~$\emptyset$ (the empty set):
+\begin{eqnarray*}
+ \{a,b,c\} & \equiv & \isa{cons}(a,\isa{cons}(b,\isa{cons}(c,\emptyset)))
+\end{eqnarray*}
+
+The constant \cdx{Pair} constructs ordered pairs, as in \isa{Pair($a$,$b$)}.  Ordered pairs may also be written within angle brackets,
+as {\tt<$a$,$b$>}.  The $n$-tuple {\tt<$a@1$,\ldots,$a@{n-1}$,$a@n$>}
+abbreviates the nest of pairs\par\nobreak
+\centerline{\isa{Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots).}}
+
+In ZF, a function is a set of pairs.  A ZF function~$f$ is simply an
+individual as far as Isabelle is concerned: its Isabelle type is~$i$, not, say,
+$i\To i$.  The infix operator~{\tt`} denotes the application of a function set
+to its argument; we must write~$f{\tt`}x$, not~$f(x)$.  The syntax for image
+is~$f{\tt``}A$ and that for inverse image is~$f{\tt-``}A$.
+
+
+\begin{figure} 
+\index{lambda abs@$\lambda$-abstractions}
+\index{*"-"> symbol}
+\index{*"* symbol}
+\begin{center} \footnotesize\tt\frenchspacing
+\begin{tabular}{rrr} 
+  \it external          & \it internal  & \it description \\ 
+  $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm negated membership\\
+  \ttlbrace$a@1$, $\ldots$, $a@n$\ttrbrace  &  cons($a@1$,$\ldots$,cons($a@n$,0)) &
+        \rm finite set \\
+  <$a@1$, $\ldots$, $a@{n-1}$, $a@n$> & 
+        Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots) &
+        \rm ordered $n$-tuple \\
+  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x. P[x]$) &
+        \rm separation \\
+  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y. Q[x,y]$) &
+        \rm replacement \\
+  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x. b[x]$) &
+        \rm functional replacement \\
+  \sdx{INT} $x$:$A . B[x]$      & Inter(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
+        \rm general intersection \\
+  \sdx{UN}  $x$:$A . B[x]$      & Union(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
+        \rm general union \\
+  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x. B[x]$) & 
+        \rm general product \\
+  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x. B[x]$) & 
+        \rm general sum \\
+  $A$ -> $B$            & Pi($A$,$\lambda x. B$) & 
+        \rm function space \\
+  $A$ * $B$             & Sigma($A$,$\lambda x. B$) & 
+        \rm binary product \\
+  \sdx{THE}  $x . P[x]$ & The($\lambda x. P[x]$) & 
+        \rm definite description \\
+  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x. b[x]$) & 
+        \rm $\lambda$-abstraction\\[1ex]
+  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x. P[x]$) & 
+        \rm bounded $\forall$ \\
+  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x. P[x]$) & 
+        \rm bounded $\exists$
+\end{tabular}
+\end{center}
+\caption{Translations for ZF} \label{zf-trans}
+\end{figure} 
+
+
+\begin{figure} 
+\index{*let symbol}
+\index{*in symbol}
+\dquotes
+\[\begin{array}{rcl}
+    term & = & \hbox{expression of type~$i$} \\
+         & | & "let"~id~"="~term";"\dots";"~id~"="~term~"in"~term \\
+         & | & "if"~term~"then"~term~"else"~term \\
+         & | & "{\ttlbrace} " term\; ("," term)^* " {\ttrbrace}" \\
+         & | & "< "  term\; ("," term)^* " >"  \\
+         & | & "{\ttlbrace} " id ":" term " . " formula " {\ttrbrace}" \\
+         & | & "{\ttlbrace} " id " . " id ":" term ", " formula " {\ttrbrace}" \\
+         & | & "{\ttlbrace} " term " . " id ":" term " {\ttrbrace}" \\
+         & | & term " `` " term \\
+         & | & term " -`` " term \\
+         & | & term " ` " term \\
+         & | & term " * " term \\
+         & | & term " \isasyminter " term \\
+         & | & term " \isasymunion " term \\
+         & | & term " - " term \\
+         & | & term " -> " term \\
+         & | & "THE~~"  id  " . " formula\\
+         & | & "lam~~"  id ":" term " . " term \\
+         & | & "INT~~"  id ":" term " . " term \\
+         & | & "UN~~~"  id ":" term " . " term \\
+         & | & "PROD~"  id ":" term " . " term \\
+         & | & "SUM~~"  id ":" term " . " term \\[2ex]
+ formula & = & \hbox{expression of type~$o$} \\
+         & | & term " : " term \\
+         & | & term " \ttilde: " term \\
+         & | & term " <= " term \\
+         & | & term " = " term \\
+         & | & term " \ttilde= " term \\
+         & | & "\ttilde\ " formula \\
+         & | & formula " \& " formula \\
+         & | & formula " | " formula \\
+         & | & formula " --> " formula \\
+         & | & formula " <-> " formula \\
+         & | & "ALL " id ":" term " . " formula \\
+         & | & "EX~~" id ":" term " . " formula \\
+         & | & "ALL~" id~id^* " . " formula \\
+         & | & "EX~~" id~id^* " . " formula \\
+         & | & "EX!~" id~id^* " . " formula
+  \end{array}
+\]
+\caption{Full grammar for ZF} \label{zf-syntax}
+\end{figure} 
+
+
+\section{Binding operators}
+The constant \cdx{Collect} constructs sets by the principle of {\bf
+  separation}.  The syntax for separation is
+\hbox{\tt\ttlbrace$x$:$A$.\ $P[x]$\ttrbrace}, where $P[x]$ is a formula
+that may contain free occurrences of~$x$.  It abbreviates the set \isa{Collect($A$,$\lambda x. P[x]$)}, which consists of all $x\in A$ that
+satisfy~$P[x]$.  Note that \isa{Collect} is an unfortunate choice of
+name: some set theories adopt a set-formation principle, related to
+replacement, called collection.
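+
+For example, every set formed by separation is a subset of~$A$; the
+following sketch relies on the derived rules for \isa{Collect} (or on the
+lemma \tdx{Collect_subset} shown in Fig.\ts\ref{zf-subset}):
+\begin{ttbox}\isastyleminor
+lemma "{\ttlbrace}x \isasymin A. P(x){\ttrbrace} \isasymsubseteq A"
+by blast
+\end{ttbox}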
+
+The constant \cdx{Replace} constructs sets by the principle of {\bf
+  replacement}.  The syntax
+\hbox{\tt\ttlbrace$y$.\ $x$:$A$,$Q[x,y]$\ttrbrace} denotes the set 
+\isa{Replace($A$,$\lambda x\,y. Q[x,y]$)}, which consists of all~$y$ such
+that there exists $x\in A$ satisfying~$Q[x,y]$.  The Replacement Axiom
+has the condition that $Q$ must be single-valued over~$A$: for
+all~$x\in A$ there exists at most one $y$ satisfying~$Q[x,y]$.  A
+single-valued binary predicate is also called a {\bf class function}.
+
+The constant \cdx{RepFun} expresses a special case of replacement,
+where $Q[x,y]$ has the form $y=b[x]$.  Such a $Q$ is trivially
+single-valued, since it is just the graph of the meta-level
+function~$\lambda x. b[x]$.  The resulting set consists of all $b[x]$
+for~$x\in A$.  This is analogous to the \ML{} functional \isa{map},
+since it applies a function to every element of a set.  The syntax is
+\isa{\ttlbrace$b[x]$.\ $x$:$A$\ttrbrace}, which expands to 
+\isa{RepFun($A$,$\lambda x. b[x]$)}.
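+
+For instance, membership in such a set follows directly from membership in
+the argument set.  A sketch, using the derived rules for \isa{RepFun}
+(Fig.\ts\ref{zf-lemmas2}):
+\begin{ttbox}\isastyleminor
+lemma "a \isasymin A ==> succ(a) \isasymin {\ttlbrace}succ(x). x \isasymin A{\ttrbrace}"
+by blast
+\end{ttbox}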
+
+\index{*INT symbol}\index{*UN symbol} 
+General unions and intersections of indexed
+families of sets, namely $\bigcup@{x\in A}B[x]$ and $\bigcap@{x\in A}B[x]$,
+are written \isa{UN $x$:$A$.\ $B[x]$} and \isa{INT $x$:$A$.\ $B[x]$}.
+Their meaning is expressed using \isa{RepFun} as
+\[
+\bigcup(\{B[x]. x\in A\}) \qquad\hbox{and}\qquad 
+\bigcap(\{B[x]. x\in A\}). 
+\]
+General sums $\sum@{x\in A}B[x]$ and products $\prod@{x\in A}B[x]$ can be
+constructed in set theory, where $B[x]$ is a family of sets over~$A$.  They
+have as special cases $A\times B$ and $A\to B$, where $B$ is simply a set.
+This is similar to the situation in Constructive Type Theory (set theory
+has `dependent sets') and calls for similar syntactic conventions.  The
+constants~\cdx{Sigma} and~\cdx{Pi} construct general sums and
+products.  Instead of \isa{Sigma($A$,$B$)} and \isa{Pi($A$,$B$)} we may
+write 
+\isa{SUM $x$:$A$.\ $B[x]$} and \isa{PROD $x$:$A$.\ $B[x]$}.  
+\index{*SUM symbol}\index{*PROD symbol}%
+The special cases \hbox{\tt$A$*$B$} and \hbox{\tt$A$->$B$} abbreviate
+general sums and products over a constant family.\footnote{Unlike normal
+infix operators, {\tt*} and {\tt->} merely define abbreviations; there are
+no constants~\isa{op~*} and~\isa{op~->}.} Isabelle accepts these
+abbreviations in parsing and uses them whenever possible for printing.
+
+\index{*THE symbol} As mentioned above, whenever the axioms assert the
+existence and uniqueness of a set, Isabelle's set theory declares a constant
+for that set.  These constants can express the {\bf definite description}
+operator~$\iota x. P[x]$, which stands for the unique~$a$ satisfying~$P[a]$,
+if such exists.  Since all terms in ZF denote something, a description is
+always meaningful, but we do not know its value unless $P[x]$ defines it
+uniquely.  Using the constant~\cdx{The}, we may write descriptions as 
+\isa{The($\lambda x. P[x]$)} or use the syntax \isa{THE $x$.\ $P[x]$}.
+
+\index{*lam symbol}
+Function sets may be written in $\lambda$-notation; $\lambda x\in A. b[x]$
+stands for the set of all pairs $\pair{x,b[x]}$ for $x\in A$.  In order for
+this to be a set, the function's domain~$A$ must be given.  Using the
+constant~\cdx{Lambda}, we may express function sets as \isa{Lambda($A$,$\lambda x. b[x]$)} or use the syntax \isa{lam $x$:$A$.\ $b[x]$}.
+
+Isabelle's set theory defines two {\bf bounded quantifiers}:
+\begin{eqnarray*}
+   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
+   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
+\end{eqnarray*}
+The constants~\cdx{Ball} and~\cdx{Bex} are defined
+accordingly.  Instead of \isa{Ball($A$,$P$)} and \isa{Bex($A$,$P$)} we may
+write
+\isa{ALL $x$:$A$.\ $P[x]$} and \isa{EX $x$:$A$.\ $P[x]$}.
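+
+Reasoning about the bounded quantifiers is routine once the rules of
+Fig.\ts\ref{zf-lemmas1} are available; a small sketch:
+\begin{ttbox}\isastyleminor
+lemma "({\isasymforall}x \isasymin A. P(x) & Q(x)) ==> ({\isasymforall}x \isasymin A. P(x))"
+by blast
+\end{ttbox}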
+
+
+%%%% ZF.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Let_def}:           Let(s, f) == f(s)
+
+\tdx{Ball_def}:          Ball(A,P) == {\isasymforall}x. x \isasymin A --> P(x)
+\tdx{Bex_def}:           Bex(A,P)  == {\isasymexists}x. x \isasymin A & P(x)
+
+\tdx{subset_def}:        A \isasymsubseteq B  == {\isasymforall}x \isasymin A. x \isasymin B
+\tdx{extension}:         A = B  <->  A \isasymsubseteq B & B \isasymsubseteq A
+
+\tdx{Union_iff}:         A \isasymin Union(C) <-> ({\isasymexists}B \isasymin C. A \isasymin B)
+\tdx{Pow_iff}:           A \isasymin Pow(B) <-> A \isasymsubseteq B
+\tdx{foundation}:        A=0 | ({\isasymexists}x \isasymin A. {\isasymforall}y \isasymin x. y \isasymnotin A)
+
+\tdx{replacement}:       ({\isasymforall}x \isasymin A. {\isasymforall}y z. P(x,y) & P(x,z) --> y=z) ==>
+                   b \isasymin PrimReplace(A,P) <-> ({\isasymexists}x{\isasymin}A. P(x,b))
+\subcaption{The Zermelo-Fraenkel Axioms}
+
+\tdx{Replace_def}: Replace(A,P) == 
+                   PrimReplace(A, \%x y. (\isasymexists!z. P(x,z)) & P(x,y))
+\tdx{RepFun_def}:  RepFun(A,f)  == {\ttlbrace}y . x \isasymin A, y=f(x)\ttrbrace
+\tdx{the_def}:     The(P)       == Union({\ttlbrace}y . x \isasymin {\ttlbrace}0{\ttrbrace}, P(y){\ttrbrace})
+\tdx{if_def}:      if(P,a,b)    == THE z. P & z=a | ~P & z=b
+\tdx{Collect_def}: Collect(A,P) == {\ttlbrace}y . x \isasymin A, x=y & P(x){\ttrbrace}
+\tdx{Upair_def}:   Upair(a,b)   == 
+               {\ttlbrace}y. x\isasymin{}Pow(Pow(0)), x=0 & y=a | x=Pow(0) & y=b{\ttrbrace}
+\subcaption{Consequences of replacement}
+
+\tdx{Inter_def}:   Inter(A) == {\ttlbrace}x \isasymin Union(A) . {\isasymforall}y \isasymin A. x \isasymin y{\ttrbrace}
+\tdx{Un_def}:      A \isasymunion B  == Union(Upair(A,B))
+\tdx{Int_def}:     A \isasyminter B  == Inter(Upair(A,B))
+\tdx{Diff_def}:    A - B    == {\ttlbrace}x \isasymin A . x \isasymnotin B{\ttrbrace}
+\subcaption{Union, intersection, difference}
+\end{alltt*}
+\caption{Rules and axioms of ZF} \label{zf-rules}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{cons_def}:    cons(a,A) == Upair(a,a) \isasymunion A
+\tdx{succ_def}:    succ(i) == cons(i,i)
+\tdx{infinity}:    0 \isasymin Inf & ({\isasymforall}y \isasymin Inf. succ(y) \isasymin Inf)
+\subcaption{Finite and infinite sets}
+
+\tdx{Pair_def}:      <a,b>      == {\ttlbrace}{\ttlbrace}a,a{\ttrbrace}, {\ttlbrace}a,b{\ttrbrace}{\ttrbrace}
+\tdx{split_def}:     split(c,p) == THE y. {\isasymexists}a b. p=<a,b> & y=c(a,b)
+\tdx{fst_def}:       fst(p)     == split(\%x y. x, p)
+\tdx{snd_def}:       snd(p)     == split(\%x y. y, p)
+\tdx{Sigma_def}:     Sigma(A,B) == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x,y>{\ttrbrace}
+\subcaption{Ordered pairs and Cartesian products}
+
+\tdx{converse_def}: converse(r) == {\ttlbrace}z. w\isasymin{}r, {\isasymexists}x y. w=<x,y> & z=<y,x>{\ttrbrace}
+\tdx{domain_def}:   domain(r)   == {\ttlbrace}x. w \isasymin r, {\isasymexists}y. w=<x,y>{\ttrbrace}
+\tdx{range_def}:    range(r)    == domain(converse(r))
+\tdx{field_def}:    field(r)    == domain(r) \isasymunion range(r)
+\tdx{image_def}:    r `` A      == {\ttlbrace}y\isasymin{}range(r) . {\isasymexists}x \isasymin A. <x,y> \isasymin r{\ttrbrace}
+\tdx{vimage_def}:   r -`` A     == converse(r)``A
+\subcaption{Operations on relations}
+
+\tdx{lam_def}:   Lambda(A,b) == {\ttlbrace}<x,b(x)> . x \isasymin A{\ttrbrace}
+\tdx{apply_def}: f`a         == THE y. <a,y> \isasymin f
+\tdx{Pi_def}: Pi(A,B) == {\ttlbrace}f\isasymin{}Pow(Sigma(A,B)). {\isasymforall}x\isasymin{}A. \isasymexists!y. <x,y>\isasymin{}f{\ttrbrace}
+\tdx{restrict_def}:  restrict(f,A) == lam x \isasymin A. f`x
+\subcaption{Functions and general product}
+\end{alltt*}
+\caption{Further definitions of ZF} \label{zf-defs}
+\end{figure}
+
+
+
+\section{The Zermelo-Fraenkel axioms}
+The axioms appear in Fig.\ts \ref{zf-rules}.  They resemble those
+presented by Suppes~\cite{suppes72}.  Most of the theory consists of
+definitions.  In particular, bounded quantifiers and the subset relation
+appear in other axioms.  Object-level quantifiers and implications have
+been replaced by meta-level ones wherever possible, to simplify use of the
+axioms.  
+
+The traditional replacement axiom asserts
+\[ y \in \isa{PrimReplace}(A,P) \bimp (\exists x\in A. P(x,y)) \]
+subject to the condition that $P(x,y)$ is single-valued for all~$x\in A$.
+The Isabelle theory defines \cdx{Replace} to apply
+\cdx{PrimReplace} to the single-valued part of~$P$, namely
+\[ (\exists!z. P(x,z)) \conj P(x,y). \]
+Thus $y\in \isa{Replace}(A,P)$ if and only if there is some~$x$ such that
+$P(x,-)$ holds uniquely for~$y$.  Because the equivalence is unconditional,
+\isa{Replace} is much easier to use than \isa{PrimReplace}; it defines the
+same set, if $P(x,y)$ is single-valued.  The nice syntax for replacement
+expands to \isa{Replace}.
+
+Other consequences of replacement include replacement for 
+meta-level functions
+(\cdx{RepFun}) and definite descriptions (\cdx{The}).
+Axioms for separation (\cdx{Collect}) and unordered pairs
+(\cdx{Upair}) are traditionally assumed, but they actually follow
+from replacement~\cite[pages 237--8]{suppes72}.
+
+The definitions of general intersection, etc., are straightforward.  Note
+the definition of \isa{cons}, which underlies the finite set notation.
+The axiom of infinity gives us a set that contains~0 and is closed under
+successor (\cdx{succ}).  Although this set is not uniquely defined,
+the theory names it (\cdx{Inf}) in order to simplify the
+construction of the natural numbers.
+                                             
+Further definitions appear in Fig.\ts\ref{zf-defs}.  Ordered pairs are
+defined in the standard way, $\pair{a,b}\equiv\{\{a\},\{a,b\}\}$.  Recall
+that \cdx{Sigma}$(A,B)$ generalizes the Cartesian product of two
+sets.  It is defined to be the union of all singleton sets
+$\{\pair{x,y}\}$, for $x\in A$ and $y\in B(x)$.  This is a typical usage of
+general union.
+
+The projections \cdx{fst} and~\cdx{snd} are defined in terms of the
+generalized projection \cdx{split}.  The latter has been borrowed from
+Martin-L\"of's Type Theory, and is often easier to use than \cdx{fst}
+and~\cdx{snd}.
+
+Operations on relations include converse, domain, range, and image.  The
+set $\isa{Pi}(A,B)$ generalizes the space of functions between two sets.
+Note the simple definitions of $\lambda$-abstraction (using
+\cdx{RepFun}) and application (using a definite description).  The
+function \cdx{restrict}$(f,A)$ has the same values as~$f$, but only
+over the domain~$A$.
+
+
+%%%% zf.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{ballI}:     [| !!x. x\isasymin{}A ==> P(x) |] ==> {\isasymforall}x\isasymin{}A. P(x)
+\tdx{bspec}:     [| {\isasymforall}x\isasymin{}A. P(x);  x\isasymin{}A |] ==> P(x)
+\tdx{ballE}:     [| {\isasymforall}x\isasymin{}A. P(x);  P(x) ==> Q;  x \isasymnotin A ==> Q |] ==> Q
+
+\tdx{ball_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
+             ({\isasymforall}x\isasymin{}A. P(x)) <-> ({\isasymforall}x\isasymin{}A'. P'(x))
+
+\tdx{bexI}:      [| P(x);  x\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
+\tdx{bexCI}:     [| {\isasymforall}x\isasymin{}A. ~P(x) ==> P(a);  a\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
+\tdx{bexE}:      [| {\isasymexists}x\isasymin{}A. P(x);  !!x. [| x\isasymin{}A; P(x) |] ==> Q |] ==> Q
+
+\tdx{bex_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
+             ({\isasymexists}x\isasymin{}A. P(x)) <-> ({\isasymexists}x\isasymin{}A'. P'(x))
+\subcaption{Bounded quantifiers}
+
+\tdx{subsetI}:     (!!x. x \isasymin A ==> x \isasymin B) ==> A \isasymsubseteq B
+\tdx{subsetD}:     [| A \isasymsubseteq B;  c \isasymin A |] ==> c \isasymin B
+\tdx{subsetCE}:    [| A \isasymsubseteq B;  c \isasymnotin A ==> P;  c \isasymin B ==> P |] ==> P
+\tdx{subset_refl}:  A \isasymsubseteq A
+\tdx{subset_trans}: [| A \isasymsubseteq B;  B \isasymsubseteq C |] ==> A \isasymsubseteq C
+
+\tdx{equalityI}:   [| A \isasymsubseteq B;  B \isasymsubseteq A |] ==> A = B
+\tdx{equalityD1}:  A = B ==> A \isasymsubseteq B
+\tdx{equalityD2}:  A = B ==> B \isasymsubseteq A
+\tdx{equalityE}:   [| A = B;  [| A \isasymsubseteq B; B \isasymsubseteq A |] ==> P |]  ==>  P
+\subcaption{Subsets and extensionality}
+
+\tdx{emptyE}:        a \isasymin 0 ==> P
+\tdx{empty_subsetI}:  0 \isasymsubseteq A
+\tdx{equals0I}:      [| !!y. y \isasymin A ==> False |] ==> A=0
+\tdx{equals0D}:      [| A=0;  a \isasymin A |] ==> P
+
+\tdx{PowI}:          A \isasymsubseteq B ==> A \isasymin Pow(B)
+\tdx{PowD}:          A \isasymin Pow(B)  ==>  A \isasymsubseteq B
+\subcaption{The empty set; power sets}
+\end{alltt*}
+\caption{Basic derived rules for ZF} \label{zf-lemmas1}
+\end{figure}
+
+
+\section{From basic lemmas to function spaces}
+Faced with so many definitions, it is essential to prove lemmas.  Even
+trivial theorems like $A \int B = B \int A$ would be difficult to
+prove from the definitions alone.  Isabelle's set theory derives many
+rules using a natural deduction style.  Ideally, a natural deduction
+rule should introduce or eliminate just one operator, but this is not
+always practical.  For most operators, we may forget the definition
+and use the derived rules instead.
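+
+For instance, with the derived rules for intersection and extensionality
+installed, the commutativity law mentioned above ought to be a one-line
+proof.  A sketch:
+\begin{ttbox}\isastyleminor
+lemma "A \isasyminter B = B \isasyminter A"
+by blast
+\end{ttbox}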
+
+\subsection{Fundamental lemmas}
+Figure~\ref{zf-lemmas1} presents the derived rules for the most basic
+operators.  The rules for the bounded quantifiers resemble those for the
+ordinary quantifiers, but note that \tdx{ballE} uses a negated assumption
+in the style of Isabelle's classical reasoner.  The \rmindex{congruence
+  rules} \tdx{ball_cong} and \tdx{bex_cong} are required by Isabelle's
+simplifier, but have few other uses.  Congruence rules must be specially
+derived for all binding operators, and henceforth will not be shown.
+
+Figure~\ref{zf-lemmas1} also shows rules for the subset and equality
+relations (proof by extensionality), and rules about the empty set and the
+power set operator.
+
+Figure~\ref{zf-lemmas2} presents rules for replacement and separation.
+The rules for \cdx{Replace} and \cdx{RepFun} are much simpler than
+comparable rules for \isa{PrimReplace} would be.  The principle of
+separation is proved explicitly, although most proofs should use the
+natural deduction rules for \isa{Collect}.  The elimination rule
+\tdx{CollectE} is equivalent to the two destruction rules
+\tdx{CollectD1} and \tdx{CollectD2}, but each rule is suited to
+particular circumstances.  Although too many rules can be confusing, there
+is no reason to aim for a minimal set of rules.  
+
+Figure~\ref{zf-lemmas3} presents rules for general union and intersection.
+The empty intersection should be undefined.  We cannot have
+$\bigcap(\emptyset)=V$ because $V$, the universal class, is not a set.  All
+expressions denote something in ZF set theory; the definition of
+intersection implies $\bigcap(\emptyset)=\emptyset$, but this value is
+arbitrary.  The rule \tdx{InterI} must have a premise to exclude
+the empty intersection.  Some of the laws governing intersections require
+similar premises.
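+
+The premise can be seen in a small example: the assumption $c\in C$ below
+lets \tdx{InterD} and \tdx{UnionI} apply, so the classical reasoner should
+find the proof from the rules of Figures~\ref{zf-lemmas1}
+and~\ref{zf-lemmas3}.  A sketch:
+\begin{ttbox}\isastyleminor
+lemma "c \isasymin C ==> Inter(C) \isasymsubseteq Union(C)"
+by blast
+\end{ttbox}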
+
+
+%the [p] gives better page breaking for the book
+\begin{figure}[p]
+\begin{alltt*}\isastyleminor
+\tdx{ReplaceI}:   [| x\isasymin{}A;  P(x,b);  !!y. P(x,y) ==> y=b |] ==> 
+            b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace}
+
+\tdx{ReplaceE}:   [| b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace};  
+               !!x. [| x\isasymin{}A; P(x,b); {\isasymforall}y. P(x,y)-->y=b |] ==> R 
+            |] ==> R
+
+\tdx{RepFunI}:    [| a\isasymin{}A |] ==> f(a)\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace}
+\tdx{RepFunE}:    [| b\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace};  
+                !!x.[| x\isasymin{}A;  b=f(x) |] ==> P |] ==> P
+
+\tdx{separation}:  a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} <-> a\isasymin{}A & P(a)
+\tdx{CollectI}:    [| a\isasymin{}A;  P(a) |] ==> a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace}
+\tdx{CollectE}:    [| a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace};  [| a\isasymin{}A; P(a) |] ==> R |] ==> R
+\tdx{CollectD1}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> a\isasymin{}A
+\tdx{CollectD2}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> P(a)
+\end{alltt*}
+\caption{Replacement and separation} \label{zf-lemmas2}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{UnionI}: [| B\isasymin{}C;  A\isasymin{}B |] ==> A\isasymin{}Union(C)
+\tdx{UnionE}: [| A\isasymin{}Union(C);  !!B.[| A\isasymin{}B;  B\isasymin{}C |] ==> R |] ==> R
+
+\tdx{InterI}: [| !!x. x\isasymin{}C ==> A\isasymin{}x;  c\isasymin{}C |] ==> A\isasymin{}Inter(C)
+\tdx{InterD}: [| A\isasymin{}Inter(C);  B\isasymin{}C |] ==> A\isasymin{}B
+\tdx{InterE}: [| A\isasymin{}Inter(C);  A\isasymin{}B ==> R;  B \isasymnotin C ==> R |] ==> R
+
+\tdx{UN_I}:   [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x))
+\tdx{UN_E}:   [| b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x));  !!x.[| x\isasymin{}A;  b\isasymin{}B(x) |] ==> R 
+           |] ==> R
+
+\tdx{INT_I}:  [| !!x. x\isasymin{}A ==> b\isasymin{}B(x);  a\isasymin{}A |] ==> b\isasymin{}({\isasymInter}x\isasymin{}A. B(x))
+\tdx{INT_E}:  [| b\isasymin{}({\isasymInter}x\isasymin{}A. B(x));  a\isasymin{}A |] ==> b\isasymin{}B(a)
+\end{alltt*}
+\caption{General union and intersection} \label{zf-lemmas3}
+\end{figure}
+
+
+%%% upair.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{pairing}:   a\isasymin{}Upair(b,c) <-> (a=b | a=c)
+\tdx{UpairI1}:   a\isasymin{}Upair(a,b)
+\tdx{UpairI2}:   b\isasymin{}Upair(a,b)
+\tdx{UpairE}:    [| a\isasymin{}Upair(b,c);  a=b ==> P;  a=c ==> P |] ==> P
+\end{alltt*}
+\caption{Unordered pairs} \label{zf-upair1}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{UnI1}:      c\isasymin{}A ==> c\isasymin{}A \isasymunion B
+\tdx{UnI2}:      c\isasymin{}B ==> c\isasymin{}A \isasymunion B
+\tdx{UnCI}:      (c \isasymnotin B ==> c\isasymin{}A) ==> c\isasymin{}A \isasymunion B
+\tdx{UnE}:       [| c\isasymin{}A \isasymunion B;  c\isasymin{}A ==> P;  c\isasymin{}B ==> P |] ==> P
+
+\tdx{IntI}:      [| c\isasymin{}A;  c\isasymin{}B |] ==> c\isasymin{}A \isasyminter B
+\tdx{IntD1}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}A
+\tdx{IntD2}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}B
+\tdx{IntE}:      [| c\isasymin{}A \isasyminter B;  [| c\isasymin{}A; c\isasymin{}B |] ==> P |] ==> P
+
+\tdx{DiffI}:     [| c\isasymin{}A;  c \isasymnotin B |] ==> c\isasymin{}A - B
+\tdx{DiffD1}:    c\isasymin{}A - B ==> c\isasymin{}A
+\tdx{DiffD2}:    c\isasymin{}A - B ==> c  \isasymnotin  B
+\tdx{DiffE}:     [| c\isasymin{}A - B;  [| c\isasymin{}A; c \isasymnotin B |] ==> P |] ==> P
+\end{alltt*}
+\caption{Union, intersection, difference} \label{zf-Un}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{consI1}:    a\isasymin{}cons(a,B)
+\tdx{consI2}:    a\isasymin{}B ==> a\isasymin{}cons(b,B)
+\tdx{consCI}:    (a \isasymnotin B ==> a=b) ==> a\isasymin{}cons(b,B)
+\tdx{consE}:     [| a\isasymin{}cons(b,A);  a=b ==> P;  a\isasymin{}A ==> P |] ==> P
+
+\tdx{singletonI}:  a\isasymin{}{\ttlbrace}a{\ttrbrace}
+\tdx{singletonE}:  [| a\isasymin{}{\ttlbrace}b{\ttrbrace}; a=b ==> P |] ==> P
+\end{alltt*}
+\caption{Finite and singleton sets} \label{zf-upair2}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{succI1}:    i\isasymin{}succ(i)
+\tdx{succI2}:    i\isasymin{}j ==> i\isasymin{}succ(j)
+\tdx{succCI}:    (i \isasymnotin j ==> i=j) ==> i\isasymin{}succ(j)
+\tdx{succE}:     [| i\isasymin{}succ(j);  i=j ==> P;  i\isasymin{}j ==> P |] ==> P
+\tdx{succ_neq_0}:  [| succ(n)=0 |] ==> P
+\tdx{succ_inject}: succ(m) = succ(n) ==> m=n
+\end{alltt*}
+\caption{The successor function} \label{zf-succ}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{the_equality}: [| P(a); !!x. P(x) ==> x=a |] ==> (THE x. P(x))=a
+\tdx{theI}:         \isasymexists! x. P(x) ==> P(THE x. P(x))
+
+\tdx{if_P}:          P ==> (if P then a else b) = a
+\tdx{if_not_P}:     ~P ==> (if P then a else b) = b
+
+\tdx{mem_asym}:     [| a\isasymin{}b;  b\isasymin{}a |] ==> P
+\tdx{mem_irrefl}:   a\isasymin{}a ==> P
+\end{alltt*}
+\caption{Descriptions; non-circularity} \label{zf-the}
+\end{figure}
+
+
+\subsection{Unordered pairs and finite sets}
+Figure~\ref{zf-upair1} presents the principle of unordered pairing, along
+with its derived rules.  Binary union and intersection are defined in terms
+of unordered pairs (Fig.\ts\ref{zf-Un}).  Set difference is also included.  The
+rule \tdx{UnCI} is useful for classical reasoning about unions,
+like \isa{disjCI}\@; it supersedes \tdx{UnI1} and
+\tdx{UnI2}, but these rules are often easier to work with.  For
+intersection and difference we have both elimination and destruction rules.
+Again, there is no reason to provide a minimal rule set.
+
+Figure~\ref{zf-upair2} is concerned with finite sets: it presents rules
+for~\isa{cons}, the finite set constructor, and rules for singleton
+sets.  Figure~\ref{zf-succ} presents derived rules for the successor
+function, which is defined in terms of~\isa{cons}.  The proof that 
+\isa{succ} is injective appears to require the Axiom of Foundation.
+
+Definite descriptions (\sdx{THE}) are defined in terms of the singleton
+set~$\{0\}$, but their derived rules fortunately hide this
+(Fig.\ts\ref{zf-the}).  The rule~\tdx{theI} is difficult to apply
+because of the two occurrences of~$\Var{P}$.  However,
+\tdx{the_equality} does not have this problem and the files contain
+many examples of its use.
+
+Finally, the impossibility of having both $a\in b$ and $b\in a$
+(\tdx{mem_asym}) is proved by applying the Axiom of Foundation to
+the set $\{a,b\}$.  The impossibility of $a\in a$ is a trivial consequence.
+
+
+%%% subset.thy?
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Union_upper}:    B\isasymin{}A ==> B \isasymsubseteq Union(A)
+\tdx{Union_least}:    [| !!x. x\isasymin{}A ==> x \isasymsubseteq C |] ==> Union(A) \isasymsubseteq C
+
+\tdx{Inter_lower}:    B\isasymin{}A ==> Inter(A) \isasymsubseteq B
+\tdx{Inter_greatest}: [| a\isasymin{}A; !!x. x\isasymin{}A ==> C \isasymsubseteq x |] ==> C\isasymsubseteq{}Inter(A)
+
+\tdx{Un_upper1}:      A \isasymsubseteq A \isasymunion B
+\tdx{Un_upper2}:      B \isasymsubseteq A \isasymunion B
+\tdx{Un_least}:       [| A \isasymsubseteq C;  B \isasymsubseteq C |] ==> A \isasymunion B \isasymsubseteq C
+
+\tdx{Int_lower1}:     A \isasyminter B \isasymsubseteq A
+\tdx{Int_lower2}:     A \isasyminter B \isasymsubseteq B
+\tdx{Int_greatest}:   [| C \isasymsubseteq A;  C \isasymsubseteq B |] ==> C \isasymsubseteq A \isasyminter B
+
+\tdx{Diff_subset}:    A-B \isasymsubseteq A
+\tdx{Diff_contains}:  [| C \isasymsubseteq A;  C \isasyminter B = 0 |] ==> C \isasymsubseteq A-B
+
+\tdx{Collect_subset}: Collect(A,P) \isasymsubseteq A
+\end{alltt*}
+\caption{Subset and lattice properties} \label{zf-subset}
+\end{figure}
+
+
+\subsection{Subset and lattice properties}
+The subset relation is a complete lattice.  Unions form least upper bounds;
+non-empty intersections form greatest lower bounds.  Figure~\ref{zf-subset}
+shows the corresponding rules.  A few other laws involving subsets are
+included. 
+Reasoning directly about subsets often yields clearer proofs than
+reasoning about the membership relation.  Section~\ref{sec:ZF-pow-example}
+below presents an example of this, proving the equation 
+${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.
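+
+That equation ought to be provable in a single step by the classical
+reasoner, using the rules of this section; a one-line sketch (the worked
+example in Section~\ref{sec:ZF-pow-example} develops the proof in more
+detail):
+\begin{ttbox}\isastyleminor
+lemma "Pow(A) \isasyminter Pow(B) = Pow(A \isasyminter B)"
+by blast
+\end{ttbox}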
+
+%%% pair.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Pair_inject1}: <a,b> = <c,d> ==> a=c
+\tdx{Pair_inject2}: <a,b> = <c,d> ==> b=d
+\tdx{Pair_inject}:  [| <a,b> = <c,d>;  [| a=c; b=d |] ==> P |] ==> P
+\tdx{Pair_neq_0}:   <a,b>=0 ==> P
+
+\tdx{fst_conv}:     fst(<a,b>) = a
+\tdx{snd_conv}:     snd(<a,b>) = b
+\tdx{split}:        split(\%x y. c(x,y), <a,b>) = c(a,b)
+
+\tdx{SigmaI}:     [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> <a,b>\isasymin{}Sigma(A,B)
+
+\tdx{SigmaE}:     [| c\isasymin{}Sigma(A,B);  
+                !!x y.[| x\isasymin{}A; y\isasymin{}B(x); c=<x,y> |] ==> P |] ==> P
+
+\tdx{SigmaE2}:    [| <a,b>\isasymin{}Sigma(A,B);    
+                [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> P   |] ==> P
+\end{alltt*}
+\caption{Ordered pairs; projections; general sums} \label{zf-pair}
+\end{figure}
+
+
+\subsection{Ordered pairs} \label{sec:pairs}
+
+Figure~\ref{zf-pair} presents the rules governing ordered pairs,
+projections and general sums --- in particular, that
+$\{\{a\},\{a,b\}\}$ functions as an ordered pair.  This property is
+expressed as two destruction rules,
+\tdx{Pair_inject1} and \tdx{Pair_inject2}, and equivalently
+as the elimination rule \tdx{Pair_inject}.
+
+The rule \tdx{Pair_neq_0} asserts $\pair{a,b}\neq\emptyset$.  This
+is a property of $\{\{a\},\{a,b\}\}$, and need not hold for other 
+encodings of ordered pairs.  The non-standard ordered pairs mentioned below
+satisfy $\pair{\emptyset;\emptyset}=\emptyset$.
+
+The natural deduction rules \tdx{SigmaI} and \tdx{SigmaE}
+assert that \cdx{Sigma}$(A,B)$ consists of all pairs of the form
+$\pair{x,y}$, for $x\in A$ and $y\in B(x)$.  The rule \tdx{SigmaE2}
+merely states that $\pair{a,b}\in \isa{Sigma}(A,B)$ implies $a\in A$ and
+$b\in B(a)$.
+
+In addition, it is possible to use tuples as patterns in abstractions:
+\begin{center}
+{\tt\%<$x$,$y$>. $t$} \quad stands for\quad \isa{split(\%$x$ $y$.\ $t$)}
+\end{center}
+Nested patterns are translated recursively:
+{\tt\%<$x$,$y$,$z$>. $t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>. $t$} $\leadsto$
+\isa{split(\%$x$.\%<$y$,$z$>. $t$)} $\leadsto$ \isa{split(\%$x$. split(\%$y$
+  $z$.\ $t$))}.  The reverse translation is performed upon printing.
+\begin{warn}
+  The translation between patterns and \isa{split} is performed automatically
+  by the parser and printer.  Thus the internal and external form of a term
+  may differ, which affects proofs.  For example the term \isa{(\%<x,y>.<y,x>)(<a,b>)} requires the theorem \isa{split} to rewrite to
+  {\tt<b,a>}.
+\end{warn}
+In addition to explicit $\lambda$-abstractions, patterns can be used in any
+variable binding construct which is internally described by a
+$\lambda$-abstraction.  Here are some important examples:
+\begin{description}
+\item[Let:] \isa{let {\it pattern} = $t$ in $u$}
+\item[Choice:] \isa{THE~{\it pattern}~.~$P$}
+\item[Set operations:] \isa{\isasymUnion~{\it pattern}:$A$.~$B$}
+\item[Comprehension:] \isa{{\ttlbrace}~{\it pattern}:$A$~.~$P$~{\ttrbrace}}
+\end{description}
+
+
+%%% domrange.thy?
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{domainI}:     <a,b>\isasymin{}r ==> a\isasymin{}domain(r)
+\tdx{domainE}:     [| a\isasymin{}domain(r); !!y. <a,y>\isasymin{}r ==> P |] ==> P
+\tdx{domain_subset}: domain(Sigma(A,B)) \isasymsubseteq A
+
+\tdx{rangeI}:      <a,b>\isasymin{}r ==> b\isasymin{}range(r)
+\tdx{rangeE}:      [| b\isasymin{}range(r); !!x. <x,b>\isasymin{}r ==> P |] ==> P
+\tdx{range_subset}: range(A*B) \isasymsubseteq B
+
+\tdx{fieldI1}:     <a,b>\isasymin{}r ==> a\isasymin{}field(r)
+\tdx{fieldI2}:     <a,b>\isasymin{}r ==> b\isasymin{}field(r)
+\tdx{fieldCI}:     (<c,a> \isasymnotin r ==> <a,b>\isasymin{}r) ==> a\isasymin{}field(r)
+
+\tdx{fieldE}:      [| a\isasymin{}field(r); 
+                !!x. <a,x>\isasymin{}r ==> P; 
+                !!x. <x,a>\isasymin{}r ==> P      
+             |] ==> P
+
+\tdx{field_subset}:  field(A*A) \isasymsubseteq A
+\end{alltt*}
+\caption{Domain, range and field of a relation} \label{zf-domrange}
+\end{figure}
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{imageI}:      [| <a,b>\isasymin{}r; a\isasymin{}A |] ==> b\isasymin{}r``A
+\tdx{imageE}:      [| b\isasymin{}r``A; !!x.[| <x,b>\isasymin{}r; x\isasymin{}A |] ==> P |] ==> P
+
+\tdx{vimageI}:     [| <a,b>\isasymin{}r; b\isasymin{}B |] ==> a\isasymin{}r-``B
+\tdx{vimageE}:     [| a\isasymin{}r-``B; !!x.[| <a,x>\isasymin{}r;  x\isasymin{}B |] ==> P |] ==> P
+\end{alltt*}
+\caption{Image and inverse image} \label{zf-domrange2}
+\end{figure}
+
+
+\subsection{Relations}
+Figure~\ref{zf-domrange} presents rules involving relations, which are sets
+of ordered pairs.  The converse of a relation~$r$ is the set of all pairs
+$\pair{y,x}$ such that $\pair{x,y}\in r$; if $r$ is an injective function, then
+{\cdx{converse}$(r)$} is its inverse.  The rules for the domain
+operation, namely \tdx{domainI} and~\tdx{domainE}, assert that
+\cdx{domain}$(r)$ consists of all~$x$ such that $r$ contains
+some pair of the form~$\pair{x,y}$.  The range operation is similar, and
+the field of a relation is merely the union of its domain and range.  
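+
+For example, the domain of any relation is included in its field.  A
+sketch using the rules of Fig.\ts\ref{zf-domrange}:
+\begin{ttbox}\isastyleminor
+lemma "domain(r) \isasymsubseteq field(r)"
+by (blast intro: fieldI1 elim: domainE)
+\end{ttbox}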
+
+Figure~\ref{zf-domrange2} presents rules for images and inverse images.
+Note that these operations are generalisations of range and domain,
+respectively. 
+
+
+%%% func.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{fun_is_rel}:     f\isasymin{}Pi(A,B) ==> f \isasymsubseteq Sigma(A,B)
+
+\tdx{apply_equality}:  [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> f`a = b
+\tdx{apply_equality2}: [| <a,b>\isasymin{}f; <a,c>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b=c
+
+\tdx{apply_type}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> f`a\isasymin{}B(a)
+\tdx{apply_Pair}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> <a,f`a>\isasymin{}f
+\tdx{apply_iff}:      f\isasymin{}Pi(A,B) ==> <a,b>\isasymin{}f <-> a\isasymin{}A & f`a = b
+
+\tdx{fun_extension}:  [| f\isasymin{}Pi(A,B); g\isasymin{}Pi(A,D);
+                   !!x. x\isasymin{}A ==> f`x = g`x     |] ==> f=g
+
+\tdx{domain_type}:    [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> a\isasymin{}A
+\tdx{range_type}:     [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b\isasymin{}B(a)
+
+\tdx{Pi_type}:        [| f\isasymin{}A->C; !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> f\isasymin{}Pi(A,B)
+\tdx{domain_of_fun}:  f\isasymin{}Pi(A,B) ==> domain(f)=A
+\tdx{range_of_fun}:   f\isasymin{}Pi(A,B) ==> f\isasymin{}A->range(f)
+
+\tdx{restrict}:       a\isasymin{}A ==> restrict(f,A) ` a = f`a
+\tdx{restrict_type}:  [| !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> 
+                restrict(f,A)\isasymin{}Pi(A,B)
+\end{alltt*}
+\caption{Functions} \label{zf-func1}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{lamI}:     a\isasymin{}A ==> <a,b(a)>\isasymin{}(lam x\isasymin{}A. b(x))
+\tdx{lamE}:     [| p\isasymin{}(lam x\isasymin{}A. b(x)); !!x.[| x\isasymin{}A; p=<x,b(x)> |] ==> P 
+          |] ==>  P
+
+\tdx{lam_type}: [| !!x. x\isasymin{}A ==> b(x)\isasymin{}B(x) |] ==> (lam x\isasymin{}A. b(x))\isasymin{}Pi(A,B)
+
+\tdx{beta}:     a\isasymin{}A ==> (lam x\isasymin{}A. b(x)) ` a = b(a)
+\tdx{eta}:      f\isasymin{}Pi(A,B) ==> (lam x\isasymin{}A. f`x) = f
+\end{alltt*}
+\caption{$\lambda$-abstraction} \label{zf-lam}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{fun_empty}:           0\isasymin{}0->0
+\tdx{fun_single}:          {\ttlbrace}<a,b>{\ttrbrace}\isasymin{}{\ttlbrace}a{\ttrbrace} -> {\ttlbrace}b{\ttrbrace}
+
+\tdx{fun_disjoint_Un}:     [| f\isasymin{}A->B; g\isasymin{}C->D; A \isasyminter C = 0  |] ==>  
+                     (f \isasymunion g)\isasymin{}(A \isasymunion C) -> (B \isasymunion D)
+
+\tdx{fun_disjoint_apply1}: [| a\isasymin{}A; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
+                     (f \isasymunion g)`a = f`a
+
+\tdx{fun_disjoint_apply2}: [| c\isasymin{}C; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
+                     (f \isasymunion g)`c = g`c
+\end{alltt*}
+\caption{Constructing functions from smaller sets} \label{zf-func2}
+\end{figure}
+
+
+\subsection{Functions}
+Functions, represented by graphs, are notoriously difficult to reason
+about.  The ZF theory provides many derived rules, which overlap more
+than they ought.  This section presents the more important rules.
+
+Figure~\ref{zf-func1} presents the basic properties of \cdx{Pi}$(A,B)$,
+the generalized function space.  For example, if $f$ is a function and
+$\pair{a,b}\in f$, then $f`a=b$ (\tdx{apply_equality}).  Two functions
+are equal provided they have equal domains and deliver equal results
+(\tdx{fun_extension}).
+
+By \tdx{Pi_type}, a function typing of the form $f\in A\to C$ can be
+refined to the dependent typing $f\in\prod@{x\in A}B(x)$, given a suitable
+family of sets $\{B(x)\}@{x\in A}$.  Conversely, by \tdx{range_of_fun},
+any dependent typing can be flattened to yield a function type of the form
+$A\to C$; here, $C=\isa{range}(f)$.
+
+Among the laws for $\lambda$-abstraction, \tdx{lamI} and \tdx{lamE}
+describe the graph of the generated function, while \tdx{beta} and
+\tdx{eta} are the standard conversions.  We essentially have a
+dependently-typed $\lambda$-calculus (Fig.\ts\ref{zf-lam}).
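+
+For instance, \tdx{beta} computes applications of explicitly given
+function sets.  A sketch (the precise concrete syntax of the \isa{lam}
+binder appears in Fig.\ts\ref{zf-trans}):
+\begin{ttbox}\isastyleminor
+lemma "a \isasymin A ==> (lam x \isasymin A. <x,x>) ` a = <a,a>"
+by (simp add: beta)
+\end{ttbox}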
+
+Figure~\ref{zf-func2} presents some rules that can be used to construct
+functions explicitly.  We start with functions consisting of at most one
+pair, and may form the union of two functions provided their domains are
+disjoint.  
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Int_absorb}:        A \isasyminter A = A
+\tdx{Int_commute}:       A \isasyminter B = B \isasyminter A
+\tdx{Int_assoc}:         (A \isasyminter B) \isasyminter C  =  A \isasyminter (B \isasyminter C)
+\tdx{Int_Un_distrib}:    (A \isasymunion B) \isasyminter C  =  (A \isasyminter C) \isasymunion (B \isasyminter C)
+
+\tdx{Un_absorb}:         A \isasymunion A = A
+\tdx{Un_commute}:        A \isasymunion B = B \isasymunion A
+\tdx{Un_assoc}:          (A \isasymunion B) \isasymunion C  =  A \isasymunion (B \isasymunion C)
+\tdx{Un_Int_distrib}:    (A \isasyminter B) \isasymunion C  =  (A \isasymunion C) \isasyminter (B \isasymunion C)
+
+\tdx{Diff_cancel}:       A-A = 0
+\tdx{Diff_disjoint}:     A \isasyminter (B-A) = 0
+\tdx{Diff_partition}:    A \isasymsubseteq B ==> A \isasymunion (B-A) = B
+\tdx{double_complement}: [| A \isasymsubseteq B; B \isasymsubseteq C |] ==> (B - (C-A)) = A
+\tdx{Diff_Un}:           A - (B \isasymunion C) = (A-B) \isasyminter (A-C)
+\tdx{Diff_Int}:          A - (B \isasyminter C) = (A-B) \isasymunion (A-C)
+
+\tdx{Union_Un_distrib}:  Union(A \isasymunion B) = Union(A) \isasymunion Union(B)
+\tdx{Inter_Un_distrib}:  [| a \isasymin A;  b \isasymin B |] ==> 
+                   Inter(A \isasymunion B) = Inter(A) \isasyminter Inter(B)
+
+\tdx{Int_Union_RepFun}:  A \isasyminter Union(B) = ({\isasymUnion}C \isasymin B. A \isasyminter C)
+
+\tdx{Un_Inter_RepFun}:   b \isasymin B ==> 
+                   A \isasymunion Inter(B) = ({\isasymInter}C \isasymin B. A \isasymunion C)
+
+\tdx{SUM_Un_distrib1}:   (SUM x \isasymin A \isasymunion B. C(x)) = 
+                   (SUM x \isasymin A. C(x)) \isasymunion (SUM x \isasymin B. C(x))
+
+\tdx{SUM_Un_distrib2}:   (SUM x \isasymin C. A(x) \isasymunion B(x)) =
+                   (SUM x \isasymin C. A(x)) \isasymunion (SUM x \isasymin C. B(x))
+
+\tdx{SUM_Int_distrib1}:  (SUM x \isasymin A \isasyminter B. C(x)) =
+                   (SUM x \isasymin A. C(x)) \isasyminter (SUM x \isasymin B. C(x))
+
+\tdx{SUM_Int_distrib2}:  (SUM x \isasymin C. A(x) \isasyminter B(x)) =
+                   (SUM x \isasymin C. A(x)) \isasyminter (SUM x \isasymin C. B(x))
+\end{alltt*}
+\caption{Equalities} \label{zf-equalities}
+\end{figure}
+
+
+\begin{figure}
+%\begin{constants} 
+%  \cdx{1}       & $i$           &       & $\{\emptyset\}$       \\
+%  \cdx{bool}    & $i$           &       & the set $\{\emptyset,1\}$     \\
+%  \cdx{cond}   & $[i,i,i]\To i$ &       & conditional for \isa{bool}    \\
+%  \cdx{not}    & $i\To i$       &       & negation for \isa{bool}       \\
+%  \sdx{and}    & $[i,i]\To i$   & Left 70 & conjunction for \isa{bool}  \\
+%  \sdx{or}     & $[i,i]\To i$   & Left 65 & disjunction for \isa{bool}  \\
+%  \sdx{xor}    & $[i,i]\To i$   & Left 65 & exclusive-or for \isa{bool}
+%\end{constants}
+%
+\begin{alltt*}\isastyleminor
+\tdx{bool_def}:      bool == {\ttlbrace}0,1{\ttrbrace}
+\tdx{cond_def}:      cond(b,c,d) == if b=1 then c else d
+\tdx{not_def}:       not(b)  == cond(b,0,1)
+\tdx{and_def}:       a and b == cond(a,b,0)
+\tdx{or_def}:        a or b  == cond(a,1,b)
+\tdx{xor_def}:       a xor b == cond(a,not(b),b)
+
+\tdx{bool_1I}:       1 \isasymin bool
+\tdx{bool_0I}:       0 \isasymin bool
+\tdx{boolE}:         [| c \isasymin bool;  c=1 ==> P;  c=0 ==> P |] ==> P
+\tdx{cond_1}:        cond(1,c,d) = c
+\tdx{cond_0}:        cond(0,c,d) = d
+\end{alltt*}
+\caption{The booleans} \label{zf-bool}
+\end{figure}
+
+
+\section{Further developments}
+The next group of developments is complex and extensive, and only
+highlights can be covered here.  It involves many theories and proofs. 
+
+Figure~\ref{zf-equalities} presents commutative, associative, distributive,
+and idempotency laws of union and intersection, along with other equations.
+
+Theory \thydx{Bool} defines $\{0,1\}$ as a set of booleans, with the usual
+operators including a conditional (Fig.\ts\ref{zf-bool}).  Although ZF is a
+first-order theory, you can obtain the effect of higher-order logic using
+\isa{bool}-valued functions, for example.  The constant~\isa{1} is
+translated to \isa{succ(0)}.
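+
+Computations on the booleans proceed by unfolding the definitions and
+applying the \isa{cond} rules; for example, the following sketch should go
+through by simplification:
+\begin{ttbox}\isastyleminor
+lemma "1 and 0 = 0"
+by (simp add: and_def cond_1)
+\end{ttbox}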
+
+\begin{figure}
+\index{*"+ symbol}
+\begin{constants}
+  \it symbol    & \it meta-type & \it priority & \it description \\ 
+  \tt +         & $[i,i]\To i$  &  Right 65     & disjoint union operator\\
+  \cdx{Inl}~~\cdx{Inr}  & $i\To i$      &       & injections\\
+  \cdx{case}    & $[i\To i,i\To i, i]\To i$ &   & conditional for $A+B$
+\end{constants}
+\begin{alltt*}\isastyleminor
+\tdx{sum_def}:   A+B == {\ttlbrace}0{\ttrbrace}*A \isasymunion {\ttlbrace}1{\ttrbrace}*B
+\tdx{Inl_def}:   Inl(a) == <0,a>
+\tdx{Inr_def}:   Inr(b) == <1,b>
+\tdx{case_def}:  case(c,d,u) == split(\%y z. cond(y, d(z), c(z)), u)
+
+\tdx{InlI}:      a \isasymin A ==> Inl(a) \isasymin A+B
+\tdx{InrI}:      b \isasymin B ==> Inr(b) \isasymin A+B
+
+\tdx{Inl_inject}:  Inl(a)=Inl(b) ==> a=b
+\tdx{Inr_inject}:  Inr(a)=Inr(b) ==> a=b
+\tdx{Inl_neq_Inr}: Inl(a)=Inr(b) ==> P
+
+\tdx{sum_iff}:  u \isasymin A+B <-> ({\isasymexists}x\isasymin{}A. u=Inl(x)) | ({\isasymexists}y\isasymin{}B. u=Inr(y))
+
+\tdx{case_Inl}:  case(c,d,Inl(a)) = c(a)
+\tdx{case_Inr}:  case(c,d,Inr(b)) = d(b)
+\end{alltt*}
+\caption{Disjoint unions} \label{zf-sum}
+\end{figure}
+
+
+\subsection{Disjoint unions}
+
+Theory \thydx{Sum} defines the disjoint union of two sets, with
+injections and a case analysis operator (Fig.\ts\ref{zf-sum}).  Disjoint
+unions play a role in datatype definitions, particularly when there is
+mutual recursion~\cite{paulson-set-II}.
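+
+The \isa{case} operator computes by the equations \tdx{case_Inl} and
+\tdx{case_Inr}; for example, a small sketch:
+\begin{ttbox}\isastyleminor
+lemma "case(\%x. x, \%y. 0, Inl(a)) = a"
+by (simp add: case_Inl)
+\end{ttbox}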
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{QPair_def}:      <a;b> == a+b
+\tdx{qsplit_def}:     qsplit(c,p)  == THE y. {\isasymexists}a b. p=<a;b> & y=c(a,b)
+\tdx{qfsplit_def}:    qfsplit(R,z) == {\isasymexists}x y. z=<x;y> & R(x,y)
+\tdx{qconverse_def}:  qconverse(r) == {\ttlbrace}z. w \isasymin r, {\isasymexists}x y. w=<x;y> & z=<y;x>{\ttrbrace}
+\tdx{QSigma_def}:     QSigma(A,B)  == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x;y>{\ttrbrace}
+
+\tdx{qsum_def}:       A <+> B      == ({\ttlbrace}0{\ttrbrace} <*> A) \isasymunion ({\ttlbrace}1{\ttrbrace} <*> B)
+\tdx{QInl_def}:       QInl(a)      == <0;a>
+\tdx{QInr_def}:       QInr(b)      == <1;b>
+\tdx{qcase_def}:      qcase(c,d)   == qsplit(\%y z. cond(y, d(z), c(z)))
+\end{alltt*}
+\caption{Non-standard pairs, products and sums} \label{zf-qpair}
+\end{figure}
+
+
+\subsection{Non-standard ordered pairs}
+
+Theory \thydx{QPair} defines a notion of ordered pair that admits
+non-well-founded tupling (Fig.\ts\ref{zf-qpair}).  Such pairs are written
+{\tt<$a$;$b$>}.  It also defines the eliminator \cdx{qsplit}, the
+converse operator \cdx{qconverse}, and the summation operator
+\cdx{QSigma}.  These are completely analogous to the corresponding
+versions for standard ordered pairs.  The theory goes on to define a
+non-standard notion of disjoint sum using non-standard pairs.  All of these
+concepts satisfy the same properties as their standard counterparts; in
+addition, {\tt<$a$;$b$>} is continuous.  The theory supports coinductive
+definitions, for example of infinite lists~\cite{paulson-mscs}.
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{bnd_mono_def}:  bnd_mono(D,h) == 
+               h(D)\isasymsubseteq{}D & ({\isasymforall}W X. W\isasymsubseteq{}X --> X\isasymsubseteq{}D --> h(W)\isasymsubseteq{}h(X))
+
+\tdx{lfp_def}:       lfp(D,h) == Inter({\ttlbrace}X \isasymin Pow(D). h(X) \isasymsubseteq X{\ttrbrace})
+\tdx{gfp_def}:       gfp(D,h) == Union({\ttlbrace}X \isasymin Pow(D). X \isasymsubseteq h(X){\ttrbrace})
+
+
+\tdx{lfp_lowerbound}: [| h(A) \isasymsubseteq A;  A \isasymsubseteq D |] ==> lfp(D,h) \isasymsubseteq A
+
+\tdx{lfp_subset}:    lfp(D,h) \isasymsubseteq D
+
+\tdx{lfp_greatest}:  [| bnd_mono(D,h);  
+                  !!X. [| h(X) \isasymsubseteq X;  X \isasymsubseteq D |] ==> A \isasymsubseteq X 
+               |] ==> A \isasymsubseteq lfp(D,h)
+
+\tdx{lfp_Tarski}:    bnd_mono(D,h) ==> lfp(D,h) = h(lfp(D,h))
+
+\tdx{induct}:        [| a \isasymin lfp(D,h);  bnd_mono(D,h);
+                  !!x. x \isasymin h(Collect(lfp(D,h),P)) ==> P(x)
+               |] ==> P(a)
+
+\tdx{lfp_mono}:      [| bnd_mono(D,h);  bnd_mono(E,i);
+                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
+               |] ==> lfp(D,h) \isasymsubseteq lfp(E,i)
+
+\tdx{gfp_upperbound}: [| A \isasymsubseteq h(A);  A \isasymsubseteq D |] ==> A \isasymsubseteq gfp(D,h)
+
+\tdx{gfp_subset}:    gfp(D,h) \isasymsubseteq D
+
+\tdx{gfp_least}:     [| bnd_mono(D,h);  
+                  !!X. [| X \isasymsubseteq h(X);  X \isasymsubseteq D |] ==> X \isasymsubseteq A
+               |] ==> gfp(D,h) \isasymsubseteq A
+
+\tdx{gfp_Tarski}:    bnd_mono(D,h) ==> gfp(D,h) = h(gfp(D,h))
+
+\tdx{coinduct}:      [| bnd_mono(D,h); a \isasymin X; X \isasymsubseteq h(X \isasymunion gfp(D,h)); X \isasymsubseteq D 
+               |] ==> a \isasymin gfp(D,h)
+
+\tdx{gfp_mono}:      [| bnd_mono(D,h);  D \isasymsubseteq E;
+                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
+               |] ==> gfp(D,h) \isasymsubseteq gfp(E,i)
+\end{alltt*}
+\caption{Least and greatest fixedpoints} \label{zf-fixedpt}
+\end{figure}
+
+
+\subsection{Least and greatest fixedpoints}
+
+The Knaster-Tarski Theorem states that every monotone function over a
+complete lattice has a fixedpoint.  Theory \thydx{Fixedpt} proves the
+Theorem only for a particular lattice, namely the lattice of subsets of a
+set (Fig.\ts\ref{zf-fixedpt}).  The theory defines least and greatest
+fixedpoint operators with corresponding induction and coinduction rules.
+These are essential to many definitions that follow, including the natural
+numbers and the transitive closure operator.  The (co)inductive definition
+package also uses the fixedpoint operators~\cite{paulson-CADE}.  See
+Davey and Priestley~\cite{davey-priestley} for more on the Knaster-Tarski
+Theorem and my paper~\cite{paulson-set-II} for discussion of the Isabelle
+proofs.
+
+Monotonicity properties are proved for most of the set-forming operations:
+union, intersection, Cartesian product, image, domain, range, etc.  These
+are useful for applying the Knaster-Tarski Fixedpoint Theorem.  The proofs
+themselves are trivial applications of Isabelle's classical reasoner. 
+
+
+\subsection{Finite sets and lists}
+
+Theory \texttt{Finite} (Figure~\ref{zf-fin}) defines the finite set operator;
+$\isa{Fin}(A)$ is the set of all finite sets over~$A$.  The theory employs
+Isabelle's inductive definition package, which proves various rules
+automatically.  The induction rule shown is stronger than the one proved by
+the package.  The theory also defines the set of all finite functions
+between two given sets.
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Fin.emptyI}      0 \isasymin Fin(A)
+\tdx{Fin.consI}       [| a \isasymin A;  b \isasymin Fin(A) |] ==> cons(a,b) \isasymin Fin(A)
+
+\tdx{Fin_induct}
+    [| b \isasymin Fin(A);
+       P(0);
+       !!x y. [| x\isasymin{}A; y\isasymin{}Fin(A); x\isasymnotin{}y; P(y) |] ==> P(cons(x,y))
+    |] ==> P(b)
+
+\tdx{Fin_mono}:       A \isasymsubseteq B ==> Fin(A) \isasymsubseteq Fin(B)
+\tdx{Fin_UnI}:        [| b \isasymin Fin(A);  c \isasymin Fin(A) |] ==> b \isasymunion c \isasymin Fin(A)
+\tdx{Fin_UnionI}:     C \isasymin Fin(Fin(A)) ==> Union(C) \isasymin Fin(A)
+\tdx{Fin_subset}:     [| c \isasymsubseteq b;  b \isasymin Fin(A) |] ==> c \isasymin Fin(A)
+\end{alltt*}
+\caption{The finite set operator} \label{zf-fin}
+\end{figure}
+
+\begin{figure}
+\begin{constants}
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \cdx{list}    & $i\To i$      && lists over some set\\
+  \cdx{list_case} & $[i, [i,i]\To i, i] \To i$  && conditional for $list(A)$ \\
+  \cdx{map}     & $[i\To i, i] \To i$   &       & mapping functional\\
+  \cdx{length}  & $i\To i$              &       & length of a list\\
+  \cdx{rev}     & $i\To i$              &       & reverse of a list\\
+  \tt \at       & $[i,i]\To i$  &  Right 60     & append for lists\\
+  \cdx{flat}    & $i\To i$   &                  & append of list of lists
+\end{constants}
+
+\underscoreon %%because @ is used here
+\begin{alltt*}\isastyleminor
+\tdx{NilI}:       Nil \isasymin list(A)
+\tdx{ConsI}:      [| a \isasymin A;  l \isasymin list(A) |] ==> Cons(a,l) \isasymin list(A)
+
+\tdx{List.induct}
+    [| l \isasymin list(A);
+       P(Nil);
+       !!x y. [| x \isasymin A;  y \isasymin list(A);  P(y) |] ==> P(Cons(x,y))
+    |] ==> P(l)
+
+\tdx{Cons_iff}:       Cons(a,l)=Cons(a',l') <-> a=a' & l=l'
+\tdx{Nil_Cons_iff}:    Nil \isasymnoteq Cons(a,l)
+
+\tdx{list_mono}:      A \isasymsubseteq B ==> list(A) \isasymsubseteq list(B)
+
+\tdx{map_ident}:      l\isasymin{}list(A) ==> map(\%u. u, l) = l
+\tdx{map_compose}:    l\isasymin{}list(A) ==> map(h, map(j,l)) = map(\%u. h(j(u)), l)
+\tdx{map_app_distrib}: xs\isasymin{}list(A) ==> map(h, xs@ys) = map(h,xs)@map(h,ys)
+\tdx{map_type}
+    [| l\isasymin{}list(A); !!x. x\isasymin{}A ==> h(x)\isasymin{}B |] ==> map(h,l)\isasymin{}list(B)
+\tdx{map_flat}
+    ls: list(list(A)) ==> map(h, flat(ls)) = flat(map(map(h),ls))
+\end{alltt*}
+\caption{Lists} \label{zf-list}
+\end{figure}
+
+
+Figure~\ref{zf-list} presents the set of lists over~$A$, $\isa{list}(A)$.  The
+definition employs Isabelle's datatype package, which defines the introduction
+and induction rules automatically, as well as the constructors, case operator
+(\isa{list\_case}) and recursion operator.  The theory then defines the usual
+list functions by primitive recursion.  See theory \texttt{List}.
+
+
+\subsection{Miscellaneous}
+
+\begin{figure}
+\begin{constants} 
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \sdx{O}       & $[i,i]\To i$  &  Right 60     & composition ($\circ$) \\
+  \cdx{id}      & $i\To i$      &       & identity function \\
+  \cdx{inj}     & $[i,i]\To i$  &       & injective function space\\
+  \cdx{surj}    & $[i,i]\To i$  &       & surjective function space\\
+  \cdx{bij}     & $[i,i]\To i$  &       & bijective function space
+\end{constants}
+
+\begin{alltt*}\isastyleminor
+\tdx{comp_def}: r O s     == {\ttlbrace}xz \isasymin domain(s)*range(r) . 
+                        {\isasymexists}x y z. xz=<x,z> & <x,y> \isasymin s & <y,z> \isasymin r{\ttrbrace}
+\tdx{id_def}:   id(A)     == (lam x \isasymin A. x)
+\tdx{inj_def}:  inj(A,B)  == {\ttlbrace} f\isasymin{}A->B. {\isasymforall}w\isasymin{}A. {\isasymforall}x\isasymin{}A. f`w=f`x --> w=x {\ttrbrace}
+\tdx{surj_def}: surj(A,B) == {\ttlbrace} f\isasymin{}A->B . {\isasymforall}y\isasymin{}B. {\isasymexists}x\isasymin{}A. f`x=y {\ttrbrace}
+\tdx{bij_def}:  bij(A,B)  == inj(A,B) \isasyminter surj(A,B)
+
+
+\tdx{left_inverse}:    [| f\isasymin{}inj(A,B);  a\isasymin{}A |] ==> converse(f)`(f`a) = a
+\tdx{right_inverse}:   [| f\isasymin{}inj(A,B);  b\isasymin{}range(f) |] ==> 
+                 f`(converse(f)`b) = b
+
+\tdx{inj_converse_inj}: f\isasymin{}inj(A,B) ==> converse(f) \isasymin inj(range(f),A)
+\tdx{bij_converse_bij}: f\isasymin{}bij(A,B) ==> converse(f) \isasymin bij(B,A)
+
+\tdx{comp_type}:     [| s \isasymsubseteq A*B;  r \isasymsubseteq B*C |] ==> (r O s) \isasymsubseteq A*C
+\tdx{comp_assoc}:    (r O s) O t = r O (s O t)
+
+\tdx{left_comp_id}:  r \isasymsubseteq A*B ==> id(B) O r = r
+\tdx{right_comp_id}: r \isasymsubseteq A*B ==> r O id(A) = r
+
+\tdx{comp_func}:     [| g\isasymin{}A->B; f\isasymin{}B->C |] ==> (f O g) \isasymin A->C
+\tdx{comp_func_apply}: [| g\isasymin{}A->B; f\isasymin{}B->C; a\isasymin{}A |] ==> (f O g)`a = f`(g`a)
+
+\tdx{comp_inj}:      [| g\isasymin{}inj(A,B);  f\isasymin{}inj(B,C)  |] ==> (f O g)\isasymin{}inj(A,C)
+\tdx{comp_surj}:     [| g\isasymin{}surj(A,B); f\isasymin{}surj(B,C) |] ==> (f O g)\isasymin{}surj(A,C)
+\tdx{comp_bij}:      [| g\isasymin{}bij(A,B); f\isasymin{}bij(B,C) |] ==> (f O g)\isasymin{}bij(A,C)
+
+\tdx{left_comp_inverse}:    f\isasymin{}inj(A,B) ==> converse(f) O f = id(A)
+\tdx{right_comp_inverse}:   f\isasymin{}surj(A,B) ==> f O converse(f) = id(B)
+
+\tdx{bij_disjoint_Un}:  
+    [| f\isasymin{}bij(A,B);  g\isasymin{}bij(C,D);  A \isasyminter C = 0;  B \isasyminter D = 0 |] ==> 
+    (f \isasymunion g)\isasymin{}bij(A \isasymunion C, B \isasymunion D)
+
+\tdx{restrict_bij}: [| f\isasymin{}inj(A,B); C\isasymsubseteq{}A |] ==> restrict(f,C)\isasymin{}bij(C, f``C)
+\end{alltt*}
+\caption{Permutations} \label{zf-perm}
+\end{figure}
+
+The theory \thydx{Perm} is concerned with permutations (bijections) and
+related concepts.  These include composition of relations, the identity
+relation, and three specialized function spaces: injective, surjective and
+bijective.  Figure~\ref{zf-perm} displays many of their properties that
+have been proved.  These results are fundamental to a treatment of
+equipollence and cardinality.
+
+Theory \thydx{Univ} defines a `universe' $\isa{univ}(A)$, which is used by
+the datatype package.  This set contains $A$ and the
+natural numbers.  Vitally, it is closed under finite products: 
+$\isa{univ}(A)\times\isa{univ}(A)\subseteq\isa{univ}(A)$.  This theory also
+defines the cumulative hierarchy of axiomatic set theory, which
+traditionally is written $V@\alpha$ for an ordinal~$\alpha$.  The
+`universe' is a simple generalization of~$V@\omega$.
+
+Theory \thydx{QUniv} defines a `universe' $\isa{quniv}(A)$, which is used by
+the datatype package to construct codatatypes such as streams.  It is
+analogous to $\isa{univ}(A)$ (and is defined in terms of it) but is closed
+under the non-standard product and sum.
+
+
+\section{Automatic Tools}
+
+ZF provides the simplifier and the classical reasoner.  Moreover it supplies a
+specialized tool to infer `types' of terms.
+
+\subsection{Simplification and Classical Reasoning}
+
+ZF inherits simplification from FOL but adapts it for set theory.  The
+extraction of rewrite rules takes the ZF primitives into account.  It can
+strip bounded universal quantifiers from a formula; for example, ${\forall
+  x\in A. f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
+f(x)=g(x)$.  Given $a\in\{x\in A. P(x)\}$ it extracts rewrite rules from $a\in
+A$ and~$P(a)$.  It can also break down $a\in A\int B$ and $a\in A-B$.
+
+The default simpset used by \isa{simp} contains congruence rules for all of ZF's
+binding operators.  It contains all the conversion rules, such as those
+for \isa{fst} and
+\isa{snd}, as well as the rewrites shown in Fig.\ts\ref{zf-simpdata}.
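+For instance, because the pairing conversions are default rewrites, a goal
+such as the following should be provable by \isa{simp} alone (a small
+illustrative example, not taken from the library):
+\begin{isabelle}
+\isacommand{lemma}\ "fst(<a,b>)\ =\ a\ \&\ snd(<a,b>)\ =\ b"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}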
+
+Classical reasoner methods such as \isa{blast} and \isa{auto} refer to
+a rich collection of built-in axioms for all the set-theoretic
+primitives.
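+For example, simple set-theoretic facts such as the following are normally
+dispatched by a single call to \isa{blast}:
+\begin{isabelle}
+\isacommand{lemma}\ "A\ \isasyminter \ B\ \isasymsubseteq \ A\ \isasymunion \ B"\isanewline
+\ \ \isacommand{by}\ blast
+\end{isabelle}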
+
+
+\begin{figure}
+\begin{eqnarray*}
+  a\in \emptyset        & \bimp &  \bot\\
+  a \in A \un B      & \bimp &  a\in A \disj a\in B\\
+  a \in A \int B      & \bimp &  a\in A \conj a\in B\\
+  a \in A-B             & \bimp &  a\in A \conj \lnot (a\in B)\\
+  \pair{a,b}\in \isa{Sigma}(A,B)
+                        & \bimp &  a\in A \conj b\in B(a)\\
+  a \in \isa{Collect}(A,P)      & \bimp &  a\in A \conj P(a)\\
+  (\forall x \in \emptyset. P(x)) & \bimp &  \top\\
+  (\forall x \in A. \top)       & \bimp &  \top
+\end{eqnarray*}
+\caption{Some rewrite rules for set theory} \label{zf-simpdata}
+\end{figure}
+
+
+\subsection{Type-Checking Tactics}
+\index{type-checking tactics}
+
+Isabelle/ZF provides simple tactics to help automate those proofs that are
+essentially type-checking.  Such proofs are built by applying rules such as
+these:
+\begin{ttbox}\isastyleminor
+[| ?P ==> ?a \isasymin ?A; ~?P ==> ?b \isasymin ?A |] 
+==> (if ?P then ?a else ?b) \isasymin ?A
+
+[| ?m \isasymin nat; ?n \isasymin nat |] ==> ?m #+ ?n \isasymin nat
+
+?a \isasymin ?A ==> Inl(?a) \isasymin ?A + ?B  
+\end{ttbox}
+In typical applications, the goal has the form $t\in\Var{A}$: in other words,
+we have a specific term~$t$ and need to infer its `type' by instantiating the
+set variable~$\Var{A}$.  Neither the simplifier nor the classical reasoner
+does this job well.  The if-then-else rule, and many similar ones, can make
+the classical reasoner loop.  The simplifier refuses (on principle) to
+instantiate variables during rewriting, so goals such as \isa{i\#+j \isasymin \ ?A}
+are left unsolved.
+
+The simplifier calls the type-checker to solve rewritten subgoals: this stage
+can indeed instantiate variables.  If you have defined new constants and
+proved type-checking rules for them, then declare the rules using
+the attribute \isa{TC} and the rest should be automatic.  In
+particular, the simplifier will use type-checking to help satisfy
+conditional rewrite rules. Call the method \ttindex{typecheck} to
+break down all subgoals using type-checking rules. You can add new
+type-checking rules temporarily like this:
+\begin{isabelle}
+\isacommand{apply}\ (typecheck add:\ inj_is_fun)
+\end{isabelle}
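+To make such a rule available to all later proofs instead, declare it with
+the \isa{TC} attribute; for example:
+\begin{isabelle}
+\isacommand{declare}\ inj\_is\_fun\ [TC]%
+\end{isabelle}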
+
+
+%Though the easiest way to invoke the type-checker is via the simplifier,
+%specialized applications may require more detailed knowledge of
+%the type-checking primitives.  They are modelled on the simplifier's:
+%\begin{ttdescription}
+%\item[\ttindexbold{tcset}] is the type of tcsets: sets of type-checking rules.
+%
+%\item[\ttindexbold{addTCs}] is an infix operator to add type-checking rules to
+%  a tcset.
+%  
+%\item[\ttindexbold{delTCs}] is an infix operator to remove type-checking rules
+%  from a tcset.
+%
+%\item[\ttindexbold{typecheck_tac}] is a tactic for attempting to prove all
+%  subgoals using the rules given in its argument, a tcset.
+%\end{ttdescription}
+%
+%Tcsets, like simpsets, are associated with theories and are merged when
+%theories are merged.  There are further primitives that use the default tcset.
+%\begin{ttdescription}
+%\item[\ttindexbold{tcset}] is a function to return the default tcset; use the
+%  expression \isa{tcset()}.
+%
+%\item[\ttindexbold{AddTCs}] adds type-checking rules to the default tcset.
+%  
+%\item[\ttindexbold{DelTCs}] removes type-checking rules from the default
+%  tcset.
+%
+%\item[\ttindexbold{Typecheck_tac}] calls \isa{typecheck_tac} using the
+%  default tcset.
+%\end{ttdescription}
+%
+%To supply some type-checking rules temporarily, using \isa{Addrules} and
+%later \isa{Delrules} is the simplest way.  There is also a high-tech
+%approach.  Call the simplifier with a new solver expressed using
+%\ttindexbold{type_solver_tac} and your temporary type-checking rules.
+%\begin{ttbox}\isastyleminor
+%by (asm_simp_tac 
+%     (simpset() setSolver type_solver_tac (tcset() addTCs prems)) 2);
+%\end{ttbox}
+
+
+\section{Natural number and integer arithmetic}
+
+\index{arithmetic|(}
+
+\begin{figure}\small
+\index{#*@{\tt\#*} symbol}
+\index{*div symbol}
+\index{*mod symbol}
+\index{#+@{\tt\#+} symbol}
+\index{#-@{\tt\#-} symbol}
+\begin{constants}
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \cdx{nat}     & $i$                   &       & set of natural numbers \\
+  \cdx{nat_case}& $[i,i\To i,i]\To i$     &     & conditional for $nat$\\
+  \tt \#*       & $[i,i]\To i$  &  Left 70      & multiplication \\
+  \tt div       & $[i,i]\To i$  &  Left 70      & division\\
+  \tt mod       & $[i,i]\To i$  &  Left 70      & modulus\\
+  \tt \#+       & $[i,i]\To i$  &  Left 65      & addition\\
+  \tt \#-       & $[i,i]\To i$  &  Left 65      & subtraction
+\end{constants}
+
+\begin{alltt*}\isastyleminor
+\tdx{nat_def}: nat == lfp(lam r \isasymin Pow(Inf). {\ttlbrace}0{\ttrbrace} \isasymunion {\ttlbrace}succ(x). x \isasymin r{\ttrbrace})
+
+\tdx{nat_case_def}:  nat_case(a,b,k) == 
+              THE y. k=0 & y=a | ({\isasymexists}x. k=succ(x) & y=b(x))
+
+\tdx{nat_0I}:           0 \isasymin nat
+\tdx{nat_succI}:        n \isasymin nat ==> succ(n) \isasymin nat
+
+\tdx{nat_induct}:        
+    [| n \isasymin nat;  P(0);  !!x. [| x \isasymin nat;  P(x) |] ==> P(succ(x)) 
+    |] ==> P(n)
+
+\tdx{nat_case_0}:       nat_case(a,b,0) = a
+\tdx{nat_case_succ}:    nat_case(a,b,succ(m)) = b(m)
+
+\tdx{add_0_natify}:     0 #+ n = natify(n)
+\tdx{add_succ}:         succ(m) #+ n = succ(m #+ n)
+
+\tdx{mult_type}:        m #* n \isasymin nat
+\tdx{mult_0}:           0 #* n = 0
+\tdx{mult_succ}:        succ(m) #* n = n #+ (m #* n)
+\tdx{mult_commute}:     m #* n = n #* m
+\tdx{add_mult_dist}:    (m #+ n) #* k = (m #* k) #+ (n #* k)
+\tdx{mult_assoc}:       (m #* n) #* k = m #* (n #* k)
+\tdx{mod_div_equality}: m \isasymin nat ==> (m div n)#*n #+ m mod n = m
+\end{alltt*}
+\caption{The natural numbers} \label{zf-nat}
+\end{figure}
+
+\index{natural numbers}
+
+Theory \thydx{Nat} defines the natural numbers and mathematical
+induction, along with a case analysis operator.  The set of natural
+numbers, here called \isa{nat}, is known in set theory as the ordinal~$\omega$.
+
+Theory \thydx{Arith} develops arithmetic on the natural numbers
+(Fig.\ts\ref{zf-nat}).  Addition, multiplication and subtraction are defined
+by primitive recursion.  Division and remainder are defined by repeated
+subtraction, which requires well-founded recursion; the termination argument
+relies on the divisor's being non-zero.  Many properties are proved:
+commutative, associative and distributive laws, identity and cancellation
+laws, etc.  The most interesting result is perhaps the theorem $a \bmod b +
+(a/b)\times b = a$.
+
+To minimize the need for tedious proofs of $t\in\isa{nat}$, the arithmetic
+operators coerce their arguments to be natural numbers.  The function
+\cdx{natify} is defined such that $\isa{natify}(n) = n$ if $n$ is a natural
+number, $\isa{natify}(\isa{succ}(x)) =
+\isa{succ}(\isa{natify}(x))$ for all $x$, and finally
+$\isa{natify}(x)=0$ in all other cases.  The benefit is that the addition,
+subtraction, multiplication, division and remainder operators always return
+natural numbers, regardless of their arguments.  Algebraic laws (commutative,
+associative, distributive) are unconditional.  Occurrences of \isa{natify}
+as operands of those operators are simplified away.  Any remaining occurrences
+can either be tolerated or else eliminated by proving that the argument is a
+natural number.
+
+The simplifier automatically cancels common terms on the opposite sides of
+subtraction and of relations ($=$, $<$ and $\le$).  Here is an example:
+\begin{isabelle}
+ 1. i \#+ j \#+ k \#- j < k \#+ l\isanewline
+\isacommand{apply}\ simp\isanewline
+ 1. natify(i) < natify(l)
+\end{isabelle}
+Given the assumptions \isa{i \isasymin nat} and \isa{l \isasymin nat}, both occurrences of
+\cdx{natify} would be simplified away.
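+For instance, adding those typing assumptions together with \isa{i\ <\ l}
+should let \isa{simp} close the goal in a single step (a sketch, not a
+library lemma):
+\begin{isabelle}
+\isacommand{lemma}\ "[|\ i\ \isasymin \ nat;\ l\ \isasymin \ nat;\ i\ <\ l\ |]\ ==>\ i\ \#+\ j\ \#+\ k\ \#-\ j\ <\ k\ \#+\ l"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}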
+
+
+\begin{figure}\small
+\index{$*@{\tt\$*} symbol}
+\index{$+@{\tt\$+} symbol}
+\index{$-@{\tt\$-} symbol}
+\begin{constants}
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \cdx{int}     & $i$                   &       & set of integers \\
+  \tt \$*       & $[i,i]\To i$  &  Left 70      & multiplication \\
+  \tt \$+       & $[i,i]\To i$  &  Left 65      & addition\\
+  \tt \$-       & $[i,i]\To i$  &  Left 65      & subtraction\\
+  \tt \$<       & $[i,i]\To o$  &  Left 50      & $<$ on integers\\
+  \tt \$<=      & $[i,i]\To o$  &  Left 50      & $\le$ on integers
+\end{constants}
+
+\begin{alltt*}\isastyleminor
+\tdx{zadd_0_intify}:    0 $+ n = intify(n)
+
+\tdx{zmult_type}:       m $* n \isasymin int
+\tdx{zmult_0}:          0 $* n = 0
+\tdx{zmult_commute}:    m $* n = n $* m
+\tdx{zadd_zmult_dist}:   (m $+ n) $* k = (m $* k) $+ (n $* k)
+\tdx{zmult_assoc}:      (m $* n) $* k = m $* (n $* k)
+\end{alltt*}
+\caption{The integers} \label{zf-int}
+\end{figure}
+
+
+\index{integers}
+
+Theory \thydx{Int} defines the integers, as equivalence classes of natural
+numbers.   Figure~\ref{zf-int} presents a tidy collection of laws.  In
+fact, a large library of facts is proved, including monotonicity laws for
+addition and multiplication, covering both positive and negative operands.  
+
+As with the natural numbers, the need for typing proofs is minimized.  All the
+operators defined in Fig.\ts\ref{zf-int} coerce their operands to integers by
+applying the function \cdx{intify}.  This function is the identity on integers
+and maps other operands to zero.
+
+Decimal notation is provided for the integers.  Numbers, written as
+\isa{\#$nnn$} or \isa{\#-$nnn$}, are represented internally in
+two's-complement binary.  Expressions involving addition, subtraction and
+multiplication of numeral constants are evaluated (with acceptable efficiency)
+by simplification.  The simplifier also collects similar terms, multiplying
+them by a numerical coefficient.  It also cancels occurrences of the same
+terms on opposite sides of the relational operators.  Example:
+\begin{isabelle}
+ 1. y \$+ z \$+ \#-3 \$* x \$+ y \$<=  x \$* \#2 \$+
+z\isanewline
+\isacommand{apply}\ simp\isanewline
+ 1. \#2 \$* y \$<= \#5 \$* x
+\end{isabelle}
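+Concrete numeral computations are handled the same way; for example, the
+following should be provable by \isa{simp} alone:
+\begin{isabelle}
+\isacommand{lemma}\ "\#3\ \$*\ \#-4\ =\ \#-12"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}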
+For more information on the integers, please see the theories on directory
+\texttt{ZF/Integ}. 
+
+\index{arithmetic|)}
+
+
+\section{Datatype definitions}
+\label{sec:ZF:datatype}
+\index{*datatype|(}
+
+The \ttindex{datatype} definition package of ZF constructs inductive datatypes
+similar to \ML's.  It can also construct coinductive datatypes
+(codatatypes), which are non-well-founded structures such as streams.  It
+defines the set using a fixed-point construction and proves induction rules,
+as well as theorems for recursion and case combinators.  It supplies
+mechanisms for reasoning about freeness.  The datatype package can handle both
+mutual and indirect recursion.
+
+
+\subsection{Basics}
+\label{subsec:datatype:basics}
+
+A \isa{datatype} definition has the following form:
+\[
+\begin{array}{llcl}
+\mathtt{datatype} & t@1(A@1,\ldots,A@h) & = &
+  constructor^1@1 ~\mid~ \ldots ~\mid~ constructor^1@{k@1} \\
+ & & \vdots \\
+\mathtt{and} & t@n(A@1,\ldots,A@h) & = &
+  constructor^n@1 ~\mid~ \ldots ~\mid~ constructor^n@{k@n}
+\end{array}
+\]
+Here $t@1$, \ldots,~$t@n$ are identifiers and $A@1$, \ldots,~$A@h$ are
+variables: the datatype's parameters.  Each constructor specification has the
+form \dquotesoff
+\[ C \hbox{\tt~( } \hbox{\tt"} x@1 \hbox{\tt:} T@1 \hbox{\tt"},\;
+                   \ldots,\;
+                   \hbox{\tt"} x@m \hbox{\tt:} T@m \hbox{\tt"}
+     \hbox{\tt~)}
+\]
+Here $C$ is the constructor name, and variables $x@1$, \ldots,~$x@m$ are the
+constructor arguments, belonging to the sets $T@1$, \ldots, $T@m$,
+respectively.  Typically each $T@j$ is either a constant set, a datatype
+parameter (one of $A@1$, \ldots, $A@h$) or a recursive occurrence of one of
+the datatypes, say $t@i(A@1,\ldots,A@h)$.  More complex possibilities exist,
+but they are much harder to realize.  Often, additional information must be
+supplied in the form of theorems.
+
+A datatype can occur recursively as the argument of some function~$F$.  This
+is called a {\em nested} (or \emph{indirect}) occurrence.  It is only allowed
+if the datatype package is given a theorem asserting that $F$ is monotonic.
+If the datatype has indirect occurrences, then Isabelle/ZF does not support
+recursive function definitions.
+
+A simple example of a datatype is \isa{list}, which is built-in, and is
+defined by
+\begin{alltt*}\isastyleminor
+consts     list :: "i=>i"
+datatype  "list(A)" = Nil | Cons ("a \isasymin A", "l \isasymin list(A)")
+\end{alltt*}
+Note that the datatype operator must be declared as a constant first.
+However, the package declares the constructors.  Here, \isa{Nil} gets type
+$i$ and \isa{Cons} gets type $[i,i]\To i$.
+
+Trees and forests can be modelled by the mutually recursive datatype
+definition
+\begin{alltt*}\isastyleminor
+consts   
+  tree :: "i=>i"
+  forest :: "i=>i"
+  tree_forest :: "i=>i"
+datatype  "tree(A)"   = Tcons ("a{\isasymin}A",  "f{\isasymin}forest(A)")
+and "forest(A)" = Fnil | Fcons ("t{\isasymin}tree(A)",  "f{\isasymin}forest(A)")
+\end{alltt*}
+Here $\isa{tree}(A)$ is the set of trees over $A$, $\isa{forest}(A)$ is
+the set of forests over $A$, and  $\isa{tree_forest}(A)$ is the union of
+the previous two sets.  All three operators must be declared first.
+
+The datatype \isa{term}, which is defined by
+\begin{alltt*}\isastyleminor
+consts     term :: "i=>i"
+datatype  "term(A)" = Apply ("a \isasymin A", "l \isasymin list(term(A))")
+  monos list_mono
+  type_elims list_univ [THEN subsetD, elim_format]
+\end{alltt*}
+is an example of nested recursion.  (The theorem \isa{list_mono} is proved
+in theory \isa{List}, and the \isa{term} example is developed in
+theory
+\thydx{Induct/Term}.)
+
+\subsubsection{Freeness of the constructors}
+
+Constructors satisfy {\em freeness} properties.  Distinct constructors yield
+distinct values, for example $\isa{Nil}\not=\isa{Cons}(a,l)$, and constructors
+are injective, for example
+$\isa{Cons}(a,l)=\isa{Cons}(a',l') \bimp a=a' \conj l=l'$.
+Because the number of freeness properties is quadratic in the number of
+constructors, the
+datatype package does not prove them.  Instead, it ensures that simplification
+will prove them dynamically: when the simplifier encounters a formula
+asserting the equality of two constructor terms, it performs freeness
+reasoning.  
+
+Freeness reasoning can also be done using the classical reasoner, but it is
+more complicated.  You have to add some safe elimination rules to the
+claset.  For the \isa{list} datatype, they are called
+\isa{list.free_elims}.  Occasionally this exposes the underlying
+representation of some constructor, which can be rectified using the command
+\isa{unfold list.con_defs [symmetric]}.
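+For example, once the freeness eliminations are supplied, a distinctness
+fact such as the following should follow by \isa{blast}:
+\begin{isabelle}
+\isacommand{lemma}\ "Cons(a,\ l)\ \isasymnoteq \ Nil"\isanewline
+\ \ \isacommand{by}\ (blast\ elim!:\ list.free\_elims)
+\end{isabelle}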
+
+
+\subsubsection{Structural induction}
+
+The datatype package also provides structural induction rules.  For datatypes
+without mutual or nested recursion, the rule has the form exemplified by
+\isa{list.induct} in Fig.\ts\ref{zf-list}.  For mutually recursive
+datatypes, the induction rule is supplied in two forms.  Consider the
+tree and forest datatypes declared above.  The rule
+\isa{tree_forest.induct} performs induction over a
+single predicate~\isa{P}, which is presumed to be defined for both trees
+and forests:
+\begin{alltt*}\isastyleminor
+[| x \isasymin tree_forest(A);
+   !!a f. [| a \isasymin A; f \isasymin forest(A); P(f) |] ==> P(Tcons(a, f)); 
+   P(Fnil);
+   !!f t. [| t \isasymin tree(A); P(t); f \isasymin forest(A); P(f) |]
+          ==> P(Fcons(t, f)) 
+|] ==> P(x)
+\end{alltt*}
+The rule \isa{tree_forest.mutual_induct} performs induction over two
+distinct predicates, \isa{P_tree} and \isa{P_forest}.
+\begin{alltt*}\isastyleminor
+[| !!a f.
+      [| a{\isasymin}A; f{\isasymin}forest(A); P_forest(f) |] ==> P_tree(Tcons(a,f));
+   P_forest(Fnil);
+   !!f t. [| t{\isasymin}tree(A); P_tree(t); f{\isasymin}forest(A); P_forest(f) |]
+          ==> P_forest(Fcons(t, f)) 
+|] ==> ({\isasymforall}za. za \isasymin tree(A) --> P_tree(za)) &
+    ({\isasymforall}za. za \isasymin forest(A) --> P_forest(za))
+\end{alltt*}
+
+For datatypes with nested recursion, such as the \isa{term} example from
+above, things are a bit more complicated.  The rule \isa{term.induct}
+refers to the monotonic operator, \isa{list}:
+\begin{alltt*}\isastyleminor
+[| x \isasymin term(A);
+   !!a l. [| a\isasymin{}A; l\isasymin{}list(Collect(term(A), P)) |] ==> P(Apply(a,l)) 
+|] ==> P(x)
+\end{alltt*}
+The theory \isa{Induct/Term.thy} derives two higher-level induction rules,
+one of which is particularly useful for proving equations:
+\begin{alltt*}\isastyleminor
+[| t \isasymin term(A);
+   !!x zs. [| x \isasymin A; zs \isasymin list(term(A)); map(f, zs) = map(g, zs) |]
+           ==> f(Apply(x, zs)) = g(Apply(x, zs)) 
+|] ==> f(t) = g(t)  
+\end{alltt*}
+How this can be generalized to other nested datatypes is a matter for future
+research.
+
+
+\subsubsection{The \isa{case} operator}
+
+The package defines an operator for performing case analysis over the
+datatype.  For \isa{list}, it is called \isa{list_case} and satisfies
+the equations
+\begin{ttbox}\isastyleminor
+list_case(f_Nil, f_Cons, []) = f_Nil
+list_case(f_Nil, f_Cons, Cons(a, l)) = f_Cons(a, l)
+\end{ttbox}
+Here \isa{f_Nil} is the value to return if the argument is \isa{Nil} and
+\isa{f_Cons} is a function that computes the value to return if the
+argument has the form $\isa{Cons}(a,l)$.  The function can be expressed as
+an abstraction, over patterns if desired (\S\ref{sec:pairs}).
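+Since the case equations are default rewrites, instances of them should be
+proved automatically; for example:
+\begin{isabelle}
+\isacommand{lemma}\ "list\_case(a,\ b,\ Cons(x,\ y))\ =\ b(x,\ y)"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}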
+
+For mutually recursive datatypes, there is a single \isa{case} operator.
+In the tree/forest example, the constant \isa{tree_forest_case} handles all
+of the constructors of the two datatypes.
+
+
+\subsection{Defining datatypes}
+
+The theory syntax for datatype definitions is shown in the
+Isabelle/Isar reference manual.  In order to be well-formed, a
+datatype definition has to obey the rules stated in the previous
+section.  As a result the theory is extended with the new types, the
+constructors, and the theorems listed in the previous section.
+
+Codatatypes are declared like datatypes and are identical to them in every
+respect except that they have a coinduction rule instead of an induction rule.
+Note that while an induction rule has the effect of limiting the values
+contained in the set, a coinduction rule gives a way of constructing new
+values of the set.
+
+Most of the theorems about datatypes become part of the default simpset.  You
+never need to see them again because the simplifier applies them
+automatically.  
+
+\subsubsection{Specialized methods for datatypes}
+
+Induction and case-analysis can be invoked using these special-purpose
+methods:
+\begin{ttdescription}
+\item[\methdx{induct_tac} $x$] applies structural
+  induction on variable $x$ to subgoal~1, provided the type of $x$ is a
+  datatype.  The induction variable should not occur among other assumptions
+  of the subgoal.
+\end{ttdescription}
+% 
+% we also have the ind_cases method, but what does it do?
+In some situations, induction is overkill and a case distinction over all
+constructors of the datatype suffices.
+\begin{ttdescription}
+\item[\methdx{case_tac} $x$]
+ performs a case analysis for the variable~$x$.
+\end{ttdescription}
+
+Both tactics can be applied only to a variable whose typing is given in
+some assumption, for example \isa{x \isasymin \ list(A)}.  The tactics
+also work for the natural numbers (\isa{nat}) and disjoint sums, although
+these sets were not defined using the datatype package.  (Disjoint sums are
+not recursive, so only \isa{case_tac} is available.)
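+For instance, a case distinction over the natural numbers might look like
+this (a sketch; the amount of automation needed afterwards may vary):
+\begin{isabelle}
+\isacommand{lemma}\ "n\ \isasymin \ nat\ ==>\ n\ =\ 0\ |\ (\isasymexists m.\ n\ =\ succ(m))"\isanewline
+\ \ \isacommand{by}\ (case\_tac\ n,\ auto)
+\end{isabelle}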
+
+Structured Isar methods are also available. Below, $t$ 
+stands for the name of the datatype.
+\begin{ttdescription}
+\item[\methdx{induct} \isa{set:}\ $t$] is the Isar induction tactic.
+\item[\methdx{cases} \isa{set:}\ $t$] is the Isar case-analysis tactic.
+\end{ttdescription}
+
+
+\subsubsection{The theorems proved by a datatype declaration}
+
+Here are some more details for the technically minded.  Processing the
+datatype declaration of a set~$t$ produces a name space~$t$ containing
+the following theorems:
+\begin{ttbox}\isastyleminor
+intros          \textrm{the introduction rules}
+cases           \textrm{the case analysis rule}
+induct          \textrm{the standard induction rule}
+mutual_induct   \textrm{the mutual induction rule, if needed}
+case_eqns       \textrm{equations for the case operator}
+recursor_eqns   \textrm{equations for the recursor}
+simps           \textrm{the union of} case_eqns \textrm{and} recursor_eqns
+con_defs        \textrm{definitions of the case operator and constructors}
+free_iffs       \textrm{logical equivalences for proving freeness}
+free_elims      \textrm{elimination rules for proving freeness}
+defs            \textrm{datatype definition(s)}
+\end{ttbox}
+Furthermore there is the theorem $C$ for every constructor~$C$; for
+example, the \isa{list} datatype's introduction rules are bound to the
+identifiers \isa{Nil} and \isa{Cons}.
+
+For a codatatype, the component \isa{coinduct} is the coinduction rule,
+replacing the \isa{induct} component.
+
+See the theories \isa{Induct/Ntree} and \isa{Induct/Brouwer} for examples of
+infinitely branching datatypes.  See theory \isa{Induct/LList} for an example
+of a codatatype.  Some of these theories illustrate the use of additional,
+undocumented features of the datatype package.  Datatype definitions are
+reduced to inductive definitions, and the advanced features should be
+understood in that light.
+
+
+\subsection{Examples}
+
+\subsubsection{The datatype of binary trees}
+
+Let us define the set $\isa{bt}(A)$ of binary trees over~$A$.  The theory
+must contain these lines:
+\begin{alltt*}\isastyleminor
+consts   bt :: "i=>i"
+datatype "bt(A)" = Lf | Br ("a\isasymin{}A", "t1\isasymin{}bt(A)", "t2\isasymin{}bt(A)")
+\end{alltt*}
+After loading the theory, we can prove some theorems.  
+We begin by declaring the constructors' typechecking rules
+as simplification rules:
+\begin{isabelle}
+\isacommand{declare}\ bt.intros\ [simp]%
+\end{isabelle}
+
+Our first example is the theorem that no tree equals its
+left branch.  To make the inductive hypothesis strong enough, 
+the proof requires a quantified induction formula, but 
+the \isa{rule\_format} attribute will remove the quantifiers 
+before the theorem is stored.
+\begin{isabelle}
+\isacommand{lemma}\ Br\_neq\_left\ [rule\_format]:\ "l\isasymin bt(A)\ ==>\ \isasymforall x\ r.\ Br(x,l,r)\isasymnoteq{}l"\isanewline
+\ 1.\ l\ \isasymin \ bt(A)\ \isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l%
+\end{isabelle}
+This can be proved by the structural induction tactic:
+\begin{isabelle}
+\ \ \isacommand{apply}\ (induct\_tac\ l)\isanewline
+\ 1.\ \isasymforall x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
+\ 2.\ \isasymAnd a\ t1\ t2.\isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymforall x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
+\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymforall x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
+\end{isabelle}
+Both subgoals are proved using \isa{auto}, which performs the necessary
+freeness reasoning. 
+\begin{isabelle}
+\ \ \isacommand{apply}\ auto\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+
+An alternative proof uses Isar's fancy \isa{induct} method, which 
+automatically quantifies over all free variables:
+
+\begin{isabelle}
+\isacommand{lemma}\ Br\_neq\_left':\ "l\ \isasymin \ bt(A)\ ==>\ (!!x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l)"\isanewline
+\ \ \isacommand{apply}\ (induct\ set:\ bt)\isanewline
+\ 1.\ \isasymAnd x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
+\ 2.\ \isasymAnd a\ t1\ t2\ x\ r.\isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymAnd x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
+\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymAnd x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
+\end{isabelle}
+Compare the form of the induction hypotheses with the corresponding ones in
+the previous proof.  As before, the proof is completed using only \isa{auto}.
+
+When there are only a few constructors, we might prefer to prove the freeness
+theorems for each constructor.  This is simple:
+\begin{isabelle}
+\isacommand{lemma}\ Br\_iff:\ "Br(a,l,r)\ =\ Br(a',l',r')\ <->\ a=a'\ \&\ l=l'\ \&\ r=r'"\isanewline
+\ \ \isacommand{by}\ (blast\ elim!:\ bt.free\_elims)
+\end{isabelle}
+Here we see a demonstration of freeness reasoning using
+\isa{bt.free\_elims}, but simpler still is just to apply \isa{auto}.
+
+An \ttindex{inductive\_cases} declaration generates instances of the
+case analysis rule that have been simplified  using freeness
+reasoning. 
+\begin{isabelle}
+\isacommand{inductive\_cases}\ Br\_in\_bt:\ "Br(a,\ l,\ r)\ \isasymin \ bt(A)"
+\end{isabelle}
+The theorem just created is 
+\begin{isabelle}
+\isasymlbrakk Br(a,\ l,\ r)\ \isasymin \ bt(A);\ \isasymlbrakk a\ \isasymin \ A;\ l\ \isasymin \ bt(A);\ r\ \isasymin \ bt(A)\isasymrbrakk \ \isasymLongrightarrow \ Q\isasymrbrakk \ \isasymLongrightarrow \ Q.
+\end{isabelle}
+It is an elimination rule that from $\isa{Br}(a,l,r)\in\isa{bt}(A)$
+lets us infer $a\in A$, $l\in\isa{bt}(A)$ and
+$r\in\isa{bt}(A)$.
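+Such rules combine conveniently with \isa{erule}; for instance, the
+following goal should be within reach of the rule just created (a sketch):
+\begin{isabelle}
+\isacommand{lemma}\ "Br(a,\ l,\ r)\ \isasymin \ bt(A)\ ==>\ l\ \isasymin \ bt(A)"\isanewline
+\ \ \isacommand{by}\ (erule\ Br\_in\_bt,\ assumption)
+\end{isabelle}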
+
+
+\subsubsection{Mixfix syntax in datatypes}
+
+Mixfix syntax is sometimes convenient.  The theory \isa{Induct/PropLog} makes a
+deep embedding of propositional logic:
+\begin{alltt*}\isastyleminor
+consts     prop :: i
+datatype  "prop" = Fls
+                 | Var ("n \isasymin nat")                ("#_" [100] 100)
+                 | "=>" ("p \isasymin prop", "q \isasymin prop")   (infixr 90)
+\end{alltt*}
+The second constructor has a special $\#n$ syntax, while the third constructor
+is an infixed arrow.
+
+
+\subsubsection{A giant enumeration type}
+
+This example shows a datatype that consists of 60 constructors:
+\begin{alltt*}\isastyleminor
+consts  enum :: i
+datatype
+  "enum" = C00 | C01 | C02 | C03 | C04 | C05 | C06 | C07 | C08 | C09
+         | C10 | C11 | C12 | C13 | C14 | C15 | C16 | C17 | C18 | C19
+         | C20 | C21 | C22 | C23 | C24 | C25 | C26 | C27 | C28 | C29
+         | C30 | C31 | C32 | C33 | C34 | C35 | C36 | C37 | C38 | C39
+         | C40 | C41 | C42 | C43 | C44 | C45 | C46 | C47 | C48 | C49
+         | C50 | C51 | C52 | C53 | C54 | C55 | C56 | C57 | C58 | C59
+end
+\end{alltt*}
+The datatype package scales well.  Even though all properties are proved
+rather than assumed, full processing of this definition takes around two seconds
+(on a 1.8GHz machine).  The constructors have a balanced representation,
+related to binary notation, so freeness properties can be proved fast.
+\begin{isabelle}
+\isacommand{lemma}\ "C00 \isasymnoteq\ C01"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}
+You need not derive such inequalities explicitly.  The simplifier will
+dispose of them automatically.
+
+\index{*datatype|)}
+
+
+\subsection{Recursive function definitions}\label{sec:ZF:recursive}
+\index{recursive functions|see{recursion}}
+\index{*primrec|(}
+\index{recursion!primitive|(}
+
+Datatypes come with a uniform way of defining functions, {\bf primitive
+  recursion}.  Such definitions rely on the recursion operator defined by the
+datatype package.  Isabelle proves the desired recursion equations as
+theorems.
+
+In principle, one could introduce primitive recursive functions by asserting
+their reduction rules as axioms.  Here is a dangerous way of defining a
+recursive function over binary trees:
+\begin{isabelle}
+\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
+\isacommand{axioms}\isanewline
+\ \ n\_nodes\_Lf:\ "n\_nodes(Lf)\ =\ 0"\isanewline
+\ \ n\_nodes\_Br:\ "n\_nodes(Br(a,l,r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
+\end{isabelle}
+Asserting axioms brings the danger of accidentally introducing
+contradictions.  It should be avoided whenever possible.
+
+The \ttindex{primrec} declaration is a safe means of defining primitive
+recursive functions on datatypes:
+\begin{isabelle}
+\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
+\isacommand{primrec}\isanewline
+\ \ "n\_nodes(Lf)\ =\ 0"\isanewline
+\ \ "n\_nodes(Br(a,\ l,\ r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
+\end{isabelle}
+Isabelle will now derive the two equations from a low-level definition  
+based upon well-founded recursion.  If they do not define a legitimate
+recursion, then Isabelle will reject the declaration.
+
+
+\subsubsection{Syntax of recursive definitions}
+
+The general form of a primitive recursive definition is
+\begin{ttbox}\isastyleminor
+primrec
+    {\it reduction rules}
+\end{ttbox}
+where \textit{reduction rules} specify one or more equations of the form
+\[ f \, x@1 \, \dots \, x@m \, (C \, y@1 \, \dots \, y@k) \, z@1 \,
+\dots \, z@n = r \] such that $C$ is a constructor of the datatype, $r$
+contains only the free variables on the left-hand side, and all recursive
+calls in $r$ are of the form $f \, \dots \, y@i \, \dots$ for some $i$.  
+There must be at most one reduction rule for each constructor.  The order is
+immaterial.  For missing constructors, the function is defined to return zero.
+
+All reduction rules are added to the default simpset.
+If you would like to refer to some rule by name, then you must prefix
+the rule with an identifier.  These identifiers, like those in the
+\isa{rules} section of a theory, will be visible in proof scripts.
+
+The reduction rules become part of the default simpset, which
+leads to short proof scripts:
+\begin{isabelle}
+\isacommand{lemma}\ n\_nodes\_type\ [simp]:\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes(t)\ \isasymin \ nat"\isanewline
+\ \ \isacommand{by}\ (induct\_tac\ t,\ auto)
+\end{isabelle}
+
+You can even use the \isa{primrec} form with non-recursive datatypes and
+with codatatypes.  Recursive calls are not allowed there, but \isa{primrec}
+still provides a convenient syntax for defining functions by cases, as the
+sketch below illustrates.
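+% is_leaf below is a hypothetical example, not part of the ZF libraries
+The following hypothetical \isa{is\_leaf} function over the binary tree
+datatype \isa{bt} makes no recursive calls at all; it is defined purely by
+cases (a made-up example, used only for illustration):
+\begin{isabelle}
+\isacommand{consts}\ \ is\_leaf\ ::\ "i\ =>\ i"\isanewline
+\isacommand{primrec}\isanewline
+\ \ "is\_leaf(Lf)\ =\ 1"\isanewline
+\ \ "is\_leaf(Br(a,\ l,\ r))\ =\ 0"
+\end{isabelle}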
+
+
+\subsubsection{Example: varying arguments}
+
+All arguments, other than the recursive one, must be the same in each equation
+and in each recursive call.  To get around this restriction, use explicit
+$\lambda$-abstraction and function application.  For example, let us
+define the tail-recursive version of \isa{n\_nodes}, using an 
+accumulating argument for the counter.  The second argument, $k$, varies in
+recursive calls.
+\begin{isabelle}
+\isacommand{consts}\ \ n\_nodes\_aux\ ::\ "i\ =>\ i"\isanewline
+\isacommand{primrec}\isanewline
+\ \ "n\_nodes\_aux(Lf)\ =\ (\isasymlambda k\ \isasymin \ nat.\ k)"\isanewline
+\ \ "n\_nodes\_aux(Br(a,l,r))\ =\ \isanewline
+\ \ \ \ \ \ (\isasymlambda k\ \isasymin \ nat.\ n\_nodes\_aux(r)\ `\ \ (n\_nodes\_aux(l)\ `\ succ(k)))"
+\end{isabelle}
+Now \isa{n\_nodes\_aux(t)\ `\ k} is our function in two arguments. We
+can prove a theorem relating it to \isa{n\_nodes}. Note the quantification
+over \isa{k\ \isasymin \ nat}:
+\begin{isabelle}
+\isacommand{lemma}\ n\_nodes\_aux\_eq\ [rule\_format]:\isanewline
+\ \ \ \ \ "t\ \isasymin \ bt(A)\ ==>\ \isasymforall k\ \isasymin \ nat.\ n\_nodes\_aux(t)`k\ =\ n\_nodes(t)\ \#+\ k"\isanewline
+\ \ \isacommand{by}\ (induct\_tac\ t,\ simp\_all)
+\end{isabelle}
+
+Now, we can use \isa{n\_nodes\_aux} to define a tail-recursive version
+of \isa{n\_nodes}:
+\begin{isabelle}
+\isacommand{constdefs}\isanewline
+\ \ n\_nodes\_tail\ ::\ "i\ =>\ i"\isanewline
+\ \ \ "n\_nodes\_tail(t)\ ==\ n\_nodes\_aux(t)\ `\ 0"
+\end{isabelle}
+It is easy to
+prove that \isa{n\_nodes\_tail} is equivalent to \isa{n\_nodes}:
+\begin{isabelle}
+\isacommand{lemma}\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes\_tail(t)\ =\ n\_nodes(t)"\isanewline
+\ \isacommand{by}\ (simp\ add:\ n\_nodes\_tail\_def\ n\_nodes\_aux\_eq)
+\end{isabelle}
+
+
+
+
+\index{recursion!primitive|)}
+\index{*primrec|)}
+
+
+\section{Inductive and coinductive definitions}
+\index{*inductive|(}
+\index{*coinductive|(}
+
+An {\bf inductive definition} specifies the least set~$R$ closed under given
+rules.  (Applying a rule to elements of~$R$ yields a result within~$R$.)  For
+example, a structural operational semantics is an inductive definition of an
+evaluation relation.  Dually, a {\bf coinductive definition} specifies the
+greatest set~$R$ consistent with given rules.  (Every element of~$R$ can be
+seen as arising by applying a rule to elements of~$R$.)  An important example
+is using bisimulation relations to formalise equivalence of processes and
+infinite data structures.
+
+A theory file may contain any number of inductive and coinductive
+definitions.  They may be intermixed with other declarations; in
+particular, the (co)inductive sets {\bf must} be declared separately as
+constants, and may have mixfix syntax or be subject to syntax translations.
+
+Each (co)inductive definition adds definitions to the theory and also
+proves some theorems.  A coinductive definition behaves identically to the
+analogous inductive definition, except that it yields a coinduction rule
+instead of an induction rule.  The treatment of coinduction is described in
+detail in a separate paper,%
+\footnote{It appeared in CADE~\cite{paulson-CADE}; a longer version is
+  distributed with Isabelle as \emph{A Fixedpoint Approach to 
+ (Co)Inductive and (Co)Datatype Definitions}.}  %
+which you might refer to for background information.
+
+
+\subsection{The syntax of a (co)inductive definition}
+An inductive definition has the form
+\begin{ttbox}\isastyleminor
+inductive
+  domains     {\it domain declarations}
+  intros      {\it introduction rules}
+  monos       {\it monotonicity theorems}
+  con_defs    {\it constructor definitions}
+  type_intros {\it introduction rules for type-checking}
+  type_elims  {\it elimination rules for type-checking}
+\end{ttbox}
+A coinductive definition is identical, but starts with the keyword
+\isa{co\-inductive}.  
+
+The \isa{monos}, \isa{con\_defs}, \isa{type\_intros} and \isa{type\_elims}
+sections are optional.  If present, each is specified as a list of
+theorems, which may contain Isar attributes as usual.
+
+\begin{description}
+\item[\it domain declarations] are items of the form
+  {\it string\/}~\isa{\isasymsubseteq }~{\it string}, associating each recursive set with
+  its domain.  (The domain is some existing set that is large enough to
+  hold the new set being defined.)
+
+\item[\it introduction rules] specify one or more introduction rules in
+  the form {\it ident\/}~{\it string}, where the identifier gives the name of
+  the rule in the result structure.
+
+\item[\it monotonicity theorems] are required for each operator applied to
+  a recursive set in the introduction rules.  There \textbf{must} be a theorem
+  of the form $A\subseteq B\Imp M(A)\subseteq M(B)$, for each premise $t\in M(R_i)$
+  in an introduction rule!
+
+\item[\it constructor definitions] contain definitions of constants
+  appearing in the introduction rules.  The (co)datatype package supplies
+  the constructors' definitions here.  Most (co)inductive definitions omit
+  this section; one exception is the primitive recursive functions example;
+  see theory \isa{Induct/Primrec}.
+  
+\item[\it type\_intros] consists of introduction rules for type-checking the
+  definition: for demonstrating that the new set is included in its domain.
+  (The proof uses depth-first search.)
+
+\item[\it type\_elims] consists of elimination rules for type-checking the
+  definition.  They are presumed to be safe and are applied as often as
+  possible prior to the \isa{type\_intros} search.
+\end{description}
+
+The package has a few restrictions:
+\begin{itemize}
+\item The theory must separately declare the recursive sets as
+  constants.
+
+\item The names of the recursive sets must be identifiers, not infix
+operators.  
+
+\item Side-conditions must not be conjunctions.  However, an introduction rule
+may contain any number of side-conditions.
+
+\item Side-conditions of the form $x=t$, where the variable~$x$ does not
+  occur in~$t$, will be substituted through the rule \isa{mutual\_induct}.
+\end{itemize}
+
+
+\subsection{Example of an inductive definition}
+
+Below, we shall see how Isabelle/ZF defines the finite powerset
+operator.  The first step is to declare the constant~\isa{Fin}.  Then we
+must declare it inductively, with two introduction rules:
+\begin{isabelle}
+\isacommand{consts}\ \ Fin\ ::\ "i=>i"\isanewline
+\isacommand{inductive}\isanewline
+\ \ \isakeyword{domains}\ \ \ "Fin(A)"\ \isasymsubseteq\ "Pow(A)"\isanewline
+\ \ \isakeyword{intros}\isanewline
+\ \ \ \ emptyI:\ \ "0\ \isasymin\ Fin(A)"\isanewline
+\ \ \ \ consI:\ \ \ "[|\ a\ \isasymin\ A;\ \ b\ \isasymin\ Fin(A)\ |]\ ==>\ cons(a,b)\ \isasymin\ Fin(A)"\isanewline
+\ \ \isakeyword{type\_intros}\ \ empty\_subsetI\ cons\_subsetI\ PowI\isanewline
+\ \ \isakeyword{type\_elims}\ \ \ PowD\ [THEN\ revcut\_rl]\end{isabelle}
+The resulting theory contains a name space, called~\isa{Fin}.
+The \isa{Fin}$~A$ introduction rules can be referred to collectively as
+\isa{Fin.intros}, and also individually as \isa{Fin.emptyI} and
+\isa{Fin.consI}.  The induction rule is \isa{Fin.induct}.
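+For example, the new rules immediately support small proofs about finite
+sets; a sketch:
+\begin{isabelle}
+\isacommand{lemma}\ "a\ \isasymin \ A\ ==>\ \isacharbraceleft a\isacharbraceright \ \isasymin \ Fin(A)"\isanewline
+\ \ \isacommand{by}\ (rule\ Fin.consI,\ assumption,\ rule\ Fin.emptyI)
+\end{isabelle}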
+
+The chief problem with making (co)inductive definitions involves type-checking
+the rules.  Sometimes, additional theorems need to be supplied under
+\isa{type_intros} or \isa{type_elims}.  If the package fails when trying
+to prove your introduction rules, then set the flag \ttindexbold{trace_induct}
+to \isa{true} and try again.  (See the manual \emph{A Fixedpoint Approach
+  \ldots} for more discussion of type-checking.)
+
+In the example above, $\isa{Pow}(A)$ is given as the domain of
+$\isa{Fin}(A)$, for obviously every finite subset of~$A$ is a subset
+of~$A$.  However, the inductive definition package can only prove that given a
+few hints.
+Here is the output that results (with the flag set) when the
+\isa{type_intros} and \isa{type_elims} are omitted from the inductive
+definition above:
+\begin{alltt*}\isastyleminor
+Inductive definition Finite.Fin
+Fin(A) ==
+lfp(Pow(A),
+    \%X. {z\isasymin{}Pow(A) . z = 0 | ({\isasymexists}a b. z = cons(a,b) & a\isasymin{}A & b\isasymin{}X)})
+  Proving monotonicity...
+\ttbreak
+  Proving the introduction rules...
+The type-checking subgoal:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+\ttbreak
+The subgoal after monos, type_elims:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+*** prove_goal: tactic failed
+\end{alltt*}
+We see the need to supply theorems to let the package prove
+$\emptyset\in\isa{Pow}(A)$.  Restoring the \isa{type_intros} but not the
+\isa{type_elims}, we again get an error message:
+\begin{alltt*}\isastyleminor
+The type-checking subgoal:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+\ttbreak
+The subgoal after monos, type_elims:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+\ttbreak
+The type-checking subgoal:
+cons(a, b) \isasymin Fin(A)
+ 1. [| a \isasymin A; b \isasymin Fin(A) |] ==> cons(a, b) \isasymin Pow(A)
+\ttbreak
+The subgoal after monos, type_elims:
+cons(a, b) \isasymin Fin(A)
+ 1. [| a \isasymin A; b \isasymin Pow(A) |] ==> cons(a, b) \isasymin Pow(A)
+*** prove_goal: tactic failed
+\end{alltt*}
+The first rule has been type-checked, but the second one has failed.  The
+simplest solution to such problems is to prove the failed subgoal separately
+and to supply it under \isa{type_intros}.  The solution actually used is
+to supply, under \isa{type_elims}, a rule that changes
+$b\in\isa{Pow}(A)$ to $b\subseteq A$; together with \isa{cons_subsetI}
+and \isa{PowI}, it is enough to complete the type-checking.
+
+
+
+\subsection{Further examples}
+
+An inductive definition may involve arbitrary monotonic operators.  Here is a
+standard example: the accessible part of a relation.  Note the use
+of~\isa{Pow} in the introduction rule and the corresponding mention of the
+rule \isa{Pow\_mono} in the \isa{monos} list.  If the desired rule has a
+universally quantified premise, usually the effect can be obtained using
+\isa{Pow}.
+\begin{isabelle}
+\isacommand{consts}\ \ acc\ ::\ "i\ =>\ i"\isanewline
+\isacommand{inductive}\isanewline
+\ \ \isakeyword{domains}\ "acc(r)"\ \isasymsubseteq \ "field(r)"\isanewline
+\ \ \isakeyword{intros}\isanewline
+\ \ \ \ vimage:\ \ "[|\ r-``\isacharbraceleft a\isacharbraceright\ \isasymin\ Pow(acc(r));\ a\ \isasymin \ field(r)\ |]
+\isanewline
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ ==>\ a\ \isasymin \ acc(r)"\isanewline
+\ \ \isakeyword{monos}\ \ Pow\_mono
+\end{isabelle}
+
+Finally, here are some coinductive definitions.  We begin by defining
+lazy (potentially infinite) lists as a codatatype:
+\begin{isabelle}
+\isacommand{consts}\ \ llist\ \ ::\ "i=>i"\isanewline
+\isacommand{codatatype}\isanewline
+\ \ "llist(A)"\ =\ LNil\ |\ LCons\ ("a\ \isasymin \ A",\ "l\ \isasymin \ llist(A)")\isanewline
+\end{isabelle}
+
+The notion of equality on such lists is modelled as a bisimulation:
+\begin{isabelle}
+\isacommand{consts}\ \ lleq\ ::\ "i=>i"\isanewline
+\isacommand{coinductive}\isanewline
+\ \ \isakeyword{domains}\ "lleq(A)"\ <=\ "llist(A)\ *\ llist(A)"\isanewline
+\ \ \isakeyword{intros}\isanewline
+\ \ \ \ LNil:\ \ "<LNil,\ LNil>\ \isasymin \ lleq(A)"\isanewline
+\ \ \ \ LCons:\ "[|\ a\ \isasymin \ A;\ <l,l'>\ \isasymin \ lleq(A)\ |]\ \isanewline
+\ \ \ \ \ \ \ \ \ \ \ \ ==>\ <LCons(a,l),\ LCons(a,l')>\ \isasymin \ lleq(A)"\isanewline
+\ \ \isakeyword{type\_intros}\ \ llist.intros
+\end{isabelle}
+This use of \isa{type_intros} is typical: the relation concerns the
+codatatype \isa{llist}, so naturally the introduction rules for that
+codatatype will be required for type-checking the rules.
+
+The Isabelle distribution contains many other inductive definitions.  Simple
+examples are collected on subdirectory \isa{ZF/Induct}.  The directory
+\isa{Coind} and the theory \isa{ZF/Induct/LList} contain coinductive
+definitions.  Larger examples may be found on other subdirectories of
+\isa{ZF}, such as \isa{IMP} and \isa{Resid}.
+
+
+\subsection{Theorems generated}
+
+Each (co)inductive set defined in a theory file generates a name space
+containing the following elements:
+\begin{ttbox}\isastyleminor
+intros        \textrm{the introduction rules}
+elim          \textrm{the elimination (case analysis) rule}
+induct        \textrm{the standard induction rule}
+mutual_induct \textrm{the mutual induction rule, if needed}
+defs          \textrm{definitions of inductive sets}
+bnd_mono      \textrm{monotonicity property}
+dom_subset    \textrm{inclusion in `bounding set'}
+\end{ttbox}
+Furthermore, each introduction rule is available under its declared
+name. For a codatatype, the component \isa{coinduct} is the coinduction rule,
+replacing the \isa{induct} component.
+
+Recall that the \ttindex{inductive\_cases} declaration generates
+simplified instances of the case analysis rule.  It is as useful for
+inductive definitions as it is for datatypes.  There are many examples
+in the theory
+\isa{Induct/Comb}, which is discussed at length
+elsewhere~\cite{paulson-generic}.  The theory first defines the
+datatype
+\isa{comb} of combinators:
+\begin{alltt*}\isastyleminor
+consts comb :: i
+datatype  "comb" = K
+                 | S
+                 | "#" ("p \isasymin comb", "q \isasymin comb")   (infixl 90)
+\end{alltt*}
+The theory goes on to define contraction and parallel contraction
+inductively.  Then the theory \isa{Induct/Comb.thy} defines special
+cases of contraction, such as this one:
+\begin{isabelle}
+\isacommand{inductive\_cases}\ K\_contractE [elim!]:\ "K -1-> r"
+\end{isabelle}
+The theorem just created is \isa{K -1-> r \ \isasymLongrightarrow \ Q},
+which expresses that the combinator \isa{K} cannot reduce to
+anything.  (From the assumption \isa{K-1->r}, we can conclude any desired
+formula \isa{Q}\@.)  Similar elimination rules for \isa{S} and application are also
+generated. The attribute \isa{elim!}\ shown above supplies the generated
+theorem to the classical reasoner.  This mode of working allows
+effective reasoning about operational semantics.
+
+\index{*coinductive|)} \index{*inductive|)}
+
+
+
+\section{The outer reaches of set theory}
+
+The constructions of the natural numbers and lists use a suite of
+operators for handling recursive function definitions.  I have described
+the developments in detail elsewhere~\cite{paulson-set-II}.  Here is a brief
+summary:
+\begin{itemize}
+  \item Theory \isa{Trancl} defines the transitive closure of a relation
+    (as a least fixedpoint).
+
+  \item Theory \isa{WF} proves the well-founded recursion theorem, using an
+    elegant approach of Tobias Nipkow.  This theorem permits general
+    recursive definitions within set theory.
+
+  \item Theory \isa{Ord} defines the notions of transitive set and ordinal
+    number.  It derives transfinite induction.  A key definition is {\bf
+      less than}: $i<j$ if and only if $i$ and $j$ are both ordinals and
+    $i\in j$.  As a special case, it includes less than on the natural
+    numbers.
+    
+  \item Theory \isa{Epsilon} derives $\varepsilon$-induction and
+    $\varepsilon$-recursion, which are generalisations of transfinite
+    induction and recursion.  It also defines \cdx{rank}$(x)$, which is the
+    least ordinal $\alpha$ such that $x$ is constructed at stage $\alpha$ of
+    the cumulative hierarchy (thus $x\in V@{\alpha+1}$).
+\end{itemize}
+
+Other important theories lead to a theory of cardinal numbers.  They have
+not yet been written up anywhere.  Here is a summary:
+\begin{itemize}
+\item Theory \isa{Rel} defines the basic properties of relations, such as
+  reflexivity, symmetry and transitivity.
+
+\item Theory \isa{EquivClass} develops a theory of equivalence
+  classes, not using the Axiom of Choice.
+
+\item Theory \isa{Order} defines partial orderings, total orderings and
+  wellorderings.
+
+\item Theory \isa{OrderArith} defines orderings on sum and product sets.
+  These can be used to define ordinal arithmetic and have applications to
+  cardinal arithmetic.
+
+\item Theory \isa{OrderType} defines order types.  Every wellordering is
+  equivalent to a unique ordinal, which is its order type.
+
+\item Theory \isa{Cardinal} defines equipollence and cardinal numbers.
+ 
+\item Theory \isa{CardinalArith} defines cardinal addition and
+  multiplication, and proves their elementary laws.  It proves that there
+  is no greatest cardinal.  It also proves a deep result, namely
+  $\kappa\otimes\kappa=\kappa$ for every infinite cardinal~$\kappa$; see
+  Kunen~\cite[page 29]{kunen80}.  None of these results assume the Axiom of
+  Choice, which complicates their proofs considerably.  
+\end{itemize}
+
+The following developments involve the Axiom of Choice (AC):
+\begin{itemize}
+\item Theory \isa{AC} asserts the Axiom of Choice and proves some simple
+  equivalent forms.
+
+\item Theory \isa{Zorn} proves Hausdorff's Maximal Principle, Zorn's Lemma
+  and the Wellordering Theorem, following Abrial and
+  Laffitte~\cite{abrial93}.
+
+\item Theory \isa{Cardinal\_AC} uses AC to prove simplified theorems about
+  the cardinals.  It also proves a theorem needed to justify
+  infinitely branching datatype declarations: if $\kappa$ is an infinite
+  cardinal and $|X(\alpha)| \le \kappa$ for all $\alpha<\kappa$ then
+  $|\union\sb{\alpha<\kappa} X(\alpha)| \le \kappa$.
+
+\item Theory \isa{InfDatatype} proves theorems to justify infinitely
+  branching datatypes.  Arbitrary index sets are allowed, provided their
+  cardinalities have an upper bound.  The theory also justifies some
+  unusual cases of finite branching, involving the finite powerset operator
+  and the finite function space operator.
+\end{itemize}
+
+
+
+\section{The examples directories}
+Directory \isa{ZF/IMP} contains a mechanised version of a semantic
+equivalence proof taken from Winskel~\cite{winskel93}.  It formalises the
+denotational and operational semantics of a simple while-language, then
+proves the two equivalent.  It contains several datatype and inductive
+definitions, and demonstrates their use.
+
+The directory \isa{ZF/ex} contains further developments in ZF set theory.
+Here is an overview; see the files themselves for more details.  I describe
+much of this material in other
+publications~\cite{paulson-set-I,paulson-set-II,paulson-fixedpt-milner}.
+\begin{itemize}
+\item File \isa{misc.ML} contains miscellaneous examples such as
+  Cantor's Theorem, the Schr\"oder-Bernstein Theorem and the `Composition
+  of homomorphisms' challenge~\cite{boyer86}.
+
+\item Theory \isa{Ramsey} proves the finite exponent 2 version of
+  Ramsey's Theorem, following Basin and Kaufmann's
+  presentation~\cite{basin91}.
+
+\item Theory \isa{Integ} develops a theory of the integers as
+  equivalence classes of pairs of natural numbers.
+
+\item Theory \isa{Primrec} develops some computation theory.  It
+  inductively defines the set of primitive recursive functions and presents a
+  proof that Ackermann's function is not primitive recursive.
+
+\item Theory \isa{Primes} defines the Greatest Common Divisor of two
+  natural numbers and the ``divides'' relation.
+
+\item Theory \isa{Bin} defines a datatype for two's complement binary
+  integers, then proves rewrite rules to perform binary arithmetic.  For
+  instance, $1359\times {-}2468 = {-}3354012$ takes 0.3 seconds.
+
+\item Theory \isa{BT} defines the recursive data structure $\isa{bt}(A)$, labelled binary trees.
+
+\item Theory \isa{Term} defines a recursive data structure for terms
+  and term lists.  These are simply finite branching trees.
+
+\item Theory \isa{TF} defines primitives for solving mutually
+  recursive equations over sets.  It constructs sets of trees and forests
+  as an example, including induction and recursion rules that handle the
+  mutual recursion.
+
+\item Theory \isa{Prop} proves soundness and completeness of
+  propositional logic~\cite{paulson-set-II}.  This illustrates datatype
+  definitions, inductive definitions, structural induction and rule
+  induction.
+
+\item Theory \isa{ListN} inductively defines the lists of $n$
+  elements~\cite{paulin-tlca}.
+
+\item Theory \isa{Acc} inductively defines the accessible part of a
+  relation~\cite{paulin-tlca}.
+
+\item Theory \isa{Comb} defines the datatype of combinators and
+  inductively defines contraction and parallel contraction.  It goes on to
+  prove the Church-Rosser Theorem.  This case study follows Camilleri and
+  Melham~\cite{camilleri92}.
+
+\item Theory \isa{LList} defines lazy lists and a coinduction
+  principle for proving equations between them.
+\end{itemize}
+
+
+\section{A proof about powersets}\label{sec:ZF-pow-example}
+To demonstrate high-level reasoning about subsets, let us prove the
+equation ${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.  Compared
+with first-order logic, set theory involves a maze of rules, and theorems
+have many different proofs.  Attempting other proofs of the theorem might
+be instructive.  This proof exploits the lattice properties of
+intersection.  It also uses the monotonicity of the powerset operation,
+from \isa{ZF/mono.ML}:
+\begin{isabelle}
+\tdx{Pow_mono}:     A \isasymsubseteq B ==> Pow(A) \isasymsubseteq Pow(B)
+\end{isabelle}
+We enter the goal and make the first step, which breaks the equation into
+two inclusions by extensionality:\index{*equalityI theorem}
+\begin{isabelle}
+\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ =\ Pow(A)\ \isasyminter \ Pow(B)\isanewline
+\isacommand{apply}\ (rule\ equalityI)\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\ \isasyminter \ Pow(B)\isanewline
+\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+Both inclusions could be tackled straightforwardly using \isa{subsetI}.
+A shorter proof results from noting that intersection forms the greatest
+lower bound:\index{*Int_greatest theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\isanewline
+\ 2.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
+\ 3.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+Subgoal~1 follows by applying the monotonicity of \isa{Pow} to $A\int
+B\subseteq A$; subgoal~2 follows similarly:
+\index{*Int_lower1 theorem}\index{*Int_lower2 theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ Int\_lower1\ [THEN\ Pow\_mono])\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
+\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\isanewline
+\isacommand{apply}\ (rule\ Int\_lower2\ [THEN\ Pow\_mono])\isanewline
+\ 1.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+We are left with the opposite inclusion, which we tackle in the
+straightforward way:\index{*subsetI theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ subsetI)\isanewline
+\ 1.\ \isasymAnd x.\ x\ \isasymin \ Pow(A)\ \isasyminter \ Pow(B)\ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+The subgoal is to show $x\in \isa{Pow}(A\cap B)$ assuming $x\in\isa{Pow}(A)\cap \isa{Pow}(B)$; eliminating this assumption replaces it by
+the two assumptions $x\in\isa{Pow}(A)$ and $x\in\isa{Pow}(B)$.  The rule
+\tdx{IntE} treats the intersection like a conjunction
+instead of unfolding its definition.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ IntE)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+The next step replaces the \isa{Pow} by the subset
+relation~($\subseteq$).\index{*PowI theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ PowI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
+\end{isabelle}
+We perform the same replacement in the assumptions.  This is a good
+demonstration of the tactic \ttindex{drule}:\index{*PowD theorem}
+\begin{isabelle}
+\isacommand{apply}\ (drule\ PowD)+\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
+\end{isabelle}
+The assumptions are that $x$ is a lower bound of both $A$ and~$B$, but
+$A\int B$ is the greatest lower bound:\index{*Int_greatest theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\isanewline
+\ 2.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ B%
+\end{isabelle}
+To conclude the proof, we clear up the trivial subgoals:
+\begin{isabelle}
+\isacommand{apply}\ (assumption+)\isanewline
+\isacommand{done}%
+\end{isabelle}
+
+We could have performed this proof instantly by calling
+\ttindex{blast}:
+\begin{isabelle}
+\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
+\isacommand{by}\ (blast\ intro:\ equalityI)
+\end{isabelle}
+Past researchers regarded this as a difficult proof, as indeed it is if all
+the symbols are replaced by their definitions.
+\goodbreak
+
+\section{Monotonicity of the union operator}
+For another example, we prove that general union is monotonic:
+${C\subseteq D}$ implies $\bigcup(C)\subseteq \bigcup(D)$.  To begin, we
+tackle the inclusion using \tdx{subsetI}:
+\begin{isabelle}
+\isacommand{lemma}\ "C\isasymsubseteq D\ ==>\ Union(C)\
+\isasymsubseteq \ Union(D)"\isanewline
+\isacommand{apply}\ (rule\ subsetI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ \isasymUnion C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
+\end{isabelle}
+Big union is like an existential quantifier --- the occurrence in the
+assumptions must be eliminated early, since it creates parameters.
+\index{*UnionE theorem}
+\begin{isabelle}
+\isacommand{apply}\ (erule\ UnionE)\isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
+\end{isabelle}
+Now we may apply \tdx{UnionI}, which creates an unknown involving the
+parameters.  To show \isa{x\ \isasymin \ \isasymUnion D} it suffices to show that~\isa{x} belongs
+to some element, say~\isa{?B2(x,B)}, of~\isa{D}\@.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ UnionI)\isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ D\isanewline
+\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
+\end{isabelle}
+Combining the rule \tdx{subsetD} with the assumption \isa{C\ \isasymsubseteq \ D} yields 
+$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1.  Note that
+\isa{erule} removes the subset assumption.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ subsetD)\isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ C\isanewline
+\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
+\end{isabelle}
+The rest is routine.  Observe how the first call to \isa{assumption}
+instantiates \isa{?B2(x,B)} to~\isa{B}\@.
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ B%
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+No\ subgoals!\isanewline
+\isacommand{done}%
+\end{isabelle}
+Again, \isa{blast} can prove this theorem in one step.
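+Concretely, the whole proof could be compressed to:
+\begin{isabelle}
+\isacommand{lemma}\ "C\isasymsubseteq D\ ==>\ Union(C)\ \isasymsubseteq \ Union(D)"\isanewline
+\isacommand{by}\ blast
+\end{isabelle}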
+
+The theory \isa{ZF/equalities.thy} has many similar proofs.  Reasoning about
+general intersection can be difficult because of its anomalous behaviour on
+the empty set.  However, \isa{blast} copes well with these.  Here is
+a typical example, borrowed from Devlin~\cite[page 12]{devlin79}:
+\[ a\in C \,\Imp\, \inter@{x\in C} \Bigl(A(x) \int B(x)\Bigr) =        
+       \Bigl(\inter@{x\in C} A(x)\Bigr)  \int  
+       \Bigl(\inter@{x\in C} B(x)\Bigr)  \]
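+Spelled out in Isabelle syntax, a one-line proof of this example should go
+through as well:
+\begin{isabelle}
+\isacommand{lemma}\ "a\ \isasymin \ C\ ==>\ (\isasymInter x\isasymin C.\ A(x)\ Int\ B(x))\ =\isanewline
+\ \ \ \ \ \ \ \ (\isasymInter x\isasymin C.\ A(x))\ Int\ (\isasymInter x\isasymin C.\ B(x))"\isanewline
+\isacommand{by}\ blast
+\end{isabelle}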
+
+\section{Low-level reasoning about functions}
+The derived rules \isa{lamI}, \isa{lamE}, \isa{lam_type}, \isa{beta}
+and \isa{eta} support reasoning about functions in a
+$\lambda$-calculus style.  This is generally easier than regarding
+functions as sets of ordered pairs.  But sometimes we must look at the
+underlying representation, as in the following proof
+of~\tdx{fun_disjoint_apply1}.  This states that if $f$ and~$g$ are
+functions with disjoint domains~$A$ and~$C$, and if $a\in A$, then
+$(f\un g)`a = f`a$:
+\begin{isabelle}
+\isacommand{lemma}\ "[|\ a\ \isasymin \ A;\ \ f\ \isasymin \ A->B;\ \ g\ \isasymin \ C->D;\ \ A\ \isasyminter \ C\ =\ 0\ |]
+\isanewline
+\ \ \ \ \ \ \ \ ==>\ (f\ \isasymunion \ g)`a\ =\ f`a"
+\end{isabelle}
+Using \tdx{apply_equality}, we reduce the equality to reasoning about
+ordered pairs.  The second subgoal is to verify that \isa{f\ \isasymunion \ g} is a function, since
+\isa{Pi(?A,?B)} denotes a dependent function space.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ apply\_equality)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 1.\ }\isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\ \isasymunion \ g\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+We must show that the pair belongs to~$f$ or~$g$; by~\tdx{UnI1} we
+choose~$f$:
+\begin{isabelle}
+\isacommand{apply}\ (rule\ UnI1)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+To show $\pair{a,f`a}\in f$ we use \tdx{apply_Pair}, which is
+essentially the converse of \tdx{apply_equality}:
+\begin{isabelle}
+\isacommand{apply}\ (rule\ apply\_Pair)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ Pi(?A2,?B2)\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ ?A2\isanewline
+\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 3.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+Using the assumptions $f\in A\to B$ and $a\in A$, we solve the two subgoals
+from \tdx{apply_Pair}.  Recall that a $\Pi$-set is merely a generalized
+function space, and observe that~{\tt?A2} gets instantiated to~\isa{A}.
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ A\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 1.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+To construct functions of the form $f\un g$, we apply
+\tdx{fun_disjoint_Un}:
+\begin{isabelle}
+\isacommand{apply}\ (rule\ fun\_disjoint\_Un)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ ?A3\ \isasymrightarrow \ ?B3\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
+\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ ?A3\ \isasyminter \ ?C3\ =\ 0
+\end{isabelle}
+The remaining subgoals are instances of the assumptions.  Again, observe how
+unknowns become instantiated:
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ ?C3\ =\ 0
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ C\ =\ 0
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+See the theories \isa{ZF/func.thy} and \isa{ZF/WF.thy} for more
+examples of reasoning about functions.
+
+\index{set theory|)}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -e
+
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_TOOL" logo ZF
+
+cp "$ISABELLE_HOME/src/Doc/isar.sty" .
+cp "$ISABELLE_HOME/src/Doc/ttbox.sty" .
+cp "$ISABELLE_HOME/src/Doc/manual.bib" .
+cp "$ISABELLE_HOME/src/Doc/Logics/document/syntax.tex" .
+
+"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/document/logics.sty	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1 @@
+% logics.sty : Logics Manuals Page Layout
%
\typeout{Document Style logics. Released 18 August 2003}

\hyphenation{Isa-belle man-u-script man-u-scripts ap-pen-dix mut-u-al-ly}
\hyphenation{data-type data-types co-data-type co-data-types }

%usage: \iflabelundefined{LABEL}{if not defined}{if defined}
\newcommand{\iflabelundefined}[1]{\@ifundefined{r@#1}}


%%%INDEXING  use isa-index to process the index

\newcommand\seealso[2]{\emph{see also} #1}
\usepackage{makeidx}

%index, putting page numbers of definitions in boldface
\def\bold#1{\textbf{#1}}
\newcommand\fnote[1]{#1n}
\newcommand\indexbold[1]{\index{#1|bold}}

% The alternative to \protect\isa in the indexing macros is
% \noexpand\noexpand \noexpand\isa
% need TWO levels of \noexpand to delay the expansion of \isa:
%  the \noexpand\noexpand will leave one \noexpand, to be given to the
%  (still unexpanded) \isa token.  See TeX by Topic, page 122.

%%%% for indexing constants, symbols, theorems, ...
\newcommand\cdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (constant)}}
\newcommand\sdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (symbol)}}

\newcommand\tdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)}}
\newcommand\tdxbold[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)|bold}}

\newcommand\cldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (class)}}
\newcommand\tydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (type)}}
\newcommand\thydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theory)}}

\newcommand\attrdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (attribute)}}
\newcommand\cmmdx[1]{\index{#1@\protect\isacommand{#1} (command)}}
\newcommand\commdx[1]{\isacommand{#1}\index{#1@\protect\isacommand{#1} (command)}}
\newcommand\methdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (method)}}
\newcommand\tooldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (tool)}}
\newcommand\settdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (setting)}}

%set argument in \bf font and index in ROMAN font (for definitions in text!)
\newcommand\bfindex[1]{{\bf#1}\index{#1|bold}\@}

\newcommand\rmindex[1]{{#1}\index{#1}\@}
\newcommand\ttindex[1]{\texttt{#1}\index{#1@\texttt{#1}}\@}
\newcommand\ttindexbold[1]{\texttt{#1}\index{#1@\texttt{#1}|bold}\@}

\newcommand{\indexboldpos}[2]{#1\@}
\newcommand{\ttindexboldpos}[2]{\isa{#1}\@}

%\newtheorem{theorem}{Theorem}[section]
\newtheorem{Exercise}{Exercise}[section]
\newenvironment{exercise}{\begin{Exercise}\rm}{\end{Exercise}}
\newcommand{\ttlbr}{\texttt{[|}}
\newcommand{\ttrbr}{\texttt{|]}}
\newcommand{\ttor}{\texttt{|}}
\newcommand{\ttall}{\texttt{!}}
\newcommand{\ttuniquex}{\texttt{?!}}
\newcommand{\ttEXU}{\texttt{EX!}}
\newcommand{\ttAnd}{\texttt{!!}}

\newcommand{\isasymignore}{}
\newcommand{\isasymimp}{\isasymlongrightarrow}
\newcommand{\isasymImp}{\isasymLongrightarrow}
\newcommand{\isasymFun}{\isasymRightarrow}
\newcommand{\isasymuniqex}{\isamath{\exists!\,}}
\renewcommand{\S}{Sect.\ts}

\renewenvironment{isamarkuptxt}{\begin{isamarkuptext}}{\end{isamarkuptext}}

\newif\ifremarks
\newcommand{\REMARK}[1]{\ifremarks\marginpar{\raggedright\footnotesize#1}\fi}

%names of Isabelle rules
\newcommand{\rulename}[1]{\hfill(#1)}
\newcommand{\rulenamedx}[1]{\hfill(#1\index{#1@\protect\isa{#1} (theorem)|bold})}

%%%% meta-logical connectives

\let\Forall=\bigwedge
\let\Imp=\Longrightarrow
\let\To=\Rightarrow
\newcommand{\Var}[1]{{?\!#1}}

%%% underscores as ordinary characters, not for subscripting
%%  use @ or \sb for subscripting; use \at for @
%%  only works in \tt font
%%  must not make _ an active char; would make \ttindex fail!
\gdef\underscoreoff{\catcode`\@=8\catcode`\_=\other}
\gdef\underscoreon{\catcode`\_=8\makeatother}
\chardef\other=12
\chardef\at=`\@

% alternative underscore
\def\_{\leavevmode\kern.06em\vbox{\hrule height.2ex width.3em}\hskip0.1em}


%%%% ``WARNING'' environment
\def\dbend{\vtop to 0pt{\vss\hbox{\Huge\bf!}\vss}}
\newenvironment{warn}{\medskip\medbreak\begingroup \clubpenalty=10000 
         \small %%WAS\baselineskip=0.9\baselineskip
         \noindent \hangindent\parindent \hangafter=-2 
         \hbox to0pt{\hskip-\hangindent\dbend\hfill}\ignorespaces}%
        {\par\endgroup\medbreak}


%%%% Standard logical symbols
\let\turn=\vdash
\let\conj=\wedge
\let\disj=\vee
\let\imp=\rightarrow
\let\bimp=\leftrightarrow
\newcommand\all[1]{\forall#1.}  %quantification
\newcommand\ex[1]{\exists#1.}
\newcommand{\pair}[1]{\langle#1\rangle}

\newcommand{\lparr}{\mathopen{(\!|}}
\newcommand{\rparr}{\mathclose{|\!)}}
\newcommand{\fs}{\mathpunct{,\,}}
\newcommand{\ty}{\mathrel{::}}
\newcommand{\asn}{\mathrel{:=}}
\newcommand{\more}{\ldots}
\newcommand{\record}[1]{\lparr #1 \rparr}
\newcommand{\dtt}{\mathord.}

\newcommand\lbrakk{\mathopen{[\![}}
\newcommand\rbrakk{\mathclose{]\!]}}
\newcommand\List[1]{\lbrakk#1\rbrakk}  %was \obj
\newcommand\vpile[1]{\begin{array}{c}#1\end{array}}
\newenvironment{matharray}[1]{\[\begin{array}{#1}}{\end{array}\]}
\newcommand{\Text}[1]{\mbox{#1}}

\DeclareMathSymbol{\dshsym}{\mathalpha}{letters}{"2D}
\newcommand{\dsh}{\mathit{\dshsym}}

\let\int=\cap
\let\un=\cup
\let\inter=\bigcap
\let\union=\bigcup

\def\ML{{\sc ml}}
\def\AST{{\sc ast}}

%macros to change the treatment of symbols
\def\relsemicolon{\mathcode`\;="303B}   %treat ; like a relation
\def\binperiod{\mathcode`\.="213A}   %treat . like a binary operator
\def\binvert{\mathcode`\|="226A}     %treat | like a binary operator

%redefinition of \sloppy and \fussy to use \emergencystretch
\def\sloppy{\tolerance2000 \hfuzz.5pt \vfuzz.5pt \emergencystretch=15pt}
\def\fussy{\tolerance200 \hfuzz.1pt \vfuzz.1pt \emergencystretch=0pt}

%non-bf version of description
\def\descrlabel#1{\hspace\labelsep #1}
\def\descr{\list{}{\labelwidth\z@ \itemindent-\leftmargin\let\makelabel\descrlabel}}
\let\enddescr\endlist

% The mathcodes for the letters A, ..., Z, a, ..., z are changed to
% generate text italic rather than math italic by default. This makes
% multi-letter identifiers look better. The mathcode for character c
% is set to |"7000| (variable family) + |"400| (text italic) + |c|.
%
\DeclareSymbolFont{italics}{\encodingdefault}{\rmdefault}{m}{it}%
\def\@setmcodes#1#2#3{{\count0=#1 \count1=#3
        \loop \global\mathcode\count0=\count1 \ifnum \count0<#2
        \advance\count0 by1 \advance\count1 by1 \repeat}}
\@setmcodes{`A}{`Z}{"7\hexnumber@\symitalics41}
\@setmcodes{`a}{`z}{"7\hexnumber@\symitalics61}

%%% \dquotes permits usage of "..." for \hbox{...} 
%%%   also taken from under.sty
{\catcode`\"=\active
\gdef\dquotes{\catcode`\"=\active  \let"=\@mathText}%
\gdef\@mathText#1"{\hbox{\mathTextFont #1\/}}}
\def\mathTextFont{\frenchspacing\tt}
\def\dquotesoff{\catcode`\"=\other}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Logics-ZF/document/root.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,91 @@
+\documentclass[11pt,a4paper]{report}
+\usepackage{isabelle,isabellesym,railsetup}
+\usepackage{graphicx,logics,ttbox,proof,latexsym}
+\usepackage{isar}
+
+\usepackage{pdfsetup}   
+%last package!
+
+\remarkstrue 
+
+%%% to index derived rls: ^\([a-zA-Z0-9][a-zA-Z0-9_]*\)        \\tdx{\1}  
+%%% to index rulenames:   ^ *(\([a-zA-Z0-9][a-zA-Z0-9_]*\),     \\tdx{\1}  
+%%% to index constants:   \\tt \([a-zA-Z0-9][a-zA-Z0-9_]*\)     \\cdx{\1}  
+%%% to deverbify:         \\verb|\([^|]*\)|     \\ttindex{\1}  
+
+\title{\includegraphics[scale=0.5]{isabelle_zf} \\[4ex] 
+       Isabelle's Logics: FOL and ZF}
+
+\author{{\em Lawrence C. Paulson}\\
+        Computer Laboratory \\ University of Cambridge \\
+        \texttt{lcp@cl.cam.ac.uk}\\[3ex] 
+        With Contributions by Tobias Nipkow and Markus Wenzel}
+
+\newcommand\subcaption[1]{\par {\centering\normalsize\sc#1\par}\bigskip
+  \hrule\bigskip}
+\newenvironment{constants}{\begin{center}\small\begin{tabular}{rrrr}}{\end{tabular}\end{center}}
+
+\let\ts=\thinspace
+
+\makeindex
+
+\underscoreoff
+
+\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}  %% {secnumdepth}{2}???
+
+\pagestyle{headings}
+\sloppy
+\binperiod     %%%treat . like a binary operator
+
+
+\isadroptag{theory}
+
+\railtermfont{\isabellestyle{tt}}
+\railnontermfont{\isabellestyle{literal}}
+\railnamefont{\isabellestyle{literal}}
+
+
+\begin{document}
+\maketitle 
+
+\begin{abstract}
+This manual describes Isabelle's formalizations of many-sorted first-order
+logic (\texttt{FOL}) and Zermelo-Fraenkel set theory (\texttt{ZF}).  See the
+\emph{Reference Manual} for general Isabelle commands, and \emph{Introduction
+  to Isabelle} for an overall tutorial.
+
+This manual is part of the earlier Isabelle documentation, 
+which is somewhat superseded by the Isabelle/HOL
+\emph{Tutorial}~\cite{isa-tutorial}. However, the present document is the
+only available documentation for Isabelle's versions of first-order logic
+and set theory. Much of it is concerned with 
+the primitives for conducting proofs 
+using the ML top level.  It has been rewritten to use the Isar proof
+language, but evidence of the old \ML{} orientation remains.
+\end{abstract}
+
+
+\subsubsection*{Acknowledgements} 
+Markus Wenzel made numerous improvements.
+    Philippe de Groote contributed to~ZF.  Philippe No\"el and
+    Martin Coen made many contributions to~ZF.  The research has 
+    been funded by the EPSRC (grants GR/G53279, GR/H40570, GR/K57381,
+    GR/K77051, GR/M75440) and by ESPRIT (projects 3245:
+    Logical Frameworks, and 6453: Types) and by the DFG Schwerpunktprogramm
+    \emph{Deduktion}.
+    
+\pagenumbering{roman} \tableofcontents \cleardoublepage
+\pagenumbering{arabic} 
+\setcounter{page}{1} 
+\input{syntax}
+\input{FOL}
+\input{ZF}
+
+\isabellestyle{literal}
+\input{ZF_Isar}
+\isabellestyle{tt}
+
+\bibliographystyle{plain}
+\bibliography{manual}
+\printindex
+\end{document}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/Basics.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,145 @@
+(*<*)
+theory Basics
+imports Main
+begin
+(*>*)
+text{*
+This chapter introduces HOL as a functional programming language and shows
+how to prove properties of functional programs by induction.
+
+\section{Basics}
+
+\subsection{Types, Terms and Formulas}
+\label{sec:TypesTermsForms}
+
+HOL is a typed logic whose type system resembles that of functional
+programming languages. Thus there are
+\begin{description}
+\item[base types,] 
+in particular @{typ bool}, the type of truth values,
+@{typ nat}, the type of natural numbers ($\mathbb{N}$), and \indexed{@{typ int}}{int},
+the type of mathematical integers ($\mathbb{Z}$).
+\item[type constructors,]
+ in particular @{text list}, the type of
+lists, and @{text set}, the type of sets. Type constructors are written
+postfix, i.e., after their arguments. For example,
+@{typ "nat list"} is the type of lists whose elements are natural numbers.
+\item[function types,]
+denoted by @{text"\<Rightarrow>"}.
+\item[type variables,]
+  denoted by @{typ 'a}, @{typ 'b} etc., just like in ML\@.
+\end{description}
+Note that @{typ"'a \<Rightarrow> 'b list"} means @{typ[source]"'a \<Rightarrow> ('b list)"},
+not @{typ"('a \<Rightarrow> 'b) list"}: postfix type constructors have precedence
+over @{text"\<Rightarrow>"}.
+
+\conceptidx{Terms}{term} are formed as in functional programming by
+applying functions to arguments. If @{text f} is a function of type
+@{text"\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2"} and @{text t} is a term of type
+@{text"\<tau>\<^sub>1"} then @{term"f t"} is a term of type @{text"\<tau>\<^sub>2"}. We write @{text "t :: \<tau>"} to mean that term @{text t} has type @{text \<tau>}.
+
+\begin{warn}
+There are many predefined infix symbols like @{text "+"} and @{text"\<le>"}.
+The name of the corresponding binary function is @{term"op +"},
+not just @{text"+"}. That is, @{term"x + y"} is nice surface syntax
+(``syntactic sugar'') for \noquotes{@{term[source]"op + x y"}}.
+\end{warn}
+
+HOL also supports some basic constructs from functional programming:
+\begin{quote}
+@{text "(if b then t\<^sub>1 else t\<^sub>2)"}\\
+@{text "(let x = t in u)"}\\
+@{text "(case t of pat\<^sub>1 \<Rightarrow> t\<^sub>1 | \<dots> | pat\<^sub>n \<Rightarrow> t\<^sub>n)"}
+\end{quote}
+\begin{warn}
+The above three constructs must always be enclosed in parentheses
+if they occur inside other constructs.
+\end{warn}
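+For example, @{text"(if b then x else y) + 1"} needs the parentheses:
+without them, @{text"if b then x else y + 1"} would be read as
+@{text"if b then x else (y + 1)"}.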
+Terms may also contain @{text "\<lambda>"}-abstractions. For example,
+@{term "\<lambda>x. x"} is the identity function.
+
+\conceptidx{Formulas}{formula} are terms of type @{text bool}.
+There are the basic constants @{term True} and @{term False} and
+the usual logical connectives (in decreasing order of precedence):
+@{text"\<not>"}, @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}.
+
+\conceptidx{Equality}{equality} is available in the form of the infix function @{text "="}
+of type @{typ "'a \<Rightarrow> 'a \<Rightarrow> bool"}. It also works for formulas, where
+it means ``if and only if''.
+
+\conceptidx{Quantifiers}{quantifier} are written @{prop"\<forall>x. P"} and @{prop"\<exists>x. P"}.
+
+Isabelle automatically computes the type of each variable in a term. This is
+called \concept{type inference}.  Despite type inference, it is sometimes
+necessary to attach an explicit \concept{type constraint} (or \concept{type
+annotation}) to a variable or term.  The syntax is @{text "t :: \<tau>"} as in
+\mbox{\noquotes{@{prop[source] "m < (n::nat)"}}}. Type constraints may be
+needed to
+disambiguate terms involving overloaded functions such as @{text "+"}, @{text
+"*"} and @{text"\<le>"}.
+
+Finally there are the universal quantifier @{text"\<And>"}\index{$4@\isasymAnd} and the implication
+@{text"\<Longrightarrow>"}\index{$3@\isasymLongrightarrow}. They are part of the Isabelle framework, not the logic
+HOL. Logically, they agree with their HOL counterparts @{text"\<forall>"} and
+@{text"\<longrightarrow>"}, but operationally they behave differently. This will become
+clearer as we go along.
+\begin{warn}
+Right-arrows of all kinds always associate to the right. In particular,
+the formula
+@{text"A\<^sub>1 \<Longrightarrow> A\<^sub>2 \<Longrightarrow> A\<^sub>3"} means @{text "A\<^sub>1 \<Longrightarrow> (A\<^sub>2 \<Longrightarrow> A\<^sub>3)"}.
+The (Isabelle specific) notation \mbox{@{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}}
+is short for the iterated implication \mbox{@{text"A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> A"}}.
+Sometimes we also employ inference rule notation:
+\inferrule{\mbox{@{text "A\<^sub>1"}}\\ \mbox{@{text "\<dots>"}}\\ \mbox{@{text "A\<^sub>n"}}}
+{\mbox{@{text A}}}
+\end{warn}
+
+
+\subsection{Theories}
+\label{sec:Basic:Theories}
+
+Roughly speaking, a \concept{theory} is a named collection of types,
+functions, and theorems, much like a module in a programming language.
+All the Isabelle text that you ever type needs to go into a theory.
+The general format of a theory @{text T} is
+\begin{quote}
+\indexed{\isacom{theory}}{theory} @{text T}\\
+\indexed{\isacom{imports}}{imports} @{text "T\<^sub>1 \<dots> T\<^sub>n"}\\
+\isacom{begin}\\
+\emph{definitions, theorems and proofs}\\
+\isacom{end}
+\end{quote}
+where @{text "T\<^sub>1 \<dots> T\<^sub>n"} are the names of existing
+theories that @{text T} is based on. The @{text "T\<^sub>i"} are the
+direct \conceptidx{parent theories}{parent theory} of @{text T}.
+Everything defined in the parent theories (and their parents, recursively) is
+automatically visible. Each theory @{text T} must
+reside in a \concept{theory file} named @{text "T.thy"}.
+
+\begin{warn}
+HOL contains a theory @{theory Main}\index{Main@@{theory Main}}, the union of all the basic
+predefined theories like arithmetic, lists, sets, etc.
+Unless you know what you are doing, always include @{text Main}
+as a direct or indirect parent of all your theories.
+\end{warn}
+
+In addition to the theories that come with the Isabelle/HOL distribution
+(see @{url "http://isabelle.in.tum.de/library/HOL/"})
+there is also the \emph{Archive of Formal Proofs}
+at @{url "http://afp.sourceforge.net"}, a growing collection of Isabelle theories
+that everybody can contribute to.
+
+\subsection{Quotation Marks}
+
+The textual definition of a theory follows a fixed syntax with keywords like
+\isacommand{begin} and \isacommand{datatype}.  Embedded in this syntax are
+the types and formulas of HOL.  To distinguish the two levels, everything
+HOL-specific (terms and types) must be enclosed in quotation marks:
+\texttt{"}\dots\texttt{"}. To lessen this burden, quotation marks around a
+single identifier can be dropped.  When Isabelle prints a syntax error
+message, it refers to the HOL syntax as the \concept{inner syntax} and the
+enclosing theory language as the \concept{outer syntax}.
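+For example, a compound type such as \texttt{"}@{text"nat \<Rightarrow> bool"}\texttt{"}
+must be quoted, whereas the single identifier @{text nat} may appear without
+quotation marks.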
+*}
+(*<*)
+end
+(*>*)
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/Bool_nat_list.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,458 @@
+(*<*)
+theory Bool_nat_list
+imports Main
+begin
+(*>*)
+
+text{*
+\vspace{-4ex}
+\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and @{text list}}{Types bool, nat and list}}
+
+These are the most important predefined types. We go through them one by one.
+Based on examples we learn how to define (possibly recursive) functions and
+prove theorems about them by induction and simplification.
+
+\subsection{Type \indexed{@{typ bool}}{bool}}
+
+The type of boolean values is a predefined datatype
+@{datatype[display] bool}
+with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
+with many predefined functions:  @{text "\<not>"}, @{text "\<and>"}, @{text "\<or>"}, @{text
+"\<longrightarrow>"} etc. Here is how conjunction could be defined by pattern matching:
+*}
+
+fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
+"conj True True = True" |
+"conj _ _ = False"
+
+text{* Both the datatype and function definitions roughly follow the syntax
+of functional programming languages.
+
+\subsection{Type \indexed{@{typ nat}}{nat}}
+
+Natural numbers are another predefined datatype:
+@{datatype[display] nat}\index{Suc@@{const Suc}}
+All values of type @{typ nat} are generated by the constructors
+@{text 0} and @{const Suc}. Thus the values of type @{typ nat} are
+@{text 0}, @{term"Suc 0"}, @{term"Suc(Suc 0)"} etc.
+There are many predefined functions: @{text "+"}, @{text "*"}, @{text
+"\<le>"}, etc. Here is how you could define your own addition:
+*}
+
+fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
+"add 0 n = n" |
+"add (Suc m) n = Suc(add m n)"
+
+text{* And here is a proof of the fact that @{prop"add m 0 = m"}: *}
+
+lemma add_02: "add m 0 = m"
+apply(induction m)
+apply(auto)
+done
+(*<*)
+lemma "add m 0 = m"
+apply(induction m)
+(*>*)
+txt{* The \isacom{lemma} command starts the proof and gives the lemma
+a name, @{text add_02}. Properties of recursively defined functions
+need to be established by induction in most cases.
+Command \isacom{apply}@{text"(induction m)"} instructs Isabelle to
+start a proof by induction on @{text m}. In response, it will show the
+following proof state:
+@{subgoals[display,indent=0]}
+The numbered lines are known as \emph{subgoals}.
+The first subgoal is the base case, the second one the induction step.
+The prefix @{text"\<And>m."} is Isabelle's way of saying ``for an arbitrary but fixed @{text m}''. The @{text"\<Longrightarrow>"} separates assumptions from the conclusion.
+The command \isacom{apply}@{text"(auto)"} instructs Isabelle to try
+and prove all subgoals automatically, essentially by simplifying them.
+Because both subgoals are easy, Isabelle can do it.
+The base case @{prop"add 0 0 = 0"} holds by definition of @{const add},
+and the induction step is almost as simple:
+@{text"add\<^raw:~>(Suc m) 0 = Suc(add m 0) = Suc m"}
+using first the definition of @{const add} and then the induction hypothesis.
+In summary, both subproofs rely on simplification with function definitions and
+the induction hypothesis.
+As a result of that final \isacom{done}, Isabelle associates the lemma
+just proved with its name. You can now inspect the lemma with the command
+*}
+
+thm add_02
+
+txt{* which displays @{thm[show_question_marks,display] add_02} The free
+variable @{text m} has been replaced by the \concept{unknown}
+@{text"?m"}. There is no logical difference between the two but an
+operational one: unknowns can be instantiated, which is what you want after
+some lemma has been proved.
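+For example, when @{thm[source] add_02} is used in a later proof, @{text"?m"}
+can be instantiated with any term of type @{typ nat}, say @{text"Suc 0"},
+yielding the instance @{text"add (Suc 0) 0 = Suc 0"}.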
+
+Note that there is also a proof method @{text induct}, which behaves almost
+like @{text induction}; the difference is explained in \autoref{ch:Isar}.
+
+\begin{warn}
+Terminology: We use \concept{lemma}, \concept{theorem} and \concept{rule}
+interchangeably for propositions that have been proved.
+\end{warn}
+\begin{warn}
+  Numerals (@{text 0}, @{text 1}, @{text 2}, \dots) and most of the standard
+  arithmetic operations (@{text "+"}, @{text "-"}, @{text "*"}, @{text"\<le>"},
+  @{text"<"} etc) are overloaded: they are available
+  not just for natural numbers but for other types as well.
+  For example, given the goal @{text"x + 0 = x"}, there is nothing to indicate
+  that you are talking about natural numbers. Hence Isabelle can only infer
+  that @{term x} is of some arbitrary type where @{text 0} and @{text"+"}
+  exist. As a consequence, you will be unable to prove the goal.
+%  To alert you to such pitfalls, Isabelle flags numerals without a
+%  fixed type in its output: @ {prop"x+0 = x"}.
+  In this particular example, you need to include
+  an explicit type constraint, for example @{text"x+0 = (x::nat)"}. If there
+  is enough contextual information this may not be necessary: @{prop"Suc x =
+  x"} automatically implies @{text"x::nat"} because @{term Suc} is not
+  overloaded.
+\end{warn}
+
+\subsubsection{An Informal Proof}
+
+Above we gave some terse informal explanation of the proof of
+@{prop"add m 0 = m"}. A more detailed informal exposition of the lemma
+might look like this:
+\bigskip
+
+\noindent
+\textbf{Lemma} @{prop"add m 0 = m"}
+
+\noindent
+\textbf{Proof} by induction on @{text m}.
+\begin{itemize}
+\item Case @{text 0} (the base case): @{prop"add 0 0 = 0"}
+  holds by definition of @{const add}.
+\item Case @{term"Suc m"} (the induction step):
+  We assume @{prop"add m 0 = m"}, the induction hypothesis (IH),
+  and we need to show @{text"add (Suc m) 0 = Suc m"}.
+  The proof is as follows:\smallskip
+
+  \begin{tabular}{@ {}rcl@ {\quad}l@ {}}
+  @{term "add (Suc m) 0"} &@{text"="}& @{term"Suc(add m 0)"}
+  & by definition of @{text add}\\
+              &@{text"="}& @{term "Suc m"} & by IH
+  \end{tabular}
+\end{itemize}
+Throughout this book, \concept{IH} will stand for ``induction hypothesis''.
+
+We have now seen three proofs of @{prop"add m 0 = m"}: the Isabelle one, the
+terse four lines explaining the base case and the induction step, and just now a
+model of a traditional inductive proof. The three proofs differ in the level
+of detail given and the intended reader: the Isabelle proof is for the
+machine, the informal proofs are for humans. Although this book concentrates
+on Isabelle proofs, it is important to be able to rephrase those proofs
+as informal text comprehensible to a reader familiar with traditional
+mathematical proofs. Later on we will introduce an Isabelle proof language
+that is closer to traditional informal mathematical language and is often
+directly readable.
+
+\subsection{Type \indexed{@{text list}}{list}}
+
+Although lists are already predefined, we define our own copy just for
+demonstration purposes:
+*}
+(*<*)
+apply(auto)
+done 
+declare [[names_short]]
+(*>*)
+datatype 'a list = Nil | Cons 'a "'a list"
+
+text{*
+\begin{itemize}
+\item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
+\item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
+Hence all lists are of the form @{const Nil}, or @{term"Cons x Nil"},
+or @{term"Cons x (Cons y Nil)"} etc.
+\item \isacom{datatype} requires no quotation marks on the
+left-hand side, but on the right-hand side each of the argument
+types of a constructor needs to be enclosed in quotation marks, unless
+it is just an identifier (e.g.\ @{typ nat} or @{typ 'a}).
+\end{itemize}
+We also define two standard functions, append and reverse: *}
+
+fun app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
+"app Nil ys = ys" |
+"app (Cons x xs) ys = Cons x (app xs ys)"
+
+fun rev :: "'a list \<Rightarrow> 'a list" where
+"rev Nil = Nil" |
+"rev (Cons x xs) = app (rev xs) (Cons x Nil)"
+
+text{* By default, variables @{text xs}, @{text ys} and @{text zs} are of
+@{text list} type.
+
+Command \indexed{\isacommand{value}}{value} evaluates a term. For example, *}
+
+value "rev(Cons True (Cons False Nil))"
+
+text{* yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too: *}
+
+value "rev(Cons a (Cons b Nil))"
+
+text{* yields @{value "rev(Cons a (Cons b Nil))"}.
+\medskip
+
+Figure~\ref{fig:MyList} shows the theory created so far.
+Because @{text list}, @{const Nil}, @{const Cons} etc are already predefined,
+ Isabelle prints qualified (long) names when executing this theory, for example, @{text MyList.Nil}
+ instead of @{const Nil}.
+ To suppress the qualified names you can insert the command
+ \texttt{declare [[names\_short]]}.
+ This is not recommended in general but just for this unusual example.
+% Notice where the
+%quotations marks are needed that we mostly sweep under the carpet.  In
+%particular, notice that \isacom{datatype} requires no quotation marks on the
+%left-hand side, but that on the right-hand side each of the argument
+%types of a constructor needs to be enclosed in quotation marks.
+
+\begin{figure}[htbp]
+\begin{alltt}
+\input{MyList.thy}\end{alltt}
+\caption{A Theory of Lists}
+\label{fig:MyList}
+\end{figure}
+
+\subsubsection{Structural Induction for Lists}
+
+Just as for natural numbers, there is a proof principle of induction for
+lists. Induction over a list is essentially induction over the length of
+the list, although the length remains implicit. To prove that some property
+@{text P} holds for all lists @{text xs}, i.e.\ \mbox{@{prop"P(xs)"}},
+you need to prove
+\begin{enumerate}
+\item the base case @{prop"P(Nil)"} and
+\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed @{text x} and @{text xs}.
+\end{enumerate}
+This is often called \concept{structural induction} for lists.
+
+\subsection{The Proof Process}
+
+We will now demonstrate the typical proof process, which involves
+the formulation and proof of auxiliary lemmas.
+Our goal is to show that reversing a list twice produces the original
+list. *}
+
+theorem rev_rev [simp]: "rev(rev xs) = xs"
+
+txt{* Commands \isacom{theorem} and \isacom{lemma} are
+interchangeable and merely indicate the importance we attach to a
+proposition. Via the bracketed attribute @{text simp} we also tell Isabelle
+to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
+involving simplification will replace occurrences of @{term"rev(rev xs)"} by
+@{term"xs"}. The proof is by induction: *}
+
+apply(induction xs)
+
+txt{*
+As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
+@{subgoals[display,indent=0,margin=65]}
+Let us try to solve both goals automatically:
+*}
+
+apply(auto)
+
+txt{*Subgoal~1 is proved, and disappears; the simplified version
+of subgoal~2 becomes the new subgoal~1:
+@{subgoals[display,indent=0,margin=70]}
+In order to simplify this subgoal further, a lemma suggests itself.
+
+\subsubsection{A First Lemma}
+
+We insert the following lemma in front of the main theorem:
+*}
+(*<*)
+oops
+(*>*)
+lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
+
+txt{* There are two variables that we could induct on: @{text xs} and
+@{text ys}. Because @{const app} is defined by recursion on
+the first argument, @{text xs} is the correct one:
+*}
+
+apply(induction xs)
+
+txt{* This time not even the base case is solved automatically: *}
+apply(auto)
+txt{*
+\vspace{-5ex}
+@{subgoals[display,goals_limit=1]}
+Again, we need to abandon this proof attempt and prove another simple lemma
+first.
+
+\subsubsection{A Second Lemma}
+
+We again try the canonical proof procedure:
+*}
+(*<*)
+oops
+(*>*)
+lemma app_Nil2 [simp]: "app xs Nil = xs"
+apply(induction xs)
+apply(auto)
+done
+
+text{*
+Thankfully, this worked.
+Now we can continue with our stuck proof attempt of the first lemma:
+*}
+
+lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
+apply(induction xs)
+apply(auto)
+
+txt{*
+We find that this time @{text"auto"} solves the base case, but the
+induction step merely simplifies to
+@{subgoals[display,indent=0,goals_limit=1]}
+The missing lemma is associativity of @{const app},
+which we insert in front of the failed lemma @{text rev_app}.
+
+\subsubsection{Associativity of @{const app}}
+
+The canonical proof procedure succeeds without further ado:
+*}
+(*<*)oops(*>*)
+lemma app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"
+apply(induction xs)
+apply(auto)
+done
+(*<*)
+lemma rev_app [simp]: "rev(app xs ys) = app (rev ys)(rev xs)"
+apply(induction xs)
+apply(auto)
+done
+
+theorem rev_rev [simp]: "rev(rev xs) = xs"
+apply(induction xs)
+apply(auto)
+done
+(*>*)
+text{*
+Finally the proofs of @{thm[source] rev_app} and @{thm[source] rev_rev}
+succeed, too.
+
+\subsubsection{Another Informal Proof}
+
+Here is the informal proof of associativity of @{const app}
+corresponding to the Isabelle proof above.
+\bigskip
+
+\noindent
+\textbf{Lemma} @{prop"app (app xs ys) zs = app xs (app ys zs)"}
+
+\noindent
+\textbf{Proof} by induction on @{text xs}.
+\begin{itemize}
+\item Case @{text Nil}: \ @{prop"app (app Nil ys) zs = app ys zs"} @{text"="}
+  \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of @{text app}.
+\item Case @{text"Cons x xs"}: We assume
+  \begin{center} \hfill @{term"app (app xs ys) zs"} @{text"="}
+  @{term"app xs (app ys zs)"} \hfill (IH) \end{center}
+  and we need to show
+  \begin{center} @{prop"app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)"}.\end{center}
+  The proof is as follows:\smallskip
+
+  \begin{tabular}{@ {}l@ {\quad}l@ {}}
+  @{term"app (app (Cons x xs) ys) zs"}\\
+  @{text"= app (Cons x (app xs ys)) zs"} & by definition of @{text app}\\
+  @{text"= Cons x (app (app xs ys) zs)"} & by definition of @{text app}\\
+  @{text"= Cons x (app xs (app ys zs))"} & by IH\\
+  @{text"= app (Cons x xs) (app ys zs)"} & by definition of @{text app}
+  \end{tabular}
+\end{itemize}
+\medskip
+
+\noindent Didn't we say earlier that all proofs are by simplification? But
+in both cases, going from left to right, the last equality step is not a
+simplification at all! In the base case it is @{prop"app ys zs = app Nil (app
+ys zs)"}. It appears almost mysterious because we suddenly complicate the
+term by appending @{text Nil} on the left. What is really going on is this:
+when proving some equality \mbox{@{prop"s = t"}}, both @{text s} and @{text t} are
+simplified until they ``meet in the middle''. This heuristic for equality proofs
+works well for a functional programming context like ours. In the base case
+both @{term"app (app Nil ys) zs"} and @{term"app Nil (app
+ys zs)"} are simplified to @{term"app ys zs"}, the term in the middle.
+
+\subsection{Predefined Lists}
+\label{sec:predeflists}
+
+Isabelle's predefined lists are the same as the ones above, but with
+more syntactic sugar:
+\begin{itemize}
+\item @{text "[]"} is \indexed{@{const Nil}}{Nil},
+\item @{term"x # xs"} is @{term"Cons x xs"}\index{Cons@@{const Cons}},
+\item @{text"[x\<^sub>1, \<dots>, x\<^sub>n]"} is @{text"x\<^sub>1 # \<dots> # x\<^sub>n # []"}, and
+\item @{term "xs @ ys"} is @{term"app xs ys"}.
+\end{itemize}
+There is also a large library of predefined functions.
+The most important ones are the length function
+@{text"length :: 'a list \<Rightarrow> nat"}\index{length@@{const length}} (with the obvious definition),
+and the \indexed{@{const map}}{map} function that applies a function to all elements of a list:
+\begin{isabelle}
+\isacom{fun} @{const map} @{text"::"} @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"}\\
+@{text"\""}@{thm list.map(1)}@{text"\" |"}\\
+@{text"\""}@{thm list.map(2)}@{text"\""}
+\end{isabelle}
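+For example, @{text"map Suc [1, 2, 3]"} evaluates to @{text"[2, 3, 4]"}.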
+
+\ifsem
+Also useful are the \concept{head} of a list, its first element,
+and the \concept{tail}, the rest of the list:
+\begin{isabelle}\index{hd@@{const hd}}
+\isacom{fun} @{text"hd :: 'a list \<Rightarrow> 'a"}\\
+@{prop"hd(x#xs) = x"}
+\end{isabelle}
+\begin{isabelle}\index{tl@@{const tl}}
+\isacom{fun} @{text"tl :: 'a list \<Rightarrow> 'a list"}\\
+@{prop"tl [] = []"} @{text"|"}\\
+@{prop"tl(x#xs) = xs"}
+\end{isabelle}
+Note that since HOL is a logic of total functions, @{term"hd []"} is defined,
+but we do not know what the result is. That is, @{term"hd []"} is not undefined
+but underdefined.
+\fi
+%
+
+From now on lists are always the predefined lists.
+
+
+\subsection*{Exercises}
+
+\begin{exercise}
+Use the \isacom{value} command to evaluate the following expressions:
+@{term[source] "1 + (2::nat)"}, @{term[source] "1 + (2::int)"},
+@{term[source] "1 - (2::nat)"} and @{term[source] "1 - (2::int)"}.
+\end{exercise}
+
+\begin{exercise}
+Start from the definition of @{const add} given above.
+Prove that @{const add} is associative and commutative.
+Define a recursive function @{text double} @{text"::"} @{typ"nat \<Rightarrow> nat"}
+and prove @{prop"double m = add m m"}.
+\end{exercise}
+
+\begin{exercise}
+Define a function @{text"count ::"} @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
+that counts the number of occurrences of an element in a list. Prove
+@{prop"count x xs \<le> length xs"}.
+\end{exercise}
+
+\begin{exercise}
+Define a recursive function @{text "snoc ::"} @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
+that appends an element to the end of a list. With the help of @{text snoc}
+define a recursive function @{text "reverse ::"} @{typ"'a list \<Rightarrow> 'a list"}
+that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}.
+\end{exercise}
+
+\begin{exercise}
+Define a recursive function @{text "sum ::"} @{typ"nat \<Rightarrow> nat"} such that
+\mbox{@{text"sum n"}} @{text"="} @{text"0 + ... + n"} and prove
+@{prop" sum(n::nat) = n * (n+1) div 2"}.
+\end{exercise}
+*}
+(*<*)
+end
+(*>*)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/Isar.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1124 @@
+(*<*)
+theory Isar
+imports LaTeXsugar
+begin
+declare [[quick_and_dirty]]
+(*>*)
+text{*
+Apply-scripts are unreadable and hard to maintain. The language of choice
+for larger proofs is \concept{Isar}. The two key features of Isar are:
+\begin{itemize}
+\item It is structured, not linear.
+\item It is readable without running it because
+you need to state what you are proving at any given point.
+\end{itemize}
+Whereas apply-scripts are like assembly language programs, Isar proofs
+are like structured programs with comments. A typical Isar proof looks like this:
+*}text{*
+\begin{tabular}{@ {}l}
+\isacom{proof}\\
+\quad\isacom{assume} @{text"\""}$\mathit{formula}_0$@{text"\""}\\
+\quad\isacom{have} @{text"\""}$\mathit{formula}_1$@{text"\""} \quad\isacom{by} @{text simp}\\
+\quad\vdots\\
+\quad\isacom{have} @{text"\""}$\mathit{formula}_n$@{text"\""} \quad\isacom{by} @{text blast}\\
+\quad\isacom{show} @{text"\""}$\mathit{formula}_{n+1}$@{text"\""} \quad\isacom{by} @{text \<dots>}\\
+\isacom{qed}
+\end{tabular}
+*}text{*
+It proves $\mathit{formula}_0 \Longrightarrow \mathit{formula}_{n+1}$
+(provided each proof step succeeds).
+The intermediate \isacom{have} statements are merely stepping stones
+on the way towards the \isacom{show} statement that proves the actual
+goal. In more detail, this is the Isar core syntax:
+\medskip
+
+\begin{tabular}{@ {}lcl@ {}}
+\textit{proof} &=& \indexed{\isacom{by}}{by} \textit{method}\\
+      &$\mid$& \indexed{\isacom{proof}}{proof} [\textit{method}] \ \textit{step}$^*$ \ \indexed{\isacom{qed}}{qed}
+\end{tabular}
+\medskip
+
+\begin{tabular}{@ {}lcl@ {}}
+\textit{step} &=& \indexed{\isacom{fix}}{fix} \textit{variables} \\
+      &$\mid$& \indexed{\isacom{assume}}{assume} \textit{proposition} \\
+      &$\mid$& [\indexed{\isacom{from}}{from} \textit{fact}$^+$] (\indexed{\isacom{have}}{have} $\mid$ \indexed{\isacom{show}}{show}) \ \textit{proposition} \ \textit{proof}
+\end{tabular}
+\medskip
+
+\begin{tabular}{@ {}lcl@ {}}
+\textit{proposition} &=& [\textit{name}:] @{text"\""}\textit{formula}@{text"\""}
+\end{tabular}
+\medskip
+
+\begin{tabular}{@ {}lcl@ {}}
+\textit{fact} &=& \textit{name} \ $\mid$ \ \dots
+\end{tabular}
+\medskip
+
+\noindent A proof can either be an atomic \isacom{by} with a single proof
+method which must finish off the statement being proved, for example @{text
+auto}.  Or it can be a \isacom{proof}--\isacom{qed} block of multiple
+steps. Such a block can optionally begin with a proof method that indicates
+how to start off the proof, e.g.\ \mbox{@{text"(induction xs)"}}.
+
+A step either assumes a proposition or states a proposition
+together with its proof. The optional \isacom{from} clause
+indicates which facts are to be used in the proof.
+Intermediate propositions are stated with \isacom{have}, the overall goal
+with \isacom{show}. A step can also introduce new local variables with
+\isacom{fix}. Logically, \isacom{fix} introduces @{text"\<And>"}-quantified
+variables, \isacom{assume} introduces the assumption of an implication
+(@{text"\<Longrightarrow>"}) and \isacom{have}/\isacom{show} the conclusion.
+
+Propositions are optionally named formulas. These names can be referred to in
+later \isacom{from} clauses. In the simplest case, a fact is such a name.
+But facts can also be composed with @{text OF} and @{text of} as shown in
+\S\ref{sec:forward-proof}---hence the \dots\ in the above grammar.  Note
+that assumptions, intermediate \isacom{have} statements and global lemmas all
+have the same status and are thus collectively referred to as
+\conceptidx{facts}{fact}.
+
+Fact names can stand for whole lists of facts. For example, if @{text f} is
+defined by command \isacom{fun}, @{text"f.simps"} refers to the whole list of
+recursion equations defining @{text f}. Individual facts can be selected by
+writing @{text"f.simps(2)"}, whole sublists by @{text"f.simps(2-4)"}.
+
+
+\section{Isar by Example}
+
+We show a number of proofs of Cantor's theorem that a function from a set to
+its powerset cannot be surjective, illustrating various features of Isar. The
+constant @{const surj} is predefined.
+*}
+
+lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
+proof
+  assume 0: "surj f"
+  from 0 have 1: "\<forall>A. \<exists>a. A = f a" by(simp add: surj_def)
+  from 1 have 2: "\<exists>a. {x. x \<notin> f x} = f a" by blast
+  from 2 show "False" by blast
+qed
+
+text{*
+The \isacom{proof} command lacks an explicit method specifying how to perform
+the proof. In such cases Isabelle tries to use some standard introduction
+rule, in the above case for @{text"\<not>"}:
+\[
+\inferrule{
+\mbox{@{thm (prem 1) notI}}}
+{\mbox{@{thm (concl) notI}}}
+\]
+In order to prove @{prop"~ P"}, assume @{text P} and show @{text False}.
+Thus we may assume @{prop"surj f"}. The proof shows that names of propositions
+may be (single!) digits---meaningful names are hard to invent and are often
+not necessary. Both \isacom{have} steps are obvious. The second one introduces
+the diagonal set @{term"{x. x \<notin> f x}"}, the key idea in the proof.
+If you wonder why @{text 2} directly implies @{text False}: from @{text 2}
+it follows that @{prop"a \<notin> f a \<longleftrightarrow> a \<in> f a"}.
+
+\subsection{\indexed{@{text this}}{this}, \indexed{\isacom{then}}{then}, \indexed{\isacom{hence}}{hence} and \indexed{\isacom{thus}}{thus}}
+
+Labels should be avoided. They interrupt the flow of the reader who has to
+scan the context for the point where the label was introduced. Ideally, the
+proof is a linear flow, where the output of one step becomes the input of the
+next step, piping the previously proved fact into the next proof, just like
+in a UNIX pipe. In such cases the predefined name @{text this} can be used
+to refer to the proposition proved in the previous step. This allows us to
+eliminate all labels from our proof (we suppress the \isacom{lemma} statement):
+*}
+(*<*)
+lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
+(*>*)
+proof
+  assume "surj f"
+  from this have "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
+  from this show "False" by blast
+qed
+
+text{* We have also taken the opportunity to compress the two \isacom{have}
+steps into one.
+
+To compact the text further, Isar has a few convenient abbreviations:
+\medskip
+
+\begin{tabular}{r@ {\quad=\quad}l}
+\isacom{then} & \isacom{from} @{text this}\\
+\isacom{thus} & \isacom{then} \isacom{show}\\
+\isacom{hence} & \isacom{then} \isacom{have}
+\end{tabular}
+\medskip
+
+\noindent
+With the help of these abbreviations the proof becomes
+*}
+(*<*)
+lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
+(*>*)
+proof
+  assume "surj f"
+  hence "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
+  thus "False" by blast
+qed
+text{*
+
+There are two further linguistic variations:
+\medskip
+
+\begin{tabular}{r@ {\quad=\quad}l}
+(\isacom{have}$\mid$\isacom{show}) \ \textit{prop} \ \indexed{\isacom{using}}{using} \ \textit{facts}
+&
+\isacom{from} \ \textit{facts} \ (\isacom{have}$\mid$\isacom{show}) \ \textit{prop}\\
+\indexed{\isacom{with}}{with} \ \textit{facts} & \isacom{from} \ \textit{facts} \isa{this}
+\end{tabular}
+\medskip
+
+\noindent The \isacom{using} idiom de-emphasizes the used facts by moving them
+behind the proposition.
+
+\subsection{Structured Lemma Statements: \indexed{\isacom{fixes}}{fixes}, \indexed{\isacom{assumes}}{assumes}, \indexed{\isacom{shows}}{shows}}
+\index{lemma@\isacom{lemma}}
+Lemmas can also be stated in a more structured fashion. To demonstrate this
+feature with Cantor's theorem, we rephrase @{prop"\<not> surj f"}
+a little:
+*}
+
+lemma
+  fixes f :: "'a \<Rightarrow> 'a set"
+  assumes s: "surj f"
+  shows "False"
+
+txt{* The optional \isacom{fixes} part allows you to state the types of
+variables up front rather than by decorating one of their occurrences in the
+formula with a type constraint. The key advantage of the structured format is
+the \isacom{assumes} part that allows you to name each assumption; multiple
+assumptions can be separated by \isacom{and}. The
+\isacom{shows} part gives the goal. The actual theorem that will come out of
+the proof is @{prop"surj f \<Longrightarrow> False"}, but during the proof the assumption
+@{prop"surj f"} is available under the name @{text s} like any other fact.
+*}
+
+proof -
+  have "\<exists> a. {x. x \<notin> f x} = f a" using s
+    by(auto simp: surj_def)
+  thus "False" by blast
+qed
+
+text{*
+\begin{warn}
+Note the hyphen after the \isacom{proof} command.
+It is the null method that does nothing to the goal. Leaving it out would ask
+Isabelle to try some suitable introduction rule on the goal @{const False}---but
+there is no such rule and \isacom{proof} would fail.
+\end{warn}
+In the \isacom{have} step the assumption @{prop"surj f"} is now
+referenced by its name @{text s}. The duplication of @{prop"surj f"} in the
+above proofs (once in the statement of the lemma, once in its proof) has been
+eliminated.
+
+Stating a lemma with \isacom{assumes}-\isacom{shows} implicitly introduces the
+name \indexed{@{text assms}}{assms} that stands for the list of all assumptions. You can refer
+to individual assumptions by @{text"assms(1)"}, @{text"assms(2)"} etc,
+thus obviating the need to name them individually.
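+For example, the proof above could have referred to the assumption with
+\isacom{using} @{text assms} instead of \isacom{using} @{text s}, making the
+explicit name @{text s} unnecessary.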
+
+\section{Proof Patterns}
+
+We show a number of important basic proof patterns. Many of them arise from
+the rules of natural deduction that are applied by \isacom{proof} by
+default. The patterns are phrased in terms of \isacom{show} but work for
+\isacom{have} and \isacom{lemma}, too.
+
+We start with two forms of \concept{case analysis}:
+starting from a formula @{text P} we have the two cases @{text P} and
+@{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
+we have the two cases @{text P} and @{text Q}:
+*}text_raw{*
+\begin{tabular}{@ {}ll@ {}}
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "R" proof-(*>*)
+show "R"
+proof cases
+  assume "P"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+next
+  assume "\<not> P"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)oops(*>*)
+text_raw {* }
+\end{minipage}\index{cases@@{text cases}}
+&
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "R" proof-(*>*)
+have "P \<or> Q" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+then show "R"
+proof
+  assume "P"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+next
+  assume "Q"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+\end{tabular}
+\medskip
+\begin{isamarkuptext}%
+How to prove a logical equivalence:
+\end{isamarkuptext}%
+\isa{%
+*}
+(*<*)lemma "P\<longleftrightarrow>Q" proof-(*>*)
+show "P \<longleftrightarrow> Q"
+proof
+  assume "P"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "Q" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+next
+  assume "Q"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "P" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+qed(*<*)qed(*>*)
+text_raw {* }
+\medskip
+\begin{isamarkuptext}%
+Proofs by contradiction (@{thm[source] ccontr} stands for ``classical contradiction''):
+\end{isamarkuptext}%
+\begin{tabular}{@ {}ll@ {}}
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "\<not> P" proof-(*>*)
+show "\<not> P"
+proof
+  assume "P"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "False" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+&
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "P" proof-(*>*)
+show "P"
+proof (rule ccontr)
+  assume "\<not>P"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "False" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+\end{tabular}
+\medskip
+\begin{isamarkuptext}%
+How to prove quantified formulas:
+\end{isamarkuptext}%
+\begin{tabular}{@ {}ll@ {}}
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "ALL x. P x" proof-(*>*)
+show "\<forall>x. P(x)"
+proof
+  fix x
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "P(x)" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+&
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "EX x. P(x)" proof-(*>*)
+show "\<exists>x. P(x)"
+proof
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "P(witness)" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed
+(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+\end{tabular}
+\medskip
+\begin{isamarkuptext}%
+In the proof of \noquotes{@{prop[source]"\<forall>x. P(x)"}},
+the step \indexed{\isacom{fix}}{fix}~@{text x} introduces a locally fixed variable @{text x}
+into the subproof, the proverbial ``arbitrary but fixed value''.
+Instead of @{text x} we could have chosen any name in the subproof.
+In the proof of \noquotes{@{prop[source]"\<exists>x. P(x)"}},
+@{text witness} is some arbitrary
+term for which we can prove that it satisfies @{text P}.
+
+How to reason forward from \noquotes{@{prop[source] "\<exists>x. P(x)"}}:
+\end{isamarkuptext}%
+*}
+(*<*)lemma True proof- assume 1: "EX x. P x"(*>*)
+have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)txt_raw{*\ $\dots$\\*}
+then obtain x where p: "P(x)" by blast
+(*<*)oops(*>*)
+text{*
+After the \indexed{\isacom{obtain}}{obtain} step, @{text x} (we could have chosen any name)
+is a fixed local
+variable, and @{text p} is the name of the fact
+\noquotes{@{prop[source] "P(x)"}}.
+This pattern works for one or more @{text x}.
+As an example of the \isacom{obtain} command, here is the proof of
+Cantor's theorem in more detail:
+*}
+
+lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
+proof
+  assume "surj f"
+  hence  "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
+  then obtain a where  "{x. x \<notin> f x} = f a"  by blast
+  hence  "a \<notin> f a \<longleftrightarrow> a \<in> f a"  by blast
+  thus "False" by blast
+qed
+
+text_raw{*
+\begin{isamarkuptext}%
+
+Finally, how to prove set equality and subset relationship:
+\end{isamarkuptext}%
+\begin{tabular}{@ {}ll@ {}}
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "A = (B::'a set)" proof-(*>*)
+show "A = B"
+proof
+  show "A \<subseteq> B" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+next
+  show "B \<subseteq> A" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)qed(*>*)
+
+text_raw {* }
+\end{minipage}
+&
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "A <= (B::'a set)" proof-(*>*)
+show "A \<subseteq> B"
+proof
+  fix x
+  assume "x \<in> A"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "x \<in> B" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)qed(*>*)
+
+text_raw {* }
+\end{minipage}
+\end{tabular}
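+*}
+
+text{*
+\medskip
+As a concrete instance of the set-equality pattern (the sets are arbitrary;
+this example is purely illustrative):
+*}
+
+lemma "A \<inter> B = B \<inter> A"
+proof
+  show "A \<inter> B \<subseteq> B \<inter> A" by auto
+next
+  show "B \<inter> A \<subseteq> A \<inter> B" by auto
+qed
+
+text_raw{*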
+\begin{isamarkuptext}%
+\section{Streamlining Proofs}
+
+\subsection{Pattern Matching and Quotations}
+
+In the proof patterns shown above, formulas are often duplicated.
+This can make the text harder to read, write and maintain. Pattern matching
+is an abbreviation mechanism to avoid such duplication. Writing
+\begin{quote}
+\isacom{show} \ \textit{formula} @{text"("}\indexed{\isacom{is}}{is} \textit{pattern}@{text")"}
+\end{quote}
+matches the pattern against the formula, thus instantiating the unknowns in
+the pattern for later use. As an example, consider the proof pattern for
+@{text"\<longleftrightarrow>"}:
+\end{isamarkuptext}%
+*}
+(*<*)lemma "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" proof-(*>*)
+show "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" (is "?L \<longleftrightarrow> ?R")
+proof
+  assume "?L"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "?R" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+next
+  assume "?R"
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show "?L" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+qed(*<*)qed(*>*)
+
+text{* Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
+the two abbreviations @{text"?L"} and @{text"?R"} by pattern matching.
+Pattern matching works wherever a formula is stated, in particular
+with \isacom{have} and \isacom{lemma}.
+
+The unknown \indexed{@{text"?thesis"}}{thesis} is implicitly matched against any goal stated by
+\isacom{lemma} or \isacom{show}. Here is a typical example: *}
+
+lemma "formula"
+proof -
+  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
+  show ?thesis (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+qed
+
+text{* 
+Unknowns can also be instantiated with \indexed{\isacom{let}}{let} commands
+\begin{quote}
+\isacom{let} @{text"?t"} = @{text"\""}\textit{some-big-term}@{text"\""}
+\end{quote}
+Later proof steps can refer to @{text"?t"}:
+\begin{quote}
+\isacom{have} @{text"\""}\dots @{text"?t"} \dots@{text"\""}
+\end{quote}
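+
+A tiny sketch of this idiom (the term is chosen arbitrarily, purely for
+illustration):
+*}
+
+(*<*)lemma True proof-(*>*)
+let ?t = "n*(n+1) div (2::nat)"
+have "?t = ?t" by(rule refl)
+(*<*)oops(*>*)
+
+text{*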
+\begin{warn}
+Names of facts are introduced with @{text"name:"} and refer to proved
+theorems. Unknowns @{text"?X"} refer to terms or formulas.
+\end{warn}
+
+Although abbreviations shorten the text, the reader needs to remember what
+they stand for. Similarly for names of facts. Names like @{text 1}, @{text 2}
+and @{text 3} are not helpful and should only be used in short proofs. For
+longer proofs, descriptive names are better. But look at this example:
+\begin{quote}
+\isacom{have} \ @{text"x_gr_0: \"x > 0\""}\\
+$\vdots$\\
+\isacom{from} @{text "x_gr_0"} \dots
+\end{quote}
+The name is longer than the fact it stands for! Short facts do not need names;
+one can refer to them easily by quoting them:
+\begin{quote}
+\isacom{have} \ @{text"\"x > 0\""}\\
+$\vdots$\\
+\isacom{from} @{text "`x>0`"} \dots\index{$IMP053@@{text"`...`"}}
+\end{quote}
+Note that the quotes around @{text"x>0"} are \conceptnoidx{back quotes}.
+They refer to the fact not by name but by value.
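+
+Here is a minimal sketch of this style of reference (the fact is made up for
+illustration):
+*}
+
+(*<*)lemma fixes x :: nat assumes "x > 0" shows "x \<noteq> 0" proof-(*>*)
+have "x > 0" by(rule assms)
+from `x > 0` show "x \<noteq> 0" by simp
+(*<*)qed(*>*)
+
+text{*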
+
+\subsection{\indexed{\isacom{moreover}}{moreover}}
+\index{ultimately@\isacom{ultimately}}
+
+Sometimes one needs a number of facts to enable some deduction. Of course
+one can name these facts individually, as shown on the right,
+but one can also combine them with \isacom{moreover}, as shown on the left:
+*}text_raw{*
+\begin{tabular}{@ {}ll@ {}}
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "P" proof-(*>*)
+have "P\<^sub>1" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+moreover have "P\<^sub>2" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+moreover
+txt_raw{*\\$\vdots$\\\hspace{-1.4ex}*}(*<*)have "True" ..(*>*)
+moreover have "P\<^sub>n" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+ultimately have "P"  (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+&
+\qquad
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "P" proof-(*>*)
+have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)txt_raw{*\ $\dots$*}
+txt_raw{*\\$\vdots$\\\hspace{-1.4ex}*}
+have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+from lab\<^sub>1 lab\<^sub>2 txt_raw{*\ $\dots$\\*}
+have "P"  (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+(*<*)oops(*>*)
+
+text_raw {* }
+\end{minipage}
+\end{tabular}
+\begin{isamarkuptext}%
+The \isacom{moreover} version is no shorter but expresses the structure more
+clearly and avoids new names.
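+
+As a small self-contained sketch of the idiom (the facts are arbitrary; the
+final step uses the library rule @{text le_trans}):
+\end{isamarkuptext}%
+*}
+
+lemma assumes "(a::nat) \<le> b" and "b \<le> c" shows "a \<le> c"
+proof -
+  (* purely illustrative use of moreover/ultimately *)
+  have "a \<le> b" by(rule assms(1))
+  moreover have "b \<le> c" by(rule assms(2))
+  ultimately show "a \<le> c" by(rule le_trans)
+qed
+
+text_raw{*
+\begin{isamarkuptext}%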
+
+\subsection{Raw Proof Blocks}
+
+Sometimes one would like to prove some lemma locally within a proof,
+a lemma that shares the current context of assumptions but that
+has its own assumptions and is generalized over its locally fixed
+variables at the end. This is what a \concept{raw proof block} does:
+\begin{quote}\index{$IMP053@@{text"{ ... }"} (proof block)}
+@{text"{"} \isacom{fix} @{text"x\<^sub>1 \<dots> x\<^sub>n"}\\
+\mbox{}\ \ \ \isacom{assume} @{text"A\<^sub>1 \<dots> A\<^sub>m"}\\
+\mbox{}\ \ \ $\vdots$\\
+\mbox{}\ \ \ \isacom{have} @{text"B"}\\
+@{text"}"}
+\end{quote}
+proves @{text"\<lbrakk> A\<^sub>1; \<dots> ; A\<^sub>m \<rbrakk> \<Longrightarrow> B"}
+where all @{text"x\<^sub>i"} have been replaced by unknowns @{text"?x\<^sub>i"}.
+\begin{warn}
+The conclusion of a raw proof block is \emph{not} indicated by \isacom{show}
+but is simply the final \isacom{have}.
+\end{warn}
+
+As an example we prove a simple fact about divisibility on integers.
+The definition of @{text "dvd"} is @{thm dvd_def}.
+\end{isamarkuptext}%
+*}
+
+lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
+proof -
+  { fix k assume k: "a+b = b*k"
+    have "\<exists>k'. a = b*k'"
+    proof
+      show "a = b*(k - 1)" using k by(simp add: algebra_simps)
+    qed }
+  then show ?thesis using assms by(auto simp add: dvd_def)
+qed
+
+text{* Note that the result of a raw proof block has no name. In this example
+it was directly piped (via \isacom{then}) into the final proof, but it can
+also be named for later reference: you simply follow the block directly by a
+\isacom{note} command:
+\begin{quote}
+\indexed{\isacom{note}}{note} \ @{text"name = this"}
+\end{quote}
+This introduces a new name @{text name} that refers to @{text this},
+the fact just proved, in this case the preceding block. In general,
+\isacom{note} introduces a new name for one or more facts.
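+
+For illustration only, here is a hypothetical variant of the divisibility
+proof above in which the block is named in this way:
+*}
+
+lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
+proof -
+  (* illustrative variant of the previous proof, with a named block *)
+  { fix k assume "a+b = b*k"
+    hence "a = b*(k - 1)" by(simp add: algebra_simps)
+    hence "\<exists>k'. a = b*k'" by blast }
+  note aux = this
+  from aux show ?thesis using assms by(auto simp add: dvd_def)
+qed
+
+text{*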
+
+\subsection*{Exercises}
+
+\exercise
+Give a readable, structured proof of the following lemma:
+*}
+lemma assumes T: "\<forall>x y. T x y \<or> T y x"
+  and A: "\<forall>x y. A x y \<and> A y x \<longrightarrow> x = y"
+  and TA: "\<forall>x y. T x y \<longrightarrow> A x y" and "A x y"
+  shows "T x y"
+(*<*)oops(*>*)
+text{*
+\endexercise
+
+\exercise
+Give a readable, structured proof of the following lemma:
+*}
+lemma "(\<exists>ys zs. xs = ys @ zs \<and> length ys = length zs)
+      \<or> (\<exists>ys zs. xs = ys @ zs \<and> length ys = length zs + 1)"
+(*<*)oops(*>*)
+text{*
+Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
+such that @{text"take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]"} and
+@{text"drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]"}. Let sledgehammer find and apply
+the relevant @{const take} and @{const drop} lemmas for you.
+\endexercise
+
+
+\section{Case Analysis and Induction}
+
+\subsection{Datatype Case Analysis}
+\index{case analysis|(}
+
+We have seen case analysis on formulas. Now we want to distinguish
+which form some term takes: is it @{text 0} or of the form @{term"Suc n"},
+is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
+of a proof by case analysis on the form of @{text xs}:
+*}
+
+lemma "length(tl xs) = length xs - 1"
+proof (cases xs)
+  assume "xs = []"
+  thus ?thesis by simp
+next
+  fix y ys assume "xs = y#ys"
+  thus ?thesis by simp
+qed
+
+text{*\index{cases@@{text"cases"}|(}Function @{text tl} (``tail'') is defined by @{thm list.sel(2)} and
+@{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
+and @{prop"0 - 1 = (0::nat)"}.
+
+This proof pattern works for any term @{text t} whose type is a datatype.
+The goal has to be proved for each constructor @{text C}:
+\begin{quote}
+\isacom{fix} \ @{text"x\<^sub>1 \<dots> x\<^sub>n"} \isacom{assume} @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""}
+\end{quote}\index{case@\isacom{case}|(}
+Each case can be written in a more compact form by means of the \isacom{case}
+command:
+\begin{quote}
+\isacom{case} @{text "(C x\<^sub>1 \<dots> x\<^sub>n)"}
+\end{quote}
+This is equivalent to the explicit \isacom{fix}-\isacom{assume} line
+but also gives the assumption @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""} a name: @{text C},
+like the constructor.
+Here is the \isacom{case} version of the proof above:
+*}
+(*<*)lemma "length(tl xs) = length xs - 1"(*>*)
+proof (cases xs)
+  case Nil
+  thus ?thesis by simp
+next
+  case (Cons y ys)
+  thus ?thesis by simp
+qed
+
+text{* Remember that @{text Nil} and @{text Cons} are the alphanumeric names
+for @{text"[]"} and @{text"#"}. The names of the assumptions
+are not used because they are directly piped (via \isacom{thus})
+into the proof of the claim.
+\index{case analysis|)}
+
+\subsection{Structural Induction}
+\index{induction|(}
+\index{structural induction|(}
+
+We illustrate structural induction with an example based on natural numbers:
+the sum (@{text"\<Sum>"}) of the first @{text n} natural numbers
+(@{text"{0..n::nat}"}) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
+Never mind the details, just focus on the pattern:
+*}
+
+lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"
+proof (induction n)
+  show "\<Sum>{0..0::nat} = 0*(0+1) div 2" by simp
+next
+  fix n assume "\<Sum>{0..n::nat} = n*(n+1) div 2"
+  thus "\<Sum>{0..Suc n} = Suc n*(Suc n+1) div 2" by simp
+qed
+
+text{* Except for the rewrite steps, everything is explicitly given. This
+makes the proof easily readable, but the duplication means it is tedious to
+write and maintain. Here is how pattern
+matching can completely avoid any duplication: *}
+
+lemma "\<Sum>{0..n::nat} = n*(n+1) div 2" (is "?P n")
+proof (induction n)
+  show "?P 0" by simp
+next
+  fix n assume "?P n"
+  thus "?P(Suc n)" by simp
+qed
+
+text{* The first line introduces an abbreviation @{text"?P n"} for the goal.
+Pattern matching @{text"?P n"} with the goal instantiates @{text"?P"} to the
+function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}.  Now the proposition to
+be proved in the base case can be written as @{text"?P 0"}, the induction
+hypothesis as @{text"?P n"}, and the conclusion of the induction step as
+@{text"?P(Suc n)"}.
+
+Induction also provides the \isacom{case} idiom that abbreviates
+the \isacom{fix}-\isacom{assume} step. The above proof becomes
+*}
+(*<*)lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"(*>*)
+proof (induction n)
+  case 0
+  show ?case by simp
+next
+  case (Suc n)
+  thus ?case by simp
+qed
+
+text{*
+The unknown @{text"?case"}\index{case?@@{text"?case"}|(} is set in each case to the required
+claim, i.e.\ @{text"?P 0"} and \mbox{@{text"?P(Suc n)"}} in the above proof,
+without requiring the user to define a @{text "?P"}. The general
+pattern for induction over @{typ nat} is shown on the left-hand side:
+*}text_raw{*
+\begin{tabular}{@ {}ll@ {}}
+\begin{minipage}[t]{.4\textwidth}
+\isa{%
+*}
+(*<*)lemma "P(n::nat)" proof -(*>*)
+show "P(n)"
+proof (induction n)
+  case 0
+  txt_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
+  show ?case (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+next
+  case (Suc n)
+  txt_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
+  show ?case (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
+qed(*<*)qed(*>*)
+
+text_raw {* }
+\end{minipage}
+&
+\begin{minipage}[t]{.4\textwidth}
+~\\
+~\\
+\isacom{let} @{text"?case = \"P(0)\""}\\
+~\\
+~\\
+~\\[1ex]
+\isacom{fix} @{text n} \isacom{assume} @{text"Suc: \"P(n)\""}\\
+\isacom{let} @{text"?case = \"P(Suc n)\""}\\
+\end{minipage}
+\end{tabular}
+\medskip
+*}
+text{*
+On the right side you can see what the \isacom{case} command
+on the left stands for.
+
+In case the goal is an implication, induction does one more thing: the
+proposition to be proved in each case is not the whole implication but only
+its conclusion; the premises of the implication are immediately made
+assumptions of that case. That is, if in the above proof we replace
+\isacom{show}~@{text"\"P(n)\""} by
+\mbox{\isacom{show}~@{text"\"A(n) \<Longrightarrow> P(n)\""}}
+then \isacom{case}~@{text 0} stands for
+\begin{quote}
+\isacom{assume} \ @{text"0: \"A(0)\""}\\
+\isacom{let} @{text"?case = \"P(0)\""}
+\end{quote}
+and \isacom{case}~@{text"(Suc n)"} stands for
+\begin{quote}
+\isacom{fix} @{text n}\\
+\isacom{assume} @{text"Suc:"}
+  \begin{tabular}[t]{l}@{text"\"A(n) \<Longrightarrow> P(n)\""}\\@{text"\"A(Suc n)\""}\end{tabular}\\
+\isacom{let} @{text"?case = \"P(Suc n)\""}
+\end{quote}
+The list of assumptions @{text Suc} is actually subdivided
+into @{text"Suc.IH"}, the induction hypotheses (here @{text"A(n) \<Longrightarrow> P(n)"})
+and @{text"Suc.prems"}, the premises of the goal being proved
+(here @{text"A(Suc n)"}).
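+
+As a small illustrative sketch (the property is made up for this purpose):
+*}
+
+lemma "n \<noteq> 0 \<Longrightarrow> Suc(n - 1) = n"
+proof (induction n)
+  case 0
+  thus ?case by simp
+next
+  case (Suc n)
+  thus ?case by simp
+qed
+
+text{*
+In the base case, @{text"0.prems"} is the contradictory assumption
+@{prop"0 \<noteq> (0::nat)"}; in the induction step, @{text"Suc.prems"} is
+@{prop"Suc n \<noteq> 0"}, and both cases are closed by @{text simp}.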
+
+Induction works for any datatype.
+Proving a goal @{text"\<lbrakk> A\<^sub>1(x); \<dots>; A\<^sub>k(x) \<rbrakk> \<Longrightarrow> P(x)"}
+by induction on @{text x} generates a proof obligation for each constructor
+@{text C} of the datatype. The command \isacom{case}~@{text"(C x\<^sub>1 \<dots> x\<^sub>n)"}
+performs the following steps:
+\begin{enumerate}
+\item \isacom{fix} @{text"x\<^sub>1 \<dots> x\<^sub>n"}
+\item \isacom{assume} the induction hypotheses (calling them @{text C.IH}\index{IH@@{text".IH"}})
+ and the premises \mbox{@{text"A\<^sub>i(C x\<^sub>1 \<dots> x\<^sub>n)"}} (calling them @{text"C.prems"}\index{prems@@{text".prems"}})
+ and calling the whole list @{text C}
+\item \isacom{let} @{text"?case = \"P(C x\<^sub>1 \<dots> x\<^sub>n)\""}
+\end{enumerate}
+\index{structural induction|)}
+
+\subsection{Rule Induction}
+\index{rule induction|(}
+
+Recall the inductive and recursive definitions of even numbers in
+\autoref{sec:inductive-defs}:
+*}
+
+inductive ev :: "nat \<Rightarrow> bool" where
+ev0: "ev 0" |
+evSS: "ev n \<Longrightarrow> ev(Suc(Suc n))"
+
+fun even :: "nat \<Rightarrow> bool" where
+"even 0 = True" |
+"even (Suc 0) = False" |
+"even (Suc(Suc n)) = even n"
+
+text{* We recast the proof of @{prop"ev n \<Longrightarrow> even n"} in Isar. The
+left column shows the actual proof text, the right column shows
+the implicit effect of the two \isacom{case} commands:*}text_raw{*
+\begin{tabular}{@ {}l@ {\qquad}l@ {}}
+\begin{minipage}[t]{.5\textwidth}
+\isa{%
+*}
+
+lemma "ev n \<Longrightarrow> even n"
+proof(induction rule: ev.induct)
+  case ev0
+  show ?case by simp
+next
+  case evSS
+
+
+
+  thus ?case by simp
+qed
+
+text_raw {* }
+\end{minipage}
+&
+\begin{minipage}[t]{.5\textwidth}
+~\\
+~\\
+\isacom{let} @{text"?case = \"even 0\""}\\
+~\\
+~\\
+\isacom{fix} @{text n}\\
+\isacom{assume} @{text"evSS:"}
+  \begin{tabular}[t]{l} @{text"\"ev n\""}\\@{text"\"even n\""}\end{tabular}\\
+\isacom{let} @{text"?case = \"even(Suc(Suc n))\""}\\
+\end{minipage}
+\end{tabular}
+\medskip
+*}
+text{*
+The proof resembles structural induction, but the induction rule is given
+explicitly and the names of the cases are the names of the rules in the
+inductive definition.
+Let us examine the two assumptions named @{thm[source]evSS}:
+@{prop "ev n"} is the premise of rule @{thm[source]evSS}, which we may assume
+because we are in the case where that rule was used; @{prop"even n"}
+is the induction hypothesis.
+\begin{warn}
+Because each \isacom{case} command introduces a list of assumptions
+named like the case name, which is the name of a rule of the inductive
+definition, those rules now need to be accessed with a qualified name, here
+@{thm[source] ev.ev0} and @{thm[source] ev.evSS}
+\end{warn}
+
+In the case @{thm[source]evSS} of the proof above we have pretended that the
+system fixes a variable @{text n}.  But unless the user provides the name
+@{text n}, the system will just invent its own name that cannot be referred
+to.  In the above proof, we do not need to refer to it, hence we do not give
+it a specific name. In case one needs to refer to it one writes
+\begin{quote}
+\isacom{case} @{text"(evSS m)"}
+\end{quote}
+just like \isacom{case}~@{text"(Suc n)"} in earlier structural inductions.
+The name @{text m} is an arbitrary choice. As a result,
+case @{thm[source] evSS} is derived from a renamed version of
+rule @{thm[source] evSS}: @{text"ev m \<Longrightarrow> ev(Suc(Suc m))"}.
+Here is an example with a (contrived) intermediate step that refers to @{text m}:
+*}
+
+lemma "ev n \<Longrightarrow> even n"
+proof(induction rule: ev.induct)
+  case ev0 show ?case by simp
+next
+  case (evSS m)
+  have "even(Suc(Suc m)) = even m" by simp
+  thus ?case using `even m` by blast
+qed
+
+text{*
+\indent
+In general, let @{text I} be a (for simplicity unary) inductively defined
+predicate and let the rules in the definition of @{text I}
+be called @{text "rule\<^sub>1"}, \dots, @{text "rule\<^sub>n"}. A proof by rule
+induction follows this pattern:\index{inductionrule@@{text"induction ... rule:"}}
+*}
+
+(*<*)
+inductive I where rule\<^sub>1: "I()" |  rule\<^sub>2: "I()" |  rule\<^sub>n: "I()"
+lemma "I x \<Longrightarrow> P x" proof-(*>*)
+show "I x \<Longrightarrow> P x"
+proof(induction rule: I.induct)
+  case rule\<^sub>1
+  txt_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
+  show ?case (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+next
+  txt_raw{*\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
+(*<*)
+  case rule\<^sub>2
+  show ?case sorry
+(*>*)
+next
+  case rule\<^sub>n
+  txt_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
+  show ?case (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
+qed(*<*)qed(*>*)
+
+text{*
+One can provide explicit variable names by writing
+\isacom{case}~@{text"(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)"}, thus renaming the first @{text k}
+free variables in rule @{text i} to @{text"x\<^sub>1 \<dots> x\<^sub>k"},
+going through rule @{text i} from left to right.
+
+\subsection{Assumption Naming}
+\label{sec:assm-naming}
+
+In any induction, \isacom{case}~@{text name} sets up a list of assumptions
+also called @{text name}, which is subdivided into three parts:
+\begin{description}
+\item[@{text name.IH}]\index{IH@@{text".IH"}} contains the induction hypotheses.
+\item[@{text name.hyps}]\index{hyps@@{text".hyps"}} contains all the other hypotheses of this case in the
+induction rule. For rule inductions these are the hypotheses of rule
+@{text name}, for structural inductions these are empty.
+\item[@{text name.prems}]\index{prems@@{text".prems"}} contains the (suitably instantiated) premises
+of the statement being proved, i.e. the @{text A\<^sub>i} when
+proving @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}.
+\end{description}
+\begin{warn}
+Proof method @{text induct} differs from @{text induction}
+only in this naming policy: @{text induct} does not distinguish
+@{text IH} from @{text hyps} but subsumes @{text IH} under @{text hyps}.
+\end{warn}
+
+More complicated inductive proofs than the ones we have seen so far
+often need to refer to specific assumptions---just @{text name} or even
+@{text name.prems} and @{text name.IH} can be too unspecific.
+This is where the indexing of fact lists comes in handy, e.g.\
+@{text"name.IH(2)"} or @{text"name.prems(1-2)"}.
+
+\subsection{Rule Inversion}
+\label{sec:rule-inversion}
+\index{rule inversion|(}
+
+Rule inversion is case analysis of which rule could have been used to
+derive some fact. The name \conceptnoidx{rule inversion} emphasizes that we are
+reasoning backwards: by which rules could some given fact have been proved?
+For the inductive definition of @{const ev}, rule inversion can be summarized
+like this:
+@{prop[display]"ev n \<Longrightarrow> n = 0 \<or> (EX k. n = Suc(Suc k) \<and> ev k)"}
+The realisation in Isabelle is a case analysis.
+A simple example is the proof that @{prop"ev n \<Longrightarrow> ev (n - 2)"}. We
+already went through the details informally in \autoref{sec:Logic:even}. This
+is the Isar proof:
+*}
+(*<*)
+notepad
+begin fix n
+(*>*)
+  assume "ev n"
+  from this have "ev(n - 2)"
+  proof cases
+    case ev0 thus "ev(n - 2)" by (simp add: ev.ev0)
+  next
+    case (evSS k) thus "ev(n - 2)" by (simp add: ev.evSS)
+  qed
+(*<*)
+end
+(*>*)
+
+text{* The key point here is that a case analysis over some inductively
+defined predicate is triggered by piping the given fact
+(here: \isacom{from}~@{text this}) into a proof by @{text cases}.
+Let us examine the assumptions available in each case. In case @{text ev0}
+we have @{text"n = 0"} and in case @{text evSS} we have @{prop"n = Suc(Suc k)"}
+and @{prop"ev k"}. In each case the assumptions are available under the name
+of the case; there is no fine-grained naming schema as for induction.
+
+Sometimes some rules could not have been used to derive the given fact
+because constructors clash. As an extreme example consider
+rule inversion applied to @{prop"ev(Suc 0)"}: neither rule @{text ev0} nor
+rule @{text evSS} can yield @{prop"ev(Suc 0)"} because @{text"Suc 0"} unifies
+neither with @{text 0} nor with @{term"Suc(Suc n)"}. Impossible cases do not
+have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
+*}
+(*<*)
+notepad begin fix P
+(*>*)
+  assume "ev(Suc 0)" then have P by cases
+(*<*)
+end
+(*>*)
+
+text{* That is, @{prop"ev(Suc 0)"} is simply not provable: *}
+
+lemma "\<not> ev(Suc 0)"
+proof
+  assume "ev(Suc 0)" then show False by cases
+qed
+
+text{* Normally not all cases will be impossible. As a simple exercise,
+prove that \mbox{@{prop"\<not> ev(Suc(Suc(Suc 0)))"}.}
+
+\subsection{Advanced Rule Induction}
+\label{sec:advanced-rule-induction}
+
+So far, rule induction was always applied to goals of the form @{text"I x y z \<Longrightarrow> \<dots>"}
+where @{text I} is some inductively defined predicate and @{text x}, @{text y}, @{text z}
+are variables. In some rare situations one needs to deal with an assumption where
+not all arguments @{text r}, @{text s}, @{text t} are variables:
+\begin{isabelle}
+\isacom{lemma} @{text[source]"I r s t \<Longrightarrow> \<dots>"}
+\end{isabelle}
+Applying the standard form of
+rule induction in such a situation will lead to strange and typically unprovable goals.
+We can easily reduce this situation to the standard one by introducing
+new variables @{text x}, @{text y}, @{text z} and reformulating the goal like this:
+\begin{isabelle}
+\isacom{lemma} @{text[source]"I x y z \<Longrightarrow> x = r \<Longrightarrow> y = s \<Longrightarrow> z = t \<Longrightarrow> \<dots>"}
+\end{isabelle}
+Standard rule induction will work fine now, provided the free variables in
+@{text r}, @{text s}, @{text t} are generalized via @{text"arbitrary"}.
+
+However, induction can do the above transformation for us, behind the curtains, so we never
+need to see the expanded version of the lemma. This is what we need to write:
+\begin{isabelle}
+\isacom{lemma} @{text[source]"I r s t \<Longrightarrow> \<dots>"}\isanewline
+\isacom{proof}@{text"(induction \"r\" \"s\" \"t\" arbitrary: \<dots> rule: I.induct)"}\index{inductionrule@@{text"induction ... rule:"}}\index{arbitrary@@{text"arbitrary:"}}
+\end{isabelle}
+Just like for rule inversion, cases that are impossible because of constructor clashes
+will not show up at all. Here is a concrete example: *}
+
+lemma "ev (Suc m) \<Longrightarrow> \<not> ev m"
+proof(induction "Suc m" arbitrary: m rule: ev.induct)
+  fix n assume IH: "\<And>m. n = Suc m \<Longrightarrow> \<not> ev m"
+  show "\<not> ev (Suc n)"
+  proof --"contradiction"
+    assume "ev(Suc n)"
+    thus False
+    proof cases --"rule inversion"
+      fix k assume "n = Suc k" "ev k"
+      thus False using IH by auto
+    qed
+  qed
+qed
+
+text{*
+Remarks:
+\begin{itemize}
+\item 
+Instead of the \isacom{case} and @{text ?case} magic we have spelled all formulas out.
+This is merely for greater clarity.
+\item
+We only need to deal with one case because the @{thm[source] ev0} case is impossible.
+\item
+The form of the @{text IH} shows us that internally the lemma was expanded as explained
+above: \noquotes{@{prop[source]"ev x \<Longrightarrow> x = Suc m \<Longrightarrow> \<not> ev m"}}.
+\item
+The goal @{prop"\<not> ev (Suc n)"} may come as a surprise. The expanded version of the lemma
+would suggest that we have a \isacom{fix} @{text m} \isacom{assume} @{prop"Suc(Suc n) = Suc m"}
+and need to show @{prop"\<not> ev m"}. What happened is that Isabelle immediately
+simplified @{prop"Suc(Suc n) = Suc m"} to @{prop"Suc n = m"} and could then eliminate
+@{text m}. Beware of such nice surprises with this advanced form of induction.
+\end{itemize}
+\begin{warn}
+This advanced form of induction does not support the @{text IH}
+naming schema explained in \autoref{sec:assm-naming}:
+the induction hypotheses are instead found under the name @{text hyps}, like for the simpler
+@{text induct} method.
+\end{warn}
+\index{induction|)}
+\index{cases@@{text"cases"}|)}
+\index{case@\isacom{case}|)}
+\index{case?@@{text"?case"}|)}
+\index{rule induction|)}
+\index{rule inversion|)}
+
+\subsection*{Exercises}
+
+
+\exercise
+Give a structured proof by rule inversion:
+*}
+
+lemma assumes a: "ev(Suc(Suc n))" shows "ev n"
+(*<*)oops(*>*)
+
+text{*
+\endexercise
+
+\begin{exercise}
+Give a structured proof of @{prop "\<not> ev(Suc(Suc(Suc 0)))"}
+by rule inversions. If there are no cases to be proved you can close
+a proof immediately with \isacom{qed}.
+\end{exercise}
+
+\begin{exercise}
+Recall predicate @{text star} from \autoref{sec:star} and @{text iter}
+from Exercise~\ref{exe:iter}. Prove @{prop "iter r n x y \<Longrightarrow> star r x y"}
+in a structured style; do not just sledgehammer each case of the
+required induction.
+\end{exercise}
+
+\begin{exercise}
+Define a recursive function @{text "elems ::"} @{typ"'a list \<Rightarrow> 'a set"}
+and prove @{prop "x : elems xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> elems ys"}.
+\end{exercise}
+*}
+
+(*<*)
+end
+(*>*)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/LaTeXsugar.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,63 @@
+(*  Title:      HOL/Library/LaTeXsugar.thy
+    Author:     Gerwin Klein, Tobias Nipkow, Norbert Schirmer
+    Copyright   2005 NICTA and TUM
+*)
+
+(*<*)
+theory LaTeXsugar
+imports Main
+begin
+
+(* DUMMY *)
+consts DUMMY :: 'a ("\<^raw:\_>")
+
+(* THEOREMS *)
+notation (Rule output)
+  Pure.imp  ("\<^raw:\mbox{}\inferrule{\mbox{>_\<^raw:}}>\<^raw:{\mbox{>_\<^raw:}}>")
+
+syntax (Rule output)
+  "_bigimpl" :: "asms \<Rightarrow> prop \<Rightarrow> prop"
+  ("\<^raw:\mbox{}\inferrule{>_\<^raw:}>\<^raw:{\mbox{>_\<^raw:}}>")
+
+  "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" 
+  ("\<^raw:\mbox{>_\<^raw:}\\>/ _")
+
+  "_asm" :: "prop \<Rightarrow> asms" ("\<^raw:\mbox{>_\<^raw:}>")
+
+notation (Axiom output)
+  "Trueprop"  ("\<^raw:\mbox{}\inferrule{\mbox{}}{\mbox{>_\<^raw:}}>")
+
+notation (IfThen output)
+  Pure.imp  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _/ \<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
+syntax (IfThen output)
+  "_bigimpl" :: "asms \<Rightarrow> prop \<Rightarrow> prop"
+  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _ /\<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
+  "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("\<^raw:\mbox{>_\<^raw:}> /\<^raw:{\normalsize \,>and\<^raw:\,}>/ _")
+  "_asm" :: "prop \<Rightarrow> asms" ("\<^raw:\mbox{>_\<^raw:}>")
+
+notation (IfThenNoBox output)
+  Pure.imp  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _/ \<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
+syntax (IfThenNoBox output)
+  "_bigimpl" :: "asms \<Rightarrow> prop \<Rightarrow> prop"
+  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _ /\<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
+  "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("_ /\<^raw:{\normalsize \,>and\<^raw:\,}>/ _")
+  "_asm" :: "prop \<Rightarrow> asms" ("_")
+
+setup{*
+  let
+    fun pretty ctxt c =
+      let val tc = Proof_Context.read_const {proper = true, strict = false} ctxt c
+      in Pretty.block [Thy_Output.pretty_term ctxt tc, Pretty.str " ::",
+            Pretty.brk 1, Syntax.pretty_typ ctxt (fastype_of tc)]
+      end
+  in
+    Thy_Output.antiquotation @{binding "const_typ"}
+     (Scan.lift Args.name_inner_syntax)
+       (fn {source = src, context = ctxt, ...} => fn arg =>
+          Thy_Output.output ctxt
+            (Thy_Output.maybe_pretty_source pretty ctxt src [arg]))
+  end;
+*}
+
+end
+(*>*)
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/Logic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,883 @@
+(*<*)
+theory Logic
+imports LaTeXsugar
+begin
+(*>*)
+text{*
+\vspace{-5ex}
+\section{Formulas}
+
+The core syntax of formulas (\textit{form} below)
+provides the standard logical constructs, in decreasing order of precedence:
+\[
+\begin{array}{rcl}
+
+\mathit{form} & ::= &
+  @{text"(form)"} ~\mid~
+  @{const True} ~\mid~
+  @{const False} ~\mid~
+  @{prop "term = term"}\\
+ &\mid& @{prop"\<not> form"}\index{$HOL4@\isasymnot} ~\mid~
+  @{prop "form \<and> form"}\index{$HOL0@\isasymand} ~\mid~
+  @{prop "form \<or> form"}\index{$HOL1@\isasymor} ~\mid~
+  @{prop "form \<longrightarrow> form"}\index{$HOL2@\isasymlongrightarrow}\\
+ &\mid& @{prop"\<forall>x. form"}\index{$HOL6@\isasymforall} ~\mid~  @{prop"\<exists>x. form"}\index{$HOL7@\isasymexists}
+\end{array}
+\]
+Terms are the ones we have seen all along, built from constants, variables,
+function application and @{text"\<lambda>"}-abstraction, including all the syntactic
+sugar like infix symbols, @{text "if"}, @{text "case"} etc.
+\begin{warn}
+Remember that formulas are simply terms of type @{text bool}. Hence
+@{text "="} also works for formulas. Beware that @{text"="} has a higher
+precedence than the other logical operators. Hence @{prop"s = t \<and> A"} means
+@{text"(s = t) \<and> A"}, and @{prop"A\<and>B = B\<and>A"} means @{text"A \<and> (B = B) \<and> A"}.
+Logical equivalence can also be written with
+@{text "\<longleftrightarrow>"} instead of @{text"="}, where @{text"\<longleftrightarrow>"} has the same low
+precedence as @{text"\<longrightarrow>"}. Hence @{text"A \<and> B \<longleftrightarrow> B \<and> A"} really means
+@{text"(A \<and> B) \<longleftrightarrow> (B \<and> A)"}.
+\end{warn}
+\begin{warn}
+Quantifiers need to be enclosed in parentheses if they are nested within
+other constructs (just like @{text "if"}, @{text case} and @{text let}).
+\end{warn}
+The most frequent logical symbols and their ASCII representations are shown
+in Fig.~\ref{fig:log-symbols}.
+\begin{figure}
+\begin{center}
+\begin{tabular}{l@ {\qquad}l@ {\qquad}l}
+@{text "\<forall>"} & \xsymbol{forall} & \texttt{ALL}\\
+@{text "\<exists>"} & \xsymbol{exists} & \texttt{EX}\\
+@{text "\<lambda>"} & \xsymbol{lambda} & \texttt{\%}\\
+@{text "\<longrightarrow>"} & \texttt{-{}->}\\
+@{text "\<longleftrightarrow>"} & \texttt{<->}\\
+@{text "\<and>"} & \texttt{/\char`\\} & \texttt{\&}\\
+@{text "\<or>"} & \texttt{\char`\\/} & \texttt{|}\\
+@{text "\<not>"} & \xsymbol{not} & \texttt{\char`~}\\
+@{text "\<noteq>"} & \xsymbol{noteq} & \texttt{\char`~=}
+\end{tabular}
+\end{center}
+\caption{Logical symbols and their ASCII forms}
+\label{fig:log-symbols}
+\end{figure}
+The first column shows the symbols, the other columns ASCII representations.
+The \texttt{\char`\\}\texttt{<...>} form is always converted into the symbolic form
+by the Isabelle interfaces, the treatment of the other ASCII forms
+depends on the interface. The ASCII forms \texttt{/\char`\\} and
+\texttt{\char`\\/}
+are special in that they are merely keyboard shortcuts for the interface and
+not logical symbols by themselves.
+\begin{warn}
+The implication @{text"\<Longrightarrow>"} is part of the Isabelle framework. It structures
+theorems and proof states, separating assumptions from conclusions.
+The implication @{text"\<longrightarrow>"} is part of the logic HOL and can occur inside the
+formulas that make up the assumptions and conclusion.
+Theorems should be of the form @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"},
+not @{text"A\<^sub>1 \<and> \<dots> \<and> A\<^sub>n \<longrightarrow> A"}. Both are logically equivalent
+but the first one works better when using the theorem in further proofs.
+\end{warn}
+
+\section{Sets}
+\label{sec:Sets}
+
+Sets of elements of type @{typ 'a} have type @{typ"'a set"}\index{set@@{text set}}.
+They can be finite or infinite. Sets come with the usual notation:
+\begin{itemize}
+\item \indexed{@{term"{}"}}{$IMP042},\quad @{text"{e\<^sub>1,\<dots>,e\<^sub>n}"}
+\item @{prop"e \<in> A"}\index{$HOLSet0@\isasymin},\quad @{prop"A \<subseteq> B"}\index{$HOLSet2@\isasymsubseteq}
+\item @{term"A \<union> B"}\index{$HOLSet4@\isasymunion},\quad @{term"A \<inter> B"}\index{$HOLSet5@\isasyminter},\quad @{term"A - B"},\quad @{term"-A"}
+\end{itemize}
+(where @{term"A-B"} and @{text"-A"} are set difference and complement)
+and much more. @{const UNIV} is the set of all elements of some type.
+Set comprehension\index{set comprehension} is written
+@{term"{x. P}"}\index{$IMP042@@{term"{x. P}"}} rather than @{text"{x | P}"}.
+\begin{warn}
+In @{term"{x. P}"} the @{text x} must be a variable. Set comprehension
+involving a proper term @{text t} must be written
+\noquotes{@{term[source] "{t | x y. P}"}}\index{$IMP042@@{text"{t |x. P}"}},
+where @{text "x y"} are those free variables in @{text t}
+that occur in @{text P}.
+This is just a shorthand for @{term"{v. EX x y. v = t \<and> P}"}, where
+@{text v} is a new variable. For example, @{term"{x+y|x. x \<in> A}"}
+is short for \noquotes{@{term[source]"{v. \<exists>x. v = x+y \<and> x \<in> A}"}}.
+\end{warn}
+
+Here are the ASCII representations of the mathematical symbols:
+\begin{center}
+\begin{tabular}{l@ {\quad}l@ {\quad}l}
+@{text "\<in>"} & \texttt{\char`\\\char`\<in>} & \texttt{:}\\
+@{text "\<subseteq>"} & \texttt{\char`\\\char`\<subseteq>} & \texttt{<=}\\
+@{text "\<union>"} & \texttt{\char`\\\char`\<union>} & \texttt{Un}\\
+@{text "\<inter>"} & \texttt{\char`\\\char`\<inter>} & \texttt{Int}
+\end{tabular}
+\end{center}
+Sets also allow bounded quantifications @{prop"ALL x : A. P"} and
+@{prop"EX x : A. P"}.
+
+For the more ambitious, there are also @{text"\<Union>"}\index{$HOLSet6@\isasymUnion}
+and @{text"\<Inter>"}\index{$HOLSet7@\isasymInter}:
+\begin{center}
+@{thm Union_eq} \qquad @{thm Inter_eq}
+\end{center}
+The ASCII forms of @{text"\<Union>"} are \texttt{\char`\\\char`\<Union>} and \texttt{Union},
+those of @{text"\<Inter>"} are \texttt{\char`\\\char`\<Inter>} and \texttt{Inter}.
+There are also indexed unions and intersections:
+\begin{center}
+@{thm UNION_eq} \\ @{thm INTER_eq}
+\end{center}
+The ASCII forms are \ \texttt{UN x:A.~B} \ and \ \texttt{INT x:A. B} \
+where \texttt{x} may occur in \texttt{B}.
+If \texttt{A} is \texttt{UNIV} you can just write \ \texttt{UN x.~B} \ and \ \texttt{INT x. B}.
+
+Some other frequently useful functions on sets are the following:
+\begin{center}
+\begin{tabular}{l@ {\quad}l}
+@{const_typ set}\index{set@@{const set}} & converts a list to the set of its elements\\
+@{const_typ finite}\index{finite@@{const finite}} & is true iff its argument is finite\\
+@{const_typ card}\index{card@@{const card}} & is the cardinality of a finite set\\
+ & and is @{text 0} for all infinite sets\\
+@{thm image_def}\index{$IMP042@@{term"f ` A"}} & is the image of a function over a set
+\end{tabular}
+\end{center}
+See \cite{Nipkow-Main} for the wealth of further predefined functions in theory
+@{theory Main}.
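+
+As a tiny illustrative check of this notation (the identity is arbitrary):
+*}
+
+lemma "A \<union> (B \<inter> C) = (A \<union> B) \<inter> (A \<union> C)"
+by auto
+
+text{*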
+
+
+\subsection*{Exercises}
+
+\exercise
+Start from the data type of binary trees defined earlier:
+*}
+
+datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
+
+text{*
+Define a function @{text "set ::"} @{typ "'a tree \<Rightarrow> 'a set"}
+that returns the elements in a tree and a function
+@{text "ord ::"} @{typ "int tree \<Rightarrow> bool"}
+that tests if an @{typ "int tree"} is ordered.
+
+Define a function @{text ins} that inserts an element into an ordered @{typ "int tree"}
+while maintaining the order of the tree. If the element is already in the tree, the
+same tree should be returned. Prove correctness of @{text ins}:
+@{prop "set(ins x t) = {x} \<union> set t"} and @{prop "ord t \<Longrightarrow> ord(ins i t)"}.
+\endexercise
+
+
+\section{Proof Automation}
+
+So far we have only seen @{text simp} and \indexed{@{text auto}}{auto}: Both perform
+rewriting, both can also prove linear arithmetic facts (no multiplication),
+and @{text auto} is also able to prove simple logical or set-theoretic goals:
+*}
+
+lemma "\<forall>x. \<exists>y. x = y"
+by auto
+
+lemma "A \<subseteq> B \<inter> C \<Longrightarrow> A \<subseteq> B \<union> C"
+by auto
+
+text{* where
+\begin{quote}
+\isacom{by} \textit{proof-method}
+\end{quote}
+is short for
+\begin{quote}
+\isacom{apply} \textit{proof-method}\\
+\isacom{done}
+\end{quote}
+The key characteristics of both @{text simp} and @{text auto} are
+\begin{itemize}
+\item They show you where they got stuck, giving you an idea how to continue.
+\item They perform the obvious steps but are highly incomplete.
+\end{itemize}
+A proof method is \conceptnoidx{complete} if it can prove all true formulas.
+There is no complete proof method for HOL, not even in theory.
+Hence all our proof methods only differ in how incomplete they are.
+
+A proof method that is still incomplete but tries harder than @{text auto} is
+\indexed{@{text fastforce}}{fastforce}.  It either succeeds or fails, it acts on the first
+subgoal only, and it can be modified just like @{text auto}, e.g.\
+with @{text "simp add"}. Here is a typical example of what @{text fastforce}
+can do:
+*}
+
+lemma "\<lbrakk> \<forall>xs \<in> A. \<exists>ys. xs = ys @ ys;  us \<in> A \<rbrakk>
+   \<Longrightarrow> \<exists>n. length us = n+n"
+by fastforce
+
+text{* This lemma is out of reach for @{text auto} because of the
+quantifiers.  Even @{text fastforce} fails when the quantifier structure
+becomes more complicated. In a few cases, its slow version @{text force}
+succeeds where @{text fastforce} fails.
+
+The method of choice for complex logical goals is \indexed{@{text blast}}{blast}. In the
+following example, @{text T} and @{text A} are two binary predicates. It
+is shown that if @{text T} is total, @{text A} is antisymmetric and @{text T} is
+a subset of @{text A}, then @{text A} is a subset of @{text T}:
+*}
+
+lemma
+  "\<lbrakk> \<forall>x y. T x y \<or> T y x;
+     \<forall>x y. A x y \<and> A y x \<longrightarrow> x = y;
+     \<forall>x y. T x y \<longrightarrow> A x y \<rbrakk>
+   \<Longrightarrow> \<forall>x y. A x y \<longrightarrow> T x y"
+by blast
+
+text{*
+We leave it to the reader to figure out why this lemma is true.
+Method @{text blast}
+\begin{itemize}
+\item is (in principle) a complete proof procedure for first-order formulas,
+  a fragment of HOL. In practice there is a search bound.
+\item does no rewriting and knows very little about equality.
+\item covers logic, sets and relations.
+\item either succeeds or fails.
+\end{itemize}
+Because of its strength in logic and sets and its weakness in equality reasoning, it complements the earlier proof methods.
+
+
+\subsection{\concept{Sledgehammer}}
+
+Command \isacom{sledgehammer} calls a number of external automatic
+theorem provers (ATPs) that run for up to 30 seconds searching for a
+proof. Some of these ATPs are part of the Isabelle installation, others are
+queried over the internet. If successful, a proof command is generated and can
+be inserted into your proof.  The biggest win of \isacom{sledgehammer} is
+that it will take into account the whole lemma library and you do not need to
+feed in any lemma explicitly. For example,*}
+
+lemma "\<lbrakk> xs @ ys = ys @ xs;  length xs = length ys \<rbrakk> \<Longrightarrow> xs = ys"
+
+txt{* cannot be solved by any of the standard proof methods, but
+\isacom{sledgehammer} finds the following proof: *}
+
+by (metis append_eq_conv_conj)
+
+text{* We do not explain how the proof was found but what this command
+means. For a start, Isabelle does not trust external tools (and in particular
+not the translations from Isabelle's logic to those tools!)
+and insists on a proof that it can check. This is what \indexed{@{text metis}}{metis} does.
+It is given a list of lemmas and tries to find a proof just using those lemmas
+(and pure logic). In contrast to @{text simp} and friends that know a lot of
+lemmas already, using @{text metis} manually is tedious because one has
+to find all the relevant lemmas first. But that is precisely what
+\isacom{sledgehammer} does for us.
+In this case lemma @{thm[source]append_eq_conv_conj} alone suffices:
+@{thm[display] append_eq_conv_conj}
+We leave it to the reader to figure out why this lemma suffices to prove
+the above lemma, even without any knowledge of what the functions @{const take}
+and @{const drop} do. Keep in mind that the variables in the two lemmas
+are independent of each other, despite the same names, and that you can
+substitute arbitrary values for the free variables in a lemma.
+
+Just as for the other proof methods we have seen, there is no guarantee that
+\isacom{sledgehammer} will find a proof if it exists. Nor is
+\isacom{sledgehammer} superior to the other proof methods.  They are
+incomparable. Therefore it is recommended to apply @{text simp} or @{text
+auto} before invoking \isacom{sledgehammer} on what is left.
+
+\subsection{Arithmetic}
+
+By arithmetic formulas we mean formulas involving variables, numbers, @{text
+"+"}, @{text"-"}, @{text "="}, @{text "<"}, @{text "\<le>"} and the usual logical
+connectives @{text"\<not>"}, @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"},
+@{text"\<longleftrightarrow>"}. Strictly speaking, this is known as \concept{linear arithmetic}
+because it does not involve multiplication, although multiplication with
+numbers, e.g.\ @{text"2*n"}, is allowed. Such formulas can be proved by
+\indexed{@{text arith}}{arith}:
+*}
+
+lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
+by arith
+
+text{* In fact, @{text auto} and @{text simp} can prove many linear
+arithmetic formulas already, like the one above, by calling a weak but fast
+version of @{text arith}. Hence it is usually not necessary to invoke
+@{text arith} explicitly.
+
+The above example involves natural numbers, but integers (type @{typ int})
+and real numbers (type @{text real}) are supported as well. As are a number
+of further operators like @{const min} and @{const max}. On @{typ nat} and
+@{typ int}, @{text arith} can even prove theorems with quantifiers in them,
+but we will not enlarge on that here.
+
+
+\subsection{Trying Them All}
+
+If you want to try all of the above automatic proof methods you simply type
+\begin{isabelle}
+\isacom{try}
+\end{isabelle}
+You can also add specific simplification and introduction rules:
+\begin{isabelle}
+\isacom{try} @{text"simp: \<dots> intro: \<dots>"}
+\end{isabelle}
+There is also a lightweight variant \isacom{try0} that does not call
+sledgehammer.
+
+\section{Single Step Proofs}
+
+Although automation is nice, it often fails, at least initially, and you need
+to find out why. When @{text fastforce} or @{text blast} simply fail, you have
+no clue why. At this point, the stepwise
+application of proof rules may be necessary. For example, if @{text blast}
+fails on @{prop"A \<and> B"}, you want to attack the two
+conjuncts @{text A} and @{text B} separately. This can
+be achieved by applying \emph{conjunction introduction}
+\[ @{thm[mode=Rule,show_question_marks]conjI}\ @{text conjI}
+\]
+to the proof state. We will now examine the details of this process.
+
+\subsection{Instantiating Unknowns}
+
+We had briefly mentioned earlier that after proving some theorem,
+Isabelle replaces all free variables @{text x} by so-called \conceptidx{unknowns}{unknown}
+@{text "?x"}. We can see this clearly in rule @{thm[source] conjI}.
+These unknowns can later be instantiated explicitly or implicitly:
+\begin{itemize}
+\item By hand, using \indexed{@{text of}}{of}.
+The expression @{text"conjI[of \"a=b\" \"False\"]"}
+instantiates the unknowns in @{thm[source] conjI} from left to right with the
+two formulas @{text"a=b"} and @{text False}, yielding the rule
+@{thm[display,mode=Rule]conjI[of "a=b" False]}
+
+In general, @{text"th[of string\<^sub>1 \<dots> string\<^sub>n]"} instantiates
+the unknowns in the theorem @{text th} from left to right with the terms
+@{text string\<^sub>1} to @{text string\<^sub>n}.
+
+\item By unification. \conceptidx{Unification}{unification} is the process of making two
+terms syntactically equal by suitable instantiations of unknowns. For example,
+unifying @{text"?P \<and> ?Q"} with \mbox{@{prop"a=b \<and> False"}} instantiates
+@{text "?P"} with @{prop "a=b"} and @{text "?Q"} with @{prop False}.
+\end{itemize}
+We need not instantiate all unknowns. If we want to skip a particular one we
+can just write @{text"_"} instead, for example @{text "conjI[of _ \"False\"]"}.
+Unknowns can also be instantiated by name using \indexed{@{text "where"}}{where}, for example
+@{text "conjI[where ?P = \"a=b\""} \isacom{and} @{text"?Q = \"False\"]"}.
+
+
+\subsection{Rule Application}
+
+\conceptidx{Rule application}{rule application} means applying a rule backwards to a proof state.
+For example, applying rule @{thm[source]conjI} to a proof state
+\begin{quote}
+@{text"1.  \<dots>  \<Longrightarrow> A \<and> B"}
+\end{quote}
+results in two subgoals, one for each premise of @{thm[source]conjI}:
+\begin{quote}
+@{text"1.  \<dots>  \<Longrightarrow> A"}\\
+@{text"2.  \<dots>  \<Longrightarrow> B"}
+\end{quote}
+In general, the application of a rule @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}
+to a subgoal \mbox{@{text"\<dots> \<Longrightarrow> C"}} proceeds in two steps:
+\begin{enumerate}
+\item
+Unify @{text A} and @{text C}, thus instantiating the unknowns in the rule.
+\item
+Replace the subgoal @{text C} with @{text n} new subgoals @{text"A\<^sub>1"} to @{text"A\<^sub>n"}.
+\end{enumerate}
+This is the command to apply rule @{text xyz}:
+\begin{quote}
+\isacom{apply}@{text"(rule xyz)"}\index{rule@@{text rule}}
+\end{quote}
+This is also called \concept{backchaining} with rule @{text xyz}.
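+
+A minimal sketch of backchaining with @{thm[source] conjI} (the goal is
+chosen only for illustration):
+*}
+
+lemma "A \<Longrightarrow> B \<Longrightarrow> A \<and> B"
+apply(rule conjI)
+ apply assumption
+apply assumption
+done
+
+text{*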
+
+\subsection{Introduction Rules}
+
+Conjunction introduction (@{thm[source] conjI}) is one example of a whole
+class of rules known as \conceptidx{introduction rules}{introduction rule}. They explain under which
+premises some logical construct can be introduced. Here are some further
+useful introduction rules:
+\[
+\inferrule*[right=\mbox{@{text impI}}]{\mbox{@{text"?P \<Longrightarrow> ?Q"}}}{\mbox{@{text"?P \<longrightarrow> ?Q"}}}
+\qquad
+\inferrule*[right=\mbox{@{text allI}}]{\mbox{@{text"\<And>x. ?P x"}}}{\mbox{@{text"\<forall>x. ?P x"}}}
+\]
+\[
+\inferrule*[right=\mbox{@{text iffI}}]{\mbox{@{text"?P \<Longrightarrow> ?Q"}} \\ \mbox{@{text"?Q \<Longrightarrow> ?P"}}}
+  {\mbox{@{text"?P = ?Q"}}}
+\]
+These rules are part of the logical system of \concept{natural deduction}
+(e.g.\ \cite{HuthRyan}). Although we intentionally de-emphasize the basic rules
+of logic in favour of automatic proof methods that allow you to take bigger
+steps, these rules are helpful in locating where and why automation fails.
+When applied backwards, these rules decompose the goal:
+\begin{itemize}
+\item @{thm[source] conjI} and @{thm[source]iffI} split the goal into two subgoals,
+\item @{thm[source] impI} moves the left-hand side of a HOL implication into the list of assumptions,
+\item and @{thm[source] allI} removes a @{text "\<forall>"} by turning the quantified variable into a fixed local variable of the subgoal.
+\end{itemize}
+Isabelle knows about these and a number of other introduction rules.
+The command
+\begin{quote}
+\isacom{apply} @{text rule}\index{rule@@{text rule}}
+\end{quote}
+automatically selects the appropriate rule for the current subgoal.
+
+You can also turn your own theorems into introduction rules by giving them
+the \indexed{@{text"intro"}}{intro} attribute, analogous to the @{text simp} attribute.  In
+that case @{text blast}, @{text fastforce} and (to a limited extent) @{text
+auto} will automatically backchain with those theorems. The @{text intro}
+attribute should be used with care because it increases the search space and
+can lead to nontermination.  Sometimes it is better to use it only in
+specific calls of @{text blast} and friends. For example,
+@{thm[source] le_trans}, transitivity of @{text"\<le>"} on type @{typ nat},
+is not an introduction rule by default because of the disastrous effect
+on the search space, but can be useful in specific situations:
+*}
+
+lemma "\<lbrakk> (a::nat) \<le> b; b \<le> c; c \<le> d; d \<le> e \<rbrakk> \<Longrightarrow> a \<le> e"
+by(blast intro: le_trans)
+
+text{*
+Of course this is just an example and could be proved by @{text arith}, too.
+
+\subsection{Forward Proof}
+\label{sec:forward-proof}
+
+Forward proof means deriving new theorems from old theorems. We have already
+seen a very simple form of forward proof: the @{text of} operator for
+instantiating unknowns in a theorem. The big brother of @{text of} is
+\indexed{@{text OF}}{OF} for applying one theorem to others. Given a theorem @{prop"A \<Longrightarrow> B"} called
+@{text r} and a theorem @{text A'} called @{text r'}, the theorem @{text
+"r[OF r']"} is the result of applying @{text r} to @{text r'}, where @{text
+r} should be viewed as a function taking a theorem @{text A} and returning
+@{text B}.  More precisely, @{text A} and @{text A'} are unified, thus
+instantiating the unknowns in @{text B}, and the result is the instantiated
+@{text B}. Of course, unification may also fail.
+\begin{warn}
+Application of rules to other rules operates in the forward direction: from
+the premises to the conclusion of the rule; application of rules to proof
+states operates in the backward direction, from the conclusion to the
+premises.
+\end{warn}
+
+In general @{text r} can be of the form @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}
+and there can be multiple argument theorems @{text r\<^sub>1} to @{text r\<^sub>m}
+(with @{text"m \<le> n"}), in which case @{text "r[OF r\<^sub>1 \<dots> r\<^sub>m]"} is obtained
+by unifying and thus proving @{text "A\<^sub>i"} with @{text "r\<^sub>i"}, @{text"i = 1\<dots>m"}.
+Here is an example, where @{thm[source]refl} is the theorem
+@{thm[show_question_marks] refl}:
+*}
+
+thm conjI[OF refl[of "a"] refl[of "b"]]
+
+text{* yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
+The command \isacom{thm} merely displays the result.
+
+Forward reasoning also makes sense in connection with proof states.
+Therefore @{text blast}, @{text fastforce} and @{text auto} support a modifier
+@{text dest} which instructs the proof method to use certain rules in a
+forward fashion. If @{text r} is of the form \mbox{@{text "A \<Longrightarrow> B"}}, the modifier
+\mbox{@{text"dest: r"}}\index{dest@@{text"dest:"}}
+allows proof search to reason forward with @{text r}, i.e.\
+to replace an assumption @{text A'}, where @{text A'} unifies with @{text A},
+with the correspondingly instantiated @{text B}. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
+*}
+
+lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
+by(blast dest: Suc_leD)
+
+text{* In this particular example we could have backchained with
+@{thm[source] Suc_leD}, too, but because the premise is more complicated than the conclusion this can easily lead to nontermination.
+
+%\subsection{Finding Theorems}
+%
+%Command \isacom{find{\isacharunderscorekeyword}theorems} searches for specific theorems in the current
+%theory. Search criteria include pattern matching on terms and on names.
+%For details see the Isabelle/Isar Reference Manual~\cite{IsarRef}.
+%\bigskip
+
+\begin{warn}
+To ease readability we will drop the question marks
+in front of unknowns from now on.
+\end{warn}
+
+
+\section{Inductive Definitions}
+\label{sec:inductive-defs}\index{inductive definition|(}
+
+Inductive definitions are the third important definition facility, after
+datatypes and recursive functions.
+\ifsem
+In fact, they are the key construct in the
+definition of operational semantics in the second part of the book.
+\fi
+
+\subsection{An Example: Even Numbers}
+\label{sec:Logic:even}
+
+Here is a simple example of an inductively defined predicate:
+\begin{itemize}
+\item 0 is even
+\item If $n$ is even, so is $n+2$.
+\end{itemize}
+The operative word ``inductive'' means that these are the only even numbers.
+In Isabelle we give the two rules the names @{text ev0} and @{text evSS}
+and write
+*}
+
+inductive ev :: "nat \<Rightarrow> bool" where
+ev0:    "ev 0" |
+evSS:  (*<*)"ev n \<Longrightarrow> ev (Suc(Suc n))"(*>*)
+text_raw{* @{prop[source]"ev n \<Longrightarrow> ev (n + 2)"} *}
+
+text{* To get used to inductive definitions, we will first prove a few
+properties of @{const ev} informally before we descend to the Isabelle level.
+
+How do we prove that some number is even, e.g.\ @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
+\begin{quote}
+@{text "ev 0 \<Longrightarrow> ev (0 + 2) \<Longrightarrow> ev((0 + 2) + 2) = ev 4"}
+\end{quote}
+
+\subsubsection{Rule Induction}\index{rule induction|(}
+
+Showing that all even numbers have some property is more complicated.  For
+example, let us prove that the inductive definition of even numbers agrees
+with the following recursive one:*}
+
+fun even :: "nat \<Rightarrow> bool" where
+"even 0 = True" |
+"even (Suc 0) = False" |
+"even (Suc(Suc n)) = even n"
+
+text{* We prove @{prop"ev m \<Longrightarrow> even m"}.  That is, we
+assume @{prop"ev m"} and by induction on the form of its derivation
+prove @{prop"even m"}. There are two cases corresponding to the two rules
+for @{const ev}:
+\begin{description}
+\item[Case @{thm[source]ev0}:]
+ @{prop"ev m"} was derived by rule @{prop "ev 0"}: \\
+ @{text"\<Longrightarrow>"} @{prop"m=(0::nat)"} @{text"\<Longrightarrow>"} @{text "even m = even 0 = True"}
+\item[Case @{thm[source]evSS}:]
+ @{prop"ev m"} was derived by rule @{prop "ev n \<Longrightarrow> ev(n+2)"}: \\
+@{text"\<Longrightarrow>"} @{prop"m=n+(2::nat)"} and by induction hypothesis @{prop"even n"}\\
+@{text"\<Longrightarrow>"} @{text"even m = even(n + 2) = even n = True"}
+\end{description}
+
+What we have just seen is a special case of \concept{rule induction}.
+Rule induction applies to propositions of this form
+\begin{quote}
+@{prop "ev n \<Longrightarrow> P n"}
+\end{quote}
+That is, we want to prove a property @{prop"P n"}
+for all even @{text n}. But if we assume @{prop"ev n"}, then there must be
+some derivation of this assumption using the two defining rules for
+@{const ev}. That is, we must prove
+\begin{description}
+\item[Case @{thm[source]ev0}:] @{prop"P(0::nat)"}
+\item[Case @{thm[source]evSS}:] @{prop"\<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n + 2::nat)"}
+\end{description}
+The corresponding rule is called @{thm[source] ev.induct} and looks like this:
+\[
+\inferrule{
+\mbox{@{thm (prem 1) ev.induct[of "n"]}}\\
+\mbox{@{thm (prem 2) ev.induct}}\\
+\mbox{@{prop"!!n. \<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n+2)"}}}
+{\mbox{@{thm (concl) ev.induct[of "n"]}}}
+\]
+The first premise @{prop"ev n"} enforces that this rule can only be applied
+in situations where we know that @{text n} is even.
+
+Note that in the induction step we may not just assume @{prop"P n"} but also
+\mbox{@{prop"ev n"}}, which is simply the premise of rule @{thm[source]
+evSS}.  Here is an example where the local assumption @{prop"ev n"} comes in
+handy: we prove @{prop"ev m \<Longrightarrow> ev(m - 2)"} by induction on @{prop"ev m"}.
+Case @{thm[source]ev0} requires us to prove @{prop"ev(0 - 2)"}, which follows
+from @{prop"ev 0"} because @{prop"0 - 2 = (0::nat)"} on type @{typ nat}. In
+case @{thm[source]evSS} we have \mbox{@{prop"m = n+(2::nat)"}} and may assume
+@{prop"ev n"}, which implies @{prop"ev (m - 2)"} because @{text"m - 2 = (n +
+2) - 2 = n"}. We did not need the induction hypothesis at all for this proof,
+it is just a case analysis of which rule was used, but having @{prop"ev
+n"} at our disposal in case @{thm[source]evSS} was essential.
+This case analysis of rules is also called ``rule inversion''
+and is discussed in more detail in \autoref{ch:Isar}.
+
+\subsubsection{In Isabelle}
+
+Let us now recast the above informal proofs in Isabelle. For a start,
+we use @{const Suc} terms instead of numerals in rule @{thm[source]evSS}:
+@{thm[display] evSS}
+This avoids the difficulty of unifying @{text"n+2"} with some numeral,
+which is not automatic.
+
+The simplest way to prove @{prop"ev(Suc(Suc(Suc(Suc 0))))"} is in a forward
+direction: @{text "evSS[OF evSS[OF ev0]]"} yields the theorem @{thm evSS[OF
+evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
+fashion. Although this is more verbose, it allows us to demonstrate how each
+rule application changes the proof state: *}
+
+lemma "ev(Suc(Suc(Suc(Suc 0))))"
+txt{*
+@{subgoals[display,indent=0,goals_limit=1]}
+*}
+apply(rule evSS)
+txt{*
+@{subgoals[display,indent=0,goals_limit=1]}
+*}
+apply(rule evSS)
+txt{*
+@{subgoals[display,indent=0,goals_limit=1]}
+*}
+apply(rule ev0)
+done
+
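+text{* The forward derivation mentioned above can be displayed directly with
+\isacom{thm}: *}
+
+thm evSS[OF evSS[OF ev0]]
+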
+text{* \indent
+Rule induction is applied by giving the induction rule explicitly via the
+@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}*}
+
+lemma "ev m \<Longrightarrow> even m"
+apply(induction rule: ev.induct)
+by(simp_all)
+
+text{* Both cases are automatic. Note that if there are multiple assumptions
+of the form @{prop"ev t"}, method @{text induction} will induct on the leftmost
+one.
+
+As a bonus, we also prove the remaining direction of the equivalence of
+@{const ev} and @{const even}:
+*}
+
+lemma "even n \<Longrightarrow> ev n"
+apply(induction n rule: even.induct)
+
+txt{* This is a proof by computation induction on @{text n} (see
+\autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
+the three equations for @{const even}:
+@{subgoals[display,indent=0]}
+The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because @{prop"even(Suc 0)"} is @{const False}:
+*}
+
+by (simp_all add: ev0 evSS)
+
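+text{* The informal rule inversion argument for @{prop"ev m \<Longrightarrow> ev(m - 2)"} above
+can be replayed in the same style. Here is a minimal sketch; @{thm[source]ev0} is
+supplied because the rules of @{const ev} are not yet simplification rules at
+this point, and the library fact @{thm[source]numeral_2_eq_2} bridges the
+numeral @{text 2} and the @{const Suc} notation: *}
+
+lemma "ev m \<Longrightarrow> ev(m - 2)"
+apply(induction rule: ev.induct)
+by(simp_all add: ev0 numeral_2_eq_2)
+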
+text{* The rules for @{const ev} make perfect simplification and introduction
+rules because their premises are always smaller than the conclusion. It
+makes sense to turn them into simplification and introduction rules
+permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
+\index{intros@@{text".intros"}} by Isabelle: *}
+
+declare ev.intros[simp,intro]
+
+text{* The rules of an inductive definition are not simplification rules by
+default because, in contrast to recursive functions, there is no termination
+requirement for inductive definitions.
+
+\subsubsection{Inductive Versus Recursive}
+
+We have seen two definitions of the notion of evenness, an inductive and a
+recursive one. Which one is better? Much of the time, the recursive one is
+more convenient: it allows us to do rewriting in the middle of terms, and it
+expresses both the positive information (which numbers are even) and the
+negative information (which numbers are not even) directly. An inductive
+definition only expresses the positive information directly. The negative
+information, for example, that @{text 1} is not even, has to be proved from
+it (by induction or rule inversion). On the other hand, rule induction is
+tailor-made for proving \mbox{@{prop"ev n \<Longrightarrow> P n"}} because it only asks you
+to prove the positive cases. In the proof of @{prop"even n \<Longrightarrow> P n"} by
+computation induction via @{thm[source]even.induct}, we are also presented
+with the trivial negative cases. If you want the convenience of both
+rewriting and rule induction, you can make two definitions and show their
+equivalence (as above) or make one definition and prove additional properties
+from it, for example rule induction from computation induction.
+
+But many concepts do not admit a recursive definition at all because there is
+no datatype for the recursion (for example, the transitive closure of a
+relation), or the recursion would not terminate (for example,
+an interpreter for a programming language). Even if there is a recursive
+definition, if we are only interested in the positive information, the
+inductive definition may be much simpler.
+
+\subsection{The Reflexive Transitive Closure}
+\label{sec:star}
+
+Evenness is really more conveniently expressed recursively than inductively.
+As a second and very typical example of an inductive definition we define the
+reflexive transitive closure.
+\ifsem
+It will also be an important building block for
+some of the semantics considered in the second part of the book.
+\fi
+
+The reflexive transitive closure, called @{text star} below, is a function
+that maps a binary predicate to another binary predicate: if @{text r} is of
+type @{text"\<tau> \<Rightarrow> \<tau> \<Rightarrow> bool"} then @{term "star r"} is again of type @{text"\<tau> \<Rightarrow>
+\<tau> \<Rightarrow> bool"}, and @{prop"star r x y"} means that @{text x} and @{text y} are in
+the relation @{term"star r"}. Think @{term"r^*"} when you see @{term"star
+r"}, because @{text"star r"} is meant to be the reflexive transitive closure.
+That is, @{prop"star r x y"} is meant to be true if from @{text x} we can
+reach @{text y} in finitely many @{text r} steps. This concept is naturally
+defined inductively: *}
+
+inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"  for r where
+refl:  "star r x x" |
+step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
+
+text{* The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
+step case @{thm[source]step} combines an @{text r} step (from @{text x} to
+@{text y}) and a @{term"star r"} step (from @{text y} to @{text z}) into a
+@{term"star r"} step (from @{text x} to @{text z}).
+The ``\isacom{for}~@{text r}'' in the header is merely a hint to Isabelle
+that @{text r} is a fixed parameter of @{const star}, in contrast to the
+further parameters of @{const star}, which change. As a result, Isabelle
+generates a simpler induction rule.
+
+By definition @{term"star r"} is reflexive. It is also transitive, but we
+need rule induction to prove that: *}
+
+lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
+apply(induction rule: star.induct)
+(*<*)
+defer
+apply(rename_tac u x y)
+defer
+(*>*)
+txt{* The induction is over @{prop"star r x y"} (the first matching assumption)
+and we try to prove \mbox{@{prop"star r y z \<Longrightarrow> star r x z"}},
+which we abbreviate by @{prop"P x y"}. These are our two subgoals:
+@{subgoals[display,indent=0]}
+The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
+and it is trivial:\index{assumption@@{text assumption}}
+*}
+apply(assumption)
+txt{* Let us examine subgoal @{text 2}, case @{thm[source] step}.
+Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
+are the premises of rule @{thm[source]step}.
+Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
+the IH coming from @{prop"star r x y"}. We have to prove @{prop"P u y"},
+which we do by assuming @{prop"star r y z"} and proving @{prop"star r u z"}.
+The proof itself is straightforward: from \mbox{@{prop"star r y z"}} the IH
+leads to @{prop"star r x z"} which, together with @{prop"r u x"},
+leads to \mbox{@{prop"star r u z"}} via rule @{thm[source]step}:
+*}
+apply(metis step)
+done
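+
+text{* The introduction rules of @{const star} suffice for simple facts, too,
+for example that a single @{text r} step is already a @{term"star r"} step;
+a one-line sketch: *}
+
+lemma "r x y \<Longrightarrow> star r x y"
+by(metis star.refl star.step)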
+
+text{*\index{rule induction|)}
+
+\subsection{The General Case}
+
+Inductive definitions have approximately the following general form:
+\begin{quote}
+\isacom{inductive} @{text"I :: \"\<tau> \<Rightarrow> bool\""} \isacom{where}
+\end{quote}
+followed by a sequence of (possibly named) rules of the form
+\begin{quote}
+@{text "\<lbrakk> I a\<^sub>1; \<dots>; I a\<^sub>n \<rbrakk> \<Longrightarrow> I a"}
+\end{quote}
+separated by @{text"|"}. As usual, @{text n} can be 0.
+The corresponding rule induction principle
+@{text I.induct} applies to propositions of the form
+\begin{quote}
+@{prop "I x \<Longrightarrow> P x"}
+\end{quote}
+where @{text P} may itself be a chain of implications.
+\begin{warn}
+Rule induction is always on the leftmost premise of the goal.
+Hence @{text "I x"} must be the first premise.
+\end{warn}
+Proving @{prop "I x \<Longrightarrow> P x"} by rule induction means proving
+for every rule of @{text I} that @{text P} is invariant:
+\begin{quote}
+@{text "\<lbrakk> I a\<^sub>1; P a\<^sub>1; \<dots>; I a\<^sub>n; P a\<^sub>n \<rbrakk> \<Longrightarrow> P a"}
+\end{quote}
+
+The above format for inductive definitions is simplified in a number of
+respects. @{text I} can have any number of arguments and each rule can have
+additional premises not involving @{text I}, so-called \conceptidx{side
+conditions}{side condition}. In rule inductions, these side conditions appear as additional
+assumptions. The \isacom{for} clause seen in the definition of the reflexive
+transitive closure simplifies the induction rule.
+\index{inductive definition|)}
+
+\subsection*{Exercises}
+
+\begin{exercise}
+Formalise the following definition of palindromes
+\begin{itemize}
+\item The empty list and a singleton list are palindromes.
+\item If @{text xs} is a palindrome, so is @{term "a # xs @ [a]"}.
+\end{itemize}
+as an inductive predicate @{text "palindrome ::"} @{typ "'a list \<Rightarrow> bool"}
+and prove that @{prop "rev xs = xs"} if @{text xs} is a palindrome.
+\end{exercise}
+
+\exercise
+We could also have defined @{const star} as follows:
+*}
+
+inductive star' :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" for r where
+refl': "star' r x x" |
+step': "star' r x y \<Longrightarrow> r y z \<Longrightarrow> star' r x z"
+
+text{*
+The single @{text r} step is performed after rather than before the @{text star'}
+steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
+@{prop "star r x y \<Longrightarrow> star r' x y"}. You may need lemmas.
+Note that rule induction fails
+if the assumption about the inductive predicate is not the first assumption.
+\endexercise
+
+\begin{exercise}\label{exe:iter}
+Analogous to @{const star}, give an inductive definition of the @{text n}-fold iteration
+of a relation @{text r}: @{term "iter r n x y"} should hold if there are @{text x\<^sub>0}, \dots, @{text x\<^sub>n}
+such that @{prop"x = x\<^sub>0"}, @{prop"x\<^sub>n = y"} and @{text"r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>"} for
+all @{prop"i < n"}. Correct and prove the following claim:
+@{prop"star r x y \<Longrightarrow> iter r n x y"}.
+\end{exercise}
+
+\begin{exercise}
+A context-free grammar can be seen as an inductive definition where each
+nonterminal $A$ is an inductively defined predicate on lists of terminal
+symbols: $A(w)$ means that $w$ is in the language generated by $A$.
+For example, the production $S \to a S b$ can be viewed as the implication
+@{prop"S w \<Longrightarrow> S (a # w @ [b])"} where @{text a} and @{text b} are terminal symbols,
+i.e., elements of some alphabet. The alphabet can be defined like this:
+\isacom{datatype} @{text"alpha = a | b | \<dots>"}
+
+Define the two grammars (where $\varepsilon$ is the empty word)
+\[
+\begin{array}{r@ {\quad}c@ {\quad}l}
+S &\to& \varepsilon \quad\mid\quad aSb \quad\mid\quad SS \\
+T &\to& \varepsilon \quad\mid\quad TaTb
+\end{array}
+\]
+as two inductive predicates.
+If you think of @{text a} and @{text b} as ``@{text "("}'' and  ``@{text ")"}'',
+the grammars define strings of balanced parentheses.
+Prove @{prop"T w \<Longrightarrow> S w"} and @{prop "S w \<Longrightarrow> T w"} separately and conclude
+@{prop "S w = T w"}.
+\end{exercise}
+
+\ifsem
+\begin{exercise}
+In \autoref{sec:AExp} we defined a recursive evaluation function
+@{text "aval :: aexp \<Rightarrow> state \<Rightarrow> val"}.
+Define an inductive evaluation predicate
+@{text "aval_rel :: aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool"}
+and prove that it agrees with the recursive function:
+@{prop "aval_rel a s v \<Longrightarrow> aval a s = v"}, 
+@{prop "aval a s = v \<Longrightarrow> aval_rel a s v"} and thus
+\noquotes{@{prop [source] "aval_rel a s v \<longleftrightarrow> aval a s = v"}}.
+\end{exercise}
+
+\begin{exercise}
+Consider the stack machine from Chapter~3
+and recall the concept of \concept{stack underflow}
+from Exercise~\ref{exe:stack-underflow}.
+Define an inductive predicate
+@{text "ok :: nat \<Rightarrow> instr list \<Rightarrow> nat \<Rightarrow> bool"}
+such that @{text "ok n is n'"} means that with any initial stack of length
+@{text n} the instructions @{text "is"} can be executed
+without stack underflow and that the final stack has length @{text n'}.
+Prove that @{text ok} correctly computes the final stack size
+@{prop[display] "\<lbrakk>ok n is n'; length stk = n\<rbrakk> \<Longrightarrow> length (exec is s stk) = n'"}
+and that instruction sequences generated by @{text comp}
+cannot cause stack underflow: \ @{text "ok n (comp a) ?"} \ for
+some suitable value of @{text "?"}.
+\end{exercise}
+\fi
+*}
+(*<*)
+end
+(*>*)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/MyList.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,17 @@
+theory MyList
+imports Main
+begin
+
+datatype 'a list = Nil | Cons 'a "'a list"
+
+fun app :: "'a list => 'a list => 'a list" where
+"app Nil ys = ys" |
+"app (Cons x xs) ys = Cons x (app xs ys)"
+
+fun rev :: "'a list => 'a list" where
+"rev Nil = Nil" |
+"rev (Cons x xs) = app (rev xs) (Cons x Nil)"
+
+value "rev(Cons True (Cons False Nil))"
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/Types_and_funs.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,593 @@
+(*<*)
+theory Types_and_funs
+imports Main
+begin
+(*>*)
+text{*
+\vspace{-5ex}
+\section{Type and Function Definitions}
+
+Type synonyms are abbreviations for existing types, for example
+*}
+
+type_synonym string = "char list"
+
+text{*\index{string@@{text string}}
+Type synonyms are expanded after parsing and are not present in internal representation and output. They are mere conveniences for the reader.
+
+\subsection{Datatypes}
+\label{sec:datatypes}
+
+The general form of a datatype definition looks like this:
+\begin{quote}
+\begin{tabular}{@ {}rclcll}
+\indexed{\isacom{datatype}}{datatype} @{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"}
+     & = & $C_1 \ @{text"\""}\tau_{1,1}@{text"\""} \dots @{text"\""}\tau_{1,n_1}@{text"\""}$ \\
+     & $|$ & \dots \\
+     & $|$ & $C_k \ @{text"\""}\tau_{k,1}@{text"\""} \dots @{text"\""}\tau_{k,n_k}@{text"\""}$
+\end{tabular}
+\end{quote}
+It introduces the constructors \
+$C_i :: \tau_{i,1}\Rightarrow \cdots \Rightarrow \tau_{i,n_i} \Rightarrow$~@{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"} \ and expresses that any value of this type is built from these constructors in a unique manner. Uniqueness is implied by the following
+properties of the constructors:
+\begin{itemize}
+\item \emph{Distinctness:} $C_i\ \ldots \neq C_j\ \dots$ \quad if $i \neq j$
+\item \emph{Injectivity:}
+\begin{tabular}[t]{l}
+ $(C_i \ x_1 \dots x_{n_i} = C_i \ y_1 \dots y_{n_i}) =$\\
+ $(x_1 = y_1 \land \dots \land x_{n_i} = y_{n_i})$
+\end{tabular}
+\end{itemize}
+The fact that any value of the datatype is built from the constructors implies
+the \concept{structural induction}\index{induction} rule: to show
+$P~x$ for all $x$ of type @{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"},
+one needs to show $P(C_i\ x_1 \dots x_{n_i})$ (for each $i$) assuming
+$P(x_j)$ for all $j$ where $\tau_{i,j} =$~@{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"}.
+Distinctness and injectivity are applied automatically by @{text auto}
+and other proof methods. Induction must be applied explicitly.
+
+Datatype values can be taken apart with case expressions\index{case expression}\index{case expression@@{text "case ... of"}}, for example
+\begin{quote}
+\noquotes{@{term[source] "(case xs of [] \<Rightarrow> 0 | x # _ \<Rightarrow> Suc x)"}}
+\end{quote}
+just like in functional programming languages. Case expressions
+must be enclosed in parentheses.
+
+As an example, consider binary trees:
+*}
+
+datatype 'a tree = Tip | Node  "'a tree"  'a  "'a tree"
+
+text{* with a mirror function: *}
+
+fun mirror :: "'a tree \<Rightarrow> 'a tree" where
+"mirror Tip = Tip" |
+"mirror (Node l a r) = Node (mirror r) a (mirror l)"
+
+text{* The following lemma illustrates induction: *}
+
+lemma "mirror(mirror t) = t"
+apply(induction t)
+
+txt{* yields
+@{subgoals[display]}
+The induction step contains two induction hypotheses, one for each subtree.
+An application of @{text auto} finishes the proof.
+
+A very simple but also very useful datatype is the predefined
+@{datatype[display] option}\index{option@@{text option}}\index{None@@{const None}}\index{Some@@{const Some}}
+Its sole purpose is to add a new element @{const None} to an existing
+type @{typ 'a}. To make sure that @{const None} is distinct from all the
+elements of @{typ 'a}, you wrap them up in @{const Some} and call
+the new type @{typ"'a option"}. A typical application is a lookup function
+on a list of key-value pairs, often called an association list:
+*}
+(*<*)
+apply auto
+done
+(*>*)
+fun lookup :: "('a * 'b) list \<Rightarrow> 'a \<Rightarrow> 'b option" where
+"lookup [] x = None" |
+"lookup ((a,b) # ps) x = (if a = x then Some b else lookup ps x)"
+
+text{*
+Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
+Pairs can be taken apart either by pattern matching (as above) or with the
+projection functions @{const fst} and @{const snd}: @{thm fst_conv} and @{thm snd_conv}. Tuples are simulated by pairs nested to the right: @{term"(a,b,c)"}
+abbreviates @{text"(a, (b, c))"} and @{text "\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3"} abbreviates
+@{text "\<tau>\<^sub>1 \<times> (\<tau>\<^sub>2 \<times> \<tau>\<^sub>3)"}.
+
+\subsection{Definitions}
+
+Non-recursive functions can be defined as in the following example:
+\index{definition@\isacom{definition}}*}
+
+definition sq :: "nat \<Rightarrow> nat" where
+"sq n = n * n"
+
+text{* Such definitions do not allow pattern matching but only
+@{text"f x\<^sub>1 \<dots> x\<^sub>n = t"}, where @{text f} does not occur in @{text t}.
+
+\subsection{Abbreviations}
+
+Abbreviations are similar to definitions:
+\index{abbreviation@\isacom{abbreviation}}*}
+
+abbreviation sq' :: "nat \<Rightarrow> nat" where
+"sq' n \<equiv> n * n"
+
+text{* The key difference is that @{const sq'} is only syntactic sugar:
+after parsing, @{term"sq' t"} is replaced by \mbox{@{term"t*t"}}, and
+before printing, every occurrence of @{term"u*u"} is replaced by
+\mbox{@{term"sq' u"}}.  Internally, @{const sq'} does not exist.
+This is the
+advantage of abbreviations over definitions: definitions need to be expanded
+explicitly (\autoref{sec:rewr-defs}) whereas abbreviations are already
+expanded upon parsing. However, abbreviations should be introduced sparingly:
+if abused, they can lead to a confusing discrepancy between the internal and
+external view of a term.
+
+The ASCII representation of @{text"\<equiv>"} is \texttt{==} or \xsymbol{equiv}.
+
+\subsection{Recursive Functions}
+\label{sec:recursive-funs}
+
+Recursive functions are defined with \indexed{\isacom{fun}}{fun} by pattern matching
+over datatype constructors. The order of equations matters, just as in
+functional programming languages. However, all HOL functions must be
+total. This simplifies the logic---terms are always defined---but means
+that recursive functions must terminate. Otherwise one could define a
+function @{prop"f n = f n + (1::nat)"} and conclude \mbox{@{prop"(0::nat) = 1"}} by
+subtracting @{term"f n"} on both sides.
+
+Isabelle's automatic termination checker requires that the arguments of
+recursive calls on the right-hand side must be strictly smaller than the
+arguments on the left-hand side. In the simplest case, this means that one
+fixed argument position decreases in size with each recursive call. The size
+is measured as the number of constructors (excluding 0-ary ones, e.g.\ @{text
+Nil}). Lexicographic combinations are also recognized. In more complicated
+situations, the user may have to prove termination by hand. For details
+see~\cite{Krauss}.
+
+Functions defined with \isacom{fun} come with their own induction schema
+that mirrors the recursion schema and is derived from the termination
+order. For example,
+*}
+
+fun div2 :: "nat \<Rightarrow> nat" where
+"div2 0 = 0" |
+"div2 (Suc 0) = 0" |
+"div2 (Suc(Suc n)) = Suc(div2 n)"
+
+text{* does not just define @{const div2} but also proves a
+customized induction rule:
+\[
+\inferrule{
+\mbox{@{thm (prem 1) div2.induct}}\\
+\mbox{@{thm (prem 2) div2.induct}}\\
+\mbox{@{thm (prem 3) div2.induct}}}
+{\mbox{@{thm (concl) div2.induct[of _ "m"]}}}
+\]
+This customized induction rule can simplify inductive proofs. For example,
+*}
+
+lemma "div2(n) = n div 2"
+apply(induction n rule: div2.induct)
+
+txt{* (where the infix @{text div} is the predefined division operation)
+yields the 3 subgoals
+@{subgoals[display,margin=65]}
+An application of @{text auto} finishes the proof.
+Had we used ordinary structural induction on @{text n},
+the proof would have needed an additional
+case analysis in the induction step.
+
+This example leads to the following induction heuristic:
+\begin{quote}
+\emph{Let @{text f} be a recursive function.
+If the definition of @{text f} is more complicated
+than having one equation for each constructor of some datatype,
+then properties of @{text f} are best proved via @{text "f.induct"}.\index{inductionrule@@{text".induct"}}}
+\end{quote}
+
+The general case is often called \concept{computation induction},
+because the induction follows the (terminating!) computation.
+For every defining equation
+\begin{quote}
+@{text "f(e) = \<dots> f(r\<^sub>1) \<dots> f(r\<^sub>k) \<dots>"}
+\end{quote}
+where @{text"f(r\<^sub>i)"}, @{text"i=1\<dots>k"}, are all the recursive calls,
+the induction rule @{text"f.induct"} contains one premise of the form
+\begin{quote}
+@{text"P(r\<^sub>1) \<Longrightarrow> \<dots> \<Longrightarrow> P(r\<^sub>k) \<Longrightarrow> P(e)"}
+\end{quote}
+If @{text "f :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> \<tau>"} then @{text"f.induct"} is applied like this:
+\begin{quote}
+\isacom{apply}@{text"(induction x\<^sub>1 \<dots> x\<^sub>n rule: f.induct)"}\index{inductionrule@@{text"induction ... rule:"}}
+\end{quote}
+where typically there is a call @{text"f x\<^sub>1 \<dots> x\<^sub>n"} in the goal.
+But note that the induction rule does not mention @{text f} at all,
+except in its name, and is applicable independently of @{text f}.
+
+
+\subsection*{Exercises}
+
+\begin{exercise}
+Starting from the type @{text "'a tree"} defined in the text, define
+a function @{text "contents ::"} @{typ "'a tree \<Rightarrow> 'a list"}
+that collects all values in a tree in a list, in any order,
+without removing duplicates.
+Then define a function @{text "treesum ::"} @{typ "nat tree \<Rightarrow> nat"}
+that sums up all values in a tree of natural numbers
+and prove @{prop "treesum t = listsum(contents t)"}.
+\end{exercise}
+
+\begin{exercise}
+Define a new type @{text "'a tree2"} of binary trees where values are also
+stored in the leaves of the tree.  Also reformulate the
+@{const mirror} function accordingly. Define two functions
+@{text "pre_order"} and @{text "post_order"} of type @{text "'a tree2 \<Rightarrow> 'a list"}
+that traverse a tree and collect all stored values in the respective order in
+a list. Prove @{prop "pre_order (mirror t) = rev (post_order t)"}.
+\end{exercise}
+
+\begin{exercise}
+Define a function @{text "intersperse ::"} @{typ "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"}
+such that @{text "intersperse a [x\<^sub>1, ..., x\<^sub>n] = [x\<^sub>1, a, x\<^sub>2, a, ..., a, x\<^sub>n]"}.
+Now prove that @{prop "map f (intersperse a xs) = intersperse (f a) (map f xs)"}.
+\end{exercise}
+
+
+\section{Induction Heuristics}\index{induction heuristics}
+
+We have already noted that theorems about recursive functions are proved by
+induction. In case the function has more than one argument, we have used
+the following heuristic in the proofs about the append function:
+\begin{quote}
+\emph{Perform induction on argument number $i$\\
+ if the function is defined by recursion on argument number $i$.}
+\end{quote}
+The key heuristic, and the main point of this section, is to
+\emph{generalize the goal before induction}.
+The reason is simple: if the goal is
+too specific, the induction hypothesis is too weak to allow the induction
+step to go through. Let us illustrate the idea with an example.
+
+Function @{const rev} has quadratic worst-case running time
+because it calls append for each element of the list and
+append is linear in its first argument.  A linear time version of
+@{const rev} requires an extra argument where the result is accumulated
+gradually, using only~@{text"#"}:
+*}
+(*<*)
+apply auto
+done
+(*>*)
+fun itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
+"itrev []        ys = ys" |
+"itrev (x#xs) ys = itrev xs (x#ys)"
+
+text{* The behaviour of @{const itrev} is simple: it reverses
+its first argument by stacking its elements onto the second argument,
+and it returns that second argument when the first one becomes
+empty. Note that @{const itrev} is tail-recursive: it can be
+compiled into a loop; no stack is necessary for executing it.
+
+Naturally, we would like to show that @{const itrev} does indeed reverse
+its first argument provided the second one is empty:
+*}
+
+lemma "itrev xs [] = rev xs"
+
+txt{* There is no choice as to the induction variable:
+*}
+
+apply(induction xs)
+apply(auto)
+
+txt{*
+Unfortunately, this attempt does not prove
+the induction step:
+@{subgoals[display,margin=70]}
+The induction hypothesis is too weak.  The fixed
+argument,~@{term"[]"}, prevents it from rewriting the conclusion.  
+This example suggests a heuristic:
+\begin{quote}
+\emph{Generalize goals for induction by replacing constants by variables.}
+\end{quote}
+Of course one cannot do this na\"{\i}vely: @{prop"itrev xs ys = rev xs"} is
+just not true.  The correct generalization is
+*};
+(*<*)oops;(*>*)
+lemma "itrev xs ys = rev xs @ ys"
+(*<*)apply(induction xs, auto)(*>*)
+txt{*
+If @{text ys} is replaced by @{term"[]"}, the right-hand side simplifies to
+@{term"rev xs"}, as required.
+In this instance it was easy to guess the right generalization.
+Other situations can require a good deal of creativity.  
+
+Although we now have two variables, only @{text xs} is suitable for
+induction, and we repeat our proof attempt. Unfortunately, we are still
+not there:
+@{subgoals[display,margin=65]}
+The induction hypothesis is still too weak, but this time it takes no
+intuition to generalize: the problem is that the @{text ys}
+in the induction hypothesis is fixed,
+but the induction hypothesis needs to be applied with
+@{term"a # ys"} instead of @{text ys}. Hence we prove the theorem
+for all @{text ys} instead of a fixed one. We can instruct induction
+to perform this generalization for us by adding @{text "arbitrary: ys"}\index{arbitrary@@{text"arbitrary:"}}.
+*}
+(*<*)oops
+lemma "itrev xs ys = rev xs @ ys"
+(*>*)
+apply(induction xs arbitrary: ys)
+
+txt{* The induction hypothesis in the induction step is now universally quantified over @{text ys}:
+@{subgoals[display,margin=65]}
+Thus the proof succeeds:
+*}
+
+apply auto
+done
+
+text{*
+This leads to another heuristic for generalization:
+\begin{quote}
+\emph{Generalize induction by generalizing all free
+variables\\ {\em(except the induction variable itself)}.}
+\end{quote}
+Generalization is best performed with @{text"arbitrary: y\<^sub>1 \<dots> y\<^sub>k"}. 
+This heuristic prevents trivial failures like the one above.
+However, it should not be applied blindly.
+It is not always required, and the additional quantifiers can complicate
+matters in some cases. The variables that need to be quantified are typically
+those that change in recursive calls.
+
+
+\subsection*{Exercises}
+
+\begin{exercise}
+Write a tail-recursive variant of the @{text add} function on @{typ nat}:
+@{term "itadd :: nat \<Rightarrow> nat \<Rightarrow> nat"}.
+Tail-recursive means that in the recursive case, @{text itadd} needs to call
+itself directly: \mbox{@{term"itadd (Suc m) n"}} @{text"= itadd \<dots>"}.
+Prove @{prop "itadd m n = add m n"}.
+\end{exercise}
+
+
+\section{Simplification}
+
+So far we have talked a lot about simplifying terms without explaining the concept.
+\conceptidx{Simplification}{simplification} means
+\begin{itemize}
+\item using equations $l = r$ from left to right (only),
+\item as long as possible.
+\end{itemize}
+To emphasize the directionality, equations that have been given the
+@{text"simp"} attribute are called \conceptidx{simplification rules}{simplification rule}.
+Logically, they are still symmetric, but proofs by
+simplification use them only in the left-to-right direction.  The proof tool
+that performs simplifications is called the \concept{simplifier}. It is the
+basis of @{text auto} and other related proof methods.
+
+The idea of simplification is best explained by an example. Given the
+simplification rules
+\[
+\begin{array}{rcl@ {\quad}l}
+@{term"0 + n::nat"} &@{text"="}& @{text n} & (1) \\
+@{term"(Suc m) + n"} &@{text"="}& @{term"Suc (m + n)"} & (2) \\
+@{text"(Suc m \<le> Suc n)"} &@{text"="}& @{text"(m \<le> n)"} & (3)\\
+@{text"(0 \<le> m)"} &@{text"="}& @{const True} & (4)
+\end{array}
+\]
+the formula @{prop"0 + Suc 0 \<le> Suc 0 + x"} is simplified to @{const True}
+as follows:
+\[
+\begin{array}{r@ {}c@ {}l@ {\quad}l}
+@{text"(0 + Suc 0"} & \leq & @{text"Suc 0 + x)"}  & \stackrel{(1)}{=} \\
+@{text"(Suc 0"}     & \leq & @{text"Suc 0 + x)"}  & \stackrel{(2)}{=} \\
+@{text"(Suc 0"}     & \leq & @{text"Suc (0 + x)"} & \stackrel{(3)}{=} \\
+@{text"(0"}         & \leq & @{text"0 + x)"}      & \stackrel{(4)}{=} \\[1ex]
+ & @{const True}
+\end{array}
+\]
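+In Isabelle this entire chain is a single @{text simp} step, because rules
+(1)--(4) (or close variants of them) are among the default simplification
+rules:
+*}
+
+lemma "0 + Suc 0 \<le> Suc 0 + x"
+by simp
+
+text{*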
+Simplification is often also called \concept{rewriting}
+and simplification rules \conceptidx{rewrite rules}{rewrite rule}.
+
+\subsection{Simplification Rules}
+
+The attribute @{text"simp"} declares theorems to be simplification rules,
+which the simplifier will use automatically. In addition,
+\isacom{datatype} and \isacom{fun} commands implicitly declare some
+simplification rules: \isacom{datatype} the distinctness and injectivity
+rules, \isacom{fun} the defining equations. Definitions are not declared
+as simplification rules automatically! Nearly any theorem can become a
+simplification rule. The simplifier will try to transform it into an
+equation. For example, the theorem @{prop"\<not> P"} is turned into @{prop"P = False"}.
+
+Only equations that really simplify, like @{prop"rev (rev xs) = xs"} and
+@{prop"xs @ [] = xs"}, should be declared as simplification
+rules. Equations that may be counterproductive as simplification rules
+should only be used in specific proof steps (see \S\ref{sec:simp} below).
+Distributivity laws, for example, alter the structure of terms
+and can produce an exponential blow-up.
+
+\subsection{Conditional Simplification Rules}
+
+Simplification rules can be conditional.  Before applying such a rule, the
+simplifier will first try to prove the preconditions, again by
+simplification. For example, given the simplification rules
+\begin{quote}
+@{prop"p(0::nat) = True"}\\
+@{prop"p(x) \<Longrightarrow> f(x) = g(x)"},
+\end{quote}
+the term @{term"f(0::nat)"} simplifies to @{term"g(0::nat)"}
+but @{term"f(1::nat)"} does not simplify because @{prop"p(1::nat)"}
+is not provable.
+
+\subsection{Termination}
+
+Simplification can run forever, for example if both @{prop"f x = g x"} and
+@{prop"g(x) = f(x)"} are simplification rules. It is the user's
+responsibility not to include simplification rules that can lead to
+nontermination, either on their own or in combination with other
+simplification rules. The right-hand side of a simplification rule should
+always be ``simpler'' than the left-hand side---in some sense. But since
+termination is undecidable, such a check cannot be automated completely
+and Isabelle makes little attempt to detect nontermination.
+
+When conditional simplification rules are applied, their preconditions are
+proved first. Hence all preconditions need to be
+simpler than the left-hand side of the conclusion. For example
+\begin{quote}
+@{prop "n < m \<Longrightarrow> (n < Suc m) = True"}
+\end{quote}
+is suitable as a simplification rule: both @{prop"n<m"} and @{const True}
+are simpler than \mbox{@{prop"n < Suc m"}}. But
+\begin{quote}
+@{prop "Suc n < m \<Longrightarrow> (n < m) = True"}
+\end{quote}
+leads to nontermination: when trying to rewrite @{prop"n<m"} to @{const True}
+one first has to prove \mbox{@{prop"Suc n < m"}}, which can be rewritten to @{const True} provided @{prop"Suc(Suc n) < m"}, \emph{ad infinitum}.
+
+\subsection{The \indexed{@{text simp}}{simp} Proof Method}
+\label{sec:simp}
+
+So far we have only used the proof method @{text auto}.  Method @{text simp}
+is the key component of @{text auto}, but @{text auto} can do much more. In
+some cases, @{text auto} is overeager and modifies the proof state too much.
+In such cases the more predictable @{text simp} method should be used.
+Given a goal
+\begin{quote}
+@{text"1. \<lbrakk> P\<^sub>1; \<dots>; P\<^sub>m \<rbrakk> \<Longrightarrow> C"}
+\end{quote}
+the command
+\begin{quote}
+\isacom{apply}@{text"(simp add: th\<^sub>1 \<dots> th\<^sub>n)"}
+\end{quote}
+simplifies the assumptions @{text "P\<^sub>i"} and the conclusion @{text C} using
+\begin{itemize}
+\item all simplification rules, including the ones coming from \isacom{datatype} and \isacom{fun},
+\item the additional lemmas @{text"th\<^sub>1 \<dots> th\<^sub>n"}, and
+\item the assumptions.
+\end{itemize}
+In addition to or instead of @{text add} there is also @{text del} for removing
+simplification rules temporarily. Both are optional. Method @{text auto}
+can be modified similarly:
+\begin{quote}
+\isacom{apply}@{text"(auto simp add: \<dots> simp del: \<dots>)"}
+\end{quote}
+Here the modifiers are @{text"simp add"} and @{text"simp del"}
+instead of just @{text add} and @{text del} because @{text auto}
+does not just perform simplification.
+
+Note that @{text simp} acts only on subgoal 1, @{text auto} acts on all
+subgoals. There is also @{text simp_all}, which applies @{text simp} to
+all subgoals.
+
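+For example, a structural induction produces two subgoals, and a first
+@{text simp} disposes only of the base case; a second @{text simp} (or a single
+@{text simp_all}) finishes the proof. A small sketch with the predefined
+@{const rev}:
+*}
+
+lemma "rev(rev xs) = xs"
+apply(induction xs)
+apply(simp)
+apply(simp)
+done
+
+text{*
+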
+\subsection{Rewriting With Definitions}
+\label{sec:rewr-defs}
+
+Definitions introduced by the command \isacom{definition}
+can also be used as simplification rules,
+but by default they are not: the simplifier does not expand them
+automatically. Definitions are intended for introducing abstract concepts and
+not merely as abbreviations. Of course, we need to expand the definition
+initially, but once we have proved enough abstract properties of the new
+constant, we can forget its original definition. This style makes proofs more
+robust: if the definition has to be changed, only the proofs of the abstract
+properties will be affected.
+
+The definition of a function @{text f} is a theorem named @{text f_def}
+and can be added to a call of @{text simp} just like any other theorem:
+\begin{quote}
+\isacom{apply}@{text"(simp add: f_def)"}
+\end{quote}
+In particular, let-expressions can be unfolded by
+making @{thm[source] Let_def} a simplification rule.
+
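+The function @{const sq} defined earlier demonstrates the need for explicit
+expansion: it is opaque to the simplifier until its definition is supplied;
+a minimal sketch:
+*}
+
+lemma "sq n = n * n"
+by(simp add: sq_def)
+
+text{*
+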
+\subsection{Case Splitting With @{text simp}}
+
+Goals containing if-expressions are automatically split into two cases by
+@{text simp} using the rule
+\begin{quote}
+@{prop"P(if A then s else t) = ((A \<longrightarrow> P(s)) \<and> (~A \<longrightarrow> P(t)))"}
+\end{quote}
+For example, @{text simp} can prove
+\begin{quote}
+@{prop"(A \<and> B) = (if A then B else False)"}
+\end{quote}
+because both @{prop"A \<longrightarrow> (A & B) = B"} and @{prop"~A \<longrightarrow> (A & B) = False"}
+simplify to @{const True}.
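+This claim is easily checked:
+*}
+
+lemma "(A \<and> B) = (if A then B else False)"
+by simp
+
+text{*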
+
+We can split case-expressions similarly. For @{text nat} the rule looks like this:
+@{prop[display,margin=65,indent=4]"P(case e of 0 \<Rightarrow> a | Suc n \<Rightarrow> b n) = ((e = 0 \<longrightarrow> P a) & (ALL n. e = Suc n \<longrightarrow> P(b n)))"}
+Case expressions are not split automatically by @{text simp}, but @{text simp}
+can be instructed to do so:
+\begin{quote}
+\isacom{apply}@{text"(simp split: nat.split)"}
+\end{quote}
+splits all case-expressions over natural numbers. For an arbitrary
+datatype @{text t} it is @{text "t.split"}\index{split@@{text".split"}} instead of @{thm[source] nat.split}.
+Method @{text auto} can be modified in exactly the same way.
+The modifier \indexed{@{text "split:"}}{split} can be followed by multiple names.
+Splitting if or case-expressions in the assumptions requires 
+@{text "split: if_splits"} or @{text "split: t.splits"}.
+
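+For example, the following goal needs the split (a small sketch; without
+@{text "split: nat.split"} the case expression is left untouched):
+*}
+
+lemma "(case m of 0 \<Rightarrow> a | Suc n \<Rightarrow> a) = a"
+by(simp split: nat.split)
+
+text{*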
+
+\subsection*{Exercises}
+
+\exercise\label{exe:tree0}
+Define a datatype @{text tree0} of binary tree skeletons which do not store
+any information, neither in the inner nodes nor in the leaves.
+Define a function @{text "nodes :: tree0 \<Rightarrow> nat"} that counts the number of
+all nodes (inner nodes and leaves) in such a tree.
+Consider the following recursive function:
+*}
+(*<*)
+datatype tree0 = Tip | Node tree0 tree0
+(*>*)
+fun explode :: "nat \<Rightarrow> tree0 \<Rightarrow> tree0" where
+"explode 0 t = t" |
+"explode (Suc n) t = explode n (Node t t)"
+
+text {*
+Find an equation expressing the size of a tree after exploding it
+(\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
+of @{term "nodes t"} and @{text n}. Prove your equation.
+You may use the usual arithmetic operators including the exponentiation
+operator ``@{text"^"}''. For example, \noquotes{@{prop [source] "2 ^ 2 = 4"}}.
+
+Hint: simplifying with the list of theorems @{thm[source] algebra_simps}
+takes care of common algebraic properties of the arithmetic operators.
+\endexercise
+
+\exercise
+Define arithmetic expressions in one variable over integers (type @{typ int})
+as a data type:
+*}
+
+datatype exp = Var | Const int | Add exp exp | Mult exp exp
+
+text{*
+Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
+such that @{term"eval e x"} evaluates @{text e} at the value
+@{text x}.
+
+A polynomial can be represented as a list of coefficients, starting with
+the constant. For example, @{term "[4, 2, -1, 3::int]"} represents the
+polynomial $4 + 2x - x^2 + 3x^3$.
+Define a function \noquotes{@{term [source] "evalp :: int list \<Rightarrow> int \<Rightarrow> int"}}
+that evaluates a polynomial at the given value.
+Define a function \noquotes{@{term[source] "coeffs :: exp \<Rightarrow> int list"}}
+that transforms an expression into a polynomial. This may require auxiliary
+functions. Prove that @{text coeffs} preserves the value of the expression:
+\mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
+Hint: consider the hint in Exercise~\ref{exe:tree0}.
+\endexercise
+*}
+(*<*)
+end
+(*>*)
Binary file src/Doc/Prog-Prove/document/bang.pdf has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -e
+
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_TOOL" logo HOL
+
+cp "$ISABELLE_HOME/src/Doc/Prog-Prove/MyList.thy" .
+
+"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/intro-isabelle.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,94 @@
+Isabelle is a generic system for
+implementing logical formalisms, and Isabelle/HOL is the specialization
+of Isabelle for HOL, which abbreviates Higher-Order Logic. We introduce
+HOL step by step following the equation
+\[ \mbox{HOL} = \mbox{Functional Programming} + \mbox{Logic}. \]
+We assume that the reader is used to logical and set theoretic notation
+and is familiar with the basic concepts of functional programming.
+\ifsem
+Open-minded readers have been known to pick up functional
+programming through the wealth of examples in \autoref{sec:FP}
+and \autoref{sec:CaseStudyExp}.
+\fi
+
+\autoref{sec:FP} introduces HOL as a functional programming language and
+explains how to write simple inductive proofs of mostly equational properties
+of recursive functions.
+\ifsem
+\autoref{sec:CaseStudyExp} contains a
+small case study: arithmetic and boolean expressions, their evaluation,
+optimization and compilation.
+\fi
+\autoref{ch:Logic} introduces the rest of HOL: the
+language of formulas beyond equality, automatic proof tools, single
+step proofs, and inductive definitions, an essential specification construct.
+\autoref{ch:Isar} introduces Isar, Isabelle's language for writing structured
+proofs.
+
+%Further material (slides, demos etc) can be found online at
+%\url{http://www.in.tum.de/~nipkow}.
+
+% Relics:
+% We aim to minimise the amount of background knowledge of logic we expect
+% from the reader
+% We have focussed on the core material
+% in the intersection of computation and logic.
+
+This introduction to the core of Isabelle is intentionally concrete and
+example-based: we concentrate on examples that illustrate the typical cases
+without explaining the general case if it can be inferred from the examples.
+We cover the essentials (from a functional programming point of view) as
+quickly and compactly as possible.
+\ifsem
+After all, this book is primarily about semantics.
+\fi
+
+For a comprehensive treatment of all things Isabelle we recommend the
+\emph{Isabelle/Isar Reference Manual}~\cite{IsarRef}, which comes with the
+Isabelle distribution.
+The tutorial by Nipkow, Paulson and Wenzel~\cite{LNCS2283} (in its updated version that comes with the Isabelle distribution) is still recommended for the wealth of examples and material, but its proof style is outdated. In particular it does not cover the structured proof language Isar.
+
+%This introduction to Isabelle has grown out of many years of teaching
+%Isabelle courses. 
+
+\ifsem
+\subsection*{Getting Started with Isabelle}
+
+If you have not done so already, download and install Isabelle
+from \url{http://isabelle.in.tum.de}. You can start it by clicking
+on the application icon. This will launch Isabelle's
+user interface based on the text editor \concept{jedit}. Below you see
+a typical example snapshot of a jedit session. At this point we merely explain
+the layout of the window, not its contents.
+
+\begin{center}
+\includegraphics[width=\textwidth]{jedit.png}
+\end{center}
+The upper part of the window shows the input typed by the user, i.e.\ the
+gradually growing Isabelle text of definitions, theorems, proofs, etc.  The
+interface processes the user input automatically while it is typed, just like
+modern Java IDEs.  Isabelle's response to the user input is shown in the
+lower part of the window. You can examine the response to any input phrase
+by clicking on that phrase or by hovering over underlined text.
+
+This should suffice to get started with the jedit interface.
+Now you need to learn what to type into it.
+\else
+If you want to apply what you have learned about Isabelle, we recommend you
+download and read the book
+\href{http://www.in.tum.de/~nipkow/Concrete/}{Concrete
+Semantics}~\cite{ConcreteSemantics}, a guided tour of the wonderful world of
+programming language semantics formalised in Isabelle.  In fact,
+\emph{Programming and Proving in Isabelle/HOL} constitutes part~I of
+\href{http://www.in.tum.de/~nipkow/Concrete/}{Concrete Semantics}.  The web
+pages for \href{http://www.in.tum.de/~nipkow/Concrete/}{Concrete Semantics}
+also provide a set of \LaTeX-based slides for teaching \emph{Programming and
+Proving in Isabelle/HOL}.
+\fi
+
+\ifsem\else
+\paragraph{Acknowledgements}
+I wish to thank the following people for their comments on this document:
+Florian Haftmann, Ren\'{e} Thiemann, Sean Seefried, Christian Sternagel
+and Carl Witty.
+\fi
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/mathpartir.sty	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,388 @@
+%  Mathpartir --- Math Paragraph for Typesetting Inference Rules
+%
+%  Copyright (C) 2001, 2002, 2003 Didier Rémy
+%
+%  Author         : Didier Remy 
+%  Version        : 1.1.1
+%  Bug Reports    : to author
+%  Web Site       : http://pauillac.inria.fr/~remy/latex/
+% 
+%  WhizzyTeX is free software; you can redistribute it and/or modify
+%  it under the terms of the GNU General Public License as published by
+%  the Free Software Foundation; either version 2, or (at your option)
+%  any later version.
+%  
+%  Mathpartir is distributed in the hope that it will be useful,
+%  but WITHOUT ANY WARRANTY; without even the implied warranty of
+%  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+%  GNU General Public License for more details 
+%  (http://pauillac.inria.fr/~remy/license/GPL).
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%  File mathpartir.sty (LaTeX macros)
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\NeedsTeXFormat{LaTeX2e}
+\ProvidesPackage{mathpartir}
+    [2003/07/10 version 1.1.1 Math Paragraph for Typesetting Inference Rules]
+
+%%
+
+%% Identification
+%% Preliminary declarations
+
+\RequirePackage {keyval}
+
+%% Options
+%% More declarations
+
+%% PART I: Typesetting maths in paragraph mode
+
+\newdimen \mpr@tmpdim
+
+% To ensure hevea \hva compatibility, \hva should expand to nothing 
+% in mathpar or in inferrule
+\let \mpr@hva \empty
+
+%% normal paragraph parameters, should rather be taken dynamically
+\def \mpr@savepar {%
+  \edef \MathparNormalpar
+     {\noexpand \lineskiplimit \the\lineskiplimit
+      \noexpand \lineskip \the\lineskip}%
+  }
+
+\def \mpr@rulelineskip {\lineskiplimit=0.3em\lineskip=0.2em plus 0.1em}
+\def \mpr@lesslineskip {\lineskiplimit=0.6em\lineskip=0.5em plus 0.2em}
+\def \mpr@lineskip  {\lineskiplimit=1.2em\lineskip=1.2em plus 0.2em}
+\let \MathparLineskip \mpr@lineskip
+\def \mpr@paroptions {\MathparLineskip}
+\let \mpr@prebindings \relax
+
+\newskip \mpr@andskip \mpr@andskip 2em plus 0.5fil minus 0.5em
+
+\def \mpr@goodbreakand
+   {\hskip -\mpr@andskip  \penalty -1000\hskip \mpr@andskip}
+\def \mpr@and {\hskip \mpr@andskip}
+\def \mpr@andcr {\penalty 50\mpr@and}
+\def \mpr@cr {\penalty -10000\mpr@and}
+\def \mpr@eqno #1{\mpr@andcr #1\hskip 0em plus -1fil \penalty 10}
+
+\def \mpr@bindings {%
+  \let \and \mpr@andcr
+  \let \par \mpr@andcr
+  \let \\\mpr@cr
+  \let \eqno \mpr@eqno
+  \let \hva \mpr@hva
+  } 
+\let \MathparBindings \mpr@bindings
+
+% \@ifundefined {ignorespacesafterend}
+%    {\def \ignorespacesafterend {\aftergroup \ignorespaces}
+
+\newenvironment{mathpar}[1][]
+  {$$\mpr@savepar \parskip 0em \hsize \linewidth \centering
+     \vbox \bgroup \mpr@prebindings \mpr@paroptions #1\ifmmode $\else
+     \noindent $\displaystyle\fi
+     \MathparBindings}
+  {\unskip \ifmmode $\fi\egroup $$\ignorespacesafterend}
+
+% \def \math@mathpar #1{\setbox0 \hbox {$\displaystyle #1$}\ifnum
+%     \wd0 < \hsize  $$\box0$$\else \bmathpar #1\emathpar \fi}
+
+%%% HOV BOXES
+
+\def \mathvbox@ #1{\hbox \bgroup \mpr@normallineskip 
+  \vbox \bgroup \tabskip 0em \let \\ \cr
+  \halign \bgroup \hfil $##$\hfil\cr #1\crcr \egroup \egroup
+  \egroup}
+
+\def \mathhvbox@ #1{\setbox0 \hbox {\let \\\qquad $#1$}\ifnum \wd0 < \hsize
+      \box0\else \mathvbox {#1}\fi}
+
+
+%% Part II -- operations on lists
+
+\newtoks \mpr@lista
+\newtoks \mpr@listb
+
+\long \def\mpr@cons #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
+{#2}\edef #2{\the \mpr@lista \the \mpr@listb}}
+
+\long \def\mpr@snoc #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
+{#2}\edef #2{\the \mpr@listb\the\mpr@lista}}
+
+\long \def \mpr@concat#1=#2\mpr@to#3{\mpr@lista \expandafter {#2}\mpr@listb
+\expandafter {#3}\edef #1{\the \mpr@listb\the\mpr@lista}}
+
+\def \mpr@head #1\mpr@to #2{\expandafter \mpr@head@ #1\mpr@head@ #1#2}
+\long \def \mpr@head@ #1#2\mpr@head@ #3#4{\def #4{#1}\def#3{#2}}
+
+\def \mpr@flatten #1\mpr@to #2{\expandafter \mpr@flatten@ #1\mpr@flatten@ #1#2}
+\long \def \mpr@flatten@ \\#1\\#2\mpr@flatten@ #3#4{\def #4{#1}\def #3{\\#2}}
+
+\def \mpr@makelist #1\mpr@to #2{\def \mpr@all {#1}%
+   \mpr@lista {\\}\mpr@listb \expandafter {\mpr@all}\edef \mpr@all {\the
+   \mpr@lista \the \mpr@listb \the \mpr@lista}\let #2\empty 
+   \def \mpr@stripof ##1##2\mpr@stripend{\def \mpr@stripped{##2}}\loop
+     \mpr@flatten \mpr@all \mpr@to \mpr@one
+     \expandafter \mpr@snoc \mpr@one \mpr@to #2\expandafter \mpr@stripof
+     \mpr@all \mpr@stripend  
+     \ifx \mpr@stripped \empty \let \mpr@isempty 0\else \let \mpr@isempty 1\fi
+     \ifx 1\mpr@isempty
+   \repeat
+}
+
+%% Part III -- Type inference rules
+
+\def \mpr@rev #1\mpr@to #2{\let \mpr@tmp \empty
+   \def \\##1{\mpr@cons ##1\mpr@to \mpr@tmp}#1\let #2\mpr@tmp}
+
+\newif \if@premisse
+\newbox \mpr@hlist
+\newbox \mpr@vlist
+\newif \ifmpr@center \mpr@centertrue
+\def \mpr@htovlist {%
+   \setbox \mpr@hlist
+      \hbox {\strut
+             \ifmpr@center \hskip -0.5\wd\mpr@hlist\fi
+             \unhbox \mpr@hlist}%
+   \setbox \mpr@vlist
+      \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
+             \else \unvbox \mpr@vlist \box \mpr@hlist
+             \fi}%
+}
+% OLD version
+% \def \mpr@htovlist {%
+%    \setbox \mpr@hlist
+%       \hbox {\strut \hskip -0.5\wd\mpr@hlist \unhbox \mpr@hlist}%
+%    \setbox \mpr@vlist
+%       \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
+%              \else \unvbox \mpr@vlist \box \mpr@hlist
+%              \fi}%
+% }
+
+\def \mpr@sep{1.5em}
+\def \mpr@blank { }
+\def \mpr@hovbox #1#2{\hbox
+  \bgroup
+  \ifx #1T\@premissetrue
+  \else \ifx #1B\@premissefalse
+  \else
+     \PackageError{mathpartir}
+       {Premisse orientation should be either T or B}
+       {Fatal error in Package}%
+  \fi \fi
+  \def \@test {#2}\ifx \@test \mpr@blank\else
+  \setbox \mpr@hlist \hbox {}%
+  \setbox \mpr@vlist \vbox {}%
+  \if@premisse \let \snoc \mpr@cons \else \let \snoc \mpr@snoc \fi
+  \let \@hvlist \empty \let \@rev \empty
+  \mpr@tmpdim 0em
+  \expandafter \mpr@makelist #2\mpr@to \mpr@flat
+  \if@premisse \mpr@rev \mpr@flat \mpr@to \@rev \else \let \@rev \mpr@flat \fi
+  \def \\##1{%
+     \def \@test {##1}\ifx \@test \empty
+        \mpr@htovlist
+        \mpr@tmpdim 0em %%% last bug fix not extensively checked
+     \else
+      \setbox0 \hbox{$\displaystyle {##1}$}\relax
+      \advance \mpr@tmpdim by \wd0
+      %\mpr@tmpdim 1.02\mpr@tmpdim
+      \ifnum \mpr@tmpdim < \hsize
+         \ifnum \wd\mpr@hlist > 0
+           \if@premisse
+             \setbox \mpr@hlist 
+                \hbox {\unhbox0 \hskip \mpr@sep \unhbox \mpr@hlist}%
+           \else
+             \setbox \mpr@hlist
+                \hbox {\unhbox \mpr@hlist  \hskip \mpr@sep \unhbox0}%
+           \fi
+         \else 
+         \setbox \mpr@hlist \hbox {\unhbox0}%
+         \fi
+      \else
+         \ifnum \wd \mpr@hlist > 0
+            \mpr@htovlist 
+            \mpr@tmpdim \wd0
+         \fi
+         \setbox \mpr@hlist \hbox {\unhbox0}%
+      \fi
+      \advance \mpr@tmpdim by \mpr@sep
+   \fi
+   }%
+   \@rev
+   \mpr@htovlist
+   \ifmpr@center \hskip \wd\mpr@vlist\fi \box \mpr@vlist
+   \fi
+   \egroup
+}
+
+%%% INFERENCE RULES
+
+\@ifundefined{@@over}{%
+    \let\@@over\over % fallback if amsmath is not loaded
+    \let\@@overwithdelims\overwithdelims
+    \let\@@atop\atop \let\@@atopwithdelims\atopwithdelims
+    \let\@@above\above \let\@@abovewithdelims\abovewithdelims
+  }{}
+
+
+\def \mpr@@fraction #1#2{\hbox {\advance \hsize by -0.5em
+    $\displaystyle {#1\@@over #2}$}}
+\let \mpr@fraction \mpr@@fraction
+\def \mpr@@reduce #1#2{\hbox
+    {$\lower 0.01pt \mpr@@fraction {#1}{#2}\mkern -15mu\rightarrow$}}
+\def \mpr@@rewrite #1#2#3{\hbox
+    {$\lower 0.01pt \mpr@@fraction {#2}{#3}\mkern -8mu#1$}}
+\def \mpr@infercenter #1{\vcenter {\mpr@hovbox{T}{#1}}}
+
+\def \mpr@empty {}
+\def \mpr@inferrule
+  {\bgroup
+     \ifnum \linewidth<\hsize \hsize \linewidth\fi
+     \mpr@rulelineskip
+     \let \and \qquad
+     \let \hva \mpr@hva
+     \let \@rulename \mpr@empty
+     \let \@rule@options \mpr@empty
+     \mpr@inferrule@}
+\newcommand {\mpr@inferrule@}[3][]
+  {\everymath={\displaystyle}%       
+   \def \@test {#2}\ifx \empty \@test
+      \setbox0 \hbox {$\vcenter {\mpr@hovbox{B}{#3}}$}%
+   \else 
+   \def \@test {#3}\ifx \empty \@test
+      \setbox0 \hbox {$\vcenter {\mpr@hovbox{T}{#2}}$}%
+   \else
+   \setbox0 \mpr@fraction {\mpr@hovbox{T}{#2}}{\mpr@hovbox{B}{#3}}%
+   \fi \fi
+   \def \@test {#1}\ifx \@test\empty \box0
+   \else \vbox 
+%%% Francois's suggestion for long labels
+%%%   {\hbox to \wd0 {\RefTirName {#1}\hfil}\box0}\fi
+      {\hbox {\RefTirName {#1}}\box0}\fi
+   \egroup}
+
+\def \mpr@vdotfil #1{\vbox to #1{\leaders \hbox{$\cdot$} \vfil}}
+
+% There are two forms
+% \inferrule [label]{premisses}{conclusions}
+% or
+% \inferrule* [options]{premisses}{conclusions}
+%
+% Premisses and conclusions are lists of elements separated by \\
+% Each \\ produces a break, attempting horizontal breaks if possible, 
+% and  vertical breaks if needed. 
+% 
+% An empty element obtained by \\\\ produces a vertical break in all cases. 
+%
+% The first form is aligned on the fraction bar;
+% the optional label appears on top of the rule.
+% The second form, to be used in a derivation tree, is aligned on the last
+% line of its conclusion.
+% 
+% The second form can be parameterized, using the key=val interface. The
+% following keys are recognized:
+%       
+%  width                set the width of the rule to val
+%  narrower             set the width of the rule to val\hsize
+%  before               execute val at the beginning/left
+%  lab                  put a label [Val] on top of the rule
+%  lskip                add negative skip on the right
+%  left                 put a left label [Val]
+%  Left                 put a left label [Val],  ignoring its width 
+%  right                put a right label [Val]
+%  Right                put a right label [Val], ignoring its width
+%  leftskip             skip negative space on the left-hand side
+%  rightskip            skip negative space on the right-hand side
+%  vdots                lift the rule by val and fill vertical space with dots
+%  after                execute val at the end/right
+%  
+%  Note that most options must come in this order to avoid strange
+%  typesetting (in particular leftskip must precede left and Left, and
+%  rightskip must follow Right or right; vdots must come last
+%  or be followed only by rightskip). A short usage sketch follows below.
+%  
+
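+% As an illustration (hypothetical rule, not part of the package code),
+% one could write, inside math mode:
+%   \inferrule* [Left=App]
+%     {\Gamma \vdash f : A \to B \\ \Gamma \vdash x : A}
+%     {\Gamma \vdash f\,x : B}
+% where "App" is just an example label and the premisses are separated by \\.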
+\define@key {mprset}{flushleft}[]{\mpr@centerfalse}
+\define@key {mprset}{center}[]{\mpr@centertrue}
+\def \mprset #1{\setkeys{mprset}{#1}}
+
+\newbox \mpr@right
+\define@key {mpr}{flushleft}[]{\mpr@centerfalse}
+\define@key {mpr}{center}[]{\mpr@centertrue}
+\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
+     \advance \hsize by -\wd0\box0}
+\define@key {mpr}{width}{\hsize #1}
+\define@key {mpr}{sep}{\def\mpr@sep{#1}}
+\define@key {mpr}{before}{#1}
+\define@key {mpr}{lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
+\define@key {mpr}{Lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
+\define@key {mpr}{narrower}{\hsize #1\hsize}
+\define@key {mpr}{leftskip}{\hskip -#1}
+\define@key {mpr}{reduce}[]{\let \mpr@fraction \mpr@@reduce}
+\define@key {mpr}{rightskip}
+  {\setbox \mpr@right \hbox {\unhbox \mpr@right \hskip -#1}}
+\define@key {mpr}{LEFT}{\setbox0 \hbox {$#1$}\relax
+     \advance \hsize by -\wd0\box0}
+\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
+     \advance \hsize by -\wd0\box0}
+\define@key {mpr}{Left}{\llap{$\TirName {#1}\;$}}
+\define@key {mpr}{right}
+  {\setbox0 \hbox {$\;\TirName {#1}$}\relax \advance \hsize by -\wd0
+   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
+\define@key {mpr}{RIGHT}
+  {\setbox0 \hbox {$#1$}\relax \advance \hsize by -\wd0
+   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
+\define@key {mpr}{Right}
+  {\setbox \mpr@right \hbox {\unhbox \mpr@right \rlap {$\;\TirName {#1}$}}}
+\define@key {mpr}{vdots}{\def \mpr@vdots {\@@atop \mpr@vdotfil{#1}}}
+\define@key {mpr}{after}{\edef \mpr@after {\mpr@after #1}}
+
+\newdimen \rule@dimen
+\newcommand \mpr@inferstar@ [3][]{\setbox0
+  \hbox {\let \mpr@rulename \mpr@empty \let \mpr@vdots \relax
+         \setbox \mpr@right \hbox{}%
+         $\setkeys{mpr}{#1}%
+          \ifx \mpr@rulename \mpr@empty \mpr@inferrule {#2}{#3}\else
+          \mpr@inferrule [{\mpr@rulename}]{#2}{#3}\fi
+          \box \mpr@right \mpr@vdots$}
+  \setbox1 \hbox {\strut}
+  \rule@dimen \dp0 \advance \rule@dimen by -\dp1
+  \raise \rule@dimen \box0}
+
+\def \mpr@infer {\@ifnextchar *{\mpr@inferstar}{\mpr@inferrule}}
+\newcommand \mpr@err@skipargs[3][]{}
+\def \mpr@inferstar*{\ifmmode 
+    \let \@do \mpr@inferstar@
+  \else 
+    \let \@do \mpr@err@skipargs
+    \PackageError {mathpartir}
+      {\string\inferrule* can only be used in math mode}{}%
+  \fi \@do}
+
+
+%%% Exports
+
+% Environment mathpar
+
+\let \inferrule \mpr@infer
+
+% make a short name \infer if not already defined
+\@ifundefined {infer}{\let \infer \mpr@infer}{}
+
+\def \tir@name #1{\hbox {\small \sc #1}}
+\let \TirName \tir@name
+\let \RefTirName \tir@name
+
+%%% Other Exports
+
+% \let \listcons \mpr@cons
+% \let \listsnoc \mpr@snoc
+% \let \listhead \mpr@head
+% \let \listmake \mpr@makelist
+
+
+
+
+\endinput
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/prelude.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,93 @@
+\usepackage{makeidx}         % allows index generation
+\usepackage{graphicx}        % standard LaTeX graphics tool
+                             % when including figure files
+\usepackage{multicol}        % used for the two-column index
+\usepackage[bottom]{footmisc}% places footnotes at page bottom
+\usepackage{alltt}
+
+\usepackage[T1]{fontenc}
+\usepackage{ccfonts}
+\usepackage[euler-digits]{eulervm}
+
+\usepackage{isabelle,isabellesym}
+\usepackage{mathpartir}
+\usepackage{amssymb}
+
+\renewcommand*\descriptionlabel[1]{\hspace\labelsep \textbf{#1}\hfil}
+
+% this should be the last package used
+\usepackage{xcolor}
+\definecolor{linkcolor}{rgb}{0,0,0.4}
+\usepackage[colorlinks=true,linkcolor=linkcolor,citecolor=linkcolor,
+            filecolor=linkcolor,pagecolor=linkcolor,
+            urlcolor=linkcolor]{hyperref}
+
+% urls in roman style, theory text in math-similar italics
+\urlstyle{tt}
+\isabellestyle{it}
+
+\renewcommand{\isadigit}[1]{\ensuremath{#1}}
+
+% font size
+\renewcommand{\isastyle}{\isastyleminor}
+
+% indexing
+\usepackage{ifthen}
+\newcommand{\indexdef}[3]%
+{\ifthenelse{\equal{}{#1}}{\index{#3 (#2)|bold}}{\index{#3 (#1\ #2)|bold}}}
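+% For instance (arguments here are purely illustrative), \indexdef{}{method}{simp}
+% yields the bold index entry "simp (method)", while a non-empty first argument
+% is prepended to the kind, e.g. \indexdef{HOL}{method}{simp} gives
+% "simp (HOL method)".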
+
+% section commands
+\renewcommand{\chapterautorefname}{Chapter}
+\renewcommand{\sectionautorefname}{Section}
+\renewcommand{\subsectionautorefname}{Section}
+
+\renewcommand{\isamarkupheader}[1]{{\rmfamily\subsection{#1}}}
+\renewcommand{\isamarkupsection}[1]{{\rmfamily\subsection{#1}}}
+\renewcommand{\isamarkupsubsection}[1]{{\rmfamily\subsubsection{#1}}}
+% isabelle in-text command font
+\newcommand{\isacom}[1]{\isa{\isacommand{#1}}}
+
+\protected\def\isacharunderscore{\raisebox{2pt}{\_\kern-1.7pt}}
+\protected\def\isacharunderscorekeyword{\raisebox{2pt}{\_}}
+% isabelle keyword, adapted from isabelle.sty
+\renewcommand{\isakeyword}[1]
+{\emph{\def\isachardot{.}\def\isacharunderscore{\isacharunderscorekeyword}%
+\def\isacharbraceleft{\{}\def\isacharbraceright{\}}\def\bfdefault{sbc}\textbf{#1}}}
+
+% add \noindent to text blocks automatically
+\renewenvironment{isamarkuptext}{\par\isastyletext\begin{isapar}\noindent}{\end{isapar}}
+\renewenvironment{isamarkuptxt}{\par\isastyletext\begin{isapar}\noindent}{\end{isapar}}
+
+\newcommand{\noquotes}[1]{{\renewcommand{\isachardoublequote}{}\renewcommand{\isachardoublequoteopen}{}\renewcommand{\isachardoublequoteclose}{}#1}}
+\newcommand{\xsymbol}[1]{\texttt{\char`\\\char`<#1>}}
+
+%index
+\newcommand{\conceptnoidx}[1]{\textbf{#1}}
+\newcommand{\concept}[1]{\conceptnoidx{#1}\index{#1}}
+\newcommand{\conceptidx}[2]{\conceptnoidx{#1}\index{#2}}
+\newcommand{\indexed}[2]{#1\index{#2@\protect#1}}
+
+\newcommand{\isabox}{\fbox}
+\newcommand{\bigisabox}[1]
+{\isabox{\renewcommand{\isanewline}{\\}%
+ \begin{tabular}{l}#1\end{tabular}}}
+
+%%%% ``WARNING'' environment: 2 ! characters separated by negative thin space
+%\def\warnbang{\vtop to 0pt{\vss\hbox{\let\bfdefault=\bfdefaultold\Huge\textbf{!$\!$!}}\vss}}
+
+\def\warnbang{\vtop to 0pt{\vss\hbox{\includegraphics[width=3ex, height=5.5ex]{bang}}\vss}}
+
+\newenvironment{warn}{\begin{trivlist}\small\item[]\noindent%
+    \begingroup\hangindent=\parindent\hangafter=-2%\clubpenalty=10000%
+    \def\par{\endgraf\endgroup}%
+    \hbox to0pt{\hskip-\hangindent\warnbang\hfill}\ignorespaces}
+  {\par\end{trivlist}}
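+% A minimal usage sketch (kept as a comment only): in the document one would write
+%   \begin{warn}
+%   Some cautionary text.
+%   \end{warn}
+% to obtain a hanging paragraph marked with the warning symbol.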
+
+\chardef\isachardoublequote=`\"%
+\chardef\isachardoublequoteopen=`\"%
+\chardef\isachardoublequoteclose=`\"%
+
+\renewcommand{\isacharbackquoteopen}{\isacharbackquote}
+\renewcommand{\isacharbackquoteclose}{\isacharbackquote}
+
+\hyphenation{Isa-belle}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/root.bib	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,26 @@
+@string{CUP="Cambridge University Press"}
+@string{LNCS="Lect.\ Notes in Comp.\ Sci."}
+@string{Springer="Springer-Verlag"}
+
+@book{HuthRyan,author="Michael Huth and Mark Ryan",
+title={Logic in Computer Science},publisher=CUP,year=2004}
+
+@manual{Krauss,author={Alexander Krauss},
+title={Defining Recursive Functions in Isabelle/HOL},
+note={\url{http://isabelle.in.tum.de/doc/functions.pdf}}}
+
+@manual{Nipkow-Main,author={Tobias Nipkow},title={What's in Main},
+note={\url{http://isabelle.in.tum.de/doc/main.pdf}}}
+
+@book{LNCS2283,author={Tobias Nipkow and Lawrence Paulson and Markus Wenzel},
+title="Isabelle/HOL --- A Proof Assistant for Higher-Order Logic",
+publisher=Springer,series=LNCS,volume=2283,year=2002}
+
+@book{ConcreteSemantics,author={Tobias Nipkow and Gerwin Klein},
+title="Concrete Semantics. A Proof Assistant Approach",
+publisher={\url{http://www.in.tum.de/~nipkow/Concrete}},year=2013}
+
+@manual{IsarRef,author={Makarius Wenzel},
+title={The Isabelle/Isar Reference Manual},
+note={\url{http://isabelle.in.tum.de/doc/isar-ref.pdf}}}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/root.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,52 @@
+\documentclass[envcountsame,envcountchap]{svmono}
+
+\input{prelude}
+
+\newif\ifsem
+
+\begin{document}
+
+\title{Programming and Proving in Isabelle/HOL}
+\subtitle{\includegraphics[scale=.7]{isabelle_hol}}
+\author{Tobias Nipkow}
+\maketitle
+
+\frontmatter%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\setcounter{tocdepth}{1}
+\tableofcontents
+
+
+\mainmatter%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%\part{Isabelle}
+
+\chapter{Introduction}
+\input{intro-isabelle.tex}
+
+\chapter{Programming and Proving}
+\label{sec:FP}
+\input{Basics.tex}
+\input{Bool_nat_list}
+\input{Types_and_funs}
+
+%\chapter{Case Study: IMP Expressions}
+%\label{sec:CaseStudyExp}
+%\input{../generated/Expressions}
+
+\chapter{Logic and Proof Beyond Equality}
+\label{ch:Logic}
+\input{Logic}
+
+\chapter{Isar: A Language for Structured Proofs}
+\label{ch:Isar}
+\input{Isar}
+
+\backmatter%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\bibliographystyle{plain}
+\bibliography{root}
+
+%\printindex
+
+\end{document}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Prog-Prove/document/svmono.cls	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1691 @@
+% SVMONO DOCUMENT CLASS -- version 4.17 (31-Oct-06)
+% Springer Verlag global LaTeX2e support for monographs
+%%
+%%
+%% \CharacterTable
+%%  {Upper-case    \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z
+%%   Lower-case    \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z
+%%   Digits        \0\1\2\3\4\5\6\7\8\9
+%%   Exclamation   \!     Double quote  \"     Hash (number) \#
+%%   Dollar        \$     Percent       \%     Ampersand     \&
+%%   Acute accent  \'     Left paren    \(     Right paren   \)
+%%   Asterisk      \*     Plus          \+     Comma         \,
+%%   Minus         \-     Point         \.     Solidus       \/
+%%   Colon         \:     Semicolon     \;     Less than     \<
+%%   Equals        \=     Greater than  \>     Question mark \?
+%%   Commercial at \@     Left bracket  \[     Backslash     \\
+%%   Right bracket \]     Circumflex    \^     Underscore    \_
+%%   Grave accent  \`     Left brace    \{     Vertical bar  \|
+%%   Right brace   \}     Tilde         \~}
+%%
+\NeedsTeXFormat{LaTeX2e}[1995/12/01]
+\ProvidesClass{svmono}[2006/10/31 v4.17
+^^JSpringer Verlag global LaTeX document class for monographs]
+
+% Options
+% citations
+\DeclareOption{natbib}{\ExecuteOptions{oribibl}%
+\AtEndOfClass{% Loading package 'NATBIB'
+\RequirePackage{natbib}
+% Changing some parameters of NATBIB
+\setlength{\bibhang}{\parindent}
+%\setlength{\bibsep}{0mm}
+\let\bibfont=\small
+\def\@biblabel#1{#1.}
+\newcommand{\etal}{\textit{et al}.}
+%\bibpunct[,]{(}{)}{;}{a}{}{,}}}
+}}
+% Springer environment
+\let\if@spthms\iftrue
+\DeclareOption{nospthms}{\let\if@spthms\iffalse}
+%
+\let\envankh\@empty   % no anchor for "theorems"
+%
+\let\if@envcntreset\iffalse % environment counter is not reset
+\let\if@envcntresetsect=\iffalse % reset each section?
+\DeclareOption{envcountresetchap}{\let\if@envcntreset\iftrue}
+\DeclareOption{envcountresetsect}{\let\if@envcntreset\iftrue
+\let\if@envcntresetsect=\iftrue}
+%
+\let\if@envcntsame\iffalse  % NOT all environments work like "Theorem",
+                            % each using its own counter
+\DeclareOption{envcountsame}{\let\if@envcntsame\iftrue}
+%
+\let\if@envcntshowhiercnt=\iffalse % do not show hierarchy counter at all
+%
+% enhance theorem counter
+\DeclareOption{envcountchap}{\def\envankh{chapter}% show \thechapter along with theorem number
+\let\if@envcntshowhiercnt=\iftrue
+\ExecuteOptions{envcountreset}}
+%
+\DeclareOption{envcountsect}{\def\envankh{section}% show \thesection along with theorem number
+\let\if@envcntshowhiercnt=\iftrue
+\ExecuteOptions{envcountreset}}
+%
+% languages
+\let\switcht@@therlang\relax
+\let\svlanginfo\relax
+\def\ds@deutsch{\def\switcht@@therlang{\switcht@deutsch}%
+\gdef\svlanginfo{\typeout{Man spricht deutsch.}\global\let\svlanginfo\relax}}
+\def\ds@francais{\def\switcht@@therlang{\switcht@francais}%
+\gdef\svlanginfo{\typeout{On parle francais.}\global\let\svlanginfo\relax}}
+%
+\AtBeginDocument{\@ifpackageloaded{babel}{%
+\@ifundefined{extrasamerican}{}{\addto\extrasamerican{\switcht@albion}}%
+\@ifundefined{extrasaustralian}{}{\addto\extrasaustralian{\switcht@albion}}%
+\@ifundefined{extrasbritish}{}{\addto\extrasbritish{\switcht@albion}}%
+\@ifundefined{extrascanadian}{}{\addto\extrascanadian{\switcht@albion}}%
+\@ifundefined{extrasenglish}{}{\addto\extrasenglish{\switcht@albion}}%
+\@ifundefined{extrasnewzealand}{}{\addto\extrasnewzealand{\switcht@albion}}%
+\@ifundefined{extrasUKenglish}{}{\addto\extrasUKenglish{\switcht@albion}}%
+\@ifundefined{extrasUSenglish}{}{\addto\extrasUSenglish{\switcht@albion}}%
+\@ifundefined{captionsfrench}{}{\addto\captionsfrench{\switcht@francais}}%
+\@ifundefined{extrasgerman}{}{\addto\extrasgerman{\switcht@deutsch}}%
+\@ifundefined{extrasngerman}{}{\addto\extrasngerman{\switcht@deutsch}}%
+}{\switcht@@therlang}%
+}
+% numbering style of floats, equations
+\newif\if@numart   \@numartfalse
+\DeclareOption{numart}{\@numarttrue}
+\def\set@numbering{\if@numart\else\num@book\fi}
+\AtEndOfClass{\set@numbering}
+% style for vectors
+\DeclareOption{vecphys}{\def\vec@style{phys}}
+\DeclareOption{vecarrow}{\def\vec@style{arrow}}
+% running heads
+\let\if@runhead\iftrue
+\DeclareOption{norunningheads}{\let\if@runhead\iffalse}
+% referee option
+\let\if@referee\iffalse
+\def\makereferee{\def\baselinestretch{2}\selectfont
+\newbox\refereebox
+\setbox\refereebox=\vbox to\z@{\vskip0.5cm%
+  \hbox to\textwidth{\normalsize\tt\hrulefill\lower0.5ex
+        \hbox{\kern5\p@ referee's copy\kern5\p@}\hrulefill}\vss}%
+\def\@oddfoot{\copy\refereebox}\let\@evenfoot=\@oddfoot}
+\DeclareOption{referee}{\let\if@referee\iftrue
+\AtBeginDocument{\makereferee\small\normalsize}}
+% modification of thebibliography
+\let\if@openbib\iffalse
+\DeclareOption{openbib}{\let\if@openbib\iftrue}
+% LaTeX standard, sectionwise references
+\DeclareOption{oribibl}{\let\oribibl=Y}
+\DeclareOption{sectrefs}{\let\secbibl=Y}
+%
+% footinfo option (provides an informatory line on every page)
+\def\SpringerMacroPackageNameA{svmono.cls}
+% \thetime, \thedate and \timstamp are macros to include
+% time, date (or both) of the TeX run in the document
+\def\maketimestamp{\count255=\time
+\divide\count255 by 60\relax
+\edef\thetime{\the\count255:}%
+\multiply\count255 by-60\relax
+\advance\count255 by\time
+\edef\thetime{\thetime\ifnum\count255<10 0\fi\the\count255}
+\edef\thedate{\number\day-\ifcase\month\or Jan\or Feb\or Mar\or
+             Apr\or May\or Jun\or Jul\or Aug\or Sep\or Oct\or
+             Nov\or Dec\fi-\number\year}
+\def\timstamp{\hbox to\hsize{\tt\hfil\thedate\hfil\thetime\hfil}}}
+\maketimestamp
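+% For example, after \maketimestamp the macro \thedate expands to a string
+% like "5-Apr-2014" and \thetime to a string like "11:37"; \timstamp typesets
+% both in a full-width line (illustrative values).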
+%
+% the "footinfo" option generates an info footline on every page containing
+% the page number, jobname, macro name, and timestamp
+\DeclareOption{footinfo}{\AtBeginDocument{\maketimestamp
+   \def\ps@empty{\let\@mkboth\@gobbletwo
+   \let\@oddhead\@empty\let\@evenhead\@empty}%
+   \def\@oddfoot{\scriptsize\tt Page:\,\thepage\space\hfil
+                 job:\,\jobname\space\hfil
+                 macro:\,\SpringerMacroPackageNameA\space\hfil
+                 date/time:\,\thedate/\thetime}%
+   \let\@evenfoot=\@oddfoot}}
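+% For example, \documentclass[footinfo]{svmono} would activate this footline
+% (illustrative; other class options can be combined with it).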
+%
+% start new chapter on any page
+\newif\if@openright \@openrighttrue
+\DeclareOption{openany}{\@openrightfalse}
+%
+% no size changing allowed
+\DeclareOption{11pt}{\OptionNotUsed}
+\DeclareOption{12pt}{\OptionNotUsed}
+% options for the article class
+\def\@rticle@options{10pt,twoside}
+% fleqn
+\DeclareOption{fleqn}{\def\@rticle@options{10pt,twoside,fleqn}%
+\AtEndOfClass{\let\leftlegendglue\relax}%
+\AtBeginDocument{\mathindent\parindent}}
+% hanging sectioning titles
+\let\if@sechang\iffalse
+\DeclareOption{sechang}{\let\if@sechang\iftrue}
+\def\ClassInfoNoLine#1#2{%
+   \ClassInfo{#1}{#2\@gobble}%
+}
+\let\SVMonoOpt\@empty
+\DeclareOption*{\InputIfFileExists{sv\CurrentOption.clo}{%
+\global\let\SVMonoOpt\CurrentOption}{%
+\ClassWarning{Springer-SVMono}{Specified option or subpackage
+"\CurrentOption" \MessageBreak not found
+passing it to article class \MessageBreak
+-}\PassOptionsToClass{\CurrentOption}{article}%
+}}
+\ProcessOptions\relax
+\ifx\SVMonoOpt\@empty\relax
+\ClassInfoNoLine{Springer-SVMono}{extra/valid Springer sub-package
+\MessageBreak not found in option list - using "global" style}{}
+\fi
+\LoadClass[\@rticle@options]{article}
+\raggedbottom
+
+% various sizes and settings for monographs
+
+\setlength{\textwidth}{28pc}   %  11.8cm
+%\setlength{\textheight}{12pt}\multiply\textheight by 45\relax
+\setlength{\textheight}{540\p@}
+\setlength{\topmargin}{0cm}
+\setlength\oddsidemargin   {63\p@}
+\setlength\evensidemargin  {63\p@}
+\setlength\marginparwidth{90\p@}
+\setlength\headsep   {12\p@}
+
+\setlength{\parindent}{15\p@}
+\setlength{\parskip}{\z@ \@plus \p@}
+\setlength{\hfuzz}{2\p@}
+\setlength{\arraycolsep}{1.5\p@}
+
+\frenchspacing
+
+\tolerance=500
+
+\predisplaypenalty=0
+\clubpenalty=10000
+\widowpenalty=10000
+
+\setlength\footnotesep{7.7\p@}
+
+\newdimen\betweenumberspace          % dimension for space between
+\betweenumberspace=5\p@               % number and text of titles
+\newdimen\headlineindent             % dimension for space of
+\headlineindent=2.5cc                % number and gap of running heads
+
+% fonts, sizes, and the like
+\renewcommand\small{%
+   \@setfontsize\small\@ixpt{11}%
+   \abovedisplayskip 8.5\p@ \@plus3\p@ \@minus4\p@
+   \abovedisplayshortskip \z@ \@plus2\p@
+   \belowdisplayshortskip 4\p@ \@plus2\p@ \@minus2\p@
+   \def\@listi{\leftmargin\leftmargini
+               \parsep \z@ \@plus\p@ \@minus\p@
+               \topsep 6\p@ \@plus2\p@ \@minus4\p@
+               \itemsep\z@}%
+   \belowdisplayskip \abovedisplayskip
+}
+%
+\let\footnotesize=\small
+%
+\newenvironment{petit}{\par\addvspace{6\p@}\small}{\par\addvspace{6\p@}}
+%
+
+% modification of automatic positioning of floating objects
+\setlength\@fptop{\z@ }
+\setlength\@fpsep{12\p@ }
+\setlength\@fpbot{\z@ \@plus 1fil }
+\def\textfraction{.01}
+\def\floatpagefraction{.8}
+\setlength{\intextsep}{20\p@ \@plus 2\p@ \@minus 2\p@}
+\setcounter{topnumber}{4}
+\def\topfraction{.9}
+\setcounter{bottomnumber}{2}
+\def\bottomfraction{.7}
+\setcounter{totalnumber}{6}
+%
+% size and style of headings
+\newcommand{\partsize}{\Large}
+\newcommand{\partstyle}{\bfseries\boldmath}
+\newcommand{\chapsize}{\Large}
+\newcommand{\chapstyle}{\bfseries\boldmath}
+\newcommand{\chapshooksize}{\small}
+\newcommand{\chapshookstyle}{\itshape\unboldmath}
+\newcommand{\secsize}{\large}
+\newcommand{\secstyle}{\bfseries\boldmath}
+\newcommand{\subsecsize}{\normalsize}
+\newcommand{\subsecstyle}{\bfseries\boldmath}
+%
+\def\cleardoublepage{\clearpage\if@twoside \ifodd\c@page\else
+    \hbox{}\newpage\if@twocolumn\hbox{}\newpage\fi\fi\fi}
+
+\newcommand{\clearemptydoublepage}{%
+        \clearpage{\pagestyle{empty}\cleardoublepage}}
+\newcommand{\startnewpage}{\if@openright\clearemptydoublepage\else\clearpage\fi}
+
+% redefinition of \part
+\renewcommand\part{\clearemptydoublepage
+         \thispagestyle{empty}
+         \if@twocolumn
+            \onecolumn
+            \@tempswatrue
+         \else
+            \@tempswafalse
+         \fi
+         \@ifundefined{thispagecropped}{}{\thispagecropped}
+         \secdef\@part\@spart}
+
+\def\@part[#1]#2{\ifnum \c@secnumdepth >-2\relax
+        \refstepcounter{part}
+        \addcontentsline{toc}{part}{\partname\
+        \thepart\thechapterend\hspace{\betweenumberspace}%
+        #1}\else
+        \addcontentsline{toc}{part}{#1}\fi
+   \markboth{}{}
+   {\raggedleft
+    \ifnum \c@secnumdepth >-2\relax
+      \normalfont\partstyle\partsize\vrule height 34pt width 0pt depth 0pt%
+     \partname\ \thepart\llap{\smash{\lower 5pt\hbox to\textwidth{\hrulefill}}}
+    \par
+    \vskip 128.3\p@ \fi
+    #2\par}\@endpart}
+%
+% \@endpart finishes the part page
+%
+\def\@endpart{\vfil\newpage
+   \if@twoside
+       \hbox{}
+       \thispagestyle{empty}
+       \newpage
+   \fi
+   \if@tempswa
+     \twocolumn
+   \fi}
+%
+\def\@spart#1{{\raggedleft
+   \normalfont\partsize\partstyle
+   #1\par}\@endpart}
+%
+\newenvironment{partbacktext}{\def\@endpart{\vfil\newpage}}
+{\thispagestyle{empty} \newpage \if@tempswa\twocolumn\fi}
+%
+% (re)define sectioning
+\setcounter{secnumdepth}{2}
+
+\def\seccounterend{}
+\def\seccountergap{\hskip\betweenumberspace}
+\def\@seccntformat#1{\csname the#1\endcsname\seccounterend\seccountergap\ignorespaces}
+%
+\let\firstmark=\botmark
+%
+\@ifundefined{thechapterend}{\def\thechapterend{}}{}
+%
+\if@sechang
+   \def\sec@hangfrom#1{\setbox\@tempboxa\hbox{#1}%
+         \hangindent\wd\@tempboxa\noindent\box\@tempboxa}
+\else
+   \def\sec@hangfrom#1{\setbox\@tempboxa\hbox{#1}%
+         \hangindent\z@\noindent\box\@tempboxa}
+\fi
+
+\def\chap@hangfrom#1{\noindent\vrule height 34pt width 0pt depth 0pt
+\rlap{\smash{\lower 5pt\hbox to\textwidth{\hrulefill}}}\hbox{#1}
+\vskip10pt}
+\def\schap@hangfrom{\chap@hangfrom{}}
+
+\newcounter{chapter}
+%
+\@addtoreset{section}{chapter}
+\@addtoreset{footnote}{chapter}
+
+\newif\if@mainmatter \@mainmattertrue
+\newcommand\frontmatter{\startnewpage
+            \@mainmatterfalse\pagenumbering{Roman}
+            \setcounter{page}{5}}
+%
+\newcommand\mainmatter{\clearemptydoublepage
+       \@mainmattertrue\pagenumbering{arabic}}
+%
+\newcommand\backmatter{\clearemptydoublepage\@mainmatterfalse}
+
+\def\@chapapp{\chaptername}
+
+\newdimen\chapstarthookwidth
+\newcommand\chapstarthook[2][0.66\textwidth]{%
+\setlength{\chapstarthookwidth}{#1}%
+\gdef\chapst@rthook{#2}}
+
+\newcommand{\processchapstarthook}{\@ifundefined{chapst@rthook}{}{%
+    \setbox0=\hbox{\vbox{\hyphenpenalty=50
+    \begin{flushright}
+    \begin{minipage}{\chapstarthookwidth}
+       \vrule\@width\z@\@height21\p@\@depth\z@
+       \normalfont\chapshooksize\chapshookstyle\chapst@rthook
+    \end{minipage}
+    \end{flushright}}}%
+    \@tempdima=\pagetotal
+    \advance\@tempdima by\ht0
+    \ifdim\@tempdima<106\p@
+       \multiply\@tempdima by-1
+       \advance\@tempdima by106\p@
+       \vskip\@tempdima
+    \fi
+    \box0\par
+    \global\let\chapst@rthook=\undefined}}
+
+\newcommand\chapter{\startnewpage
+                    \@ifundefined{thispagecropped}{}{\thispagecropped}
+                    \thispagestyle{empty}%
+                    \global\@topnum\z@
+                    \@afterindentfalse
+                    \secdef\@chapter\@schapter}
+
+\def\@chapter[#1]#2{\ifnum \c@secnumdepth >\m@ne
+                       \refstepcounter{chapter}%
+                       \if@mainmatter
+                         \typeout{\@chapapp\space\thechapter.}%
+                         \addcontentsline{toc}{chapter}{\protect
+                                  \numberline{\thechapter\thechapterend}#1}%
+                       \else
+                         \addcontentsline{toc}{chapter}{#1}%
+                       \fi
+                    \else
+                      \addcontentsline{toc}{chapter}{#1}%
+                    \fi
+                    \chaptermark{#1}%
+                    \addtocontents{lof}{\protect\addvspace{10\p@}}%
+                    \addtocontents{lot}{\protect\addvspace{10\p@}}%
+                    \if@twocolumn
+                      \@topnewpage[\@makechapterhead{#2}]%
+                    \else
+                      \@makechapterhead{#2}%
+                      \@afterheading
+                    \fi}
+
+\def\@schapter#1{\if@twocolumn
+                   \@topnewpage[\@makeschapterhead{#1}]%
+                 \else
+                   \@makeschapterhead{#1}%
+                   \@afterheading
+                 \fi}
+
+%%changes position and layout of numbered chapter headings
+\def\@makechapterhead#1{{\parindent\z@\raggedright\normalfont
+  \hyphenpenalty \@M
+  \interlinepenalty\@M
+  \chapsize\chapstyle
+  \chap@hangfrom{\thechapter\thechapterend\hskip\betweenumberspace}%!!!
+  \ignorespaces#1\par\nobreak
+  \processchapstarthook
+  \ifdim\pagetotal>157\p@
+     \vskip 11\p@
+  \else
+     \@tempdima=168\p@\advance\@tempdima by-\pagetotal
+     \vskip\@tempdima
+  \fi}}
+
+%%changes position and layout of unnumbered chapter headings
+\def\@makeschapterhead#1{{\parindent \z@ \raggedright\normalfont
+  \hyphenpenalty \@M
+  \interlinepenalty\@M
+  \chapsize\chapstyle
+  \schap@hangfrom
+  \ignorespaces#1\par\nobreak
+  \processchapstarthook
+  \ifdim\pagetotal>157\p@
+     \vskip 11\p@
+  \else
+     \@tempdima=168\p@\advance\@tempdima by-\pagetotal
+     \vskip\@tempdima
+  \fi}}
+
+% predefined unnumbered headings
+\newcommand{\preface}[1][\prefacename]{\chapter*{#1}\markboth{#1}{#1}}
+% same with TOC entry
+\newcommand{\Preface}[1][\prefacename]{\chapter*{#1}\markboth{#1}{#1}%
+\addcontentsline{toc}{chapter}{#1}}
+
+% measures and setting of sections
+\renewcommand\section{\@startsection{section}{1}{\z@}%
+                       {-24\p@ \@plus -4\p@ \@minus -4\p@}%
+                       {12\p@ \@plus 4\p@ \@minus 4\p@}%
+                       {\normalfont\secsize\secstyle
+                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
+\renewcommand\subsection{\@startsection{subsection}{2}{\z@}%
+                       {-17\p@ \@plus -4\p@ \@minus -4\p@}%
+                       {10\p@ \@plus 4\p@ \@minus 4\p@}%
+                       {\normalfont\subsecsize\subsecstyle
+                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
+\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}%
+                       {-17\p@ \@plus -4\p@ \@minus -4\p@}%
+                       {10\p@ \@plus 4\p@ \@minus 4\p@}%
+                       {\normalfont\normalsize\subsecstyle
+                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
+\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}%
+                       {-10\p@ \@plus -4\p@ \@minus -4\p@}%
+                       {10\p@ \@plus 4\p@ \@minus 4\p@}%
+                       {\normalfont\normalsize\itshape
+                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
+\def\subparagraph{\@startsection{subparagraph}{5}{\z@}%
+    {-5.388\p@ \@plus-4\p@ \@minus-4\p@}{-5\p@}{\normalfont\normalsize\itshape}}
+
+% Appendix
+\renewcommand\appendix{\par
+                \stepcounter{chapter}
+                \setcounter{chapter}{0}
+                \stepcounter{section}
+                \setcounter{section}{0}
+                \setcounter{equation}{0}
+                \setcounter{figure}{0}
+                \setcounter{table}{0}
+                \setcounter{footnote}{0}
+  \def\@chapapp{\appendixname}%
+  \renewcommand\thechapter{\@Alph\c@chapter}}
+
+%  definition of sections
+%  \hyphenpenalty and \raggedright added, so that there is no
+%  hyphenation and the text is set ragged-right in sectioning
+
+\def\runinsep{}
+\def\aftertext{\unskip\runinsep}
+%
+\def\thesection{\thechapter.\arabic{section}}
+\def\thesubsection{\thesection.\arabic{subsection}}
+\def\thesubsubsection{\thesubsection.\arabic{subsubsection}}
+\def\theparagraph{\thesubsubsection.\arabic{paragraph}}
+\def\thesubparagraph{\theparagraph.\arabic{subparagraph}}
+\def\chaptermark#1{}
+%
+\def\@ssect#1#2#3#4#5{%
+  \@tempskipa #3\relax
+  \ifdim \@tempskipa>\z@
+    \begingroup
+      #4{%
+        \@hangfrom{\hskip #1}%
+          \raggedright
+          \hyphenpenalty \@M
+          \interlinepenalty \@M #5\@@par}%
+    \endgroup
+  \else
+    \def\@svsechd{#4{\hskip #1\relax #5}}%
+  \fi
+  \@xsect{#3}}
+%
+\def\@sect#1#2#3#4#5#6[#7]#8{%
+   \ifnum #2>\c@secnumdepth
+      \let\@svsec\@empty
+   \else
+      \refstepcounter{#1}%
+      \protected@edef\@svsec{\@seccntformat{#1}\relax}%
+   \fi
+   \@tempskipa #5\relax
+   \ifdim \@tempskipa>\z@
+      \begingroup #6\relax
+         \sec@hangfrom{\hskip #3\relax\@svsec}%
+         {\raggedright
+          \hyphenpenalty \@M
+          \interlinepenalty \@M #8\@@par}%
+      \endgroup
+      \csname #1mark\endcsname{#7\seccounterend}%
+      \addcontentsline{toc}{#1}{\ifnum #2>\c@secnumdepth
+                                \else
+                                   \protect\numberline{\csname the#1\endcsname\seccounterend}%
+                                \fi
+                                #7}%
+   \else
+      \def\@svsechd{%
+         #6\hskip #3\relax
+         \@svsec #8\aftertext\ignorespaces
+         \csname #1mark\endcsname{#7}%
+         \addcontentsline{toc}{#1}{%
+            \ifnum #2>\c@secnumdepth \else
+                \protect\numberline{\csname the#1\endcsname\seccounterend}%
+            \fi
+            #7}}%
+   \fi
+   \@xsect{#5}}
+
+% figures and tables are processed in small print
+\def \@floatboxreset {%
+        \reset@font
+        \small
+        \@setnobreak
+        \@setminipage
+}
+\def\fps@figure{htbp}
+\def\fps@table{htbp}
+
+% Frame for paste-in figures or tables
+\def\mpicplace#1#2{%  #1 =width   #2 =height
+\vbox{\hbox to #1{\vrule\@width \fboxrule \@height #2\hfill}}}
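+% For instance (illustrative only), \mpicplace{6cm}{4cm} reserves a place
+% 6 cm wide and 4 cm high for a paste-in figure.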
+
+% labels of enumerate
+\renewcommand\labelenumii{\theenumii)}
+\renewcommand\theenumii{\@alph\c@enumii}
+
+% labels of itemize
+\renewcommand\labelitemi{\textbullet}
+\renewcommand\labelitemii{\textendash}
+\let\labelitemiii=\labelitemiv
+
+% labels of description
+\renewcommand*\descriptionlabel[1]{\hspace\labelsep #1\hfil}
+
+% fixed indentation for standard itemize-environment
+\newdimen\svitemindent \setlength{\svitemindent}{\parindent}
+
+
+% make indentations changeable
+
+\def\setitemindent#1{\settowidth{\labelwidth}{#1}%
+        \let\setit@m=Y%
+        \leftmargini\labelwidth
+        \advance\leftmargini\labelsep
+   \def\@listi{\leftmargin\leftmargini
+        \labelwidth\leftmargini\advance\labelwidth by -\labelsep
+        \parsep=\parskip
+        \topsep=\medskipamount
+        \itemsep=\parskip \advance\itemsep by -\parsep}}
+\def\setitemitemindent#1{\settowidth{\labelwidth}{#1}%
+        \let\setit@m=Y%
+        \leftmarginii\labelwidth
+        \advance\leftmarginii\labelsep
+\def\@listii{\leftmargin\leftmarginii
+        \labelwidth\leftmarginii\advance\labelwidth by -\labelsep
+        \parsep=\parskip
+        \topsep=\z@
+        \itemsep=\parskip \advance\itemsep by -\parsep}}
+%
+% adjusted environment "description"
+% if an optional parameter (at the first two levels of lists)
+% is present, its width is considered to be the widest mark
+% throughout the current list.
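+% For instance (illustrative only):
+%   \begin{description}[widest label]
+%   \item[short] text indented to the width of "widest label"
+%   \item[widest label] more text
+%   \end{description}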
+\def\description{\@ifnextchar[{\@describe}{\list{}{\labelwidth\z@
+          \itemindent-\leftmargin \let\makelabel\descriptionlabel}}}
+%
+\def\describelabel#1{#1\hfil}
+\def\@describe[#1]{\relax\ifnum\@listdepth=0
+\setitemindent{#1}\else\ifnum\@listdepth=1
+\setitemitemindent{#1}\fi\fi
+\list{--}{\let\makelabel\describelabel}}
+%
+\def\itemize{%
+  \ifnum \@itemdepth >\thr@@\@toodeep\else
+    \advance\@itemdepth\@ne
+    \ifx\setit@m\undefined
+       \ifnum \@itemdepth=1 \leftmargini=\svitemindent
+          \labelwidth\leftmargini\advance\labelwidth-\labelsep
+          \leftmarginii=\leftmargini \leftmarginiii=\leftmargini
+       \fi
+    \fi
+    \edef\@itemitem{labelitem\romannumeral\the\@itemdepth}%
+    \expandafter\list
+      \csname\@itemitem\endcsname
+      {\def\makelabel##1{\rlap{##1}\hss}}%
+  \fi}
+%
+\newdimen\verbatimindent \verbatimindent\parindent
+\def\verbatim{\advance\@totalleftmargin by\verbatimindent
+\@verbatim \frenchspacing\@vobeyspaces \@xverbatim}
+
+%
+%  special signs and characters
+\newcommand{\D}{\mathrm{d}}
+\newcommand{\E}{\mathrm{e}}
+\let\eul=\E
+\newcommand{\I}{{\rm i}}
+\let\imag=\I
+%
+% the definition of uppercase Greek characters
+% Springer likes them as italics to depict variables
+\DeclareMathSymbol{\Gamma}{\mathalpha}{letters}{"00}
+\DeclareMathSymbol{\Delta}{\mathalpha}{letters}{"01}
+\DeclareMathSymbol{\Theta}{\mathalpha}{letters}{"02}
+\DeclareMathSymbol{\Lambda}{\mathalpha}{letters}{"03}
+\DeclareMathSymbol{\Xi}{\mathalpha}{letters}{"04}
+\DeclareMathSymbol{\Pi}{\mathalpha}{letters}{"05}
+\DeclareMathSymbol{\Sigma}{\mathalpha}{letters}{"06}
+\DeclareMathSymbol{\Upsilon}{\mathalpha}{letters}{"07}
+\DeclareMathSymbol{\Phi}{\mathalpha}{letters}{"08}
+\DeclareMathSymbol{\Psi}{\mathalpha}{letters}{"09}
+\DeclareMathSymbol{\Omega}{\mathalpha}{letters}{"0A}
+% the upright forms are defined here as \var<Character>
+\DeclareMathSymbol{\varGamma}{\mathalpha}{operators}{"00}
+\DeclareMathSymbol{\varDelta}{\mathalpha}{operators}{"01}
+\DeclareMathSymbol{\varTheta}{\mathalpha}{operators}{"02}
+\DeclareMathSymbol{\varLambda}{\mathalpha}{operators}{"03}
+\DeclareMathSymbol{\varXi}{\mathalpha}{operators}{"04}
+\DeclareMathSymbol{\varPi}{\mathalpha}{operators}{"05}
+\DeclareMathSymbol{\varSigma}{\mathalpha}{operators}{"06}
+\DeclareMathSymbol{\varUpsilon}{\mathalpha}{operators}{"07}
+\DeclareMathSymbol{\varPhi}{\mathalpha}{operators}{"08}
+\DeclareMathSymbol{\varPsi}{\mathalpha}{operators}{"09}
+\DeclareMathSymbol{\varOmega}{\mathalpha}{operators}{"0A}
+% Upright Lower Case Greek letters without using a new MathAlphabet
+\newcommand{\greeksym}[1]{\usefont{U}{psy}{m}{n}#1}
+\newcommand{\greeksymbold}[1]{{\usefont{U}{psy}{b}{n}#1}}
+\newcommand{\allmodesymb}[2]{\relax\ifmmode{\mathchoice
+{\mbox{\fontsize{\tf@size}{\tf@size}#1{#2}}}
+{\mbox{\fontsize{\tf@size}{\tf@size}#1{#2}}}
+{\mbox{\fontsize{\sf@size}{\sf@size}#1{#2}}}
+{\mbox{\fontsize{\ssf@size}{\ssf@size}#1{#2}}}}
+\else
+\mbox{#1{#2}}\fi}
+% Definition of lower case Greek letters
+\newcommand{\ualpha}{\allmodesymb{\greeksym}{a}}
+\newcommand{\ubeta}{\allmodesymb{\greeksym}{b}}
+\newcommand{\uchi}{\allmodesymb{\greeksym}{c}}
+\newcommand{\udelta}{\allmodesymb{\greeksym}{d}}
+\newcommand{\ugamma}{\allmodesymb{\greeksym}{g}}
+\newcommand{\umu}{\allmodesymb{\greeksym}{m}}
+\newcommand{\unu}{\allmodesymb{\greeksym}{n}}
+\newcommand{\upi}{\allmodesymb{\greeksym}{p}}
+\newcommand{\utau}{\allmodesymb{\greeksym}{t}}
+% redefines the \vec accent to a bold character - if desired
+\def\fig@type{arrow}% temporarily abused
+\ifx\vec@style\fig@type\else
+\@ifundefined{vec@style}{%
+ \def\vec#1{\ensuremath{\mathchoice
+                     {\mbox{\boldmath$\displaystyle\mathbf{#1}$}}
+                     {\mbox{\boldmath$\textstyle\mathbf{#1}$}}
+                     {\mbox{\boldmath$\scriptstyle\mathbf{#1}$}}
+                     {\mbox{\boldmath$\scriptscriptstyle\mathbf{#1}$}}}}%
+}
+{\def\vec#1{\ensuremath{\mathchoice
+                     {\mbox{\boldmath$\displaystyle#1$}}
+                     {\mbox{\boldmath$\textstyle#1$}}
+                     {\mbox{\boldmath$\scriptstyle#1$}}
+                     {\mbox{\boldmath$\scriptscriptstyle#1$}}}}%
+}
+\fi
+% tensor
+\def\tens#1{\relax\ifmmode\mathsf{#1}\else\textsf{#1}\fi}
+
+% end of proof symbol
+\newcommand\qedsymbol{\hbox{\rlap{$\sqcap$}$\sqcup$}}
+\newcommand\qed{\relax\ifmmode\else\unskip\quad\fi\qedsymbol}
+\newcommand\smartqed{\renewcommand\qed{\relax\ifmmode\qedsymbol\else
+  {\unskip\nobreak\hfil\penalty50\hskip1em\null\nobreak\hfil\qedsymbol
+  \parfillskip=\z@\finalhyphendemerits=0\endgraf}\fi}}
+%
+\def\num@book{%
+\renewcommand\thesection{\thechapter.\@arabic\c@section}%
+\renewcommand\thesubsection{\thesection.\@arabic\c@subsection}%
+\renewcommand\theequation{\thechapter.\@arabic\c@equation}%
+\renewcommand\thefigure{\thechapter.\@arabic\c@figure}%
+\renewcommand\thetable{\thechapter.\@arabic\c@table}%
+\@addtoreset{section}{chapter}%
+\@addtoreset{figure}{chapter}%
+\@addtoreset{table}{chapter}%
+\@addtoreset{equation}{chapter}}
+%
+% Ragged bottom for the actual page
+\def\thisbottomragged{\def\@textbottom{\vskip\z@ \@plus.0001fil
+\global\let\@textbottom\relax}}
+
+% This is texte.tex
+% it defines various texts and their translations
+% called up with documentstyle options
+\def\switcht@albion{%
+\def\abstractname{Summary.}%
+\def\ackname{Acknowledgement.}%
+\def\andname{and}%
+\def\bibname{References}%
+\def\lastandname{, and}%
+\def\appendixname{Appendix}%
+\def\chaptername{Chapter}%
+\def\claimname{Claim}%
+\def\conjecturename{Conjecture}%
+\def\contentsname{Contents}%
+\def\corollaryname{Corollary}%
+\def\definitionname{Definition}%
+\def\examplename{Example}%
+\def\exercisename{Exercise}%
+\def\figurename{Fig.}%
+\def\keywordname{{\bf Key words:}}%
+\def\indexname{Index}%
+\def\lemmaname{Lemma}%
+\def\contriblistname{List of Contributors}%
+\def\listfigurename{List of Figures}%
+\def\listtablename{List of Tables}%
+\def\mailname{{\it Correspondence to\/}:}%
+\def\noteaddname{Note added in proof}%
+\def\notename{Note}%
+\def\partname{Part}%
+\def\prefacename{Preface}%
+\def\problemname{Problem}%
+\def\proofname{Proof}%
+\def\propertyname{Property}%
+\def\propositionname{Proposition}%
+\def\questionname{Question}%
+\def\refname{References}%
+\def\remarkname{Remark}%
+\def\seename{see}%
+\def\solutionname{Solution}%
+\def\subclassname{{\it Subject Classifications\/}:}%
+\def\tablename{Table}%
+\def\theoremname{Theorem}}
+\switcht@albion
+% Names of theorem like environments are already defined
+% but must be translated if another language is chosen
+%
+% French section
+\def\switcht@francais{\svlanginfo
+ \def\abstractname{R\'esum\'e.}%
+ \def\ackname{Remerciements.}%
+ \def\andname{et}%
+ \def\lastandname{ et}%
+ \def\appendixname{Appendice}%
+ \def\bibname{Bibliographie}%
+ \def\chaptername{Chapitre}%
+ \def\claimname{Pr\'etention}%
+ \def\conjecturename{Hypoth\`ese}%
+ \def\contentsname{Table des mati\`eres}%
+ \def\corollaryname{Corollaire}%
+ \def\definitionname{D\'efinition}%
+ \def\examplename{Exemple}%
+ \def\exercisename{Exercice}%
+ \def\figurename{Fig.}%
+ \def\keywordname{{\bf Mots-cl\'e:}}%
+ \def\indexname{Index}%
+ \def\lemmaname{Lemme}%
+ \def\contriblistname{Liste des contributeurs}%
+ \def\listfigurename{Liste des figures}%
+ \def\listtablename{Liste des tables}%
+ \def\mailname{{\it Correspondence to\/}:}%
+ \def\noteaddname{Note ajout\'ee \`a l'\'epreuve}%
+ \def\notename{Remarque}%
+ \def\partname{Partie}%
+ \def\prefacename{Avant-propos}%  or Pr\'eface
+ \def\problemname{Probl\`eme}%
+ \def\proofname{Preuve}%
+ \def\propertyname{Caract\'eristique}%
+%\def\propositionname{Proposition}%
+ \def\questionname{Question}%
+ \def\refname{Litt\'erature}%
+ \def\remarkname{Remarque}%
+ \def\seename{voir}%
+ \def\solutionname{Solution}%
+ \def\subclassname{{\it Subject Classifications\/}:}%
+ \def\tablename{Tableau}%
+ \def\theoremname{Th\'eor\`eme}%
+}
+%
+% German section
+\def\switcht@deutsch{\svlanginfo
+ \def\abstractname{Zusammenfassung.}%
+ \def\ackname{Danksagung.}%
+ \def\andname{und}%
+ \def\lastandname{ und}%
+ \def\appendixname{Anhang}%
+ \def\bibname{Literaturverzeichnis}%
+ \def\chaptername{Kapitel}%
+ \def\claimname{Behauptung}%
+ \def\conjecturename{Hypothese}%
+ \def\contentsname{Inhaltsverzeichnis}%
+ \def\corollaryname{Korollar}%
+%\def\definitionname{Definition}%
+ \def\examplename{Beispiel}%
+ \def\exercisename{\"Ubung}%
+ \def\figurename{Abb.}%
+ \def\keywordname{{\bf Schl\"usselw\"orter:}}%
+ \def\indexname{Sachverzeichnis}%
+%\def\lemmaname{Lemma}%
+ \def\contriblistname{Mitarbeiter}%
+ \def\listfigurename{Abbildungsverzeichnis}%
+ \def\listtablename{Tabellenverzeichnis}%
+ \def\mailname{{\it Correspondence to\/}:}%
+ \def\noteaddname{Nachtrag}%
+ \def\notename{Anmerkung}%
+ \def\partname{Teil}%
+ \def\prefacename{Vorwort}%
+%\def\problemname{Problem}%
+ \def\proofname{Beweis}%
+ \def\propertyname{Eigenschaft}%
+%\def\propositionname{Proposition}%
+ \def\questionname{Frage}%
+ \def\refname{Literaturverzeichnis}%
+ \def\remarkname{Anmerkung}%
+ \def\seename{siehe}%
+ \def\solutionname{L\"osung}%
+ \def\subclassname{{\it Subject Classifications\/}:}%
+ \def\tablename{Tabelle}%
+%\def\theoremname{Theorem}%
+}
+
+\def\getsto{\mathrel{\mathchoice {\vcenter{\offinterlineskip
+\halign{\hfil
+$\displaystyle##$\hfil\cr\gets\cr\to\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr\gets
+\cr\to\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr\gets
+\cr\to\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+\gets\cr\to\cr}}}}}
+\def\lid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil
+$\displaystyle##$\hfil\cr<\cr\noalign{\vskip1.2\p@}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr<\cr
+\noalign{\vskip1.2\p@}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr<\cr
+\noalign{\vskip\p@}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+<\cr
+\noalign{\vskip0.9\p@}=\cr}}}}}
+\def\gid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil
+$\displaystyle##$\hfil\cr>\cr\noalign{\vskip1.2\p@}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr>\cr
+\noalign{\vskip1.2\p@}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr>\cr
+\noalign{\vskip\p@}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+>\cr
+\noalign{\vskip0.9\p@}=\cr}}}}}
+\def\grole{\mathrel{\mathchoice {\vcenter{\offinterlineskip
+\halign{\hfil
+$\displaystyle##$\hfil\cr>\cr\noalign{\vskip-\p@}<\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr
+>\cr\noalign{\vskip-\p@}<\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr
+>\cr\noalign{\vskip-0.8\p@}<\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+>\cr\noalign{\vskip-0.3\p@}<\cr}}}}}
+\def\bbbr{{\rm I\!R}} %reelle Zahlen
+\def\bbbm{{\rm I\!M}}
+\def\bbbn{{\rm I\!N}} %natuerliche Zahlen
+\def\bbbf{{\rm I\!F}}
+\def\bbbh{{\rm I\!H}}
+\def\bbbk{{\rm I\!K}}
+\def\bbbp{{\rm I\!P}}
+\def\bbbone{{\mathchoice {\rm 1\mskip-4mu l} {\rm 1\mskip-4mu l}
+{\rm 1\mskip-4.5mu l} {\rm 1\mskip-5mu l}}}
+\def\bbbc{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm C$}\hbox{\hbox
+to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle\rm C$}\hbox{\hbox
+to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle\rm C$}\hbox{\hbox
+to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm C$}\hbox{\hbox
+to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}}}
+\def\bbbq{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm
+Q$}\hbox{\raise
+0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.8\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle\rm Q$}\hbox{\raise
+0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.8\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle\rm Q$}\hbox{\raise
+0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.7\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm Q$}\hbox{\raise
+0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.7\ht0\hss}\box0}}}}
+\def\bbbt{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm
+T$}\hbox{\hbox to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle\rm T$}\hbox{\hbox
+to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle\rm T$}\hbox{\hbox
+to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm T$}\hbox{\hbox
+to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}}}
+\def\bbbs{{\mathchoice
+{\setbox0=\hbox{$\displaystyle     \rm S$}\hbox{\raise0.5\ht0\hbox
+to\z@{\kern0.35\wd0\vrule\@height0.45\ht0\hss}\hbox
+to\z@{\kern0.55\wd0\vrule\@height0.5\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle        \rm S$}\hbox{\raise0.5\ht0\hbox
+to\z@{\kern0.35\wd0\vrule\@height0.45\ht0\hss}\hbox
+to\z@{\kern0.55\wd0\vrule\@height0.5\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle      \rm S$}\hbox{\raise0.5\ht0\hbox
+to\z@{\kern0.35\wd0\vrule\@height0.45\ht0\hss}\raise0.05\ht0\hbox
+to\z@{\kern0.5\wd0\vrule\@height0.45\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm S$}\hbox{\raise0.5\ht0\hbox
+to\z@{\kern0.4\wd0\vrule\@height0.45\ht0\hss}\raise0.05\ht0\hbox
+to\z@{\kern0.55\wd0\vrule\@height0.45\ht0\hss}\box0}}}}
+\def\bbbz{{\mathchoice {\hbox{$\textstyle\sf Z\kern-0.4em Z$}}
+{\hbox{$\textstyle\sf Z\kern-0.4em Z$}}
+{\hbox{$\scriptstyle\sf Z\kern-0.3em Z$}}
+{\hbox{$\scriptscriptstyle\sf Z\kern-0.2em Z$}}}}
+
+\let\ts\,
+
+\setlength \labelsep     {5\p@}
+\setlength\leftmargini   {17\p@}
+\setlength\leftmargin    {\leftmargini}
+\setlength\leftmarginii  {\leftmargini}
+\setlength\leftmarginiii {\leftmargini}
+\setlength\leftmarginiv  {\leftmargini}
+\setlength\labelwidth    {\leftmargini}
+\addtolength\labelwidth{-\labelsep}
+
+\def\@listI{\leftmargin\leftmargini
+        \parsep=\parskip
+        \topsep=\medskipamount
+        \itemsep=\parskip \advance\itemsep by -\parsep}
+\let\@listi\@listI
+\@listi
+
+\def\@listii{\leftmargin\leftmarginii
+        \labelwidth\leftmarginii
+        \advance\labelwidth by -\labelsep
+        \parsep=\parskip
+        \topsep=\z@
+        \itemsep=\parskip
+        \advance\itemsep by -\parsep}
+
+\def\@listiii{\leftmargin\leftmarginiii
+        \labelwidth\leftmarginiii\advance\labelwidth by -\labelsep
+        \parsep=\parskip
+        \topsep=\z@
+        \itemsep=\parskip
+        \advance\itemsep by -\parsep
+        \partopsep=\topsep}
+
+\setlength\arraycolsep{1.5\p@}
+\setlength\tabcolsep{1.5\p@}
+
+\def\tableofcontents{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn
+ \fi\chapter*{\contentsname \@mkboth{{\contentsname}}{{\contentsname}}}
+ \@starttoc{toc}\if@restonecol\twocolumn\fi}
+
+\setcounter{tocdepth}{2}
+
+\def\l@part#1#2{\addpenalty{\@secpenalty}%
+   \addvspace{2em \@plus\p@}%
+   \begingroup
+     \parindent \z@
+     \rightskip \z@ \@plus 5em
+     \hrule\vskip5\p@
+     \bfseries\boldmath
+     \leavevmode
+     #1\par
+     \vskip5\p@
+     \hrule
+     \vskip\p@
+     \nobreak
+   \endgroup}
+
+\def\@dotsep{2}
+
+\def\addnumcontentsmark#1#2#3{%
+\addtocontents{#1}{\protect\contentsline{#2}{\protect\numberline
+                                    {\thechapter}#3}{\thepage}}}
+\def\addcontentsmark#1#2#3{%
+\addtocontents{#1}{\protect\contentsline{#2}{#3}{\thepage}}}
+\def\addcontentsmarkwop#1#2#3{%
+\addtocontents{#1}{\protect\contentsline{#2}{#3}{0}}}
+
+\def\@adcmk[#1]{\ifcase #1 \or
+\def\@gtempa{\addnumcontentsmark}%
+  \or    \def\@gtempa{\addcontentsmark}%
+  \or    \def\@gtempa{\addcontentsmarkwop}%
+  \fi\@gtempa{toc}{chapter}}
+\def\addtocmark{\@ifnextchar[{\@adcmk}{\@adcmk[3]}}
+
+\def\l@chapter#1#2{\par\addpenalty{-\@highpenalty}
+ \addvspace{1.0em \@plus \p@}
+ \@tempdima \tocchpnum \begingroup
+ \parindent \z@ \rightskip \@tocrmarg
+ \advance\rightskip by \z@ \@plus 2cm
+ \parfillskip -\rightskip \pretolerance=10000
+ \leavevmode \advance\leftskip\@tempdima \hskip -\leftskip
+ {\bfseries\boldmath#1}\ifx0#2\hfil\null
+ \else
+    \nobreak
+    \leaders\hbox{$\m@th \mkern \@dotsep mu\hbox{.}\mkern
+    \@dotsep mu$}\hfill
+    \nobreak\hbox to\@pnumwidth{\hfil #2}%
+ \fi\par
+ \penalty\@highpenalty \endgroup}
+
+\newdimen\tocchpnum
+\newdimen\tocsecnum
+\newdimen\tocsectotal
+\newdimen\tocsubsecnum
+\newdimen\tocsubsectotal
+\newdimen\tocsubsubsecnum
+\newdimen\tocsubsubsectotal
+\newdimen\tocparanum
+\newdimen\tocparatotal
+\newdimen\tocsubparanum
+\tocchpnum=20\p@            % chapter {\bf 88.} \@plus 5.3\p@
+\tocsecnum=22.5\p@          % section 88.8. plus 4.722\p@
+\tocsubsecnum=30.5\p@       % subsection 88.8.8 plus 4.944\p@
+\tocsubsubsecnum=38\p@      % subsubsection 88.8.8.8 plus 4.666\p@
+\tocparanum=45\p@           % paragraph 88.8.8.8.8 plus 3.888\p@
+\tocsubparanum=53\p@        % subparagraph 88.8.8.8.8.8 plus 4.11\p@
+\def\calctocindent{%
+\tocsectotal=\tocchpnum
+\advance\tocsectotal by\tocsecnum
+\tocsubsectotal=\tocsectotal
+\advance\tocsubsectotal by\tocsubsecnum
+\tocsubsubsectotal=\tocsubsectotal
+\advance\tocsubsubsectotal by\tocsubsubsecnum
+\tocparatotal=\tocsubsubsectotal
+\advance\tocparatotal by\tocparanum}
+\calctocindent
+
+\def\@dottedtocline#1#2#3#4#5{%
+  \ifnum #1>\c@tocdepth \else
+    \vskip \z@ \@plus.2\p@
+    {\leftskip #2\relax \rightskip \@tocrmarg \advance\rightskip by \z@ \@plus 2cm
+               \parfillskip -\rightskip \pretolerance=10000
+     \parindent #2\relax\@afterindenttrue
+     \interlinepenalty\@M
+     \leavevmode
+     \@tempdima #3\relax
+     \advance\leftskip \@tempdima \null\nobreak\hskip -\leftskip
+     {#4}\nobreak
+     \leaders\hbox{$\m@th
+        \mkern \@dotsep mu\hbox{.}\mkern \@dotsep
+        mu$}\hfill
+     \nobreak
+     \hb@xt@\@pnumwidth{\hfil\normalfont \normalcolor #5}%
+     \par}%
+  \fi}
+%
+\def\l@section{\@dottedtocline{1}{\tocchpnum}{\tocsecnum}}
+\def\l@subsection{\@dottedtocline{2}{\tocsectotal}{\tocsubsecnum}}
+\def\l@subsubsection{\@dottedtocline{3}{\tocsubsectotal}{\tocsubsubsecnum}}
+\def\l@paragraph{\@dottedtocline{4}{\tocsubsubsectotal}{\tocparanum}}
+\def\l@subparagraph{\@dottedtocline{5}{\tocparatotal}{\tocsubparanum}}
+
+\renewcommand\listoffigures{%
+    \chapter*{\listfigurename
+      \@mkboth{\listfigurename}{\listfigurename}}%
+    \@starttoc{lof}%
+    }
+
+\renewcommand\listoftables{%
+    \chapter*{\listtablename
+      \@mkboth{\listtablename}{\listtablename}}%
+    \@starttoc{lot}%
+    }
+
+\renewcommand\footnoterule{%
+  \kern-3\p@
+  \hrule\@width 50\p@
+  \kern2.6\p@}
+
+\newdimen\foot@parindent
+\foot@parindent 10.83\p@
+
+\AtBeginDocument{%
+\long\def\@makefntext#1{\@setpar{\@@par\@tempdima \hsize
+         \advance\@tempdima-\foot@parindent\parshape\@ne\foot@parindent
+         \@tempdima}\par
+         \parindent \foot@parindent\noindent \hbox to \z@{%
+         \hss\hss$^{\@thefnmark}$ }#1}}
+
+\if@spthms
+% Definition of the "\spnewtheorem" command.
+%
+% Usage:
+%
+%     \spnewtheorem{env_nam}{caption}[within]{cap_font}{body_font}
+% or  \spnewtheorem{env_nam}[numbered_like]{caption}{cap_font}{body_font}
+% or  \spnewtheorem*{env_nam}{caption}{cap_font}{body_font}
+%
+% New are "cap_font" and "body_font": they specify the font
+% of the caption and of the body text, respectively.
+%
+% "\spnewtheorem*" gives a theorem without a number.
+%
+% A defined spnewtheorem environment is used as described
+% by Lamport.
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
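+%
+% For example (illustrative only, not used by this class), an "axiom"
+% environment numbered within chapters, with a bold caption and an
+% italic body, could be declared as
+%     \spnewtheorem{axiom}{Axiom}[chapter]{\bfseries}{\itshape}
+% and then used as \begin{axiom}[optional note] ... \end{axiom}.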
+
+\def\@thmcountersep{.}
+\def\@thmcounterend{.}
+\newcommand\nocaption{\noexpand\@gobble}
+\newdimen\spthmsep \spthmsep=3pt
+
+\def\spnewtheorem{\@ifstar{\@sthm}{\@Sthm}}
+
+% definition of \spnewtheorem with number
+
+\def\@spnthm#1#2{%
+  \@ifnextchar[{\@spxnthm{#1}{#2}}{\@spynthm{#1}{#2}}}
+\def\@Sthm#1{\@ifnextchar[{\@spothm{#1}}{\@spnthm{#1}}}
+
+\def\@spxnthm#1#2[#3]#4#5{\expandafter\@ifdefinable\csname #1\endcsname
+   {\@definecounter{#1}\@addtoreset{#1}{#3}%
+   \expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand
+     \csname the#3\endcsname \noexpand\@thmcountersep \@thmcounter{#1}}%
+   \expandafter\xdef\csname #1name\endcsname{#2}%
+   \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#4}{#5}}%
+                              \global\@namedef{end#1}{\@endtheorem}}}
+
+\def\@spynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname
+   {\@definecounter{#1}%
+   \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}%
+   \expandafter\xdef\csname #1name\endcsname{#2}%
+   \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#3}{#4}}%
+                               \global\@namedef{end#1}{\@endtheorem}}}
+
+\def\@spothm#1[#2]#3#4#5{%
+  \@ifundefined{c@#2}{\@latexerr{No theorem environment `#2' defined}\@eha}%
+  {\expandafter\@ifdefinable\csname #1\endcsname
+  {\global\@namedef{the#1}{\@nameuse{the#2}}%
+  \expandafter\xdef\csname #1name\endcsname{#3}%
+  \global\@namedef{#1}{\@spthm{#2}{\csname #1name\endcsname}{#4}{#5}}%
+  \global\@namedef{end#1}{\@endtheorem}}}}
+
+\def\@spthm#1#2#3#4{\topsep 7\p@ \@plus2\p@ \@minus4\p@
+\labelsep=\spthmsep\refstepcounter{#1}%
+\@ifnextchar[{\@spythm{#1}{#2}{#3}{#4}}{\@spxthm{#1}{#2}{#3}{#4}}}
+
+\def\@spxthm#1#2#3#4{\@spbegintheorem{#2}{\csname the#1\endcsname}{#3}{#4}%
+                    \ignorespaces}
+
+\def\@spythm#1#2#3#4[#5]{\@spopargbegintheorem{#2}{\csname
+       the#1\endcsname}{#5}{#3}{#4}\ignorespaces}
+
+\def\normalthmheadings{\def\@spbegintheorem##1##2##3##4{\trivlist
+                 \item[\hskip\labelsep{##3##1\ ##2\@thmcounterend}]##4}
+\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
+      \item[\hskip\labelsep{##4##1\ ##2}]{##4(##3)\@thmcounterend\ }##5}}
+\normalthmheadings
+
+\def\reversethmheadings{\def\@spbegintheorem##1##2##3##4{\trivlist
+                 \item[\hskip\labelsep{##3##2\ ##1\@thmcounterend}]##4}
+\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
+      \item[\hskip\labelsep{##4##2\ ##1}]{##4(##3)\@thmcounterend\ }##5}}
+
+% definition of \spnewtheorem* without number
+
+\def\@sthm#1#2{\@Ynthm{#1}{#2}}
+
+\def\@Ynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname
+   {\global\@namedef{#1}{\@Thm{\csname #1name\endcsname}{#3}{#4}}%
+    \expandafter\xdef\csname #1name\endcsname{#2}%
+    \global\@namedef{end#1}{\@endtheorem}}}
+
+\def\@Thm#1#2#3{\topsep 7\p@ \@plus2\p@ \@minus4\p@
+\@ifnextchar[{\@Ythm{#1}{#2}{#3}}{\@Xthm{#1}{#2}{#3}}}
+
+\def\@Xthm#1#2#3{\@Begintheorem{#1}{#2}{#3}\ignorespaces}
+
+\def\@Ythm#1#2#3[#4]{\@Opargbegintheorem{#1}
+       {#4}{#2}{#3}\ignorespaces}
+
+\def\@Begintheorem#1#2#3{#3\trivlist
+                           \item[\hskip\labelsep{#2#1\@thmcounterend}]}
+
+\def\@Opargbegintheorem#1#2#3#4{#4\trivlist
+      \item[\hskip\labelsep{#3#1}]{#3(#2)\@thmcounterend\ }}
+
+% initialize theorem environment
+
+\if@envcntshowhiercnt % show hierarchy counter
+   \def\@thmcountersep{.}
+   \spnewtheorem{theorem}{Theorem}[\envankh]{\bfseries}{\itshape}
+   \@addtoreset{theorem}{chapter}
+\else          % theorem counter only
+   \spnewtheorem{theorem}{Theorem}{\bfseries}{\itshape}
+   \if@envcntreset
+      \@addtoreset{theorem}{chapter}
+      \if@envcntresetsect
+         \@addtoreset{theorem}{section}
+      \fi
+   \fi
+\fi
+
+% definition of various theorem environments
+\spnewtheorem*{claim}{Claim}{\itshape}{\rmfamily}
+\spnewtheorem*{proof}{Proof}{\itshape}{\rmfamily}
+%
+\if@envcntsame % all environments like "Theorem" - using its counter
+   \def\spn@wtheorem#1#2#3#4{\@spothm{#1}[theorem]{#2}{#3}{#4}}
+\else % all environments with their own counter
+   \if@envcntshowhiercnt % show hierarchy counter
+      \def\spn@wtheorem#1#2#3#4{\@spxnthm{#1}{#2}[\envankh]{#3}{#4}}
+   \else          % environment counter only
+      \if@envcntreset % environment counter is reset each section
+         \if@envcntresetsect
+            \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4}
+             \@addtoreset{#1}{chapter}\@addtoreset{#1}{section}}
+         \else
+            \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4}
+                                      \@addtoreset{#1}{chapter}}
+         \fi
+      \else
+         \let\spn@wtheorem=\@spynthm
+      \fi
+   \fi
+\fi
+%
+\let\spdefaulttheorem=\spn@wtheorem
+%
+\spn@wtheorem{case}{Case}{\itshape}{\rmfamily}
+\spn@wtheorem{conjecture}{Conjecture}{\itshape}{\rmfamily}
+\spn@wtheorem{corollary}{Corollary}{\bfseries}{\itshape}
+\spn@wtheorem{definition}{Definition}{\bfseries}{\itshape}
+\spn@wtheorem{example}{Example}{\itshape}{\rmfamily}
+\spn@wtheorem{exercise}{Exercise}{\bfseries}{\rmfamily}
+\spn@wtheorem{lemma}{Lemma}{\bfseries}{\itshape}
+\spn@wtheorem{note}{Note}{\itshape}{\rmfamily}
+\spn@wtheorem{problem}{Problem}{\bfseries}{\rmfamily}
+\spn@wtheorem{property}{Property}{\itshape}{\rmfamily}
+\spn@wtheorem{proposition}{Proposition}{\bfseries}{\itshape}
+\spn@wtheorem{question}{Question}{\itshape}{\rmfamily}
+\spn@wtheorem{solution}{Solution}{\bfseries}{\rmfamily}
+\spn@wtheorem{remark}{Remark}{\itshape}{\rmfamily}
+%
+\newenvironment{theopargself}
+    {\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
+         \item[\hskip\labelsep{##4##1\ ##2}]{##4##3\@thmcounterend\ }##5}
+     \def\@Opargbegintheorem##1##2##3##4{##4\trivlist
+         \item[\hskip\labelsep{##3##1}]{##3##2\@thmcounterend\ }}}{}
+\newenvironment{theopargself*}
+    {\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
+         \item[\hskip\labelsep{##4##1\ ##2}]{\hspace*{-\labelsep}##4##3\@thmcounterend}##5}
+     \def\@Opargbegintheorem##1##2##3##4{##4\trivlist
+         \item[\hskip\labelsep{##3##1}]{\hspace*{-\labelsep}##3##2\@thmcounterend}}}{}
+%
+\spnewtheorem{prob}{\nocaption}[chapter]{\bfseries}{\rmfamily}
+\newcommand{\probref}[1]{\textbf{\ref{#1}} }
+\newenvironment{sol}{\par\addvspace{6pt}\noindent\probref}{\par\addvspace{6pt}}
+%
+\fi
+
+\def\@takefromreset#1#2{%
+    \def\@tempa{#1}%
+    \let\@tempd\@elt
+    \def\@elt##1{%
+        \def\@tempb{##1}%
+        \ifx\@tempa\@tempb\else
+            \@addtoreset{##1}{#2}%
+        \fi}%
+    \expandafter\expandafter\let\expandafter\@tempc\csname cl@#2\endcsname
+    \expandafter\def\csname cl@#2\endcsname{}%
+    \@tempc
+    \let\@elt\@tempd}
+
+% redefinition of the captions for "figure" and "table" environments
+%
+\@ifundefined{floatlegendstyle}{\def\floatlegendstyle{\bfseries}}{}
+\def\floatcounterend{.\ }
+\def\capstrut{\vrule\@width\z@\@height\topskip}
+\@ifundefined{captionstyle}{\def\captionstyle{\normalfont\small}}{}
+\@ifundefined{instindent}{\newdimen\instindent}{}
+
+\long\def\@caption#1[#2]#3{\par\addcontentsline{\csname
+  ext@#1\endcsname}{#1}{\protect\numberline{\csname
+  the#1\endcsname}{\ignorespaces #2}}\begingroup
+    \@parboxrestore\if@minipage\@setminipage\fi
+    \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par
+  \endgroup}
+
+\def\twocaptionwidth#1#2{\def\first@capwidth{#1}\def\second@capwidth{#2}}
+% Default: .46\textwidth
+\twocaptionwidth{.46\textwidth}{.46\textwidth}
+
+\def\leftcaption{\refstepcounter\@captype\@dblarg%
+            {\@leftcaption\@captype}}
+
+\def\rightcaption{\refstepcounter\@captype\@dblarg%
+            {\@rightcaption\@captype}}
+
+\long\def\@leftcaption#1[#2]#3{\addcontentsline{\csname
+  ext@#1\endcsname}{#1}{\protect\numberline{\csname
+  the#1\endcsname}{\ignorespaces #2}}\begingroup
+    \@parboxrestore
+    \vskip\figcapgap
+    \@maketwocaptions{\csname fnum@#1\endcsname}{\ignorespaces #3}%
+    {\first@capwidth}\ignorespaces\hspace{.073\textwidth}\hfill%
+  \endgroup}
+
+\long\def\@rightcaption#1[#2]#3{\addcontentsline{\csname
+  ext@#1\endcsname}{#1}{\protect\numberline{\csname
+  the#1\endcsname}{\ignorespaces #2}}\begingroup
+    \@parboxrestore
+    \@maketwocaptions{\csname fnum@#1\endcsname}{\ignorespaces #3}%
+    {\second@capwidth}\par
+  \endgroup}
+
+\long\def\@maketwocaptions#1#2#3{%
+   \parbox[t]{#3}{{\floatlegendstyle #1\floatcounterend}#2}}
+
+\def\fig@pos{l}
+\newcommand{\leftfigure}[2][\fig@pos]{\makebox[.4635\textwidth][#1]{#2}}
+\let\rightfigure\leftfigure
+
+\newdimen\figgap\figgap=0.5cm  % hgap between figure and sidecaption
+%
+\long\def\@makesidecaption#1#2{%
+   \setbox0=\vbox{\hsize=\@tempdimb
+                  \captionstyle{\floatlegendstyle
+                                         #1\floatcounterend}#2}%
+   \ifdim\instindent<\z@
+      \ifdim\ht0>-\instindent
+         \advance\instindent by\ht0
+         \typeout{^^JClass-Warning: Legend of \string\sidecaption\space for
+                     \@captype\space\csname the\@captype\endcsname
+                  ^^Jis \the\instindent\space taller than the corresponding float -
+                  ^^Jyou'd better switch the environment. }%
+         \instindent\z@
+      \fi
+   \else
+      \ifdim\ht0<\instindent
+         \advance\instindent by-\ht0
+         \advance\instindent by-\dp0\relax
+         \advance\instindent by\topskip
+         \advance\instindent by-11\p@
+      \else
+         \advance\instindent by-\ht0
+         \instindent=-\instindent
+         \typeout{^^JClass-Warning: Legend of \string\sidecaption\space for
+                     \@captype\space\csname the\@captype\endcsname
+                  ^^Jis \the\instindent\space taller than the corresponding float -
+                  ^^Jyou'd better switch the environment. }%
+         \instindent\z@
+      \fi
+   \fi
+   \parbox[b]{\@tempdimb}{\captionstyle{\floatlegendstyle
+                                        #1\floatcounterend}#2%
+                          \ifdim\instindent>\z@ \\
+                               \vrule\@width\z@\@height\instindent
+                                     \@depth\z@
+                          \fi}}
+\def\sidecaption{\@ifnextchar[\sidec@ption{\sidec@ption[b]}}
+\def\sidec@ption[#1]#2\caption{%
+\setbox\@tempboxa=\hbox{\ignorespaces#2\unskip}%
+\if@twocolumn
+ \ifdim\hsize<\textwidth\else
+   \ifdim\wd\@tempboxa<\columnwidth
+      \typeout{Double column float fits into single column -
+            ^^Jyou'd better switch the environment. }%
+   \fi
+ \fi
+\fi
+  \instindent=\ht\@tempboxa
+  \advance\instindent by\dp\@tempboxa
+\if t#1
+\else
+  \instindent=-\instindent
+\fi
+\@tempdimb=\hsize
+\advance\@tempdimb by-\figgap
+\advance\@tempdimb by-\wd\@tempboxa
+\ifdim\@tempdimb<3cm
+   \ClassWarning{SVMono}{\string\sidecaption: No sufficient room for the legend;
+             ^^Jusing normal \string\caption}%
+   \unhbox\@tempboxa
+   \let\@capcommand=\@caption
+\else
+   \ifdim\@tempdimb<4.5cm
+      \ClassWarning{SVMono}{\string\sidecaption: Room for the legend very narrow;
+               ^^Jusing \string\raggedright}%
+      \toks@\expandafter{\captionstyle\sloppy
+                         \rightskip=\z@\@plus6mm\relax}%
+      \def\captionstyle{\the\toks@}%
+   \fi
+   \let\@capcommand=\@sidecaption
+   \leavevmode
+   \unhbox\@tempboxa
+   \hfill
+\fi
+\refstepcounter\@captype
+\@dblarg{\@capcommand\@captype}}
+\long\def\@sidecaption#1[#2]#3{\addcontentsline{\csname
+  ext@#1\endcsname}{#1}{\protect\numberline{\csname
+  the#1\endcsname}{\ignorespaces #2}}\begingroup
+    \@parboxrestore
+    \@makesidecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par
+  \endgroup}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\def\fig@type{figure}
+
+\def\leftlegendglue{\hfil}
+\newdimen\figcapgap\figcapgap=5\p@   % vgap between figure and caption
+\newdimen\tabcapgap\tabcapgap=5.5\p@ % vgap between caption and table
+
+\long\def\@makecaption#1#2{%
+ \captionstyle
+ \ifx\@captype\fig@type
+   \vskip\figcapgap
+ \fi
+ \setbox\@tempboxa\hbox{{\floatlegendstyle #1\floatcounterend}%
+ \capstrut #2}%
+ \ifdim \wd\@tempboxa >\hsize
+   {\floatlegendstyle #1\floatcounterend}\capstrut #2\par
+ \else
+   \hbox to\hsize{\leftlegendglue\unhbox\@tempboxa\hfil}%
+ \fi
+ \ifx\@captype\fig@type\else
+   \vskip\tabcapgap
+ \fi}
+
+\newcounter{merk}
+
+\def\endfigure{\resetsubfig\end@float}
+
+\@namedef{endfigure*}{\resetsubfig\end@dblfloat}
+
+\def\resetsubfig{\global\let\last@subfig=\undefined}
+
+\def\r@setsubfig{\xdef\last@subfig{\number\value{figure}}%
+\setcounter{figure}{\value{merk}}%
+\setcounter{merk}{0}}
+
+\def\subfigures{\refstepcounter{figure}%
+   \@tempcnta=\value{merk}%
+   \setcounter{merk}{\value{figure}}%
+   \setcounter{figure}{\the\@tempcnta}%
+   \def\thefigure{\if@numart\else\thechapter.\fi
+   \@arabic\c@merk\alph{figure}}%
+   \let\resetsubfig=\r@setsubfig}
+
+\def\samenumber{\addtocounter{\@captype}{-1}%
+\@ifundefined{last@subfig}{}{\setcounter{merk}{\last@subfig}}}
+
+% redefinition of the "bibliography" environment
+%
+\def\biblstarthook#1{\gdef\biblst@rthook{#1}}
+%
+\AtBeginDocument{%
+\ifx\secbibl\undefined
+   \def\bibsection{\chapter*{\refname}\markboth{\refname}{\refname}%
+      \addcontentsline{toc}{chapter}{\refname}%
+      \csname biblst@rthook\endcsname}
+\else
+   \def\bibsection{\section*{\refname}\markright{\refname}%
+      \addcontentsline{toc}{section}{\refname}%
+      \csname biblst@rthook\endcsname}
+\fi}
+\ifx\oribibl\undefined % Springer way of life
+   \renewenvironment{thebibliography}[1]{\bibsection
+         \global\let\biblst@rthook=\undefined
+         \def\@biblabel##1{##1.}
+         \small
+         \list{\@biblabel{\@arabic\c@enumiv}}%
+              {\settowidth\labelwidth{\@biblabel{#1}}%
+               \leftmargin\labelwidth
+               \advance\leftmargin\labelsep
+               \if@openbib
+                 \advance\leftmargin\bibindent
+                 \itemindent -\bibindent
+                 \listparindent \itemindent
+                 \parsep \z@
+               \fi
+               \usecounter{enumiv}%
+               \let\p@enumiv\@empty
+               \renewcommand\theenumiv{\@arabic\c@enumiv}}%
+         \if@openbib
+           \renewcommand\newblock{\par}%
+         \else
+           \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}%
+         \fi
+         \sloppy\clubpenalty4000\widowpenalty4000%
+         \sfcode`\.=\@m}
+        {\def\@noitemerr
+          {\@latex@warning{Empty `thebibliography' environment}}%
+         \endlist}
+   \def\@lbibitem[#1]#2{\item[{[#1]}\hfill]\if@filesw
+        {\let\protect\noexpand\immediate
+        \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces}
+\else % original bibliography is required
+   \let\bibname=\refname
+   \renewenvironment{thebibliography}[1]
+     {\chapter*{\bibname
+        \@mkboth{\bibname}{\bibname}}%
+      \list{\@biblabel{\@arabic\c@enumiv}}%
+           {\settowidth\labelwidth{\@biblabel{#1}}%
+            \leftmargin\labelwidth
+            \advance\leftmargin\labelsep
+            \@openbib@code
+            \usecounter{enumiv}%
+            \let\p@enumiv\@empty
+            \renewcommand\theenumiv{\@arabic\c@enumiv}}%
+      \sloppy
+      \clubpenalty4000
+      \@clubpenalty \clubpenalty
+      \widowpenalty4000%
+      \sfcode`\.\@m}
+     {\def\@noitemerr
+       {\@latex@warning{Empty `thebibliography' environment}}%
+      \endlist}
+\fi
+
+\let\if@threecolind\iffalse
+\def\threecolindex{\let\if@threecolind\iftrue}
+\def\indexstarthook#1{\gdef\indexst@rthook{#1}}
+\renewenvironment{theindex}
+               {\if@twocolumn
+                  \@restonecolfalse
+                \else
+                  \@restonecoltrue
+                \fi
+                \columnseprule \z@
+                \columnsep 1cc
+                \@nobreaktrue
+                \if@threecolind
+                   \begin{multicols}{3}[\chapter*{\indexname}%
+                \else
+                   \begin{multicols}{2}[\chapter*{\indexname}%
+                \fi
+                {\csname indexst@rthook\endcsname}]%
+                \global\let\indexst@rthook=\undefined
+                \markboth{\indexname}{\indexname}%
+                \addcontentsline{toc}{chapter}{\indexname}%
+                \flushbottom
+                \parindent\z@
+                \rightskip\z@ \@plus 40\p@
+                \parskip\z@ \@plus .3\p@\relax
+                \flushbottom
+                \let\item\@idxitem
+                \def\,{\relax\ifmmode\mskip\thinmuskip
+                             \else\hskip0.2em\ignorespaces\fi}%
+                \normalfont\small}
+               {\end{multicols}
+                \global\let\if@threecolind\iffalse
+                \if@restonecol\onecolumn\else\clearpage\fi}
+
+\def\idxquad{\hskip 10\p@}% space that divides entry from number
+
+\def\@idxitem{\par\setbox0=\hbox{--\,--\,--\enspace}%
+                  \hangindent\wd0\relax}
+
+\def\subitem{\par\noindent\setbox0=\hbox{--\enspace}% second order
+                \kern\wd0\setbox0=\hbox{--\,--\,--\enspace}%
+                \hangindent\wd0\relax}% indexentry
+
+\def\subsubitem{\par\noindent\setbox0=\hbox{--\,--\enspace}% third order
+                \kern\wd0\setbox0=\hbox{--\,--\,--\enspace}%
+                \hangindent\wd0\relax}% indexentry
+
+\def\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax}
+
+\def\subtitle#1{\gdef\@subtitle{#1}}
+\def\@subtitle{}
+
+\def\maketitle{\par
+ \begingroup
+   \def\thefootnote{\fnsymbol{footnote}}%
+   \def\@makefnmark{\hbox
+       to\z@{$\m@th^{\@thefnmark}$\hss}}%
+   \if@twocolumn
+     \twocolumn[\@maketitle]%
+     \else \newpage
+     \global\@topnum\z@   % Prevents figures from going at top of page.
+     \@maketitle \fi\thispagestyle{empty}\@thanks
+     \par\penalty -\@M
+ \endgroup
+ \setcounter{footnote}{0}%
+ \let\maketitle\relax
+ \let\@maketitle\relax
+ \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}
+
+\def\@maketitle{\newpage
+ \null
+ \vskip 2em                 % Vertical space above title.
+\begingroup
+  \def\and{\unskip, }
+  \parindent=\z@
+  \pretolerance=10000
+  \rightskip=\z@ \@plus 3cm
+  {\LARGE                   % each author set in \LARGE
+   \lineskip .5em
+   \@author
+   \par}%
+  \vskip 2cm                % Vertical space after author.
+  {\Huge \@title \par}%     % Title set in \Huge size.
+  \vskip 1cm                % Vertical space after title.
+  \if!\@subtitle!\else
+   {\LARGE\ignorespaces\@subtitle \par}
+   \vskip 1cm                % Vertical space after subtitle.
+  \fi
+  \if!\@date!\else
+    {\large \@date}%          % Date set in \large size.
+    \par
+    \vskip 1.5em               % Vertical space after date.
+  \fi
+ \vfill
+% {\Large Springer\par}
+%\vskip 5\p@
+%\large
+%  Berlin\enspace Heidelberg\enspace New\kern0.1em York\\
+%  Hong\thinspace Kong\enspace London\\
+%  Milan\enspace Paris\enspace Tokyo\par
+\endgroup}
+
+% Useful environments
+\newenvironment{acknowledgement}{\par\addvspace{17\p@}\small\rm
+\trivlist\item[\hskip\labelsep{\it\ackname}]}
+{\endtrivlist\addvspace{6\p@}}
+%
+\newenvironment{noteadd}{\par\addvspace{17\p@}\small\rm
+\trivlist\item[\hskip\labelsep{\it\noteaddname}]}
+{\endtrivlist\addvspace{6\p@}}
+%
+\renewenvironment{abstract}{%
+      \advance\topsep by0.35cm\relax\small
+      \labelwidth=\z@
+      \listparindent=\z@
+      \itemindent\listparindent
+              \trivlist\item[\hskip\labelsep\bfseries\abstractname]%
+              \if!\abstractname!\hskip-\labelsep\fi
+      }
+    {\endtrivlist}
+
+% define the running headings of a twoside text
+\def\runheadsize{\small}
+\def\runheadstyle{\rmfamily\upshape}
+\def\customizhead{\hspace{\headlineindent}}
+
+\def\ps@headings{\let\@mkboth\markboth
+   \let\@oddfoot\@empty\let\@evenfoot\@empty
+   \def\@evenhead{\runheadsize\runheadstyle\rlap{\thepage}\customizhead
+                  \leftmark\hfil}
+   \def\@oddhead{\runheadsize\runheadstyle\hfil\rightmark\customizhead
+                  \llap{\thepage}}
+   \def\chaptermark##1{\markboth{{\ifnum\c@secnumdepth>\m@ne
+      \thechapter\thechapterend\hskip\betweenumberspace\fi ##1}}{{\ifnum %!!!
+      \c@secnumdepth>\m@ne\thechapter\thechapterend\hskip\betweenumberspace\fi ##1}}}%!!!
+   \def\sectionmark##1{\markright{{\ifnum\c@secnumdepth>\z@
+      \thesection\seccounterend\hskip\betweenumberspace\fi ##1}}}}
+
+\def\ps@myheadings{\let\@mkboth\@gobbletwo
+   \let\@oddfoot\@empty\let\@evenfoot\@empty
+   \def\@evenhead{\runheadsize\runheadstyle\rlap{\thepage}\customizhead
+                  \leftmark\hfil}
+   \def\@oddhead{\runheadsize\runheadstyle\hfil\rightmark\customizhead
+                  \llap{\thepage}}
+   \let\chaptermark\@gobble
+   \let\sectionmark\@gobble
+   \let\subsectionmark\@gobble}
+
+
+\ps@headings
+
+\endinput
+%end of file svmono.cls
--- a/src/Doc/ProgProve/Basics.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,145 +0,0 @@
-(*<*)
-theory Basics
-imports Main
-begin
-(*>*)
-text{*
-This chapter introduces HOL as a functional programming language and shows
-how to prove properties of functional programs by induction.
-
-\section{Basics}
-
-\subsection{Types, Terms and Formulas}
-\label{sec:TypesTermsForms}
-
-HOL is a typed logic whose type system resembles that of functional
-programming languages. Thus there are
-\begin{description}
-\item[base types,] 
-in particular @{typ bool}, the type of truth values,
-@{typ nat}, the type of natural numbers ($\mathbb{N}$), and \indexed{@{typ int}}{int},
-the type of mathematical integers ($\mathbb{Z}$).
-\item[type constructors,]
- in particular @{text list}, the type of
-lists, and @{text set}, the type of sets. Type constructors are written
-postfix, i.e., after their arguments. For example,
-@{typ "nat list"} is the type of lists whose elements are natural numbers.
-\item[function types,]
-denoted by @{text"\<Rightarrow>"}.
-\item[type variables,]
-  denoted by @{typ 'a}, @{typ 'b} etc., just like in ML\@.
-\end{description}
-Note that @{typ"'a \<Rightarrow> 'b list"} means @{typ[source]"'a \<Rightarrow> ('b list)"},
-not @{typ"('a \<Rightarrow> 'b) list"}: postfix type constructors have precedence
-over @{text"\<Rightarrow>"}.
-
-\conceptidx{Terms}{term} are formed as in functional programming by
-applying functions to arguments. If @{text f} is a function of type
-@{text"\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2"} and @{text t} is a term of type
-@{text"\<tau>\<^sub>1"} then @{term"f t"} is a term of type @{text"\<tau>\<^sub>2"}. We write @{text "t :: \<tau>"} to mean that term @{text t} has type @{text \<tau>}.
-
-\begin{warn}
-There are many predefined infix symbols like @{text "+"} and @{text"\<le>"}.
-The name of the corresponding binary function is @{term"op +"},
-not just @{text"+"}. That is, @{term"x + y"} is nice surface syntax
-(``syntactic sugar'') for \noquotes{@{term[source]"op + x y"}}.
-\end{warn}
-
-HOL also supports some basic constructs from functional programming:
-\begin{quote}
-@{text "(if b then t\<^sub>1 else t\<^sub>2)"}\\
-@{text "(let x = t in u)"}\\
-@{text "(case t of pat\<^sub>1 \<Rightarrow> t\<^sub>1 | \<dots> | pat\<^sub>n \<Rightarrow> t\<^sub>n)"}
-\end{quote}
-\begin{warn}
-The above three constructs must always be enclosed in parentheses
-if they occur inside other constructs.
-\end{warn}
-Terms may also contain @{text "\<lambda>"}-abstractions. For example,
-@{term "\<lambda>x. x"} is the identity function.
-
-\conceptidx{Formulas}{formula} are terms of type @{text bool}.
-There are the basic constants @{term True} and @{term False} and
-the usual logical connectives (in decreasing order of precedence):
-@{text"\<not>"}, @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}.
-
-\conceptidx{Equality}{equality} is available in the form of the infix function @{text "="}
-of type @{typ "'a \<Rightarrow> 'a \<Rightarrow> bool"}. It also works for formulas, where
-it means ``if and only if''.
-
-\conceptidx{Quantifiers}{quantifier} are written @{prop"\<forall>x. P"} and @{prop"\<exists>x. P"}.
-
-Isabelle automatically computes the type of each variable in a term. This is
-called \concept{type inference}.  Despite type inference, it is sometimes
-necessary to attach an explicit \concept{type constraint} (or \concept{type
-annotation}) to a variable or term.  The syntax is @{text "t :: \<tau>"} as in
-\mbox{\noquotes{@{prop[source] "m < (n::nat)"}}}. Type constraints may be
-needed to
-disambiguate terms involving overloaded functions such as @{text "+"}, @{text
-"*"} and @{text"\<le>"}.
-
-Finally there are the universal quantifier @{text"\<And>"}\index{$4@\isasymAnd} and the implication
-@{text"\<Longrightarrow>"}\index{$3@\isasymLongrightarrow}. They are part of the Isabelle framework, not the logic
-HOL. Logically, they agree with their HOL counterparts @{text"\<forall>"} and
-@{text"\<longrightarrow>"}, but operationally they behave differently. This will become
-clearer as we go along.
-\begin{warn}
-Right-arrows of all kinds always associate to the right. In particular,
-the formula
-@{text"A\<^sub>1 \<Longrightarrow> A\<^sub>2 \<Longrightarrow> A\<^sub>3"} means @{text "A\<^sub>1 \<Longrightarrow> (A\<^sub>2 \<Longrightarrow> A\<^sub>3)"}.
-The (Isabelle specific) notation \mbox{@{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}}
-is short for the iterated implication \mbox{@{text"A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> A"}}.
-Sometimes we also employ inference rule notation:
-\inferrule{\mbox{@{text "A\<^sub>1"}}\\ \mbox{@{text "\<dots>"}}\\ \mbox{@{text "A\<^sub>n"}}}
-{\mbox{@{text A}}}
-\end{warn}
-
-
-\subsection{Theories}
-\label{sec:Basic:Theories}
-
-Roughly speaking, a \concept{theory} is a named collection of types,
-functions, and theorems, much like a module in a programming language.
-All the Isabelle text that you ever type needs to go into a theory.
-The general format of a theory @{text T} is
-\begin{quote}
-\indexed{\isacom{theory}}{theory} @{text T}\\
-\indexed{\isacom{imports}}{imports} @{text "T\<^sub>1 \<dots> T\<^sub>n"}\\
-\isacom{begin}\\
-\emph{definitions, theorems and proofs}\\
-\isacom{end}
-\end{quote}
-where @{text "T\<^sub>1 \<dots> T\<^sub>n"} are the names of existing
-theories that @{text T} is based on. The @{text "T\<^sub>i"} are the
-direct \conceptidx{parent theories}{parent theory} of @{text T}.
-Everything defined in the parent theories (and their parents, recursively) is
-automatically visible. Each theory @{text T} must
-reside in a \concept{theory file} named @{text "T.thy"}.
-
-\begin{warn}
-HOL contains a theory @{theory Main}\index{Main@@{theory Main}}, the union of all the basic
-predefined theories like arithmetic, lists, sets, etc.
-Unless you know what you are doing, always include @{text Main}
-as a direct or indirect parent of all your theories.
-\end{warn}
-
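-For example, a small theory named @{text Demo} (the name is arbitrary)
-that builds directly on @{text Main} would begin with
-\begin{quote}
-\isacom{theory} @{text Demo}\\
-\isacom{imports} @{text Main}\\
-\isacom{begin}
-\end{quote}
-and would have to reside in a file named @{text "Demo.thy"}.
-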
-In addition to the theories that come with the Isabelle/HOL distribution
-(see @{url "http://isabelle.in.tum.de/library/HOL/"})
-there is also the \emph{Archive of Formal Proofs}
-at @{url "http://afp.sourceforge.net"}, a growing collection of Isabelle theories
-that everybody can contribute to.
-
-\subsection{Quotation Marks}
-
-The textual definition of a theory follows a fixed syntax with keywords like
-\isacommand{begin} and \isacommand{datatype}.  Embedded in this syntax are
-the types and formulas of HOL.  To distinguish the two levels, everything
-HOL-specific (terms and types) must be enclosed in quotation marks:
-\texttt{"}\dots\texttt{"}. To lessen this burden, quotation marks around a
-single identifier can be dropped.  When Isabelle prints a syntax error
-message, it refers to the HOL syntax as the \concept{inner syntax} and the
-enclosing theory language as the \concept{outer syntax}.
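-
-For example, in a function definition such as (the name @{text sq} is purely
-illustrative)
-\begin{quote}
-\isacom{fun} @{text"sq :: \"nat \<Rightarrow> nat\""} \isacom{where} @{text"\"sq n = n * n\""}
-\end{quote}
-the type and the defining equation are inner syntax and must be quoted,
-whereas the keywords \isacom{fun} and \isacom{where} belong to the outer
-syntax and the single identifier @{text sq} needs no quotation marks.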
-*}
-(*<*)
-end
-(*>*)
\ No newline at end of file
--- a/src/Doc/ProgProve/Bool_nat_list.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,458 +0,0 @@
-(*<*)
-theory Bool_nat_list
-imports Main
-begin
-(*>*)
-
-text{*
-\vspace{-4ex}
-\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and @{text list}}{Types bool, nat and list}}
-
-These are the most important predefined types. We go through them one by one.
-Based on examples we learn how to define (possibly recursive) functions and
-prove theorems about them by induction and simplification.
-
-\subsection{Type \indexed{@{typ bool}}{bool}}
-
-The type of boolean values is a predefined datatype
-@{datatype[display] bool}
-with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
-with many predefined functions:  @{text "\<not>"}, @{text "\<and>"}, @{text "\<or>"}, @{text
-"\<longrightarrow>"} etc. Here is how conjunction could be defined by pattern matching:
-*}
-
-fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
-"conj True True = True" |
-"conj _ _ = False"
-
-text{* Both the datatype and function definitions roughly follow the syntax
-of functional programming languages.
-
-\subsection{Type \indexed{@{typ nat}}{nat}}
-
-Natural numbers are another predefined datatype:
-@{datatype[display] nat}\index{Suc@@{const Suc}}
-All values of type @{typ nat} are generated by the constructors
-@{text 0} and @{const Suc}. Thus the values of type @{typ nat} are
-@{text 0}, @{term"Suc 0"}, @{term"Suc(Suc 0)"} etc.
-There are many predefined functions: @{text "+"}, @{text "*"}, @{text
-"\<le>"}, etc. Here is how you could define your own addition:
-*}
-
-fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
-"add 0 n = n" |
-"add (Suc m) n = Suc(add m n)"
-
-text{* And here is a proof of the fact that @{prop"add m 0 = m"}: *}
-
-lemma add_02: "add m 0 = m"
-apply(induction m)
-apply(auto)
-done
-(*<*)
-lemma "add m 0 = m"
-apply(induction m)
-(*>*)
-txt{* The \isacom{lemma} command starts the proof and gives the lemma
-a name, @{text add_02}. Properties of recursively defined functions
-need to be established by induction in most cases.
-Command \isacom{apply}@{text"(induction m)"} instructs Isabelle to
-start a proof by induction on @{text m}. In response, it will show the
-following proof state:
-@{subgoals[display,indent=0]}
-The numbered lines are known as \emph{subgoals}.
-The first subgoal is the base case, the second one the induction step.
-The prefix @{text"\<And>m."} is Isabelle's way of saying ``for an arbitrary but fixed @{text m}''. The @{text"\<Longrightarrow>"} separates assumptions from the conclusion.
-The command \isacom{apply}@{text"(auto)"} instructs Isabelle to try
-and prove all subgoals automatically, essentially by simplifying them.
-Because both subgoals are easy, Isabelle can do it.
-The base case @{prop"add 0 0 = 0"} holds by definition of @{const add},
-and the induction step is almost as simple:
-@{text"add\<^raw:~>(Suc m) 0 = Suc(add m 0) = Suc m"}
-using first the definition of @{const add} and then the induction hypothesis.
-In summary, both subproofs rely on simplification with function definitions and
-the induction hypothesis.
-As a result of that final \isacom{done}, Isabelle associates the lemma
-just proved with its name. You can now inspect the lemma with the command
-*}
-
-thm add_02
-
-txt{* which displays @{thm[show_question_marks,display] add_02} The free
-variable @{text m} has been replaced by the \concept{unknown}
-@{text"?m"}. There is no logical difference between the two but an
-operational one: unknowns can be instantiated, which is what you want after
-some lemma has been proved.
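-For example, the command \isacom{thm} @{text"add_02[of \"Suc 0\"]"}
-(the @{text"[of \<dots>]"} mechanism is explained later) instantiates
-@{text"?m"} with @{term"Suc 0"} and displays @{text"add (Suc 0) 0 = Suc 0"}.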
-
-Note that there is also a proof method @{text induct}, which behaves almost
-like @{text induction}; the difference is explained in \autoref{ch:Isar}.
-
-\begin{warn}
-Terminology: We use \concept{lemma}, \concept{theorem} and \concept{rule}
-interchangeably for propositions that have been proved.
-\end{warn}
-\begin{warn}
-  Numerals (@{text 0}, @{text 1}, @{text 2}, \dots) and most of the standard
-  arithmetic operations (@{text "+"}, @{text "-"}, @{text "*"}, @{text"\<le>"},
-  @{text"<"} etc) are overloaded: they are available
-  not just for natural numbers but for other types as well.
-  For example, given the goal @{text"x + 0 = x"}, there is nothing to indicate
-  that you are talking about natural numbers. Hence Isabelle can only infer
-  that @{term x} is of some arbitrary type where @{text 0} and @{text"+"}
-  exist. As a consequence, you will be unable to prove the goal.
-%  To alert you to such pitfalls, Isabelle flags numerals without a
-%  fixed type in its output: @ {prop"x+0 = x"}.
-  In this particular example, you need to include
-  an explicit type constraint, for example @{text"x+0 = (x::nat)"}. If there
-  is enough contextual information this may not be necessary: @{prop"Suc x =
-  x"} automatically implies @{text"x::nat"} because @{term Suc} is not
-  overloaded.
-\end{warn}
-
-\subsubsection{An Informal Proof}
-
-Above we gave some terse informal explanation of the proof of
-@{prop"add m 0 = m"}. A more detailed informal exposition of the lemma
-might look like this:
-\bigskip
-
-\noindent
-\textbf{Lemma} @{prop"add m 0 = m"}
-
-\noindent
-\textbf{Proof} by induction on @{text m}.
-\begin{itemize}
-\item Case @{text 0} (the base case): @{prop"add 0 0 = 0"}
-  holds by definition of @{const add}.
-\item Case @{term"Suc m"} (the induction step):
-  We assume @{prop"add m 0 = m"}, the induction hypothesis (IH),
-  and we need to show @{text"add (Suc m) 0 = Suc m"}.
-  The proof is as follows:\smallskip
-
-  \begin{tabular}{@ {}rcl@ {\quad}l@ {}}
-  @{term "add (Suc m) 0"} &@{text"="}& @{term"Suc(add m 0)"}
-  & by definition of @{text add}\\
-              &@{text"="}& @{term "Suc m"} & by IH
-  \end{tabular}
-\end{itemize}
-Throughout this book, \concept{IH} will stand for ``induction hypothesis''.
-
-We have now seen three proofs of @{prop"add m 0 = m"}: the Isabelle one, the
-terse four lines explaining the base case and the induction step, and just now a
-model of a traditional inductive proof. The three proofs differ in the level
-of detail given and the intended reader: the Isabelle proof is for the
-machine, the informal proofs are for humans. Although this book concentrates
-on Isabelle proofs, it is important to be able to rephrase those proofs
-as informal text comprehensible to a reader familiar with traditional
-mathematical proofs. Later on we will introduce an Isabelle proof language
-that is closer to traditional informal mathematical language and is often
-directly readable.
-
-\subsection{Type \indexed{@{text list}}{list}}
-
-Although lists are already predefined, we define our own copy just for
-demonstration purposes:
-*}
-(*<*)
-apply(auto)
-done 
-declare [[names_short]]
-(*>*)
-datatype 'a list = Nil | Cons 'a "'a list"
-
-text{*
-\begin{itemize}
-\item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
-\item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
-Hence all lists are of the form @{const Nil}, or @{term"Cons x Nil"},
-or @{term"Cons x (Cons y Nil)"} etc.
-\item \isacom{datatype} requires no quotation marks on the
-left-hand side, but on the right-hand side each of the argument
-types of a constructor needs to be enclosed in quotation marks, unless
-it is just an identifier (e.g.\ @{typ nat} or @{typ 'a}).
-\end{itemize}
-We also define two standard functions, append and reverse: *}
-
-fun app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
-"app Nil ys = ys" |
-"app (Cons x xs) ys = Cons x (app xs ys)"
-
-fun rev :: "'a list \<Rightarrow> 'a list" where
-"rev Nil = Nil" |
-"rev (Cons x xs) = app (rev xs) (Cons x Nil)"
-
-text{* By default, variables @{text xs}, @{text ys} and @{text zs} are of
-@{text list} type.
-
-Command \indexed{\isacommand{value}}{value} evaluates a term. For example, *}
-
-value "rev(Cons True (Cons False Nil))"
-
-text{* yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too: *}
-
-value "rev(Cons a (Cons b Nil))"
-
-text{* yields @{value "rev(Cons a (Cons b Nil))"}.
-\medskip
-
-Figure~\ref{fig:MyList} shows the theory created so far.
-Because @{text list}, @{const Nil}, @{const Cons} etc are already predefined,
- Isabelle prints qualified (long) names when executing this theory, for example, @{text MyList.Nil}
- instead of @{const Nil}.
- To suppress the qualified names you can insert the command
- \texttt{declare [[names\_short]]}.
- This is not recommended in general; we use it here only because of this unusual example.
-% Notice where the
-%quotations marks are needed that we mostly sweep under the carpet.  In
-%particular, notice that \isacom{datatype} requires no quotation marks on the
-%left-hand side, but that on the right-hand side each of the argument
-%types of a constructor needs to be enclosed in quotation marks.
-
-\begin{figure}[htbp]
-\begin{alltt}
-\input{MyList.thy}\end{alltt}
-\caption{A Theory of Lists}
-\label{fig:MyList}
-\end{figure}
-
-\subsubsection{Structural Induction for Lists}
-
-Just as for natural numbers, there is a proof principle of induction for
-lists. Induction over a list is essentially induction over the length of
-the list, although the length remains implicit. To prove that some property
-@{text P} holds for all lists @{text xs}, i.e.\ \mbox{@{prop"P(xs)"}},
-you need to prove
-\begin{enumerate}
-\item the base case @{prop"P(Nil)"} and
-\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed @{text x} and @{text xs}.
-\end{enumerate}
-This is often called \concept{structural induction} for lists.
-
-\subsection{The Proof Process}
-
-We will now demonstrate the typical proof process, which involves
-the formulation and proof of auxiliary lemmas.
-Our goal is to show that reversing a list twice produces the original
-list. *}
-
-theorem rev_rev [simp]: "rev(rev xs) = xs"
-
-txt{* Commands \isacom{theorem} and \isacom{lemma} are
-interchangeable and merely indicate the importance we attach to a
-proposition. Via the bracketed attribute @{text simp} we also tell Isabelle
-to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
-involving simplification will replace occurrences of @{term"rev(rev xs)"} by
-@{term"xs"}. The proof is by induction: *}
-
-apply(induction xs)
-
-txt{*
-As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
-@{subgoals[display,indent=0,margin=65]}
-Let us try to solve both goals automatically:
-*}
-
-apply(auto)
-
-txt{*Subgoal~1 is proved, and disappears; the simplified version
-of subgoal~2 becomes the new subgoal~1:
-@{subgoals[display,indent=0,margin=70]}
-In order to simplify this subgoal further, a lemma suggests itself.
-
-\subsubsection{A First Lemma}
-
-We insert the following lemma in front of the main theorem:
-*}
-(*<*)
-oops
-(*>*)
-lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
-
-txt{* There are two variables that we could induct on: @{text xs} and
-@{text ys}. Because @{const app} is defined by recursion on
-the first argument, @{text xs} is the correct one:
-*}
-
-apply(induction xs)
-
-txt{* This time not even the base case is solved automatically: *}
-apply(auto)
-txt{*
-\vspace{-5ex}
-@{subgoals[display,goals_limit=1]}
-Again, we need to abandon this proof attempt and prove another simple lemma
-first.
-
-\subsubsection{A Second Lemma}
-
-We again try the canonical proof procedure:
-*}
-(*<*)
-oops
-(*>*)
-lemma app_Nil2 [simp]: "app xs Nil = xs"
-apply(induction xs)
-apply(auto)
-done
-
-text{*
-Thankfully, this worked.
-Now we can continue with our stuck proof attempt of the first lemma:
-*}
-
-lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
-apply(induction xs)
-apply(auto)
-
-txt{*
-We find that this time @{text"auto"} solves the base case, but the
-induction step merely simplifies to
-@{subgoals[display,indent=0,goals_limit=1]}
-The missing lemma is associativity of @{const app},
-which we insert in front of the failed lemma @{text rev_app}.
-
-\subsubsection{Associativity of @{const app}}
-
-The canonical proof procedure succeeds without further ado:
-*}
-(*<*)oops(*>*)
-lemma app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"
-apply(induction xs)
-apply(auto)
-done
-(*<*)
-lemma rev_app [simp]: "rev(app xs ys) = app (rev ys)(rev xs)"
-apply(induction xs)
-apply(auto)
-done
-
-theorem rev_rev [simp]: "rev(rev xs) = xs"
-apply(induction xs)
-apply(auto)
-done
-(*>*)
-text{*
-Finally the proofs of @{thm[source] rev_app} and @{thm[source] rev_rev}
-succeed, too.
-
-\subsubsection{Another Informal Proof}
-
-Here is the informal proof of associativity of @{const app}
-corresponding to the Isabelle proof above.
-\bigskip
-
-\noindent
-\textbf{Lemma} @{prop"app (app xs ys) zs = app xs (app ys zs)"}
-
-\noindent
-\textbf{Proof} by induction on @{text xs}.
-\begin{itemize}
-\item Case @{text Nil}: \ @{prop"app (app Nil ys) zs = app ys zs"} @{text"="}
-  \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of @{text app}.
-\item Case @{text"Cons x xs"}: We assume
-  \begin{center} \hfill @{term"app (app xs ys) zs"} @{text"="}
-  @{term"app xs (app ys zs)"} \hfill (IH) \end{center}
-  and we need to show
-  \begin{center} @{prop"app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)"}.\end{center}
-  The proof is as follows:\smallskip
-
-  \begin{tabular}{@ {}l@ {\quad}l@ {}}
-  @{term"app (app (Cons x xs) ys) zs"}\\
-  @{text"= app (Cons x (app xs ys)) zs"} & by definition of @{text app}\\
-  @{text"= Cons x (app (app xs ys) zs)"} & by definition of @{text app}\\
-  @{text"= Cons x (app xs (app ys zs))"} & by IH\\
-  @{text"= app (Cons x xs) (app ys zs)"} & by definition of @{text app}
-  \end{tabular}
-\end{itemize}
-\medskip
-
-\noindent Didn't we say earlier that all proofs are by simplification? But
-in both cases, going from left to right, the last equality step is not a
-simplification at all! In the base case it is @{prop"app ys zs = app Nil (app
-ys zs)"}. It appears almost mysterious because we suddenly complicate the
-term by appending @{text Nil} on the left. What is really going on is this:
-when proving some equality \mbox{@{prop"s = t"}}, both @{text s} and @{text t} are
-simplified until they ``meet in the middle''. This heuristic for equality proofs
-works well for a functional programming context like ours. In the base case
-both @{term"app (app Nil ys) zs"} and @{term"app Nil (app
-ys zs)"} are simplified to @{term"app ys zs"}, the term in the middle.
-
-\subsection{Predefined Lists}
-\label{sec:predeflists}
-
-Isabelle's predefined lists are the same as the ones above, but with
-more syntactic sugar:
-\begin{itemize}
-\item @{text "[]"} is \indexed{@{const Nil}}{Nil},
-\item @{term"x # xs"} is @{term"Cons x xs"}\index{Cons@@{const Cons}},
-\item @{text"[x\<^sub>1, \<dots>, x\<^sub>n]"} is @{text"x\<^sub>1 # \<dots> # x\<^sub>n # []"}, and
-\item @{term "xs @ ys"} is @{term"app xs ys"}.
-\end{itemize}
-There is also a large library of predefined functions.
-The most important ones are the length function
-@{text"length :: 'a list \<Rightarrow> nat"}\index{length@@{const length}} (with the obvious definition),
-and the \indexed{@{const map}}{map} function that applies a function to all elements of a list:
-\begin{isabelle}
-\isacom{fun} @{const map} @{text"::"} @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"}\\
-@{text"\""}@{thm list.map(1)}@{text"\" |"}\\
-@{text"\""}@{thm list.map(2)}@{text"\""}
-\end{isabelle}
-
-\ifsem
-Also useful are the \concept{head} of a list, its first element,
-and the \concept{tail}, the rest of the list:
-\begin{isabelle}\index{hd@@{const hd}}
-\isacom{fun} @{text"hd :: 'a list \<Rightarrow> 'a"}\\
-@{prop"hd(x#xs) = x"}
-\end{isabelle}
-\begin{isabelle}\index{tl@@{const tl}}
-\isacom{fun} @{text"tl :: 'a list \<Rightarrow> 'a list"}\\
-@{prop"tl [] = []"} @{text"|"}\\
-@{prop"tl(x#xs) = xs"}
-\end{isabelle}
-Note that since HOL is a logic of total functions, @{term"hd []"} is defined,
-but we do not know what the result is. That is, @{term"hd []"} is not undefined
-but underdefined.
-\fi
-%
-
-From now on lists are always the predefined lists.
-
-
-\subsection*{Exercises}
-
-\begin{exercise}
-Use the \isacom{value} command to evaluate the following expressions:
-@{term[source] "1 + (2::nat)"}, @{term[source] "1 + (2::int)"},
-@{term[source] "1 - (2::nat)"} and @{term[source] "1 - (2::int)"}.
-\end{exercise}
-
-\begin{exercise}
-Start from the definition of @{const add} given above.
-Prove that @{const add} is associative and commutative.
-Define a recursive function @{text double} @{text"::"} @{typ"nat \<Rightarrow> nat"}
-and prove @{prop"double m = add m m"}.
-\end{exercise}
-
-\begin{exercise}
-Define a function @{text"count ::"} @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
-that counts the number of occurrences of an element in a list. Prove
-@{prop"count x xs \<le> length xs"}.
-\end{exercise}
-
-\begin{exercise}
-Define a recursive function @{text "snoc ::"} @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
-that appends an element to the end of a list. With the help of @{text snoc}
-define a recursive function @{text "reverse ::"} @{typ"'a list \<Rightarrow> 'a list"}
-that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}.
-\end{exercise}
-
-\begin{exercise}
-Define a recursive function @{text "sum ::"} @{typ"nat \<Rightarrow> nat"} such that
-\mbox{@{text"sum n"}} @{text"="} @{text"0 + ... + n"} and prove
-@{prop" sum(n::nat) = n * (n+1) div 2"}.
-\end{exercise}
-*}
-(*<*)
-end
-(*>*)
--- a/src/Doc/ProgProve/Isar.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1124 +0,0 @@
-(*<*)
-theory Isar
-imports LaTeXsugar
-begin
-declare [[quick_and_dirty]]
-(*>*)
-text{*
-Apply-scripts are unreadable and hard to maintain. The language of choice
-for larger proofs is \concept{Isar}. The two key features of Isar are:
-\begin{itemize}
-\item It is structured, not linear.
-\item It is readable without running it because
-you need to state what you are proving at any given point.
-\end{itemize}
-Whereas apply-scripts are like assembly language programs, Isar proofs
-are like structured programs with comments. A typical Isar proof looks like this:
-*}text{*
-\begin{tabular}{@ {}l}
-\isacom{proof}\\
-\quad\isacom{assume} @{text"\""}$\mathit{formula}_0$@{text"\""}\\
-\quad\isacom{have} @{text"\""}$\mathit{formula}_1$@{text"\""} \quad\isacom{by} @{text simp}\\
-\quad\vdots\\
-\quad\isacom{have} @{text"\""}$\mathit{formula}_n$@{text"\""} \quad\isacom{by} @{text blast}\\
-\quad\isacom{show} @{text"\""}$\mathit{formula}_{n+1}$@{text"\""} \quad\isacom{by} @{text \<dots>}\\
-\isacom{qed}
-\end{tabular}
-*}text{*
-It proves $\mathit{formula}_0 \Longrightarrow \mathit{formula}_{n+1}$
-(provided each proof step succeeds).
-The intermediate \isacom{have} statements are merely stepping stones
-on the way towards the \isacom{show} statement that proves the actual
-goal. In more detail, this is the Isar core syntax:
-\medskip
-
-\begin{tabular}{@ {}lcl@ {}}
-\textit{proof} &=& \indexed{\isacom{by}}{by} \textit{method}\\
-      &$\mid$& \indexed{\isacom{proof}}{proof} [\textit{method}] \ \textit{step}$^*$ \ \indexed{\isacom{qed}}{qed}
-\end{tabular}
-\medskip
-
-\begin{tabular}{@ {}lcl@ {}}
-\textit{step} &=& \indexed{\isacom{fix}}{fix} \textit{variables} \\
-      &$\mid$& \indexed{\isacom{assume}}{assume} \textit{proposition} \\
-      &$\mid$& [\indexed{\isacom{from}}{from} \textit{fact}$^+$] (\indexed{\isacom{have}}{have} $\mid$ \indexed{\isacom{show}}{show}) \ \textit{proposition} \ \textit{proof}
-\end{tabular}
-\medskip
-
-\begin{tabular}{@ {}lcl@ {}}
-\textit{proposition} &=& [\textit{name}:] @{text"\""}\textit{formula}@{text"\""}
-\end{tabular}
-\medskip
-
-\begin{tabular}{@ {}lcl@ {}}
-\textit{fact} &=& \textit{name} \ $\mid$ \ \dots
-\end{tabular}
-\medskip
-
-\noindent A proof can either be an atomic \isacom{by} with a single proof
-method which must finish off the statement being proved, for example @{text
-auto}.  Or it can be a \isacom{proof}--\isacom{qed} block of multiple
-steps. Such a block can optionally begin with a proof method that indicates
-how to start off the proof, e.g.\ \mbox{@{text"(induction xs)"}}.
-
-A step either assumes a proposition or states a proposition
-together with its proof. The optional \isacom{from} clause
-indicates which facts are to be used in the proof.
-Intermediate propositions are stated with \isacom{have}, the overall goal
-with \isacom{show}. A step can also introduce new local variables with
-\isacom{fix}. Logically, \isacom{fix} introduces @{text"\<And>"}-quantified
-variables, \isacom{assume} introduces the assumption of an implication
-(@{text"\<Longrightarrow>"}) and \isacom{have}/\isacom{show} the conclusion.
-
-Propositions are optionally named formulas. These names can be referred to in
-later \isacom{from} clauses. In the simplest case, a fact is such a name.
-But facts can also be composed with @{text OF} and @{text of} as shown in
-\S\ref{sec:forward-proof}---hence the \dots\ in the above grammar.  Note
-that assumptions, intermediate \isacom{have} statements and global lemmas all
-have the same status and are thus collectively referred to as
-\conceptidx{facts}{fact}.
-
-Fact names can stand for whole lists of facts. For example, if @{text f} is
-defined by command \isacom{fun}, @{text"f.simps"} refers to the whole list of
-recursion equations defining @{text f}. Individual facts can be selected by
-writing @{text"f.simps(2)"}, whole sublists by @{text"f.simps(2-4)"}.
-
-
-\section{Isar by Example}
-
-We show a number of proofs of Cantor's theorem that a function from a set to
-its powerset cannot be surjective, illustrating various features of Isar. The
-constant @{const surj} is predefined.
-*}
-
-lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
-proof
-  assume 0: "surj f"
-  from 0 have 1: "\<forall>A. \<exists>a. A = f a" by(simp add: surj_def)
-  from 1 have 2: "\<exists>a. {x. x \<notin> f x} = f a" by blast
-  from 2 show "False" by blast
-qed
-
-text{*
-The \isacom{proof} command lacks an explicit method how to perform
-the proof. In such cases Isabelle tries to use some standard introduction
-rule, in the above case for @{text"\<not>"}:
-\[
-\inferrule{
-\mbox{@{thm (prem 1) notI}}}
-{\mbox{@{thm (concl) notI}}}
-\]
-In order to prove @{prop"~ P"}, assume @{text P} and show @{text False}.
-Thus we may assume @{prop"surj f"}. The proof shows that names of propositions
-may be (single!) digits---meaningful names are hard to invent and are often
-not necessary. Both \isacom{have} steps are obvious. The second one introduces
-the diagonal set @{term"{x. x \<notin> f x}"}, the key idea in the proof.
-If you wonder why @{text 2} directly implies @{text False}: from @{text 2}
-it follows that @{prop"a \<notin> f a \<longleftrightarrow> a \<in> f a"}.
-
-\subsection{\indexed{@{text this}}{this}, \indexed{\isacom{then}}{then}, \indexed{\isacom{hence}}{hence} and \indexed{\isacom{thus}}{thus}}
-
-Labels should be avoided. They interrupt the flow of the reader who has to
-scan the context for the point where the label was introduced. Ideally, the
-proof is a linear flow, where the output of one step becomes the input of the
-next step, piping the previously proved fact into the next proof, just like
-in a UNIX pipe. In such cases the predefined name @{text this} can be used
-to refer to the proposition proved in the previous step. This allows us to
-eliminate all labels from our proof (we suppress the \isacom{lemma} statement):
-*}
-(*<*)
-lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
-(*>*)
-proof
-  assume "surj f"
-  from this have "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
-  from this show "False" by blast
-qed
-
-text{* We have also taken the opportunity to compress the two \isacom{have}
-steps into one.
-
-To compact the text further, Isar has a few convenient abbreviations:
-\medskip
-
-\begin{tabular}{r@ {\quad=\quad}l}
-\isacom{then} & \isacom{from} @{text this}\\
-\isacom{thus} & \isacom{then} \isacom{show}\\
-\isacom{hence} & \isacom{then} \isacom{have}
-\end{tabular}
-\medskip
-
-\noindent
-With the help of these abbreviations the proof becomes
-*}
-(*<*)
-lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
-(*>*)
-proof
-  assume "surj f"
-  hence "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
-  thus "False" by blast
-qed
-text{*
-
-There are two further linguistic variations:
-\medskip
-
-\begin{tabular}{r@ {\quad=\quad}l}
-(\isacom{have}$\mid$\isacom{show}) \ \textit{prop} \ \indexed{\isacom{using}}{using} \ \textit{facts}
-&
-\isacom{from} \ \textit{facts} \ (\isacom{have}$\mid$\isacom{show}) \ \textit{prop}\\
-\indexed{\isacom{with}}{with} \ \textit{facts} & \isacom{from} \ \textit{facts} \isa{this}
-\end{tabular}
-\medskip
-
-\noindent The \isacom{using} idiom de-emphasizes the used facts by moving them
-behind the proposition.
-
-\subsection{Structured Lemma Statements: \indexed{\isacom{fixes}}{fixes}, \indexed{\isacom{assumes}}{assumes}, \indexed{\isacom{shows}}{shows}}
-\index{lemma@\isacom{lemma}}
-Lemmas can also be stated in a more structured fashion. To demonstrate this
-feature with Cantor's theorem, we rephrase @{prop"\<not> surj f"}
-a little:
-*}
-
-lemma
-  fixes f :: "'a \<Rightarrow> 'a set"
-  assumes s: "surj f"
-  shows "False"
-
-txt{* The optional \isacom{fixes} part allows you to state the types of
-variables up front rather than by decorating one of their occurrences in the
-formula with a type constraint. The key advantage of the structured format is
-the \isacom{assumes} part that allows you to name each assumption; multiple
-assumptions can be separated by \isacom{and}. The
-\isacom{shows} part gives the goal. The actual theorem that will come out of
-the proof is @{prop"surj f \<Longrightarrow> False"}, but during the proof the assumption
-@{prop"surj f"} is available under the name @{text s} like any other fact.
-*}
-
-proof -
-  have "\<exists> a. {x. x \<notin> f x} = f a" using s
-    by(auto simp: surj_def)
-  thus "False" by blast
-qed
-
-text{*
-\begin{warn}
-Note the hyphen after the \isacom{proof} command.
-It is the null method that does nothing to the goal. Leaving it out would ask
-Isabelle to try some suitable introduction rule on the goal @{const False}---but
-there is no such rule and \isacom{proof} would fail.
-\end{warn}
-In the \isacom{have} step the assumption @{prop"surj f"} is now
-referenced by its name @{text s}. The duplication of @{prop"surj f"} in the
-above proofs (once in the statement of the lemma, once in its proof) has been
-eliminated.
-
-Stating a lemma with \isacom{assumes}-\isacom{shows} implicitly introduces the
-name \indexed{@{text assms}}{assms} that stands for the list of all assumptions. You can refer
-to individual assumptions by @{text"assms(1)"}, @{text"assms(2)"} etc,
-thus obviating the need to name them individually.
-
-\section{Proof Patterns}
-
-We show a number of important basic proof patterns. Many of them arise from
-the rules of natural deduction that are applied by \isacom{proof} by
-default. The patterns are phrased in terms of \isacom{show} but work for
-\isacom{have} and \isacom{lemma}, too.
-
-We start with two forms of \concept{case analysis}:
-starting from a formula @{text P} we have the two cases @{text P} and
-@{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
-we have the two cases @{text P} and @{text Q}:
-*}text_raw{*
-\begin{tabular}{@ {}ll@ {}}
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "R" proof-(*>*)
-show "R"
-proof cases
-  assume "P"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-next
-  assume "\<not> P"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)oops(*>*)
-text_raw {* }
-\end{minipage}\index{cases@@{text cases}}
-&
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "R" proof-(*>*)
-have "P \<or> Q" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-then show "R"
-proof
-  assume "P"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-next
-  assume "Q"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-\end{tabular}
-\medskip
-\begin{isamarkuptext}%
-How to prove a logical equivalence:
-\end{isamarkuptext}%
-\isa{%
-*}
-(*<*)lemma "P\<longleftrightarrow>Q" proof-(*>*)
-show "P \<longleftrightarrow> Q"
-proof
-  assume "P"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "Q" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-next
-  assume "Q"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-qed(*<*)qed(*>*)
-text_raw {* }
-\medskip
-\begin{isamarkuptext}%
-Proofs by contradiction (@{thm[source] ccontr} stands for ``classical contradiction''):
-\end{isamarkuptext}%
-\begin{tabular}{@ {}ll@ {}}
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "\<not> P" proof-(*>*)
-show "\<not> P"
-proof
-  assume "P"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "False" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-&
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "P" proof-(*>*)
-show "P"
-proof (rule ccontr)
-  assume "\<not>P"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "False" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-\end{tabular}
-\medskip
-\begin{isamarkuptext}%
-How to prove quantified formulas:
-\end{isamarkuptext}%
-\begin{tabular}{@ {}ll@ {}}
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "ALL x. P x" proof-(*>*)
-show "\<forall>x. P(x)"
-proof
-  fix x
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P(x)" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-&
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "EX x. P(x)" proof-(*>*)
-show "\<exists>x. P(x)"
-proof
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P(witness)" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed
-(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-\end{tabular}
-\medskip
-\begin{isamarkuptext}%
-In the proof of \noquotes{@{prop[source]"\<forall>x. P(x)"}},
-the step \indexed{\isacom{fix}}{fix}~@{text x} introduces a locally fixed variable @{text x}
-into the subproof, the proverbial ``arbitrary but fixed value''.
-Instead of @{text x} we could have chosen any name in the subproof.
-In the proof of \noquotes{@{prop[source]"\<exists>x. P(x)"}},
-@{text witness} is some arbitrary
-term for which we can prove that it satisfies @{text P}.
-
-How to reason forward from \noquotes{@{prop[source] "\<exists>x. P(x)"}}:
-\end{isamarkuptext}%
-*}
-(*<*)lemma True proof- assume 1: "EX x. P x"(*>*)
-have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)txt_raw{*\ $\dots$\\*}
-then obtain x where p: "P(x)" by blast
-(*<*)oops(*>*)
-text{*
-After the \indexed{\isacom{obtain}}{obtain} step, @{text x} (we could have chosen any name)
-is a fixed local
-variable, and @{text p} is the name of the fact
-\noquotes{@{prop[source] "P(x)"}}.
-This pattern works for one or more @{text x}.
-As an example of the \isacom{obtain} command, here is the proof of
-Cantor's theorem in more detail:
-*}
-
-lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
-proof
-  assume "surj f"
-  hence  "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
-  then obtain a where  "{x. x \<notin> f x} = f a"  by blast
-  hence  "a \<notin> f a \<longleftrightarrow> a \<in> f a"  by blast
-  thus "False" by blast
-qed
-
-text_raw{*
-\begin{isamarkuptext}%
-
-Finally, how to prove set equality and subset relationship:
-\end{isamarkuptext}%
-\begin{tabular}{@ {}ll@ {}}
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "A = (B::'a set)" proof-(*>*)
-show "A = B"
-proof
-  show "A \<subseteq> B" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-next
-  show "B \<subseteq> A" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)qed(*>*)
-
-text_raw {* }
-\end{minipage}
-&
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "A <= (B::'a set)" proof-(*>*)
-show "A \<subseteq> B"
-proof
-  fix x
-  assume "x \<in> A"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "x \<in> B" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)qed(*>*)
-
-text_raw {* }
-\end{minipage}
-\end{tabular}
-\begin{isamarkuptext}%
-\section{Streamlining Proofs}
-
-\subsection{Pattern Matching and Quotations}
-
-In the proof patterns shown above, formulas are often duplicated.
-This can make the text harder to read, write and maintain. Pattern matching
-is an abbreviation mechanism to avoid such duplication. Writing
-\begin{quote}
-\isacom{show} \ \textit{formula} @{text"("}\indexed{\isacom{is}}{is} \textit{pattern}@{text")"}
-\end{quote}
-matches the pattern against the formula, thus instantiating the unknowns in
-the pattern for later use. As an example, consider the proof pattern for
-@{text"\<longleftrightarrow>"}:
-\end{isamarkuptext}%
-*}
-(*<*)lemma "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" proof-(*>*)
-show "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" (is "?L \<longleftrightarrow> ?R")
-proof
-  assume "?L"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "?R" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-next
-  assume "?R"
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "?L" (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-qed(*<*)qed(*>*)
-
-text{* Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
-the two abbreviations @{text"?L"} and @{text"?R"} by pattern matching.
-Pattern matching works wherever a formula is stated, in particular
-with \isacom{have} and \isacom{lemma}.
-
-The unknown \indexed{@{text"?thesis"}}{thesis} is implicitly matched against any goal stated by
-\isacom{lemma} or \isacom{show}. Here is a typical example: *}
-
-lemma "formula"
-proof -
-  txt_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show ?thesis (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-qed
-
-text{* 
-Unknowns can also be instantiated with \indexed{\isacom{let}}{let} commands
-\begin{quote}
-\isacom{let} @{text"?t"} = @{text"\""}\textit{some-big-term}@{text"\""}
-\end{quote}
-Later proof steps can refer to @{text"?t"}:
-\begin{quote}
-\isacom{have} @{text"\""}\dots @{text"?t"} \dots@{text"\""}
-\end{quote}
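-For instance, one might abbreviate a term that occurs several times and
-reuse the abbreviation in later steps. The following little proof is added
-merely as an illustration; the lemma and the abbreviated term are an
-arbitrary choice:
-*}
-
-lemma fixes a b :: nat shows "(a+b)*(a+b) = (a+b)*a + (a+b)*b"
-proof -
-  let ?t = "a + b"
-  have "?t * ?t = ?t * a + ?t * b" by (simp add: algebra_simps)
-  thus ?thesis by simp
-qed
-
-text{*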
-\begin{warn}
-Names of facts are introduced with @{text"name:"} and refer to proved
-theorems. Unknowns @{text"?X"} refer to terms or formulas.
-\end{warn}
-
-Although abbreviations shorten the text, the reader needs to remember what
-they stand for. Similarly for names of facts. Names like @{text 1}, @{text 2}
-and @{text 3} are not helpful and should only be used in short proofs. For
-longer proofs, descriptive names are better. But look at this example:
-\begin{quote}
-\isacom{have} \ @{text"x_gr_0: \"x > 0\""}\\
-$\vdots$\\
-\isacom{from} @{text "x_gr_0"} \dots
-\end{quote}
-The name is longer than the fact it stands for! Short facts do not need names;
-one can refer to them easily by quoting them:
-\begin{quote}
-\isacom{have} \ @{text"\"x > 0\""}\\
-$\vdots$\\
-\isacom{from} @{text "`x>0`"} \dots\index{$IMP053@@{text"`...`"}}
-\end{quote}
-Note that the quotes around @{text"x>0"} are \conceptnoidx{back quotes}.
-They refer to the fact not by name but by value.
-
-\subsection{\indexed{\isacom{moreover}}{moreover}}
-\index{ultimately@\isacom{ultimately}}
-
-Sometimes one needs a number of facts to enable some deduction. Of course
-one can name these facts individually, as shown on the right,
-but one can also combine them with \isacom{moreover}, as shown on the left:
-*}text_raw{*
-\begin{tabular}{@ {}ll@ {}}
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "P" proof-(*>*)
-have "P\<^sub>1" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-moreover have "P\<^sub>2" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-moreover
-txt_raw{*\\$\vdots$\\\hspace{-1.4ex}*}(*<*)have "True" ..(*>*)
-moreover have "P\<^sub>n" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-ultimately have "P"  (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-&
-\qquad
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "P" proof-(*>*)
-have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)txt_raw{*\ $\dots$*}
-txt_raw{*\\$\vdots$\\\hspace{-1.4ex}*}
-have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-from lab\<^sub>1 lab\<^sub>2 txt_raw{*\ $\dots$\\*}
-have "P"  (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-(*<*)oops(*>*)
-
-text_raw {* }
-\end{minipage}
-\end{tabular}
-\begin{isamarkuptext}%
-The \isacom{moreover} version is no shorter but expresses the structure more
-clearly and avoids new names.
-
-\subsection{Raw Proof Blocks}
-
-Sometimes one would like to prove some lemma locally within a proof,
-a lemma that shares the current context of assumptions but that
-has its own assumptions and is generalized over its locally fixed
-variables at the end. This is what a \concept{raw proof block} does:
-\begin{quote}\index{$IMP053@@{text"{ ... }"} (proof block)}
-@{text"{"} \isacom{fix} @{text"x\<^sub>1 \<dots> x\<^sub>n"}\\
-\mbox{}\ \ \ \isacom{assume} @{text"A\<^sub>1 \<dots> A\<^sub>m"}\\
-\mbox{}\ \ \ $\vdots$\\
-\mbox{}\ \ \ \isacom{have} @{text"B"}\\
-@{text"}"}
-\end{quote}
-proves @{text"\<lbrakk> A\<^sub>1; \<dots> ; A\<^sub>m \<rbrakk> \<Longrightarrow> B"}
-where all @{text"x\<^sub>i"} have been replaced by unknowns @{text"?x\<^sub>i"}.
-\begin{warn}
-The conclusion of a raw proof block is \emph{not} indicated by \isacom{show}
-but is simply the final \isacom{have}.
-\end{warn}
-
-As an example we prove a simple fact about divisibility on integers.
-The definition of @{text "dvd"} is @{thm dvd_def}.
-\end{isamarkuptext}%
-*}
-
-lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
-proof -
-  { fix k assume k: "a+b = b*k"
-    have "\<exists>k'. a = b*k'"
-    proof
-      show "a = b*(k - 1)" using k by(simp add: algebra_simps)
-    qed }
-  then show ?thesis using assms by(auto simp add: dvd_def)
-qed
-
-text{* Note that the result of a raw proof block has no name. In this example
-it was directly piped (via \isacom{then}) into the final proof, but it can
-also be named for later reference: you simply follow the block directly by a
-\isacom{note} command:
-\begin{quote}
-\indexed{\isacom{note}}{note} \ @{text"name = this"}
-\end{quote}
-This introduces a new name @{text name} that refers to @{text this},
-the fact just proved, in this case the preceding block. In general,
-\isacom{note} introduces a new name for one or more facts.
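-
-For instance, the divisibility proof above can be rephrased so that the
-result of the raw proof block is named before it is used; this is merely a
-variant of the earlier proof and the name @{text ex} is an arbitrary choice:
-*}
-
-lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
-proof -
-  { fix k assume k: "a+b = b*k"
-    have "\<exists>k'. a = b*k'"
-    proof
-      show "a = b*(k - 1)" using k by(simp add: algebra_simps)
-    qed }
-  note ex = this
-  from ex show ?thesis using assms by(auto simp add: dvd_def)
-qed
-
-text{*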
-
-\subsection*{Exercises}
-
-\exercise
-Give a readable, structured proof of the following lemma:
-*}
-lemma assumes T: "\<forall>x y. T x y \<or> T y x"
-  and A: "\<forall>x y. A x y \<and> A y x \<longrightarrow> x = y"
-  and TA: "\<forall>x y. T x y \<longrightarrow> A x y" and "A x y"
-  shows "T x y"
-(*<*)oops(*>*)
-text{*
-\endexercise
-
-\exercise
-Give a readable, structured proof of the following lemma:
-*}
-lemma "(\<exists>ys zs. xs = ys @ zs \<and> length ys = length zs)
-      \<or> (\<exists>ys zs. xs = ys @ zs \<and> length ys = length zs + 1)"
-(*<*)oops(*>*)
-text{*
-Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
-such that @{text"take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]"} and
-@{text"drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]"}. Let sledgehammer find and apply
-the relevant @{const take} and @{const drop} lemmas for you.
-\endexercise
-
-
-\section{Case Analysis and Induction}
-
-\subsection{Datatype Case Analysis}
-\index{case analysis|(}
-
-We have seen case analysis on formulas. Now we want to distinguish
-which form some term takes: is it @{text 0} or of the form @{term"Suc n"},
-is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
-proof by case analysis on the form of @{text xs}:
-*}
-
-lemma "length(tl xs) = length xs - 1"
-proof (cases xs)
-  assume "xs = []"
-  thus ?thesis by simp
-next
-  fix y ys assume "xs = y#ys"
-  thus ?thesis by simp
-qed
-
-text{*\index{cases@@{text"cases"}|(}Function @{text tl} (``tail'') is defined by @{thm list.sel(2)} and
-@{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
-and @{prop"0 - 1 = (0::nat)"}.
-
-This proof pattern works for any term @{text t} whose type is a datatype.
-The goal has to be proved for each constructor @{text C}:
-\begin{quote}
-\isacom{fix} \ @{text"x\<^sub>1 \<dots> x\<^sub>n"} \isacom{assume} @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""}
-\end{quote}\index{case@\isacom{case}|(}
-Each case can be written in a more compact form by means of the \isacom{case}
-command:
-\begin{quote}
-\isacom{case} @{text "(C x\<^sub>1 \<dots> x\<^sub>n)"}
-\end{quote}
-This is equivalent to the explicit \isacom{fix}-\isacom{assume} line
-but also gives the assumption @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""} a name: @{text C},
-like the constructor.
-Here is the \isacom{case} version of the proof above:
-*}
-(*<*)lemma "length(tl xs) = length xs - 1"(*>*)
-proof (cases xs)
-  case Nil
-  thus ?thesis by simp
-next
-  case (Cons y ys)
-  thus ?thesis by simp
-qed
-
-text{* Remember that @{text Nil} and @{text Cons} are the alphanumeric names
-for @{text"[]"} and @{text"#"}. The names of the assumptions
-are not used because they are directly piped (via \isacom{thus})
-into the proof of the claim.
-\index{case analysis|)}
-
-\subsection{Structural Induction}
-\index{induction|(}
-\index{structural induction|(}
-
-We illustrate structural induction with an example based on natural numbers:
-the sum (@{text"\<Sum>"}) of the first @{text n} natural numbers
-(@{text"{0..n::nat}"}) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
-Never mind the details, just focus on the pattern:
-*}
-
-lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"
-proof (induction n)
-  show "\<Sum>{0..0::nat} = 0*(0+1) div 2" by simp
-next
-  fix n assume "\<Sum>{0..n::nat} = n*(n+1) div 2"
-  thus "\<Sum>{0..Suc n} = Suc n*(Suc n+1) div 2" by simp
-qed
-
-text{* Except for the rewrite steps, everything is explicitly given. This
-makes the proof easily readable, but the duplication means it is tedious to
-write and maintain. Here is how pattern
-matching can completely avoid any duplication: *}
-
-lemma "\<Sum>{0..n::nat} = n*(n+1) div 2" (is "?P n")
-proof (induction n)
-  show "?P 0" by simp
-next
-  fix n assume "?P n"
-  thus "?P(Suc n)" by simp
-qed
-
-text{* The first line introduces an abbreviation @{text"?P n"} for the goal.
-Pattern matching @{text"?P n"} with the goal instantiates @{text"?P"} to the
-function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}.  Now the proposition to
-be proved in the base case can be written as @{text"?P 0"}, the induction
-hypothesis as @{text"?P n"}, and the conclusion of the induction step as
-@{text"?P(Suc n)"}.
-
-Induction also provides the \isacom{case} idiom that abbreviates
-the \isacom{fix}-\isacom{assume} step. The above proof becomes
-*}
-(*<*)lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"(*>*)
-proof (induction n)
-  case 0
-  show ?case by simp
-next
-  case (Suc n)
-  thus ?case by simp
-qed
-
-text{*
-The unknown @{text"?case"}\index{case?@@{text"?case"}|(} is set in each case to the required
-claim, i.e.\ @{text"?P 0"} and \mbox{@{text"?P(Suc n)"}} in the above proof,
-without requiring the user to define a @{text "?P"}. The general
-pattern for induction over @{typ nat} is shown on the left-hand side:
-*}text_raw{*
-\begin{tabular}{@ {}ll@ {}}
-\begin{minipage}[t]{.4\textwidth}
-\isa{%
-*}
-(*<*)lemma "P(n::nat)" proof -(*>*)
-show "P(n)"
-proof (induction n)
-  case 0
-  txt_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-next
-  case (Suc n)
-  txt_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*) txt_raw{*\ $\dots$\\*}
-qed(*<*)qed(*>*)
-
-text_raw {* }
-\end{minipage}
-&
-\begin{minipage}[t]{.4\textwidth}
-~\\
-~\\
-\isacom{let} @{text"?case = \"P(0)\""}\\
-~\\
-~\\
-~\\[1ex]
-\isacom{fix} @{text n} \isacom{assume} @{text"Suc: \"P(n)\""}\\
-\isacom{let} @{text"?case = \"P(Suc n)\""}\\
-\end{minipage}
-\end{tabular}
-\medskip
-*}
-text{*
-On the right side you can see what the \isacom{case} command
-on the left stands for.
-
-In case the goal is an implication, induction does one more thing: the
-proposition to be proved in each case is not the whole implication but only
-its conclusion; the premises of the implication are immediately made
-assumptions of that case. That is, if in the above proof we replace
-\isacom{show}~@{text"\"P(n)\""} by
-\mbox{\isacom{show}~@{text"\"A(n) \<Longrightarrow> P(n)\""}}
-then \isacom{case}~@{text 0} stands for
-\begin{quote}
-\isacom{assume} \ @{text"0: \"A(0)\""}\\
-\isacom{let} @{text"?case = \"P(0)\""}
-\end{quote}
-and \isacom{case}~@{text"(Suc n)"} stands for
-\begin{quote}
-\isacom{fix} @{text n}\\
-\isacom{assume} @{text"Suc:"}
-  \begin{tabular}[t]{l}@{text"\"A(n) \<Longrightarrow> P(n)\""}\\@{text"\"A(Suc n)\""}\end{tabular}\\
-\isacom{let} @{text"?case = \"P(Suc n)\""}
-\end{quote}
-The list of assumptions @{text Suc} is actually subdivided
-into @{text"Suc.IH"}, the induction hypotheses (here @{text"A(n) \<Longrightarrow> P(n)"})
-and @{text"Suc.prems"}, the premises of the goal being proved
-(here @{text"A(Suc n)"}).
-
-Induction works for any datatype.
-Proving a goal @{text"\<lbrakk> A\<^sub>1(x); \<dots>; A\<^sub>k(x) \<rbrakk> \<Longrightarrow> P(x)"}
-by induction on @{text x} generates a proof obligation for each constructor
-@{text C} of the datatype. The command \isacom{case}~@{text"(C x\<^sub>1 \<dots> x\<^sub>n)"}
-performs the following steps:
-\begin{enumerate}
-\item \isacom{fix} @{text"x\<^sub>1 \<dots> x\<^sub>n"}
-\item \isacom{assume} the induction hypotheses (calling them @{text C.IH}\index{IH@@{text".IH"}})
- and the premises \mbox{@{text"A\<^sub>i(C x\<^sub>1 \<dots> x\<^sub>n)"}} (calling them @{text"C.prems"}\index{prems@@{text".prems"}})
- and calling the whole list @{text C}
-\item \isacom{let} @{text"?case = \"P(C x\<^sub>1 \<dots> x\<^sub>n)\""}
-\end{enumerate}
-\index{structural induction|)}
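-
-As a small illustration of this general pattern, here is a proof by
-structural induction on lists of a standard fact about @{const rev} and
-list append; the proof is included only as a sketch and both cases happen
-to be automatic:
-*}
-
-lemma "rev(xs @ ys) = rev ys @ rev xs"
-proof (induction xs)
-  case Nil
-  show ?case by simp
-next
-  case (Cons z zs)
-  thus ?case by simp
-qed
-
-text{*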
-
-\subsection{Rule Induction}
-\index{rule induction|(}
-
-Recall the inductive and recursive definitions of even numbers in
-\autoref{sec:inductive-defs}:
-*}
-
-inductive ev :: "nat \<Rightarrow> bool" where
-ev0: "ev 0" |
-evSS: "ev n \<Longrightarrow> ev(Suc(Suc n))"
-
-fun even :: "nat \<Rightarrow> bool" where
-"even 0 = True" |
-"even (Suc 0) = False" |
-"even (Suc(Suc n)) = even n"
-
-text{* We recast the proof of @{prop"ev n \<Longrightarrow> even n"} in Isar. The
-left column shows the actual proof text, the right column shows
-the implicit effect of the two \isacom{case} commands:*}text_raw{*
-\begin{tabular}{@ {}l@ {\qquad}l@ {}}
-\begin{minipage}[t]{.5\textwidth}
-\isa{%
-*}
-
-lemma "ev n \<Longrightarrow> even n"
-proof(induction rule: ev.induct)
-  case ev0
-  show ?case by simp
-next
-  case evSS
-
-
-
-  thus ?case by simp
-qed
-
-text_raw {* }
-\end{minipage}
-&
-\begin{minipage}[t]{.5\textwidth}
-~\\
-~\\
-\isacom{let} @{text"?case = \"even 0\""}\\
-~\\
-~\\
-\isacom{fix} @{text n}\\
-\isacom{assume} @{text"evSS:"}
-  \begin{tabular}[t]{l} @{text"\"ev n\""}\\@{text"\"even n\""}\end{tabular}\\
-\isacom{let} @{text"?case = \"even(Suc(Suc n))\""}\\
-\end{minipage}
-\end{tabular}
-\medskip
-*}
-text{*
-The proof resembles structural induction, but the induction rule is given
-explicitly and the names of the cases are the names of the rules in the
-inductive definition.
-Let us examine the two assumptions named @{thm[source]evSS}:
-@{prop "ev n"} is the premise of rule @{thm[source]evSS}, which we may assume
-because we are in the case where that rule was used; @{prop"even n"}
-is the induction hypothesis.
-\begin{warn}
-Because each \isacom{case} command introduces a list of assumptions
-named like the case name, which is the name of a rule of the inductive
-definition, those rules now need to be accessed with a qualified name, here
-@{thm[source] ev.ev0} and @{thm[source] ev.evSS}.
-\end{warn}
-
-In the case @{thm[source]evSS} of the proof above we have pretended that the
-system fixes a variable @{text n}.  But unless the user provides the name
-@{text n}, the system will just invent its own name that cannot be referred
-to.  In the above proof, we do not need to refer to it; hence we do not give
-it a specific name. In case one needs to refer to it, one writes
-\begin{quote}
-\isacom{case} @{text"(evSS m)"}
-\end{quote}
-just like \isacom{case}~@{text"(Suc n)"} in earlier structural inductions.
-The name @{text m} is an arbitrary choice. As a result,
-case @{thm[source] evSS} is derived from a renamed version of
-rule @{thm[source] evSS}: @{text"ev m \<Longrightarrow> ev(Suc(Suc m))"}.
-Here is an example with a (contrived) intermediate step that refers to @{text m}:
-*}
-
-lemma "ev n \<Longrightarrow> even n"
-proof(induction rule: ev.induct)
-  case ev0 show ?case by simp
-next
-  case (evSS m)
-  have "even(Suc(Suc m)) = even m" by simp
-  thus ?case using `even m` by blast
-qed
-
-text{*
-\indent
-In general, let @{text I} be a (for simplicity unary) inductively defined
-predicate and let the rules in the definition of @{text I}
-be called @{text "rule\<^sub>1"}, \dots, @{text "rule\<^sub>n"}. A proof by rule
-induction follows this pattern:\index{inductionrule@@{text"induction ... rule:"}}
-*}
-
-(*<*)
-inductive I where rule\<^sub>1: "I()" |  rule\<^sub>2: "I()" |  rule\<^sub>n: "I()"
-lemma "I x \<Longrightarrow> P x" proof-(*>*)
-show "I x \<Longrightarrow> P x"
-proof(induction rule: I.induct)
-  case rule\<^sub>1
-  txt_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-next
-  txt_raw{*\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-(*<*)
-  case rule\<^sub>2
-  show ?case sorry
-(*>*)
-next
-  case rule\<^sub>n
-  txt_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*)txt_raw{*\ $\dots$\\*}
-qed(*<*)qed(*>*)
-
-text{*
-One can provide explicit variable names by writing
-\isacom{case}~@{text"(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)"}, thus renaming the first @{text k}
-free variables in rule @{text i} to @{text"x\<^sub>1 \<dots> x\<^sub>k"},
-going through rule @{text i} from left to right.
-
-\subsection{Assumption Naming}
-\label{sec:assm-naming}
-
-In any induction, \isacom{case}~@{text name} sets up a list of assumptions
-also called @{text name}, which is subdivided into three parts:
-\begin{description}
-\item[@{text name.IH}]\index{IH@@{text".IH"}} contains the induction hypotheses.
-\item[@{text name.hyps}]\index{hyps@@{text".hyps"}} contains all the other hypotheses of this case in the
-induction rule. For rule inductions these are the hypotheses of rule
-@{text name}, for structural inductions these are empty.
-\item[@{text name.prems}]\index{prems@@{text".prems"}} contains the (suitably instantiated) premises
-of the statement being proved, i.e. the @{text A\<^sub>i} when
-proving @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}.
-\end{description}
-\begin{warn}
-Proof method @{text induct} differs from @{text induction}
-only in this naming policy: @{text induct} does not distinguish
-@{text IH} from @{text hyps} but subsumes @{text IH} under @{text hyps}.
-\end{warn}
-
-More complicated inductive proofs than the ones we have seen so far
-often need to refer to specific assumptions---just @{text name} or even
-@{text name.prems} and @{text name.IH} can be too unspecific.
-This is where the indexing of fact lists comes in handy, e.g.\
-@{text"name.IH(2)"} or @{text"name.prems(1-2)"}.
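-
-The following little proof is added purely to illustrate the naming scheme:
-in the @{text Cons} case, @{text Cons.prems} is the premise
-@{prop"distinct(x#xs)"} and @{text Cons.IH} is the induction hypothesis.
-*}
-
-lemma "distinct xs \<Longrightarrow> distinct(rev xs)"
-proof (induction xs)
-  case Nil
-  show ?case by simp
-next
-  case (Cons x xs)
-  from Cons.prems have "x \<notin> set xs" "distinct xs" by simp_all
-  with Cons.IH show ?case by simp
-qed
-
-text{*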
-
-\subsection{Rule Inversion}
-\label{sec:rule-inversion}
-\index{rule inversion|(}
-
-Rule inversion is case analysis of which rule could have been used to
-derive some fact. The name \conceptnoidx{rule inversion} emphasizes that we are
-reasoning backwards: by which rules could some given fact have been proved?
-For the inductive definition of @{const ev}, rule inversion can be summarized
-like this:
-@{prop[display]"ev n \<Longrightarrow> n = 0 \<or> (EX k. n = Suc(Suc k) \<and> ev k)"}
-The realisation in Isabelle is a case analysis.
-A simple example is the proof that @{prop"ev n \<Longrightarrow> ev (n - 2)"}. We
-already went through the details informally in \autoref{sec:Logic:even}. This
-is the Isar proof:
-*}
-(*<*)
-notepad
-begin fix n
-(*>*)
-  assume "ev n"
-  from this have "ev(n - 2)"
-  proof cases
-    case ev0 thus "ev(n - 2)" by (simp add: ev.ev0)
-  next
-    case (evSS k) thus "ev(n - 2)" by (simp add: ev.evSS)
-  qed
-(*<*)
-end
-(*>*)
-
-text{* The key point here is that a case analysis over some inductively
-defined predicate is triggered by piping the given fact
-(here: \isacom{from}~@{text this}) into a proof by @{text cases}.
-Let us examine the assumptions available in each case. In case @{text ev0}
-we have @{text"n = 0"} and in case @{text evSS} we have @{prop"n = Suc(Suc k)"}
-and @{prop"ev k"}. In each case the assumptions are available under the name
-of the case; there is no fine-grained naming schema like for induction.
-
-Sometimes some rules could not have been used to derive the given fact
-because constructors clash. As an extreme example consider
-rule inversion applied to @{prop"ev(Suc 0)"}: neither rule @{text ev0} nor
-rule @{text evSS} can yield @{prop"ev(Suc 0)"} because @{text"Suc 0"} unifies
-neither with @{text 0} nor with @{term"Suc(Suc n)"}. Impossible cases do not
-have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
-*}
-(*<*)
-notepad begin fix P
-(*>*)
-  assume "ev(Suc 0)" then have P by cases
-(*<*)
-end
-(*>*)
-
-text{* That is, @{prop"ev(Suc 0)"} is simply not provable: *}
-
-lemma "\<not> ev(Suc 0)"
-proof
-  assume "ev(Suc 0)" then show False by cases
-qed
-
-text{* Normally not all cases will be impossible. As a simple exercise,
-prove that \mbox{@{prop"\<not> ev(Suc(Suc(Suc 0)))"}.}
-
-\subsection{Advanced Rule Induction}
-\label{sec:advanced-rule-induction}
-
-So far, rule induction was always applied to goals of the form @{text"I x y z \<Longrightarrow> \<dots>"}
-where @{text I} is some inductively defined predicate and @{text x}, @{text y}, @{text z}
-are variables. In some rare situations one needs to deal with an assumption where
-not all arguments @{text r}, @{text s}, @{text t} are variables:
-\begin{isabelle}
-\isacom{lemma} @{text[source]"I r s t \<Longrightarrow> \<dots>"}
-\end{isabelle}
-Applying the standard form of
-rule induction in such a situation will lead to strange and typically unprovable goals.
-We can easily reduce this situation to the standard one by introducing
-new variables @{text x}, @{text y}, @{text z} and reformulating the goal like this:
-\begin{isabelle}
-\isacom{lemma} @{text[source]"I x y z \<Longrightarrow> x = r \<Longrightarrow> y = s \<Longrightarrow> z = t \<Longrightarrow> \<dots>"}
-\end{isabelle}
-Standard rule induction will work fine now, provided the free variables in
-@{text r}, @{text s}, @{text t} are generalized via @{text"arbitrary"}.
-
-However, induction can do the above transformation for us, behind the curtains, so we never
-need to see the expanded version of the lemma. This is what we need to write:
-\begin{isabelle}
-\isacom{lemma} @{text[source]"I r s t \<Longrightarrow> \<dots>"}\isanewline
-\isacom{proof}@{text"(induction \"r\" \"s\" \"t\" arbitrary: \<dots> rule: I.induct)"}\index{inductionrule@@{text"induction ... rule:"}}\index{arbitrary@@{text"arbitrary:"}}
-\end{isabelle}
-Just like for rule inversion, cases that are impossible because of constructor clashes
-will not show up at all. Here is a concrete example: *}
-
-lemma "ev (Suc m) \<Longrightarrow> \<not> ev m"
-proof(induction "Suc m" arbitrary: m rule: ev.induct)
-  fix n assume IH: "\<And>m. n = Suc m \<Longrightarrow> \<not> ev m"
-  show "\<not> ev (Suc n)"
-  proof --"contradiction"
-    assume "ev(Suc n)"
-    thus False
-    proof cases --"rule inversion"
-      fix k assume "n = Suc k" "ev k"
-      thus False using IH by auto
-    qed
-  qed
-qed
-
-text{*
-Remarks:
-\begin{itemize}
-\item 
-Instead of the \isacom{case} and @{text ?case} magic we have spelled all formulas out.
-This is merely for greater clarity.
-\item
-We only need to deal with one case because the @{thm[source] ev0} case is impossible.
-\item
-The form of the @{text IH} shows us that internally the lemma was expanded as explained
-above: \noquotes{@{prop[source]"ev x \<Longrightarrow> x = Suc m \<Longrightarrow> \<not> ev m"}}.
-\item
-The goal @{prop"\<not> ev (Suc n)"} may surprise you. The expanded version of the lemma
-would suggest that we have a \isacom{fix} @{text m} \isacom{assume} @{prop"Suc(Suc n) = Suc m"}
-and need to show @{prop"\<not> ev m"}. What happened is that Isabelle immediately
-simplified @{prop"Suc(Suc n) = Suc m"} to @{prop"Suc n = m"} and could then eliminate
-@{text m}. Beware of such nice surprises with this advanced form of induction.
-\end{itemize}
-\begin{warn}
-This advanced form of induction does not support the @{text IH}
-naming schema explained in \autoref{sec:assm-naming}:
-the induction hypotheses are instead found under the name @{text hyps}, like for the simpler
-@{text induct} method.
-\end{warn}
-\index{induction|)}
-\index{cases@@{text"cases"}|)}
-\index{case@\isacom{case}|)}
-\index{case?@@{text"?case"}|)}
-\index{rule induction|)}
-\index{rule inversion|)}
-
-\subsection*{Exercises}
-
-
-\exercise
-Give a structured proof by rule inversion:
-*}
-
-lemma assumes a: "ev(Suc(Suc n))" shows "ev n"
-(*<*)oops(*>*)
-
-text{*
-\endexercise
-
-\begin{exercise}
-Give a structured proof of @{prop "\<not> ev(Suc(Suc(Suc 0)))"}
-by rule inversions. If there are no cases to be proved, you can close
-a proof immediately with \isacom{qed}.
-\end{exercise}
-
-\begin{exercise}
-Recall predicate @{text star} from \autoref{sec:star} and @{text iter}
-from Exercise~\ref{exe:iter}. Prove @{prop "iter r n x y \<Longrightarrow> star r x y"}
-in a structured style; do not just sledgehammer each case of the
-required induction.
-\end{exercise}
-
-\begin{exercise}
-Define a recursive function @{text "elems ::"} @{typ"'a list \<Rightarrow> 'a set"}
-and prove @{prop "x : elems xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> elems ys"}.
-\end{exercise}
-*}
-
-(*<*)
-end
-(*>*)
--- a/src/Doc/ProgProve/LaTeXsugar.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-(*  Title:      HOL/Library/LaTeXsugar.thy
-    Author:     Gerwin Klein, Tobias Nipkow, Norbert Schirmer
-    Copyright   2005 NICTA and TUM
-*)
-
-(*<*)
-theory LaTeXsugar
-imports Main
-begin
-
-(* DUMMY *)
-consts DUMMY :: 'a ("\<^raw:\_>")
-
-(* THEOREMS *)
-notation (Rule output)
-  Pure.imp  ("\<^raw:\mbox{}\inferrule{\mbox{>_\<^raw:}}>\<^raw:{\mbox{>_\<^raw:}}>")
-
-syntax (Rule output)
-  "_bigimpl" :: "asms \<Rightarrow> prop \<Rightarrow> prop"
-  ("\<^raw:\mbox{}\inferrule{>_\<^raw:}>\<^raw:{\mbox{>_\<^raw:}}>")
-
-  "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" 
-  ("\<^raw:\mbox{>_\<^raw:}\\>/ _")
-
-  "_asm" :: "prop \<Rightarrow> asms" ("\<^raw:\mbox{>_\<^raw:}>")
-
-notation (Axiom output)
-  "Trueprop"  ("\<^raw:\mbox{}\inferrule{\mbox{}}{\mbox{>_\<^raw:}}>")
-
-notation (IfThen output)
-  Pure.imp  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _/ \<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
-syntax (IfThen output)
-  "_bigimpl" :: "asms \<Rightarrow> prop \<Rightarrow> prop"
-  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _ /\<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
-  "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("\<^raw:\mbox{>_\<^raw:}> /\<^raw:{\normalsize \,>and\<^raw:\,}>/ _")
-  "_asm" :: "prop \<Rightarrow> asms" ("\<^raw:\mbox{>_\<^raw:}>")
-
-notation (IfThenNoBox output)
-  Pure.imp  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _/ \<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
-syntax (IfThenNoBox output)
-  "_bigimpl" :: "asms \<Rightarrow> prop \<Rightarrow> prop"
-  ("\<^raw:{\normalsize{}>If\<^raw:\,}> _ /\<^raw:{\normalsize \,>then\<^raw:\,}>/ _.")
-  "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("_ /\<^raw:{\normalsize \,>and\<^raw:\,}>/ _")
-  "_asm" :: "prop \<Rightarrow> asms" ("_")
-
-setup{*
-  let
-    fun pretty ctxt c =
-      let val tc = Proof_Context.read_const {proper = true, strict = false} ctxt c
-      in Pretty.block [Thy_Output.pretty_term ctxt tc, Pretty.str " ::",
-            Pretty.brk 1, Syntax.pretty_typ ctxt (fastype_of tc)]
-      end
-  in
-    Thy_Output.antiquotation @{binding "const_typ"}
-     (Scan.lift Args.name_inner_syntax)
-       (fn {source = src, context = ctxt, ...} => fn arg =>
-          Thy_Output.output ctxt
-            (Thy_Output.maybe_pretty_source pretty ctxt src [arg]))
-  end;
-*}
-
-end
-(*>*)
\ No newline at end of file
--- a/src/Doc/ProgProve/Logic.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,883 +0,0 @@
-(*<*)
-theory Logic
-imports LaTeXsugar
-begin
-(*>*)
-text{*
-\vspace{-5ex}
-\section{Formulas}
-
-The core syntax of formulas (\textit{form} below)
-provides the standard logical constructs, in decreasing order of precedence:
-\[
-\begin{array}{rcl}
-
-\mathit{form} & ::= &
-  @{text"(form)"} ~\mid~
-  @{const True} ~\mid~
-  @{const False} ~\mid~
-  @{prop "term = term"}\\
- &\mid& @{prop"\<not> form"}\index{$HOL4@\isasymnot} ~\mid~
-  @{prop "form \<and> form"}\index{$HOL0@\isasymand} ~\mid~
-  @{prop "form \<or> form"}\index{$HOL1@\isasymor} ~\mid~
-  @{prop "form \<longrightarrow> form"}\index{$HOL2@\isasymlongrightarrow}\\
- &\mid& @{prop"\<forall>x. form"}\index{$HOL6@\isasymforall} ~\mid~  @{prop"\<exists>x. form"}\index{$HOL7@\isasymexists}
-\end{array}
-\]
-Terms are the ones we have seen all along, built from constants, variables,
-function application and @{text"\<lambda>"}-abstraction, including all the syntactic
-sugar like infix symbols, @{text "if"}, @{text "case"} etc.
-\begin{warn}
-Remember that formulas are simply terms of type @{text bool}. Hence
-@{text "="} also works for formulas. Beware that @{text"="} has a higher
-precedence than the other logical operators. Hence @{prop"s = t \<and> A"} means
-@{text"(s = t) \<and> A"}, and @{prop"A\<and>B = B\<and>A"} means @{text"A \<and> (B = B) \<and> A"}.
-Logical equivalence can also be written with
-@{text "\<longleftrightarrow>"} instead of @{text"="}, where @{text"\<longleftrightarrow>"} has the same low
-precedence as @{text"\<longrightarrow>"}. Hence @{text"A \<and> B \<longleftrightarrow> B \<and> A"} really means
-@{text"(A \<and> B) \<longleftrightarrow> (B \<and> A)"}.
-\end{warn}
-\begin{warn}
-Quantifiers need to be enclosed in parentheses if they are nested within
-other constructs (just like @{text "if"}, @{text case} and @{text let}).
-\end{warn}
-The most frequent logical symbols and their ASCII representations are shown
-in Fig.~\ref{fig:log-symbols}.
-\begin{figure}
-\begin{center}
-\begin{tabular}{l@ {\qquad}l@ {\qquad}l}
-@{text "\<forall>"} & \xsymbol{forall} & \texttt{ALL}\\
-@{text "\<exists>"} & \xsymbol{exists} & \texttt{EX}\\
-@{text "\<lambda>"} & \xsymbol{lambda} & \texttt{\%}\\
-@{text "\<longrightarrow>"} & \texttt{-{}->}\\
-@{text "\<longleftrightarrow>"} & \texttt{<->}\\
-@{text "\<and>"} & \texttt{/\char`\\} & \texttt{\&}\\
-@{text "\<or>"} & \texttt{\char`\\/} & \texttt{|}\\
-@{text "\<not>"} & \xsymbol{not} & \texttt{\char`~}\\
-@{text "\<noteq>"} & \xsymbol{noteq} & \texttt{\char`~=}
-\end{tabular}
-\end{center}
-\caption{Logical symbols and their ASCII forms}
-\label{fig:log-symbols}
-\end{figure}
-The first column shows the symbols, the other columns ASCII representations.
-The \texttt{\char`\\}\texttt{<...>} form is always converted into the symbolic form
-by the Isabelle interfaces, the treatment of the other ASCII forms
-depends on the interface. The ASCII forms \texttt{/\char`\\} and
-\texttt{\char`\\/}
-are special in that they are merely keyboard shortcuts for the interface and
-not logical symbols by themselves.
-\begin{warn}
-The implication @{text"\<Longrightarrow>"} is part of the Isabelle framework. It structures
-theorems and proof states, separating assumptions from conclusions.
-The implication @{text"\<longrightarrow>"} is part of the logic HOL and can occur inside the
-formulas that make up the assumptions and conclusion.
-Theorems should be of the form @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"},
-not @{text"A\<^sub>1 \<and> \<dots> \<and> A\<^sub>n \<longrightarrow> A"}. Both are logically equivalent
-but the first one works better when using the theorem in further proofs.
-\end{warn}
-
-\section{Sets}
-\label{sec:Sets}
-
-Sets of elements of type @{typ 'a} have type @{typ"'a set"}\index{set@@{text set}}.
-They can be finite or infinite. Sets come with the usual notation:
-\begin{itemize}
-\item \indexed{@{term"{}"}}{$IMP042},\quad @{text"{e\<^sub>1,\<dots>,e\<^sub>n}"}
-\item @{prop"e \<in> A"}\index{$HOLSet0@\isasymin},\quad @{prop"A \<subseteq> B"}\index{$HOLSet2@\isasymsubseteq}
-\item @{term"A \<union> B"}\index{$HOLSet4@\isasymunion},\quad @{term"A \<inter> B"}\index{$HOLSet5@\isasyminter},\quad @{term"A - B"},\quad @{term"-A"}
-\end{itemize}
-(where @{term"A-B"} and @{text"-A"} are set difference and complement)
-and much more. @{const UNIV} is the set of all elements of some type.
-Set comprehension\index{set comprehension} is written
-@{term"{x. P}"}\index{$IMP042@@{term"{x. P}"}} rather than @{text"{x | P}"}.
-\begin{warn}
-In @{term"{x. P}"} the @{text x} must be a variable. Set comprehension
-involving a proper term @{text t} must be written
-\noquotes{@{term[source] "{t | x y. P}"}}\index{$IMP042@@{text"{t |x. P}"}},
-where @{text "x y"} are those free variables in @{text t}
-that occur in @{text P}.
-This is just a shorthand for @{term"{v. EX x y. v = t \<and> P}"}, where
-@{text v} is a new variable. For example, @{term"{x+y|x. x \<in> A}"}
-is short for \noquotes{@{term[source]"{v. \<exists>x. v = x+y \<and> x \<in> A}"}}.
-\end{warn}
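-For instance, set comprehensions combine with the set operations in the
-expected way; the following little lemma is included merely as an
-illustration:
-*}
-
-lemma "{x. P x} \<union> {x. Q x} = {x. P x \<or> Q x}"
-by auto
-
-text{*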
-
-Here are the ASCII representations of the mathematical symbols:
-\begin{center}
-\begin{tabular}{l@ {\quad}l@ {\quad}l}
-@{text "\<in>"} & \texttt{\char`\\\char`\<in>} & \texttt{:}\\
-@{text "\<subseteq>"} & \texttt{\char`\\\char`\<subseteq>} & \texttt{<=}\\
-@{text "\<union>"} & \texttt{\char`\\\char`\<union>} & \texttt{Un}\\
-@{text "\<inter>"} & \texttt{\char`\\\char`\<inter>} & \texttt{Int}
-\end{tabular}
-\end{center}
-Sets also allow bounded quantifications @{prop"ALL x : A. P"} and
-@{prop"EX x : A. P"}.
-
-For the more ambitious, there are also @{text"\<Union>"}\index{$HOLSet6@\isasymUnion}
-and @{text"\<Inter>"}\index{$HOLSet7@\isasymInter}:
-\begin{center}
-@{thm Union_eq} \qquad @{thm Inter_eq}
-\end{center}
-The ASCII forms of @{text"\<Union>"} are \texttt{\char`\\\char`\<Union>} and \texttt{Union},
-those of @{text"\<Inter>"} are \texttt{\char`\\\char`\<Inter>} and \texttt{Inter}.
-There are also indexed unions and intersections:
-\begin{center}
-@{thm UNION_eq} \\ @{thm INTER_eq}
-\end{center}
-The ASCII forms are \ \texttt{UN x:A.~B} \ and \ \texttt{INT x:A. B} \
-where \texttt{x} may occur in \texttt{B}.
-If \texttt{A} is \texttt{UNIV} you can just write \ \texttt{UN x.~B} \ and \ \texttt{INT x. B}.
-
-Some other frequently useful functions on sets are the following:
-\begin{center}
-\begin{tabular}{l@ {\quad}l}
-@{const_typ set}\index{set@@{const set}} & converts a list to the set of its elements\\
-@{const_typ finite}\index{finite@@{const finite}} & is true iff its argument is finite\\
-@{const_typ card}\index{card@@{const card}} & is the cardinality of a finite set\\
- & and is @{text 0} for all infinite sets\\
-@{thm image_def}\index{$IMP042@@{term"f ` A"}} & is the image of a function over a set
-\end{tabular}
-\end{center}
-See \cite{Nipkow-Main} for the wealth of further predefined functions in theory
-@{theory Main}.
-
-
-\subsection*{Exercises}
-
-\exercise
-Start from the data type of binary trees defined earlier:
-*}
-
-datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
-
-text{*
-Define a function @{text "set ::"} @{typ "'a tree \<Rightarrow> 'a set"}
-that returns the elements in a tree and a function
-@{text "ord ::"} @{typ "int tree \<Rightarrow> bool"}
-that tests if an @{typ "int tree"} is ordered.
-
-Define a function @{text ins} that inserts an element into an ordered @{typ "int tree"}
-while maintaining the order of the tree. If the element is already in the tree, the
-same tree should be returned. Prove correctness of @{text ins}:
-@{prop "set(ins x t) = {x} \<union> set t"} and @{prop "ord t \<Longrightarrow> ord(ins i t)"}.
-\endexercise
-
-
-\section{Proof Automation}
-
-So far we have only seen @{text simp} and \indexed{@{text auto}}{auto}: Both perform
-rewriting, both can also prove linear arithmetic facts (no multiplication),
-and @{text auto} is also able to prove simple logical or set-theoretic goals:
-*}
-
-lemma "\<forall>x. \<exists>y. x = y"
-by auto
-
-lemma "A \<subseteq> B \<inter> C \<Longrightarrow> A \<subseteq> B \<union> C"
-by auto
-
-text{* where
-\begin{quote}
-\isacom{by} \textit{proof-method}
-\end{quote}
-is short for
-\begin{quote}
-\isacom{apply} \textit{proof-method}\\
-\isacom{done}
-\end{quote}
-The key characteristics of both @{text simp} and @{text auto} are
-\begin{itemize}
-\item They show you where they got stuck, giving you an idea of how to continue.
-\item They perform the obvious steps but are highly incomplete.
-\end{itemize}
-A proof method is \conceptnoidx{complete} if it can prove all true formulas.
-There is no complete proof method for HOL, not even in theory.
-Hence all our proof methods only differ in how incomplete they are.
-
-A proof method that is still incomplete but tries harder than @{text auto} is
-\indexed{@{text fastforce}}{fastforce}.  It either succeeds or fails, it acts on the first
-subgoal only, and it can be modified just like @{text auto}, e.g.\
-with @{text "simp add"}. Here is a typical example of what @{text fastforce}
-can do:
-*}
-
-lemma "\<lbrakk> \<forall>xs \<in> A. \<exists>ys. xs = ys @ ys;  us \<in> A \<rbrakk>
-   \<Longrightarrow> \<exists>n. length us = n+n"
-by fastforce
-
-text{* This lemma is out of reach for @{text auto} because of the
-quantifiers.  Even @{text fastforce} fails when the quantifier structure
-becomes more complicated. In a few cases, its slow version @{text force}
-succeeds where @{text fastforce} fails.
-
-The method of choice for complex logical goals is \indexed{@{text blast}}{blast}. In the
-following example, @{text T} and @{text A} are two binary predicates. It
-is shown that if @{text T} is total, @{text A} is antisymmetric and @{text T} is
-a subset of @{text A}, then @{text A} is a subset of @{text T}:
-*}
-
-lemma
-  "\<lbrakk> \<forall>x y. T x y \<or> T y x;
-     \<forall>x y. A x y \<and> A y x \<longrightarrow> x = y;
-     \<forall>x y. T x y \<longrightarrow> A x y \<rbrakk>
-   \<Longrightarrow> \<forall>x y. A x y \<longrightarrow> T x y"
-by blast
-
-text{*
-We leave it to the reader to figure out why this lemma is true.
-Method @{text blast}
-\begin{itemize}
-\item is (in principle) a complete proof procedure for first-order formulas,
-  a fragment of HOL. In practice there is a search bound.
-\item does no rewriting and knows very little about equality.
-\item covers logic, sets and relations.
-\item either succeeds or fails.
-\end{itemize}
-Because of its strength in logic and sets and its weakness in equality reasoning, it complements the earlier proof methods.
-
-
-\subsection{\concept{Sledgehammer}}
-
-Command \isacom{sledgehammer} calls a number of external automatic
-theorem provers (ATPs) that run for up to 30 seconds searching for a
-proof. Some of these ATPs are part of the Isabelle installation, others are
-queried over the internet. If successful, a proof command is generated and can
-be inserted into your proof.  The biggest win of \isacom{sledgehammer} is
-that it will take into account the whole lemma library and you do not need to
-feed in any lemma explicitly. For example,*}
-
-lemma "\<lbrakk> xs @ ys = ys @ xs;  length xs = length ys \<rbrakk> \<Longrightarrow> xs = ys"
-
-txt{* cannot be solved by any of the standard proof methods, but
-\isacom{sledgehammer} finds the following proof: *}
-
-by (metis append_eq_conv_conj)
-
-text{* We do not explain how the proof was found but what this command
-means. For a start, Isabelle does not trust external tools (and in particular
-not the translations from Isabelle's logic to those tools!)
-and insists on a proof that it can check. This is what \indexed{@{text metis}}{metis} does.
-It is given a list of lemmas and tries to find a proof just using those lemmas
-(and pure logic). In contrast to @{text simp} and friends that know a lot of
-lemmas already, using @{text metis} manually is tedious because one has
-to find all the relevant lemmas first. But that is precisely what
-\isacom{sledgehammer} does for us.
-In this case lemma @{thm[source]append_eq_conv_conj} alone suffices:
-@{thm[display] append_eq_conv_conj}
-We leave it to the reader to figure out why this lemma suffices to prove
-the above lemma, even without any knowledge of what the functions @{const take}
-and @{const drop} do. Keep in mind that the variables in the two lemmas
-are independent of each other, despite the same names, and that you can
-substitute arbitrary values for the free variables in a lemma.
-
-Just as for the other proof methods we have seen, there is no guarantee that
-\isacom{sledgehammer} will find a proof if it exists. Nor is
-\isacom{sledgehammer} superior to the other proof methods.  They are
-incomparable. Therefore it is recommended to apply @{text simp} or @{text
-auto} before invoking \isacom{sledgehammer} on what is left.
-
-\subsection{Arithmetic}
-
-By arithmetic formulas we mean formulas involving variables, numbers, @{text
-"+"}, @{text"-"}, @{text "="}, @{text "<"}, @{text "\<le>"} and the usual logical
-connectives @{text"\<not>"}, @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"},
-@{text"\<longleftrightarrow>"}. Strictly speaking, this is known as \concept{linear arithmetic}
-because it does not involve multiplication, although multiplication with
-numbers, e.g.\ @{text"2*n"} is allowed. Such formulas can be proved by
-\indexed{@{text arith}}{arith}:
-*}
-
-lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
-by arith
-
-text{* In fact, @{text auto} and @{text simp} can prove many linear
-arithmetic formulas already, like the one above, by calling a weak but fast
-version of @{text arith}. Hence it is usually not necessary to invoke
-@{text arith} explicitly.
-
-The above example involves natural numbers, but integers (type @{typ int})
-and real numbers (type @{text real}) are supported as well. As are a number
-of further operators like @{const min} and @{const max}. On @{typ nat} and
-@{typ int}, @{text arith} can even prove theorems with quantifiers in them,
-but we will not enlarge on that here.
-
-
-\subsection{Trying Them All}
-
-If you want to try all of the above automatic proof methods you simply type
-\begin{isabelle}
-\isacom{try}
-\end{isabelle}
-You can also add specific simplification and introduction rules:
-\begin{isabelle}
-\isacom{try} @{text"simp: \<dots> intro: \<dots>"}
-\end{isabelle}
-There is also a lightweight variant \isacom{try0} that does not call
-sledgehammer.
-
-\section{Single Step Proofs}
-
-Although automation is nice, it often fails, at least initially, and you need
-to find out why. When @{text fastforce} or @{text blast} simply fail, you have
-no clue why. At this point, the stepwise
-application of proof rules may be necessary. For example, if @{text blast}
-fails on @{prop"A \<and> B"}, you want to attack the two
-conjuncts @{text A} and @{text B} separately. This can
-be achieved by applying \emph{conjunction introduction}
-\[ @{thm[mode=Rule,show_question_marks]conjI}\ @{text conjI}
-\]
-to the proof state. We will now examine the details of this process.
-
-\subsection{Instantiating Unknowns}
-
-We had briefly mentioned earlier that after proving some theorem,
-Isabelle replaces all free variables @{text x} by so-called \conceptidx{unknowns}{unknown}
-@{text "?x"}. We can see this clearly in rule @{thm[source] conjI}.
-These unknowns can later be instantiated explicitly or implicitly:
-\begin{itemize}
-\item By hand, using \indexed{@{text of}}{of}.
-The expression @{text"conjI[of \"a=b\" \"False\"]"}
-instantiates the unknowns in @{thm[source] conjI} from left to right with the
-two formulas @{text"a=b"} and @{text False}, yielding the rule
-@{thm[display,mode=Rule]conjI[of "a=b" False]}
-
-In general, @{text"th[of string\<^sub>1 \<dots> string\<^sub>n]"} instantiates
-the unknowns in the theorem @{text th} from left to right with the terms
-@{text string\<^sub>1} to @{text string\<^sub>n}.
-
-\item By unification. \conceptidx{Unification}{unification} is the process of making two
-terms syntactically equal by suitable instantiations of unknowns. For example,
-unifying @{text"?P \<and> ?Q"} with \mbox{@{prop"a=b \<and> False"}} instantiates
-@{text "?P"} with @{prop "a=b"} and @{text "?Q"} with @{prop False}.
-\end{itemize}
-We need not instantiate all unknowns. If we want to skip a particular one we
-can just write @{text"_"} instead, for example @{text "conjI[of _ \"False\"]"}.
-Unknowns can also be instantiated by name using \indexed{@{text "where"}}{where}, for example
-@{text "conjI[where ?P = \"a=b\""} \isacom{and} @{text"?Q = \"False\"]"}.
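-You can inspect the result of such an instantiation with the \isacom{thm}
-command (see \autoref{sec:forward-proof}), which merely displays a theorem:
-*}
-
-thm conjI[of _ "False"]
-thm conjI[where ?Q = "False"]
-
-text{*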
-
-
-\subsection{Rule Application}
-
-\conceptidx{Rule application}{rule application} means applying a rule backwards to a proof state.
-For example, applying rule @{thm[source]conjI} to a proof state
-\begin{quote}
-@{text"1.  \<dots>  \<Longrightarrow> A \<and> B"}
-\end{quote}
-results in two subgoals, one for each premise of @{thm[source]conjI}:
-\begin{quote}
-@{text"1.  \<dots>  \<Longrightarrow> A"}\\
-@{text"2.  \<dots>  \<Longrightarrow> B"}
-\end{quote}
-In general, the application of a rule @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}
-to a subgoal \mbox{@{text"\<dots> \<Longrightarrow> C"}} proceeds in two steps:
-\begin{enumerate}
-\item
-Unify @{text A} and @{text C}, thus instantiating the unknowns in the rule.
-\item
-Replace the subgoal @{text C} with @{text n} new subgoals @{text"A\<^sub>1"} to @{text"A\<^sub>n"}.
-\end{enumerate}
-This is the command to apply rule @{text xyz}:
-\begin{quote}
-\isacom{apply}@{text"(rule xyz)"}\index{rule@@{text rule}}
-\end{quote}
-This is also called \concept{backchaining} with rule @{text xyz}.
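-
-For example, backchaining with @{thm[source] conjI} and then discharging the
-two new subgoals from the assumptions might look like this; the
-@{text assumption} method used below simply proves a subgoal that matches
-one of its assumptions:
-*}
-
-lemma "\<lbrakk> A; B \<rbrakk> \<Longrightarrow> A \<and> B"
-apply(rule conjI)
-apply(assumption)
-apply(assumption)
-done
-
-text{*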
-
-\subsection{Introduction Rules}
-
-Conjunction introduction (@{thm[source] conjI}) is one example of a whole
-class of rules known as \conceptidx{introduction rules}{introduction rule}. They explain under which
-premises some logical construct can be introduced. Here are some further
-useful introduction rules:
-\[
-\inferrule*[right=\mbox{@{text impI}}]{\mbox{@{text"?P \<Longrightarrow> ?Q"}}}{\mbox{@{text"?P \<longrightarrow> ?Q"}}}
-\qquad
-\inferrule*[right=\mbox{@{text allI}}]{\mbox{@{text"\<And>x. ?P x"}}}{\mbox{@{text"\<forall>x. ?P x"}}}
-\]
-\[
-\inferrule*[right=\mbox{@{text iffI}}]{\mbox{@{text"?P \<Longrightarrow> ?Q"}} \\ \mbox{@{text"?Q \<Longrightarrow> ?P"}}}
-  {\mbox{@{text"?P = ?Q"}}}
-\]
-These rules are part of the logical system of \concept{natural deduction}
-(e.g.\ \cite{HuthRyan}). Although we intentionally de-emphasize the basic rules
-of logic in favour of automatic proof methods that allow you to take bigger
-steps, these rules are helpful in locating where and why automation fails.
-When applied backwards, these rules decompose the goal:
-\begin{itemize}
-\item @{thm[source] conjI} and @{thm[source]iffI} split the goal into two subgoals,
-\item @{thm[source] impI} moves the left-hand side of a HOL implication into the list of assumptions,
-\item and @{thm[source] allI} removes a @{text "\<forall>"} by turning the quantified variable into a fixed local variable of the subgoal.
-\end{itemize}
-Isabelle knows about these and a number of other introduction rules.
-The command
-\begin{quote}
-\isacom{apply} @{text rule}\index{rule@@{text rule}}
-\end{quote}
-automatically selects the appropriate rule for the current subgoal.
-
-You can also turn your own theorems into introduction rules by giving them
-the \indexed{@{text"intro"}}{intro} attribute, analogous to the @{text simp} attribute.  In
-that case @{text blast}, @{text fastforce} and (to a limited extent) @{text
-auto} will automatically backchain with those theorems. The @{text intro}
-attribute should be used with care because it increases the search space and
-can lead to nontermination.  Sometimes it is better to use it only in
-specific calls of @{text blast} and friends. For example,
-@{thm[source] le_trans}, transitivity of @{text"\<le>"} on type @{typ nat},
-is not an introduction rule by default because of the disastrous effect
-on the search space, but can be useful in specific situations:
-*}
-
-lemma "\<lbrakk> (a::nat) \<le> b; b \<le> c; c \<le> d; d \<le> e \<rbrakk> \<Longrightarrow> a \<le> e"
-by(blast intro: le_trans)
-
-text{*
-Of course this is just an example and could be proved by @{text arith}, too.
-
-\subsection{Forward Proof}
-\label{sec:forward-proof}
-
-Forward proof means deriving new theorems from old theorems. We have already
-seen a very simple form of forward proof: the @{text of} operator for
-instantiating unknowns in a theorem. The big brother of @{text of} is
-\indexed{@{text OF}}{OF} for applying one theorem to others. Given a theorem @{prop"A \<Longrightarrow> B"} called
-@{text r} and a theorem @{text A'} called @{text r'}, the theorem @{text
-"r[OF r']"} is the result of applying @{text r} to @{text r'}, where @{text
-r} should be viewed as a function taking a theorem @{text A} and returning
-@{text B}.  More precisely, @{text A} and @{text A'} are unified, thus
-instantiating the unknowns in @{text B}, and the result is the instantiated
-@{text B}. Of course, unification may also fail.
-\begin{warn}
-Application of rules to other rules operates in the forward direction: from
-the premises to the conclusion of the rule; application of rules to proof
-states operates in the backward direction, from the conclusion to the
-premises.
-\end{warn}
-
-In general @{text r} can be of the form @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}
-and there can be multiple argument theorems @{text r\<^sub>1} to @{text r\<^sub>m}
-(with @{text"m \<le> n"}), in which case @{text "r[OF r\<^sub>1 \<dots> r\<^sub>m]"} is obtained
-by unifying and thus proving @{text "A\<^sub>i"} with @{text "r\<^sub>i"}, @{text"i = 1\<dots>m"}.
-Here is an example, where @{thm[source]refl} is the theorem
-@{thm[show_question_marks] refl}:
-*}
-
-thm conjI[OF refl[of "a"] refl[of "b"]]
-
-text{* yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
-The command \isacom{thm} merely displays the result.
-
-Forward reasoning also makes sense in connection with proof states.
-Therefore @{text blast}, @{text fastforce} and @{text auto} support a modifier
-@{text dest} which instructs the proof method to use certain rules in a
-forward fashion. If @{text r} is of the form \mbox{@{text "A \<Longrightarrow> B"}}, the modifier
-\mbox{@{text"dest: r"}}\index{dest@@{text"dest:"}}
-allows proof search to reason forward with @{text r}, i.e.\
-to replace an assumption @{text A'}, where @{text A'} unifies with @{text A},
-with the correspondingly instantiated @{text B}. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
-*}
-
-lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
-by(blast dest: Suc_leD)
-
-text{* In this particular example we could have backchained with
-@{thm[source] Suc_leD}, too, but because the premise is more complicated than the conclusion this can easily lead to nontermination.
-
-%\subsection{Finding Theorems}
-%
-%Command \isacom{find{\isacharunderscorekeyword}theorems} searches for specific theorems in the current
-%theory. Search criteria include pattern matching on terms and on names.
-%For details see the Isabelle/Isar Reference Manual~\cite{IsarRef}.
-%\bigskip
-
-\begin{warn}
-To ease readability we will drop the question marks
-in front of unknowns from now on.
-\end{warn}
-
-
-\section{Inductive Definitions}
-\label{sec:inductive-defs}\index{inductive definition|(}
-
-Inductive definitions are the third important definition facility, after
-datatypes and recursive functions.
-\ifsem
-In fact, they are the key construct in the
-definition of operational semantics in the second part of the book.
-\fi
-
-\subsection{An Example: Even Numbers}
-\label{sec:Logic:even}
-
-Here is a simple example of an inductively defined predicate:
-\begin{itemize}
-\item 0 is even
-\item If $n$ is even, so is $n+2$.
-\end{itemize}
-The operative word ``inductive'' means that these are the only even numbers.
-In Isabelle we give the two rules the names @{text ev0} and @{text evSS}
-and write
-*}
-
-inductive ev :: "nat \<Rightarrow> bool" where
-ev0:    "ev 0" |
-evSS:  (*<*)"ev n \<Longrightarrow> ev (Suc(Suc n))"(*>*)
-text_raw{* @{prop[source]"ev n \<Longrightarrow> ev (n + 2)"} *}
-
-text{* To get used to inductive definitions, we will first prove a few
-properties of @{const ev} informally before we descend to the Isabelle level.
-
-How do we prove that some number is even, e.g.\ @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
-\begin{quote}
-@{text "ev 0 \<Longrightarrow> ev (0 + 2) \<Longrightarrow> ev((0 + 2) + 2) = ev 4"}
-\end{quote}
-
-\subsubsection{Rule Induction}\index{rule induction|(}
-
-Showing that all even numbers have some property is more complicated.  For
-example, let us prove that the inductive definition of even numbers agrees
-with the following recursive one:*}
-
-fun even :: "nat \<Rightarrow> bool" where
-"even 0 = True" |
-"even (Suc 0) = False" |
-"even (Suc(Suc n)) = even n"
-
-text{* We prove @{prop"ev m \<Longrightarrow> even m"}.  That is, we
-assume @{prop"ev m"} and by induction on the form of its derivation
-prove @{prop"even m"}. There are two cases corresponding to the two rules
-for @{const ev}:
-\begin{description}
-\item[Case @{thm[source]ev0}:]
- @{prop"ev m"} was derived by rule @{prop "ev 0"}: \\
- @{text"\<Longrightarrow>"} @{prop"m=(0::nat)"} @{text"\<Longrightarrow>"} @{text "even m = even 0 = True"}
-\item[Case @{thm[source]evSS}:]
- @{prop"ev m"} was derived by rule @{prop "ev n \<Longrightarrow> ev(n+2)"}: \\
-@{text"\<Longrightarrow>"} @{prop"m=n+(2::nat)"} and by induction hypothesis @{prop"even n"}\\
-@{text"\<Longrightarrow>"} @{text"even m = even(n + 2) = even n = True"}
-\end{description}
-
-What we have just seen is a special case of \concept{rule induction}.
-Rule induction applies to propositions of this form
-\begin{quote}
-@{prop "ev n \<Longrightarrow> P n"}
-\end{quote}
-That is, we want to prove a property @{prop"P n"}
-for all even @{text n}. But if we assume @{prop"ev n"}, then there must be
-some derivation of this assumption using the two defining rules for
-@{const ev}. That is, we must prove
-\begin{description}
-\item[Case @{thm[source]ev0}:] @{prop"P(0::nat)"}
-\item[Case @{thm[source]evSS}:] @{prop"\<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n + 2::nat)"}
-\end{description}
-The corresponding rule is called @{thm[source] ev.induct} and looks like this:
-\[
-\inferrule{
-\mbox{@{thm (prem 1) ev.induct[of "n"]}}\\
-\mbox{@{thm (prem 2) ev.induct}}\\
-\mbox{@{prop"!!n. \<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n+2)"}}}
-{\mbox{@{thm (concl) ev.induct[of "n"]}}}
-\]
-The first premise @{prop"ev n"} enforces that this rule can only be applied
-in situations where we know that @{text n} is even.
-
-Note that in the induction step we may not just assume @{prop"P n"} but also
-\mbox{@{prop"ev n"}}, which is simply the premise of rule @{thm[source]
-evSS}.  Here is an example where the local assumption @{prop"ev n"} comes in
-handy: we prove @{prop"ev m \<Longrightarrow> ev(m - 2)"} by induction on @{prop"ev m"}.
-Case @{thm[source]ev0} requires us to prove @{prop"ev(0 - 2)"}, which follows
-from @{prop"ev 0"} because @{prop"0 - 2 = (0::nat)"} on type @{typ nat}. In
-case @{thm[source]evSS} we have \mbox{@{prop"m = n+(2::nat)"}} and may assume
-@{prop"ev n"}, which implies @{prop"ev (m - 2)"} because @{text"m - 2 = (n +
-2) - 2 = n"}. We did not need the induction hypothesis at all for this proof;
-it is just a case analysis of which rule was used. But having @{prop"ev
-n"} at our disposal in case @{thm[source]evSS} was essential.
-This case analysis of rules is also called ``rule inversion''
-and is discussed in more detail in \autoref{ch:Isar}.
-
-\subsubsection{In Isabelle}
-
-Let us now recast the above informal proofs in Isabelle. For a start,
-we use @{const Suc} terms instead of numerals in rule @{thm[source]evSS}:
-@{thm[display] evSS}
-This avoids the difficulty of unifying @{text"n+2"} with some numeral,
-which is not automatic.
-
-The simplest way to prove @{prop"ev(Suc(Suc(Suc(Suc 0))))"} is in a forward
-direction: @{text "evSS[OF evSS[OF ev0]]"} yields the theorem @{thm evSS[OF
-evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
-fashion. Although this is more verbose, it allows us to demonstrate how each
-rule application changes the proof state: *}
-
-lemma "ev(Suc(Suc(Suc(Suc 0))))"
-txt{*
-@{subgoals[display,indent=0,goals_limit=1]}
-*}
-apply(rule evSS)
-txt{*
-@{subgoals[display,indent=0,goals_limit=1]}
-*}
-apply(rule evSS)
-txt{*
-@{subgoals[display,indent=0,goals_limit=1]}
-*}
-apply(rule ev0)
-done
-
-text{* \indent
-Rule induction is applied by giving the induction rule explicitly via the
-@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}*}
-
-lemma "ev m \<Longrightarrow> even m"
-apply(induction rule: ev.induct)
-by(simp_all)
-
-text{* Both cases are automatic. Note that if there are multiple assumptions
-of the form @{prop"ev t"}, method @{text induction} will induct on the leftmost
-one.
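-For example, in a goal of the form @{text"ev m \<Longrightarrow> ev n \<Longrightarrow> P"}
-it would thus perform rule induction on @{text"ev m"}.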
-
-As a bonus, we also prove the remaining direction of the equivalence of
-@{const ev} and @{const even}:
-*}
-
-lemma "even n \<Longrightarrow> ev n"
-apply(induction n rule: even.induct)
-
-txt{* This is a proof by computation induction on @{text n} (see
-\autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
-the three equations for @{const even}:
-@{subgoals[display,indent=0]}
-The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because @{prop"even(Suc 0)"} is @{const False}:
-*}
-
-by (simp_all add: ev0 evSS)
-
-text{* The rules for @{const ev} make perfect simplification and introduction
-rules because their premises are always smaller than the conclusion. It
-makes sense to turn them into simplification and introduction rules
-permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
-\index{intros@@{text".intros"}} by Isabelle: *}
-
-declare ev.intros[simp,intro]
-
-text{* The rules of an inductive definition are not simplification rules by
-default because, in contrast to recursive functions, there is no termination
-requirement for inductive definitions.
-
-\subsubsection{Inductive Versus Recursive}
-
-We have seen two definitions of the notion of evenness, an inductive and a
-recursive one. Which one is better? Much of the time, the recursive one is
-more convenient: it allows us to do rewriting in the middle of terms, and it
-expresses both the positive information (which numbers are even) and the
-negative information (which numbers are not even) directly. An inductive
-definition only expresses the positive information directly. The negative
-information, for example, that @{text 1} is not even, has to be proved from
-it (by induction or rule inversion). On the other hand, rule induction is
-tailor-made for proving \mbox{@{prop"ev n \<Longrightarrow> P n"}} because it only asks you
-to prove the positive cases. In the proof of @{prop"even n \<Longrightarrow> P n"} by
-computation induction via @{thm[source]even.induct}, we are also presented
-with the trivial negative cases. If you want the convenience of both
-rewriting and rule induction, you can make two definitions and show their
-equivalence (as above) or make one definition and prove additional properties
-from it, for example rule induction from computation induction.
-
-But many concepts do not admit a recursive definition at all because there is
-no datatype for the recursion (for example, the transitive closure of a
-relation), or the recursion would not terminate (for example,
-an interpreter for a programming language). Even if there is a recursive
-definition, if we are only interested in the positive information, the
-inductive definition may be much simpler.
-
-\subsection{The Reflexive Transitive Closure}
-\label{sec:star}
-
-Evenness is really more conveniently expressed recursively than inductively.
-As a second and very typical example of an inductive definition we define the
-reflexive transitive closure.
-\ifsem
-It will also be an important building block for
-some of the semantics considered in the second part of the book.
-\fi
-
-The reflexive transitive closure, called @{text star} below, is a function
-that maps a binary predicate to another binary predicate: if @{text r} is of
-type @{text"\<tau> \<Rightarrow> \<tau> \<Rightarrow> bool"} then @{term "star r"} is again of type @{text"\<tau> \<Rightarrow>
-\<tau> \<Rightarrow> bool"}, and @{prop"star r x y"} means that @{text x} and @{text y} are in
-the relation @{term"star r"}. Think @{term"r^*"} when you see @{term"star
-r"}, because @{text"star r"} is meant to be the reflexive transitive closure.
-That is, @{prop"star r x y"} is meant to be true if from @{text x} we can
-reach @{text y} in finitely many @{text r} steps. This concept is naturally
-defined inductively: *}
-
-inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"  for r where
-refl:  "star r x x" |
-step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
-
-text{* The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
-step case @{thm[source]step} combines an @{text r} step (from @{text x} to
-@{text y}) and a @{term"star r"} step (from @{text y} to @{text z}) into a
-@{term"star r"} step (from @{text x} to @{text z}).
-The ``\isacom{for}~@{text r}'' in the header is merely a hint to Isabelle
-that @{text r} is a fixed parameter of @{const star}, in contrast to the
-further parameters of @{const star}, which change. As a result, Isabelle
-generates a simpler induction rule.
-
-By definition @{term"star r"} is reflexive. It is also transitive, but we
-need rule induction to prove that: *}
-
-lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
-apply(induction rule: star.induct)
-(*<*)
-defer
-apply(rename_tac u x y)
-defer
-(*>*)
-txt{* The induction is over @{prop"star r x y"} (the first matching assumption)
-and we try to prove \mbox{@{prop"star r y z \<Longrightarrow> star r x z"}},
-which we abbreviate by @{prop"P x y"}. These are our two subgoals:
-@{subgoals[display,indent=0]}
-The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
-and it is trivial:\index{assumption@@{text assumption}}
-*}
-apply(assumption)
-txt{* Let us examine subgoal @{text 2}, case @{thm[source] step}.
-Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
-are the premises of rule @{thm[source]step}.
-Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
-the IH coming from @{prop"star r x y"}. We have to prove @{prop"P u y"},
-which we do by assuming @{prop"star r y z"} and proving @{prop"star r u z"}.
-The proof itself is straightforward: from \mbox{@{prop"star r y z"}} the IH
-leads to @{prop"star r x z"} which, together with @{prop"r u x"},
-leads to \mbox{@{prop"star r u z"}} via rule @{thm[source]step}:
-*}
-apply(metis step)
-done
-
-text{*\index{rule induction|)}
-
-\subsection{The General Case}
-
-Inductive definitions have approximately the following general form:
-\begin{quote}
-\isacom{inductive} @{text"I :: \"\<tau> \<Rightarrow> bool\""} \isacom{where}
-\end{quote}
-followed by a sequence of (possibly named) rules of the form
-\begin{quote}
-@{text "\<lbrakk> I a\<^sub>1; \<dots>; I a\<^sub>n \<rbrakk> \<Longrightarrow> I a"}
-\end{quote}
-separated by @{text"|"}. As usual, @{text n} can be 0.
-The corresponding rule induction principle
-@{text I.induct} applies to propositions of the form
-\begin{quote}
-@{prop "I x \<Longrightarrow> P x"}
-\end{quote}
-where @{text P} may itself be a chain of implications.
-\begin{warn}
-Rule induction is always on the leftmost premise of the goal.
-Hence @{text "I x"} must be the first premise.
-\end{warn}
-Proving @{prop "I x \<Longrightarrow> P x"} by rule induction means proving
-for every rule of @{text I} that @{text P} is invariant:
-\begin{quote}
-@{text "\<lbrakk> I a\<^sub>1; P a\<^sub>1; \<dots>; I a\<^sub>n; P a\<^sub>n \<rbrakk> \<Longrightarrow> P a"}
-\end{quote}
-
-The above format for inductive definitions is simplified in a number of
-respects. @{text I} can have any number of arguments and each rule can have
-additional premises not involving @{text I}, so-called \conceptidx{side
-conditions}{side condition}. In rule inductions, these side conditions appear as additional
-assumptions. The \isacom{for} clause seen in the definition of the reflexive
-transitive closure simplifies the induction rule.
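-
-To illustrate, a hypothetical predicate with a side condition could be
-declared like this (a sketch, not used elsewhere in this text):
-\begin{quote}
-\isacom{inductive} @{text"below :: \"nat \<Rightarrow> nat \<Rightarrow> bool\""} \isacom{for} @{text k} \isacom{where}\\
-@{text"n < k \<Longrightarrow> below k n"}
-\end{quote}
-The premise @{text"n < k"} does not involve @{text below}: it is a side
-condition and shows up as an additional assumption in a rule induction.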
-\index{inductive definition|)}
-
-\subsection*{Exercises}
-
-\begin{exercise}
-Formalise the following definition of palindromes
-\begin{itemize}
-\item The empty list and a singleton list are palindromes.
-\item If @{text xs} is a palindrome, so is @{term "a # xs @ [a]"}.
-\end{itemize}
-as an inductive predicate @{text "palindrome ::"} @{typ "'a list \<Rightarrow> bool"}
-and prove that @{prop "rev xs = xs"} if @{text xs} is a palindrome.
-\end{exercise}
-
-\exercise
-We could also have defined @{const star} as follows:
-*}
-
-inductive star' :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" for r where
-refl': "star' r x x" |
-step': "star' r x y \<Longrightarrow> r y z \<Longrightarrow> star' r x z"
-
-text{*
-The single @{text r} step is performed after rather than before the @{text star'}
-steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
-@{prop "star r x y \<Longrightarrow> star r' x y"}. You may need lemmas.
-Note that rule induction fails
-if the assumption about the inductive predicate is not the first assumption.
-\endexercise
-
-\begin{exercise}\label{exe:iter}
-Analogous to @{const star}, give an inductive definition of the @{text n}-fold iteration
-of a relation @{text r}: @{term "iter r n x y"} should hold if there are @{text x\<^sub>0}, \dots, @{text x\<^sub>n}
-such that @{prop"x = x\<^sub>0"}, @{prop"x\<^sub>n = y"} and @{text"r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>"} for
-all @{prop"i < n"}. Correct and prove the following claim:
-@{prop"star r x y \<Longrightarrow> iter r n x y"}.
-\end{exercise}
-
-\begin{exercise}
-A context-free grammar can be seen as an inductive definition where each
-nonterminal $A$ is an inductively defined predicate on lists of terminal
-symbols: $A(w)$ means that $w$ is in the language generated by $A$.
-For example, the production $S \to a S b$ can be viewed as the implication
-@{prop"S w \<Longrightarrow> S (a # w @ [b])"} where @{text a} and @{text b} are terminal symbols,
-i.e., elements of some alphabet. The alphabet can be defined like this:
-\isacom{datatype} @{text"alpha = a | b | \<dots>"}
-
-Define the two grammars (where $\varepsilon$ is the empty word)
-\[
-\begin{array}{r@ {\quad}c@ {\quad}l}
-S &\to& \varepsilon \quad\mid\quad aSb \quad\mid\quad SS \\
-T &\to& \varepsilon \quad\mid\quad TaTb
-\end{array}
-\]
-as two inductive predicates.
-If you think of @{text a} and @{text b} as ``@{text "("}'' and  ``@{text ")"}'',
-the grammars define strings of balanced parentheses.
-Prove @{prop"T w \<Longrightarrow> S w"} and @{prop "S w \<Longrightarrow> T w"} separately and conclude
-@{prop "S w = T w"}.
-\end{exercise}
-
-\ifsem
-\begin{exercise}
-In \autoref{sec:AExp} we defined a recursive evaluation function
-@{text "aval :: aexp \<Rightarrow> state \<Rightarrow> val"}.
-Define an inductive evaluation predicate
-@{text "aval_rel :: aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool"}
-and prove that it agrees with the recursive function:
-@{prop "aval_rel a s v \<Longrightarrow> aval a s = v"}, 
-@{prop "aval a s = v \<Longrightarrow> aval_rel a s v"} and thus
-\noquotes{@{prop [source] "aval_rel a s v \<longleftrightarrow> aval a s = v"}}.
-\end{exercise}
-
-\begin{exercise}
-Consider the stack machine from Chapter~3
-and recall the concept of \concept{stack underflow}
-from Exercise~\ref{exe:stack-underflow}.
-Define an inductive predicate
-@{text "ok :: nat \<Rightarrow> instr list \<Rightarrow> nat \<Rightarrow> bool"}
-such that @{text "ok n is n'"} means that with any initial stack of length
-@{text n} the instructions @{text "is"} can be executed
-without stack underflow and that the final stack has length @{text n'}.
-Prove that @{text ok} correctly computes the final stack size
-@{prop[display] "\<lbrakk>ok n is n'; length stk = n\<rbrakk> \<Longrightarrow> length (exec is s stk) = n'"}
-and that instruction sequences generated by @{text comp}
-cannot cause stack underflow: \ @{text "ok n (comp a) ?"} \ for
-some suitable value of @{text "?"}.
-\end{exercise}
-\fi
-*}
-(*<*)
-end
-(*>*)
--- a/src/Doc/ProgProve/MyList.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-theory MyList
-imports Main
-begin
-
-datatype 'a list = Nil | Cons 'a "'a list"
-
-fun app :: "'a list => 'a list => 'a list" where
-"app Nil ys = ys" |
-"app (Cons x xs) ys = Cons x (app xs ys)"
-
-fun rev :: "'a list => 'a list" where
-"rev Nil = Nil" |
-"rev (Cons x xs) = app (rev xs) (Cons x Nil)"
-
-value "rev(Cons True (Cons False Nil))"
-
-end
--- a/src/Doc/ProgProve/Types_and_funs.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,593 +0,0 @@
-(*<*)
-theory Types_and_funs
-imports Main
-begin
-(*>*)
-text{*
-\vspace{-5ex}
-\section{Type and Function Definitions}
-
-Type synonyms are abbreviations for existing types, for example
-*}
-
-type_synonym string = "char list"
-
-text{*\index{string@@{text string}}
-Type synonyms are expanded after parsing and are not present in the internal representation or in the output. They are mere conveniences for the reader.
-
-\subsection{Datatypes}
-\label{sec:datatypes}
-
-The general form of a datatype definition looks like this:
-\begin{quote}
-\begin{tabular}{@ {}rclcll}
-\indexed{\isacom{datatype}}{datatype} @{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"}
-     & = & $C_1 \ @{text"\""}\tau_{1,1}@{text"\""} \dots @{text"\""}\tau_{1,n_1}@{text"\""}$ \\
-     & $|$ & \dots \\
-     & $|$ & $C_k \ @{text"\""}\tau_{k,1}@{text"\""} \dots @{text"\""}\tau_{k,n_k}@{text"\""}$
-\end{tabular}
-\end{quote}
-It introduces the constructors \
-$C_i :: \tau_{i,1}\Rightarrow \cdots \Rightarrow \tau_{i,n_i} \Rightarrow$~@{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"} \ and expresses that any value of this type is built from these constructors in a unique manner. Uniqueness is implied by the following
-properties of the constructors:
-\begin{itemize}
-\item \emph{Distinctness:} $C_i\ \ldots \neq C_j\ \dots$ \quad if $i \neq j$
-\item \emph{Injectivity:}
-\begin{tabular}[t]{l}
- $(C_i \ x_1 \dots x_{n_i} = C_i \ y_1 \dots y_{n_i}) =$\\
- $(x_1 = y_1 \land \dots \land x_{n_i} = y_{n_i})$
-\end{tabular}
-\end{itemize}
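-For the list type, for instance, distinctness yields @{prop"[] \<noteq> x # xs"}
-and injectivity yields @{prop"(x # xs = y # ys) = (x = y \<and> xs = ys)"}.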
-The fact that any value of the datatype is built from the constructors implies
-the \concept{structural induction}\index{induction} rule: to show
-$P~x$ for all $x$ of type @{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"},
-one needs to show $P(C_i\ x_1 \dots x_{n_i})$ (for each $i$) assuming
-$P(x_j)$ for all $j$ where $\tau_{i,j} =$~@{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"}.
-Distinctness and injectivity are applied automatically by @{text auto}
-and other proof methods. Induction must be applied explicitly.
-
-Datatype values can be taken apart with case expressions\index{case expression}\index{case expression@@{text "case ... of"}}, for example
-\begin{quote}
-\noquotes{@{term[source] "(case xs of [] \<Rightarrow> 0 | x # _ \<Rightarrow> Suc x)"}}
-\end{quote}
-just like in functional programming languages. Case expressions
-must be enclosed in parentheses.
-
-As an example, consider binary trees:
-*}
-
-datatype 'a tree = Tip | Node  "'a tree"  'a  "'a tree"
-
-text{* with a mirror function: *}
-
-fun mirror :: "'a tree \<Rightarrow> 'a tree" where
-"mirror Tip = Tip" |
-"mirror (Node l a r) = Node (mirror r) a (mirror l)"
-
-text{* The following lemma illustrates induction: *}
-
-lemma "mirror(mirror t) = t"
-apply(induction t)
-
-txt{* yields
-@{subgoals[display]}
-The induction step contains two induction hypotheses, one for each subtree.
-An application of @{text auto} finishes the proof.
-
-A very simple but also very useful datatype is the predefined
-@{datatype[display] option}\index{option@@{text option}}\index{None@@{const None}}\index{Some@@{const Some}}
-Its sole purpose is to add a new element @{const None} to an existing
-type @{typ 'a}. To make sure that @{const None} is distinct from all the
-elements of @{typ 'a}, you wrap them up in @{const Some} and call
-the new type @{typ"'a option"}. A typical application is a lookup function
-on a list of key-value pairs, often called an association list:
-*}
-(*<*)
-apply auto
-done
-(*>*)
-fun lookup :: "('a * 'b) list \<Rightarrow> 'a \<Rightarrow> 'b option" where
-"lookup [] x = None" |
-"lookup ((a,b) # ps) x = (if a = x then Some b else lookup ps x)"
-
-text{*
-Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
-Pairs can be taken apart either by pattern matching (as above) or with the
-projection functions @{const fst} and @{const snd}: @{thm fst_conv} and @{thm snd_conv}. Tuples are simulated by pairs nested to the right: @{term"(a,b,c)"}
-abbreviates @{text"(a, (b, c))"} and @{text "\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3"} abbreviates
-@{text "\<tau>\<^sub>1 \<times> (\<tau>\<^sub>2 \<times> \<tau>\<^sub>3)"}.
-
-\subsection{Definitions}
-
-Non-recursive functions can be defined as in the following example:
-\index{definition@\isacom{definition}}*}
-
-definition sq :: "nat \<Rightarrow> nat" where
-"sq n = n * n"
-
-text{* Such definitions do not allow pattern matching but only
-@{text"f x\<^sub>1 \<dots> x\<^sub>n = t"}, where @{text f} does not occur in @{text t}.
-
-\subsection{Abbreviations}
-
-Abbreviations are similar to definitions:
-\index{abbreviation@\isacom{abbreviation}}*}
-
-abbreviation sq' :: "nat \<Rightarrow> nat" where
-"sq' n \<equiv> n * n"
-
-text{* The key difference is that @{const sq'} is only syntactic sugar:
-after parsing, @{term"sq' t"} is replaced by \mbox{@{term"t*t"}}, and
-before printing, every occurrence of @{term"u*u"} is replaced by
-\mbox{@{term"sq' u"}}.  Internally, @{const sq'} does not exist.
-This is the
-advantage of abbreviations over definitions: definitions need to be expanded
-explicitly (\autoref{sec:rewr-defs}) whereas abbreviations are already
-expanded upon parsing. However, abbreviations should be introduced sparingly:
-if abused, they can lead to a confusing discrepancy between the internal and
-external view of a term.
-
-The ASCII representation of @{text"\<equiv>"} is \texttt{==} or \xsymbol{equiv}.
-
-\subsection{Recursive Functions}
-\label{sec:recursive-funs}
-
-Recursive functions are defined with \indexed{\isacom{fun}}{fun} by pattern matching
-over datatype constructors. The order of equations matters, just as in
-functional programming languages. However, all HOL functions must be
-total. This simplifies the logic---terms are always defined---but means
-that recursive functions must terminate. Otherwise one could define a
-function @{prop"f n = f n + (1::nat)"} and conclude \mbox{@{prop"(0::nat) = 1"}} by
-subtracting @{term"f n"} on both sides.
-
-Isabelle's automatic termination checker requires that the arguments of
-recursive calls on the right-hand side must be strictly smaller than the
-arguments on the left-hand side. In the simplest case, this means that one
-fixed argument position decreases in size with each recursive call. The size
-is measured as the number of constructors (excluding 0-ary ones, e.g.\ @{text
-Nil}). Lexicographic combinations are also recognized. In more complicated
-situations, the user may have to prove termination by hand. For details
-see~\cite{Krauss}.
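-For example, the following function (a sketch, not used elsewhere in this
-text) is accepted automatically because a lexicographic combination of its
-two arguments decreases with every recursive call:
-\begin{quote}
-\isacom{fun} @{text"ack :: \"nat \<Rightarrow> nat \<Rightarrow> nat\""} \isacom{where}\\
-@{text"ack 0 n = Suc n"} @{text"|"}\\
-@{text"ack (Suc m) 0 = ack m 1"} @{text"|"}\\
-@{text"ack (Suc m) (Suc n) = ack m (ack (Suc m) n)"}
-\end{quote}
-The first argument decreases in the outer recursive calls, and where it
-stays the same, the second argument decreases.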
-
-Functions defined with \isacom{fun} come with their own induction schema
-that mirrors the recursion schema and is derived from the termination
-order. For example,
-*}
-
-fun div2 :: "nat \<Rightarrow> nat" where
-"div2 0 = 0" |
-"div2 (Suc 0) = 0" |
-"div2 (Suc(Suc n)) = Suc(div2 n)"
-
-text{* does not just define @{const div2} but also proves a
-customized induction rule:
-\[
-\inferrule{
-\mbox{@{thm (prem 1) div2.induct}}\\
-\mbox{@{thm (prem 2) div2.induct}}\\
-\mbox{@{thm (prem 3) div2.induct}}}
-{\mbox{@{thm (concl) div2.induct[of _ "m"]}}}
-\]
-This customized induction rule can simplify inductive proofs. For example,
-*}
-
-lemma "div2(n) = n div 2"
-apply(induction n rule: div2.induct)
-
-txt{* (where the infix @{text div} is the predefined division operation)
-yields the 3 subgoals
-@{subgoals[display,margin=65]}
-An application of @{text auto} finishes the proof.
-Had we used ordinary structural induction on @{text n},
-the proof would have needed an additional
-case analysis in the induction step.
-
-This example leads to the following induction heuristic:
-\begin{quote}
-\emph{Let @{text f} be a recursive function.
-If the definition of @{text f} is more complicated
-than having one equation for each constructor of some datatype,
-then properties of @{text f} are best proved via @{text "f.induct"}.\index{inductionrule@@{text".induct"}}}
-\end{quote}
-
-The general case is often called \concept{computation induction},
-because the induction follows the (terminating!) computation.
-For every defining equation
-\begin{quote}
-@{text "f(e) = \<dots> f(r\<^sub>1) \<dots> f(r\<^sub>k) \<dots>"}
-\end{quote}
-where @{text"f(r\<^sub>i)"}, @{text"i=1\<dots>k"}, are all the recursive calls,
-the induction rule @{text"f.induct"} contains one premise of the form
-\begin{quote}
-@{text"P(r\<^sub>1) \<Longrightarrow> \<dots> \<Longrightarrow> P(r\<^sub>k) \<Longrightarrow> P(e)"}
-\end{quote}
-If @{text "f :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> \<tau>"} then @{text"f.induct"} is applied like this:
-\begin{quote}
-\isacom{apply}@{text"(induction x\<^sub>1 \<dots> x\<^sub>n rule: f.induct)"}\index{inductionrule@@{text"induction ... rule:"}}
-\end{quote}
-where typically there is a call @{text"f x\<^sub>1 \<dots> x\<^sub>n"} in the goal.
-But note that the induction rule does not mention @{text f} at all,
-except in its name, and is applicable independently of @{text f}.
-
-
-\subsection*{Exercises}
-
-\begin{exercise}
-Starting from the type @{text "'a tree"} defined in the text, define
-a function @{text "contents ::"} @{typ "'a tree \<Rightarrow> 'a list"}
-that collects all values in a tree in a list, in any order,
-without removing duplicates.
-Then define a function @{text "treesum ::"} @{typ "nat tree \<Rightarrow> nat"}
-that sums up all values in a tree of natural numbers
-and prove @{prop "treesum t = listsum(contents t)"}.
-\end{exercise}
-
-\begin{exercise}
-Define a new type @{text "'a tree2"} of binary trees where values are also
-stored in the leaves of the tree.  Also reformulate the
-@{const mirror} function accordingly. Define two functions
-@{text "pre_order"} and @{text "post_order"} of type @{text "'a tree2 \<Rightarrow> 'a list"}
-that traverse a tree and collect all stored values in the respective order in
-a list. Prove @{prop "pre_order (mirror t) = rev (post_order t)"}.
-\end{exercise}
-
-\begin{exercise}
-Define a function @{text "intersperse ::"} @{typ "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"}
-such that @{text "intersperse a [x\<^sub>1, ..., x\<^sub>n] = [x\<^sub>1, a, x\<^sub>2, a, ..., a, x\<^sub>n]"}.
-Now prove that @{prop "map f (intersperse a xs) = intersperse (f a) (map f xs)"}.
-\end{exercise}
-
-
-\section{Induction Heuristics}\index{induction heuristics}
-
-We have already noted that theorems about recursive functions are proved by
-induction. In case the function has more than one argument, we have followed
-the following heuristic in the proofs about the append function:
-\begin{quote}
-\emph{Perform induction on argument number $i$\\
- if the function is defined by recursion on argument number $i$.}
-\end{quote}
-The key heuristic, and the main point of this section, is to
-\emph{generalize the goal before induction}.
-The reason is simple: if the goal is
-too specific, the induction hypothesis is too weak to allow the induction
-step to go through. Let us illustrate the idea with an example.
-
-Function @{const rev} has quadratic worst-case running time
-because it calls append for each element of the list and
-append is linear in its first argument.  A linear time version of
-@{const rev} requires an extra argument where the result is accumulated
-gradually, using only~@{text"#"}:
-*}
-(*<*)
-apply auto
-done
-(*>*)
-fun itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
-"itrev []        ys = ys" |
-"itrev (x#xs) ys = itrev xs (x#ys)"
-
-text{* The behaviour of @{const itrev} is simple: it reverses
-its first argument by stacking its elements onto the second argument,
-and it returns that second argument when the first one becomes
-empty. Note that @{const itrev} is tail-recursive: it can be
-compiled into a loop; no stack is necessary for executing it.
-
-Naturally, we would like to show that @{const itrev} does indeed reverse
-its first argument provided the second one is empty:
-*}
-
-lemma "itrev xs [] = rev xs"
-
-txt{* There is no choice as to the induction variable:
-*}
-
-apply(induction xs)
-apply(auto)
-
-txt{*
-Unfortunately, this attempt does not prove
-the induction step:
-@{subgoals[display,margin=70]}
-The induction hypothesis is too weak.  The fixed
-argument,~@{term"[]"}, prevents it from rewriting the conclusion.  
-This example suggests a heuristic:
-\begin{quote}
-\emph{Generalize goals for induction by replacing constants by variables.}
-\end{quote}
-Of course one cannot do this na\"{\i}vely: @{prop"itrev xs ys = rev xs"} is
-just not true.  The correct generalization is
-*};
-(*<*)oops;(*>*)
-lemma "itrev xs ys = rev xs @ ys"
-(*<*)apply(induction xs, auto)(*>*)
-txt{*
-If @{text ys} is replaced by @{term"[]"}, the right-hand side simplifies to
-@{term"rev xs"}, as required.
-In this instance it was easy to guess the right generalization.
-Other situations can require a good deal of creativity.  
-
-Although we now have two variables, only @{text xs} is suitable for
-induction, and we repeat our proof attempt. Unfortunately, we are still
-not there:
-@{subgoals[display,margin=65]}
-The induction hypothesis is still too weak, but this time it takes no
-intuition to generalize: the problem is that the @{text ys}
-in the induction hypothesis is fixed,
-but the induction hypothesis needs to be applied with
-@{term"a # ys"} instead of @{text ys}. Hence we prove the theorem
-for all @{text ys} instead of a fixed one. We can instruct induction
-to perform this generalization for us by adding @{text "arbitrary: ys"}\index{arbitrary@@{text"arbitrary:"}}.
-*}
-(*<*)oops
-lemma "itrev xs ys = rev xs @ ys"
-(*>*)
-apply(induction xs arbitrary: ys)
-
-txt{* The induction hypothesis in the induction step is now universally quantified over @{text ys}:
-@{subgoals[display,margin=65]}
-Thus the proof succeeds:
-*}
-
-apply auto
-done
-
-text{*
-This leads to another heuristic for generalization:
-\begin{quote}
-\emph{Generalize induction by generalizing all free
-variables\\ {\em(except the induction variable itself)}.}
-\end{quote}
-Generalization is best performed with @{text"arbitrary: y\<^sub>1 \<dots> y\<^sub>k"}. 
-This heuristic prevents trivial failures like the one above.
-However, it should not be applied blindly.
-It is not always required, and the additional quantifiers can complicate
-matters in some cases. The variables that need to be quantified are typically
-those that change in recursive calls.
-
-
-\subsection*{Exercises}
-
-\begin{exercise}
-Write a tail-recursive variant of the @{text add} function on @{typ nat}:
-@{term "itadd :: nat \<Rightarrow> nat \<Rightarrow> nat"}.
-Tail-recursive means that in the recursive case, @{text itadd} needs to call
-itself directly: \mbox{@{term"itadd (Suc m) n"}} @{text"= itadd \<dots>"}.
-Prove @{prop "itadd m n = add m n"}.
-\end{exercise}
-
-
-\section{Simplification}
-
-So far we have talked a lot about simplifying terms without explaining the concept.
-\conceptidx{Simplification}{simplification} means
-\begin{itemize}
-\item using equations $l = r$ from left to right (only),
-\item as long as possible.
-\end{itemize}
-To emphasize the directionality, equations that have been given the
-@{text"simp"} attribute are called \conceptidx{simplification rules}{simplification rule}.
-Logically, they are still symmetric, but proofs by
-simplification use them only in the left-to-right direction.  The proof tool
-that performs simplifications is called the \concept{simplifier}. It is the
-basis of @{text auto} and other related proof methods.
-
-The idea of simplification is best explained by an example. Given the
-simplification rules
-\[
-\begin{array}{rcl@ {\quad}l}
-@{term"0 + n::nat"} &@{text"="}& @{text n} & (1) \\
-@{term"(Suc m) + n"} &@{text"="}& @{term"Suc (m + n)"} & (2) \\
-@{text"(Suc m \<le> Suc n)"} &@{text"="}& @{text"(m \<le> n)"} & (3)\\
-@{text"(0 \<le> m)"} &@{text"="}& @{const True} & (4)
-\end{array}
-\]
-the formula @{prop"0 + Suc 0 \<le> Suc 0 + x"} is simplified to @{const True}
-as follows:
-\[
-\begin{array}{r@ {}c@ {}l@ {\quad}l}
-@{text"(0 + Suc 0"} & \leq & @{text"Suc 0 + x)"}  & \stackrel{(1)}{=} \\
-@{text"(Suc 0"}     & \leq & @{text"Suc 0 + x)"}  & \stackrel{(2)}{=} \\
-@{text"(Suc 0"}     & \leq & @{text"Suc (0 + x)"} & \stackrel{(3)}{=} \\
-@{text"(0"}         & \leq & @{text"0 + x)"}      & \stackrel{(4)}{=} \\[1ex]
- & @{const True}
-\end{array}
-\]
-Simplification is often also called \concept{rewriting}
-and simplification rules \conceptidx{rewrite rules}{rewrite rule}.
-
-\subsection{Simplification Rules}
-
-The attribute @{text"simp"} declares theorems to be simplification rules,
-which the simplifier will use automatically. In addition,
-\isacom{datatype} and \isacom{fun} commands implicitly declare some
-simplification rules: \isacom{datatype} the distinctness and injectivity
-rules, \isacom{fun} the defining equations. Definitions are not declared
-as simplification rules automatically! Nearly any theorem can become a
-simplification rule. The simplifier will try to transform it into an
-equation. For example, the theorem @{prop"\<not> P"} is turned into @{prop"P = False"}.
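-A theorem can be declared a simplification rule directly when it is stated,
-for example (a sketch with a hypothetical lemma name):
-\begin{quote}
-\isacom{lemma} @{text"rev_app [simp]: \"rev (xs @ ys) = rev ys @ rev xs\""}
-\end{quote}
-Alternatively, the attribute can be attached later with \isacom{declare},
-as was done for @{text"ev.intros"} earlier.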
-
-Only equations that really simplify, like @{prop"rev (rev xs) = xs"} and
-@{prop"xs @ [] = xs"}, should be declared as simplification
-rules. Equations that may be counterproductive as simplification rules
-should only be used in specific proof steps (see \S\ref{sec:simp} below).
-Distributivity laws, for example, alter the structure of terms
-and can produce an exponential blow-up.
-
-\subsection{Conditional Simplification Rules}
-
-Simplification rules can be conditional.  Before applying such a rule, the
-simplifier will first try to prove the preconditions, again by
-simplification. For example, given the simplification rules
-\begin{quote}
-@{prop"p(0::nat) = True"}\\
-@{prop"p(x) \<Longrightarrow> f(x) = g(x)"},
-\end{quote}
-the term @{term"f(0::nat)"} simplifies to @{term"g(0::nat)"}
-but @{term"f(1::nat)"} does not simplify because @{prop"p(1::nat)"}
-is not provable.
-
-\subsection{Termination}
-
-Simplification can run forever, for example if both @{prop"f x = g x"} and
-@{prop"g(x) = f(x)"} are simplification rules. It is the user's
-responsibility not to include simplification rules that can lead to
-nontermination, either on their own or in combination with other
-simplification rules. The right-hand side of a simplification rule should
-always be ``simpler'' than the left-hand side---in some sense. But since
-termination is undecidable, such a check cannot be automated completely
-and Isabelle makes little attempt to detect nontermination.
-
-When conditional simplification rules are applied, their preconditions are
-proved first. Hence all preconditions need to be
-simpler than the left-hand side of the conclusion. For example
-\begin{quote}
-@{prop "n < m \<Longrightarrow> (n < Suc m) = True"}
-\end{quote}
-is suitable as a simplification rule: both @{prop"n<m"} and @{const True}
-are simpler than \mbox{@{prop"n < Suc m"}}. But
-\begin{quote}
-@{prop "Suc n < m \<Longrightarrow> (n < m) = True"}
-\end{quote}
-leads to nontermination: when trying to rewrite @{prop"n<m"} to @{const True}
-one first has to prove \mbox{@{prop"Suc n < m"}}, which can be rewritten to @{const True} provided @{prop"Suc(Suc n) < m"}, \emph{ad infinitum}.
-
-\subsection{The \indexed{@{text simp}}{simp} Proof Method}
-\label{sec:simp}
-
-So far we have only used the proof method @{text auto}.  Method @{text simp}
-is the key component of @{text auto}, but @{text auto} can do much more. In
-some cases, @{text auto} is overeager and modifies the proof state too much.
-In such cases the more predictable @{text simp} method should be used.
-Given a goal
-\begin{quote}
-@{text"1. \<lbrakk> P\<^sub>1; \<dots>; P\<^sub>m \<rbrakk> \<Longrightarrow> C"}
-\end{quote}
-the command
-\begin{quote}
-\isacom{apply}@{text"(simp add: th\<^sub>1 \<dots> th\<^sub>n)"}
-\end{quote}
-simplifies the assumptions @{text "P\<^sub>i"} and the conclusion @{text C} using
-\begin{itemize}
-\item all simplification rules, including the ones coming from \isacom{datatype} and \isacom{fun},
-\item the additional lemmas @{text"th\<^sub>1 \<dots> th\<^sub>n"}, and
-\item the assumptions.
-\end{itemize}
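-For instance, a concrete invocation might be (a sketch; @{thm[source] algebra_simps}
-is a predefined list of theorems about the arithmetic operators):
-\begin{quote}
-\isacom{apply}@{text"(simp add: algebra_simps)"}
-\end{quote}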
-In addition to or instead of @{text add} there is also @{text del} for removing
-simplification rules temporarily. Both are optional. Method @{text auto}
-can be modified similarly:
-\begin{quote}
-\isacom{apply}@{text"(auto simp add: \<dots> simp del: \<dots>)"}
-\end{quote}
-Here the modifiers are @{text"simp add"} and @{text"simp del"}
-instead of just @{text add} and @{text del} because @{text auto}
-does not just perform simplification.
-
-Note that @{text simp} acts only on subgoal 1, whereas @{text auto} acts on all
-subgoals. There is also @{text simp_all}, which applies @{text simp} to
-all subgoals.
-
-\subsection{Rewriting With Definitions}
-\label{sec:rewr-defs}
-
-Definitions introduced by the command \isacom{definition}
-can also be used as simplification rules,
-but by default they are not: the simplifier does not expand them
-automatically. Definitions are intended for introducing abstract concepts and
-not merely as abbreviations. Of course, we need to expand the definition
-initially, but once we have proved enough abstract properties of the new
-constant, we can forget its original definition. This style makes proofs more
-robust: if the definition has to be changed, only the proofs of the abstract
-properties will be affected.
-
-The definition of a function @{text f} is a theorem named @{text f_def}
-and can be added to a call of @{text simp} just like any other theorem:
-\begin{quote}
-\isacom{apply}@{text"(simp add: f_def)"}
-\end{quote}
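-For instance, @{const sq} from above comes with the theorem @{text sq_def},
-so a goal mentioning @{const sq} could be unfolded by
-\isacom{apply}@{text"(simp add: sq_def)"}.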
-In particular, let-expressions can be unfolded by
-making @{thm[source] Let_def} a simplification rule.
-
-\subsection{Case Splitting With @{text simp}}
-
-Goals containing if-expressions are automatically split into two cases by
-@{text simp} using the rule
-\begin{quote}
-@{prop"P(if A then s else t) = ((A \<longrightarrow> P(s)) \<and> (~A \<longrightarrow> P(t)))"}
-\end{quote}
-For example, @{text simp} can prove
-\begin{quote}
-@{prop"(A \<and> B) = (if A then B else False)"}
-\end{quote}
-because both @{prop"A \<longrightarrow> (A & B) = B"} and @{prop"~A \<longrightarrow> (A & B) = False"}
-simplify to @{const True}.
-
-We can split case-expressions similarly. For @{text nat} the rule looks like this:
-@{prop[display,margin=65,indent=4]"P(case e of 0 \<Rightarrow> a | Suc n \<Rightarrow> b n) = ((e = 0 \<longrightarrow> P a) & (ALL n. e = Suc n \<longrightarrow> P(b n)))"}
-Case expressions are not split automatically by @{text simp}, but @{text simp}
-can be instructed to do so:
-\begin{quote}
-\isacom{apply}@{text"(simp split: nat.split)"}
-\end{quote}
-splits all case-expressions over natural numbers. For an arbitrary
-datatype @{text t} it is @{text "t.split"}\index{split@@{text".split"}} instead of @{thm[source] nat.split}.
-Method @{text auto} can be modified in exactly the same way.
-The modifier \indexed{@{text "split:"}}{split} can be followed by multiple names.
-Splitting if- or case-expressions in the assumptions requires 
-@{text "split: if_splits"} or @{text "split: t.splits"}.
-
-
-\subsection*{Exercises}
-
-\exercise\label{exe:tree0}
-Define a datatype @{text tree0} of binary tree skeletons which do not store
-any information, neither in the inner nodes nor in the leaves.
-Define a function @{text "nodes :: tree0 \<Rightarrow> nat"} that counts the number of
-all nodes (inner nodes and leaves) in such a tree.
-Consider the following recursive function:
-*}
-(*<*)
-datatype tree0 = Tip | Node tree0 tree0
-(*>*)
-fun explode :: "nat \<Rightarrow> tree0 \<Rightarrow> tree0" where
-"explode 0 t = t" |
-"explode (Suc n) t = explode n (Node t t)"
-
-text {*
-Find an equation expressing the size of a tree after exploding it
-(\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
-of @{term "nodes t"} and @{text n}. Prove your equation.
-You may use the usual arithmetic operators including the exponentiation
-operator ``@{text"^"}''. For example, \noquotes{@{prop [source] "2 ^ 2 = 4"}}.
-
-Hint: simplifying with the list of theorems @{thm[source] algebra_simps}
-takes care of common algebraic properties of the arithmetic operators.
-\endexercise
-
-\exercise
-Define arithmetic expressions in one variable over integers (type @{typ int})
-as a data type:
-*}
-
-datatype exp = Var | Const int | Add exp exp | Mult exp exp
-
-text{*
-Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
-such that @{term"eval e x"} evaluates @{text e} at the value
-@{text x}.
-
-A polynomial can be represented as a list of coefficients, starting with
-the constant. For example, @{term "[4, 2, -1, 3::int]"} represents the
-polynomial $4 + 2x - x^2 + 3x^3$.
-Define a function \noquotes{@{term [source] "evalp :: int list \<Rightarrow> int \<Rightarrow> int"}}
-that evaluates a polynomial at the given value.
-Define a function \noquotes{@{term[source] "coeffs :: exp \<Rightarrow> int list"}}
-that transforms an expression into a polynomial. This may require auxiliary
-functions. Prove that @{text coeffs} preserves the value of the expression:
-\mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
-Hint: consider the hint in Exercise~\ref{exe:tree0}.
-\endexercise
-*}
-(*<*)
-end
-(*>*)
Binary file src/Doc/ProgProve/document/bang.pdf has changed
--- a/src/Doc/ProgProve/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-FORMAT="$1"
-VARIANT="$2"
-
-"$ISABELLE_TOOL" logo HOL
-
-cp "$ISABELLE_HOME/src/Doc/ProgProve/MyList.thy" .
-
-"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
-
--- a/src/Doc/ProgProve/document/intro-isabelle.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,94 +0,0 @@
-Isabelle is a generic system for
-implementing logical formalisms, and Isabelle/HOL is the specialization
-of Isabelle for HOL, which abbreviates Higher-Order Logic. We introduce
-HOL step by step following the equation
-\[ \mbox{HOL} = \mbox{Functional Programming} + \mbox{Logic}. \]
-We assume that the reader is used to logical and set theoretic notation
-and is familiar with the basic concepts of functional programming.
-\ifsem
-Open-minded readers have been known to pick up functional
-programming through the wealth of examples in \autoref{sec:FP}
-and \autoref{sec:CaseStudyExp}.
-\fi
-
-\autoref{sec:FP} introduces HOL as a functional programming language and
-explains how to write simple inductive proofs of mostly equational properties
-of recursive functions.
-\ifsem
-\autoref{sec:CaseStudyExp} contains a
-small case study: arithmetic and boolean expressions, their evaluation,
-optimization and compilation.
-\fi
-\autoref{ch:Logic} introduces the rest of HOL: the
-language of formulas beyond equality, automatic proof tools, single
-step proofs, and inductive definitions, an essential specification construct.
-\autoref{ch:Isar} introduces Isar, Isabelle's language for writing structured
-proofs.
-
-%Further material (slides, demos etc) can be found online at
-%\url{http://www.in.tum.de/~nipkow}.
-
-% Relics:
-% We aim to minimise the amount of background knowledge of logic we expect
-% from the reader
-% We have focussed on the core material
-% in the intersection of computation and logic.
-
-This introduction to the core of Isabelle is intentionally concrete and
-example-based: we concentrate on examples that illustrate the typical cases
-without explaining the general case if it can be inferred from the examples.
-We cover the essentials (from a functional programming point of view) as
-quickly and compactly as possible.
-\ifsem
-After all, this book is primarily about semantics.
-\fi
-
-For a comprehensive treatment of all things Isabelle we recommend the
-\emph{Isabelle/Isar Reference Manual}~\cite{IsarRef}, which comes with the
-Isabelle distribution.
-The tutorial by Nipkow, Paulson and Wenzel~\cite{LNCS2283} (in its updated version that comes with the Isabelle distribution) is still recommended for the wealth of examples and material, but its proof style is outdated. In particular it does not cover the structured proof language Isar.
-
-%This introduction to Isabelle has grown out of many years of teaching
-%Isabelle courses. 
-
-\ifsem
-\subsection*{Getting Started with Isabelle}
-
-If you have not done so already, download and install Isabelle
-from \url{http://isabelle.in.tum.de}. You can start it by clicking
-on the application icon. This will launch Isabelle's
-user interface based on the text editor \concept{jedit}. Below you see
-a typical example snapshot of a jedit session. At this point we merely explain
-the layout of the window, not its contents.
-
-\begin{center}
-\includegraphics[width=\textwidth]{jedit.png}
-\end{center}
-The upper part of the window shows the input typed by the user, i.e.\ the
-gradually growing Isabelle text of definitions, theorems, proofs, etc.  The
-interface processes the user input automatically while it is typed, just like
-modern Java IDEs.  Isabelle's response to the user input is shown in the
-lower part of the window. You can examine the response to any input phrase
-by clicking on that phrase or by hovering over underlined text.
-
-This should suffice to get started with the jedit interface.
-Now you need to learn what to type into it.
-\else
-If you want to apply what you have learned about Isabelle, we recommend that
-you download and read the book
-\href{http://www.in.tum.de/~nipkow/Concrete/}{Concrete
-Semantics}~\cite{ConcreteSemantics}, a guided tour of the wonderful world of
-programming language semantics formalised in Isabelle.  In fact,
-\emph{Programming and Proving in Isabelle/HOL} constitutes part~I of
-\href{http://www.in.tum.de/~nipkow/Concrete/}{Concrete Semantics}.  The web
-pages for \href{http://www.in.tum.de/~nipkow/Concrete/}{Concrete Semantics}
-also provide a set of \LaTeX-based slides for teaching \emph{Programming and
-Proving in Isabelle/HOL}.
-\fi
-
-\ifsem\else
-\paragraph{Acknowledgements}
-I wish to thank the following people for their comments on this document:
-Florian Haftmann, Ren\'{e} Thiemann, Sean Seefried, Christian Sternagel
-and Carl Witty.
-\fi
\ No newline at end of file
--- a/src/Doc/ProgProve/document/mathpartir.sty	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,388 +0,0 @@
-%  Mathpartir --- Math Paragraph for Typesetting Inference Rules
-%
-%  Copyright (C) 2001, 2002, 2003 Didier Rémy
-%
-%  Author         : Didier Remy 
-%  Version        : 1.1.1
-%  Bug Reports    : to author
-%  Web Site       : http://pauillac.inria.fr/~remy/latex/
-% 
-%  WhizzyTeX is free software; you can redistribute it and/or modify
-%  it under the terms of the GNU General Public License as published by
-%  the Free Software Foundation; either version 2, or (at your option)
-%  any later version.
-%  
-%  Mathpartir is distributed in the hope that it will be useful,
-%  but WITHOUT ANY WARRANTY; without even the implied warranty of
-%  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-%  GNU General Public License for more details 
-%  (http://pauillac.inria.fr/~remy/license/GPL).
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%  File mathpartir.sty (LaTeX macros)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesPackage{mathpartir}
-    [2003/07/10 version 1.1.1 Math Paragraph for Typesetting Inference Rules]
-
-%%
-
-%% Identification
-%% Preliminary declarations
-
-\RequirePackage {keyval}
-
-%% Options
-%% More declarations
-
-%% PART I: Typesetting maths in paragraph mode
-
-\newdimen \mpr@tmpdim
-
-% To ensure hevea \hva compatibility, \hva should expands to nothing 
-% in mathpar or in inferrule
-\let \mpr@hva \empty
-
-%% normal paragraph parameters, should rather be taken dynamically
-\def \mpr@savepar {%
-  \edef \MathparNormalpar
-     {\noexpand \lineskiplimit \the\lineskiplimit
-      \noexpand \lineskip \the\lineskip}%
-  }
-
-\def \mpr@rulelineskip {\lineskiplimit=0.3em\lineskip=0.2em plus 0.1em}
-\def \mpr@lesslineskip {\lineskiplimit=0.6em\lineskip=0.5em plus 0.2em}
-\def \mpr@lineskip  {\lineskiplimit=1.2em\lineskip=1.2em plus 0.2em}
-\let \MathparLineskip \mpr@lineskip
-\def \mpr@paroptions {\MathparLineskip}
-\let \mpr@prebindings \relax
-
-\newskip \mpr@andskip \mpr@andskip 2em plus 0.5fil minus 0.5em
-
-\def \mpr@goodbreakand
-   {\hskip -\mpr@andskip  \penalty -1000\hskip \mpr@andskip}
-\def \mpr@and {\hskip \mpr@andskip}
-\def \mpr@andcr {\penalty 50\mpr@and}
-\def \mpr@cr {\penalty -10000\mpr@and}
-\def \mpr@eqno #1{\mpr@andcr #1\hskip 0em plus -1fil \penalty 10}
-
-\def \mpr@bindings {%
-  \let \and \mpr@andcr
-  \let \par \mpr@andcr
-  \let \\\mpr@cr
-  \let \eqno \mpr@eqno
-  \let \hva \mpr@hva
-  } 
-\let \MathparBindings \mpr@bindings
-
-% \@ifundefined {ignorespacesafterend}
-%    {\def \ignorespacesafterend {\aftergroup \ignorespaces}
-
-\newenvironment{mathpar}[1][]
-  {$$\mpr@savepar \parskip 0em \hsize \linewidth \centering
-     \vbox \bgroup \mpr@prebindings \mpr@paroptions #1\ifmmode $\else
-     \noindent $\displaystyle\fi
-     \MathparBindings}
-  {\unskip \ifmmode $\fi\egroup $$\ignorespacesafterend}
-
-% \def \math@mathpar #1{\setbox0 \hbox {$\displaystyle #1$}\ifnum
-%     \wd0 < \hsize  $$\box0$$\else \bmathpar #1\emathpar \fi}
-
-%%% HOV BOXES
-
-\def \mathvbox@ #1{\hbox \bgroup \mpr@normallineskip 
-  \vbox \bgroup \tabskip 0em \let \\ \cr
-  \halign \bgroup \hfil $##$\hfil\cr #1\crcr \egroup \egroup
-  \egroup}
-
-\def \mathhvbox@ #1{\setbox0 \hbox {\let \\\qquad $#1$}\ifnum \wd0 < \hsize
-      \box0\else \mathvbox {#1}\fi}
-
-
-%% Part II -- operations on lists
-
-\newtoks \mpr@lista
-\newtoks \mpr@listb
-
-\long \def\mpr@cons #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
-{#2}\edef #2{\the \mpr@lista \the \mpr@listb}}
-
-\long \def\mpr@snoc #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
-{#2}\edef #2{\the \mpr@listb\the\mpr@lista}}
-
-\long \def \mpr@concat#1=#2\mpr@to#3{\mpr@lista \expandafter {#2}\mpr@listb
-\expandafter {#3}\edef #1{\the \mpr@listb\the\mpr@lista}}
-
-\def \mpr@head #1\mpr@to #2{\expandafter \mpr@head@ #1\mpr@head@ #1#2}
-\long \def \mpr@head@ #1#2\mpr@head@ #3#4{\def #4{#1}\def#3{#2}}
-
-\def \mpr@flatten #1\mpr@to #2{\expandafter \mpr@flatten@ #1\mpr@flatten@ #1#2}
-\long \def \mpr@flatten@ \\#1\\#2\mpr@flatten@ #3#4{\def #4{#1}\def #3{\\#2}}
-
-\def \mpr@makelist #1\mpr@to #2{\def \mpr@all {#1}%
-   \mpr@lista {\\}\mpr@listb \expandafter {\mpr@all}\edef \mpr@all {\the
-   \mpr@lista \the \mpr@listb \the \mpr@lista}\let #2\empty 
-   \def \mpr@stripof ##1##2\mpr@stripend{\def \mpr@stripped{##2}}\loop
-     \mpr@flatten \mpr@all \mpr@to \mpr@one
-     \expandafter \mpr@snoc \mpr@one \mpr@to #2\expandafter \mpr@stripof
-     \mpr@all \mpr@stripend  
-     \ifx \mpr@stripped \empty \let \mpr@isempty 0\else \let \mpr@isempty 1\fi
-     \ifx 1\mpr@isempty
-   \repeat
-}
-
-%% Part III -- Type inference rules
-
-\def \mpr@rev #1\mpr@to #2{\let \mpr@tmp \empty
-   \def \\##1{\mpr@cons ##1\mpr@to \mpr@tmp}#1\let #2\mpr@tmp}
-
-\newif \if@premisse
-\newbox \mpr@hlist
-\newbox \mpr@vlist
-\newif \ifmpr@center \mpr@centertrue
-\def \mpr@htovlist {%
-   \setbox \mpr@hlist
-      \hbox {\strut
-             \ifmpr@center \hskip -0.5\wd\mpr@hlist\fi
-             \unhbox \mpr@hlist}%
-   \setbox \mpr@vlist
-      \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
-             \else \unvbox \mpr@vlist \box \mpr@hlist
-             \fi}%
-}
-% OLD version
-% \def \mpr@htovlist {%
-%    \setbox \mpr@hlist
-%       \hbox {\strut \hskip -0.5\wd\mpr@hlist \unhbox \mpr@hlist}%
-%    \setbox \mpr@vlist
-%       \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
-%              \else \unvbox \mpr@vlist \box \mpr@hlist
-%              \fi}%
-% }
-
-\def \mpr@sep{1.5em}
-\def \mpr@blank { }
-\def \mpr@hovbox #1#2{\hbox
-  \bgroup
-  \ifx #1T\@premissetrue
-  \else \ifx #1B\@premissefalse
-  \else
-     \PackageError{mathpartir}
-       {Premisse orientation should either be T or B}
-       {Fatal error in Package}%
-  \fi \fi
-  \def \@test {#2}\ifx \@test \mpr@blank\else
-  \setbox \mpr@hlist \hbox {}%
-  \setbox \mpr@vlist \vbox {}%
-  \if@premisse \let \snoc \mpr@cons \else \let \snoc \mpr@snoc \fi
-  \let \@hvlist \empty \let \@rev \empty
-  \mpr@tmpdim 0em
-  \expandafter \mpr@makelist #2\mpr@to \mpr@flat
-  \if@premisse \mpr@rev \mpr@flat \mpr@to \@rev \else \let \@rev \mpr@flat \fi
-  \def \\##1{%
-     \def \@test {##1}\ifx \@test \empty
-        \mpr@htovlist
-        \mpr@tmpdim 0em %%% last bug fix not extensively checked
-     \else
-      \setbox0 \hbox{$\displaystyle {##1}$}\relax
-      \advance \mpr@tmpdim by \wd0
-      %\mpr@tmpdim 1.02\mpr@tmpdim
-      \ifnum \mpr@tmpdim < \hsize
-         \ifnum \wd\mpr@hlist > 0
-           \if@premisse
-             \setbox \mpr@hlist 
-                \hbox {\unhbox0 \hskip \mpr@sep \unhbox \mpr@hlist}%
-           \else
-             \setbox \mpr@hlist
-                \hbox {\unhbox \mpr@hlist  \hskip \mpr@sep \unhbox0}%
-           \fi
-         \else 
-         \setbox \mpr@hlist \hbox {\unhbox0}%
-         \fi
-      \else
-         \ifnum \wd \mpr@hlist > 0
-            \mpr@htovlist 
-            \mpr@tmpdim \wd0
-         \fi
-         \setbox \mpr@hlist \hbox {\unhbox0}%
-      \fi
-      \advance \mpr@tmpdim by \mpr@sep
-   \fi
-   }%
-   \@rev
-   \mpr@htovlist
-   \ifmpr@center \hskip \wd\mpr@vlist\fi \box \mpr@vlist
-   \fi
-   \egroup
-}
-
-%%% INFERENCE RULES
-
-\@ifundefined{@@over}{%
-    \let\@@over\over % fallback if amsmath is not loaded
-    \let\@@overwithdelims\overwithdelims
-    \let\@@atop\atop \let\@@atopwithdelims\atopwithdelims
-    \let\@@above\above \let\@@abovewithdelims\abovewithdelims
-  }{}
-
-
-\def \mpr@@fraction #1#2{\hbox {\advance \hsize by -0.5em
-    $\displaystyle {#1\@@over #2}$}}
-\let \mpr@fraction \mpr@@fraction
-\def \mpr@@reduce #1#2{\hbox
-    {$\lower 0.01pt \mpr@@fraction {#1}{#2}\mkern -15mu\rightarrow$}}
-\def \mpr@@rewrite #1#2#3{\hbox
-    {$\lower 0.01pt \mpr@@fraction {#2}{#3}\mkern -8mu#1$}}
-\def \mpr@infercenter #1{\vcenter {\mpr@hovbox{T}{#1}}}
-
-\def \mpr@empty {}
-\def \mpr@inferrule
-  {\bgroup
-     \ifnum \linewidth<\hsize \hsize \linewidth\fi
-     \mpr@rulelineskip
-     \let \and \qquad
-     \let \hva \mpr@hva
-     \let \@rulename \mpr@empty
-     \let \@rule@options \mpr@empty
-     \mpr@inferrule@}
-\newcommand {\mpr@inferrule@}[3][]
-  {\everymath={\displaystyle}%       
-   \def \@test {#2}\ifx \empty \@test
-      \setbox0 \hbox {$\vcenter {\mpr@hovbox{B}{#3}}$}%
-   \else 
-   \def \@test {#3}\ifx \empty \@test
-      \setbox0 \hbox {$\vcenter {\mpr@hovbox{T}{#2}}$}%
-   \else
-   \setbox0 \mpr@fraction {\mpr@hovbox{T}{#2}}{\mpr@hovbox{B}{#3}}%
-   \fi \fi
-   \def \@test {#1}\ifx \@test\empty \box0
-   \else \vbox 
-%%% Suggestion de Francois pour les etiquettes longues
-%%%   {\hbox to \wd0 {\RefTirName {#1}\hfil}\box0}\fi
-      {\hbox {\RefTirName {#1}}\box0}\fi
-   \egroup}
-
-\def \mpr@vdotfil #1{\vbox to #1{\leaders \hbox{$\cdot$} \vfil}}
-
-% There are two forms
-% \inferrule [label]{premisses}{conclusions}
-% or
-% \inferrule* [options]{premisses}{conclusions}
-%
-% Premisses and conclusions are lists of elements separated by \\
-% Each \\ produces a break, attempting horizontal breaks if possible, 
-% and  vertical breaks if needed. 
-% 
-% An empty element obtained by \\\\ produces a vertical break in all cases. 
-%
-% The former rule is aligned on the fraction bar. 
-% The optional label appears on top of the rule
-% The second form to be used in a derivation tree is aligned on the last
-% line of its conclusion
-% 
-% The second form can be parameterized, using the key=val interface. The
-% following keys are recognized:
-%       
-%  width                set the width of the rule to val
-%  narrower             set the width of the rule to val\hsize
-%  before               execute val at the beginning/left
-%  lab                  put a label [Val] on top of the rule
-%  lskip                add negative skip on the right
-%  left                 put a left label [Val]
-%  Left                 put a left label [Val],  ignoring its width 
-%  right                put a right label [Val]
-%  Right                put a right label [Val], ignoring its width
-%  leftskip             skip negative space on the left-hand side
-%  rightskip            skip negative space on the right-hand side
-%  vdots                lift the rule by val and fill vertical space with dots
-%  after                execute val at the end/right
-%  
-%  Note that most options must come in this order to avoid strange
-%  typesetting (in particular  leftskip must precede left and Left and
-%  rightskip must follow Right or right; vdots must come last 
-%  or be only followed by rightskip. 
-%  
-
-\define@key {mprset}{flushleft}[]{\mpr@centerfalse}
-\define@key {mprset}{center}[]{\mpr@centertrue}
-\def \mprset #1{\setkeys{mprset}{#1}}
-
-\newbox \mpr@right
-\define@key {mpr}{flushleft}[]{\mpr@centerfalse}
-\define@key {mpr}{center}[]{\mpr@centertrue}
-\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
-     \advance \hsize by -\wd0\box0}
-\define@key {mpr}{width}{\hsize #1}
-\define@key {mpr}{sep}{\def\mpr@sep{#1}}
-\define@key {mpr}{before}{#1}
-\define@key {mpr}{lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
-\define@key {mpr}{Lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
-\define@key {mpr}{narrower}{\hsize #1\hsize}
-\define@key {mpr}{leftskip}{\hskip -#1}
-\define@key {mpr}{reduce}[]{\let \mpr@fraction \mpr@@reduce}
-\define@key {mpr}{rightskip}
-  {\setbox \mpr@right \hbox {\unhbox \mpr@right \hskip -#1}}
-\define@key {mpr}{LEFT}{\setbox0 \hbox {$#1$}\relax
-     \advance \hsize by -\wd0\box0}
-\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
-     \advance \hsize by -\wd0\box0}
-\define@key {mpr}{Left}{\llap{$\TirName {#1}\;$}}
-\define@key {mpr}{right}
-  {\setbox0 \hbox {$\;\TirName {#1}$}\relax \advance \hsize by -\wd0
-   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
-\define@key {mpr}{RIGHT}
-  {\setbox0 \hbox {$#1$}\relax \advance \hsize by -\wd0
-   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
-\define@key {mpr}{Right}
-  {\setbox \mpr@right \hbox {\unhbox \mpr@right \rlap {$\;\TirName {#1}$}}}
-\define@key {mpr}{vdots}{\def \mpr@vdots {\@@atop \mpr@vdotfil{#1}}}
-\define@key {mpr}{after}{\edef \mpr@after {\mpr@after #1}}
-
-\newdimen \rule@dimen
-\newcommand \mpr@inferstar@ [3][]{\setbox0
-  \hbox {\let \mpr@rulename \mpr@empty \let \mpr@vdots \relax
-         \setbox \mpr@right \hbox{}%
-         $\setkeys{mpr}{#1}%
-          \ifx \mpr@rulename \mpr@empty \mpr@inferrule {#2}{#3}\else
-          \mpr@inferrule [{\mpr@rulename}]{#2}{#3}\fi
-          \box \mpr@right \mpr@vdots$}
-  \setbox1 \hbox {\strut}
-  \rule@dimen \dp0 \advance \rule@dimen by -\dp1
-  \raise \rule@dimen \box0}
-
-\def \mpr@infer {\@ifnextchar *{\mpr@inferstar}{\mpr@inferrule}}
-\newcommand \mpr@err@skipargs[3][]{}
-\def \mpr@inferstar*{\ifmmode 
-    \let \@do \mpr@inferstar@
-  \else 
-    \let \@do \mpr@err@skipargs
-    \PackageError {mathpartir}
-      {\string\inferrule* can only be used in math mode}{}%
-  \fi \@do}
-
-
-%%% Exports
-
-% Environment mathpar
-
-\let \inferrule \mpr@infer
-
-% make a short name \infer if not already defined
-\@ifundefined {infer}{\let \infer \mpr@infer}{}
-
-\def \tir@name #1{\hbox {\small \sc #1}}
-\let \TirName \tir@name
-\let \RefTirName \tir@name
-
-%%% Other Exports
-
-% \let \listcons \mpr@cons
-% \let \listsnoc \mpr@snoc
-% \let \listhead \mpr@head
-% \let \listmake \mpr@makelist
-
-
-
-
-\endinput
--- a/src/Doc/ProgProve/document/prelude.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-\usepackage{makeidx}         % allows index generation
-\usepackage{graphicx}        % standard LaTeX graphics tool
-                             % when including figure files
-\usepackage{multicol}        % used for the two-column index
-\usepackage[bottom]{footmisc}% places footnotes at page bottom
-\usepackage{alltt}
-
-\usepackage[T1]{fontenc}
-\usepackage{ccfonts}
-\usepackage[euler-digits]{eulervm}
-
-\usepackage{isabelle,isabellesym}
-\usepackage{mathpartir}
-\usepackage{amssymb}
-
-\renewcommand*\descriptionlabel[1]{\hspace\labelsep \textbf{#1}\hfil}
-
-% this should be the last package used
-\usepackage{xcolor}
-\definecolor{linkcolor}{rgb}{0,0,0.4}
-\usepackage[colorlinks=true,linkcolor=linkcolor,citecolor=linkcolor,
-            filecolor=linkcolor,pagecolor=linkcolor,
-            urlcolor=linkcolor]{hyperref}
-
-% urls in roman style, theory text in math-similar italics
-\urlstyle{tt}
-\isabellestyle{it}
-
-\renewcommand{\isadigit}[1]{\ensuremath{#1}}
-
-% font size
-\renewcommand{\isastyle}{\isastyleminor}
-
-% indexing
-\usepackage{ifthen}
-\newcommand{\indexdef}[3]%
-{\ifthenelse{\equal{}{#1}}{\index{#3 (#2)|bold}}{\index{#3 (#1\ #2)|bold}}}
-
-% section commands
-\renewcommand{\chapterautorefname}{Chapter}
-\renewcommand{\sectionautorefname}{Section}
-\renewcommand{\subsectionautorefname}{Section}
-
-\renewcommand{\isamarkupheader}[1]{{\rmfamily\subsection{#1}}}
-\renewcommand{\isamarkupsection}[1]{{\rmfamily\subsection{#1}}}
-\renewcommand{\isamarkupsubsection}[1]{{\rmfamily\subsubsection{#1}}}
-% isabelle in-text command font
-\newcommand{\isacom}[1]{\isa{\isacommand{#1}}}
-
-\protected\def\isacharunderscore{\raisebox{2pt}{\_\kern-1.7pt}}
-\protected\def\isacharunderscorekeyword{\raisebox{2pt}{\_}}
-% isabelle keyword, adapted from isabelle.sty
-\renewcommand{\isakeyword}[1]
-{\emph{\def\isachardot{.}\def\isacharunderscore{\isacharunderscorekeyword}%
-\def\isacharbraceleft{\{}\def\isacharbraceright{\}}\def\bfdefault{sbc}\textbf{#1}}}
-
-% add \noindent to text blocks automatically
-\renewenvironment{isamarkuptext}{\par\isastyletext\begin{isapar}\noindent}{\end{isapar}}
-\renewenvironment{isamarkuptxt}{\par\isastyletext\begin{isapar}\noindent}{\end{isapar}}
-
-\newcommand{\noquotes}[1]{{\renewcommand{\isachardoublequote}{}\renewcommand{\isachardoublequoteopen}{}\renewcommand{\isachardoublequoteclose}{}#1}}
-\newcommand{\xsymbol}[1]{\texttt{\char`\\\char`<#1>}}
-
-%index
-\newcommand{\conceptnoidx}[1]{\textbf{#1}}
-\newcommand{\concept}[1]{\conceptnoidx{#1}\index{#1}}
-\newcommand{\conceptidx}[2]{\conceptnoidx{#1}\index{#2}}
-\newcommand{\indexed}[2]{#1\index{#2@\protect#1}}
-
-\newcommand{\isabox}{\fbox}
-\newcommand{\bigisabox}[1]
-{\isabox{\renewcommand{\isanewline}{\\}%
- \begin{tabular}{l}#1\end{tabular}}}
-
-%%%% ``WARNING'' environment: 2 ! characters separated by negative thin space
-%\def\warnbang{\vtop to 0pt{\vss\hbox{\let\bfdefault=\bfdefaultold\Huge\textbf{!$\!$!}}\vss}}
-
-\def\warnbang{\vtop to 0pt{\vss\hbox{\includegraphics[width=3ex, height=5.5ex]{bang}}\vss}}
-
-\newenvironment{warn}{\begin{trivlist}\small\item[]\noindent%
-    \begingroup\hangindent=\parindent\hangafter=-2%\clubpenalty=10000%
-    \def\par{\endgraf\endgroup}%
-    \hbox to0pt{\hskip-\hangindent\warnbang\hfill}\ignorespaces}
-  {\par\end{trivlist}}
-
-\chardef\isachardoublequote=`\"%
-\chardef\isachardoublequoteopen=`\"%
-\chardef\isachardoublequoteclose=`\"%
-
-\renewcommand{\isacharbackquoteopen}{\isacharbackquote}
-\renewcommand{\isacharbackquoteclose}{\isacharbackquote}
-
-\hyphenation{Isa-belle}
--- a/src/Doc/ProgProve/document/root.bib	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-@string{CUP="Cambridge University Press"}
-@string{LNCS="Lect.\ Notes in Comp.\ Sci."}
-@string{Springer="Springer-Verlag"}
-
-@book{HuthRyan,author="Michael Huth and Mark Ryan",
-title={Logic in Computer Science},publisher=CUP,year=2004}
-
-@manual{Krauss,author={Alexander Krauss},
-title={Defining Recursive Functions in Isabelle/HOL},
-note={\url{http://isabelle.in.tum.de/doc/functions.pdf}}}
-
-@manual{Nipkow-Main,author={Tobias Nipkow},title={What's in Main},
-note={\url{http://isabelle.in.tum.de/doc/main.pdf}}}
-
-@book{LNCS2283,author={Tobias Nipkow and Lawrence Paulson and Markus Wenzel},
-title="Isabelle/HOL --- A Proof Assistant for Higher-Order Logic",
-publisher=Springer,series=LNCS,volume=2283,year=2002}
-
-@book{ConcreteSemantics,author={Tobias Nipkow and Gerwin Klein},
-title="Concrete Semantics. A Proof Assistant Approach",
-publisher={\url{http://www.in.tum.de/~nipkow/Concrete}},year=2013}
-
-@manual{IsarRef,author={Makarius Wenzel},
-title={The Isabelle/Isar Reference Manual},
-note={\url{http://isabelle.in.tum.de/doc/isar-ref.pdf}}}
-
--- a/src/Doc/ProgProve/document/root.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-\documentclass[envcountsame,envcountchap]{svmono}
-
-\input{prelude}
-
-\newif\ifsem
-
-\begin{document}
-
-\title{Programming and Proving in Isabelle/HOL}
-\subtitle{\includegraphics[scale=.7]{isabelle_hol}}
-\author{Tobias Nipkow}
-\maketitle
-
-\frontmatter%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\setcounter{tocdepth}{1}
-\tableofcontents
-
-
-\mainmatter%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%\part{Isabelle}
-
-\chapter{Introduction}
-\input{intro-isabelle.tex}
-
-\chapter{Programming and Proving}
-\label{sec:FP}
-\input{Basics.tex}
-\input{Bool_nat_list}
-\input{Types_and_funs}
-
-%\chapter{Case Study: IMP Expressions}
-%\label{sec:CaseStudyExp}
-%\input{../generated/Expressions}
-
-\chapter{Logic and Proof Beyond Equality}
-\label{ch:Logic}
-\input{Logic}
-
-\chapter{Isar: A Language for Structured Proofs}
-\label{ch:Isar}
-\input{Isar}
-
-\backmatter%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\bibliographystyle{plain}
-\bibliography{root}
-
-%\printindex
-
-\end{document}
--- a/src/Doc/ProgProve/document/svmono.cls	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1691 +0,0 @@
-% SVMONO DOCUMENT CLASS -- version 4.17 (31-Oct-06)
-% Springer Verlag global LaTeX2e support for monographs
-%%
-%%
-%% \CharacterTable
-%%  {Upper-case    \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z
-%%   Lower-case    \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z
-%%   Digits        \0\1\2\3\4\5\6\7\8\9
-%%   Exclamation   \!     Double quote  \"     Hash (number) \#
-%%   Dollar        \$     Percent       \%     Ampersand     \&
-%%   Acute accent  \'     Left paren    \(     Right paren   \)
-%%   Asterisk      \*     Plus          \+     Comma         \,
-%%   Minus         \-     Point         \.     Solidus       \/
-%%   Colon         \:     Semicolon     \;     Less than     \<
-%%   Equals        \=     Greater than  \>     Question mark \?
-%%   Commercial at \@     Left bracket  \[     Backslash     \\
-%%   Right bracket \]     Circumflex    \^     Underscore    \_
-%%   Grave accent  \`     Left brace    \{     Vertical bar  \|
-%%   Right brace   \}     Tilde         \~}
-%%
-\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesClass{svmono}[2006/10/31 v4.17
-^^JSpringer Verlag global LaTeX document class for monographs]
-
-% Options
-% citations
-\DeclareOption{natbib}{\ExecuteOptions{oribibl}%
-\AtEndOfClass{% Loading package 'NATBIB'
-\RequirePackage{natbib}
-% Changing some parameters of NATBIB
-\setlength{\bibhang}{\parindent}
-%\setlength{\bibsep}{0mm}
-\let\bibfont=\small
-\def\@biblabel#1{#1.}
-\newcommand{\etal}{\textit{et al}.}
-%\bibpunct[,]{(}{)}{;}{a}{}{,}}}
-}}
-% Springer environment
-\let\if@spthms\iftrue
-\DeclareOption{nospthms}{\let\if@spthms\iffalse}
-%
-\let\envankh\@empty   % no anchor for "theorems"
-%
-\let\if@envcntreset\iffalse % environment counter is not reset
-\let\if@envcntresetsect=\iffalse % reset each section?
-\DeclareOption{envcountresetchap}{\let\if@envcntreset\iftrue}
-\DeclareOption{envcountresetsect}{\let\if@envcntreset\iftrue
-\let\if@envcntresetsect=\iftrue}
-%
-\let\if@envcntsame\iffalse  % NOT all environments work like "Theorem",
-                            % each using its own counter
-\DeclareOption{envcountsame}{\let\if@envcntsame\iftrue}
-%
-\let\if@envcntshowhiercnt=\iffalse % do not show hierarchy counter at all
-%
-% enhance theorem counter
-\DeclareOption{envcountchap}{\def\envankh{chapter}% show \thechapter along with theorem number
-\let\if@envcntshowhiercnt=\iftrue
-\ExecuteOptions{envcountreset}}
-%
-\DeclareOption{envcountsect}{\def\envankh{section}% show \thesection along with theorem number
-\let\if@envcntshowhiercnt=\iftrue
-\ExecuteOptions{envcountreset}}
-%
-% languages
-\let\switcht@@therlang\relax
-\let\svlanginfo\relax
-\def\ds@deutsch{\def\switcht@@therlang{\switcht@deutsch}%
-\gdef\svlanginfo{\typeout{Man spricht deutsch.}\global\let\svlanginfo\relax}}
-\def\ds@francais{\def\switcht@@therlang{\switcht@francais}%
-\gdef\svlanginfo{\typeout{On parle francais.}\global\let\svlanginfo\relax}}
-%
-\AtBeginDocument{\@ifpackageloaded{babel}{%
-\@ifundefined{extrasamerican}{}{\addto\extrasamerican{\switcht@albion}}%
-\@ifundefined{extrasaustralian}{}{\addto\extrasaustralian{\switcht@albion}}%
-\@ifundefined{extrasbritish}{}{\addto\extrasbritish{\switcht@albion}}%
-\@ifundefined{extrascanadian}{}{\addto\extrascanadian{\switcht@albion}}%
-\@ifundefined{extrasenglish}{}{\addto\extrasenglish{\switcht@albion}}%
-\@ifundefined{extrasnewzealand}{}{\addto\extrasnewzealand{\switcht@albion}}%
-\@ifundefined{extrasUKenglish}{}{\addto\extrasUKenglish{\switcht@albion}}%
-\@ifundefined{extrasUSenglish}{}{\addto\extrasUSenglish{\switcht@albion}}%
-\@ifundefined{captionsfrench}{}{\addto\captionsfrench{\switcht@francais}}%
-\@ifundefined{extrasgerman}{}{\addto\extrasgerman{\switcht@deutsch}}%
-\@ifundefined{extrasngerman}{}{\addto\extrasngerman{\switcht@deutsch}}%
-}{\switcht@@therlang}%
-}
-% numbering style of floats, equations
-\newif\if@numart   \@numartfalse
-\DeclareOption{numart}{\@numarttrue}
-\def\set@numbering{\if@numart\else\num@book\fi}
-\AtEndOfClass{\set@numbering}
-% style for vectors
-\DeclareOption{vecphys}{\def\vec@style{phys}}
-\DeclareOption{vecarrow}{\def\vec@style{arrow}}
-% running heads
-\let\if@runhead\iftrue
-\DeclareOption{norunningheads}{\let\if@runhead\iffalse}
-% referee option
-\let\if@referee\iffalse
-\def\makereferee{\def\baselinestretch{2}\selectfont
-\newbox\refereebox
-\setbox\refereebox=\vbox to\z@{\vskip0.5cm%
-  \hbox to\textwidth{\normalsize\tt\hrulefill\lower0.5ex
-        \hbox{\kern5\p@ referee's copy\kern5\p@}\hrulefill}\vss}%
-\def\@oddfoot{\copy\refereebox}\let\@evenfoot=\@oddfoot}
-\DeclareOption{referee}{\let\if@referee\iftrue
-\AtBeginDocument{\makereferee\small\normalsize}}
-% modification of thebibliography
-\let\if@openbib\iffalse
-\DeclareOption{openbib}{\let\if@openbib\iftrue}
-% LaTeX standard, sectionwise references
-\DeclareOption{oribibl}{\let\oribibl=Y}
-\DeclareOption{sectrefs}{\let\secbibl=Y}
-%
-% footinfo option (provides an informatory line on every page)
-\def\SpringerMacroPackageNameA{svmono.cls}
-% \thetime, \thedate and \timstamp are macros to include
-% time, date (or both) of the TeX run in the document
-\def\maketimestamp{\count255=\time
-\divide\count255 by 60\relax
-\edef\thetime{\the\count255:}%
-\multiply\count255 by-60\relax
-\advance\count255 by\time
-\edef\thetime{\thetime\ifnum\count255<10 0\fi\the\count255}
-\edef\thedate{\number\day-\ifcase\month\or Jan\or Feb\or Mar\or
-             Apr\or May\or Jun\or Jul\or Aug\or Sep\or Oct\or
-             Nov\or Dec\fi-\number\year}
-\def\timstamp{\hbox to\hsize{\tt\hfil\thedate\hfil\thetime\hfil}}}
-\maketimestamp
-%
-% \footinfo generates a info footline on every page containing
-% pagenumber, jobname, macroname, and timestamp
-\DeclareOption{footinfo}{\AtBeginDocument{\maketimestamp
-   \def\ps@empty{\let\@mkboth\@gobbletwo
-   \let\@oddhead\@empty\let\@evenhead\@empty}%
-   \def\@oddfoot{\scriptsize\tt Page:\,\thepage\space\hfil
-                 job:\,\jobname\space\hfil
-                 macro:\,\SpringerMacroPackageNameA\space\hfil
-                 date/time:\,\thedate/\thetime}%
-   \let\@evenfoot=\@oddfoot}}
-%
-% start new chapter on any page
-\newif\if@openright \@openrighttrue
-\DeclareOption{openany}{\@openrightfalse}
-%
-% no size changing allowed
-\DeclareOption{11pt}{\OptionNotUsed}
-\DeclareOption{12pt}{\OptionNotUsed}
-% options for the article class
-\def\@rticle@options{10pt,twoside}
-% fleqn
-\DeclareOption{fleqn}{\def\@rticle@options{10pt,twoside,fleqn}%
-\AtEndOfClass{\let\leftlegendglue\relax}%
-\AtBeginDocument{\mathindent\parindent}}
-% hanging sectioning titles
-\let\if@sechang\iffalse
-\DeclareOption{sechang}{\let\if@sechang\iftrue}
-\def\ClassInfoNoLine#1#2{%
-   \ClassInfo{#1}{#2\@gobble}%
-}
-\let\SVMonoOpt\@empty
-\DeclareOption*{\InputIfFileExists{sv\CurrentOption.clo}{%
-\global\let\SVMonoOpt\CurrentOption}{%
-\ClassWarning{Springer-SVMono}{Specified option or subpackage
-"\CurrentOption" \MessageBreak not found
-passing it to article class \MessageBreak
--}\PassOptionsToClass{\CurrentOption}{article}%
-}}
-\ProcessOptions\relax
-\ifx\SVMonoOpt\@empty\relax
-\ClassInfoNoLine{Springer-SVMono}{extra/valid Springer sub-package
-\MessageBreak not found in option list - using "global" style}{}
-\fi
-\LoadClass[\@rticle@options]{article}
-\raggedbottom
-
-% various sizes and settings for monographs
-
-\setlength{\textwidth}{28pc}   %  11.8cm
-%\setlength{\textheight}{12pt}\multiply\textheight by 45\relax
-\setlength{\textheight}{540\p@}
-\setlength{\topmargin}{0cm}
-\setlength\oddsidemargin   {63\p@}
-\setlength\evensidemargin  {63\p@}
-\setlength\marginparwidth{90\p@}
-\setlength\headsep   {12\p@}
-
-\setlength{\parindent}{15\p@}
-\setlength{\parskip}{\z@ \@plus \p@}
-\setlength{\hfuzz}{2\p@}
-\setlength{\arraycolsep}{1.5\p@}
-
-\frenchspacing
-
-\tolerance=500
-
-\predisplaypenalty=0
-\clubpenalty=10000
-\widowpenalty=10000
-
-\setlength\footnotesep{7.7\p@}
-
-\newdimen\betweenumberspace          % dimension for space between
-\betweenumberspace=5\p@               % number and text of titles
-\newdimen\headlineindent             % dimension for space of
-\headlineindent=2.5cc                % number and gap of running heads
-
-% fonts, sizes, and the like
-\renewcommand\small{%
-   \@setfontsize\small\@ixpt{11}%
-   \abovedisplayskip 8.5\p@ \@plus3\p@ \@minus4\p@
-   \abovedisplayshortskip \z@ \@plus2\p@
-   \belowdisplayshortskip 4\p@ \@plus2\p@ \@minus2\p@
-   \def\@listi{\leftmargin\leftmargini
-               \parsep \z@ \@plus\p@ \@minus\p@
-               \topsep 6\p@ \@plus2\p@ \@minus4\p@
-               \itemsep\z@}%
-   \belowdisplayskip \abovedisplayskip
-}
-%
-\let\footnotesize=\small
-%
-\newenvironment{petit}{\par\addvspace{6\p@}\small}{\par\addvspace{6\p@}}
-%
-
-% modification of automatic positioning of floating objects
-\setlength\@fptop{\z@ }
-\setlength\@fpsep{12\p@ }
-\setlength\@fpbot{\z@ \@plus 1fil }
-\def\textfraction{.01}
-\def\floatpagefraction{.8}
-\setlength{\intextsep}{20\p@ \@plus 2\p@ \@minus 2\p@}
-\setcounter{topnumber}{4}
-\def\topfraction{.9}
-\setcounter{bottomnumber}{2}
-\def\bottomfraction{.7}
-\setcounter{totalnumber}{6}
-%
-% size and style of headings
-\newcommand{\partsize}{\Large}
-\newcommand{\partstyle}{\bfseries\boldmath}
-\newcommand{\chapsize}{\Large}
-\newcommand{\chapstyle}{\bfseries\boldmath}
-\newcommand{\chapshooksize}{\small}
-\newcommand{\chapshookstyle}{\itshape\unboldmath}
-\newcommand{\secsize}{\large}
-\newcommand{\secstyle}{\bfseries\boldmath}
-\newcommand{\subsecsize}{\normalsize}
-\newcommand{\subsecstyle}{\bfseries\boldmath}
-%
-\def\cleardoublepage{\clearpage\if@twoside \ifodd\c@page\else
-    \hbox{}\newpage\if@twocolumn\hbox{}\newpage\fi\fi\fi}
-
-\newcommand{\clearemptydoublepage}{%
-        \clearpage{\pagestyle{empty}\cleardoublepage}}
-\newcommand{\startnewpage}{\if@openright\clearemptydoublepage\else\clearpage\fi}
-
-% redefinition of \part
-\renewcommand\part{\clearemptydoublepage
-         \thispagestyle{empty}
-         \if@twocolumn
-            \onecolumn
-            \@tempswatrue
-         \else
-            \@tempswafalse
-         \fi
-         \@ifundefined{thispagecropped}{}{\thispagecropped}
-         \secdef\@part\@spart}
-
-\def\@part[#1]#2{\ifnum \c@secnumdepth >-2\relax
-        \refstepcounter{part}
-        \addcontentsline{toc}{part}{\partname\
-        \thepart\thechapterend\hspace{\betweenumberspace}%
-        #1}\else
-        \addcontentsline{toc}{part}{#1}\fi
-   \markboth{}{}
-   {\raggedleft
-    \ifnum \c@secnumdepth >-2\relax
-      \normalfont\partstyle\partsize\vrule height 34pt width 0pt depth 0pt%
-     \partname\ \thepart\llap{\smash{\lower 5pt\hbox to\textwidth{\hrulefill}}}
-    \par
-    \vskip 128.3\p@ \fi
-    #2\par}\@endpart}
-%
-% \@endpart finishes the part page
-%
-\def\@endpart{\vfil\newpage
-   \if@twoside
-       \hbox{}
-       \thispagestyle{empty}
-       \newpage
-   \fi
-   \if@tempswa
-     \twocolumn
-   \fi}
-%
-\def\@spart#1{{\raggedleft
-   \normalfont\partsize\partstyle
-   #1\par}\@endpart}
-%
-\newenvironment{partbacktext}{\def\@endpart{\vfil\newpage}}
-{\thispagestyle{empty} \newpage \if@tempswa\twocolumn\fi}
-%
-% (re)define sectioning
-\setcounter{secnumdepth}{2}
-
-\def\seccounterend{}
-\def\seccountergap{\hskip\betweenumberspace}
-\def\@seccntformat#1{\csname the#1\endcsname\seccounterend\seccountergap\ignorespaces}
-%
-\let\firstmark=\botmark
-%
-\@ifundefined{thechapterend}{\def\thechapterend{}}{}
-%
-\if@sechang
-   \def\sec@hangfrom#1{\setbox\@tempboxa\hbox{#1}%
-         \hangindent\wd\@tempboxa\noindent\box\@tempboxa}
-\else
-   \def\sec@hangfrom#1{\setbox\@tempboxa\hbox{#1}%
-         \hangindent\z@\noindent\box\@tempboxa}
-\fi
-
-\def\chap@hangfrom#1{\noindent\vrule height 34pt width 0pt depth 0pt
-\rlap{\smash{\lower 5pt\hbox to\textwidth{\hrulefill}}}\hbox{#1}
-\vskip10pt}
-\def\schap@hangfrom{\chap@hangfrom{}}
-
-\newcounter{chapter}
-%
-\@addtoreset{section}{chapter}
-\@addtoreset{footnote}{chapter}
-
-\newif\if@mainmatter \@mainmattertrue
-\newcommand\frontmatter{\startnewpage
-            \@mainmatterfalse\pagenumbering{Roman}
-            \setcounter{page}{5}}
-%
-\newcommand\mainmatter{\clearemptydoublepage
-       \@mainmattertrue\pagenumbering{arabic}}
-%
-\newcommand\backmatter{\clearemptydoublepage\@mainmatterfalse}
-
-\def\@chapapp{\chaptername}
-
-\newdimen\chapstarthookwidth
-\newcommand\chapstarthook[2][0.66\textwidth]{%
-\setlength{\chapstarthookwidth}{#1}%
-\gdef\chapst@rthook{#2}}
-
-\newcommand{\processchapstarthook}{\@ifundefined{chapst@rthook}{}{%
-    \setbox0=\hbox{\vbox{\hyphenpenalty=50
-    \begin{flushright}
-    \begin{minipage}{\chapstarthookwidth}
-       \vrule\@width\z@\@height21\p@\@depth\z@
-       \normalfont\chapshooksize\chapshookstyle\chapst@rthook
-    \end{minipage}
-    \end{flushright}}}%
-    \@tempdima=\pagetotal
-    \advance\@tempdima by\ht0
-    \ifdim\@tempdima<106\p@
-       \multiply\@tempdima by-1
-       \advance\@tempdima by106\p@
-       \vskip\@tempdima
-    \fi
-    \box0\par
-    \global\let\chapst@rthook=\undefined}}
-
-\newcommand\chapter{\startnewpage
-                    \@ifundefined{thispagecropped}{}{\thispagecropped}
-                    \thispagestyle{empty}%
-                    \global\@topnum\z@
-                    \@afterindentfalse
-                    \secdef\@chapter\@schapter}
-
-\def\@chapter[#1]#2{\ifnum \c@secnumdepth >\m@ne
-                       \refstepcounter{chapter}%
-                       \if@mainmatter
-                         \typeout{\@chapapp\space\thechapter.}%
-                         \addcontentsline{toc}{chapter}{\protect
-                                  \numberline{\thechapter\thechapterend}#1}%
-                       \else
-                         \addcontentsline{toc}{chapter}{#1}%
-                       \fi
-                    \else
-                      \addcontentsline{toc}{chapter}{#1}%
-                    \fi
-                    \chaptermark{#1}%
-                    \addtocontents{lof}{\protect\addvspace{10\p@}}%
-                    \addtocontents{lot}{\protect\addvspace{10\p@}}%
-                    \if@twocolumn
-                      \@topnewpage[\@makechapterhead{#2}]%
-                    \else
-                      \@makechapterhead{#2}%
-                      \@afterheading
-                    \fi}
-
-\def\@schapter#1{\if@twocolumn
-                   \@topnewpage[\@makeschapterhead{#1}]%
-                 \else
-                   \@makeschapterhead{#1}%
-                   \@afterheading
-                 \fi}
-
-%%changes position and layout of numbered chapter headings
-\def\@makechapterhead#1{{\parindent\z@\raggedright\normalfont
-  \hyphenpenalty \@M
-  \interlinepenalty\@M
-  \chapsize\chapstyle
-  \chap@hangfrom{\thechapter\thechapterend\hskip\betweenumberspace}%!!!
-  \ignorespaces#1\par\nobreak
-  \processchapstarthook
-  \ifdim\pagetotal>157\p@
-     \vskip 11\p@
-  \else
-     \@tempdima=168\p@\advance\@tempdima by-\pagetotal
-     \vskip\@tempdima
-  \fi}}
-
-%%changes position and layout of unnumbered chapter headings
-\def\@makeschapterhead#1{{\parindent \z@ \raggedright\normalfont
-  \hyphenpenalty \@M
-  \interlinepenalty\@M
-  \chapsize\chapstyle
-  \schap@hangfrom
-  \ignorespaces#1\par\nobreak
-  \processchapstarthook
-  \ifdim\pagetotal>157\p@
-     \vskip 11\p@
-  \else
-     \@tempdima=168\p@\advance\@tempdima by-\pagetotal
-     \vskip\@tempdima
-  \fi}}
-
-% predefined unnumbered headings
-\newcommand{\preface}[1][\prefacename]{\chapter*{#1}\markboth{#1}{#1}}
-% same with TOC entry
-\newcommand{\Preface}[1][\prefacename]{\chapter*{#1}\markboth{#1}{#1}%
-\addcontentsline{toc}{chapter}{#1}}
-
-% measures and setting of sections
-\renewcommand\section{\@startsection{section}{1}{\z@}%
-                       {-24\p@ \@plus -4\p@ \@minus -4\p@}%
-                       {12\p@ \@plus 4\p@ \@minus 4\p@}%
-                       {\normalfont\secsize\secstyle
-                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
-\renewcommand\subsection{\@startsection{subsection}{2}{\z@}%
-                       {-17\p@ \@plus -4\p@ \@minus -4\p@}%
-                       {10\p@ \@plus 4\p@ \@minus 4\p@}%
-                       {\normalfont\subsecsize\subsecstyle
-                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
-\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}%
-                       {-17\p@ \@plus -4\p@ \@minus -4\p@}%
-                       {10\p@ \@plus 4\p@ \@minus 4\p@}%
-                       {\normalfont\normalsize\subsecstyle
-                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
-\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}%
-                       {-10\p@ \@plus -4\p@ \@minus -4\p@}%
-                       {10\p@ \@plus 4\p@ \@minus 4\p@}%
-                       {\normalfont\normalsize\itshape
-                        \rightskip=\z@ \@plus 8em\pretolerance=10000 }}
-\def\subparagraph{\@startsection{subparagraph}{5}{\z@}%
-    {-5.388\p@ \@plus-4\p@ \@minus-4\p@}{-5\p@}{\normalfont\normalsize\itshape}}
-
-% Appendix
-\renewcommand\appendix{\par
-                \stepcounter{chapter}
-                \setcounter{chapter}{0}
-                \stepcounter{section}
-                \setcounter{section}{0}
-                \setcounter{equation}{0}
-                \setcounter{figure}{0}
-                \setcounter{table}{0}
-                \setcounter{footnote}{0}
-  \def\@chapapp{\appendixname}%
-  \renewcommand\thechapter{\@Alph\c@chapter}}
-
-%  definition of sections
-%  \hyphenpenalty and \raggedright added, so that there is no
-%  hyphenation and the text is set ragged-right in sectioning
-
-\def\runinsep{}
-\def\aftertext{\unskip\runinsep}
-%
-\def\thesection{\thechapter.\arabic{section}}
-\def\thesubsection{\thesection.\arabic{subsection}}
-\def\thesubsubsection{\thesubsection.\arabic{subsubsection}}
-\def\theparagraph{\thesubsubsection.\arabic{paragraph}}
-\def\thesubparagraph{\theparagraph.\arabic{subparagraph}}
-\def\chaptermark#1{}
-%
-\def\@ssect#1#2#3#4#5{%
-  \@tempskipa #3\relax
-  \ifdim \@tempskipa>\z@
-    \begingroup
-      #4{%
-        \@hangfrom{\hskip #1}%
-          \raggedright
-          \hyphenpenalty \@M
-          \interlinepenalty \@M #5\@@par}%
-    \endgroup
-  \else
-    \def\@svsechd{#4{\hskip #1\relax #5}}%
-  \fi
-  \@xsect{#3}}
-%
-\def\@sect#1#2#3#4#5#6[#7]#8{%
-   \ifnum #2>\c@secnumdepth
-      \let\@svsec\@empty
-   \else
-      \refstepcounter{#1}%
-      \protected@edef\@svsec{\@seccntformat{#1}\relax}%
-   \fi
-   \@tempskipa #5\relax
-   \ifdim \@tempskipa>\z@
-      \begingroup #6\relax
-         \sec@hangfrom{\hskip #3\relax\@svsec}%
-         {\raggedright
-          \hyphenpenalty \@M
-          \interlinepenalty \@M #8\@@par}%
-      \endgroup
-      \csname #1mark\endcsname{#7\seccounterend}%
-      \addcontentsline{toc}{#1}{\ifnum #2>\c@secnumdepth
-                                \else
-                                   \protect\numberline{\csname the#1\endcsname\seccounterend}%
-                                \fi
-                                #7}%
-   \else
-      \def\@svsechd{%
-         #6\hskip #3\relax
-         \@svsec #8\aftertext\ignorespaces
-         \csname #1mark\endcsname{#7}%
-         \addcontentsline{toc}{#1}{%
-            \ifnum #2>\c@secnumdepth \else
-                \protect\numberline{\csname the#1\endcsname\seccounterend}%
-            \fi
-            #7}}%
-   \fi
-   \@xsect{#5}}
-
-% figures and tables are processed in small print
-\def \@floatboxreset {%
-        \reset@font
-        \small
-        \@setnobreak
-        \@setminipage
-}
-\def\fps@figure{htbp}
-\def\fps@table{htbp}
-
-% Frame for paste-in figures or tables
-\def\mpicplace#1#2{%  #1 =width   #2 =height
-\vbox{\hbox to #1{\vrule\@width \fboxrule \@height #2\hfill}}}
-
-% labels of enumerate
-\renewcommand\labelenumii{\theenumii)}
-\renewcommand\theenumii{\@alph\c@enumii}
-
-% labels of itemize
-\renewcommand\labelitemi{\textbullet}
-\renewcommand\labelitemii{\textendash}
-\let\labelitemiii=\labelitemiv
-
-% labels of description
-\renewcommand*\descriptionlabel[1]{\hspace\labelsep #1\hfil}
-
-% fixed indentation for standard itemize-environment
-\newdimen\svitemindent \setlength{\svitemindent}{\parindent}
-
-
-% make indentations changeable
-
-\def\setitemindent#1{\settowidth{\labelwidth}{#1}%
-        \let\setit@m=Y%
-        \leftmargini\labelwidth
-        \advance\leftmargini\labelsep
-   \def\@listi{\leftmargin\leftmargini
-        \labelwidth\leftmargini\advance\labelwidth by -\labelsep
-        \parsep=\parskip
-        \topsep=\medskipamount
-        \itemsep=\parskip \advance\itemsep by -\parsep}}
-\def\setitemitemindent#1{\settowidth{\labelwidth}{#1}%
-        \let\setit@m=Y%
-        \leftmarginii\labelwidth
-        \advance\leftmarginii\labelsep
-\def\@listii{\leftmargin\leftmarginii
-        \labelwidth\leftmarginii\advance\labelwidth by -\labelsep
-        \parsep=\parskip
-        \topsep=\z@
-        \itemsep=\parskip \advance\itemsep by -\parsep}}
-%
-% adjusted environment "description"
-% if an optional parameter (at the first two levels of lists)
-% is present, its width is considered to be the widest mark
-% throughout the current list.
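-% For instance (an illustrative sketch):
-%   \begin{description}[longest label]
-%     \item[short] ...
-%     \item[longest label] ...
-%   \end{description}
-% aligns every label field on the width of "longest label".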
-\def\description{\@ifnextchar[{\@describe}{\list{}{\labelwidth\z@
-          \itemindent-\leftmargin \let\makelabel\descriptionlabel}}}
-%
-\def\describelabel#1{#1\hfil}
-\def\@describe[#1]{\relax\ifnum\@listdepth=0
-\setitemindent{#1}\else\ifnum\@listdepth=1
-\setitemitemindent{#1}\fi\fi
-\list{--}{\let\makelabel\describelabel}}
-%
-\def\itemize{%
-  \ifnum \@itemdepth >\thr@@\@toodeep\else
-    \advance\@itemdepth\@ne
-    \ifx\setit@m\undefined
-       \ifnum \@itemdepth=1 \leftmargini=\svitemindent
-          \labelwidth\leftmargini\advance\labelwidth-\labelsep
-          \leftmarginii=\leftmargini \leftmarginiii=\leftmargini
-       \fi
-    \fi
-    \edef\@itemitem{labelitem\romannumeral\the\@itemdepth}%
-    \expandafter\list
-      \csname\@itemitem\endcsname
-      {\def\makelabel##1{\rlap{##1}\hss}}%
-  \fi}
-%
-\newdimen\verbatimindent \verbatimindent\parindent
-\def\verbatim{\advance\@totalleftmargin by\verbatimindent
-\@verbatim \frenchspacing\@vobeyspaces \@xverbatim}
-
-%
-%  special signs and characters
-\newcommand{\D}{\mathrm{d}}
-\newcommand{\E}{\mathrm{e}}
-\let\eul=\E
-\newcommand{\I}{{\rm i}}
-\let\imag=\I
-%
-% the definition of uppercase Greek characters
-% Springer likes them as italics to depict variables
-\DeclareMathSymbol{\Gamma}{\mathalpha}{letters}{"00}
-\DeclareMathSymbol{\Delta}{\mathalpha}{letters}{"01}
-\DeclareMathSymbol{\Theta}{\mathalpha}{letters}{"02}
-\DeclareMathSymbol{\Lambda}{\mathalpha}{letters}{"03}
-\DeclareMathSymbol{\Xi}{\mathalpha}{letters}{"04}
-\DeclareMathSymbol{\Pi}{\mathalpha}{letters}{"05}
-\DeclareMathSymbol{\Sigma}{\mathalpha}{letters}{"06}
-\DeclareMathSymbol{\Upsilon}{\mathalpha}{letters}{"07}
-\DeclareMathSymbol{\Phi}{\mathalpha}{letters}{"08}
-\DeclareMathSymbol{\Psi}{\mathalpha}{letters}{"09}
-\DeclareMathSymbol{\Omega}{\mathalpha}{letters}{"0A}
-% the upright forms are defined here as \var<Character>
-\DeclareMathSymbol{\varGamma}{\mathalpha}{operators}{"00}
-\DeclareMathSymbol{\varDelta}{\mathalpha}{operators}{"01}
-\DeclareMathSymbol{\varTheta}{\mathalpha}{operators}{"02}
-\DeclareMathSymbol{\varLambda}{\mathalpha}{operators}{"03}
-\DeclareMathSymbol{\varXi}{\mathalpha}{operators}{"04}
-\DeclareMathSymbol{\varPi}{\mathalpha}{operators}{"05}
-\DeclareMathSymbol{\varSigma}{\mathalpha}{operators}{"06}
-\DeclareMathSymbol{\varUpsilon}{\mathalpha}{operators}{"07}
-\DeclareMathSymbol{\varPhi}{\mathalpha}{operators}{"08}
-\DeclareMathSymbol{\varPsi}{\mathalpha}{operators}{"09}
-\DeclareMathSymbol{\varOmega}{\mathalpha}{operators}{"0A}
-% Upright Lower Case Greek letters without using a new MathAlphabet
-\newcommand{\greeksym}[1]{\usefont{U}{psy}{m}{n}#1}
-\newcommand{\greeksymbold}[1]{{\usefont{U}{psy}{b}{n}#1}}
-\newcommand{\allmodesymb}[2]{\relax\ifmmode{\mathchoice
-{\mbox{\fontsize{\tf@size}{\tf@size}#1{#2}}}
-{\mbox{\fontsize{\tf@size}{\tf@size}#1{#2}}}
-{\mbox{\fontsize{\sf@size}{\sf@size}#1{#2}}}
-{\mbox{\fontsize{\ssf@size}{\ssf@size}#1{#2}}}}
-\else
-\mbox{#1{#2}}\fi}
-% Definition of lower case Greek letters
-\newcommand{\ualpha}{\allmodesymb{\greeksym}{a}}
-\newcommand{\ubeta}{\allmodesymb{\greeksym}{b}}
-\newcommand{\uchi}{\allmodesymb{\greeksym}{c}}
-\newcommand{\udelta}{\allmodesymb{\greeksym}{d}}
-\newcommand{\ugamma}{\allmodesymb{\greeksym}{g}}
-\newcommand{\umu}{\allmodesymb{\greeksym}{m}}
-\newcommand{\unu}{\allmodesymb{\greeksym}{n}}
-\newcommand{\upi}{\allmodesymb{\greeksym}{p}}
-\newcommand{\utau}{\allmodesymb{\greeksym}{t}}
-% redefines the \vec accent to a bold character - if desired
-\def\fig@type{arrow}% temporarily abused
-\ifx\vec@style\fig@type\else
-\@ifundefined{vec@style}{%
- \def\vec#1{\ensuremath{\mathchoice
-                     {\mbox{\boldmath$\displaystyle\mathbf{#1}$}}
-                     {\mbox{\boldmath$\textstyle\mathbf{#1}$}}
-                     {\mbox{\boldmath$\scriptstyle\mathbf{#1}$}}
-                     {\mbox{\boldmath$\scriptscriptstyle\mathbf{#1}$}}}}%
-}
-{\def\vec#1{\ensuremath{\mathchoice
-                     {\mbox{\boldmath$\displaystyle#1$}}
-                     {\mbox{\boldmath$\textstyle#1$}}
-                     {\mbox{\boldmath$\scriptstyle#1$}}
-                     {\mbox{\boldmath$\scriptscriptstyle#1$}}}}%
-}
-\fi
-% tensor
-\def\tens#1{\relax\ifmmode\mathsf{#1}\else\textsf{#1}\fi}
-
-% end of proof symbol
-\newcommand\qedsymbol{\hbox{\rlap{$\sqcap$}$\sqcup$}}
-\newcommand\qed{\relax\ifmmode\else\unskip\quad\fi\qedsymbol}
-\newcommand\smartqed{\renewcommand\qed{\relax\ifmmode\qedsymbol\else
-  {\unskip\nobreak\hfil\penalty50\hskip1em\null\nobreak\hfil\qedsymbol
-  \parfillskip=\z@\finalhyphendemerits=0\endgraf}\fi}}
-%
-\def\num@book{%
-\renewcommand\thesection{\thechapter.\@arabic\c@section}%
-\renewcommand\thesubsection{\thesection.\@arabic\c@subsection}%
-\renewcommand\theequation{\thechapter.\@arabic\c@equation}%
-\renewcommand\thefigure{\thechapter.\@arabic\c@figure}%
-\renewcommand\thetable{\thechapter.\@arabic\c@table}%
-\@addtoreset{section}{chapter}%
-\@addtoreset{figure}{chapter}%
-\@addtoreset{table}{chapter}%
-\@addtoreset{equation}{chapter}}
-%
-% Ragged bottom for the current page only
-\def\thisbottomragged{\def\@textbottom{\vskip\z@ \@plus.0001fil
-\global\let\@textbottom\relax}}
-
-% This is texte.tex
-% it defines various texts and their translations
-% called up with documentstyle options
-\def\switcht@albion{%
-\def\abstractname{Summary.}%
-\def\ackname{Acknowledgement.}%
-\def\andname{and}%
-\def\bibname{References}%
-\def\lastandname{, and}%
-\def\appendixname{Appendix}%
-\def\chaptername{Chapter}%
-\def\claimname{Claim}%
-\def\conjecturename{Conjecture}%
-\def\contentsname{Contents}%
-\def\corollaryname{Corollary}%
-\def\definitionname{Definition}%
-\def\examplename{Example}%
-\def\exercisename{Exercise}%
-\def\figurename{Fig.}%
-\def\keywordname{{\bf Key words:}}%
-\def\indexname{Index}%
-\def\lemmaname{Lemma}%
-\def\contriblistname{List of Contributors}%
-\def\listfigurename{List of Figures}%
-\def\listtablename{List of Tables}%
-\def\mailname{{\it Correspondence to\/}:}%
-\def\noteaddname{Note added in proof}%
-\def\notename{Note}%
-\def\partname{Part}%
-\def\prefacename{Preface}%
-\def\problemname{Problem}%
-\def\proofname{Proof}%
-\def\propertyname{Property}%
-\def\propositionname{Proposition}%
-\def\questionname{Question}%
-\def\refname{References}%
-\def\remarkname{Remark}%
-\def\seename{see}%
-\def\solutionname{Solution}%
-\def\subclassname{{\it Subject Classifications\/}:}%
-\def\tablename{Table}%
-\def\theoremname{Theorem}}
-\switcht@albion
-% Names of theorem-like environments are already defined
-% but must be translated if another language is chosen
-%
-% French section
-\def\switcht@francais{\svlanginfo
- \def\abstractname{R\'esum\'e.}%
- \def\ackname{Remerciements.}%
- \def\andname{et}%
- \def\lastandname{ et}%
- \def\appendixname{Appendice}%
- \def\bibname{Bibliographie}%
- \def\chaptername{Chapitre}%
- \def\claimname{Pr\'etention}%
- \def\conjecturename{Hypoth\`ese}%
- \def\contentsname{Table des mati\`eres}%
- \def\corollaryname{Corollaire}%
- \def\definitionname{D\'efinition}%
- \def\examplename{Exemple}%
- \def\exercisename{Exercice}%
- \def\figurename{Fig.}%
- \def\keywordname{{\bf Mots-cl\'e:}}%
- \def\indexname{Index}%
- \def\lemmaname{Lemme}%
- \def\contriblistname{Liste des contributeurs}%
- \def\listfigurename{Liste des figures}%
- \def\listtablename{Liste des tables}%
- \def\mailname{{\it Correspondence to\/}:}%
- \def\noteaddname{Note ajout\'ee \`a l'\'epreuve}%
- \def\notename{Remarque}%
- \def\partname{Partie}%
- \def\prefacename{Avant-propos}%  ou Pr\'eface
- \def\problemname{Probl\`eme}%
- \def\proofname{Preuve}%
- \def\propertyname{Caract\'eristique}%
-%\def\propositionname{Proposition}%
- \def\questionname{Question}%
- \def\refname{Litt\'erature}%
- \def\remarkname{Remarque}%
- \def\seename{voir}%
- \def\solutionname{Solution}%
- \def\subclassname{{\it Subject Classifications\/}:}%
- \def\tablename{Tableau}%
- \def\theoremname{Th\'eor\`eme}%
-}
-%
-% German section
-\def\switcht@deutsch{\svlanginfo
- \def\abstractname{Zusammenfassung.}%
- \def\ackname{Danksagung.}%
- \def\andname{und}%
- \def\lastandname{ und}%
- \def\appendixname{Anhang}%
- \def\bibname{Literaturverzeichnis}%
- \def\chaptername{Kapitel}%
- \def\claimname{Behauptung}%
- \def\conjecturename{Hypothese}%
- \def\contentsname{Inhaltsverzeichnis}%
- \def\corollaryname{Korollar}%
-%\def\definitionname{Definition}%
- \def\examplename{Beispiel}%
- \def\exercisename{\"Ubung}%
- \def\figurename{Abb.}%
- \def\keywordname{{\bf Schl\"usselw\"orter:}}%
- \def\indexname{Sachverzeichnis}%
-%\def\lemmaname{Lemma}%
- \def\contriblistname{Mitarbeiter}%
- \def\listfigurename{Abbildungsverzeichnis}%
- \def\listtablename{Tabellenverzeichnis}%
- \def\mailname{{\it Correspondence to\/}:}%
- \def\noteaddname{Nachtrag}%
- \def\notename{Anmerkung}%
- \def\partname{Teil}%
- \def\prefacename{Vorwort}%
-%\def\problemname{Problem}%
- \def\proofname{Beweis}%
- \def\propertyname{Eigenschaft}%
-%\def\propositionname{Proposition}%
- \def\questionname{Frage}%
- \def\refname{Literaturverzeichnis}%
- \def\remarkname{Anmerkung}%
- \def\seename{siehe}%
- \def\solutionname{L\"osung}%
- \def\subclassname{{\it Subject Classifications\/}:}%
- \def\tablename{Tabelle}%
-%\def\theoremname{Theorem}%
-}
-
-\def\getsto{\mathrel{\mathchoice {\vcenter{\offinterlineskip
-\halign{\hfil
-$\displaystyle##$\hfil\cr\gets\cr\to\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr\gets
-\cr\to\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr\gets
-\cr\to\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
-\gets\cr\to\cr}}}}}
-\def\lid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil
-$\displaystyle##$\hfil\cr<\cr\noalign{\vskip1.2\p@}=\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr<\cr
-\noalign{\vskip1.2\p@}=\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr<\cr
-\noalign{\vskip\p@}=\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
-<\cr
-\noalign{\vskip0.9\p@}=\cr}}}}}
-\def\gid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil
-$\displaystyle##$\hfil\cr>\cr\noalign{\vskip1.2\p@}=\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr>\cr
-\noalign{\vskip1.2\p@}=\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr>\cr
-\noalign{\vskip\p@}=\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
->\cr
-\noalign{\vskip0.9\p@}=\cr}}}}}
-\def\grole{\mathrel{\mathchoice {\vcenter{\offinterlineskip
-\halign{\hfil
-$\displaystyle##$\hfil\cr>\cr\noalign{\vskip-\p@}<\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr
->\cr\noalign{\vskip-\p@}<\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr
->\cr\noalign{\vskip-0.8\p@}<\cr}}}
-{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
->\cr\noalign{\vskip-0.3\p@}<\cr}}}}}
-\def\bbbr{{\rm I\!R}} %reelle Zahlen
-\def\bbbm{{\rm I\!M}}
-\def\bbbn{{\rm I\!N}} %natuerliche Zahlen
-\def\bbbf{{\rm I\!F}}
-\def\bbbh{{\rm I\!H}}
-\def\bbbk{{\rm I\!K}}
-\def\bbbp{{\rm I\!P}}
-\def\bbbone{{\mathchoice {\rm 1\mskip-4mu l} {\rm 1\mskip-4mu l}
-{\rm 1\mskip-4.5mu l} {\rm 1\mskip-5mu l}}}
-\def\bbbc{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm C$}\hbox{\hbox
-to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}
-{\setbox0=\hbox{$\textstyle\rm C$}\hbox{\hbox
-to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptstyle\rm C$}\hbox{\hbox
-to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptscriptstyle\rm C$}\hbox{\hbox
-to\z@{\kern0.4\wd0\vrule\@height0.9\ht0\hss}\box0}}}}
-\def\bbbq{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm
-Q$}\hbox{\raise
-0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.8\ht0\hss}\box0}}
-{\setbox0=\hbox{$\textstyle\rm Q$}\hbox{\raise
-0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.8\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptstyle\rm Q$}\hbox{\raise
-0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.7\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptscriptstyle\rm Q$}\hbox{\raise
-0.15\ht0\hbox to\z@{\kern0.4\wd0\vrule\@height0.7\ht0\hss}\box0}}}}
-\def\bbbt{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm
-T$}\hbox{\hbox to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}
-{\setbox0=\hbox{$\textstyle\rm T$}\hbox{\hbox
-to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptstyle\rm T$}\hbox{\hbox
-to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptscriptstyle\rm T$}\hbox{\hbox
-to\z@{\kern0.3\wd0\vrule\@height0.9\ht0\hss}\box0}}}}
-\def\bbbs{{\mathchoice
-{\setbox0=\hbox{$\displaystyle     \rm S$}\hbox{\raise0.5\ht0\hbox
-to\z@{\kern0.35\wd0\vrule\@height0.45\ht0\hss}\hbox
-to\z@{\kern0.55\wd0\vrule\@height0.5\ht0\hss}\box0}}
-{\setbox0=\hbox{$\textstyle        \rm S$}\hbox{\raise0.5\ht0\hbox
-to\z@{\kern0.35\wd0\vrule\@height0.45\ht0\hss}\hbox
-to\z@{\kern0.55\wd0\vrule\@height0.5\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptstyle      \rm S$}\hbox{\raise0.5\ht0\hbox
-to\z@{\kern0.35\wd0\vrule\@height0.45\ht0\hss}\raise0.05\ht0\hbox
-to\z@{\kern0.5\wd0\vrule\@height0.45\ht0\hss}\box0}}
-{\setbox0=\hbox{$\scriptscriptstyle\rm S$}\hbox{\raise0.5\ht0\hbox
-to\z@{\kern0.4\wd0\vrule\@height0.45\ht0\hss}\raise0.05\ht0\hbox
-to\z@{\kern0.55\wd0\vrule\@height0.45\ht0\hss}\box0}}}}
-\def\bbbz{{\mathchoice {\hbox{$\textstyle\sf Z\kern-0.4em Z$}}
-{\hbox{$\textstyle\sf Z\kern-0.4em Z$}}
-{\hbox{$\scriptstyle\sf Z\kern-0.3em Z$}}
-{\hbox{$\scriptscriptstyle\sf Z\kern-0.2em Z$}}}}
-
-\let\ts\,
-
-\setlength \labelsep     {5\p@}
-\setlength\leftmargini   {17\p@}
-\setlength\leftmargin    {\leftmargini}
-\setlength\leftmarginii  {\leftmargini}
-\setlength\leftmarginiii {\leftmargini}
-\setlength\leftmarginiv  {\leftmargini}
-\setlength\labelwidth    {\leftmargini}
-\addtolength\labelwidth{-\labelsep}
-
-\def\@listI{\leftmargin\leftmargini
-        \parsep=\parskip
-        \topsep=\medskipamount
-        \itemsep=\parskip \advance\itemsep by -\parsep}
-\let\@listi\@listI
-\@listi
-
-\def\@listii{\leftmargin\leftmarginii
-        \labelwidth\leftmarginii
-        \advance\labelwidth by -\labelsep
-        \parsep=\parskip
-        \topsep=\z@
-        \itemsep=\parskip
-        \advance\itemsep by -\parsep}
-
-\def\@listiii{\leftmargin\leftmarginiii
-        \labelwidth\leftmarginiii\advance\labelwidth by -\labelsep
-        \parsep=\parskip
-        \topsep=\z@
-        \itemsep=\parskip
-        \advance\itemsep by -\parsep
-        \partopsep=\topsep}
-
-\setlength\arraycolsep{1.5\p@}
-\setlength\tabcolsep{1.5\p@}
-
-\def\tableofcontents{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn
- \fi\chapter*{\contentsname \@mkboth{{\contentsname}}{{\contentsname}}}
- \@starttoc{toc}\if@restonecol\twocolumn\fi}
-
-\setcounter{tocdepth}{2}
-
-\def\l@part#1#2{\addpenalty{\@secpenalty}%
-   \addvspace{2em \@plus\p@}%
-   \begingroup
-     \parindent \z@
-     \rightskip \z@ \@plus 5em
-     \hrule\vskip5\p@
-     \bfseries\boldmath
-     \leavevmode
-     #1\par
-     \vskip5\p@
-     \hrule
-     \vskip\p@
-     \nobreak
-   \endgroup}
-
-\def\@dotsep{2}
-
-\def\addnumcontentsmark#1#2#3{%
-\addtocontents{#1}{\protect\contentsline{#2}{\protect\numberline
-                                    {\thechapter}#3}{\thepage}}}
-\def\addcontentsmark#1#2#3{%
-\addtocontents{#1}{\protect\contentsline{#2}{#3}{\thepage}}}
-\def\addcontentsmarkwop#1#2#3{%
-\addtocontents{#1}{\protect\contentsline{#2}{#3}{0}}}
-
-\def\@adcmk[#1]{\ifcase #1 \or
-\def\@gtempa{\addnumcontentsmark}%
-  \or    \def\@gtempa{\addcontentsmark}%
-  \or    \def\@gtempa{\addcontentsmarkwop}%
-  \fi\@gtempa{toc}{chapter}}
-\def\addtocmark{\@ifnextchar[{\@adcmk}{\@adcmk[3]}}
-
-\def\l@chapter#1#2{\par\addpenalty{-\@highpenalty}
- \addvspace{1.0em \@plus \p@}
- \@tempdima \tocchpnum \begingroup
- \parindent \z@ \rightskip \@tocrmarg
- \advance\rightskip by \z@ \@plus 2cm
- \parfillskip -\rightskip \pretolerance=10000
- \leavevmode \advance\leftskip\@tempdima \hskip -\leftskip
- {\bfseries\boldmath#1}\ifx0#2\hfil\null
- \else
-    \nobreak
-    \leaders\hbox{$\m@th \mkern \@dotsep mu\hbox{.}\mkern
-    \@dotsep mu$}\hfill
-    \nobreak\hbox to\@pnumwidth{\hfil #2}%
- \fi\par
- \penalty\@highpenalty \endgroup}
-
-\newdimen\tocchpnum
-\newdimen\tocsecnum
-\newdimen\tocsectotal
-\newdimen\tocsubsecnum
-\newdimen\tocsubsectotal
-\newdimen\tocsubsubsecnum
-\newdimen\tocsubsubsectotal
-\newdimen\tocparanum
-\newdimen\tocparatotal
-\newdimen\tocsubparanum
-\tocchpnum=20\p@            % chapter {\bf 88.} \@plus 5.3\p@
-\tocsecnum=22.5\p@          % section 88.8. plus 4.722\p@
-\tocsubsecnum=30.5\p@       % subsection 88.8.8 plus 4.944\p@
-\tocsubsubsecnum=38\p@      % subsubsection 88.8.8.8 plus 4.666\p@
-\tocparanum=45\p@           % paragraph 88.8.8.8.8 plus 3.888\p@
-\tocsubparanum=53\p@        % subparagraph 88.8.8.8.8.8 plus 4.11\p@
-\def\calctocindent{%
-\tocsectotal=\tocchpnum
-\advance\tocsectotal by\tocsecnum
-\tocsubsectotal=\tocsectotal
-\advance\tocsubsectotal by\tocsubsecnum
-\tocsubsubsectotal=\tocsubsectotal
-\advance\tocsubsubsectotal by\tocsubsubsecnum
-\tocparatotal=\tocsubsubsectotal
-\advance\tocparatotal by\tocparanum}
-\calctocindent
-
-\def\@dottedtocline#1#2#3#4#5{%
-  \ifnum #1>\c@tocdepth \else
-    \vskip \z@ \@plus.2\p@
-    {\leftskip #2\relax \rightskip \@tocrmarg \advance\rightskip by \z@ \@plus 2cm
-               \parfillskip -\rightskip \pretolerance=10000
-     \parindent #2\relax\@afterindenttrue
-     \interlinepenalty\@M
-     \leavevmode
-     \@tempdima #3\relax
-     \advance\leftskip \@tempdima \null\nobreak\hskip -\leftskip
-     {#4}\nobreak
-     \leaders\hbox{$\m@th
-        \mkern \@dotsep mu\hbox{.}\mkern \@dotsep
-        mu$}\hfill
-     \nobreak
-     \hb@xt@\@pnumwidth{\hfil\normalfont \normalcolor #5}%
-     \par}%
-  \fi}
-%
-\def\l@section{\@dottedtocline{1}{\tocchpnum}{\tocsecnum}}
-\def\l@subsection{\@dottedtocline{2}{\tocsectotal}{\tocsubsecnum}}
-\def\l@subsubsection{\@dottedtocline{3}{\tocsubsectotal}{\tocsubsubsecnum}}
-\def\l@paragraph{\@dottedtocline{4}{\tocsubsubsectotal}{\tocparanum}}
-\def\l@subparagraph{\@dottedtocline{5}{\tocparatotal}{\tocsubparanum}}
-
-\renewcommand\listoffigures{%
-    \chapter*{\listfigurename
-      \@mkboth{\listfigurename}{\listfigurename}}%
-    \@starttoc{lof}%
-    }
-
-\renewcommand\listoftables{%
-    \chapter*{\listtablename
-      \@mkboth{\listtablename}{\listtablename}}%
-    \@starttoc{lot}%
-    }
-
-\renewcommand\footnoterule{%
-  \kern-3\p@
-  \hrule\@width 50\p@
-  \kern2.6\p@}
-
-\newdimen\foot@parindent
-\foot@parindent 10.83\p@
-
-\AtBeginDocument{%
-\long\def\@makefntext#1{\@setpar{\@@par\@tempdima \hsize
-         \advance\@tempdima-\foot@parindent\parshape\@ne\foot@parindent
-         \@tempdima}\par
-         \parindent \foot@parindent\noindent \hbox to \z@{%
-         \hss\hss$^{\@thefnmark}$ }#1}}
-
-\if@spthms
-% Definition of the "\spnewtheorem" command.
-%
-% Usage:
-%
-%     \spnewtheorem{env_nam}{caption}[within]{cap_font}{body_font}
-% or  \spnewtheorem{env_nam}[numbered_like]{caption}{cap_font}{body_font}
-% or  \spnewtheorem*{env_nam}{caption}{cap_font}{body_font}
-%
-% New are "cap_font" and "body_font". They stand for the
-% font definitions of the caption and of the body text.
-%
-% "\spnewtheorem*" gives a theorem without number.
-%
-% A defined spnewtheorem environment is used as described
-% by Lamport.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\def\@thmcountersep{.}
-\def\@thmcounterend{.}
-\newcommand\nocaption{\noexpand\@gobble}
-\newdimen\spthmsep \spthmsep=3pt
-
-\def\spnewtheorem{\@ifstar{\@sthm}{\@Sthm}}
-
-% definition of \spnewtheorem with number
-
-\def\@spnthm#1#2{%
-  \@ifnextchar[{\@spxnthm{#1}{#2}}{\@spynthm{#1}{#2}}}
-\def\@Sthm#1{\@ifnextchar[{\@spothm{#1}}{\@spnthm{#1}}}
-
-\def\@spxnthm#1#2[#3]#4#5{\expandafter\@ifdefinable\csname #1\endcsname
-   {\@definecounter{#1}\@addtoreset{#1}{#3}%
-   \expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand
-     \csname the#3\endcsname \noexpand\@thmcountersep \@thmcounter{#1}}%
-   \expandafter\xdef\csname #1name\endcsname{#2}%
-   \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#4}{#5}}%
-                              \global\@namedef{end#1}{\@endtheorem}}}
-
-\def\@spynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname
-   {\@definecounter{#1}%
-   \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}%
-   \expandafter\xdef\csname #1name\endcsname{#2}%
-   \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#3}{#4}}%
-                               \global\@namedef{end#1}{\@endtheorem}}}
-
-\def\@spothm#1[#2]#3#4#5{%
-  \@ifundefined{c@#2}{\@latexerr{No theorem environment `#2' defined}\@eha}%
-  {\expandafter\@ifdefinable\csname #1\endcsname
-  {\global\@namedef{the#1}{\@nameuse{the#2}}%
-  \expandafter\xdef\csname #1name\endcsname{#3}%
-  \global\@namedef{#1}{\@spthm{#2}{\csname #1name\endcsname}{#4}{#5}}%
-  \global\@namedef{end#1}{\@endtheorem}}}}
-
-\def\@spthm#1#2#3#4{\topsep 7\p@ \@plus2\p@ \@minus4\p@
-\labelsep=\spthmsep\refstepcounter{#1}%
-\@ifnextchar[{\@spythm{#1}{#2}{#3}{#4}}{\@spxthm{#1}{#2}{#3}{#4}}}
-
-\def\@spxthm#1#2#3#4{\@spbegintheorem{#2}{\csname the#1\endcsname}{#3}{#4}%
-                    \ignorespaces}
-
-\def\@spythm#1#2#3#4[#5]{\@spopargbegintheorem{#2}{\csname
-       the#1\endcsname}{#5}{#3}{#4}\ignorespaces}
-
-\def\normalthmheadings{\def\@spbegintheorem##1##2##3##4{\trivlist
-                 \item[\hskip\labelsep{##3##1\ ##2\@thmcounterend}]##4}
-\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
-      \item[\hskip\labelsep{##4##1\ ##2}]{##4(##3)\@thmcounterend\ }##5}}
-\normalthmheadings
-
-\def\reversethmheadings{\def\@spbegintheorem##1##2##3##4{\trivlist
-                 \item[\hskip\labelsep{##3##2\ ##1\@thmcounterend}]##4}
-\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
-      \item[\hskip\labelsep{##4##2\ ##1}]{##4(##3)\@thmcounterend\ }##5}}
-
-% definition of \spnewtheorem* without number
-
-\def\@sthm#1#2{\@Ynthm{#1}{#2}}
-
-\def\@Ynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname
-   {\global\@namedef{#1}{\@Thm{\csname #1name\endcsname}{#3}{#4}}%
-    \expandafter\xdef\csname #1name\endcsname{#2}%
-    \global\@namedef{end#1}{\@endtheorem}}}
-
-\def\@Thm#1#2#3{\topsep 7\p@ \@plus2\p@ \@minus4\p@
-\@ifnextchar[{\@Ythm{#1}{#2}{#3}}{\@Xthm{#1}{#2}{#3}}}
-
-\def\@Xthm#1#2#3{\@Begintheorem{#1}{#2}{#3}\ignorespaces}
-
-\def\@Ythm#1#2#3[#4]{\@Opargbegintheorem{#1}
-       {#4}{#2}{#3}\ignorespaces}
-
-\def\@Begintheorem#1#2#3{#3\trivlist
-                           \item[\hskip\labelsep{#2#1\@thmcounterend}]}
-
-\def\@Opargbegintheorem#1#2#3#4{#4\trivlist
-      \item[\hskip\labelsep{#3#1}]{#3(#2)\@thmcounterend\ }}
-
-% initialize theorem environment
-
-\if@envcntshowhiercnt % show hierarchy counter
-   \def\@thmcountersep{.}
-   \spnewtheorem{theorem}{Theorem}[\envankh]{\bfseries}{\itshape}
-   \@addtoreset{theorem}{chapter}
-\else          % theorem counter only
-   \spnewtheorem{theorem}{Theorem}{\bfseries}{\itshape}
-   \if@envcntreset
-      \@addtoreset{theorem}{chapter}
-      \if@envcntresetsect
-         \@addtoreset{theorem}{section}
-      \fi
-   \fi
-\fi
-
-% definition of various theorem environments
-\spnewtheorem*{claim}{Claim}{\itshape}{\rmfamily}
-\spnewtheorem*{proof}{Proof}{\itshape}{\rmfamily}
-%
-\if@envcntsame % all environments like "Theorem" - using its counter
-   \def\spn@wtheorem#1#2#3#4{\@spothm{#1}[theorem]{#2}{#3}{#4}}
-\else % all environments with their own counter
-   \if@envcntshowhiercnt % show hierarchy counter
-      \def\spn@wtheorem#1#2#3#4{\@spxnthm{#1}{#2}[\envankh]{#3}{#4}}
-   \else          % environment counter only
-      \if@envcntreset % environment counter is reset each section
-         \if@envcntresetsect
-            \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4}
-             \@addtoreset{#1}{chapter}\@addtoreset{#1}{section}}
-         \else
-            \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4}
-                                      \@addtoreset{#1}{chapter}}
-         \fi
-      \else
-         \let\spn@wtheorem=\@spynthm
-      \fi
-   \fi
-\fi
-%
-\let\spdefaulttheorem=\spn@wtheorem
-%
-\spn@wtheorem{case}{Case}{\itshape}{\rmfamily}
-\spn@wtheorem{conjecture}{Conjecture}{\itshape}{\rmfamily}
-\spn@wtheorem{corollary}{Corollary}{\bfseries}{\itshape}
-\spn@wtheorem{definition}{Definition}{\bfseries}{\itshape}
-\spn@wtheorem{example}{Example}{\itshape}{\rmfamily}
-\spn@wtheorem{exercise}{Exercise}{\bfseries}{\rmfamily}
-\spn@wtheorem{lemma}{Lemma}{\bfseries}{\itshape}
-\spn@wtheorem{note}{Note}{\itshape}{\rmfamily}
-\spn@wtheorem{problem}{Problem}{\bfseries}{\rmfamily}
-\spn@wtheorem{property}{Property}{\itshape}{\rmfamily}
-\spn@wtheorem{proposition}{Proposition}{\bfseries}{\itshape}
-\spn@wtheorem{question}{Question}{\itshape}{\rmfamily}
-\spn@wtheorem{solution}{Solution}{\bfseries}{\rmfamily}
-\spn@wtheorem{remark}{Remark}{\itshape}{\rmfamily}
-%
-\newenvironment{theopargself}
-    {\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
-         \item[\hskip\labelsep{##4##1\ ##2}]{##4##3\@thmcounterend\ }##5}
-     \def\@Opargbegintheorem##1##2##3##4{##4\trivlist
-         \item[\hskip\labelsep{##3##1}]{##3##2\@thmcounterend\ }}}{}
-\newenvironment{theopargself*}
-    {\def\@spopargbegintheorem##1##2##3##4##5{\trivlist
-         \item[\hskip\labelsep{##4##1\ ##2}]{\hspace*{-\labelsep}##4##3\@thmcounterend}##5}
-     \def\@Opargbegintheorem##1##2##3##4{##4\trivlist
-         \item[\hskip\labelsep{##3##1}]{\hspace*{-\labelsep}##3##2\@thmcounterend}}}{}
-%
-\spnewtheorem{prob}{\nocaption}[chapter]{\bfseries}{\rmfamily}
-\newcommand{\probref}[1]{\textbf{\ref{#1}} }
-\newenvironment{sol}{\par\addvspace{6pt}\noindent\probref}{\par\addvspace{6pt}}
-%
-\fi
-
-\def\@takefromreset#1#2{%
-    \def\@tempa{#1}%
-    \let\@tempd\@elt
-    \def\@elt##1{%
-        \def\@tempb{##1}%
-        \ifx\@tempa\@tempb\else
-            \@addtoreset{##1}{#2}%
-        \fi}%
-    \expandafter\expandafter\let\expandafter\@tempc\csname cl@#2\endcsname
-    \expandafter\def\csname cl@#2\endcsname{}%
-    \@tempc
-    \let\@elt\@tempd}
-
-% redefinition of the captions for "figure" and "table" environments
-%
-\@ifundefined{floatlegendstyle}{\def\floatlegendstyle{\bfseries}}{}
-\def\floatcounterend{.\ }
-\def\capstrut{\vrule\@width\z@\@height\topskip}
-\@ifundefined{captionstyle}{\def\captionstyle{\normalfont\small}}{}
-\@ifundefined{instindent}{\newdimen\instindent}{}
-
-\long\def\@caption#1[#2]#3{\par\addcontentsline{\csname
-  ext@#1\endcsname}{#1}{\protect\numberline{\csname
-  the#1\endcsname}{\ignorespaces #2}}\begingroup
-    \@parboxrestore\if@minipage\@setminipage\fi
-    \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par
-  \endgroup}
-
-\def\twocaptionwidth#1#2{\def\first@capwidth{#1}\def\second@capwidth{#2}}
-% Default: .46\textwidth
-\twocaptionwidth{.46\textwidth}{.46\textwidth}
-
-\def\leftcaption{\refstepcounter\@captype\@dblarg%
-            {\@leftcaption\@captype}}
-
-\def\rightcaption{\refstepcounter\@captype\@dblarg%
-            {\@rightcaption\@captype}}
-
-\long\def\@leftcaption#1[#2]#3{\addcontentsline{\csname
-  ext@#1\endcsname}{#1}{\protect\numberline{\csname
-  the#1\endcsname}{\ignorespaces #2}}\begingroup
-    \@parboxrestore
-    \vskip\figcapgap
-    \@maketwocaptions{\csname fnum@#1\endcsname}{\ignorespaces #3}%
-    {\first@capwidth}\ignorespaces\hspace{.073\textwidth}\hfill%
-  \endgroup}
-
-\long\def\@rightcaption#1[#2]#3{\addcontentsline{\csname
-  ext@#1\endcsname}{#1}{\protect\numberline{\csname
-  the#1\endcsname}{\ignorespaces #2}}\begingroup
-    \@parboxrestore
-    \@maketwocaptions{\csname fnum@#1\endcsname}{\ignorespaces #3}%
-    {\second@capwidth}\par
-  \endgroup}
-
-\long\def\@maketwocaptions#1#2#3{%
-   \parbox[t]{#3}{{\floatlegendstyle #1\floatcounterend}#2}}
-
-\def\fig@pos{l}
-\newcommand{\leftfigure}[2][\fig@pos]{\makebox[.4635\textwidth][#1]{#2}}
-\let\rightfigure\leftfigure
-
-\newdimen\figgap\figgap=0.5cm  % hgap between figure and sidecaption
-%
-\long\def\@makesidecaption#1#2{%
-   \setbox0=\vbox{\hsize=\@tempdimb
-                  \captionstyle{\floatlegendstyle
-                                         #1\floatcounterend}#2}%
-   \ifdim\instindent<\z@
-      \ifdim\ht0>-\instindent
-         \advance\instindent by\ht0
-         \typeout{^^JClass-Warning: Legend of \string\sidecaption\space for
-                     \@captype\space\csname the\@captype\endcsname
-                  ^^Jis \the\instindent\space taller than the corresponding float -
-                  ^^Jyou'd better switch the environment. }%
-         \instindent\z@
-      \fi
-   \else
-      \ifdim\ht0<\instindent
-         \advance\instindent by-\ht0
-         \advance\instindent by-\dp0\relax
-         \advance\instindent by\topskip
-         \advance\instindent by-11\p@
-      \else
-         \advance\instindent by-\ht0
-         \instindent=-\instindent
-         \typeout{^^JClass-Warning: Legend of \string\sidecaption\space for
-                     \@captype\space\csname the\@captype\endcsname
-                  ^^Jis \the\instindent\space taller than the corresponding float -
-                  ^^Jyou'd better switch the environment. }%
-         \instindent\z@
-      \fi
-   \fi
-   \parbox[b]{\@tempdimb}{\captionstyle{\floatlegendstyle
-                                        #1\floatcounterend}#2%
-                          \ifdim\instindent>\z@ \\
-                               \vrule\@width\z@\@height\instindent
-                                     \@depth\z@
-                          \fi}}
-\def\sidecaption{\@ifnextchar[\sidec@ption{\sidec@ption[b]}}
-\def\sidec@ption[#1]#2\caption{%
-\setbox\@tempboxa=\hbox{\ignorespaces#2\unskip}%
-\if@twocolumn
- \ifdim\hsize<\textwidth\else
-   \ifdim\wd\@tempboxa<\columnwidth
-      \typeout{Double column float fits into single column -
-            ^^Jyou'd better switch the environment. }%
-   \fi
- \fi
-\fi
-  \instindent=\ht\@tempboxa
-  \advance\instindent by\dp\@tempboxa
-\if t#1
-\else
-  \instindent=-\instindent
-\fi
-\@tempdimb=\hsize
-\advance\@tempdimb by-\figgap
-\advance\@tempdimb by-\wd\@tempboxa
-\ifdim\@tempdimb<3cm
-   \ClassWarning{SVMono}{\string\sidecaption: No sufficient room for the legend;
-             ^^Jusing normal \string\caption}%
-   \unhbox\@tempboxa
-   \let\@capcommand=\@caption
-\else
-   \ifdim\@tempdimb<4.5cm
-      \ClassWarning{SVMono}{\string\sidecaption: Room for the legend very narrow;
-               ^^Jusing \string\raggedright}%
-      \toks@\expandafter{\captionstyle\sloppy
-                         \rightskip=\z@\@plus6mm\relax}%
-      \def\captionstyle{\the\toks@}%
-   \fi
-   \let\@capcommand=\@sidecaption
-   \leavevmode
-   \unhbox\@tempboxa
-   \hfill
-\fi
-\refstepcounter\@captype
-\@dblarg{\@capcommand\@captype}}
-\long\def\@sidecaption#1[#2]#3{\addcontentsline{\csname
-  ext@#1\endcsname}{#1}{\protect\numberline{\csname
-  the#1\endcsname}{\ignorespaces #2}}\begingroup
-    \@parboxrestore
-    \@makesidecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par
-  \endgroup}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\def\fig@type{figure}
-
-\def\leftlegendglue{\hfil}
-\newdimen\figcapgap\figcapgap=5\p@   % vgap between figure and caption
-\newdimen\tabcapgap\tabcapgap=5.5\p@ % vgap between caption and table
-
-\long\def\@makecaption#1#2{%
- \captionstyle
- \ifx\@captype\fig@type
-   \vskip\figcapgap
- \fi
- \setbox\@tempboxa\hbox{{\floatlegendstyle #1\floatcounterend}%
- \capstrut #2}%
- \ifdim \wd\@tempboxa >\hsize
-   {\floatlegendstyle #1\floatcounterend}\capstrut #2\par
- \else
-   \hbox to\hsize{\leftlegendglue\unhbox\@tempboxa\hfil}%
- \fi
- \ifx\@captype\fig@type\else
-   \vskip\tabcapgap
- \fi}
-
-\newcounter{merk}
-
-\def\endfigure{\resetsubfig\end@float}
-
-\@namedef{endfigure*}{\resetsubfig\end@dblfloat}
-
-\def\resetsubfig{\global\let\last@subfig=\undefined}
-
-\def\r@setsubfig{\xdef\last@subfig{\number\value{figure}}%
-\setcounter{figure}{\value{merk}}%
-\setcounter{merk}{0}}
-
-\def\subfigures{\refstepcounter{figure}%
-   \@tempcnta=\value{merk}%
-   \setcounter{merk}{\value{figure}}%
-   \setcounter{figure}{\the\@tempcnta}%
-   \def\thefigure{\if@numart\else\thechapter.\fi
-   \@arabic\c@merk\alph{figure}}%
-   \let\resetsubfig=\r@setsubfig}
-
-\def\samenumber{\addtocounter{\@captype}{-1}%
-\@ifundefined{last@subfig}{}{\setcounter{merk}{\last@subfig}}}
-
-% redefinition of the "bibliography" environment
-%
-\def\biblstarthook#1{\gdef\biblst@rthook{#1}}
-%
-\AtBeginDocument{%
-\ifx\secbibl\undefined
-   \def\bibsection{\chapter*{\refname}\markboth{\refname}{\refname}%
-      \addcontentsline{toc}{chapter}{\refname}%
-      \csname biblst@rthook\endcsname}
-\else
-   \def\bibsection{\section*{\refname}\markright{\refname}%
-      \addcontentsline{toc}{section}{\refname}%
-      \csname biblst@rthook\endcsname}
-\fi}
-\ifx\oribibl\undefined % Springer way of life
-   \renewenvironment{thebibliography}[1]{\bibsection
-         \global\let\biblst@rthook=\undefined
-         \def\@biblabel##1{##1.}
-         \small
-         \list{\@biblabel{\@arabic\c@enumiv}}%
-              {\settowidth\labelwidth{\@biblabel{#1}}%
-               \leftmargin\labelwidth
-               \advance\leftmargin\labelsep
-               \if@openbib
-                 \advance\leftmargin\bibindent
-                 \itemindent -\bibindent
-                 \listparindent \itemindent
-                 \parsep \z@
-               \fi
-               \usecounter{enumiv}%
-               \let\p@enumiv\@empty
-               \renewcommand\theenumiv{\@arabic\c@enumiv}}%
-         \if@openbib
-           \renewcommand\newblock{\par}%
-         \else
-           \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}%
-         \fi
-         \sloppy\clubpenalty4000\widowpenalty4000%
-         \sfcode`\.=\@m}
-        {\def\@noitemerr
-          {\@latex@warning{Empty `thebibliography' environment}}%
-         \endlist}
-   \def\@lbibitem[#1]#2{\item[{[#1]}\hfill]\if@filesw
-        {\let\protect\noexpand\immediate
-        \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces}
-\else % original bibliography is required
-   \let\bibname=\refname
-   \renewenvironment{thebibliography}[1]
-     {\chapter*{\bibname
-        \@mkboth{\bibname}{\bibname}}%
-      \list{\@biblabel{\@arabic\c@enumiv}}%
-           {\settowidth\labelwidth{\@biblabel{#1}}%
-            \leftmargin\labelwidth
-            \advance\leftmargin\labelsep
-            \@openbib@code
-            \usecounter{enumiv}%
-            \let\p@enumiv\@empty
-            \renewcommand\theenumiv{\@arabic\c@enumiv}}%
-      \sloppy
-      \clubpenalty4000
-      \@clubpenalty \clubpenalty
-      \widowpenalty4000%
-      \sfcode`\.\@m}
-     {\def\@noitemerr
-       {\@latex@warning{Empty `thebibliography' environment}}%
-      \endlist}
-\fi
-
-\let\if@threecolind\iffalse
-\def\threecolindex{\let\if@threecolind\iftrue}
-\def\indexstarthook#1{\gdef\indexst@rthook{#1}}
-\renewenvironment{theindex}
-               {\if@twocolumn
-                  \@restonecolfalse
-                \else
-                  \@restonecoltrue
-                \fi
-                \columnseprule \z@
-                \columnsep 1cc
-                \@nobreaktrue
-                \if@threecolind
-                   \begin{multicols}{3}[\chapter*{\indexname}%
-                \else
-                   \begin{multicols}{2}[\chapter*{\indexname}%
-                \fi
-                {\csname indexst@rthook\endcsname}]%
-                \global\let\indexst@rthook=\undefined
-                \markboth{\indexname}{\indexname}%
-                \addcontentsline{toc}{chapter}{\indexname}%
-                \flushbottom
-                \parindent\z@
-                \rightskip\z@ \@plus 40\p@
-                \parskip\z@ \@plus .3\p@\relax
-                \flushbottom
-                \let\item\@idxitem
-                \def\,{\relax\ifmmode\mskip\thinmuskip
-                             \else\hskip0.2em\ignorespaces\fi}%
-                \normalfont\small}
-               {\end{multicols}
-                \global\let\if@threecolind\iffalse
-                \if@restonecol\onecolumn\else\clearpage\fi}
-
-\def\idxquad{\hskip 10\p@}% space that divides entry from number
-
-\def\@idxitem{\par\setbox0=\hbox{--\,--\,--\enspace}%
-                  \hangindent\wd0\relax}
-
-\def\subitem{\par\noindent\setbox0=\hbox{--\enspace}% second order
-                \kern\wd0\setbox0=\hbox{--\,--\,--\enspace}%
-                \hangindent\wd0\relax}% indexentry
-
-\def\subsubitem{\par\noindent\setbox0=\hbox{--\,--\enspace}% third order
-                \kern\wd0\setbox0=\hbox{--\,--\,--\enspace}%
-                \hangindent\wd0\relax}% indexentry
-
-\def\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax}
-
-\def\subtitle#1{\gdef\@subtitle{#1}}
-\def\@subtitle{}
-
-\def\maketitle{\par
- \begingroup
-   \def\thefootnote{\fnsymbol{footnote}}%
-   \def\@makefnmark{\hbox
-       to\z@{$\m@th^{\@thefnmark}$\hss}}%
-   \if@twocolumn
-     \twocolumn[\@maketitle]%
-     \else \newpage
-     \global\@topnum\z@   % Prevents figures from going at top of page.
-     \@maketitle \fi\thispagestyle{empty}\@thanks
-     \par\penalty -\@M
- \endgroup
- \setcounter{footnote}{0}%
- \let\maketitle\relax
- \let\@maketitle\relax
- \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}
-
-\def\@maketitle{\newpage
- \null
- \vskip 2em                 % Vertical space above title.
-\begingroup
-  \def\and{\unskip, }
-  \parindent=\z@
-  \pretolerance=10000
-  \rightskip=\z@ \@plus 3cm
-  {\LARGE                   % each author set in \LARGE
-   \lineskip .5em
-   \@author
-   \par}%
-  \vskip 2cm                % Vertical space after author.
-  {\Huge \@title \par}%     % Title set in \Huge size.
-  \vskip 1cm                % Vertical space after title.
-  \if!\@subtitle!\else
-   {\LARGE\ignorespaces\@subtitle \par}
-   \vskip 1cm                % Vertical space after subtitle.
-  \fi
-  \if!\@date!\else
-    {\large \@date}%          % Date set in \large size.
-    \par
-    \vskip 1.5em               % Vertical space after date.
-  \fi
- \vfill
-% {\Large Springer\par}
-%\vskip 5\p@
-%\large
-%  Berlin\enspace Heidelberg\enspace New\kern0.1em York\\
-%  Hong\thinspace Kong\enspace London\\
-%  Milan\enspace Paris\enspace Tokyo\par
-\endgroup}
-
-% Useful environments
-\newenvironment{acknowledgement}{\par\addvspace{17\p@}\small\rm
-\trivlist\item[\hskip\labelsep{\it\ackname}]}
-{\endtrivlist\addvspace{6\p@}}
-%
-\newenvironment{noteadd}{\par\addvspace{17\p@}\small\rm
-\trivlist\item[\hskip\labelsep{\it\noteaddname}]}
-{\endtrivlist\addvspace{6\p@}}
-%
-\renewenvironment{abstract}{%
-      \advance\topsep by0.35cm\relax\small
-      \labelwidth=\z@
-      \listparindent=\z@
-      \itemindent\listparindent
-              \trivlist\item[\hskip\labelsep\bfseries\abstractname]%
-              \if!\abstractname!\hskip-\labelsep\fi
-      }
-    {\endtrivlist}
-
-% define the running headings of a twoside text
-\def\runheadsize{\small}
-\def\runheadstyle{\rmfamily\upshape}
-\def\customizhead{\hspace{\headlineindent}}
-
-\def\ps@headings{\let\@mkboth\markboth
-   \let\@oddfoot\@empty\let\@evenfoot\@empty
-   \def\@evenhead{\runheadsize\runheadstyle\rlap{\thepage}\customizhead
-                  \leftmark\hfil}
-   \def\@oddhead{\runheadsize\runheadstyle\hfil\rightmark\customizhead
-                  \llap{\thepage}}
-   \def\chaptermark##1{\markboth{{\ifnum\c@secnumdepth>\m@ne
-      \thechapter\thechapterend\hskip\betweenumberspace\fi ##1}}{{\ifnum %!!!
-      \c@secnumdepth>\m@ne\thechapter\thechapterend\hskip\betweenumberspace\fi ##1}}}%!!!
-   \def\sectionmark##1{\markright{{\ifnum\c@secnumdepth>\z@
-      \thesection\seccounterend\hskip\betweenumberspace\fi ##1}}}}
-
-\def\ps@myheadings{\let\@mkboth\@gobbletwo
-   \let\@oddfoot\@empty\let\@evenfoot\@empty
-   \def\@evenhead{\runheadsize\runheadstyle\rlap{\thepage}\customizhead
-                  \leftmark\hfil}
-   \def\@oddhead{\runheadsize\runheadstyle\hfil\rightmark\customizhead
-                  \llap{\thepage}}
-   \let\chaptermark\@gobble
-   \let\sectionmark\@gobble
-   \let\subsectionmark\@gobble}
-
-
-\ps@headings
-
-\endinput
-%end of file svmono.cls
--- a/src/Doc/ROOT	Sat Apr 05 17:52:29 2014 +0100
+++ b/src/Doc/ROOT	Sat Apr 05 11:37:00 2014 +0200
@@ -82,7 +82,7 @@
     "document/build"
     "document/root.tex"
 
-session IsarImplementation (doc) in "IsarImplementation" = "HOL-Proofs" +
+session Implementation (doc) in "Implementation" = "HOL-Proofs" +
   options [document_variants = "implementation"]
   theories
     Eq
@@ -109,7 +109,7 @@
     "document/root.tex"
     "document/style.sty"
 
-session IsarRef (doc) in "IsarRef" = HOL +
+session "Isar-Ref" (doc) in "Isar-Ref" = HOL +
   options [document_variants = "isar-ref", quick_and_dirty, thy_output_source]
   theories
     Preface
@@ -148,7 +148,7 @@
   theories
     JEdit
   files
-    "../IsarRef/document/style.sty"
+    "../Isar-Ref/document/style.sty"
     "../extra.sty"
     "../iman.sty"
     "../isar.sty"
@@ -163,7 +163,7 @@
     "document/popup2.png"
     "document/root.tex"
 
-session LaTeXsugar (doc) in "LaTeXsugar" = HOL +
+session Sugar (doc) in "Sugar" = HOL +
   options [document_variants = "sugar"]
   theories [document = ""]
     "~~/src/HOL/Library/LaTeXsugar"
@@ -208,7 +208,7 @@
     "document/root.tex"
     "document/syntax.tex"
 
-session "Logics-ZF" (doc) in "ZF" = ZF +
+session "Logics-ZF" (doc) in "Logics-ZF" = ZF +
   options [document_variants = "logics-ZF", print_mode = "brackets",
     thy_output_source]
   theories
@@ -247,7 +247,7 @@
     "document/build"
     "document/root.tex"
 
-session ProgProve (doc) in "ProgProve" = HOL +
+session "Prog-Prove" (doc) in "Prog-Prove" = HOL +
   options [document_variants = "prog-prove", show_question_marks = false]
   theories
     Basics
@@ -290,7 +290,7 @@
     Misc
   files
     "../prepare_document"
-    "../IsarRef/document/style.sty"
+    "../Isar-Ref/document/style.sty"
     "../pdfsetup.sty"
     "../iman.sty"
     "../extra.sty"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Sugar/Sugar.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,542 @@
+(*<*)
+theory Sugar
+imports "~~/src/HOL/Library/LaTeXsugar" "~~/src/HOL/Library/OptionalSugar"
+begin
+(*>*)
+text{*
+\section{Introduction}
+
+This document is for those Isabelle users who have mastered
+the art of mixing \LaTeX\ text and Isabelle theories and never want to
+typeset a theorem by hand anymore because they have experienced the
+bliss of writing \verb!@!\verb!{thm[display]setsum_cartesian_product[no_vars]}!
+and seeing Isabelle typeset it for them:
+@{thm[display,eta_contract=false] setsum_cartesian_product[no_vars]}
+No typos, no omissions, no sweat.
+If you have not experienced that joy, read Chapter 4, \emph{Presenting
+Theories}, \cite{LNCS2283} first.
+
+If you have mastered the art of Isabelle's \emph{antiquotations},
+i.e.\ things like the above \verb!@!\verb!{thm...}!, beware: in your vanity
+you may be tempted to think that all readers of the stunning ps or pdf
+documents you can now produce at the drop of a hat will be struck with
+awe at the beauty unfolding in front of their eyes. Until one day you
+come across that very critical of readers known as the ``common referee''.
+He has the nasty habit of refusing to understand unfamiliar notation
+like Isabelle's infamous @{text"\<lbrakk> \<rbrakk> \<Longrightarrow>"} no matter how many times you
+explain it in your paper. Even worse, he thinks that using @{text"\<lbrakk>
+\<rbrakk>"} for anything other than denotational semantics is a cardinal sin
+that must be punished by instant rejection.
+
+
+This document shows you how to make Isabelle and \LaTeX\ cooperate to
+produce ordinary looking mathematics that hides the fact that it was
+typeset by a machine. You merely need to load the right files:
+\begin{itemize}
+\item Import theory \texttt{LaTeXsugar} in the header of your own
+theory.  You may also want bits of \texttt{OptionalSugar}, which you can
+copy selectively into your own theory or import as a whole.  Both
+theories live in \texttt{HOL/Library} and are found automatically.
+
+\item Should you need additional \LaTeX\ packages (the text will tell
+you so), you include them at the beginning of your \LaTeX\ document,
+typically in \texttt{root.tex}. For a start, you should
+\verb!\usepackage{amssymb}! --- otherwise typesetting
+@{prop[source]"\<not>(\<exists>x. P x)"} will fail because the AMS symbol
+@{text"\<nexists>"} is missing.
+\end{itemize}
+
+
+\section{HOL syntax}
+
+\subsection{Logic}
+
+The formula @{prop[source]"\<not>(\<exists>x. P x)"} is typeset as @{prop"~(EX x. P x)"}.
+
+The predefined constructs @{text"if"}, @{text"let"} and
+@{text"case"} are set in sans serif font to distinguish them from
+other functions. This improves readability:
+\begin{itemize}
+\item @{term"if b then e\<^sub>1 else e\<^sub>2"} instead of @{text"if b then e\<^sub>1 else e\<^sub>2"}.
+\item @{term"let x = e\<^sub>1 in e\<^sub>2"} instead of @{text"let x = e\<^sub>1 in e\<^sub>2"}.
+\item @{term"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"} instead of\\
+      @{text"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"}.
+\end{itemize}
+
+\subsection{Sets}
+
+Although set syntax in HOL is already close to
+standard, we provide a few further improvements:
+\begin{itemize}
+\item @{term"{x. P}"} instead of @{text"{x. P}"}.
+\item @{term"{}"} instead of @{text"{}"}, where
+ @{term"{}"} is also input syntax.
+\item @{term"insert a (insert b (insert c M))"} instead of @{text"insert a (insert b (insert c M))"}.
+\end{itemize}
+
+
+\subsection{Lists}
+
+If lists are used heavily, the following notations increase readability:
+\begin{itemize}
+\item @{term"x # xs"} instead of @{text"x # xs"},
+      where @{term"x # xs"} is also input syntax.
+If you prefer more space around the $\cdot$ you have to redefine
+\verb!\isasymcdot! in \LaTeX:
+\verb!\renewcommand{\isasymcdot}{\isamath{\,\cdot\,}}!
+
+\item @{term"length xs"} instead of @{text"length xs"}.
+\item @{term"nth xs n"} instead of @{text"nth xs n"},
+      the $n$th element of @{text xs}.
+
+\item Human readers are good at converting automatically from lists to
+sets. Hence \texttt{OptionalSugar} contains syntax for suppressing the
+conversion function @{const set}: for example, @{prop[source]"x \<in> set xs"}
+becomes @{prop"x \<in> set xs"}.
+
+\item The @{text"@"} operation associates implicitly to the right,
+which leads to unpleasant line breaks if the term is too long for one
+line. To avoid this, \texttt{OptionalSugar} contains syntax to group
+@{text"@"}-terms to the left before printing, which leads to better
+line breaking behaviour:
+@{term[display]"term\<^sub>0 @ term\<^sub>1 @ term\<^sub>2 @ term\<^sub>3 @ term\<^sub>4 @ term\<^sub>5 @ term\<^sub>6 @ term\<^sub>7 @ term\<^sub>8 @ term\<^sub>9 @ term\<^sub>1\<^sub>0"}
+
+\end{itemize}
+
+
+\subsection{Numbers}
+
+Coercions between numeric types are alien to mathematicians who
+consider, for example, @{typ nat} as a subset of @{typ int}.
+\texttt{OptionalSugar} contains syntax for suppressing numeric coercions such
+as @{const int} @{text"::"} @{typ"nat \<Rightarrow> int"}. For example,
+@{term[source]"int 5"} is printed as @{term "int 5"}. Embeddings of types
+@{typ nat}, @{typ int}, @{typ real} are covered; non-injective coercions such
+as @{const nat} @{text"::"} @{typ"int \<Rightarrow> nat"} are not and should not be
+hidden.
+
+
+\section{Printing theorems}
+
+\subsection{Question marks}
+
+If you print anything containing schematic variables, especially
+theorems, the variables are prefixed with a question mark:
+\verb!@!\verb!{thm conjI}! results in @{thm conjI}. Most of the time
+you would rather not see the question marks. There is an attribute
+\verb!no_vars! that you can attach to the theorem to turn its
+schematic variables into ordinary free variables: \verb!@!\verb!{thm conjI[no_vars]}!
+results in @{thm conjI[no_vars]}.
+
+This \verb!no_vars! business can become a bit tedious.
+If you would rather never see question marks, simply put
+\begin{quote}
+\verb!options [show_question_marks = false]!
+\end{quote}
+into the relevant \texttt{ROOT} file, just before the \texttt{theories} for that session.
+The rest of this document is produced with this flag set to \texttt{false}.
+
+Hint: Setting \verb!show_question_marks! to \texttt{false} only
+suppresses question marks; variables that end in digits,
+e.g. @{text"x1"}, are still printed with a trailing @{text".0"},
+e.g. @{text"x1.0"}, their internal index. This can be avoided by
+turning the last digit into a subscript: write \verb!x\<^sub>1! and
+obtain the much nicer @{text"x\<^sub>1"}. *}
+
+(*<*)declare [[show_question_marks = false]](*>*)
+
+subsection {*Qualified names*}
+
+text{* If there are multiple declarations of the same name, Isabelle prints
+the qualified name, for example @{text "T.length"}, where @{text T} is the
+theory it is defined in, to distinguish it from the predefined @{const[source]
+"List.length"}. In case there is no danger of confusion, you can insist on
+short names (no qualifiers) by setting the \verb!names_short!
+configuration option in the context.
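+For example, a minimal sketch (to be adapted to your own theory context)
+is to switch the option on for the remainder of the theory:
+\begin{quote}
+\verb!declare [[names_short]]!
+\end{quote}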
+
+
+\subsection {Variable names\label{sec:varnames}}
+
+It sometimes happens that you want to change the name of a
+variable in a theorem before printing it. This can easily be achieved
+with the help of Isabelle's instantiation attribute \texttt{where}:
+@{thm conjI[where P = \<phi> and Q = \<psi>]} is the result of
+\begin{quote}
+\verb!@!\verb!{thm conjI[where P = \<phi> and Q = \<psi>]}!
+\end{quote}
+To support the ``\_''-notation for irrelevant variables
+the constant \texttt{DUMMY} has been introduced:
+@{thm fst_conv[of _ DUMMY]} is produced by
+\begin{quote}
+\verb!@!\verb!{thm fst_conv[of _ DUMMY]}!
+\end{quote}
+Variables that are bound by quantifiers or lambdas cannot be renamed
+like this. Instead, the attribute \texttt{rename\_abs} does the
+job. It expects a list of names or underscores, similar to the
+\texttt{of} attribute:
+\begin{quote}
+\verb!@!\verb!{thm split_paired_All[rename_abs _ l r]}!
+\end{quote}
+produces @{thm split_paired_All[rename_abs _ l r]}.
+
+
+\subsection{Inference rules}
+
+To print theorems as inference rules you need to include Didier
+R\'emy's \texttt{mathpartir} package~\cite{mathpartir}
+for typesetting inference rules in your \LaTeX\ file.
+
+Writing \verb!@!\verb!{thm[mode=Rule] conjI}! produces
+@{thm[mode=Rule] conjI}, even in the middle of a sentence.
+If you prefer your inference rule on a separate line, maybe with a name,
+\begin{center}
+@{thm[mode=Rule] conjI} {\sc conjI}
+\end{center}
+is produced by
+\begin{quote}
+\verb!\begin{center}!\\
+\verb!@!\verb!{thm[mode=Rule] conjI} {\sc conjI}!\\
+\verb!\end{center}!
+\end{quote}
+It is not recommended to use the standard \texttt{display} option
+together with \texttt{Rule} because centering does not work and because
+the line breaking mechanisms of \texttt{display} and \texttt{mathpartir} can
+clash.
+
+Of course you can display multiple rules in this fashion:
+\begin{quote}
+\verb!\begin{center}!\\
+\verb!@!\verb!{thm[mode=Rule] conjI} {\sc conjI} \\[1ex]!\\
+\verb!@!\verb!{thm[mode=Rule] disjI1} {\sc disjI$_1$} \qquad!\\
+\verb!@!\verb!{thm[mode=Rule] disjI2} {\sc disjI$_2$}!\\
+\verb!\end{center}!
+\end{quote}
+yields
+\begin{center}\small
+@{thm[mode=Rule] conjI} {\sc conjI} \\[1ex]
+@{thm[mode=Rule] disjI1} {\sc disjI$_1$} \qquad
+@{thm[mode=Rule] disjI2} {\sc disjI$_2$}
+\end{center}
+
+The \texttt{mathpartir} package copes well if there are too many
+premises for one line:
+\begin{center}
+@{prop[mode=Rule] "\<lbrakk> A \<longrightarrow> B; B \<longrightarrow> C; C \<longrightarrow> D; D \<longrightarrow> E; E \<longrightarrow> F; F \<longrightarrow> G;
+ G \<longrightarrow> H; H \<longrightarrow> I; I \<longrightarrow> J; J \<longrightarrow> K \<rbrakk> \<Longrightarrow> A \<longrightarrow> K"}
+\end{center}
+
+Limitations: 1. Each premise and the conclusion must fit on a single
+line.  2. Premises that are @{text"\<Longrightarrow>"}-implications are again
+displayed with a horizontal line, which looks unusual at best.
+
+
+If you print theorems without premises, no rule will be printed by the
+\texttt{Rule} print mode. However, you can use \texttt{Axiom} instead:
+\begin{quote}
+\verb!\begin{center}!\\
+\verb!@!\verb!{thm[mode=Axiom] refl} {\sc refl}! \\
+\verb!\end{center}!
+\end{quote}
+yields
+\begin{center}
+@{thm[mode=Axiom] refl} {\sc refl} 
+\end{center}
+
+
+\subsection{Displays and font sizes}
+
+When displaying theorems with the \texttt{display} option, for example as in
+\verb!@!\verb!{thm[display] refl}! @{thm[display] refl} the theorem is
+set in small font. It uses the \LaTeX-macro \verb!\isastyle!,
+which is also the style that regular theory text is set in, e.g. *}
+
+lemma "t = t"
+(*<*)oops(*>*)
+
+text{* \noindent Otherwise \verb!\isastyleminor! is used,
+which does not modify the font size (assuming you stick to the default
+\verb!\isabellestyle{it}! in \texttt{root.tex}). If you prefer
+normal font size throughout your text, include
+\begin{quote}
+\verb!\renewcommand{\isastyle}{\isastyleminor}!
+\end{quote}
+in \texttt{root.tex}. On the other hand, if you like the small font,
+just put \verb!\isastyle! in front of the text in question,
+e.g.\ at the start of one of the center-environments above.
+
+The advantage of the display option is that you can display a whole
+list of theorems in one go. For example,
+\verb!@!\verb!{thm[display] append.simps}!
+generates @{thm[display] append.simps}
+
+
+\subsection{If-then}
+
+If you prefer a fake ``natural language'' style you can produce
+the body of
+\newtheorem{theorem}{Theorem}
+\begin{theorem}
+@{thm[mode=IfThen] le_trans}
+\end{theorem}
+by typing
+\begin{quote}
+\verb!@!\verb!{thm[mode=IfThen] le_trans}!
+\end{quote}
+
+In order to prevent odd line breaks, the premises are put into boxes.
+At times this is too drastic:
+\begin{theorem}
+@{prop[mode=IfThen] "longpremise \<Longrightarrow> longerpremise \<Longrightarrow> P(f(f(f(f(f(f(f(f(f(x)))))))))) \<Longrightarrow> longestpremise \<Longrightarrow> conclusion"}
+\end{theorem}
+In that case you should use \texttt{IfThenNoBox} instead of
+\texttt{IfThen}:
+\begin{theorem}
+@{prop[mode=IfThenNoBox] "longpremise \<Longrightarrow> longerpremise \<Longrightarrow> P(f(f(f(f(f(f(f(f(f(x)))))))))) \<Longrightarrow> longestpremise \<Longrightarrow> conclusion"}
+\end{theorem}
+
+
+\subsection{Doing it yourself\label{sec:yourself}}
+
+If for some reason you want or need to present theorems your
+own way, you can extract the premises and the conclusion explicitly
+and combine them as you like:
+\begin{itemize}
+\item \verb!@!\verb!{thm (prem 1)! $thm$\verb!}!
+prints premise 1 of $thm$.
+\item \verb!@!\verb!{thm (concl)! $thm$\verb!}!
+prints the conclusion of $thm$.
+\end{itemize}
+For example, ``from @{thm (prem 2) conjI} and
+@{thm (prem 1) conjI} we conclude @{thm (concl) conjI}''
+is produced by
+\begin{quote}
+\verb!from !\verb!@!\verb!{thm (prem 2) conjI}! \verb!and !\verb!@!\verb!{thm (prem 1) conjI}!\\
+\verb!we conclude !\verb!@!\verb!{thm (concl) conjI}!
+\end{quote}
+Thus you can rearrange or hide premises and typeset the theorem as you like.
+Styles like \verb!(prem 1)! are a general mechanism explained
+in \S\ref{sec:styles}.
+
+
+\subsection{Patterns}
+
+
+In \S\ref{sec:varnames} we showed how to create patterns containing ``@{term DUMMY}''.
+You can drive this game even further and extend the syntax of let
+bindings such that certain functions like @{term fst}, @{term hd}, 
+etc.\ are printed as patterns. \texttt{OptionalSugar} provides the following:
+
+\begin{center}
+\begin{tabular}{l@ {~~produced by~~}l}
+@{term "let x = fst p in t"} & \verb!@!\verb!{term "let x = fst p in t"}!\\
+@{term "let x = snd p in t"} & \verb!@!\verb!{term "let x = snd p in t"}!\\
+@{term "let x = hd xs in t"} & \verb!@!\verb!{term "let x = hd xs in t"}!\\
+@{term "let x = tl xs in t"} & \verb!@!\verb!{term "let x = tl xs in t"}!\\
+@{term "let x = the y in t"} & \verb!@!\verb!{term "let x = the y in t"}!\\
+\end{tabular}
+\end{center}
+
+
+\section {Styles\label{sec:styles}}
+
+The \verb!thm! antiquotation works nicely for single theorems, but
+sets of equations as used in definitions are more difficult to
+typeset nicely: people tend to prefer aligned @{text "="} signs.
+
+To deal with such cases where it is desirable to dive into the structure
+of terms and theorems, Isabelle offers antiquotations featuring ``styles'':
+
+\begin{quote}
+\verb!@!\verb!{thm (style) thm}!\\
+\verb!@!\verb!{prop (style) thm}!\\
+\verb!@!\verb!{term (style) term}!\\
+\verb!@!\verb!{term_type (style) term}!\\
+\verb!@!\verb!{typeof (style) term}!\\
+\end{quote}
+
+ A ``style'' is a transformation of a term. There are predefined
+ styles, namely \verb!lhs! and \verb!rhs!, \verb!prem! with one argument, and \verb!concl!.
+For example, the output
+\begin{center}
+\begin{tabular}{l@ {~~@{text "="}~~}l}
+@{thm (lhs) append_Nil} & @{thm (rhs) append_Nil}\\
+@{thm (lhs) append_Cons} & @{thm (rhs) append_Cons}
+\end{tabular}
+\end{center}
+is produced by the following code:
+\begin{quote}
+  \verb!\begin{center}!\\
+  \verb!\begin{tabular}{l@ {~~!\verb!@!\verb!{text "="}~~}l}!\\
+  \verb!@!\verb!{thm (lhs) append_Nil} & @!\verb!{thm (rhs) append_Nil}\\!\\
+  \verb!@!\verb!{thm (lhs) append_Cons} & @!\verb!{thm (rhs) append_Cons}!\\
+  \verb!\end{tabular}!\\
+  \verb!\end{center}!
+\end{quote}
+Note the space between \verb!@! and \verb!{! in the tabular argument.
+It prevents Isabelle from interpreting \verb!@ {~~...~~}! 
+as an antiquotation. The styles \verb!lhs! and \verb!rhs!
+extract the left hand side (or right hand side respectively) from the
+conclusion of propositions consisting of a binary operator
+(e.~g.~@{text "="}, @{text "\<equiv>"}, @{text "<"}).
+
+Likewise, \verb!concl! may be used as a style to show just the
+conclusion of a proposition. For example, take \verb!hd_Cons_tl!:
+\begin{center}
+  @{thm hd_Cons_tl}
+\end{center}
+To print just the conclusion,
+\begin{center}
+  @{thm (concl) hd_Cons_tl}
+\end{center}
+type
+\begin{quote}
+  \verb!\begin{center}!\\
+  \verb!@!\verb!{thm (concl) hd_Cons_tl}!\\
+  \verb!\end{center}!
+\end{quote}
+Beware that any options must be placed \emph{before} the style, as in this example.
+
+Further use cases can be found in \S\ref{sec:yourself}.
+If you are not afraid of ML, you may also define your own styles.
+Have a look at module @{ML_structure Term_Style}.
+
+
+\section {Proofs}
+
+Full proofs, even if written in beautiful Isar style, are
+likely to be too long and detailed to be included in conference
+papers, but some key lemmas might be of interest.
+It is usually easiest to put them in figures like the one in Fig.\
+\ref{fig:proof}. This was achieved with the \isakeyword{text\_raw} command:
+*}
+text_raw {*
+  \begin{figure}
+  \begin{center}\begin{minipage}{0.6\textwidth}  
+  \isastyleminor\isamarkuptrue
+*}
+lemma True
+proof -
+  -- "pretty trivial"
+  show True by force
+qed
+text_raw {*    
+  \end{minipage}\end{center}
+  \caption{Example proof in a figure.}\label{fig:proof}
+  \end{figure}
+*}
+text {*
+
+\begin{quote}
+\small
+\verb!text_raw {!\verb!*!\\
+\verb!  \begin{figure}!\\
+\verb!  \begin{center}\begin{minipage}{0.6\textwidth}!\\
+\verb!  \isastyleminor\isamarkuptrue!\\
+\verb!*!\verb!}!\\
+\verb!lemma True!\\
+\verb!proof -!\\
+\verb!  -- "pretty trivial"!\\
+\verb!  show True by force!\\
+\verb!qed!\\
+\verb!text_raw {!\verb!*!\\
+\verb!  \end{minipage}\end{center}!\\
+\verb!  \caption{Example proof in a figure.}\label{fig:proof}!\\
+\verb!  \end{figure}!\\
+\verb!*!\verb!}!
+\end{quote}
+
+Other theory text, e.g.\ definitions, can be put in figures, too.
+
+\section{Theory snippets}
+
+This section describes how to include snippets of a theory text in some other \LaTeX\ document.
+The typical scenario is that the description of your theory is not part of the theory text but
+a separate document that antiquotes bits of the theory. This works well for terms and theorems
+but there are no antiquotations, for example, for function definitions or proofs. Even if there are antiquotations,
+the output is usually a version of the input reformatted by Isabelle and may not look the way
+you want it to. Here is how to include a snippet of theory text (in \LaTeX\ form) in some
+other \LaTeX\ document, in 4 easy steps. Beware that these snippets are not processed by
+any antiquotation mechanism: the resulting \LaTeX\ text is more or less exactly what you wrote
+in the theory, without any added sugar.
+
+\subsection{Theory markup}
+
+Include some markers at the beginning and the end of the theory snippet you want to cut out.
+You have to place the following lines before and after the snippet, where snippets must always be
+consecutive lines of theory text:
+\begin{quote}
+\verb!text_raw{!\verb!*\snip{!\emph{snippetname}\verb!}{1}{2}{%*!\verb!}!\\
+\emph{theory text}\\
+\verb!text_raw{!\verb!*!\verb!}%endsnip*!\verb!}!
+\end{quote}
+where \emph{snippetname} should be a unique name for the snippet. The numbers \texttt{1}
+and \texttt{2} are explained in a moment.
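+As a concrete sketch (the snippet name \texttt{exdef} and the definition
+shown are merely hypothetical placeholders), marking up a single function
+definition could look like this:
+\begin{quote}
+\verb!text_raw{!\verb!*\snip{exdef}{1}{2}{%*!\verb!}!\\
+\verb!fun sq :: "nat => nat" where "sq n = n * n"!\\
+\verb!text_raw{!\verb!*!\verb!}%endsnip*!\verb!}!
+\end{quote}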
+
+\subsection{Generate the \texttt{.tex} file}
+
+Run your theory \texttt{T} with the \texttt{isabelle} \texttt{build} tool
+to generate the \LaTeX-file \texttt{T.tex} which is needed for the next step,
+extraction of marked snippets.
+You may also want to process \texttt{T.tex} to generate a pdf document.
+This requires a definition of \texttt{\char`\\snip}:
+\begin{verbatim}
+\newcommand{\repeatisanl}[1]
+  {\ifnum#1=0\else\isanewline\repeatisanl{\numexpr#1-1}\fi}
+\newcommand{\snip}[4]{\repeatisanl#2#4\repeatisanl#3}
+\end{verbatim}
+Parameters 2 and 3 of \texttt{\char`\\snip} are numbers (the \texttt{1}
+and \texttt{2} above) and determine how many newlines are inserted before and after the snippet.
+Unfortunately \texttt{text\_raw} eats up all preceding and following newlines
+and they have to be inserted again in this manner. Otherwise the document generated from \texttt{T.tex}
+will look ugly around the snippets. It can take some iterations to get the number of required
+newlines exactly right.
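+As for the build itself, a rough sketch of an invocation (the session name
+\texttt{MySession} and the session directory are placeholders for your own
+setup) is
+\begin{quote}
+\verb!isabelle build -d . -o document=pdf MySession!
+\end{quote}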
+
+\subsection{Extract marked snippets}
+\label{subsec:extract}
+
+Extract the marked bits of text with a shell-level script, e.g.
+\begin{quote}
+\verb!sed -n '/\\snip{/,/endsnip/p' T.tex > !\emph{snippets}\verb!.tex!
+\end{quote}
+File \emph{snippets}\texttt{.tex} (the name is arbitrary) now contains a sequence of blocks like this
+\begin{quote}
+\verb!\snip{!\emph{snippetname}\verb!}{1}{2}{%!\\
+\emph{theory text}\\
+\verb!}%endsnip!
+\end{quote}
+
+\subsection{Including snippets}
+
+In the preamble of the document where the snippets are to be used you define \texttt{\char`\\snip}
+and input \emph{snippets}\texttt{.tex}:
+\begin{verbatim}
+\newcommand{\snip}[4]
+  {\expandafter\newcommand\csname #1\endcsname{#4}}
+\input{snippets}
+\end{verbatim}
+This definition of \texttt{\char`\\snip} simply has the effect of defining for each snippet
+\emph{snippetname} a \LaTeX\ command \texttt{\char`\\}\emph{snippetname}
+that produces the corresponding snippet text. In the body of your document you can display that text
+like this:
+\begin{quote}
+\verb!\begin{isabelle}!\\
+\texttt{\char`\\}\emph{snippetname}\\
+\verb!\end{isabelle}!
+\end{quote}
+The \texttt{isabelle} environment is the one defined in the standard file
+\texttt{isabelle.sty} which most likely you are loading anyway.
+
+
+\section{Antiquotation}
+
+You want to show a constant and its type? Instead of going
+\verb!@!\verb!{const myconst}! \verb!@!\verb!{text "::"}! \verb!@!\verb!{typeof myconst}!,
+you can just write \verb!@!\verb!{const_typ myconst}! using the new antiquotation
+\texttt{const\_typ} defined in \texttt{LaTeXsugar}. For example,
+\verb!@!\verb!{const_typ length}! produces @{const_typ length}.
+
+*}
+
+(*<*)
+end
+(*>*)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Sugar/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -e
+
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Sugar/document/mathpartir.sty	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,388 @@
+%  Mathpartir --- Math Paragraph for Typesetting Inference Rules
+%
+%  Copyright (C) 2001, 2002, 2003 Didier Rémy
+%
+%  Author         : Didier Remy 
+%  Version        : 1.1.1
+%  Bug Reports    : to author
+%  Web Site       : http://pauillac.inria.fr/~remy/latex/
+% 
+%  WhizzyTeX is free software; you can redistribute it and/or modify
+%  it under the terms of the GNU General Public License as published by
+%  the Free Software Foundation; either version 2, or (at your option)
+%  any later version.
+%  
+%  Mathpartir is distributed in the hope that it will be useful,
+%  but WITHOUT ANY WARRANTY; without even the implied warranty of
+%  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+%  GNU General Public License for more details 
+%  (http://pauillac.inria.fr/~remy/license/GPL).
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%  File mathpartir.sty (LaTeX macros)
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\NeedsTeXFormat{LaTeX2e}
+\ProvidesPackage{mathpartir}
+    [2003/07/10 version 1.1.1 Math Paragraph for Typesetting Inference Rules]
+
+%%
+
+%% Identification
+%% Preliminary declarations
+
+\RequirePackage {keyval}
+
+%% Options
+%% More declarations
+
+%% PART I: Typesetting maths in paragraph mode
+
+\newdimen \mpr@tmpdim
+
+% To ensure hevea \hva compatibility, \hva should expand to nothing
+% in mathpar or in inferrule
+\let \mpr@hva \empty
+
+%% normal paragraph parameters, should rather be taken dynamically
+\def \mpr@savepar {%
+  \edef \MathparNormalpar
+     {\noexpand \lineskiplimit \the\lineskiplimit
+      \noexpand \lineskip \the\lineskip}%
+  }
+
+\def \mpr@rulelineskip {\lineskiplimit=0.3em\lineskip=0.2em plus 0.1em}
+\def \mpr@lesslineskip {\lineskiplimit=0.6em\lineskip=0.5em plus 0.2em}
+\def \mpr@lineskip  {\lineskiplimit=1.2em\lineskip=1.2em plus 0.2em}
+\let \MathparLineskip \mpr@lineskip
+\def \mpr@paroptions {\MathparLineskip}
+\let \mpr@prebindings \relax
+
+\newskip \mpr@andskip \mpr@andskip 2em plus 0.5fil minus 0.5em
+
+\def \mpr@goodbreakand
+   {\hskip -\mpr@andskip  \penalty -1000\hskip \mpr@andskip}
+\def \mpr@and {\hskip \mpr@andskip}
+\def \mpr@andcr {\penalty 50\mpr@and}
+\def \mpr@cr {\penalty -10000\mpr@and}
+\def \mpr@eqno #1{\mpr@andcr #1\hskip 0em plus -1fil \penalty 10}
+
+\def \mpr@bindings {%
+  \let \and \mpr@andcr
+  \let \par \mpr@andcr
+  \let \\\mpr@cr
+  \let \eqno \mpr@eqno
+  \let \hva \mpr@hva
+  } 
+\let \MathparBindings \mpr@bindings
+
+% \@ifundefined {ignorespacesafterend}
+%    {\def \ignorespacesafterend {\aftergroup \ignorespaces}
+
+\newenvironment{mathpar}[1][]
+  {$$\mpr@savepar \parskip 0em \hsize \linewidth \centering
+     \vbox \bgroup \mpr@prebindings \mpr@paroptions #1\ifmmode $\else
+     \noindent $\displaystyle\fi
+     \MathparBindings}
+  {\unskip \ifmmode $\fi\egroup $$\ignorespacesafterend}
+
+% \def \math@mathpar #1{\setbox0 \hbox {$\displaystyle #1$}\ifnum
+%     \wd0 < \hsize  $$\box0$$\else \bmathpar #1\emathpar \fi}
+
+%%% HOV BOXES
+
+\def \mathvbox@ #1{\hbox \bgroup \mpr@normallineskip 
+  \vbox \bgroup \tabskip 0em \let \\ \cr
+  \halign \bgroup \hfil $##$\hfil\cr #1\crcr \egroup \egroup
+  \egroup}
+
+\def \mathhvbox@ #1{\setbox0 \hbox {\let \\\qquad $#1$}\ifnum \wd0 < \hsize
+      \box0\else \mathvbox {#1}\fi}
+
+
+%% Part II -- operations on lists
+
+\newtoks \mpr@lista
+\newtoks \mpr@listb
+
+\long \def\mpr@cons #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
+{#2}\edef #2{\the \mpr@lista \the \mpr@listb}}
+
+\long \def\mpr@snoc #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
+{#2}\edef #2{\the \mpr@listb\the\mpr@lista}}
+
+\long \def \mpr@concat#1=#2\mpr@to#3{\mpr@lista \expandafter {#2}\mpr@listb
+\expandafter {#3}\edef #1{\the \mpr@listb\the\mpr@lista}}
+
+\def \mpr@head #1\mpr@to #2{\expandafter \mpr@head@ #1\mpr@head@ #1#2}
+\long \def \mpr@head@ #1#2\mpr@head@ #3#4{\def #4{#1}\def#3{#2}}
+
+\def \mpr@flatten #1\mpr@to #2{\expandafter \mpr@flatten@ #1\mpr@flatten@ #1#2}
+\long \def \mpr@flatten@ \\#1\\#2\mpr@flatten@ #3#4{\def #4{#1}\def #3{\\#2}}
+
+\def \mpr@makelist #1\mpr@to #2{\def \mpr@all {#1}%
+   \mpr@lista {\\}\mpr@listb \expandafter {\mpr@all}\edef \mpr@all {\the
+   \mpr@lista \the \mpr@listb \the \mpr@lista}\let #2\empty 
+   \def \mpr@stripof ##1##2\mpr@stripend{\def \mpr@stripped{##2}}\loop
+     \mpr@flatten \mpr@all \mpr@to \mpr@one
+     \expandafter \mpr@snoc \mpr@one \mpr@to #2\expandafter \mpr@stripof
+     \mpr@all \mpr@stripend  
+     \ifx \mpr@stripped \empty \let \mpr@isempty 0\else \let \mpr@isempty 1\fi
+     \ifx 1\mpr@isempty
+   \repeat
+}
+
+%% Part III -- Type inference rules
+
+\def \mpr@rev #1\mpr@to #2{\let \mpr@tmp \empty
+   \def \\##1{\mpr@cons ##1\mpr@to \mpr@tmp}#1\let #2\mpr@tmp}
+
+\newif \if@premisse
+\newbox \mpr@hlist
+\newbox \mpr@vlist
+\newif \ifmpr@center \mpr@centertrue
+\def \mpr@htovlist {%
+   \setbox \mpr@hlist
+      \hbox {\strut
+             \ifmpr@center \hskip -0.5\wd\mpr@hlist\fi
+             \unhbox \mpr@hlist}%
+   \setbox \mpr@vlist
+      \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
+             \else \unvbox \mpr@vlist \box \mpr@hlist
+             \fi}%
+}
+% OLD version
+% \def \mpr@htovlist {%
+%    \setbox \mpr@hlist
+%       \hbox {\strut \hskip -0.5\wd\mpr@hlist \unhbox \mpr@hlist}%
+%    \setbox \mpr@vlist
+%       \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
+%              \else \unvbox \mpr@vlist \box \mpr@hlist
+%              \fi}%
+% }
+
+\def \mpr@sep{2em}
+\def \mpr@blank { }
+\def \mpr@hovbox #1#2{\hbox
+  \bgroup
+  \ifx #1T\@premissetrue
+  \else \ifx #1B\@premissefalse
+  \else
+     \PackageError{mathpartir}
+       {Premise orientation should either be T or B}
+       {Fatal error in Package}%
+  \fi \fi
+  \def \@test {#2}\ifx \@test \mpr@blank\else
+  \setbox \mpr@hlist \hbox {}%
+  \setbox \mpr@vlist \vbox {}%
+  \if@premisse \let \snoc \mpr@cons \else \let \snoc \mpr@snoc \fi
+  \let \@hvlist \empty \let \@rev \empty
+  \mpr@tmpdim 0em
+  \expandafter \mpr@makelist #2\mpr@to \mpr@flat
+  \if@premisse \mpr@rev \mpr@flat \mpr@to \@rev \else \let \@rev \mpr@flat \fi
+  \def \\##1{%
+     \def \@test {##1}\ifx \@test \empty
+        \mpr@htovlist
+        \mpr@tmpdim 0em %%% last bug fix not extensively checked
+     \else
+      \setbox0 \hbox{$\displaystyle {##1}$}\relax
+      \advance \mpr@tmpdim by \wd0
+      %\mpr@tmpdim 1.02\mpr@tmpdim
+      \ifnum \mpr@tmpdim < \hsize
+         \ifnum \wd\mpr@hlist > 0
+           \if@premisse
+             \setbox \mpr@hlist 
+                \hbox {\unhbox0 \hskip \mpr@sep \unhbox \mpr@hlist}%
+           \else
+             \setbox \mpr@hlist
+                \hbox {\unhbox \mpr@hlist  \hskip \mpr@sep \unhbox0}%
+           \fi
+         \else 
+         \setbox \mpr@hlist \hbox {\unhbox0}%
+         \fi
+      \else
+         \ifnum \wd \mpr@hlist > 0
+            \mpr@htovlist 
+            \mpr@tmpdim \wd0
+         \fi
+         \setbox \mpr@hlist \hbox {\unhbox0}%
+      \fi
+      \advance \mpr@tmpdim by \mpr@sep
+   \fi
+   }%
+   \@rev
+   \mpr@htovlist
+   \ifmpr@center \hskip \wd\mpr@vlist\fi \box \mpr@vlist
+   \fi
+   \egroup
+}
+
+%%% INFERENCE RULES
+
+\@ifundefined{@@over}{%
+    \let\@@over\over % fallback if amsmath is not loaded
+    \let\@@overwithdelims\overwithdelims
+    \let\@@atop\atop \let\@@atopwithdelims\atopwithdelims
+    \let\@@above\above \let\@@abovewithdelims\abovewithdelims
+  }{}
+
+
+\def \mpr@@fraction #1#2{\hbox {\advance \hsize by -0.5em
+    $\displaystyle {#1\@@over #2}$}}
+\let \mpr@fraction \mpr@@fraction
+\def \mpr@@reduce #1#2{\hbox
+    {$\lower 0.01pt \mpr@@fraction {#1}{#2}\mkern -15mu\rightarrow$}}
+\def \mpr@@rewrite #1#2#3{\hbox
+    {$\lower 0.01pt \mpr@@fraction {#2}{#3}\mkern -8mu#1$}}
+\def \mpr@infercenter #1{\vcenter {\mpr@hovbox{T}{#1}}}
+
+\def \mpr@empty {}
+\def \mpr@inferrule
+  {\bgroup
+     \ifnum \linewidth<\hsize \hsize \linewidth\fi
+     \mpr@rulelineskip
+     \let \and \qquad
+     \let \hva \mpr@hva
+     \let \@rulename \mpr@empty
+     \let \@rule@options \mpr@empty
+     \mpr@inferrule@}
+\newcommand {\mpr@inferrule@}[3][]
+  {\everymath={\displaystyle}%       
+   \def \@test {#2}\ifx \empty \@test
+      \setbox0 \hbox {$\vcenter {\mpr@hovbox{B}{#3}}$}%
+   \else 
+   \def \@test {#3}\ifx \empty \@test
+      \setbox0 \hbox {$\vcenter {\mpr@hovbox{T}{#2}}$}%
+   \else
+   \setbox0 \mpr@fraction {\mpr@hovbox{T}{#2}}{\mpr@hovbox{B}{#3}}%
+   \fi \fi
+   \def \@test {#1}\ifx \@test\empty \box0
+   \else \vbox 
+%%% Suggestion de Francois pour les etiquettes longues
+%%%   {\hbox to \wd0 {\RefTirName {#1}\hfil}\box0}\fi
+      {\hbox {\RefTirName {#1}}\box0}\fi
+   \egroup}
+
+\def \mpr@vdotfil #1{\vbox to #1{\leaders \hbox{$\cdot$} \vfil}}
+
+% There are two forms
+% \inferrule [label]{premises}{conclusions}
+% or
+% \inferrule* [options]{premises}{conclusions}
+%
+% Premises and conclusions are lists of elements separated by \\
+% Each \\ produces a break, attempting horizontal breaks if possible, 
+% and  vertical breaks if needed. 
+% 
+% An empty element obtained by \\\\ produces a vertical break in all cases. 
+%
+% The former rule is aligned on the fraction bar. 
+% The optional label appears on top of the rule
+% The second form to be used in a derivation tree is aligned on the last
+% line of its conclusion
+% 
+% The second form can be parameterized, using the key=val interface. The
+% following keys are recognized:
+%       
+%  width                set the width of the rule to val
+%  narrower             set the width of the rule to val\hsize
+%  before               execute val at the beginning/left
+%  lab                  put a label [Val] on top of the rule
+%  lskip                add negative skip on the right
+%  left                 put a left label [Val]
+%  Left                 put a left label [Val],  ignoring its width 
+%  right                put a right label [Val]
+%  Right                put a right label [Val], ignoring its width
+%  leftskip             skip negative space on the left-hand side
+%  rightskip            skip negative space on the right-hand side
+%  vdots                lift the rule by val and fill vertical space with dots
+%  after                execute val at the end/right
+%  
+%  Note that most options must come in this order to avoid strange
+%  typesetting (in particular leftskip must precede left and Left, and
+%  rightskip must follow Right or right; vdots must come last
+%  or only be followed by rightskip).
+%  
+
+\define@key {mprset}{flushleft}[]{\mpr@centerfalse}
+\define@key {mprset}{center}[]{\mpr@centertrue}
+\def \mprset #1{\setkeys{mprset}{#1}}
+
+\newbox \mpr@right
+\define@key {mpr}{flushleft}[]{\mpr@centerfalse}
+\define@key {mpr}{center}[]{\mpr@centertrue}
+\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
+     \advance \hsize by -\wd0\box0}
+\define@key {mpr}{width}{\hsize #1}
+\define@key {mpr}{sep}{\def\mpr@sep{#1}}
+\define@key {mpr}{before}{#1}
+\define@key {mpr}{lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
+\define@key {mpr}{Lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
+\define@key {mpr}{narrower}{\hsize #1\hsize}
+\define@key {mpr}{leftskip}{\hskip -#1}
+\define@key {mpr}{reduce}[]{\let \mpr@fraction \mpr@@reduce}
+\define@key {mpr}{rightskip}
+  {\setbox \mpr@right \hbox {\unhbox \mpr@right \hskip -#1}}
+\define@key {mpr}{LEFT}{\setbox0 \hbox {$#1$}\relax
+     \advance \hsize by -\wd0\box0}
+\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
+     \advance \hsize by -\wd0\box0}
+\define@key {mpr}{Left}{\llap{$\TirName {#1}\;$}}
+\define@key {mpr}{right}
+  {\setbox0 \hbox {$\;\TirName {#1}$}\relax \advance \hsize by -\wd0
+   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
+\define@key {mpr}{RIGHT}
+  {\setbox0 \hbox {$#1$}\relax \advance \hsize by -\wd0
+   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
+\define@key {mpr}{Right}
+  {\setbox \mpr@right \hbox {\unhbox \mpr@right \rlap {$\;\TirName {#1}$}}}
+\define@key {mpr}{vdots}{\def \mpr@vdots {\@@atop \mpr@vdotfil{#1}}}
+\define@key {mpr}{after}{\edef \mpr@after {\mpr@after #1}}
+
+\newdimen \rule@dimen
+\newcommand \mpr@inferstar@ [3][]{\setbox0
+  \hbox {\let \mpr@rulename \mpr@empty \let \mpr@vdots \relax
+         \setbox \mpr@right \hbox{}%
+         $\setkeys{mpr}{#1}%
+          \ifx \mpr@rulename \mpr@empty \mpr@inferrule {#2}{#3}\else
+          \mpr@inferrule [{\mpr@rulename}]{#2}{#3}\fi
+          \box \mpr@right \mpr@vdots$}
+  \setbox1 \hbox {\strut}
+  \rule@dimen \dp0 \advance \rule@dimen by -\dp1
+  \raise \rule@dimen \box0}
+
+\def \mpr@infer {\@ifnextchar *{\mpr@inferstar}{\mpr@inferrule}}
+\newcommand \mpr@err@skipargs[3][]{}
+\def \mpr@inferstar*{\ifmmode 
+    \let \@do \mpr@inferstar@
+  \else 
+    \let \@do \mpr@err@skipargs
+    \PackageError {mathpartir}
+      {\string\inferrule* can only be used in math mode}{}%
+  \fi \@do}
+
+
+%%% Exports
+
+% Environment mathpar
+
+\let \inferrule \mpr@infer
+
+% make a short name if \infer is not already defined
+\@ifundefined {infer}{\let \infer \mpr@infer}{}
+
+\def \tir@name #1{\hbox {\small \sc #1}}
+\let \TirName \tir@name
+\let \RefTirName \tir@name
+
+%%% Other Exports
+
+% \let \listcons \mpr@cons
+% \let \listsnoc \mpr@snoc
+% \let \listhead \mpr@head
+% \let \listmake \mpr@makelist
+
+
+
+
+\endinput
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Sugar/document/root.bib	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,12 @@
+@book{LNCS2283,author={Tobias Nipkow and Lawrence Paulson and Markus Wenzel},
+title="Isabelle/HOL --- A Proof Assistant for Higher-Order Logic",
+publisher=Springer,series=LNCS,volume=2283,year=2002,
+note={\url{http://www.in.tum.de/~nipkow/LNCS2283/}}}
+
+@misc{mathpartir,author={Didier R\'emy},title={mathpartir},
+note={\url{http://cristal.inria.fr/~remy/latex/}}}
+
+@misc{tar,author={Gerwin Klein and Norbert Schirmer and Tobias Nipkow},
+title={{LaTeX} sugar theories and support files}, 
+note={\url{http://isabelle.in.tum.de/sugar.tar.gz}}}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Sugar/document/root.tex	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,41 @@
+\documentclass[11pt,a4paper]{article}
+\usepackage{isabelle,isabellesym}
+
+\usepackage{amssymb}
+
+\usepackage{mathpartir}
+
+% this should be the last package used
+\usepackage{pdfsetup}
+
+% urls in roman style, theory text in math-similar italics
+\urlstyle{rm}
+\isabellestyle{it}
+
+\hyphenation{Isa-belle}
+\begin{document}
+
+\title{\LaTeX\ Sugar for Isabelle Documents}
+\author{Florian Haftmann, Gerwin Klein, Tobias Nipkow, Norbert Schirmer}
+\maketitle
+
+\begin{abstract}
+This document shows how to typeset mathematics in Isabelle-based
+documents in a style close to that in ordinary computer science papers.
+\end{abstract}
+
+%\tableofcontents
+
+% generated text of all theories
+\input{Sugar.tex}
+
+% optional bibliography
+\bibliographystyle{abbrv}
+\bibliography{root}
+
+\end{document}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: t
+%%% End:
--- a/src/Doc/System/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ b/src/Doc/System/document/build	Sat Apr 05 11:37:00 2014 +0200
@@ -7,7 +7,7 @@
 
 "$ISABELLE_TOOL" logo
 
-cp "$ISABELLE_HOME/src/Doc/IsarRef/document/style.sty" .
+cp "$ISABELLE_HOME/src/Doc/Isar-Ref/document/style.sty" .
 cp "$ISABELLE_HOME/src/Doc/iman.sty" .
 cp "$ISABELLE_HOME/src/Doc/extra.sty" .
 cp "$ISABELLE_HOME/src/Doc/isar.sty" .
--- a/src/Doc/ZF/FOL_examples.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-header{*Examples of Classical Reasoning*}
-
-theory FOL_examples imports "~~/src/FOL/FOL" begin
-
-lemma "EX y. ALL x. P(y)-->P(x)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule exCI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{*see below for @{text allI} combined with @{text swap}*}
-apply (erule allI [THEN [2] swap])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule notE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption
-done
-
-text {*
-@{thm[display] allI [THEN [2] swap]}
-*}
-
-lemma "EX y. ALL x. P(y)-->P(x)"
-by blast
-
-end
-
-
--- a/src/Doc/ZF/IFOL_examples.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-header{*Examples of Intuitionistic Reasoning*}
-
-theory IFOL_examples imports "~~/src/FOL/IFOL" begin
-
-text{*Quantifier example from the book Logic and Computation*}
-lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule exI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule exE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{*Now @{text "apply assumption"} fails*}
-oops
-
-text{*Trying again, with the same first two steps*}
-lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule exE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule exI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-done
-
-lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-by (tactic {*IntPr.fast_tac @{context} 1*})
-
-text{*Example of Dyckhoff's method*}
-lemma "~ ~ ((P-->Q) | (Q-->P))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (unfold not_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule disj_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule imp_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
- apply (erule imp_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-apply (erule FalseE)+
-done
-
-end
--- a/src/Doc/ZF/If.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-(*  Title:      Doc/ZF/If.thy
-    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
-    Copyright   1991  University of Cambridge
-
-First-Order Logic: the 'if' example.
-*)
-
-theory If imports "~~/src/FOL/FOL" begin
-
-definition "if" :: "[o,o,o]=>o" where
-  "if(P,Q,R) == P&Q | ~P&R"
-
-lemma ifI:
-    "[| P ==> Q; ~P ==> R |] ==> if(P,Q,R)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply blast
-done
-
-lemma ifE:
-   "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~P; R |] ==> S |] ==> S"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply blast
-done
-
-lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule iffI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule ifE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule ifE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule ifI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule ifI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-oops
-
-text{*Trying again from the beginning in order to use @{text blast}*}
-declare ifI [intro!]
-declare ifE [elim!]
-
-lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
-by blast
-
-
-lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-by blast
-
-text{*Trying again from the beginning in order to prove from the definitions*}
-lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply blast
-done
-
-
-text{*An invalid formula.  High-level rules permit a simpler diagnosis*}
-lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply auto
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-(*The next step will fail unless subgoals remain*)
-apply (tactic all_tac)
-oops
-
-text{*Trying again from the beginning in order to prove from the definitions*}
-lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (auto) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-(*The next step will fail unless subgoals remain*)
-apply (tactic all_tac)
-oops
-
-
-end
--- a/src/Doc/ZF/ZF_Isar.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-theory ZF_Isar
-imports Main
-begin
-
-(*<*)
-ML_file "../antiquote_setup.ML"
-(*>*)
-
-chapter {* Some Isar language elements *}
-
-section {* Type checking *}
-
-text {*
-  The ZF logic is essentially untyped, so the concept of ``type
-  checking'' is performed as logical reasoning about set-membership
-  statements.  A special method assists users in this task; a version
-  of this is already declared as a ``solver'' in the standard
-  Simplifier setup.
-
-  \begin{matharray}{rcl}
-    @{command_def (ZF) "print_tcset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{method_def (ZF) typecheck} & : & @{text method} \\
-    @{attribute_def (ZF) TC} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute (ZF) TC} (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command (ZF) "print_tcset"} prints the collection of
-  typechecking rules of the current context.
-  
-  \item @{method (ZF) typecheck} attempts to solve any pending
-  type-checking problems in subgoals.
-  
-  \item @{attribute (ZF) TC} adds or deletes type-checking rules from
-  the context.
-
-  \end{description}
-*}
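-
-text {*
-  For example, a simple membership goal such as the following is
-  typically solved in one step (a small sketch; the lemma is stated only
-  for illustration):
-*}
-
-lemma "succ(succ(0)) \<in> nat"
-  by typecheck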
-
-
-section {* (Co)Inductive sets and datatypes *}
-
-subsection {* Set definitions *}
-
-text {*
-  In ZF everything is a set.  The generic inductive package also
-  provides a specific view for ``datatype'' specifications.
-  Coinductive definitions are available in both cases, too.
-
-  \begin{matharray}{rcl}
-    @{command_def (ZF) "inductive"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (ZF) "coinductive"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (ZF) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (ZF) "codatatype"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{command (ZF) inductive} | @@{command (ZF) coinductive}) domains intros hints
-    ;
-
-    domains: @'domains' (@{syntax term} + '+') ('<=' | '\<subseteq>') @{syntax term}
-    ;
-    intros: @'intros' (@{syntax thmdecl}? @{syntax prop} +)
-    ;
-    hints: @{syntax (ZF) "monos"}? condefs? \<newline>
-      @{syntax (ZF) typeintros}? @{syntax (ZF) typeelims}?
-    ;
-    @{syntax_def (ZF) "monos"}: @'monos' @{syntax thmrefs}
-    ;
-    condefs: @'con_defs' @{syntax thmrefs}
-    ;
-    @{syntax_def (ZF) typeintros}: @'type_intros' @{syntax thmrefs}
-    ;
-    @{syntax_def (ZF) typeelims}: @'type_elims' @{syntax thmrefs}
-  \<close>}
-
-  In the following syntax specification, @{text "monos"}, @{text
-  typeintros}, and @{text typeelims} are the same as above.
-
-  @{rail \<open>
-    (@@{command (ZF) datatype} | @@{command (ZF) codatatype}) domain? (dtspec + @'and') hints
-    ;
-
-    domain: ('<=' | '\<subseteq>') @{syntax term}
-    ;
-    dtspec: @{syntax term} '=' (con + '|')
-    ;
-    con: @{syntax name} ('(' (@{syntax term} ',' +) ')')?
-    ;
-    hints: @{syntax (ZF) "monos"}? @{syntax (ZF) typeintros}? @{syntax (ZF) typeelims}?
-  \<close>}
-
-  See \cite{isabelle-ZF} for further information on inductive
-  definitions in ZF, but note that this covers the old-style theory
-  format.
-*}
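-
-text {*
-  As a concrete sketch, the finite-powerset operator of the examples
-  theory is introduced as follows; it is only quoted here, because
-  @{text Fin} is already provided by the ZF library:
-*}
-
-(*
-consts  Fin :: "i=>i"
-inductive
-  domains   "Fin(A)" \<subseteq> "Pow(A)"
-  intros
-    emptyI:  "0 \<in> Fin(A)"
-    consI:   "[| a \<in> A;  b \<in> Fin(A) |] ==> cons(a,b) \<in> Fin(A)"
-  type_intros  empty_subsetI cons_subsetI PowI
-  type_elims   PowD [elim_format]
-*)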
-
-
-subsection {* Primitive recursive functions *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (ZF) "primrec"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (ZF) primrec} (@{syntax thmdecl}? @{syntax prop} +)
-  \<close>}
-*}
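-
-text {*
-  For example, the examples theory counts the nodes of a binary tree by
-  primitive recursion over its @{text bt} datatype (quoted as a sketch,
-  since that datatype is not available in this theory):
-*}
-
-(*
-consts  n_nodes :: "i => i"
-primrec
-  "n_nodes(Lf) = 0"
-  "n_nodes(Br(a,l,r)) = succ(n_nodes(l) #+ n_nodes(r))"
-*)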
-
-
-subsection {* Cases and induction: emulating tactic scripts *}
-
-text {*
-  The following important tactical tools of Isabelle/ZF have been
-  ported to Isar.  These should not be used in proper proof texts.
-
-  \begin{matharray}{rcl}
-    @{method_def (ZF) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (ZF) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (ZF) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
-    @{command_def (ZF) "inductive_cases"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method (ZF) case_tac} | @@{method (ZF) induct_tac}) @{syntax goal_spec}? @{syntax name}
-    ;
-    @@{method (ZF) ind_cases} (@{syntax prop} +)
-    ;
-    @@{command (ZF) inductive_cases} (@{syntax thmdecl}? (@{syntax prop} +) + @'and')
-  \<close>}
-*}
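-
-text {*
-  A typical use of @{method (ZF) induct_tac}, again taken from the
-  examples theory (quoted as a sketch, since the @{text bt} datatype is
-  not available in this theory):
-*}
-
-(*
-lemma Br_neq_left [rule_format]: "l \<in> bt(A) ==> \<forall>x r. Br(x, l, r) \<noteq> l"
-  apply (induct_tac l)
-  apply auto
-  done
-*)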
-
-end
--- a/src/Doc/ZF/ZF_examples.thy	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,202 +0,0 @@
-header{*Examples of Reasoning in ZF Set Theory*}
-
-theory ZF_examples imports Main_ZFC begin
-
-subsection {* Binary Trees *}
-
-consts
-  bt :: "i => i"
-
-datatype "bt(A)" =
-  Lf | Br ("a \<in> A", "t1 \<in> bt(A)", "t2 \<in> bt(A)")
-
-declare bt.intros [simp]
-
-text{*Induction via tactic emulation*}
-lemma Br_neq_left [rule_format]: "l \<in> bt(A) ==> \<forall>x r. Br(x, l, r) \<noteq> l"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-  apply (induct_tac l)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-  apply auto
-  done
-
-(*
-  apply (Inductive.case_tac l)
-  apply (tactic {*exhaust_tac "l" 1*})
-*)
-
-text{*The new induction method, which I don't understand*}
-lemma Br_neq_left': "l \<in> bt(A) ==> (!!x r. Br(x, l, r) \<noteq> l)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-  apply (induct set: bt)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-  apply auto
-  done
-
-lemma Br_iff: "Br(a,l,r) = Br(a',l',r') <-> a=a' & l=l' & r=r'"
-  -- "Proving a freeness theorem."
-  by (blast elim!: bt.free_elims)
-
-inductive_cases Br_in_bt: "Br(a,l,r) \<in> bt(A)"
-  -- "An elimination rule, for type-checking."
-
-text {*
-@{thm[display] Br_in_bt[no_vars]}
-*};
-
-subsection{*Primitive recursion*}
-
-consts  n_nodes :: "i => i"
-primrec
-  "n_nodes(Lf) = 0"
-  "n_nodes(Br(a,l,r)) = succ(n_nodes(l) #+ n_nodes(r))"
-
-lemma n_nodes_type [simp]: "t \<in> bt(A) ==> n_nodes(t) \<in> nat"
-  by (induct_tac t, auto) 
-
-consts  n_nodes_aux :: "i => i"
-primrec
-  "n_nodes_aux(Lf) = (\<lambda>k \<in> nat. k)"
-  "n_nodes_aux(Br(a,l,r)) = 
-      (\<lambda>k \<in> nat. n_nodes_aux(r) `  (n_nodes_aux(l) ` succ(k)))"
-
-lemma n_nodes_aux_eq [rule_format]:
-     "t \<in> bt(A) ==> \<forall>k \<in> nat. n_nodes_aux(t)`k = n_nodes(t) #+ k"
-  by (induct_tac t, simp_all) 
-
-definition n_nodes_tail :: "i => i" where
-   "n_nodes_tail(t) == n_nodes_aux(t) ` 0"
-
-lemma "t \<in> bt(A) ==> n_nodes_tail(t) = n_nodes(t)"
- by (simp add: n_nodes_tail_def n_nodes_aux_eq) 
-
-
-subsection {*Inductive definitions*}
-
-consts  Fin       :: "i=>i"
-inductive
-  domains   "Fin(A)" \<subseteq> "Pow(A)"
-  intros
-    emptyI:  "0 \<in> Fin(A)"
-    consI:   "[| a \<in> A;  b \<in> Fin(A) |] ==> cons(a,b) \<in> Fin(A)"
-  type_intros  empty_subsetI cons_subsetI PowI
-  type_elims   PowD [elim_format]
-
-
-consts  acc :: "i => i"
-inductive
-  domains "acc(r)" \<subseteq> "field(r)"
-  intros
-    vimage:  "[| r-``{a}: Pow(acc(r)); a \<in> field(r) |] ==> a \<in> acc(r)"
-  monos      Pow_mono
-
-
-consts
-  llist  :: "i=>i";
-
-codatatype
-  "llist(A)" = LNil | LCons ("a \<in> A", "l \<in> llist(A)")
-
-
-(*Coinductive definition of equality*)
-consts
-  lleq :: "i=>i"
-
-(*Previously used <*> in the domain and variant pairs as elements.  But
-  standard pairs work just as well.  To use variant pairs, one must prefix
-  a q/Q to the Sigma, Pair and converse rules.*)
-coinductive
-  domains "lleq(A)" \<subseteq> "llist(A) * llist(A)"
-  intros
-    LNil:  "<LNil, LNil> \<in> lleq(A)"
-    LCons: "[| a \<in> A; <l,l'> \<in> lleq(A) |] 
-            ==> <LCons(a,l), LCons(a,l')> \<in> lleq(A)"
-  type_intros  llist.intros
-
-
-subsection{*Powerset example*}
-
-lemma Pow_mono: "A\<subseteq>B  ==>  Pow(A) \<subseteq> Pow(B)"
-apply (rule subsetI)
-apply (rule PowI)
-apply (drule PowD)
-apply (erule subset_trans, assumption)
-done
-
-lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule equalityI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule Int_greatest)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule Int_lower1 [THEN Pow_mono])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule Int_lower2 [THEN Pow_mono])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule subsetI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule IntE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule PowI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (drule PowD)+
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule Int_greatest)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (assumption+)
-done
-
-text{*Trying again from the beginning in order to use @{text blast}*}
-lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
-by blast
-
-
-lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule subsetI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule UnionE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule UnionI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule subsetD)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-done
-
-text{*A more abstract version of the same proof*}
-
-lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule Union_least)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule Union_upper)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (erule subsetD, assumption)
-done
-
-
-lemma "[| a \<in> A;  f \<in> A->B;  g \<in> C->D;  A \<inter> C = 0 |] ==> (f \<union> g)`a = f`a"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule apply_equality)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule UnI1)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule apply_Pair)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply (rule fun_disjoint_Un)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-apply assumption 
-done
-
-end
--- a/src/Doc/ZF/document/FOL.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,855 +0,0 @@
-%!TEX root = logics-ZF.tex
-\chapter{First-Order Logic}
-\index{first-order logic|(}
-
-Isabelle implements Gentzen's natural deduction systems {\sc nj} and {\sc
-  nk}.  Intuitionistic first-order logic is defined first, as theory
-\thydx{IFOL}.  Classical logic, theory \thydx{FOL}, is
-obtained by adding the double negation rule.  Basic proof procedures are
-provided.  The intuitionistic prover works with derived rules to simplify
-implications in the assumptions.  Classical~\texttt{FOL} employs Isabelle's
-classical reasoner, which simulates a sequent calculus.
-
-\section{Syntax and rules of inference}
-The logic is many-sorted, using Isabelle's type classes.  The class of
-first-order terms is called \cldx{term} and is a subclass of \isa{logic}.
-No types of individuals are provided, but extensions can define types such
-as \isa{nat::term} and type constructors such as \isa{list::(term)term}
-(see the examples directory, \texttt{FOL/ex}).  Below, the type variable
-$\alpha$ ranges over class \isa{term}; the equality symbol and quantifiers
-are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
-belongs to class~\cldx{logic}.  Figure~\ref{fol-syntax} gives the syntax.
-Note that $a$\verb|~=|$b$ is translated to $\neg(a=b)$.
-
-Figure~\ref{fol-rules} shows the inference rules with their names.
-Negation is defined in the usual way for intuitionistic logic; $\neg P$
-abbreviates $P\imp\bot$.  The biconditional~($\bimp$) is defined through
-$\conj$ and~$\imp$; introduction and elimination rules are derived for it.
-
-The unique existence quantifier, $\exists!x.P(x)$, is defined in terms
-of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
-quantifications.  For instance, $\exists!x\;y.P(x,y)$ abbreviates
-$\exists!x. \exists!y.P(x,y)$; note that this does not mean that there
-exists a unique pair $(x,y)$ satisfying~$P(x,y)$.
-
-Some intuitionistic derived rules are shown in
-Fig.\ts\ref{fol-int-derived}, again with their names.  These include
-rules for the defined symbols $\neg$, $\bimp$ and $\exists!$.  Natural
-deduction typically involves a combination of forward and backward
-reasoning, particularly with the destruction rules $(\conj E)$,
-$({\imp}E)$, and~$(\forall E)$.  Isabelle's backward style handles these
-rules badly, so sequent-style rules are derived to eliminate conjunctions,
-implications, and universal quantifiers.  Used with elim-resolution,
-\tdx{allE} eliminates a universal quantifier while \tdx{all_dupE}
-re-inserts the quantified formula for later use.  The rules
-\isa{conj\_impE}, etc., support the intuitionistic proof procedure
-(see~\S\ref{fol-int-prover}).
-
-See the on-line theory library for complete listings of the rules and
-derived rules.
-
-\begin{figure} 
-\begin{center}
-\begin{tabular}{rrr} 
-  \it name      &\it meta-type  & \it description \\ 
-  \cdx{Trueprop}& $o\To prop$           & coercion to $prop$\\
-  \cdx{Not}     & $o\To o$              & negation ($\neg$) \\
-  \cdx{True}    & $o$                   & tautology ($\top$) \\
-  \cdx{False}   & $o$                   & absurdity ($\bot$)
-\end{tabular}
-\end{center}
-\subcaption{Constants}
-
-\begin{center}
-\begin{tabular}{llrrr} 
-  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
-  \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 & 
-        universal quantifier ($\forall$) \\
-  \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 & 
-        existential quantifier ($\exists$) \\
-  \isa{EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
-        unique existence ($\exists!$)
-\end{tabular}
-\index{*"E"X"! symbol}
-\end{center}
-\subcaption{Binders} 
-
-\begin{center}
-\index{*"= symbol}
-\index{&@{\tt\&} symbol}
-\index{"!@{\tt\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working
-\index{*"-"-"> symbol}
-\index{*"<"-"> symbol}
-\begin{tabular}{rrrr} 
-  \it symbol    & \it meta-type         & \it priority & \it description \\ 
-  \tt =         & $[\alpha,\alpha]\To o$ & Left 50 & equality ($=$) \\
-  \tt \&        & $[o,o]\To o$          & Right 35 & conjunction ($\conj$) \\
-  \tt |         & $[o,o]\To o$          & Right 30 & disjunction ($\disj$) \\
-  \tt -->       & $[o,o]\To o$          & Right 25 & implication ($\imp$) \\
-  \tt <->       & $[o,o]\To o$          & Right 25 & biconditional ($\bimp$) 
-\end{tabular}
-\end{center}
-\subcaption{Infixes}
-
-\dquotes
-\[\begin{array}{rcl}
- formula & = & \hbox{expression of type~$o$} \\
-         & | & term " = " term \quad| \quad term " \ttilde= " term \\
-         & | & "\ttilde\ " formula \\
-         & | & formula " \& " formula \\
-         & | & formula " | " formula \\
-         & | & formula " --> " formula \\
-         & | & formula " <-> " formula \\
-         & | & "ALL~" id~id^* " . " formula \\
-         & | & "EX~~" id~id^* " . " formula \\
-         & | & "EX!~" id~id^* " . " formula
-  \end{array}
-\]
-\subcaption{Grammar}
-\caption{Syntax of \texttt{FOL}} \label{fol-syntax}
-\end{figure}
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{refl}        a=a
-\tdx{subst}       [| a=b;  P(a) |] ==> P(b)
-\subcaption{Equality rules}
-
-\tdx{conjI}       [| P;  Q |] ==> P&Q
-\tdx{conjunct1}   P&Q ==> P
-\tdx{conjunct2}   P&Q ==> Q
-
-\tdx{disjI1}      P ==> P|Q
-\tdx{disjI2}      Q ==> P|Q
-\tdx{disjE}       [| P|Q;  P ==> R;  Q ==> R |] ==> R
-
-\tdx{impI}        (P ==> Q) ==> P-->Q
-\tdx{mp}          [| P-->Q;  P |] ==> Q
-
-\tdx{FalseE}      False ==> P
-\subcaption{Propositional rules}
-
-\tdx{allI}        (!!x. P(x))  ==> (ALL x.P(x))
-\tdx{spec}        (ALL x.P(x)) ==> P(x)
-
-\tdx{exI}         P(x) ==> (EX x.P(x))
-\tdx{exE}         [| EX x.P(x);  !!x. P(x) ==> R |] ==> R
-\subcaption{Quantifier rules}
-
-\tdx{True_def}    True        == False-->False
-\tdx{not_def}     ~P          == P-->False
-\tdx{iff_def}     P<->Q       == (P-->Q) & (Q-->P)
-\tdx{ex1_def}     EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)
-\subcaption{Definitions}
-\end{ttbox}
-
-\caption{Rules of intuitionistic logic} \label{fol-rules}
-\end{figure}
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{sym}       a=b ==> b=a
-\tdx{trans}     [| a=b;  b=c |] ==> a=c
-\tdx{ssubst}    [| b=a;  P(a) |] ==> P(b)
-\subcaption{Derived equality rules}
-
-\tdx{TrueI}     True
-
-\tdx{notI}      (P ==> False) ==> ~P
-\tdx{notE}      [| ~P;  P |] ==> R
-
-\tdx{iffI}      [| P ==> Q;  Q ==> P |] ==> P<->Q
-\tdx{iffE}      [| P <-> Q;  [| P-->Q; Q-->P |] ==> R |] ==> R
-\tdx{iffD1}     [| P <-> Q;  P |] ==> Q            
-\tdx{iffD2}     [| P <-> Q;  Q |] ==> P
-
-\tdx{ex1I}      [| P(a);  !!x. P(x) ==> x=a |]  ==>  EX! x. P(x)
-\tdx{ex1E}      [| EX! x.P(x);  !!x.[| P(x);  ALL y. P(y) --> y=x |] ==> R 
-          |] ==> R
-\subcaption{Derived rules for \(\top\), \(\neg\), \(\bimp\) and \(\exists!\)}
-
-\tdx{conjE}     [| P&Q;  [| P; Q |] ==> R |] ==> R
-\tdx{impE}      [| P-->Q;  P;  Q ==> R |] ==> R
-\tdx{allE}      [| ALL x.P(x);  P(x) ==> R |] ==> R
-\tdx{all_dupE}  [| ALL x.P(x);  [| P(x); ALL x.P(x) |] ==> R |] ==> R
-\subcaption{Sequent-style elimination rules}
-
-\tdx{conj_impE} [| (P&Q)-->S;  P-->(Q-->S) ==> R |] ==> R
-\tdx{disj_impE} [| (P|Q)-->S;  [| P-->S; Q-->S |] ==> R |] ==> R
-\tdx{imp_impE}  [| (P-->Q)-->S;  [| P; Q-->S |] ==> Q;  S ==> R |] ==> R
-\tdx{not_impE}  [| ~P --> S;  P ==> False;  S ==> R |] ==> R
-\tdx{iff_impE}  [| (P<->Q)-->S; [| P; Q-->S |] ==> Q; [| Q; P-->S |] ==> P;
-             S ==> R |] ==> R
-\tdx{all_impE}  [| (ALL x.P(x))-->S;  !!x.P(x);  S ==> R |] ==> R
-\tdx{ex_impE}   [| (EX x.P(x))-->S;  P(a)-->S ==> R |] ==> R
-\end{ttbox}
-\subcaption{Intuitionistic simplification of implication}
-\caption{Derived rules for intuitionistic logic} \label{fol-int-derived}
-\end{figure}
-
-
-\section{Generic packages}
-FOL instantiates most of Isabelle's generic packages.
-\begin{itemize}
-\item 
-It instantiates the simplifier, which is invoked through the method 
-\isa{simp}.  Both equality ($=$) and the biconditional
-($\bimp$) may be used for rewriting.  
-
-\item 
-It instantiates the classical reasoner, which is invoked mainly through the 
-methods \isa{blast} and \isa{auto}.  See~\S\ref{fol-cla-prover}
-for details. 
-%
-%\item FOL provides the tactic \ttindex{hyp_subst_tac}, which substitutes for
-%  an equality throughout a subgoal and its hypotheses.  This tactic uses FOL's
-%  general substitution rule.
-\end{itemize}
-
-\begin{warn}\index{simplification!of conjunctions}%
-  Simplifying $a=b\conj P(a)$ to $a=b\conj P(b)$ is often advantageous.  The
-  left part of a conjunction helps in simplifying the right part.  This effect
-  is not available by default: it can be slow.  It can be obtained by
-  including the theorem \isa{conj_cong}\index{*conj_cong (rule)}%
-  as a congruence rule in
-  simplification, \isa{simp cong:\ conj\_cong}.
-\end{warn}
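-
-For example, the congruence rule can be supplied to a single invocation of
-the simplifier like this (a schematic fragment):
-\begin{isabelle}
-\isacommand{apply}\ (simp\ cong:\ conj\_cong)
-\end{isabelle}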
-
-
-\section{Intuitionistic proof procedures} \label{fol-int-prover}
-Implication elimination (the rules~\isa{mp} and~\isa{impE}) pose
-difficulties for automated proof.  In intuitionistic logic, the assumption
-$P\imp Q$ cannot be treated like $\neg P\disj Q$.  Given $P\imp Q$, we may
-use~$Q$ provided we can prove~$P$; the proof of~$P$ may require repeated
-use of $P\imp Q$.  If the proof of~$P$ fails then the whole branch of the
-proof must be abandoned.  Thus intuitionistic propositional logic requires
-backtracking.  
-
-For an elementary example, consider the intuitionistic proof of $Q$ from
-$P\imp Q$ and $(P\imp Q)\imp P$.  The implication $P\imp Q$ is needed
-twice:
-\[ \infer[({\imp}E)]{Q}{P\imp Q &
-       \infer[({\imp}E)]{P}{(P\imp Q)\imp P & P\imp Q}} 
-\]
-The theorem prover for intuitionistic logic does not use~\isa{impE}.\@
-Instead, it simplifies implications using derived rules
-(Fig.\ts\ref{fol-int-derived}).  It reduces the antecedents of implications
-to atoms and then uses Modus Ponens: from $P\imp Q$ and~$P$ deduce~$Q$.
-The rules \tdx{conj_impE} and \tdx{disj_impE} are 
-straightforward: $(P\conj Q)\imp S$ is equivalent to $P\imp (Q\imp S)$, and
-$(P\disj Q)\imp S$ is equivalent to the conjunction of $P\imp S$ and $Q\imp
-S$.  The other \ldots\isa{\_impE} rules are unsafe; the method requires
-backtracking.  All the rules are derived in the same simple manner.
-
-Dyckhoff has independently discovered similar rules, and (more importantly)
-has demonstrated their completeness for propositional
-logic~\cite{dyckhoff}.  However, the tactics given below are not complete
-for first-order logic because they discard universally quantified
-assumptions after a single use. These are \ML{} functions, and are listed
-below with their \ML{} type:
-\begin{ttbox} 
-mp_tac              : int -> tactic
-eq_mp_tac           : int -> tactic
-IntPr.safe_step_tac : int -> tactic
-IntPr.safe_tac      :        tactic
-IntPr.inst_step_tac : int -> tactic
-IntPr.step_tac      : int -> tactic
-IntPr.fast_tac      : int -> tactic
-IntPr.best_tac      : int -> tactic
-\end{ttbox}
-Most of these belong to the \ML{} structure \texttt{IntPr} and resemble the
-tactics of Isabelle's classical reasoner.  There are no corresponding
-Isar methods, but you can use the 
-\isa{tactic} method to call \ML{} tactics in an Isar script:
-\begin{isabelle}
-\isacommand{apply}\ (tactic\ \ttlbrace*IntPr.fast\_tac\ @\{context\}\ 1*\ttrbrace)
-\end{isabelle}
-Here is a description of each tactic:
-
-\begin{ttdescription}
-\item[\ttindexbold{mp_tac} {\it i}] 
-attempts to use \tdx{notE} or \tdx{impE} within the assumptions in
-subgoal $i$.  For each assumption of the form $\neg P$ or $P\imp Q$, it
-searches for another assumption unifiable with~$P$.  By
-contradiction with $\neg P$ it can solve the subgoal completely; by Modus
-Ponens it can replace the assumption $P\imp Q$ by $Q$.  The tactic can
-produce multiple outcomes, enumerating all suitable pairs of assumptions.
-
-\item[\ttindexbold{eq_mp_tac} {\it i}] 
-is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
-is safe.
-
-\item[\ttindexbold{IntPr.safe_step_tac} $i$] performs a safe step on
-subgoal~$i$.  This may include proof by assumption or Modus Ponens (taking
-care not to instantiate unknowns), or \texttt{hyp_subst_tac}. 
-
-\item[\ttindexbold{IntPr.safe_tac}] repeatedly performs safe steps on all 
-subgoals.  It is deterministic, with at most one outcome.
-
-\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like \texttt{safe_step_tac},
-but allows unknowns to be instantiated.
-
-\item[\ttindexbold{IntPr.step_tac} $i$] tries \texttt{safe_tac} or {\tt
-    inst_step_tac}, or applies an unsafe rule.  This is the basic step of
-  the intuitionistic proof procedure.
-
-\item[\ttindexbold{IntPr.fast_tac} $i$] applies \texttt{step_tac}, using
-depth-first search, to solve subgoal~$i$.
-
-\item[\ttindexbold{IntPr.best_tac} $i$] applies \texttt{step_tac}, using
-best-first search (guided by the size of the proof state) to solve subgoal~$i$.
-\end{ttdescription}
-Here are some of the theorems that \texttt{IntPr.fast_tac} proves
-automatically.  The latter three date from {\it Principia Mathematica}
-(*11.53, *11.55, *11.61)~\cite{principia}.
-\begin{ttbox}
-~~P & ~~(P --> Q) --> ~~Q
-(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))
-(EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))
-(EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))
-\end{ttbox}
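-
-For instance, the first of these can be proved from within an Isar text by
-invoking the \ML{} tactic directly (a schematic fragment, analogous to the
-\isa{tactic} call shown above):
-\begin{isabelle}
-\isacommand{lemma}\ "\isachartilde \isachartilde P\ \&\ \isachartilde \isachartilde (P-->Q)\ -->\ \isachartilde \isachartilde Q"\isanewline
-\isacommand{by}\ (tactic\ \ttlbrace*IntPr.fast\_tac\ @\{context\}\ 1*\ttrbrace)
-\end{isabelle}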
-
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{excluded_middle}    ~P | P
-
-\tdx{disjCI}    (~Q ==> P) ==> P|Q
-\tdx{exCI}      (ALL x. ~P(x) ==> P(a)) ==> EX x.P(x)
-\tdx{impCE}     [| P-->Q; ~P ==> R; Q ==> R |] ==> R
-\tdx{iffCE}     [| P<->Q;  [| P; Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
-\tdx{notnotD}   ~~P ==> P
-\tdx{swap}      ~P ==> (~Q ==> P) ==> Q
-\end{ttbox}
-\caption{Derived rules for classical logic} \label{fol-cla-derived}
-\end{figure}
-
-
-\section{Classical proof procedures} \label{fol-cla-prover}
-The classical theory, \thydx{FOL}, consists of intuitionistic logic plus
-the rule
-$$ \vcenter{\infer{P}{\infer*{P}{[\neg P]}}} \eqno(classical) $$
-\noindent
-Natural deduction in classical logic is not really all that natural.  FOL
-derives classical introduction rules for $\disj$ and~$\exists$, as well as
-classical elimination rules for~$\imp$ and~$\bimp$, and the swap rule (see
-Fig.\ts\ref{fol-cla-derived}).
-
-The classical reasoner is installed.  The most useful methods are
-\isa{blast} (pure classical reasoning) and \isa{auto} (classical reasoning
-combined with simplification), but the full range of
-methods is available, including \isa{clarify},
-\isa{fast}, \isa{best} and \isa{force}. 
- See the 
-\iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
-        {Chap.\ts\ref{chap:classical}} 
-and the \emph{Tutorial}~\cite{isa-tutorial}
-for more discussion of classical proof methods.
-
-
-\section{An intuitionistic example}
-Here is a session similar to one in the book {\em Logic and Computation}
-\cite[pages~222--3]{paulson87}. It illustrates the treatment of
-quantifier reasoning, which is handled differently in Isabelle than in
-{\sc lcf}-based theorem provers such as {\sc hol}.  
-
-The theory header specifies that we are working in intuitionistic
-logic by designating \isa{IFOL} rather than \isa{FOL} as the parent
-theory:
-\begin{isabelle}
-\isacommand{theory}\ IFOL\_examples\ \isacommand{imports}\ IFOL\isanewline
-\isacommand{begin}
-\end{isabelle}
-The proof begins by entering the goal, then applying the rule $({\imp}I)$.
-\begin{isabelle}
-\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
-\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
-\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
-\isanewline
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ \isasymforall x.\ \isasymexists y.\ Q(x,\ y)
-\end{isabelle}
-Isabelle's output is shown as it would appear using Proof General.
-In this example, we shall never have more than one subgoal.  Applying
-$({\imp}I)$ replaces~\isa{\isasymlongrightarrow}
-by~\isa{\isasymLongrightarrow}, so that
-\(\ex{y}\all{x}Q(x,y)\) becomes an assumption.  We have the choice of
-$({\exists}E)$ and $({\forall}I)$; let us try the latter.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ allI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)\hfill\((*)\)
-\end{isabelle}
-Applying $({\forall}I)$ replaces the \isa{\isasymforall
-x} (in ASCII, \isa{ALL~x}) by \isa{\isasymAnd x}
-(or \isa{!!x}), replacing FOL's universal quantifier by Isabelle's 
-meta universal quantifier.  The bound variable is a {\bf parameter} of
-the subgoal.  We now must choose between $({\exists}I)$ and
-$({\exists}E)$.  What happens if the wrong rule is chosen?
-\begin{isabelle}
-\isacommand{apply}\ (rule\ exI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ Q(x,\ ?y2(x))
-\end{isabelle}
-The new subgoal~1 contains the function variable \isa{?y2}.  Instantiating
-\isa{?y2} can replace~\isa{?y2(x)} by a term containing~\isa{x}, even
-though~\isa{x} is a bound variable.  Now we analyse the assumption
-\(\exists y.\forall x. Q(x,y)\) using elimination rules:
-\begin{isabelle}
-\isacommand{apply}\ (erule\ exE)\isanewline
-\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\ \isasymLongrightarrow \ Q(x,\ ?y2(x))
-\end{isabelle}
-Applying $(\exists E)$ has produced the parameter \isa{y} and stripped the
-existential quantifier from the assumption.  But the subgoal is unprovable:
-there is no way to unify \isa{?y2(x)} with the bound variable~\isa{y}.
-Using Proof General, we can return to the critical point, marked
-$(*)$ above.  This time we apply $({\exists}E)$:
-\begin{isabelle}
-\isacommand{apply}\ (erule\ exE)\isanewline
-\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)
-\end{isabelle}
-We now have two parameters and no scheme variables.  Applying
-$({\exists}I)$ and $({\forall}E)$ produces two scheme variables, which are
-applied to those parameters.  Parameters should be produced early, as this
-example demonstrates.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ exI)\isanewline
-\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ Q(x,\ ?y3(x,\ y))
-\isanewline
-\isacommand{apply}\ (erule\ allE)\isanewline
-\ 1.\ \isasymAnd x\ y.\ Q(?x4(x,\ y),\ y)\ \isasymLongrightarrow \
-Q(x,\ ?y3(x,\ y))
-\end{isabelle}
-The subgoal has variables \isa{?y3} and \isa{?x4} applied to both
-parameters.  The obvious projection functions unify \isa{?x4(x,y)} with~\isa{
-x} and \isa{?y3(x,y)} with~\isa{y}.
-\begin{isabelle}
-\isacommand{apply}\ assumption\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-The theorem was proved in six method invocations, not counting the
-abandoned ones.  But proof checking is tedious, and the \ML{} tactic
-\ttindex{IntPr.fast_tac} can prove the theorem in one step.
-\begin{isabelle}
-\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
-\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
-\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
-\isanewline
-\isacommand{by} (tactic \ttlbrace*IntPr.fast_tac @\{context\} 1*\ttrbrace)\isanewline
-No\ subgoals!
-\end{isabelle}
-
-
-\section{An example of intuitionistic negation}
-The following example demonstrates the specialized forms of implication
-elimination.  Even propositional formulae can be difficult to prove from
-the basic rules; the specialized rules help considerably.  
-
-Propositional examples are easy to invent.  As Dummett notes~\cite[page
-28]{dummett}, $\neg P$ is classically provable if and only if it is
-intuitionistically provable;  therefore, $P$ is classically provable if and
-only if $\neg\neg P$ is intuitionistically provable.%
-\footnote{This remark holds only for propositional logic, not if $P$ is
-  allowed to contain quantifiers.}
-%
-Proving $\neg\neg P$ intuitionistically is
-much harder than proving~$P$ classically.
-
-Our example is the double negation of the classical tautology $(P\imp
-Q)\disj (Q\imp P)$.  The first step is to apply the
-\isa{unfold} method, which expands
-negations to implications using the definition $\neg P\equiv P\imp\bot$
-and allows use of the special implication rules.
-\begin{isabelle}
-\isacommand{lemma}\ "\isachartilde \ \isachartilde \ ((P-->Q)\ |\ (Q-->P))"\isanewline
-\ 1.\ \isasymnot \ \isasymnot \ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P))
-\isanewline
-\isacommand{apply}\ (unfold\ not\_def)\isanewline
-\ 1.\ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False)\ \isasymlongrightarrow \ False%
-\end{isabelle}
-The next step is a trivial use of $(\imp I)$.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ (P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\ \isasymLongrightarrow \ False%
-\end{isabelle}
-By $(\imp E)$ it would suffice to prove $(P\imp Q)\disj (Q\imp P)$, but
-that formula is not a theorem of intuitionistic logic.  Instead, we
-apply the specialized implication rule \tdx{disj_impE}.  It splits the
-assumption into two assumptions, one for each disjunct.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ disj\_impE)\isanewline
-\ 1.\ \isasymlbrakk (P\ \isasymlongrightarrow \ Q)\ \isasymlongrightarrow \ False;\ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \
-False
-\end{isabelle}
-We cannot hope to prove $P\imp Q$ or $Q\imp P$ separately, but
-their negations are inconsistent.  Applying \tdx{imp_impE} breaks down
-the assumption $\neg(P\imp Q)$, asking to show~$Q$ while providing new
-assumptions~$P$ and~$\neg Q$.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ imp\_impE)\isanewline
-\ 1.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ P;\ Q\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \
-False
-\end{isabelle}
-Subgoal~2 holds trivially; let us ignore it and continue working on
-subgoal~1.  Thanks to the assumption~$P$, we could prove $Q\imp P$;
-applying \tdx{imp_impE} is simpler.
-\begin{isabelle}
-\ \isacommand{apply}\ (erule\ imp\_impE)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ Q;\ P\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ P\isanewline
-\ 2.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 3.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ False%
-\end{isabelle}
-The three subgoals are all trivial.
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\
-False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\
-\isasymlongrightarrow \ False;\ False\isasymrbrakk \
-\isasymLongrightarrow \ False%
-\isanewline
-\isacommand{apply}\ (erule\ FalseE)+\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-This proof is also trivial for the \ML{} tactic \isa{IntPr.fast_tac}.
-
-
-\section{A classical example} \label{fol-cla-example}
-To illustrate classical logic, we shall prove the theorem
-$\ex{y}\all{x}P(y)\imp P(x)$.  Informally, the theorem can be proved as
-follows.  Choose~$y$ such that~$\neg P(y)$, if such exists; otherwise
-$\all{x}P(x)$ is true.  Either way the theorem holds.  First, we must
-work in a theory based on classical logic, the theory \isa{FOL}:
-\begin{isabelle}
-\isacommand{theory}\ FOL\_examples\ \isacommand{imports}\ FOL\isanewline
-\isacommand{begin}
-\end{isabelle}
-
-The formal proof does not conform in any obvious way to the sketch given
-above.  Its key step is its first rule, \tdx{exCI}, a classical
-version of~$(\exists I)$ that allows multiple instantiation of the
-quantifier.
-\begin{isabelle}
-\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
-\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
-\isanewline
-\isacommand{apply}\ (rule\ exCI)\isanewline
-\ 1.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ \isasymforall x.\ P(?a)\ \isasymlongrightarrow \ P(x)
-\end{isabelle}
-We can either exhibit a term \isa{?a} to satisfy the conclusion of
-subgoal~1, or produce a contradiction from the assumption.  The next
-steps are routine.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ allI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ P(?a)\ \isasymlongrightarrow \ P(x)
-\isanewline
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x));\ P(?a)\isasymrbrakk \ \isasymLongrightarrow \ P(x)
-\end{isabelle}
-By the duality between $\exists$ and~$\forall$, applying~$(\forall E)$
-is equivalent to applying~$(\exists I)$ again.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ allE)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk P(?a);\ \isasymnot \ (\isasymforall xa.\ P(?y3(x))\ \isasymlongrightarrow \ P(xa))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
-\end{isabelle}
-In classical logic, a negated assumption is equivalent to a conclusion.  To
-get this effect, we create a swapped version of $(\forall I)$ and apply it
-using \ttindex{erule}.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ allI\ [THEN\ [2]\ swap])\isanewline
-\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x)\isasymrbrakk \ \isasymLongrightarrow \ P(?y3(x))\ \isasymlongrightarrow \ P(xa)
-\end{isabelle}
-The operand of \isa{erule} above denotes the following theorem:
-\begin{isabelle}
-\ \ \ \ \isasymlbrakk \isasymnot \ (\isasymforall x.\ ?P1(x));\
-\isasymAnd x.\ \isasymnot \ ?P\ \isasymLongrightarrow \
-?P1(x)\isasymrbrakk \
-\isasymLongrightarrow \ ?P%
-\end{isabelle}
-The previous conclusion, \isa{P(x)}, has become a negated assumption.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(xa)
-\end{isabelle}
-The subgoal has three assumptions.  We produce a contradiction between the
-\index{assumptions!contradictory} assumptions~\isa{\isasymnot P(x)}
-and~\isa{P(?y3(x))}.   The proof never instantiates the
-unknown~\isa{?a}.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ notE)\isanewline
-\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
-\isanewline
-\isacommand{apply}\ assumption\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-The civilised way to prove this theorem is using the
-\methdx{blast} method, which automatically uses the classical form
-of the rule~$(\exists I)$:
-\begin{isabelle}
-\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
-\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
-\isanewline
-\isacommand{by}\ blast\isanewline
-No\ subgoals!
-\end{isabelle}
-If this theorem seems counterintuitive, then perhaps you are an
-intuitionist.  In constructive logic, proving $\ex{y}\all{x}P(y)\imp P(x)$
-requires exhibiting a particular term~$t$ such that $\all{x}P(t)\imp P(x)$,
-which we cannot do without further knowledge about~$P$.
-
-
-\section{Derived rules and the classical tactics}
-Classical first-order logic can be extended with the propositional
-connective $if(P,Q,R)$, where 
-$$ if(P,Q,R) \equiv P\conj Q \disj \neg P \conj R. \eqno(if) $$
-Theorems about $if$ can be proved by treating this as an abbreviation,
-replacing $if(P,Q,R)$ by $P\conj Q \disj \neg P \conj R$ in subgoals.  But
-this duplicates~$P$, causing an exponential blowup and an unreadable
-formula.  Introducing further abbreviations makes the problem worse.
-
-Natural deduction demands rules that introduce and eliminate $if(P,Q,R)$
-directly, without reference to its definition.  The simple identity
-\[ if(P,Q,R) \,\bimp\, (P\imp Q)\conj (\neg P\imp R) \]
-suggests that the
-$if$-introduction rule should be
-\[ \infer[({if}\,I)]{if(P,Q,R)}{\infer*{Q}{[P]}  &  \infer*{R}{[\neg P]}} \]
-The $if$-elimination rule reflects the definition of $if(P,Q,R)$ and the
-elimination rules for~$\disj$ and~$\conj$.
-\[ \infer[({if}\,E)]{S}{if(P,Q,R) & \infer*{S}{[P,Q]}
-                                  & \infer*{S}{[\neg P,R]}} 
-\]
-Having made these plans, we get down to work with Isabelle.  The theory of
-classical logic, \texttt{FOL}, is extended with the constant
-$if::[o,o,o]\To o$.  The axiom \tdx{if_def} asserts the
-equation~$(if)$.
-\begin{isabelle}
-\isacommand{theory}\ If\ \isacommand{imports}\ FOL\isanewline
-\isacommand{begin}\isanewline
-\isacommand{constdefs}\isanewline
-\ \ if\ ::\ "[o,o,o]=>o"\isanewline
-\ \ \ "if(P,Q,R)\ ==\ P\&Q\ |\ \isachartilde P\&R"
-\end{isabelle}
-We create the file \texttt{If.thy} containing these declarations.  (This file
-is in the directory \texttt{FOL/ex} of the Isabelle distribution.)  Typing
-\begin{isabelle}
-use_thy "If";  
-\end{isabelle}
-loads that theory and sets it to be the current context.
-
-
-\subsection{Deriving the introduction rule}
-
-The derivations of the introduction and elimination rules demonstrate the
-methods for rewriting with definitions.  Classical reasoning is required,
-so we use \isa{blast}.
-
-The introduction rule, given the premises $P\Imp Q$ and $\neg P\Imp R$,
-concludes $if(P,Q,R)$.  We propose this lemma and immediately simplify
-using \isa{if\_def} to expand the definition of \isa{if} in the
-subgoal.
-\begin{isabelle}
-\isacommand{lemma}\ ifI: "[|\ P\ ==>\ Q;\ \isachartilde P\ ==>\ R\
-|]\ ==>\ if(P,Q,R)"\isanewline
-\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ Q,\ R)
-\isanewline
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \
-R
-\end{isabelle}
-The rule's premises, although expressed using meta-level implication
-(\isa{\isasymLongrightarrow}), are passed as ordinary implications to
-\methdx{blast}.  
-\begin{isabelle}
-\isacommand{apply}\ blast\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-
-
-\subsection{Deriving the elimination rule}
-The elimination rule has three premises, two of which are themselves rules.
-The conclusion is simply $S$.
-\begin{isabelle}
-\isacommand{lemma}\ ifE:\isanewline
-\ \ \ "[|\ if(P,Q,R);\ \ [|P;\ Q|]\ ==>\ S;\ [|\isachartilde P;\ R|]\ ==>\ S\ |]\ ==>\ S"\isanewline
-\ 1.\ \isasymlbrakk if(P,\ Q,\ R);\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
-\isanewline
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ \isasymlbrakk P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R;\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
-\end{isabelle}
-The proof script is the same as before: \isa{simp} followed by
-\isa{blast}:
-\begin{isabelle}
-\isacommand{apply}\ blast\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-
-
-\subsection{Using the derived rules}
-Our new derived rules, \tdx{ifI} and~\tdx{ifE}, permit natural
-proofs of theorems such as the following:
-\begin{eqnarray*}
-    if(P, if(Q,A,B), if(Q,C,D)) & \bimp & if(Q,if(P,A,C),if(P,B,D)) \\
-    if(if(P,Q,R), A, B)         & \bimp & if(P,if(Q,A,B),if(R,A,B))
-\end{eqnarray*}
-Proofs also require the classical reasoning rules and the $\bimp$
-introduction rule (called~\tdx{iffI}: do not confuse with~\isa{ifI}). 
-
-To display the $if$-rules in action, let us analyse a proof step by step.
-\begin{isabelle}
-\isacommand{lemma}\ if\_commute:\isanewline
-\ \ \ \ \ "if(P,\ if(Q,A,B),\
-if(Q,C,D))\ <->\ if(Q,\ if(P,A,C),\ if(P,B,D))"\isanewline
-\isacommand{apply}\ (rule\ iffI)\isanewline
-\ 1.\ if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 1.\ }if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 2.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 2.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\end{isabelle}
-The $if$-elimination rule can be applied twice in succession.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ ifE)\isanewline
-\ 1.\ \isasymlbrakk P;\ if(Q,\ A,\ B)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 2.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 3.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 3.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\isanewline
-\isacommand{apply}\ (erule\ ifE)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q;\ A\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 2.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 3.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 4.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 4.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\end{isabelle}
-%
-In the first two subgoals, all assumptions have been reduced to atoms.  Now
-$if$-introduction can be applied.  Observe how the $if$-rules break down
-occurrences of $if$ when they become the outermost connective.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ ifI)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ A,\ C)\isanewline
-\ 2.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
-\ 3.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 4.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 5.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 5.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\isanewline
-\isacommand{apply}\ (rule\ ifI)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ P\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
-\ 2.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \ C\isanewline
-\ 3.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
-\ 4.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 5.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 6.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 6.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\end{isabelle}
-Where do we stand?  The first subgoal holds by assumption; the second and
-third, by contradiction.  This is getting tedious.  We could use the classical
-reasoner, but first let us extend the default claset with the derived rules
-for~$if$.
-\begin{isabelle}
-\isacommand{declare}\ ifI\ [intro!]\isanewline
-\isacommand{declare}\ ifE\ [elim!]
-\end{isabelle}
-With these declarations, we could have proved this theorem with a single
-call to \isa{blast}.  Here is another example:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
-\isanewline
-\isacommand{by}\ blast
-\end{isabelle}
-
-
-\subsection{Derived rules versus definitions}
-Dispensing with the derived rules, we can treat $if$ as an
-abbreviation, and let \ttindex{blast_tac} prove the expanded formula.  Let
-us redo the previous proof:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
-\end{isabelle}
-This time, we simply unfold using the definition of $if$:
-\begin{isabelle}
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
-\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ A\ \isasymor \ \isasymnot \ R\ \isasymand \ B)
-\end{isabelle}
-We are left with a subgoal in pure first-order logic, and it falls to
-\isa{blast}:
-\begin{isabelle}
-\isacommand{apply}\ blast\isanewline
-No\ subgoals!
-\end{isabelle}
-Expanding definitions reduces the extended logic to the base logic.  This
-approach has its merits, but it can be slow.  In these examples, proofs
-using the derived rules for~\isa{if} run about six
-times faster  than proofs using just the rules of first-order logic.
-
-Expanding definitions can also make it harder to diagnose errors. 
-Suppose we are having difficulties in proving some goal.  If by expanding
-definitions we have made it unreadable, then we have little hope of
-diagnosing the problem.
-
-Attempts at program verification often yield invalid assertions.
-Let us try to prove one:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
-A))
-\end{isabelle}
-Calling \isa{blast} yields an uninformative failure message. We can
-get a closer look at the situation by applying \methdx{auto}.
-\begin{isabelle}
-\isacommand{apply}\ auto\isanewline
-\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
-\ 2.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
-\ 3.\ \isasymlbrakk B;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
-\ 4.\ \isasymlbrakk \isasymnot \ R;\ A;\ \isasymnot \ B;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \
-False
-\end{isabelle}
-Subgoal~1 is unprovable and yields a countermodel: $P$ and~$B$ are false
-while~$R$ and~$A$ are true.  This truth assignment reduces the main goal to
-$true\bimp false$, which is of course invalid.
-
-We can repeat this analysis by expanding definitions, using just the rules of
-first-order logic:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
-A))
-\isanewline
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
-\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ B\ \isasymor \ \isasymnot \ R\ \isasymand \ A)
-\end{isabelle}
-Again \isa{blast} would fail, so we try \methdx{auto}:
-\begin{isabelle}
-\isacommand{apply}\ (auto)\ \isanewline
-\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
-\ 2.\ \isasymlbrakk A;\ \isasymnot \ P;\ R;\ \isasymnot \ B\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 3.\ \isasymlbrakk B;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 4.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ A;\ \isasymnot \ R;\ Q\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 5.\ \isasymlbrakk B;\ \isasymnot \ Q;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 6.\ \isasymlbrakk B;\ \isasymnot \ A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 7.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 8.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ Q%
-\end{isabelle}
-Subgoal~1 yields the same countermodel as before.  But each proof step has
-taken six times as long, and the final result contains twice as many subgoals.
-
-Expanding your definitions usually makes proofs more difficult.  This is
-why the classical prover has been designed to accept derived rules.
-
-\index{first-order logic|)}
--- a/src/Doc/ZF/document/ZF.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2717 +0,0 @@
-\chapter{Zermelo-Fraenkel Set Theory}
-\index{set theory|(}
-
-The theory~\thydx{ZF} implements Zermelo-Fraenkel set
-theory~\cite{halmos60,suppes72} as an extension of~\texttt{FOL}, classical
-first-order logic.  The theory includes a collection of derived natural
-deduction rules, for use with Isabelle's classical reasoner.  Some
-of it is based on the work of No\"el~\cite{noel}.
-
-A tremendous amount of set theory has been formally developed, including the
-basic properties of relations, functions, ordinals and cardinals.  Significant
-results have been proved, such as the Schr\"oder-Bernstein Theorem, the
-Wellordering Theorem and a version of Ramsey's Theorem.  \texttt{ZF} provides
-both the integers and the natural numbers.  General methods have been
-developed for solving recursion equations over monotonic functors; these have
-been applied to yield constructions of lists, trees, infinite lists, etc.
-
-\texttt{ZF} has a flexible package for handling inductive definitions,
-such as inference systems, and datatype definitions, such as lists and
-trees.  Moreover it handles coinductive definitions, such as
-bisimulation relations, and codatatype definitions, such as streams.  It
-provides a streamlined syntax for defining primitive recursive functions over
-datatypes. 
-
-Published articles~\cite{paulson-set-I,paulson-set-II} describe \texttt{ZF}
-less formally than this chapter.  Isabelle employs a novel treatment of
-non-well-founded data structures within the standard {\sc zf} axioms, including
-the Axiom of Foundation~\cite{paulson-mscs}.
-
-
-\section{Which version of axiomatic set theory?}
-The two main axiom systems for set theory are Bernays-G\"odel~({\sc bg})
-and Zermelo-Fraenkel~({\sc zf}).  Resolution theorem provers can use {\sc
-  bg} because it is finite~\cite{boyer86,quaife92}.  {\sc zf} does not
-have a finite axiom system because of its Axiom Scheme of Replacement.
-This makes it awkward to use with many theorem provers, since instances
-of the axiom scheme have to be invoked explicitly.  Since Isabelle has no
-difficulty with axiom schemes, we may adopt either axiom system.
-
-These two theories differ in their treatment of {\bf classes}, which are
-collections that are `too big' to be sets.  The class of all sets,~$V$,
-cannot be a set without admitting Russell's Paradox.  In {\sc bg}, both
-classes and sets are individuals; $x\in V$ expresses that $x$ is a set.  In
-{\sc zf}, all variables denote sets; classes are identified with unary
-predicates.  The two systems define essentially the same sets and classes,
-with similar properties.  In particular, a class cannot belong to another
-class (let alone a set).
-
-Modern set theorists tend to prefer {\sc zf} because they are mainly concerned
-with sets, rather than classes.  {\sc bg} requires tiresome proofs that various
-collections are sets; for instance, showing $x\in\{x\}$ requires showing that
-$x$ is a set.
-
-
-\begin{figure} \small
-\begin{center}
-\begin{tabular}{rrr} 
-  \it name      &\it meta-type  & \it description \\ 
-  \cdx{Let}     & $[\alpha,\alpha\To\beta]\To\beta$ & let binder\\
-  \cdx{0}       & $i$           & empty set\\
-  \cdx{cons}    & $[i,i]\To i$  & finite set constructor\\
-  \cdx{Upair}   & $[i,i]\To i$  & unordered pairing\\
-  \cdx{Pair}    & $[i,i]\To i$  & ordered pairing\\
-  \cdx{Inf}     & $i$   & infinite set\\
-  \cdx{Pow}     & $i\To i$      & powerset\\
-  \cdx{Union} \cdx{Inter} & $i\To i$    & set union/intersection \\
-  \cdx{split}   & $[[i,i]\To i, i] \To i$ & generalized projection\\
-  \cdx{fst} \cdx{snd}   & $i\To i$      & projections\\
-  \cdx{converse}& $i\To i$      & converse of a relation\\
-  \cdx{succ}    & $i\To i$      & successor\\
-  \cdx{Collect} & $[i,i\To o]\To i$     & separation\\
-  \cdx{Replace} & $[i, [i,i]\To o] \To i$       & replacement\\
-  \cdx{PrimReplace} & $[i, [i,i]\To o] \To i$   & primitive replacement\\
-  \cdx{RepFun}  & $[i, i\To i] \To i$   & functional replacement\\
-  \cdx{Pi} \cdx{Sigma}  & $[i,i\To i]\To i$     & general product/sum\\
-  \cdx{domain}  & $i\To i$      & domain of a relation\\
-  \cdx{range}   & $i\To i$      & range of a relation\\
-  \cdx{field}   & $i\To i$      & field of a relation\\
-  \cdx{Lambda}  & $[i, i\To i]\To i$    & $\lambda$-abstraction\\
-  \cdx{restrict}& $[i, i] \To i$        & restriction of a function\\
-  \cdx{The}     & $[i\To o]\To i$       & definite description\\
-  \cdx{if}      & $[o,i,i]\To i$        & conditional\\
-  \cdx{Ball} \cdx{Bex}  & $[i, i\To o]\To o$    & bounded quantifiers
-\end{tabular}
-\end{center}
-\subcaption{Constants}
-
-\begin{center}
-\index{*"`"` symbol}
-\index{*"-"`"` symbol}
-\index{*"` symbol}\index{function applications}
-\index{*"- symbol}
-\index{*": symbol}
-\index{*"<"= symbol}
-\begin{tabular}{rrrr} 
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \tt ``        & $[i,i]\To i$  &  Left 90      & image \\
-  \tt -``       & $[i,i]\To i$  &  Left 90      & inverse image \\
-  \tt `         & $[i,i]\To i$  &  Left 90      & application \\
-  \sdx{Int}     & $[i,i]\To i$  &  Left 70      & intersection ($\int$) \\
-  \sdx{Un}      & $[i,i]\To i$  &  Left 65      & union ($\un$) \\
-  \tt -         & $[i,i]\To i$  &  Left 65      & set difference ($-$) \\[1ex]
-  \tt:          & $[i,i]\To o$  &  Left 50      & membership ($\in$) \\
-  \tt <=        & $[i,i]\To o$  &  Left 50      & subset ($\subseteq$) 
-\end{tabular}
-\end{center}
-\subcaption{Infixes}
-\caption{Constants of ZF} \label{zf-constants}
-\end{figure} 
-
-
-\section{The syntax of set theory}
-The language of set theory, as studied by logicians, has no constants.  The
-traditional axioms merely assert the existence of empty sets, unions,
-powersets, etc.; this would be intolerable for practical reasoning.  The
-Isabelle theory declares constants for primitive sets.  It also extends
-\texttt{FOL} with additional syntax for finite sets, ordered pairs,
-comprehension, general union/intersection, general sums/products, and
-bounded quantifiers.  In most other respects, Isabelle implements precisely
-Zermelo-Fraenkel set theory.
-
-Figure~\ref{zf-constants} lists the constants and infixes of~ZF, while
-Figure~\ref{zf-trans} presents the syntax translations.  Finally,
-Figure~\ref{zf-syntax} presents the full grammar for set theory, including the
-constructs of FOL.
-
-Local abbreviations can be introduced by a \isa{let} construct whose
-syntax appears in Fig.\ts\ref{zf-syntax}.  Internally it is translated into
-the constant~\cdx{Let}.  It can be expanded by rewriting with its
-definition, \tdx{Let_def}.
-
-Apart from \isa{let}, set theory does not use polymorphism.  All terms in
-ZF have type~\tydx{i}, which is the type of individuals and has
-class~\cldx{term}.  The type of first-order formulae, remember, 
-is~\tydx{o}.
-
-Infix operators include binary union and intersection ($A\un B$ and
-$A\int B$), set difference ($A-B$), and the subset and membership
-relations.  Note that $a$\verb|~:|$b$ is translated to $\lnot(a\in b)$,
-which is equivalent to  $a\notin b$.  The
-union and intersection operators ($\bigcup A$ and $\bigcap A$) form the
-union or intersection of a set of sets; $\bigcup A$ means the same as
-$\bigcup@{x\in A}x$.  Of these operators, only $\bigcup A$ is primitive.
-
-The constant \cdx{Upair} constructs unordered pairs; thus \isa{Upair($A$,$B$)} denotes the set~$\{A,B\}$ and
-\isa{Upair($A$,$A$)} denotes the singleton~$\{A\}$.  General union is
-used to define binary union.  The Isabelle version goes on to define
-the constant
-\cdx{cons}:
-\begin{eqnarray*}
-   A\un B               & \equiv &       \bigcup(\isa{Upair}(A,B)) \\
-   \isa{cons}(a,B)      & \equiv &        \isa{Upair}(a,a) \un B
-\end{eqnarray*}
-The $\{a@1, \ldots\}$ notation abbreviates finite sets constructed in the
-obvious manner using~\isa{cons} and~$\emptyset$ (the empty set):
-\begin{eqnarray*}
- \{a,b,c\} & \equiv & \isa{cons}(a,\isa{cons}(b,\isa{cons}(c,\emptyset)))
-\end{eqnarray*}
-
-The constant \cdx{Pair} constructs ordered pairs, as in \isa{Pair($a$,$b$)}.  Ordered pairs may also be written within angle brackets,
-as {\tt<$a$,$b$>}.  The $n$-tuple {\tt<$a@1$,\ldots,$a@{n-1}$,$a@n$>}
-abbreviates the nest of pairs\par\nobreak
-\centerline{\isa{Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots).}}
-
-In ZF, a function is a set of pairs.  A ZF function~$f$ is simply an
-individual as far as Isabelle is concerned: its Isabelle type is~$i$, not
-(say) $i\To i$.  The infix operator~{\tt`} denotes the application of a function set
-to its argument; we must write~$f{\tt`}x$, not~$f(x)$.  The syntax for image
-is~$f{\tt``}A$ and that for inverse image is~$f{\tt-``}A$.
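-
-For illustration, consider the single-pair function set $f=\{\pair{0,a}\}$.
-Unfolding the definitions given later in Fig.\ts\ref{zf-defs}, we obtain
-\[ f{\tt`}0 = a, \qquad f{\tt``}\{0\} = \{a\}, \qquad f{\tt-``}\{a\} = \{0\}. \]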
-
-
-\begin{figure} 
-\index{lambda abs@$\lambda$-abstractions}
-\index{*"-"> symbol}
-\index{*"* symbol}
-\begin{center} \footnotesize\tt\frenchspacing
-\begin{tabular}{rrr} 
-  \it external          & \it internal  & \it description \\ 
-  $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm negated membership\\
-  \ttlbrace$a@1$, $\ldots$, $a@n$\ttrbrace  &  cons($a@1$,$\ldots$,cons($a@n$,0)) &
-        \rm finite set \\
-  <$a@1$, $\ldots$, $a@{n-1}$, $a@n$> & 
-        Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots) &
-        \rm ordered $n$-tuple \\
-  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x. P[x]$) &
-        \rm separation \\
-  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y. Q[x,y]$) &
-        \rm replacement \\
-  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x. b[x]$) &
-        \rm functional replacement \\
-  \sdx{INT} $x$:$A . B[x]$      & Inter(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
-        \rm general intersection \\
-  \sdx{UN}  $x$:$A . B[x]$      & Union(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
-        \rm general union \\
-  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x. B[x]$) & 
-        \rm general product \\
-  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x. B[x]$) & 
-        \rm general sum \\
-  $A$ -> $B$            & Pi($A$,$\lambda x. B$) & 
-        \rm function space \\
-  $A$ * $B$             & Sigma($A$,$\lambda x. B$) & 
-        \rm binary product \\
-  \sdx{THE}  $x . P[x]$ & The($\lambda x. P[x]$) & 
-        \rm definite description \\
-  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x. b[x]$) & 
-        \rm $\lambda$-abstraction\\[1ex]
-  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x. P[x]$) & 
-        \rm bounded $\forall$ \\
-  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x. P[x]$) & 
-        \rm bounded $\exists$
-\end{tabular}
-\end{center}
-\caption{Translations for ZF} \label{zf-trans}
-\end{figure} 
-
-
-\begin{figure} 
-\index{*let symbol}
-\index{*in symbol}
-\dquotes
-\[\begin{array}{rcl}
-    term & = & \hbox{expression of type~$i$} \\
-         & | & "let"~id~"="~term";"\dots";"~id~"="~term~"in"~term \\
-         & | & "if"~term~"then"~term~"else"~term \\
-         & | & "{\ttlbrace} " term\; ("," term)^* " {\ttrbrace}" \\
-         & | & "< "  term\; ("," term)^* " >"  \\
-         & | & "{\ttlbrace} " id ":" term " . " formula " {\ttrbrace}" \\
-         & | & "{\ttlbrace} " id " . " id ":" term ", " formula " {\ttrbrace}" \\
-         & | & "{\ttlbrace} " term " . " id ":" term " {\ttrbrace}" \\
-         & | & term " `` " term \\
-         & | & term " -`` " term \\
-         & | & term " ` " term \\
-         & | & term " * " term \\
-         & | & term " \isasyminter " term \\
-         & | & term " \isasymunion " term \\
-         & | & term " - " term \\
-         & | & term " -> " term \\
-         & | & "THE~~"  id  " . " formula\\
-         & | & "lam~~"  id ":" term " . " term \\
-         & | & "INT~~"  id ":" term " . " term \\
-         & | & "UN~~~"  id ":" term " . " term \\
-         & | & "PROD~"  id ":" term " . " term \\
-         & | & "SUM~~"  id ":" term " . " term \\[2ex]
- formula & = & \hbox{expression of type~$o$} \\
-         & | & term " : " term \\
-         & | & term " \ttilde: " term \\
-         & | & term " <= " term \\
-         & | & term " = " term \\
-         & | & term " \ttilde= " term \\
-         & | & "\ttilde\ " formula \\
-         & | & formula " \& " formula \\
-         & | & formula " | " formula \\
-         & | & formula " --> " formula \\
-         & | & formula " <-> " formula \\
-         & | & "ALL " id ":" term " . " formula \\
-         & | & "EX~~" id ":" term " . " formula \\
-         & | & "ALL~" id~id^* " . " formula \\
-         & | & "EX~~" id~id^* " . " formula \\
-         & | & "EX!~" id~id^* " . " formula
-  \end{array}
-\]
-\caption{Full grammar for ZF} \label{zf-syntax}
-\end{figure} 
-
-
-\section{Binding operators}
-The constant \cdx{Collect} constructs sets by the principle of {\bf
-  separation}.  The syntax for separation is
-\hbox{\tt\ttlbrace$x$:$A$.\ $P[x]$\ttrbrace}, where $P[x]$ is a formula
-that may contain free occurrences of~$x$.  It abbreviates the set \isa{Collect($A$,$\lambda x. P[x]$)}, which consists of all $x\in A$ that
-satisfy~$P[x]$.  Note that \isa{Collect} is an unfortunate choice of
-name: some set theories adopt a set-formation principle, related to
-replacement, called collection.
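-
-For instance, taking $P[x]$ to be $x\subseteq B$ gives
-\[ \{x\in A.\ x\subseteq B\} \;=\; \isa{Collect}(A,\ \lambda x.\ x\subseteq B)
-   \;=\; A \int \isa{Pow}(B), \]
-the set of those elements of~$A$ that are subsets of~$B$.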
-
-The constant \cdx{Replace} constructs sets by the principle of {\bf
-  replacement}.  The syntax
-\hbox{\tt\ttlbrace$y$.\ $x$:$A$,$Q[x,y]$\ttrbrace} denotes the set 
-\isa{Replace($A$,$\lambda x\,y. Q[x,y]$)}, which consists of all~$y$ such
-that there exists $x\in A$ satisfying~$Q[x,y]$.  The Replacement Axiom
-has the condition that $Q$ must be single-valued over~$A$: for
-all~$x\in A$ there exists at most one $y$ satisfying~$Q[x,y]$.  A
-single-valued binary predicate is also called a {\bf class function}.
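-
-For example, the predicate $Q[x,y]\equiv(y=\isa{Pow}(x))$ is trivially
-single-valued, so
-\[ \{y.\ x\in A,\ y=\isa{Pow}(x)\} \;=\;
-   \isa{Replace}(A,\ \lambda x\,y.\ y=\isa{Pow}(x)) \]
-is the set of powersets of elements of~$A$.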
-
-The constant \cdx{RepFun} expresses a special case of replacement,
-where $Q[x,y]$ has the form $y=b[x]$.  Such a $Q$ is trivially
-single-valued, since it is just the graph of the meta-level
-function~$\lambda x. b[x]$.  The resulting set consists of all $b[x]$
-for~$x\in A$.  This is analogous to the \ML{} functional \isa{map},
-since it applies a function to every element of a set.  The syntax is
-\isa{\ttlbrace$b[x]$.\ $x$:$A$\ttrbrace}, which expands to 
-\isa{RepFun($A$,$\lambda x. b[x]$)}.
-
-\index{*INT symbol}\index{*UN symbol} 
-General unions and intersections of indexed
-families of sets, namely $\bigcup@{x\in A}B[x]$ and $\bigcap@{x\in A}B[x]$,
-are written \isa{UN $x$:$A$.\ $B[x]$} and \isa{INT $x$:$A$.\ $B[x]$}.
-Their meaning is expressed using \isa{RepFun} as
-\[
-\bigcup(\{B[x]. x\in A\}) \qquad\hbox{and}\qquad 
-\bigcap(\{B[x]. x\in A\}). 
-\]
-General sums $\sum@{x\in A}B[x]$ and products $\prod@{x\in A}B[x]$ can be
-constructed in set theory, where $B[x]$ is a family of sets over~$A$.  They
-have as special cases $A\times B$ and $A\to B$, where $B$ is simply a set.
-This is similar to the situation in Constructive Type Theory (set theory
-has `dependent sets') and calls for similar syntactic conventions.  The
-constants~\cdx{Sigma} and~\cdx{Pi} construct general sums and
-products.  Instead of \isa{Sigma($A$,$B$)} and \isa{Pi($A$,$B$)} we may
-write 
-\isa{SUM $x$:$A$.\ $B[x]$} and \isa{PROD $x$:$A$.\ $B[x]$}.  
-\index{*SUM symbol}\index{*PROD symbol}%
-The special cases \hbox{\tt$A$*$B$} and \hbox{\tt$A$->$B$} abbreviate
-general sums and products over a constant family.\footnote{Unlike normal
-infix operators, {\tt*} and {\tt->} merely define abbreviations; there are
-no constants~\isa{op~*} and~\isa{op~->}.} Isabelle accepts these
-abbreviations in parsing and uses them whenever possible for printing.
-
-\index{*THE symbol} As mentioned above, whenever the axioms assert the
-existence and uniqueness of a set, Isabelle's set theory declares a constant
-for that set.  These constants can express the {\bf definite description}
-operator~$\iota x. P[x]$, which stands for the unique~$a$ satisfying~$P[a]$,
-if such exists.  Since all terms in ZF denote something, a description is
-always meaningful, but we do not know its value unless $P[x]$ defines it
-uniquely.  Using the constant~\cdx{The}, we may write descriptions as 
-\isa{The($\lambda x. P[x]$)} or use the syntax \isa{THE $x$.\ $P[x]$}.
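-
-For example, since $x\in\{a\}$ holds exactly when $x=a$, the description
-\[ (\hbox{\tt THE }x.\ x\in\{a\}) \;=\; a; \]
-such facts are easily proved using the derived rule \tdx{the_equality}
-(Fig.\ts\ref{zf-the}).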
-
-\index{*lam symbol}
-Function sets may be written in $\lambda$-notation; $\lambda x\in A. b[x]$
-stands for the set of all pairs $\pair{x,b[x]}$ for $x\in A$.  In order for
-this to be a set, the function's domain~$A$ must be given.  Using the
-constant~\cdx{Lambda}, we may express function sets as \isa{Lambda($A$,$\lambda x. b[x]$)} or use the syntax \isa{lam $x$:$A$.\ $b[x]$}.
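-
-For instance, the abstraction \isa{lam $x$:$A$.\ $x$} denotes
-\[ \isa{Lambda}(A,\ \lambda x.\ x) \;=\; \{\pair{x,x}.\ x\in A\}, \]
-the identity function on~$A$ (written \isa{id($A$)} in theory \texttt{Perm},
-Fig.\ts\ref{zf-perm}).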
-
-Isabelle's set theory defines two {\bf bounded quantifiers}:
-\begin{eqnarray*}
-   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
-   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
-\end{eqnarray*}
-The constants~\cdx{Ball} and~\cdx{Bex} are defined
-accordingly.  Instead of \isa{Ball($A$,$P$)} and \isa{Bex($A$,$P$)} we may
-write
-\isa{ALL $x$:$A$.\ $P[x]$} and \isa{EX $x$:$A$.\ $P[x]$}.
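-
-For instance, the assertion that every element of~$A$ is a subset of its union
-can be written \isa{ALL $x$:$A$.\ $x$ <= Union($A$)}; it stands for
-\[ \isa{Ball}(A,\ \lambda x.\ x\subseteq\bigcup(A)),
-   \quad\hbox{which unfolds to}\quad
-   \forall x.\ x\in A\imp x\subseteq\bigcup(A). \]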
-
-
-%%%% ZF.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Let_def}:           Let(s, f) == f(s)
-
-\tdx{Ball_def}:          Ball(A,P) == {\isasymforall}x. x \isasymin A --> P(x)
-\tdx{Bex_def}:           Bex(A,P)  == {\isasymexists}x. x \isasymin A & P(x)
-
-\tdx{subset_def}:        A \isasymsubseteq B  == {\isasymforall}x \isasymin A. x \isasymin B
-\tdx{extension}:         A = B  <->  A \isasymsubseteq B & B \isasymsubseteq A
-
-\tdx{Union_iff}:         A \isasymin Union(C) <-> ({\isasymexists}B \isasymin C. A \isasymin B)
-\tdx{Pow_iff}:           A \isasymin Pow(B) <-> A \isasymsubseteq B
-\tdx{foundation}:        A=0 | ({\isasymexists}x \isasymin A. {\isasymforall}y \isasymin x. y \isasymnotin A)
-
-\tdx{replacement}:       ({\isasymforall}x \isasymin A. {\isasymforall}y z. P(x,y) & P(x,z) --> y=z) ==>
-                   b \isasymin PrimReplace(A,P) <-> ({\isasymexists}x{\isasymin}A. P(x,b))
-\subcaption{The Zermelo-Fraenkel Axioms}
-
-\tdx{Replace_def}: Replace(A,P) == 
-                   PrimReplace(A, \%x y. (\isasymexists!z. P(x,z)) & P(x,y))
-\tdx{RepFun_def}:  RepFun(A,f)  == {\ttlbrace}y . x \isasymin A, y=f(x)\ttrbrace
-\tdx{the_def}:     The(P)       == Union({\ttlbrace}y . x \isasymin {\ttlbrace}0{\ttrbrace}, P(y){\ttrbrace})
-\tdx{if_def}:      if(P,a,b)    == THE z. P & z=a | ~P & z=b
-\tdx{Collect_def}: Collect(A,P) == {\ttlbrace}y . x \isasymin A, x=y & P(x){\ttrbrace}
-\tdx{Upair_def}:   Upair(a,b)   == 
-               {\ttlbrace}y. x\isasymin{}Pow(Pow(0)), x=0 & y=a | x=Pow(0) & y=b{\ttrbrace}
-\subcaption{Consequences of replacement}
-
-\tdx{Inter_def}:   Inter(A) == {\ttlbrace}x \isasymin Union(A) . {\isasymforall}y \isasymin A. x \isasymin y{\ttrbrace}
-\tdx{Un_def}:      A \isasymunion B  == Union(Upair(A,B))
-\tdx{Int_def}:     A \isasyminter B  == Inter(Upair(A,B))
-\tdx{Diff_def}:    A - B    == {\ttlbrace}x \isasymin A . x \isasymnotin B{\ttrbrace}
-\subcaption{Union, intersection, difference}
-\end{alltt*}
-\caption{Rules and axioms of ZF} \label{zf-rules}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{cons_def}:    cons(a,A) == Upair(a,a) \isasymunion A
-\tdx{succ_def}:    succ(i) == cons(i,i)
-\tdx{infinity}:    0 \isasymin Inf & ({\isasymforall}y \isasymin Inf. succ(y) \isasymin Inf)
-\subcaption{Finite and infinite sets}
-
-\tdx{Pair_def}:      <a,b>      == {\ttlbrace}{\ttlbrace}a,a{\ttrbrace}, {\ttlbrace}a,b{\ttrbrace}{\ttrbrace}
-\tdx{split_def}:     split(c,p) == THE y. {\isasymexists}a b. p=<a,b> & y=c(a,b)
-\tdx{fst_def}:       fst(p)     == split(\%x y. x, p)
-\tdx{snd_def}:       snd(p)     == split(\%x y. y, p)
-\tdx{Sigma_def}:     Sigma(A,B) == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x,y>{\ttrbrace}
-\subcaption{Ordered pairs and Cartesian products}
-
-\tdx{converse_def}: converse(r) == {\ttlbrace}z. w\isasymin{}r, {\isasymexists}x y. w=<x,y> & z=<y,x>{\ttrbrace}
-\tdx{domain_def}:   domain(r)   == {\ttlbrace}x. w \isasymin r, {\isasymexists}y. w=<x,y>{\ttrbrace}
-\tdx{range_def}:    range(r)    == domain(converse(r))
-\tdx{field_def}:    field(r)    == domain(r) \isasymunion range(r)
-\tdx{image_def}:    r `` A      == {\ttlbrace}y\isasymin{}range(r) . {\isasymexists}x \isasymin A. <x,y> \isasymin r{\ttrbrace}
-\tdx{vimage_def}:   r -`` A     == converse(r)``A
-\subcaption{Operations on relations}
-
-\tdx{lam_def}:   Lambda(A,b) == {\ttlbrace}<x,b(x)> . x \isasymin A{\ttrbrace}
-\tdx{apply_def}: f`a         == THE y. <a,y> \isasymin f
-\tdx{Pi_def}: Pi(A,B) == {\ttlbrace}f\isasymin{}Pow(Sigma(A,B)). {\isasymforall}x\isasymin{}A. \isasymexists!y. <x,y>\isasymin{}f{\ttrbrace}
-\tdx{restrict_def}:  restrict(f,A) == lam x \isasymin A. f`x
-\subcaption{Functions and general product}
-\end{alltt*}
-\caption{Further definitions of ZF} \label{zf-defs}
-\end{figure}
-
-
-
-\section{The Zermelo-Fraenkel axioms}
-The axioms appear in Fig.\ts \ref{zf-rules}.  They resemble those
-presented by Suppes~\cite{suppes72}.  Most of the theory consists of
-definitions.  In particular, bounded quantifiers and the subset relation
-appear in other axioms.  Object-level quantifiers and implications have
-been replaced by meta-level ones wherever possible, to simplify use of the
-axioms.  
-
-The traditional replacement axiom asserts
-\[ y \in \isa{PrimReplace}(A,P) \bimp (\exists x\in A. P(x,y)) \]
-subject to the condition that $P(x,y)$ is single-valued for all~$x\in A$.
-The Isabelle theory defines \cdx{Replace} to apply
-\cdx{PrimReplace} to the single-valued part of~$P$, namely
-\[ (\exists!z. P(x,z)) \conj P(x,y). \]
-Thus $y\in \isa{Replace}(A,P)$ if and only if there is some~$x$ such that
-$P(x,-)$ holds uniquely for~$y$.  Because the equivalence is unconditional,
-\isa{Replace} is much easier to use than \isa{PrimReplace}; it defines the
-same set, if $P(x,y)$ is single-valued.  The nice syntax for replacement
-expands to \isa{Replace}.
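-
-In other words, because the modified predicate is single-valued by
-construction, the replacement axiom applies without any side condition and
-yields the unconditional equivalence
-\[ y\in \isa{Replace}(A,P) \;\bimp\;
-   (\exists x\in A.\ (\exists!z. P(x,z)) \conj P(x,y)). \]
-If $P(x,-)$ fails to be single-valued at some~$x$, that~$x$ simply
-contributes nothing to the set.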
-
-Other consequences of replacement include replacement for 
-meta-level functions
-(\cdx{RepFun}) and definite descriptions (\cdx{The}).
-Axioms for separation (\cdx{Collect}) and unordered pairs
-(\cdx{Upair}) are traditionally assumed, but they actually follow
-from replacement~\cite[pages 237--8]{suppes72}.
-
-The definitions of general intersection, etc., are straightforward.  Note
-the definition of \isa{cons}, which underlies the finite set notation.
-The axiom of infinity gives us a set that contains~0 and is closed under
-successor (\cdx{succ}).  Although this set is not uniquely defined,
-the theory names it (\cdx{Inf}) in order to simplify the
-construction of the natural numbers.
-                                             
-Further definitions appear in Fig.\ts\ref{zf-defs}.  Ordered pairs are
-defined in the standard way, $\pair{a,b}\equiv\{\{a\},\{a,b\}\}$.  Recall
-that \cdx{Sigma}$(A,B)$ generalizes the Cartesian product of two
-sets.  It is defined to be the union of all singleton sets
-$\{\pair{x,y}\}$, for $x\in A$ and $y\in B(x)$.  This is a typical usage of
-general union.
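-
-For instance, unfolding \tdx{Sigma_def} for singleton arguments gives
-\[ \isa{Sigma}(\{a\},\ \lambda x.\ \{b\}) \;=\;
-   \bigcup@{x\in\{a\}}\bigcup@{y\in\{b\}}\{\pair{x,y}\} \;=\; \{\pair{a,b}\}. \]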
-
-The projections \cdx{fst} and~\cdx{snd} are defined in terms of the
-generalized projection \cdx{split}.  The latter has been borrowed from
-Martin-L\"of's Type Theory, and is often easier to use than \cdx{fst}
-and~\cdx{snd}.
-
-Operations on relations include converse, domain, range, and image.  The
-set $\isa{Pi}(A,B)$ generalizes the space of functions between two sets.
-Note the simple definitions of $\lambda$-abstraction (using
-\cdx{RepFun}) and application (using a definite description).  The
-function \cdx{restrict}$(f,A)$ has the same values as~$f$, but only
-over the domain~$A$.
-
-
-%%%% zf.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{ballI}:     [| !!x. x\isasymin{}A ==> P(x) |] ==> {\isasymforall}x\isasymin{}A. P(x)
-\tdx{bspec}:     [| {\isasymforall}x\isasymin{}A. P(x);  x\isasymin{}A |] ==> P(x)
-\tdx{ballE}:     [| {\isasymforall}x\isasymin{}A. P(x);  P(x) ==> Q;  x \isasymnotin A ==> Q |] ==> Q
-
-\tdx{ball_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
-             ({\isasymforall}x\isasymin{}A. P(x)) <-> ({\isasymforall}x\isasymin{}A'. P'(x))
-
-\tdx{bexI}:      [| P(x);  x\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
-\tdx{bexCI}:     [| {\isasymforall}x\isasymin{}A. ~P(x) ==> P(a);  a\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
-\tdx{bexE}:      [| {\isasymexists}x\isasymin{}A. P(x);  !!x. [| x\isasymin{}A; P(x) |] ==> Q |] ==> Q
-
-\tdx{bex_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
-             ({\isasymexists}x\isasymin{}A. P(x)) <-> ({\isasymexists}x\isasymin{}A'. P'(x))
-\subcaption{Bounded quantifiers}
-
-\tdx{subsetI}:     (!!x. x \isasymin A ==> x \isasymin B) ==> A \isasymsubseteq B
-\tdx{subsetD}:     [| A \isasymsubseteq B;  c \isasymin A |] ==> c \isasymin B
-\tdx{subsetCE}:    [| A \isasymsubseteq B;  c \isasymnotin A ==> P;  c \isasymin B ==> P |] ==> P
-\tdx{subset_refl}:  A \isasymsubseteq A
-\tdx{subset_trans}: [| A \isasymsubseteq B;  B \isasymsubseteq C |] ==> A \isasymsubseteq C
-
-\tdx{equalityI}:   [| A \isasymsubseteq B;  B \isasymsubseteq A |] ==> A = B
-\tdx{equalityD1}:  A = B ==> A \isasymsubseteq B
-\tdx{equalityD2}:  A = B ==> B \isasymsubseteq A
-\tdx{equalityE}:   [| A = B;  [| A \isasymsubseteq B; B \isasymsubseteq A |] ==> P |]  ==>  P
-\subcaption{Subsets and extensionality}
-
-\tdx{emptyE}:        a \isasymin 0 ==> P
-\tdx{empty_subsetI}:  0 \isasymsubseteq A
-\tdx{equals0I}:      [| !!y. y \isasymin A ==> False |] ==> A=0
-\tdx{equals0D}:      [| A=0;  a \isasymin A |] ==> P
-
-\tdx{PowI}:          A \isasymsubseteq B ==> A \isasymin Pow(B)
-\tdx{PowD}:          A \isasymin Pow(B)  ==>  A \isasymsubseteq B
-\subcaption{The empty set; power sets}
-\end{alltt*}
-\caption{Basic derived rules for ZF} \label{zf-lemmas1}
-\end{figure}
-
-
-\section{From basic lemmas to function spaces}
-Faced with so many definitions, it is essential to prove lemmas.  Even
-trivial theorems like $A \int B = B \int A$ would be difficult to
-prove from the definitions alone.  Isabelle's set theory derives many
-rules using a natural deduction style.  Ideally, a natural deduction
-rule should introduce or eliminate just one operator, but this is not
-always practical.  For most operators, we may forget their definitions
-and use their derived rules instead.
-
-\subsection{Fundamental lemmas}
-Figure~\ref{zf-lemmas1} presents the derived rules for the most basic
-operators.  The rules for the bounded quantifiers resemble those for the
-ordinary quantifiers, but note that \tdx{ballE} uses a negated assumption
-in the style of Isabelle's classical reasoner.  The \rmindex{congruence
-  rules} \tdx{ball_cong} and \tdx{bex_cong} are required by Isabelle's
-simplifier, but have few other uses.  Congruence rules must be specially
-derived for all binding operators, and henceforth will not be shown.
-
-Figure~\ref{zf-lemmas1} also shows rules for the subset and equality
-relations (proof by extensionality), and rules about the empty set and the
-power set operator.
-
-Figure~\ref{zf-lemmas2} presents rules for replacement and separation.
-The rules for \cdx{Replace} and \cdx{RepFun} are much simpler than
-comparable rules for \isa{PrimReplace} would be.  The principle of
-separation is proved explicitly, although most proofs should use the
-natural deduction rules for \isa{Collect}.  The elimination rule
-\tdx{CollectE} is equivalent to the two destruction rules
-\tdx{CollectD1} and \tdx{CollectD2}, but each rule is suited to
-particular circumstances.  Although too many rules can be confusing, there
-is no reason to aim for a minimal set of rules.  
-
-Figure~\ref{zf-lemmas3} presents rules for general union and intersection.
-The empty intersection should be undefined.  We cannot have
-$\bigcap(\emptyset)=V$ because $V$, the universal class, is not a set.  All
-expressions denote something in ZF set theory; the definition of
-intersection implies $\bigcap(\emptyset)=\emptyset$, but this value is
-arbitrary.  The rule \tdx{InterI} must have a premise to exclude
-the empty intersection.  Some of the laws governing intersections require
-similar premises.
-
-
-%the [p] gives better page breaking for the book
-\begin{figure}[p]
-\begin{alltt*}\isastyleminor
-\tdx{ReplaceI}:   [| x\isasymin{}A;  P(x,b);  !!y. P(x,y) ==> y=b |] ==> 
-            b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace}
-
-\tdx{ReplaceE}:   [| b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace};  
-               !!x. [| x\isasymin{}A; P(x,b); {\isasymforall}y. P(x,y)-->y=b |] ==> R 
-            |] ==> R
-
-\tdx{RepFunI}:    [| a\isasymin{}A |] ==> f(a)\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace}
-\tdx{RepFunE}:    [| b\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace};  
-                !!x.[| x\isasymin{}A;  b=f(x) |] ==> P |] ==> P
-
-\tdx{separation}:  a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} <-> a\isasymin{}A & P(a)
-\tdx{CollectI}:    [| a\isasymin{}A;  P(a) |] ==> a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace}
-\tdx{CollectE}:    [| a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace};  [| a\isasymin{}A; P(a) |] ==> R |] ==> R
-\tdx{CollectD1}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> a\isasymin{}A
-\tdx{CollectD2}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> P(a)
-\end{alltt*}
-\caption{Replacement and separation} \label{zf-lemmas2}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{UnionI}: [| B\isasymin{}C;  A\isasymin{}B |] ==> A\isasymin{}Union(C)
-\tdx{UnionE}: [| A\isasymin{}Union(C);  !!B.[| A\isasymin{}B;  B\isasymin{}C |] ==> R |] ==> R
-
-\tdx{InterI}: [| !!x. x\isasymin{}C ==> A\isasymin{}x;  c\isasymin{}C |] ==> A\isasymin{}Inter(C)
-\tdx{InterD}: [| A\isasymin{}Inter(C);  B\isasymin{}C |] ==> A\isasymin{}B
-\tdx{InterE}: [| A\isasymin{}Inter(C);  A\isasymin{}B ==> R;  B \isasymnotin C ==> R |] ==> R
-
-\tdx{UN_I}:   [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x))
-\tdx{UN_E}:   [| b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x));  !!x.[| x\isasymin{}A;  b\isasymin{}B(x) |] ==> R 
-           |] ==> R
-
-\tdx{INT_I}:  [| !!x. x\isasymin{}A ==> b\isasymin{}B(x);  a\isasymin{}A |] ==> b\isasymin{}({\isasymInter}x\isasymin{}A. B(x))
-\tdx{INT_E}:  [| b\isasymin{}({\isasymInter}x\isasymin{}A. B(x));  a\isasymin{}A |] ==> b\isasymin{}B(a)
-\end{alltt*}
-\caption{General union and intersection} \label{zf-lemmas3}
-\end{figure}
-
-
-%%% upair.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{pairing}:   a\isasymin{}Upair(b,c) <-> (a=b | a=c)
-\tdx{UpairI1}:   a\isasymin{}Upair(a,b)
-\tdx{UpairI2}:   b\isasymin{}Upair(a,b)
-\tdx{UpairE}:    [| a\isasymin{}Upair(b,c);  a=b ==> P;  a=c ==> P |] ==> P
-\end{alltt*}
-\caption{Unordered pairs} \label{zf-upair1}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{UnI1}:      c\isasymin{}A ==> c\isasymin{}A \isasymunion B
-\tdx{UnI2}:      c\isasymin{}B ==> c\isasymin{}A \isasymunion B
-\tdx{UnCI}:      (c \isasymnotin B ==> c\isasymin{}A) ==> c\isasymin{}A \isasymunion B
-\tdx{UnE}:       [| c\isasymin{}A \isasymunion B;  c\isasymin{}A ==> P;  c\isasymin{}B ==> P |] ==> P
-
-\tdx{IntI}:      [| c\isasymin{}A;  c\isasymin{}B |] ==> c\isasymin{}A \isasyminter B
-\tdx{IntD1}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}A
-\tdx{IntD2}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}B
-\tdx{IntE}:      [| c\isasymin{}A \isasyminter B;  [| c\isasymin{}A; c\isasymin{}B |] ==> P |] ==> P
-
-\tdx{DiffI}:     [| c\isasymin{}A;  c \isasymnotin B |] ==> c\isasymin{}A - B
-\tdx{DiffD1}:    c\isasymin{}A - B ==> c\isasymin{}A
-\tdx{DiffD2}:    c\isasymin{}A - B ==> c  \isasymnotin  B
-\tdx{DiffE}:     [| c\isasymin{}A - B;  [| c\isasymin{}A; c \isasymnotin B |] ==> P |] ==> P
-\end{alltt*}
-\caption{Union, intersection, difference} \label{zf-Un}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{consI1}:    a\isasymin{}cons(a,B)
-\tdx{consI2}:    a\isasymin{}B ==> a\isasymin{}cons(b,B)
-\tdx{consCI}:    (a \isasymnotin B ==> a=b) ==> a\isasymin{}cons(b,B)
-\tdx{consE}:     [| a\isasymin{}cons(b,A);  a=b ==> P;  a\isasymin{}A ==> P |] ==> P
-
-\tdx{singletonI}:  a\isasymin{}{\ttlbrace}a{\ttrbrace}
-\tdx{singletonE}:  [| a\isasymin{}{\ttlbrace}b{\ttrbrace}; a=b ==> P |] ==> P
-\end{alltt*}
-\caption{Finite and singleton sets} \label{zf-upair2}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{succI1}:    i\isasymin{}succ(i)
-\tdx{succI2}:    i\isasymin{}j ==> i\isasymin{}succ(j)
-\tdx{succCI}:    (i \isasymnotin j ==> i=j) ==> i\isasymin{}succ(j)
-\tdx{succE}:     [| i\isasymin{}succ(j);  i=j ==> P;  i\isasymin{}j ==> P |] ==> P
-\tdx{succ_neq_0}:  [| succ(n)=0 |] ==> P
-\tdx{succ_inject}: succ(m) = succ(n) ==> m=n
-\end{alltt*}
-\caption{The successor function} \label{zf-succ}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{the_equality}: [| P(a); !!x. P(x) ==> x=a |] ==> (THE x. P(x))=a
-\tdx{theI}:         \isasymexists! x. P(x) ==> P(THE x. P(x))
-
-\tdx{if_P}:          P ==> (if P then a else b) = a
-\tdx{if_not_P}:     ~P ==> (if P then a else b) = b
-
-\tdx{mem_asym}:     [| a\isasymin{}b;  b\isasymin{}a |] ==> P
-\tdx{mem_irrefl}:   a\isasymin{}a ==> P
-\end{alltt*}
-\caption{Descriptions; non-circularity} \label{zf-the}
-\end{figure}
-
-
-\subsection{Unordered pairs and finite sets}
-Figure~\ref{zf-upair1} presents the principle of unordered pairing, along
-with its derived rules.  Binary union and intersection are defined in terms
-of unordered pairs (Fig.\ts\ref{zf-Un}).  Set difference is also included.  The
-rule \tdx{UnCI} is useful for classical reasoning about unions,
-like \isa{disjCI}\@; it supersedes \tdx{UnI1} and
-\tdx{UnI2}, but these rules are often easier to work with.  For
-intersection and difference we have both elimination and destruction rules.
-Again, there is no reason to provide a minimal rule set.
-
-Figure~\ref{zf-upair2} is concerned with finite sets: it presents rules
-for~\isa{cons}, the finite set constructor, and rules for singleton
-sets.  Figure~\ref{zf-succ} presents derived rules for the successor
-function, which is defined in terms of~\isa{cons}.  The proof that 
-\isa{succ} is injective appears to require the Axiom of Foundation.
-
-Definite descriptions (\sdx{THE}) are defined in terms of the singleton
-set~$\{0\}$, but their derived rules fortunately hide this
-(Fig.\ts\ref{zf-the}).  The rule~\tdx{theI} is difficult to apply
-because of the two occurrences of~$\Var{P}$.  However,
-\tdx{the_equality} does not have this problem and the files contain
-many examples of its use.
-
-Finally, the impossibility of having both $a\in b$ and $b\in a$
-(\tdx{mem_asym}) is proved by applying the Axiom of Foundation to
-the set $\{a,b\}$.  The impossibility of $a\in a$ is a trivial consequence.
-
-
-%%% subset.thy?
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Union_upper}:    B\isasymin{}A ==> B \isasymsubseteq Union(A)
-\tdx{Union_least}:    [| !!x. x\isasymin{}A ==> x \isasymsubseteq C |] ==> Union(A) \isasymsubseteq C
-
-\tdx{Inter_lower}:    B\isasymin{}A ==> Inter(A) \isasymsubseteq B
-\tdx{Inter_greatest}: [| a\isasymin{}A; !!x. x\isasymin{}A ==> C \isasymsubseteq x |] ==> C\isasymsubseteq{}Inter(A)
-
-\tdx{Un_upper1}:      A \isasymsubseteq A \isasymunion B
-\tdx{Un_upper2}:      B \isasymsubseteq A \isasymunion B
-\tdx{Un_least}:       [| A \isasymsubseteq C;  B \isasymsubseteq C |] ==> A \isasymunion B \isasymsubseteq C
-
-\tdx{Int_lower1}:     A \isasyminter B \isasymsubseteq A
-\tdx{Int_lower2}:     A \isasyminter B \isasymsubseteq B
-\tdx{Int_greatest}:   [| C \isasymsubseteq A;  C \isasymsubseteq B |] ==> C \isasymsubseteq A \isasyminter B
-
-\tdx{Diff_subset}:    A-B \isasymsubseteq A
-\tdx{Diff_contains}:  [| C \isasymsubseteq A;  C \isasyminter B = 0 |] ==> C \isasymsubseteq A-B
-
-\tdx{Collect_subset}: Collect(A,P) \isasymsubseteq A
-\end{alltt*}
-\caption{Subset and lattice properties} \label{zf-subset}
-\end{figure}
-
-
-\subsection{Subset and lattice properties}
-The subset relation is a complete lattice.  Unions form least upper bounds;
-non-empty intersections form greatest lower bounds.  Figure~\ref{zf-subset}
-shows the corresponding rules.  A few other laws involving subsets are
-included. 
-Reasoning directly about subsets often yields clearer proofs than
-reasoning about the membership relation.  Section~\ref{sec:ZF-pow-example}
-below presents an example of this, proving the equation 
-${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.
-
-%%% pair.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Pair_inject1}: <a,b> = <c,d> ==> a=c
-\tdx{Pair_inject2}: <a,b> = <c,d> ==> b=d
-\tdx{Pair_inject}:  [| <a,b> = <c,d>;  [| a=c; b=d |] ==> P |] ==> P
-\tdx{Pair_neq_0}:   <a,b>=0 ==> P
-
-\tdx{fst_conv}:     fst(<a,b>) = a
-\tdx{snd_conv}:     snd(<a,b>) = b
-\tdx{split}:        split(\%x y. c(x,y), <a,b>) = c(a,b)
-
-\tdx{SigmaI}:     [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> <a,b>\isasymin{}Sigma(A,B)
-
-\tdx{SigmaE}:     [| c\isasymin{}Sigma(A,B);  
-                !!x y.[| x\isasymin{}A; y\isasymin{}B(x); c=<x,y> |] ==> P |] ==> P
-
-\tdx{SigmaE2}:    [| <a,b>\isasymin{}Sigma(A,B);    
-                [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> P   |] ==> P
-\end{alltt*}
-\caption{Ordered pairs; projections; general sums} \label{zf-pair}
-\end{figure}
-
-
-\subsection{Ordered pairs} \label{sec:pairs}
-
-Figure~\ref{zf-pair} presents the rules governing ordered pairs,
-projections and general sums --- in particular, that
-$\{\{a\},\{a,b\}\}$ functions as an ordered pair.  This property is
-expressed as two destruction rules,
-\tdx{Pair_inject1} and \tdx{Pair_inject2}, and equivalently
-as the elimination rule \tdx{Pair_inject}.
-
-The rule \tdx{Pair_neq_0} asserts $\pair{a,b}\neq\emptyset$.  This
-is a property of $\{\{a\},\{a,b\}\}$, and need not hold for other 
-encodings of ordered pairs.  The non-standard ordered pairs mentioned below
-satisfy $\pair{\emptyset;\emptyset}=\emptyset$.
-
-The natural deduction rules \tdx{SigmaI} and \tdx{SigmaE}
-assert that \cdx{Sigma}$(A,B)$ consists of all pairs of the form
-$\pair{x,y}$, for $x\in A$ and $y\in B(x)$.  The rule \tdx{SigmaE2}
-merely states that $\pair{a,b}\in \isa{Sigma}(A,B)$ implies $a\in A$ and
-$b\in B(a)$.
-
-In addition, it is possible to use tuples as patterns in abstractions:
-\begin{center}
-{\tt\%<$x$,$y$>. $t$} \quad stands for\quad \isa{split(\%$x$ $y$.\ $t$)}
-\end{center}
-Nested patterns are translated recursively:
-{\tt\%<$x$,$y$,$z$>. $t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>. $t$} $\leadsto$
-\isa{split(\%$x$.\%<$y$,$z$>. $t$)} $\leadsto$ \isa{split(\%$x$. split(\%$y$
-  $z$.\ $t$))}.  The reverse translation is performed upon printing.
-\begin{warn}
-  The translation between patterns and \isa{split} is performed automatically
-  by the parser and printer.  Thus the internal and external form of a term
-  may differ, which affects proofs.  For example the term \isa{(\%<x,y>.<y,x>)<a,b>} requires the theorem \isa{split} to rewrite to
-  {\tt<b,a>}.
-\end{warn}
-In addition to explicit $\lambda$-abstractions, patterns can be used in any
-variable binding construct which is internally described by a
-$\lambda$-abstraction.  Here are some important examples:
-\begin{description}
-\item[Let:] \isa{let {\it pattern} = $t$ in $u$}
-\item[Choice:] \isa{THE~{\it pattern}~.~$P$}
-\item[Set operations:] \isa{\isasymUnion~{\it pattern}:$A$.~$B$}
-\item[Comprehension:] \isa{{\ttlbrace}~{\it pattern}:$A$~.~$P$~{\ttrbrace}}
-\end{description}
-
-
-%%% domrange.thy?
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{domainI}:     <a,b>\isasymin{}r ==> a\isasymin{}domain(r)
-\tdx{domainE}:     [| a\isasymin{}domain(r); !!y. <a,y>\isasymin{}r ==> P |] ==> P
-\tdx{domain_subset}: domain(Sigma(A,B)) \isasymsubseteq A
-
-\tdx{rangeI}:      <a,b>\isasymin{}r ==> b\isasymin{}range(r)
-\tdx{rangeE}:      [| b\isasymin{}range(r); !!x. <x,b>\isasymin{}r ==> P |] ==> P
-\tdx{range_subset}: range(A*B) \isasymsubseteq B
-
-\tdx{fieldI1}:     <a,b>\isasymin{}r ==> a\isasymin{}field(r)
-\tdx{fieldI2}:     <a,b>\isasymin{}r ==> b\isasymin{}field(r)
-\tdx{fieldCI}:     (<c,a> \isasymnotin r ==> <a,b>\isasymin{}r) ==> a\isasymin{}field(r)
-
-\tdx{fieldE}:      [| a\isasymin{}field(r); 
-                !!x. <a,x>\isasymin{}r ==> P; 
-                !!x. <x,a>\isasymin{}r ==> P      
-             |] ==> P
-
-\tdx{field_subset}:  field(A*A) \isasymsubseteq A
-\end{alltt*}
-\caption{Domain, range and field of a relation} \label{zf-domrange}
-\end{figure}
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{imageI}:      [| <a,b>\isasymin{}r; a\isasymin{}A |] ==> b\isasymin{}r``A
-\tdx{imageE}:      [| b\isasymin{}r``A; !!x.[| <x,b>\isasymin{}r; x\isasymin{}A |] ==> P |] ==> P
-
-\tdx{vimageI}:     [| <a,b>\isasymin{}r; b\isasymin{}B |] ==> a\isasymin{}r-``B
-\tdx{vimageE}:     [| a\isasymin{}r-``B; !!x.[| <a,x>\isasymin{}r;  x\isasymin{}B |] ==> P |] ==> P
-\end{alltt*}
-\caption{Image and inverse image} \label{zf-domrange2}
-\end{figure}
-
-
-\subsection{Relations}
-Figure~\ref{zf-domrange} presents rules involving relations, which are sets
-of ordered pairs.  The converse of a relation~$r$ is the set of all pairs
-$\pair{y,x}$ such that $\pair{x,y}\in r$; if $r$ is a function, then
-{\cdx{converse}$(r)$} is its inverse.  The rules for the domain
-operation, namely \tdx{domainI} and~\tdx{domainE}, assert that
-\cdx{domain}$(r)$ consists of all~$x$ such that $r$ contains
-some pair of the form~$\pair{x,y}$.  The range operation is similar, and
-the field of a relation is merely the union of its domain and range.  
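-
-As a small concrete case, if $r=\{\pair{a,b},\pair{a,c}\}$ then
-\[ \isa{domain}(r)=\{a\}, \quad \isa{range}(r)=\{b,c\}, \quad
-   \isa{field}(r)=\{a,b,c\}, \quad
-   \isa{converse}(r)=\{\pair{b,a},\pair{c,a}\}. \]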
-
-Figure~\ref{zf-domrange2} presents rules for images and inverse images.
-Note that these operations are generalisations of range and domain,
-respectively. 
-
-
-%%% func.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{fun_is_rel}:     f\isasymin{}Pi(A,B) ==> f \isasymsubseteq Sigma(A,B)
-
-\tdx{apply_equality}:  [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> f`a = b
-\tdx{apply_equality2}: [| <a,b>\isasymin{}f; <a,c>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b=c
-
-\tdx{apply_type}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> f`a\isasymin{}B(a)
-\tdx{apply_Pair}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> <a,f`a>\isasymin{}f
-\tdx{apply_iff}:      f\isasymin{}Pi(A,B) ==> <a,b>\isasymin{}f <-> a\isasymin{}A & f`a = b
-
-\tdx{fun_extension}:  [| f\isasymin{}Pi(A,B); g\isasymin{}Pi(A,D);
-                   !!x. x\isasymin{}A ==> f`x = g`x     |] ==> f=g
-
-\tdx{domain_type}:    [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> a\isasymin{}A
-\tdx{range_type}:     [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b\isasymin{}B(a)
-
-\tdx{Pi_type}:        [| f\isasymin{}A->C; !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> f\isasymin{}Pi(A,B)
-\tdx{domain_of_fun}:  f\isasymin{}Pi(A,B) ==> domain(f)=A
-\tdx{range_of_fun}:   f\isasymin{}Pi(A,B) ==> f\isasymin{}A->range(f)
-
-\tdx{restrict}:       a\isasymin{}A ==> restrict(f,A) ` a = f`a
-\tdx{restrict_type}:  [| !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> 
-                restrict(f,A)\isasymin{}Pi(A,B)
-\end{alltt*}
-\caption{Functions} \label{zf-func1}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{lamI}:     a\isasymin{}A ==> <a,b(a)>\isasymin{}(lam x\isasymin{}A. b(x))
-\tdx{lamE}:     [| p\isasymin{}(lam x\isasymin{}A. b(x)); !!x.[| x\isasymin{}A; p=<x,b(x)> |] ==> P 
-          |] ==>  P
-
-\tdx{lam_type}: [| !!x. x\isasymin{}A ==> b(x)\isasymin{}B(x) |] ==> (lam x\isasymin{}A. b(x))\isasymin{}Pi(A,B)
-
-\tdx{beta}:     a\isasymin{}A ==> (lam x\isasymin{}A. b(x)) ` a = b(a)
-\tdx{eta}:      f\isasymin{}Pi(A,B) ==> (lam x\isasymin{}A. f`x) = f
-\end{alltt*}
-\caption{$\lambda$-abstraction} \label{zf-lam}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{fun_empty}:           0\isasymin{}0->0
-\tdx{fun_single}:          {\ttlbrace}<a,b>{\ttrbrace}\isasymin{}{\ttlbrace}a{\ttrbrace} -> {\ttlbrace}b{\ttrbrace}
-
-\tdx{fun_disjoint_Un}:     [| f\isasymin{}A->B; g\isasymin{}C->D; A \isasyminter C = 0  |] ==>  
-                     (f \isasymunion g)\isasymin{}(A \isasymunion C) -> (B \isasymunion D)
-
-\tdx{fun_disjoint_apply1}: [| a\isasymin{}A; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
-                     (f \isasymunion g)`a = f`a
-
-\tdx{fun_disjoint_apply2}: [| c\isasymin{}C; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
-                     (f \isasymunion g)`c = g`c
-\end{alltt*}
-\caption{Constructing functions from smaller sets} \label{zf-func2}
-\end{figure}
-
-
-\subsection{Functions}
-Functions, represented by graphs, are notoriously difficult to reason
-about.  The ZF theory provides many derived rules, which overlap more
-than they ought.  This section presents the more important rules.
-
-Figure~\ref{zf-func1} presents the basic properties of \cdx{Pi}$(A,B)$,
-the generalized function space.  For example, if $f$ is a function and
-$\pair{a,b}\in f$, then $f`a=b$ (\tdx{apply_equality}).  Two functions
-are equal provided they have equal domains and deliver equal results
-(\tdx{fun_extension}).
-
-By \tdx{Pi_type}, a function typing of the form $f\in A\to C$ can be
-refined to the dependent typing $f\in\prod@{x\in A}B(x)$, given a suitable
-family of sets $\{B(x)\}@{x\in A}$.  Conversely, by \tdx{range_of_fun},
-any dependent typing can be flattened to yield a function type of the form
-$A\to C$; here, $C=\isa{range}(f)$.
-
-Among the laws for $\lambda$-abstraction, \tdx{lamI} and \tdx{lamE}
-describe the graph of the generated function, while \tdx{beta} and
-\tdx{eta} are the standard conversions.  We essentially have a
-dependently-typed $\lambda$-calculus (Fig.\ts\ref{zf-lam}).
-
-Figure~\ref{zf-func2} presents some rules that can be used to construct
-functions explicitly.  We start with functions consisting of at most one
-pair, and may form the union of two functions provided their domains are
-disjoint.  
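-
-For example, \tdx{fun_single} gives $\{\pair{0,a}\}\in\{0\}\to\{a\}$ and
-$\{\pair{1,b}\}\in\{1\}\to\{b\}$; since $\{0\}\int\{1\}=0$, rule
-\tdx{fun_disjoint_Un} yields
-\[ \{\pair{0,a}\}\un\{\pair{1,b}\} \;\in\; \{0,1\}\to\{a,b\}. \]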
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Int_absorb}:        A \isasyminter A = A
-\tdx{Int_commute}:       A \isasyminter B = B \isasyminter A
-\tdx{Int_assoc}:         (A \isasyminter B) \isasyminter C  =  A \isasyminter (B \isasyminter C)
-\tdx{Int_Un_distrib}:    (A \isasymunion B) \isasyminter C  =  (A \isasyminter C) \isasymunion (B \isasyminter C)
-
-\tdx{Un_absorb}:         A \isasymunion A = A
-\tdx{Un_commute}:        A \isasymunion B = B \isasymunion A
-\tdx{Un_assoc}:          (A \isasymunion B) \isasymunion C  =  A \isasymunion (B \isasymunion C)
-\tdx{Un_Int_distrib}:    (A \isasyminter B) \isasymunion C  =  (A \isasymunion C) \isasyminter (B \isasymunion C)
-
-\tdx{Diff_cancel}:       A-A = 0
-\tdx{Diff_disjoint}:     A \isasyminter (B-A) = 0
-\tdx{Diff_partition}:    A \isasymsubseteq B ==> A \isasymunion (B-A) = B
-\tdx{double_complement}: [| A \isasymsubseteq B; B \isasymsubseteq C |] ==> (B - (C-A)) = A
-\tdx{Diff_Un}:           A - (B \isasymunion C) = (A-B) \isasyminter (A-C)
-\tdx{Diff_Int}:          A - (B \isasyminter C) = (A-B) \isasymunion (A-C)
-
-\tdx{Union_Un_distrib}:  Union(A \isasymunion B) = Union(A) \isasymunion Union(B)
-\tdx{Inter_Un_distrib}:  [| a \isasymin A;  b \isasymin B |] ==> 
-                   Inter(A \isasymunion B) = Inter(A) \isasyminter Inter(B)
-
-\tdx{Int_Union_RepFun}:  A \isasyminter Union(B) = ({\isasymUnion}C \isasymin B. A \isasyminter C)
-
-\tdx{Un_Inter_RepFun}:   b \isasymin B ==> 
-                   A \isasymunion Inter(B) = ({\isasymInter}C \isasymin B. A \isasymunion C)
-
-\tdx{SUM_Un_distrib1}:   (SUM x \isasymin A \isasymunion B. C(x)) = 
-                   (SUM x \isasymin A. C(x)) \isasymunion (SUM x \isasymin B. C(x))
-
-\tdx{SUM_Un_distrib2}:   (SUM x \isasymin C. A(x) \isasymunion B(x)) =
-                   (SUM x \isasymin C. A(x)) \isasymunion (SUM x \isasymin C. B(x))
-
-\tdx{SUM_Int_distrib1}:  (SUM x \isasymin A \isasyminter B. C(x)) =
-                   (SUM x \isasymin A. C(x)) \isasyminter (SUM x \isasymin B. C(x))
-
-\tdx{SUM_Int_distrib2}:  (SUM x \isasymin C. A(x) \isasyminter B(x)) =
-                   (SUM x \isasymin C. A(x)) \isasyminter (SUM x \isasymin C. B(x))
-\end{alltt*}
-\caption{Equalities} \label{zf-equalities}
-\end{figure}
-
-
-\begin{figure}
-%\begin{constants} 
-%  \cdx{1}       & $i$           &       & $\{\emptyset\}$       \\
-%  \cdx{bool}    & $i$           &       & the set $\{\emptyset,1\}$     \\
-%  \cdx{cond}   & $[i,i,i]\To i$ &       & conditional for \isa{bool}    \\
-%  \cdx{not}    & $i\To i$       &       & negation for \isa{bool}       \\
-%  \sdx{and}    & $[i,i]\To i$   & Left 70 & conjunction for \isa{bool}  \\
-%  \sdx{or}     & $[i,i]\To i$   & Left 65 & disjunction for \isa{bool}  \\
-%  \sdx{xor}    & $[i,i]\To i$   & Left 65 & exclusive-or for \isa{bool}
-%\end{constants}
-%
-\begin{alltt*}\isastyleminor
-\tdx{bool_def}:      bool == {\ttlbrace}0,1{\ttrbrace}
-\tdx{cond_def}:      cond(b,c,d) == if b=1 then c else d
-\tdx{not_def}:       not(b)  == cond(b,0,1)
-\tdx{and_def}:       a and b == cond(a,b,0)
-\tdx{or_def}:        a or b  == cond(a,1,b)
-\tdx{xor_def}:       a xor b == cond(a,not(b),b)
-
-\tdx{bool_1I}:       1 \isasymin bool
-\tdx{bool_0I}:       0 \isasymin bool
-\tdx{boolE}:         [| c \isasymin bool;  c=1 ==> P;  c=0 ==> P |] ==> P
-\tdx{cond_1}:        cond(1,c,d) = c
-\tdx{cond_0}:        cond(0,c,d) = d
-\end{alltt*}
-\caption{The booleans} \label{zf-bool}
-\end{figure}
-
-
-\section{Further developments}
-The next group of developments is complex and extensive, and only
-highlights can be covered here.  It involves many theories and proofs. 
-
-Figure~\ref{zf-equalities} presents commutative, associative, distributive,
-and idempotency laws of union and intersection, along with other equations.
-
-Theory \thydx{Bool} defines $\{0,1\}$ as a set of booleans, with the usual
-operators including a conditional (Fig.\ts\ref{zf-bool}).  Although ZF is a
-first-order theory, you can obtain the effect of higher-order logic using
-\isa{bool}-valued functions, for example.  The constant~\isa{1} is
-translated to \isa{succ(0)}.
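-
-For example, the definitions unfold by \tdx{cond_1} and \tdx{cond_0} to give
-\[ 1\ \isa{and}\ b = \isa{cond}(1,b,0) = b, \qquad
-   0\ \isa{and}\ b = \isa{cond}(0,b,0) = 0. \]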
-
-\begin{figure}
-\index{*"+ symbol}
-\begin{constants}
-  \it symbol    & \it meta-type & \it priority & \it description \\ 
-  \tt +         & $[i,i]\To i$  &  Right 65     & disjoint union operator\\
-  \cdx{Inl}~~\cdx{Inr}  & $i\To i$      &       & injections\\
-  \cdx{case}    & $[i\To i,i\To i, i]\To i$ &   & conditional for $A+B$
-\end{constants}
-\begin{alltt*}\isastyleminor
-\tdx{sum_def}:   A+B == {\ttlbrace}0{\ttrbrace}*A \isasymunion {\ttlbrace}1{\ttrbrace}*B
-\tdx{Inl_def}:   Inl(a) == <0,a>
-\tdx{Inr_def}:   Inr(b) == <1,b>
-\tdx{case_def}:  case(c,d,u) == split(\%y z. cond(y, d(z), c(z)), u)
-
-\tdx{InlI}:      a \isasymin A ==> Inl(a) \isasymin A+B
-\tdx{InrI}:      b \isasymin B ==> Inr(b) \isasymin A+B
-
-\tdx{Inl_inject}:  Inl(a)=Inl(b) ==> a=b
-\tdx{Inr_inject}:  Inr(a)=Inr(b) ==> a=b
-\tdx{Inl_neq_Inr}: Inl(a)=Inr(b) ==> P
-
-\tdx{sum_iff}:  u \isasymin A+B <-> ({\isasymexists}x\isasymin{}A. u=Inl(x)) | ({\isasymexists}y\isasymin{}B. u=Inr(y))
-
-\tdx{case_Inl}:  case(c,d,Inl(a)) = c(a)
-\tdx{case_Inr}:  case(c,d,Inr(b)) = d(b)
-\end{alltt*}
-\caption{Disjoint unions} \label{zf-sum}
-\end{figure}
-
-
-\subsection{Disjoint unions}
-
-Theory \thydx{Sum} defines the disjoint union of two sets, with
-injections and a case analysis operator (Fig.\ts\ref{zf-sum}).  Disjoint
-unions play a role in datatype definitions, particularly when there is
-mutual recursion~\cite{paulson-set-II}.
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{QPair_def}:      <a;b> == a+b
-\tdx{qsplit_def}:     qsplit(c,p)  == THE y. {\isasymexists}a b. p=<a;b> & y=c(a,b)
-\tdx{qfsplit_def}:    qfsplit(R,z) == {\isasymexists}x y. z=<x;y> & R(x,y)
-\tdx{qconverse_def}:  qconverse(r) == {\ttlbrace}z. w \isasymin r, {\isasymexists}x y. w=<x;y> & z=<y;x>{\ttrbrace}
-\tdx{QSigma_def}:     QSigma(A,B)  == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x;y>{\ttrbrace}
-
-\tdx{qsum_def}:       A <+> B      == ({\ttlbrace}0{\ttrbrace} <*> A) \isasymunion ({\ttlbrace}1{\ttrbrace} <*> B)
-\tdx{QInl_def}:       QInl(a)      == <0;a>
-\tdx{QInr_def}:       QInr(b)      == <1;b>
-\tdx{qcase_def}:      qcase(c,d)   == qsplit(\%y z. cond(y, d(z), c(z)))
-\end{alltt*}
-\caption{Non-standard pairs, products and sums} \label{zf-qpair}
-\end{figure}
-
-
-\subsection{Non-standard ordered pairs}
-
-Theory \thydx{QPair} defines a notion of ordered pair that admits
-non-well-founded tupling (Fig.\ts\ref{zf-qpair}).  Such pairs are written
-{\tt<$a$;$b$>}.  It also defines the eliminator \cdx{qsplit}, the
-converse operator \cdx{qconverse}, and the summation operator
-\cdx{QSigma}.  These are completely analogous to the corresponding
-versions for standard ordered pairs.  The theory goes on to define a
-non-standard notion of disjoint sum using non-standard pairs.  All of these
-concepts satisfy the same properties as their standard counterparts; in
-addition, {\tt<$a$;$b$>} is continuous.  The theory supports coinductive
-definitions, for example of infinite lists~\cite{paulson-mscs}.
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{bnd_mono_def}:  bnd_mono(D,h) == 
-               h(D)\isasymsubseteq{}D & ({\isasymforall}W X. W\isasymsubseteq{}X --> X\isasymsubseteq{}D --> h(W)\isasymsubseteq{}h(X))
-
-\tdx{lfp_def}:       lfp(D,h) == Inter({\ttlbrace}X \isasymin Pow(D). h(X) \isasymsubseteq X{\ttrbrace})
-\tdx{gfp_def}:       gfp(D,h) == Union({\ttlbrace}X \isasymin Pow(D). X \isasymsubseteq h(X){\ttrbrace})
-
-
-\tdx{lfp_lowerbound}: [| h(A) \isasymsubseteq A;  A \isasymsubseteq D |] ==> lfp(D,h) \isasymsubseteq A
-
-\tdx{lfp_subset}:    lfp(D,h) \isasymsubseteq D
-
-\tdx{lfp_greatest}:  [| bnd_mono(D,h);  
-                  !!X. [| h(X) \isasymsubseteq X;  X \isasymsubseteq D |] ==> A \isasymsubseteq X 
-               |] ==> A \isasymsubseteq lfp(D,h)
-
-\tdx{lfp_Tarski}:    bnd_mono(D,h) ==> lfp(D,h) = h(lfp(D,h))
-
-\tdx{induct}:        [| a \isasymin lfp(D,h);  bnd_mono(D,h);
-                  !!x. x \isasymin h(Collect(lfp(D,h),P)) ==> P(x)
-               |] ==> P(a)
-
-\tdx{lfp_mono}:      [| bnd_mono(D,h);  bnd_mono(E,i);
-                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
-               |] ==> lfp(D,h) \isasymsubseteq lfp(E,i)
-
-\tdx{gfp_upperbound}: [| A \isasymsubseteq h(A);  A \isasymsubseteq D |] ==> A \isasymsubseteq gfp(D,h)
-
-\tdx{gfp_subset}:    gfp(D,h) \isasymsubseteq D
-
-\tdx{gfp_least}:     [| bnd_mono(D,h);  
-                  !!X. [| X \isasymsubseteq h(X);  X \isasymsubseteq D |] ==> X \isasymsubseteq A
-               |] ==> gfp(D,h) \isasymsubseteq A
-
-\tdx{gfp_Tarski}:    bnd_mono(D,h) ==> gfp(D,h) = h(gfp(D,h))
-
-\tdx{coinduct}:      [| bnd_mono(D,h); a \isasymin X; X \isasymsubseteq h(X \isasymunion gfp(D,h)); X \isasymsubseteq D 
-               |] ==> a \isasymin gfp(D,h)
-
-\tdx{gfp_mono}:      [| bnd_mono(D,h);  D \isasymsubseteq E;
-                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
-               |] ==> gfp(D,h) \isasymsubseteq gfp(E,i)
-\end{alltt*}
-\caption{Least and greatest fixedpoints} \label{zf-fixedpt}
-\end{figure}
-
-
-\subsection{Least and greatest fixedpoints}
-
-The Knaster-Tarski Theorem states that every monotone function over a
-complete lattice has a fixedpoint.  Theory \thydx{Fixedpt} proves the
-Theorem only for a particular lattice, namely the lattice of subsets of a
-set (Fig.\ts\ref{zf-fixedpt}).  The theory defines least and greatest
-fixedpoint operators with corresponding induction and coinduction rules.
-These are essential to many definitions that follow, including the natural
-numbers and the transitive closure operator.  The (co)inductive definition
-package also uses the fixedpoint operators~\cite{paulson-CADE}.  See
-Davey and Priestley~\cite{davey-priestley} for more on the Knaster-Tarski
-Theorem and my paper~\cite{paulson-set-II} for discussion of the Isabelle
-proofs.
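-
-For a preview of how these operators are used, the natural numbers are
-essentially obtained as a least fixedpoint over the domain~\isa{Inf}, roughly
-\[ \isa{nat} \;=\;
-   \isa{lfp}(\isa{Inf},\ \lambda X.\ \{0\}\un\{\isa{succ}(i).\ i\in X\}), \]
-and mathematical induction is then an instance of rule \tdx{induct}.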
-
-Monotonicity properties are proved for most of the set-forming operations:
-union, intersection, Cartesian product, image, domain, range, etc.  These
-are useful for applying the Knaster-Tarski Fixedpoint Theorem.  The proofs
-themselves are trivial applications of Isabelle's classical reasoner. 
-
-
-\subsection{Finite sets and lists}
-
-Theory \texttt{Finite} (Figure~\ref{zf-fin}) defines the finite set operator;
-$\isa{Fin}(A)$ is the set of all finite sets over~$A$.  The theory employs
-Isabelle's inductive definition package, which proves various rules
-automatically.  The induction rule shown is stronger than the one proved by
-the package.  The theory also defines the set of all finite functions
-between two given sets.
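-
-For instance, for $a,b\in A$ the introduction rules of Fig.\ts\ref{zf-fin} give
-\[ 0\in\isa{Fin}(A), \qquad \isa{cons}(b,0)\in\isa{Fin}(A), \qquad
-   \{a,b\}=\isa{cons}(a,\isa{cons}(b,0))\in\isa{Fin}(A). \]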
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Fin.emptyI}      0 \isasymin Fin(A)
-\tdx{Fin.consI}       [| a \isasymin A;  b \isasymin Fin(A) |] ==> cons(a,b) \isasymin Fin(A)
-
-\tdx{Fin_induct}
-    [| b \isasymin Fin(A);
-       P(0);
-       !!x y. [| x\isasymin{}A; y\isasymin{}Fin(A); x\isasymnotin{}y; P(y) |] ==> P(cons(x,y))
-    |] ==> P(b)
-
-\tdx{Fin_mono}:       A \isasymsubseteq B ==> Fin(A) \isasymsubseteq Fin(B)
-\tdx{Fin_UnI}:        [| b \isasymin Fin(A);  c \isasymin Fin(A) |] ==> b \isasymunion c \isasymin Fin(A)
-\tdx{Fin_UnionI}:     C \isasymin Fin(Fin(A)) ==> Union(C) \isasymin Fin(A)
-\tdx{Fin_subset}:     [| c \isasymsubseteq b;  b \isasymin Fin(A) |] ==> c \isasymin Fin(A)
-\end{alltt*}
-\caption{The finite set operator} \label{zf-fin}
-\end{figure}
-
-\begin{figure}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{list}    & $i\To i$      && lists over some set\\
-  \cdx{list_case} & $[i, [i,i]\To i, i] \To i$  && conditional for $list(A)$ \\
-  \cdx{map}     & $[i\To i, i] \To i$   &       & mapping functional\\
-  \cdx{length}  & $i\To i$              &       & length of a list\\
-  \cdx{rev}     & $i\To i$              &       & reverse of a list\\
-  \tt \at       & $[i,i]\To i$  &  Right 60     & append for lists\\
-  \cdx{flat}    & $i\To i$   &                  & append of list of lists
-\end{constants}
-
-\underscoreon %%because @ is used here
-\begin{alltt*}\isastyleminor
-\tdx{NilI}:       Nil \isasymin list(A)
-\tdx{ConsI}:      [| a \isasymin A;  l \isasymin list(A) |] ==> Cons(a,l) \isasymin list(A)
-
-\tdx{List.induct}
-    [| l \isasymin list(A);
-       P(Nil);
-       !!x y. [| x \isasymin A;  y \isasymin list(A);  P(y) |] ==> P(Cons(x,y))
-    |] ==> P(l)
-
-\tdx{Cons_iff}:       Cons(a,l)=Cons(a',l') <-> a=a' & l=l'
-\tdx{Nil_Cons_iff}:    Nil \isasymnoteq Cons(a,l)
-
-\tdx{list_mono}:      A \isasymsubseteq B ==> list(A) \isasymsubseteq list(B)
-
-\tdx{map_ident}:      l\isasymin{}list(A) ==> map(\%u. u, l) = l
-\tdx{map_compose}:    l\isasymin{}list(A) ==> map(h, map(j,l)) = map(\%u. h(j(u)), l)
-\tdx{map_app_distrib}: xs\isasymin{}list(A) ==> map(h, xs@ys) = map(h,xs)@map(h,ys)
-\tdx{map_type}
-    [| l\isasymin{}list(A); !!x. x\isasymin{}A ==> h(x)\isasymin{}B |] ==> map(h,l)\isasymin{}list(B)
-\tdx{map_flat}
-    ls: list(list(A)) ==> map(h, flat(ls)) = flat(map(map(h),ls))
-\end{alltt*}
-\caption{Lists} \label{zf-list}
-\end{figure}
-
-
-Figure~\ref{zf-list} presents the set of lists over~$A$, $\isa{list}(A)$.  The
-definition employs Isabelle's datatype package, which defines the introduction
-and induction rules automatically, as well as the constructors, case operator
-(\isa{list\_case}) and recursion operator.  The theory then defines the usual
-list functions by primitive recursion.  See theory \texttt{List}.
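-
-For instance, \isa{map} satisfies the expected recursion equations
-\[ \isa{map}(h,\isa{Nil}) = \isa{Nil}, \qquad
-   \isa{map}(h,\isa{Cons}(a,l)) = \isa{Cons}(h(a),\ \isa{map}(h,l)), \]
-from which properties such as \tdx{map_app_distrib} follow by list induction.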
-
-
-\subsection{Miscellaneous}
-
-\begin{figure}
-\begin{constants} 
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \sdx{O}       & $[i,i]\To i$  &  Right 60     & composition ($\circ$) \\
-  \cdx{id}      & $i\To i$      &       & identity function \\
-  \cdx{inj}     & $[i,i]\To i$  &       & injective function space\\
-  \cdx{surj}    & $[i,i]\To i$  &       & surjective function space\\
-  \cdx{bij}     & $[i,i]\To i$  &       & bijective function space
-\end{constants}
-
-\begin{alltt*}\isastyleminor
-\tdx{comp_def}: r O s     == {\ttlbrace}xz \isasymin domain(s)*range(r) . 
-                        {\isasymexists}x y z. xz=<x,z> & <x,y> \isasymin s & <y,z> \isasymin r{\ttrbrace}
-\tdx{id_def}:   id(A)     == (lam x \isasymin A. x)
-\tdx{inj_def}:  inj(A,B)  == {\ttlbrace} f\isasymin{}A->B. {\isasymforall}w\isasymin{}A. {\isasymforall}x\isasymin{}A. f`w=f`x --> w=x {\ttrbrace}
-\tdx{surj_def}: surj(A,B) == {\ttlbrace} f\isasymin{}A->B . {\isasymforall}y\isasymin{}B. {\isasymexists}x\isasymin{}A. f`x=y {\ttrbrace}
-\tdx{bij_def}:  bij(A,B)  == inj(A,B) \isasyminter surj(A,B)
-
-
-\tdx{left_inverse}:    [| f\isasymin{}inj(A,B);  a\isasymin{}A |] ==> converse(f)`(f`a) = a
-\tdx{right_inverse}:   [| f\isasymin{}inj(A,B);  b\isasymin{}range(f) |] ==> 
-                 f`(converse(f)`b) = b
-
-\tdx{inj_converse_inj}: f\isasymin{}inj(A,B) ==> converse(f) \isasymin inj(range(f),A)
-\tdx{bij_converse_bij}: f\isasymin{}bij(A,B) ==> converse(f) \isasymin bij(B,A)
-
-\tdx{comp_type}:     [| s \isasymsubseteq A*B;  r \isasymsubseteq B*C |] ==> (r O s) \isasymsubseteq A*C
-\tdx{comp_assoc}:    (r O s) O t = r O (s O t)
-
-\tdx{left_comp_id}:  r \isasymsubseteq A*B ==> id(B) O r = r
-\tdx{right_comp_id}: r \isasymsubseteq A*B ==> r O id(A) = r
-
-\tdx{comp_func}:     [| g\isasymin{}A->B; f\isasymin{}B->C |] ==> (f O g) \isasymin A->C
-\tdx{comp_func_apply}: [| g\isasymin{}A->B; f\isasymin{}B->C; a\isasymin{}A |] ==> (f O g)`a = f`(g`a)
-
-\tdx{comp_inj}:      [| g\isasymin{}inj(A,B);  f\isasymin{}inj(B,C)  |] ==> (f O g)\isasymin{}inj(A,C)
-\tdx{comp_surj}:     [| g\isasymin{}surj(A,B); f\isasymin{}surj(B,C) |] ==> (f O g)\isasymin{}surj(A,C)
-\tdx{comp_bij}:      [| g\isasymin{}bij(A,B); f\isasymin{}bij(B,C) |] ==> (f O g)\isasymin{}bij(A,C)
-
-\tdx{left_comp_inverse}:    f\isasymin{}inj(A,B) ==> converse(f) O f = id(A)
-\tdx{right_comp_inverse}:   f\isasymin{}surj(A,B) ==> f O converse(f) = id(B)
-
-\tdx{bij_disjoint_Un}:  
-    [| f\isasymin{}bij(A,B);  g\isasymin{}bij(C,D);  A \isasyminter C = 0;  B \isasyminter D = 0 |] ==> 
-    (f \isasymunion g)\isasymin{}bij(A \isasymunion C, B \isasymunion D)
-
-\tdx{restrict_bij}: [| f\isasymin{}inj(A,B); C\isasymsubseteq{}A |] ==> restrict(f,C)\isasymin{}bij(C, f``C)
-\end{alltt*}
-\caption{Permutations} \label{zf-perm}
-\end{figure}
-
-The theory \thydx{Perm} is concerned with permutations (bijections) and
-related concepts.  These include composition of relations, the identity
-relation, and three specialized function spaces: injective, surjective and
-bijective.  Figure~\ref{zf-perm} displays many of their properties that
-have been proved.  These results are fundamental to a treatment of
-equipollence and cardinality.
-
-Theory \thydx{Univ} defines a `universe' $\isa{univ}(A)$, which is used by
-the datatype package.  This set contains $A$ and the
-natural numbers.  Vitally, it is closed under finite products: 
-$\isa{univ}(A)\times\isa{univ}(A)\subseteq\isa{univ}(A)$.  This theory also
-defines the cumulative hierarchy of axiomatic set theory, which
-traditionally is written $V@\alpha$ for an ordinal~$\alpha$.  The
-`universe' is a simple generalization of~$V@\omega$.
-
-Theory \thydx{QUniv} defines a `universe' $\isa{quniv}(A)$, which is used by
-the datatype package to construct codatatypes such as streams.  It is
-analogous to $\isa{univ}(A)$ (and is defined in terms of it) but is closed
-under the non-standard product and sum.
-
-
-\section{Automatic Tools}
-
-ZF provides the simplifier and the classical reasoner.  Moreover it supplies a
-specialized tool to infer `types' of terms.
-
-\subsection{Simplification and Classical Reasoning}
-
-ZF inherits simplification from FOL but adapts it to set theory.  The
-extraction of rewrite rules takes the ZF primitives into account.  It can
-strip bounded universal quantifiers from a formula; for example, ${\forall
-  x\in A. f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
-f(x)=g(x)$.  Given $a\in\{x\in A. P(x)\}$ it extracts rewrite rules from $a\in
-A$ and~$P(a)$.  It can also break down $a\in A\int B$ and $a\in A-B$.
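-For example, the following goal should be provable by \isa{simp} alone,
-precisely because the bounded quantifier in the assumptions is turned into a
-conditional rewrite rule (a small sketch, not taken from the distribution):
-\begin{isabelle}
-\isacommand{lemma}\ "[|\ \isasymforall x\ \isasymin\ A.\ f(x)\ =\ g(x);\ \ a\ \isasymin\ A\ |]\ ==>\ f(a)\ =\ g(a)"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}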
-
-The default simpset used by \isa{simp} contains congruence rules for all of ZF's
-binding operators.  It contains all the conversion rules, such as
-\isa{fst} and
-\isa{snd}, as well as the rewrites shown in Fig.\ts\ref{zf-simpdata}.
-
-Classical reasoner methods such as \isa{blast} and \isa{auto} refer to
-a rich collection of built-in axioms for all the set-theoretic
-primitives.
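-For instance, a goal such as the following is dispatched by \isa{blast}
-with no further guidance (a small sketch):
-\begin{isabelle}
-\isacommand{lemma}\ "A\ \isasyminter\ B\ \isasymsubseteq\ A\ \isasymunion\ B"\isanewline
-\ \ \isacommand{by}\ blast
-\end{isabelle}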
-
-
-\begin{figure}
-\begin{eqnarray*}
-  a\in \emptyset        & \bimp &  \bot\\
-  a \in A \un B      & \bimp &  a\in A \disj a\in B\\
-  a \in A \int B      & \bimp &  a\in A \conj a\in B\\
-  a \in A-B             & \bimp &  a\in A \conj \lnot (a\in B)\\
-  \pair{a,b}\in \isa{Sigma}(A,B)
-                        & \bimp &  a\in A \conj b\in B(a)\\
-  a \in \isa{Collect}(A,P)      & \bimp &  a\in A \conj P(a)\\
-  (\forall x \in \emptyset. P(x)) & \bimp &  \top\\
-  (\forall x \in A. \top)       & \bimp &  \top
-\end{eqnarray*}
-\caption{Some rewrite rules for set theory} \label{zf-simpdata}
-\end{figure}
-
-
-\subsection{Type-Checking Tactics}
-\index{type-checking tactics}
-
-Isabelle/ZF provides simple tactics to help automate those proofs that are
-essentially type-checking.  Such proofs are built by applying rules such as
-these:
-\begin{ttbox}\isastyleminor
-[| ?P ==> ?a \isasymin ?A; ~?P ==> ?b \isasymin ?A |] 
-==> (if ?P then ?a else ?b) \isasymin ?A
-
-[| ?m \isasymin nat; ?n \isasymin nat |] ==> ?m #+ ?n \isasymin nat
-
-?a \isasymin ?A ==> Inl(?a) \isasymin ?A + ?B  
-\end{ttbox}
-In typical applications, the goal has the form $t\in\Var{A}$: in other words,
-we have a specific term~$t$ and need to infer its `type' by instantiating the
-set variable~$\Var{A}$.  Neither the simplifier nor the classical reasoner
-does this job well.  The if-then-else rule, and many similar ones, can make
-the classical reasoner loop.  The simplifier refuses (on principle) to
-instantiate variables during rewriting, so goals such as \isa{i\#+j \isasymin \ ?A}
-are left unsolved.
-
-The simplifier calls the type-checker to solve rewritten subgoals: this stage
-can indeed instantiate variables.  If you have defined new constants and
-proved type-checking rules for them, then declare the rules using
-the attribute \isa{TC} and the rest should be automatic.  In
-particular, the simplifier will use type-checking to help satisfy
-conditional rewrite rules. Call the method \ttindex{typecheck} to
-break down all subgoals using type-checking rules. You can add new
-type-checking rules temporarily like this:
-\begin{isabelle}
-\isacommand{apply}\ (typecheck add:\ inj_is_fun)
-\end{isabelle}
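-To make such a rule available permanently, declare it with the \isa{TC}
-attribute (here reusing \isa{inj\_is\_fun} purely as an illustration):
-\begin{isabelle}
-\isacommand{declare}\ inj\_is\_fun\ [TC]
-\end{isabelle}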
-
-
-%Though the easiest way to invoke the type-checker is via the simplifier,
-%specialized applications may require more detailed knowledge of
-%the type-checking primitives.  They are modelled on the simplifier's:
-%\begin{ttdescription}
-%\item[\ttindexbold{tcset}] is the type of tcsets: sets of type-checking rules.
-%
-%\item[\ttindexbold{addTCs}] is an infix operator to add type-checking rules to
-%  a tcset.
-%  
-%\item[\ttindexbold{delTCs}] is an infix operator to remove type-checking rules
-%  from a tcset.
-%
-%\item[\ttindexbold{typecheck_tac}] is a tactic for attempting to prove all
-%  subgoals using the rules given in its argument, a tcset.
-%\end{ttdescription}
-%
-%Tcsets, like simpsets, are associated with theories and are merged when
-%theories are merged.  There are further primitives that use the default tcset.
-%\begin{ttdescription}
-%\item[\ttindexbold{tcset}] is a function to return the default tcset; use the
-%  expression \isa{tcset()}.
-%
-%\item[\ttindexbold{AddTCs}] adds type-checking rules to the default tcset.
-%  
-%\item[\ttindexbold{DelTCs}] removes type-checking rules from the default
-%  tcset.
-%
-%\item[\ttindexbold{Typecheck_tac}] calls \isa{typecheck_tac} using the
-%  default tcset.
-%\end{ttdescription}
-%
-%To supply some type-checking rules temporarily, using \isa{Addrules} and
-%later \isa{Delrules} is the simplest way.  There is also a high-tech
-%approach.  Call the simplifier with a new solver expressed using
-%\ttindexbold{type_solver_tac} and your temporary type-checking rules.
-%\begin{ttbox}\isastyleminor
-%by (asm_simp_tac 
-%     (simpset() setSolver type_solver_tac (tcset() addTCs prems)) 2);
-%\end{ttbox}
-
-
-\section{Natural number and integer arithmetic}
-
-\index{arithmetic|(}
-
-\begin{figure}\small
-\index{#*@{\tt\#*} symbol}
-\index{*div symbol}
-\index{*mod symbol}
-\index{#+@{\tt\#+} symbol}
-\index{#-@{\tt\#-} symbol}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{nat}     & $i$                   &       & set of natural numbers \\
-  \cdx{nat_case}& $[i,i\To i,i]\To i$     &     & conditional for $nat$\\
-  \tt \#*       & $[i,i]\To i$  &  Left 70      & multiplication \\
-  \tt div       & $[i,i]\To i$  &  Left 70      & division\\
-  \tt mod       & $[i,i]\To i$  &  Left 70      & modulus\\
-  \tt \#+       & $[i,i]\To i$  &  Left 65      & addition\\
-  \tt \#-       & $[i,i]\To i$  &  Left 65      & subtraction
-\end{constants}
-
-\begin{alltt*}\isastyleminor
-\tdx{nat_def}: nat == lfp(lam r \isasymin Pow(Inf). {\ttlbrace}0{\ttrbrace} \isasymunion {\ttlbrace}succ(x). x \isasymin r{\ttrbrace})
-
-\tdx{nat_case_def}:  nat_case(a,b,k) == 
-              THE y. k=0 & y=a | ({\isasymexists}x. k=succ(x) & y=b(x))
-
-\tdx{nat_0I}:           0 \isasymin nat
-\tdx{nat_succI}:        n \isasymin nat ==> succ(n) \isasymin nat
-
-\tdx{nat_induct}:        
-    [| n \isasymin nat;  P(0);  !!x. [| x \isasymin nat;  P(x) |] ==> P(succ(x)) 
-    |] ==> P(n)
-
-\tdx{nat_case_0}:       nat_case(a,b,0) = a
-\tdx{nat_case_succ}:    nat_case(a,b,succ(m)) = b(m)
-
-\tdx{add_0_natify}:     0 #+ n = natify(n)
-\tdx{add_succ}:         succ(m) #+ n = succ(m #+ n)
-
-\tdx{mult_type}:        m #* n \isasymin nat
-\tdx{mult_0}:           0 #* n = 0
-\tdx{mult_succ}:        succ(m) #* n = n #+ (m #* n)
-\tdx{mult_commute}:     m #* n = n #* m
-\tdx{add_mult_dist}:    (m #+ n) #* k = (m #* k) #+ (n #* k)
-\tdx{mult_assoc}:       (m #* n) #* k = m #* (n #* k)
-\tdx{mod_div_equality}: m \isasymin nat ==> (m div n)#*n #+ m mod n = m
-\end{alltt*}
-\caption{The natural numbers} \label{zf-nat}
-\end{figure}
-
-\index{natural numbers}
-
-Theory \thydx{Nat} defines the natural numbers and mathematical
-induction, along with a case analysis operator.  The set of natural
-numbers, here called \isa{nat}, is known in set theory as the ordinal~$\omega$.
-
-Theory \thydx{Arith} develops arithmetic on the natural numbers
-(Fig.\ts\ref{zf-nat}).  Addition, multiplication and subtraction are defined
-by primitive recursion.  Division and remainder are defined by repeated
-subtraction, which requires well-founded recursion; the termination argument
-relies on the divisor's being non-zero.  Many properties are proved:
-commutative, associative and distributive laws, identity and cancellation
-laws, etc.  The most interesting result is perhaps the theorem $a \bmod b +
-(a/b)\times b = a$.
-
-To minimize the need for tedious proofs of $t\in\isa{nat}$, the arithmetic
-operators coerce their arguments to be natural numbers.  The function
-\cdx{natify} is defined such that $\isa{natify}(n) = n$ if $n$ is a natural
-number, $\isa{natify}(\isa{succ}(x)) =
-\isa{succ}(\isa{natify}(x))$ for all $x$, and finally
-$\isa{natify}(x)=0$ in all other cases.  The benefit is that the addition,
-subtraction, multiplication, division and remainder operators always return
-natural numbers, regardless of their arguments.  Algebraic laws (commutative,
-associative, distributive) are unconditional.  Occurrences of \isa{natify}
-as operands of those operators are simplified away.  Any remaining occurrences
-can either be tolerated or else eliminated by proving that the argument is a
-natural number.
-
-The simplifier automatically cancels common terms on the opposite sides of
-subtraction and of relations ($=$, $<$ and $\le$).  Here is an example:
-\begin{isabelle}
- 1. i \#+ j \#+ k \#- j < k \#+ l\isanewline
-\isacommand{apply}\ simp\isanewline
- 1. natify(i) < natify(l)
-\end{isabelle}
-Given the assumptions \isa{i \isasymin nat} and \isa{l \isasymin nat}, both occurrences of
-\cdx{natify} would be simplified away.
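-As a small illustration of the coercions, a goal such as the following one
-should be provable by simplification alone (a sketch):
-\begin{isabelle}
-\isacommand{lemma}\ "n\ \isasymin\ nat\ ==>\ 0\ \#+\ n\ =\ n"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}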
-
-
-\begin{figure}\small
-\index{$*@{\tt\$*} symbol}
-\index{$+@{\tt\$+} symbol}
-\index{$-@{\tt\$-} symbol}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{int}     & $i$                   &       & set of integers \\
-  \tt \$*       & $[i,i]\To i$  &  Left 70      & multiplication \\
-  \tt \$+       & $[i,i]\To i$  &  Left 65      & addition\\
-  \tt \$-       & $[i,i]\To i$  &  Left 65      & subtraction\\
-  \tt \$<       & $[i,i]\To o$  &  Left 50      & $<$ on integers\\
-  \tt \$<=      & $[i,i]\To o$  &  Left 50      & $\le$ on integers
-\end{constants}
-
-\begin{alltt*}\isastyleminor
-\tdx{zadd_0_intify}:    0 $+ n = intify(n)
-
-\tdx{zmult_type}:       m $* n \isasymin int
-\tdx{zmult_0}:          0 $* n = 0
-\tdx{zmult_commute}:    m $* n = n $* m
-\tdx{zadd_zmult_dist}:   (m $+ n) $* k = (m $* k) $+ (n $* k)
-\tdx{zmult_assoc}:      (m $* n) $* k = m $* (n $* k)
-\end{alltt*}
-\caption{The integers} \label{zf-int}
-\end{figure}
-
-
-\index{integers}
-
-Theory \thydx{Int} defines the integers, as equivalence classes of natural
-numbers.   Figure~\ref{zf-int} presents a tidy collection of laws.  In
-fact, a large library of facts is proved, including monotonicity laws for
-addition and multiplication, covering both positive and negative operands.  
-
-As with the natural numbers, the need for typing proofs is minimized.  All the
-operators defined in Fig.\ts\ref{zf-int} coerce their operands to integers by
-applying the function \cdx{intify}.  This function is the identity on integers
-and maps other operands to zero.
-
-Decimal notation is provided for the integers.  Numbers, written as
-\isa{\#$nnn$} or \isa{\#-$nnn$}, are represented internally in
-two's-complement binary.  Expressions involving addition, subtraction and
-multiplication of numeral constants are evaluated (with acceptable efficiency)
-by simplification.  The simplifier also collects similar terms, multiplying
-them by a numerical coefficient.  It also cancels occurrences of the same
-terms on opposite sides of the relational operators.  Example:
-\begin{isabelle}
- 1. y \$+ z \$+ \#-3 \$* x \$+ y \$<=  x \$* \#2 \$+
-z\isanewline
-\isacommand{apply}\ simp\isanewline
- 1. \#2 \$* y \$<= \#5 \$* x
-\end{isabelle}
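-Arithmetic on numeral constants is equally direct; for example, a goal such
-as the following should be proved by \isa{simp} (a sketch):
-\begin{isabelle}
-\isacommand{lemma}\ "\#3\ \$*\ \#4\ =\ \#12"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}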
-For more information on the integers, please see the theories on directory
-\texttt{ZF/Integ}. 
-
-\index{arithmetic|)}
-
-
-\section{Datatype definitions}
-\label{sec:ZF:datatype}
-\index{*datatype|(}
-
-The \ttindex{datatype} definition package of ZF constructs inductive datatypes
-similar to \ML's.  It can also construct coinductive datatypes
-(codatatypes), which are non-well-founded structures such as streams.  It
-defines the set using a fixed-point construction and proves induction rules,
-as well as theorems for recursion and case combinators.  It supplies
-mechanisms for reasoning about freeness.  The datatype package can handle both
-mutual and indirect recursion.
-
-
-\subsection{Basics}
-\label{subsec:datatype:basics}
-
-A \isa{datatype} definition has the following form:
-\[
-\begin{array}{llcl}
-\mathtt{datatype} & t@1(A@1,\ldots,A@h) & = &
-  constructor^1@1 ~\mid~ \ldots ~\mid~ constructor^1@{k@1} \\
- & & \vdots \\
-\mathtt{and} & t@n(A@1,\ldots,A@h) & = &
-  constructor^n@1 ~\mid~ \ldots ~\mid~ constructor^n@{k@n}
-\end{array}
-\]
-Here $t@1$, \ldots,~$t@n$ are identifiers and $A@1$, \ldots,~$A@h$ are
-variables: the datatype's parameters.  Each constructor specification has the
-form \dquotesoff
-\[ C \hbox{\tt~( } \hbox{\tt"} x@1 \hbox{\tt:} T@1 \hbox{\tt"},\;
-                   \ldots,\;
-                   \hbox{\tt"} x@m \hbox{\tt:} T@m \hbox{\tt"}
-     \hbox{\tt~)}
-\]
-Here $C$ is the constructor name, and variables $x@1$, \ldots,~$x@m$ are the
-constructor arguments, belonging to the sets $T@1$, \ldots, $T@m$,
-respectively.  Typically each $T@j$ is either a constant set, a datatype
-parameter (one of $A@1$, \ldots, $A@h$) or a recursive occurrence of one of
-the datatypes, say $t@i(A@1,\ldots,A@h)$.  More complex possibilities exist,
-but they are much harder to realize.  Often, additional information must be
-supplied in the form of theorems.
-
-A datatype can occur recursively as the argument of some function~$F$.  This
-is called a {\em nested} (or \emph{indirect}) occurrence.  It is only allowed
-if the datatype package is given a theorem asserting that $F$ is monotonic.
-If the datatype has indirect occurrences, then Isabelle/ZF does not support
-recursive function definitions.
-
-A simple example of a datatype is \isa{list}, which is built-in, and is
-defined by
-\begin{alltt*}\isastyleminor
-consts     list :: "i=>i"
-datatype  "list(A)" = Nil | Cons ("a \isasymin A", "l \isasymin list(A)")
-\end{alltt*}
-Note that the datatype operator must be declared as a constant first.
-However, the package declares the constructors.  Here, \isa{Nil} gets type
-$i$ and \isa{Cons} gets type $[i,i]\To i$.
-
-Trees and forests can be modelled by the mutually recursive datatype
-definition
-\begin{alltt*}\isastyleminor
-consts   
-  tree :: "i=>i"
-  forest :: "i=>i"
-  tree_forest :: "i=>i"
-datatype  "tree(A)"   = Tcons ("a{\isasymin}A",  "f{\isasymin}forest(A)")
-and "forest(A)" = Fnil | Fcons ("t{\isasymin}tree(A)",  "f{\isasymin}forest(A)")
-\end{alltt*}
-Here $\isa{tree}(A)$ is the set of trees over $A$, $\isa{forest}(A)$ is
-the set of forests over $A$, and  $\isa{tree_forest}(A)$ is the union of
-the previous two sets.  All three operators must be declared first.
-
-The datatype \isa{term}, which is defined by
-\begin{alltt*}\isastyleminor
-consts     term :: "i=>i"
-datatype  "term(A)" = Apply ("a \isasymin A", "l \isasymin list(term(A))")
-  monos list_mono
-  type_elims list_univ [THEN subsetD, elim_format]
-\end{alltt*}
-is an example of nested recursion.  (The theorem \isa{list_mono} is proved
-in theory \isa{List}, and the \isa{term} example is developed in
-theory
-\thydx{Induct/Term}.)
-
-\subsubsection{Freeness of the constructors}
-
-Constructors satisfy {\em freeness} properties.  Constructions are distinct,
-for example $\isa{Nil}\not=\isa{Cons}(a,l)$, and they are injective, for
-example $\isa{Cons}(a,l)=\isa{Cons}(a',l') \bimp a=a' \conj l=l'$.
-Because the number of freeness properties is quadratic in the number of constructors, the
-datatype package does not prove them.  Instead, it ensures that simplification
-will prove them dynamically: when the simplifier encounters a formula
-asserting the equality of two datatype constructors, it performs freeness
-reasoning.  
-
-Freeness reasoning can also be done using the classical reasoner, but it is
-more complicated.  You have to add some safe elimination rules to the
-claset.  For the \isa{list} datatype, they are called
-\isa{list.free_elims}.  Occasionally this exposes the underlying
-representation of some constructor, which can be rectified using the command
-\isa{unfold list.con_defs [symmetric]}.
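-For example, a typical freeness fact about lists can be proved directly from
-these rules (a sketch, in the style of the binary-tree examples below):
-\begin{isabelle}
-\isacommand{lemma}\ "Cons(a,\ l)\ \isasymnoteq\ Nil"\isanewline
-\ \ \isacommand{by}\ (blast\ elim!:\ list.free\_elims)
-\end{isabelle}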
-
-
-\subsubsection{Structural induction}
-
-The datatype package also provides structural induction rules.  For datatypes
-without mutual or nested recursion, the rule has the form exemplified by
-\isa{list.induct} in Fig.\ts\ref{zf-list}.  For mutually recursive
-datatypes, the induction rule is supplied in two forms.  Consider the
-tree/forest datatype declared above.  The rule \isa{tree_forest.induct}
-performs induction over a
-single predicate~\isa{P}, which is presumed to be defined for both trees
-and forests:
-\begin{alltt*}\isastyleminor
-[| x \isasymin tree_forest(A);
-   !!a f. [| a \isasymin A; f \isasymin forest(A); P(f) |] ==> P(Tcons(a, f)); 
-   P(Fnil);
-   !!f t. [| t \isasymin tree(A); P(t); f \isasymin forest(A); P(f) |]
-          ==> P(Fcons(t, f)) 
-|] ==> P(x)
-\end{alltt*}
-The rule \isa{tree_forest.mutual_induct} performs induction over two
-distinct predicates, \isa{P_tree} and \isa{P_forest}.
-\begin{alltt*}\isastyleminor
-[| !!a f.
-      [| a{\isasymin}A; f{\isasymin}forest(A); P_forest(f) |] ==> P_tree(Tcons(a,f));
-   P_forest(Fnil);
-   !!f t. [| t{\isasymin}tree(A); P_tree(t); f{\isasymin}forest(A); P_forest(f) |]
-          ==> P_forest(Fcons(t, f)) 
-|] ==> ({\isasymforall}za. za \isasymin tree(A) --> P_tree(za)) &
-    ({\isasymforall}za. za \isasymin forest(A) --> P_forest(za))
-\end{alltt*}
-
-For datatypes with nested recursion, such as the \isa{term} example from
-above, things are a bit more complicated.  The rule \isa{term.induct}
-refers to the monotonic operator, \isa{list}:
-\begin{alltt*}\isastyleminor
-[| x \isasymin term(A);
-   !!a l. [| a\isasymin{}A; l\isasymin{}list(Collect(term(A), P)) |] ==> P(Apply(a,l)) 
-|] ==> P(x)
-\end{alltt*}
-The theory \isa{Induct/Term.thy} derives two higher-level induction rules,
-one of which is particularly useful for proving equations:
-\begin{alltt*}\isastyleminor
-[| t \isasymin term(A);
-   !!x zs. [| x \isasymin A; zs \isasymin list(term(A)); map(f, zs) = map(g, zs) |]
-           ==> f(Apply(x, zs)) = g(Apply(x, zs)) 
-|] ==> f(t) = g(t)  
-\end{alltt*}
-How this can be generalized to other nested datatypes is a matter for future
-research.
-
-
-\subsubsection{The \isa{case} operator}
-
-The package defines an operator for performing case analysis over the
-datatype.  For \isa{list}, it is called \isa{list_case} and satisfies
-the equations
-\begin{ttbox}\isastyleminor
-list_case(f_Nil, f_Cons, []) = f_Nil
-list_case(f_Nil, f_Cons, Cons(a, l)) = f_Cons(a, l)
-\end{ttbox}
-Here \isa{f_Nil} is the value to return if the argument is \isa{Nil} and
-\isa{f_Cons} is a function that computes the value to return if the
-argument has the form $\isa{Cons}(a,l)$.  The function can be expressed as
-an abstraction, over patterns if desired (\S\ref{sec:pairs}).
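-For example, a head function with default value~0 could be defined in terms
-of \isa{list_case} like this (\isa{hd0} is a hypothetical name, chosen to
-avoid clashing with theory \isa{List}):
-\begin{alltt*}\isastyleminor
-constdefs
-  hd0 :: "i=>i"
-    "hd0(l) == list_case(0, \%a l. a, l)"
-\end{alltt*}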
-
-For mutually recursive datatypes, there is a single \isa{case} operator.
-In the tree/forest example, the constant \isa{tree_forest_case} handles all
-of the constructors of the two datatypes.
-
-
-\subsection{Defining datatypes}
-
-The theory syntax for datatype definitions is shown in the
-Isabelle/Isar reference manual.  In order to be well-formed, a
-datatype definition has to obey the rules stated in the previous
-section.  As a result the theory is extended with the new types, the
-constructors, and the theorems listed in the previous section.
-
-Codatatypes are declared like datatypes and are identical to them in every
-respect except that they have a coinduction rule instead of an induction rule.
-Note that while an induction rule has the effect of limiting the values
-contained in the set, a coinduction rule gives a way of constructing new
-values of the set.
-
-Most of the theorems about datatypes become part of the default simpset.  You
-never need to see them again because the simplifier applies them
-automatically.  
-
-\subsubsection{Specialized methods for datatypes}
-
-Induction and case-analysis can be invoked using these special-purpose
-methods:
-\begin{ttdescription}
-\item[\methdx{induct_tac} $x$] applies structural
-  induction on variable $x$ to subgoal~1, provided the type of $x$ is a
-  datatype.  The induction variable should not occur among other assumptions
-  of the subgoal.
-\end{ttdescription}
-% 
-% we also have the ind_cases method, but what does it do?
-In some situations, induction is overkill and a case distinction over all
-constructors of the datatype suffices.
-\begin{ttdescription}
-\item[\methdx{case_tac} $x$]
- performs a case analysis for the variable~$x$.
-\end{ttdescription}
-
-Both tactics can only be applied to a variable, whose typing must be given in
-some assumption, for example the assumption \isa{x \isasymin \ list(A)}.  The tactics
-also work for the natural numbers (\isa{nat}) and disjoint sums, although
-these sets were not defined using the datatype package.  (Disjoint sums are
-not recursive, so only \isa{case_tac} is available.)
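-For example, a case split over the natural numbers might be performed like
-this (a sketch; \isa{auto} is expected to close both resulting cases):
-\begin{isabelle}
-\isacommand{lemma}\ "n\ \isasymin\ nat\ ==>\ n\ =\ 0\ |\ (\isasymexists m\isasymin nat.\ n\ =\ succ(m))"\isanewline
-\ \ \isacommand{apply}\ (case\_tac\ n)\isanewline
-\ \ \isacommand{apply}\ auto\isanewline
-\ \ \isacommand{done}
-\end{isabelle}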
-
-Structured Isar methods are also available. Below, $t$ 
-stands for the name of the datatype.
-\begin{ttdescription}
-\item[\methdx{induct} \isa{set:}\ $t$] is the Isar induction tactic.
-\item[\methdx{cases} \isa{set:}\ $t$] is the Isar case-analysis tactic.
-\end{ttdescription}
-
-
-\subsubsection{The theorems proved by a datatype declaration}
-
-Here are some more details for the technically minded.  Processing the
-datatype declaration of a set~$t$ produces a name space~$t$ containing
-the following theorems:
-\begin{ttbox}\isastyleminor
-intros          \textrm{the introduction rules}
-cases           \textrm{the case analysis rule}
-induct          \textrm{the standard induction rule}
-mutual_induct   \textrm{the mutual induction rule, if needed}
-case_eqns       \textrm{equations for the case operator}
-recursor_eqns   \textrm{equations for the recursor}
-simps           \textrm{the union of} case_eqns \textrm{and} recursor_eqns
-con_defs        \textrm{definitions of the case operator and constructors}
-free_iffs       \textrm{logical equivalences for proving freeness}
-free_elims      \textrm{elimination rules for proving freeness}
-defs            \textrm{datatype definition(s)}
-\end{ttbox}
-Furthermore there is the theorem $C$ for every constructor~$C$; for
-example, the \isa{list} datatype's introduction rules are bound to the
-identifiers \isa{Nil} and \isa{Cons}.
-
-For a codatatype, the component \isa{coinduct} is the coinduction rule,
-replacing the \isa{induct} component.
-
-See the theories \isa{Induct/Ntree} and \isa{Induct/Brouwer} for examples of
-infinitely branching datatypes.  See theory \isa{Induct/LList} for an example
-of a codatatype.  Some of these theories illustrate the use of additional,
-undocumented features of the datatype package.  Datatype definitions are
-reduced to inductive definitions, and the advanced features should be
-understood in that light.
-
-
-\subsection{Examples}
-
-\subsubsection{The datatype of binary trees}
-
-Let us define the set $\isa{bt}(A)$ of binary trees over~$A$.  The theory
-must contain these lines:
-\begin{alltt*}\isastyleminor
-consts   bt :: "i=>i"
-datatype "bt(A)" = Lf | Br ("a\isasymin{}A", "t1\isasymin{}bt(A)", "t2\isasymin{}bt(A)")
-\end{alltt*}
-After loading the theory, we can prove some theorems.
-We begin by declaring the constructor's typechecking rules
-as simplification rules:
-\begin{isabelle}
-\isacommand{declare}\ bt.intros\ [simp]%
-\end{isabelle}
-
-Our first example is the theorem that no tree equals its
-left branch.  To make the inductive hypothesis strong enough, 
-the proof requires a quantified induction formula, but 
-the \isa{rule\_format} attribute will remove the quantifiers 
-before the theorem is stored.
-\begin{isabelle}
-\isacommand{lemma}\ Br\_neq\_left\ [rule\_format]:\ "l\isasymin bt(A)\ ==>\ \isasymforall x\ r.\ Br(x,l,r)\isasymnoteq{}l"\isanewline
-\ 1.\ l\ \isasymin \ bt(A)\ \isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l%
-\end{isabelle}
-This can be proved by the structural induction tactic:
-\begin{isabelle}
-\ \ \isacommand{apply}\ (induct\_tac\ l)\isanewline
-\ 1.\ \isasymforall x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
-\ 2.\ \isasymAnd a\ t1\ t2.\isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymforall x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
-\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymforall x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
-\end{isabelle}
-Both subgoals are proved using \isa{auto}, which performs the necessary
-freeness reasoning. 
-\begin{isabelle}
-\ \ \isacommand{apply}\ auto\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-
-An alternative proof uses Isar's fancy \isa{induct} method, which 
-automatically quantifies over all free variables:
-
-\begin{isabelle}
-\isacommand{lemma}\ Br\_neq\_left':\ "l\ \isasymin \ bt(A)\ ==>\ (!!x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l)"\isanewline
-\ \ \isacommand{apply}\ (induct\ set:\ bt)\isanewline
-\ 1.\ \isasymAnd x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
-\ 2.\ \isasymAnd a\ t1\ t2\ x\ r.\isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymAnd x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
-\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymAnd x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
-\end{isabelle}
-Compare the form of the induction hypotheses with the corresponding ones in
-the previous proof. As before, to conclude requires only \isa{auto}.
-
-When there are only a few constructors, we might prefer to prove the freeness
-theorems for each constructor.  This is simple:
-\begin{isabelle}
-\isacommand{lemma}\ Br\_iff:\ "Br(a,l,r)\ =\ Br(a',l',r')\ <->\ a=a'\ \&\ l=l'\ \&\ r=r'"\isanewline
-\ \ \isacommand{by}\ (blast\ elim!:\ bt.free\_elims)
-\end{isabelle}
-Here we see a demonstration of freeness reasoning using
-\isa{bt.free\_elims}, but simpler still is just to apply \isa{auto}.
-
-An \ttindex{inductive\_cases} declaration generates instances of the
-case analysis rule that have been simplified  using freeness
-reasoning. 
-\begin{isabelle}
-\isacommand{inductive\_cases}\ Br\_in\_bt:\ "Br(a,\ l,\ r)\ \isasymin \ bt(A)"
-\end{isabelle}
-The theorem just created is 
-\begin{isabelle}
-\isasymlbrakk Br(a,\ l,\ r)\ \isasymin \ bt(A);\ \isasymlbrakk a\ \isasymin \ A;\ l\ \isasymin \ bt(A);\ r\ \isasymin \ bt(A)\isasymrbrakk \ \isasymLongrightarrow \ Q\isasymrbrakk \ \isasymLongrightarrow \ Q.
-\end{isabelle}
-It is an elimination rule that from $\isa{Br}(a,l,r)\in\isa{bt}(A)$
-lets us infer $a\in A$, $l\in\isa{bt}(A)$ and
-$r\in\isa{bt}(A)$.
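-For instance, one of these consequences can be extracted with \isa{erule}
-(a sketch):
-\begin{isabelle}
-\isacommand{lemma}\ "Br(a,\ l,\ r)\ \isasymin\ bt(A)\ ==>\ l\ \isasymin\ bt(A)"\isanewline
-\ \ \isacommand{by}\ (erule\ Br\_in\_bt,\ assumption)
-\end{isabelle}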
-
-
-\subsubsection{Mixfix syntax in datatypes}
-
-Mixfix syntax is sometimes convenient.  The theory \isa{Induct/PropLog} makes a
-deep embedding of propositional logic:
-\begin{alltt*}\isastyleminor
-consts     prop :: i
-datatype  "prop" = Fls
-                 | Var ("n \isasymin nat")                ("#_" [100] 100)
-                 | "=>" ("p \isasymin prop", "q \isasymin prop")   (infixr 90)
-\end{alltt*}
-The second constructor has a special $\#n$ syntax, while the third constructor
-is an infixed arrow.
-
-
-\subsubsection{A giant enumeration type}
-
-This example shows a datatype that consists of 60 constructors:
-\begin{alltt*}\isastyleminor
-consts  enum :: i
-datatype
-  "enum" = C00 | C01 | C02 | C03 | C04 | C05 | C06 | C07 | C08 | C09
-         | C10 | C11 | C12 | C13 | C14 | C15 | C16 | C17 | C18 | C19
-         | C20 | C21 | C22 | C23 | C24 | C25 | C26 | C27 | C28 | C29
-         | C30 | C31 | C32 | C33 | C34 | C35 | C36 | C37 | C38 | C39
-         | C40 | C41 | C42 | C43 | C44 | C45 | C46 | C47 | C48 | C49
-         | C50 | C51 | C52 | C53 | C54 | C55 | C56 | C57 | C58 | C59
-end
-\end{alltt*}
-The datatype package scales well.  Even though all properties are proved
-rather than assumed, full processing of this definition takes around two seconds
-(on a 1.8GHz machine).  The constructors have a balanced representation,
-related to binary notation, so freeness properties can be proved fast.
-\begin{isabelle}
-\isacommand{lemma}\ "C00 \isasymnoteq\ C01"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}
-You need not derive such inequalities explicitly.  The simplifier will
-dispose of them automatically.
-
-\index{*datatype|)}
-
-
-\subsection{Recursive function definitions}\label{sec:ZF:recursive}
-\index{recursive functions|see{recursion}}
-\index{*primrec|(}
-\index{recursion!primitive|(}
-
-Datatypes come with a uniform way of defining functions, {\bf primitive
-  recursion}.  Such definitions rely on the recursion operator defined by the
-datatype package.  Isabelle proves the desired recursion equations as
-theorems.
-
-In principle, one could introduce primitive recursive functions by asserting
-their reduction rules as axioms.  Here is a dangerous way of defining a
-recursive function over binary trees:
-\begin{isabelle}
-\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
-\isacommand{axioms}\isanewline
-\ \ n\_nodes\_Lf:\ "n\_nodes(Lf)\ =\ 0"\isanewline
-\ \ n\_nodes\_Br:\ "n\_nodes(Br(a,l,r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
-\end{isabelle}
-Asserting axioms brings the danger of accidentally introducing
-contradictions.  It should be avoided whenever possible.
-
-The \ttindex{primrec} declaration is a safe means of defining primitive
-recursive functions on datatypes:
-\begin{isabelle}
-\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
-\isacommand{primrec}\isanewline
-\ \ "n\_nodes(Lf)\ =\ 0"\isanewline
-\ \ "n\_nodes(Br(a,\ l,\ r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
-\end{isabelle}
-Isabelle will now derive the two equations from a low-level definition  
-based upon well-founded recursion.  If they do not define a legitimate
-recursion, then Isabelle will reject the declaration.
-
-
-\subsubsection{Syntax of recursive definitions}
-
-The general form of a primitive recursive definition is
-\begin{ttbox}\isastyleminor
-primrec
-    {\it reduction rules}
-\end{ttbox}
-where \textit{reduction rules} specify one or more equations of the form
-\[ f \, x@1 \, \dots \, x@m \, (C \, y@1 \, \dots \, y@k) \, z@1 \,
-\dots \, z@n = r \] such that $C$ is a constructor of the datatype, $r$
-contains only the free variables on the left-hand side, and all recursive
-calls in $r$ are of the form $f \, \dots \, y@i \, \dots$ for some $i$.  
-There must be at most one reduction rule for each constructor.  The order is
-immaterial.  For missing constructors, the function is defined to return zero.
-
-All reduction rules are added to the default simpset.
-If you would like to refer to some rule by name, then you must prefix
-the rule with an identifier.  These identifiers, like those in the
-\isa{rules} section of a theory, will be visible in proof scripts.
-
-The reduction rules become part of the default simpset, which
-leads to short proof scripts:
-\begin{isabelle}
-\isacommand{lemma}\ n\_nodes\_type\ [simp]:\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes(t)\ \isasymin \ nat"\isanewline
-\ \ \isacommand{by}\ (induct\_tac\ t,\ auto)
-\end{isabelle}
-
-You can even use the \isa{primrec} form with non-recursive datatypes and
-with codatatypes.  Recursion is not allowed, but it provides a convenient
-syntax for defining functions by cases.
-
-
-\subsubsection{Example: varying arguments}
-
-All arguments, other than the recursive one, must be the same in each equation
-and in each recursive call.  To get around this restriction, use explicit
-$\lambda$-abstraction and function application.  For example, let us
-define the tail-recursive version of \isa{n\_nodes}, using an 
-accumulating argument for the counter.  The second argument, $k$, varies in
-recursive calls.
-\begin{isabelle}
-\isacommand{consts}\ \ n\_nodes\_aux\ ::\ "i\ =>\ i"\isanewline
-\isacommand{primrec}\isanewline
-\ \ "n\_nodes\_aux(Lf)\ =\ (\isasymlambda k\ \isasymin \ nat.\ k)"\isanewline
-\ \ "n\_nodes\_aux(Br(a,l,r))\ =\ \isanewline
-\ \ \ \ \ \ (\isasymlambda k\ \isasymin \ nat.\ n\_nodes\_aux(r)\ `\ \ (n\_nodes\_aux(l)\ `\ succ(k)))"
-\end{isabelle}
-Now \isa{n\_nodes\_aux(t)\ `\ k} is our function in two arguments. We
-can prove a theorem relating it to \isa{n\_nodes}. Note the quantification
-over \isa{k\ \isasymin \ nat}:
-\begin{isabelle}
-\isacommand{lemma}\ n\_nodes\_aux\_eq\ [rule\_format]:\isanewline
-\ \ \ \ \ "t\ \isasymin \ bt(A)\ ==>\ \isasymforall k\ \isasymin \ nat.\ n\_nodes\_aux(t)`k\ =\ n\_nodes(t)\ \#+\ k"\isanewline
-\ \ \isacommand{by}\ (induct\_tac\ t,\ simp\_all)
-\end{isabelle}
-
-Now, we can use \isa{n\_nodes\_aux} to define a tail-recursive version
-of \isa{n\_nodes}:
-\begin{isabelle}
-\isacommand{constdefs}\isanewline
-\ \ n\_nodes\_tail\ ::\ "i\ =>\ i"\isanewline
-\ \ \ "n\_nodes\_tail(t)\ ==\ n\_nodes\_aux(t)\ `\ 0"
-\end{isabelle}
-It is easy to
-prove that \isa{n\_nodes\_tail} is equivalent to \isa{n\_nodes}:
-\begin{isabelle}
-\isacommand{lemma}\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes\_tail(t)\ =\ n\_nodes(t)"\isanewline
-\ \isacommand{by}\ (simp\ add:\ n\_nodes\_tail\_def\ n\_nodes\_aux\_eq)
-\end{isabelle}
-
-
-
-
-\index{recursion!primitive|)}
-\index{*primrec|)}
-
-
-\section{Inductive and coinductive definitions}
-\index{*inductive|(}
-\index{*coinductive|(}
-
-An {\bf inductive definition} specifies the least set~$R$ closed under given
-rules.  (Applying a rule to elements of~$R$ yields a result within~$R$.)  For
-example, a structural operational semantics is an inductive definition of an
-evaluation relation.  Dually, a {\bf coinductive definition} specifies the
-greatest set~$R$ consistent with given rules.  (Every element of~$R$ can be
-seen as arising by applying a rule to elements of~$R$.)  An important example
-is using bisimulation relations to formalise equivalence of processes and
-infinite data structures.
-
-A theory file may contain any number of inductive and coinductive
-definitions.  They may be intermixed with other declarations; in
-particular, the (co)inductive sets {\bf must} be declared separately as
-constants, and may have mixfix syntax or be subject to syntax translations.
-
-Each (co)inductive definition adds definitions to the theory and also
-proves some theorems.  A coinductive definition behaves identically to the
-analogous inductive definition, except that it yields a coinduction rule
-instead of an induction rule.  Its treatment of coinduction is described in
-detail in a separate paper,%
-\footnote{It appeared in CADE~\cite{paulson-CADE}; a longer version is
-  distributed with Isabelle as \emph{A Fixedpoint Approach to 
- (Co)Inductive and (Co)Datatype Definitions}.}  %
-which you might refer to for background information.
-
-
-\subsection{The syntax of a (co)inductive definition}
-An inductive definition has the form
-\begin{ttbox}\isastyleminor
-inductive
-  domains     {\it domain declarations}
-  intros      {\it introduction rules}
-  monos       {\it monotonicity theorems}
-  con_defs    {\it constructor definitions}
-  type_intros {\it introduction rules for type-checking}
-  type_elims  {\it elimination rules for type-checking}
-\end{ttbox}
-A coinductive definition is identical, but starts with the keyword
-\isa{co\-inductive}.  
-
-The \isa{monos}, \isa{con\_defs}, \isa{type\_intros} and \isa{type\_elims}
-sections are optional.  If present, each is specified as a list of
-theorems, which may contain Isar attributes as usual.
-
-\begin{description}
-\item[\it domain declarations] are items of the form
-  {\it string\/}~\isa{\isasymsubseteq }~{\it string}, associating each recursive set with
-  its domain.  (The domain is some existing set that is large enough to
-  hold the new set being defined.)
-
-\item[\it introduction rules] specify one or more introduction rules in
-  the form {\it ident\/}~{\it string}, where the identifier gives the name of
-  the rule in the result structure.
-
-\item[\it monotonicity theorems] are required for each operator applied to
-  a recursive set in the introduction rules.  There \textbf{must} be a theorem
-  of the form $A\subseteq B\Imp M(A)\subseteq M(B)$, for each premise $t\in M(R_i)$
-  in an introduction rule!
-
-\item[\it constructor definitions] contain definitions of constants
-  appearing in the introduction rules.  The (co)datatype package supplies
-  the constructors' definitions here.  Most (co)inductive definitions omit
-  this section; one exception is the primitive recursive functions example;
-  see theory \isa{Induct/Primrec}.
-  
-\item[\it type\_intros] consists of introduction rules for type-checking the
-  definition: for demonstrating that the new set is included in its domain.
-  (The proof uses depth-first search.)
-
-\item[\it type\_elims] consists of elimination rules for type-checking the
-  definition.  They are presumed to be safe and are applied as often as
-  possible prior to the \isa{type\_intros} search.
-\end{description}
-
-The package has a few restrictions:
-\begin{itemize}
-\item The theory must separately declare the recursive sets as
-  constants.
-
-\item The names of the recursive sets must be identifiers, not infix
-operators.  
-
-\item Side-conditions must not be conjunctions.  However, an introduction rule
-may contain any number of side-conditions.
-
-\item Side-conditions of the form $x=t$, where the variable~$x$ does not
-  occur in~$t$, will be substituted through the rule \isa{mutual\_induct}.
-\end{itemize}
-
-
-\subsection{Example of an inductive definition}
-
-Below, we shall see how Isabelle/ZF defines the finite powerset
-operator.  The first step is to declare the constant~\isa{Fin}.  Then we
-must declare it inductively, with two introduction rules:
-\begin{isabelle}
-\isacommand{consts}\ \ Fin\ ::\ "i=>i"\isanewline
-\isacommand{inductive}\isanewline
-\ \ \isakeyword{domains}\ \ \ "Fin(A)"\ \isasymsubseteq\ "Pow(A)"\isanewline
-\ \ \isakeyword{intros}\isanewline
-\ \ \ \ emptyI:\ \ "0\ \isasymin\ Fin(A)"\isanewline
-\ \ \ \ consI:\ \ \ "[|\ a\ \isasymin\ A;\ \ b\ \isasymin\ Fin(A)\ |]\ ==>\ cons(a,b)\ \isasymin\ Fin(A)"\isanewline
-\ \ \isakeyword{type\_intros}\ \ empty\_subsetI\ cons\_subsetI\ PowI\isanewline
-\ \ \isakeyword{type\_elims}\ \ \ PowD\ [THEN\ revcut\_rl]\end{isabelle}
-The resulting theory contains a name space, called~\isa{Fin}.
-The \isa{Fin}$~A$ introduction rules can be referred to collectively as
-\isa{Fin.intros}, and also individually as \isa{Fin.emptyI} and
-\isa{Fin.consI}.  The induction rule is \isa{Fin.induct}.
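-Rule induction over finite sets then works in the usual way; for example
-(a sketch, with \isa{blast} expected to close both cases):
-\begin{isabelle}
-\isacommand{lemma}\ "b\ \isasymin\ Fin(A)\ ==>\ b\ \isasymsubseteq\ A"\isanewline
-\ \ \isacommand{by}\ (erule\ Fin.induct,\ blast+)
-\end{isabelle}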
-
-The chief problem with making (co)inductive definitions involves type-checking
-the rules.  Sometimes, additional theorems need to be supplied under
-\isa{type_intros} or \isa{type_elims}.  If the package fails when trying
-to prove your introduction rules, then set the flag \ttindexbold{trace_induct}
-to \isa{true} and try again.  (See the manual \emph{A Fixedpoint Approach
-  \ldots} for more discussion of type-checking.)
-
-In the example above, $\isa{Pow}(A)$ is given as the domain of
-$\isa{Fin}(A)$, for obviously every finite subset of~$A$ is a subset
-of~$A$.  However, the inductive definition package can only prove that given a
-few hints.
-Here is the output that results (with the flag set) when the
-\isa{type_intros} and \isa{type_elims} are omitted from the inductive
-definition above:
-\begin{alltt*}\isastyleminor
-Inductive definition Finite.Fin
-Fin(A) ==
-lfp(Pow(A),
-    \%X. {z\isasymin{}Pow(A) . z = 0 | ({\isasymexists}a b. z = cons(a,b) & a\isasymin{}A & b\isasymin{}X)})
-  Proving monotonicity...
-\ttbreak
-  Proving the introduction rules...
-The type-checking subgoal:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-\ttbreak
-The subgoal after monos, type_elims:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-*** prove_goal: tactic failed
-\end{alltt*}
-We see the need to supply theorems to let the package prove
-$\emptyset\in\isa{Pow}(A)$.  Restoring the \isa{type_intros} but not the
-\isa{type_elims}, we again get an error message:
-\begin{alltt*}\isastyleminor
-The type-checking subgoal:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-\ttbreak
-The subgoal after monos, type_elims:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-\ttbreak
-The type-checking subgoal:
-cons(a, b) \isasymin Fin(A)
- 1. [| a \isasymin A; b \isasymin Fin(A) |] ==> cons(a, b) \isasymin Pow(A)
-\ttbreak
-The subgoal after monos, type_elims:
-cons(a, b) \isasymin Fin(A)
- 1. [| a \isasymin A; b \isasymin Pow(A) |] ==> cons(a, b) \isasymin Pow(A)
-*** prove_goal: tactic failed
-\end{alltt*}
-The first rule has been type-checked, but the second one has failed.  The
-simplest solution to such problems is to prove the failed subgoal separately
-and to supply it under \isa{type_intros}.  The solution actually used is
-to supply, under \isa{type_elims}, a rule that changes
-$b\in\isa{Pow}(A)$ to $b\subseteq A$; together with \isa{cons_subsetI}
-and \isa{PowI}, it is enough to complete the type-checking.
-
-
-
-\subsection{Further examples}
-
-An inductive definition may involve arbitrary monotonic operators.  Here is a
-standard example: the accessible part of a relation.  Note the use
-of~\isa{Pow} in the introduction rule and the corresponding mention of the
-rule \isa{Pow\_mono} in the \isa{monos} list.  If the desired rule has a
-universally quantified premise, usually the effect can be obtained using
-\isa{Pow}.
-\begin{isabelle}
-\isacommand{consts}\ \ acc\ ::\ "i\ =>\ i"\isanewline
-\isacommand{inductive}\isanewline
-\ \ \isakeyword{domains}\ "acc(r)"\ \isasymsubseteq \ "field(r)"\isanewline
-\ \ \isakeyword{intros}\isanewline
-\ \ \ \ vimage:\ \ "[|\ r-``\isacharbraceleft a\isacharbraceright\ \isasymin\ Pow(acc(r));\ a\ \isasymin \ field(r)\ |]
-\isanewline
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ ==>\ a\ \isasymin \ acc(r)"\isanewline
-\ \ \isakeyword{monos}\ \ Pow\_mono
-\end{isabelle}
-
-Finally, here are some coinductive definitions.  We begin by defining
-lazy (potentially infinite) lists as a codatatype:
-\begin{isabelle}
-\isacommand{consts}\ \ llist\ \ ::\ "i=>i"\isanewline
-\isacommand{codatatype}\isanewline
-\ \ "llist(A)"\ =\ LNil\ |\ LCons\ ("a\ \isasymin \ A",\ "l\ \isasymin \ llist(A)")\isanewline
-\end{isabelle}
-
-The notion of equality on such lists is modelled as a bisimulation:
-\begin{isabelle}
-\isacommand{consts}\ \ lleq\ ::\ "i=>i"\isanewline
-\isacommand{coinductive}\isanewline
-\ \ \isakeyword{domains}\ "lleq(A)"\ <=\ "llist(A)\ *\ llist(A)"\isanewline
-\ \ \isakeyword{intros}\isanewline
-\ \ \ \ LNil:\ \ "<LNil,\ LNil>\ \isasymin \ lleq(A)"\isanewline
-\ \ \ \ LCons:\ "[|\ a\ \isasymin \ A;\ <l,l'>\ \isasymin \ lleq(A)\ |]\ \isanewline
-\ \ \ \ \ \ \ \ \ \ \ \ ==>\ <LCons(a,l),\ LCons(a,l')>\ \isasymin \ lleq(A)"\isanewline
-\ \ \isakeyword{type\_intros}\ \ llist.intros
-\end{isabelle}
-This use of \isa{type_intros} is typical: the relation concerns the
-codatatype \isa{llist}, so naturally the introduction rules for that
-codatatype will be required for type-checking the rules.
-
-The Isabelle distribution contains many other inductive definitions.  Simple
-examples are collected on subdirectory \isa{ZF/Induct}.  The directory
-\isa{Coind} and the theory \isa{ZF/Induct/LList} contain coinductive
-definitions.  Larger examples may be found on other subdirectories of
-\isa{ZF}, such as \isa{IMP}, and \isa{Resid}.
-
-
-\subsection{Theorems generated}
-
-Each (co)inductive set defined in a theory file generates a name space
-containing the following elements:
-\begin{ttbox}\isastyleminor
-intros        \textrm{the introduction rules}
-elim          \textrm{the elimination (case analysis) rule}
-induct        \textrm{the standard induction rule}
-mutual_induct \textrm{the mutual induction rule, if needed}
-defs          \textrm{definitions of inductive sets}
-bnd_mono      \textrm{monotonicity property}
-dom_subset    \textrm{inclusion in `bounding set'}
-\end{ttbox}
-Furthermore, each introduction rule is available under its declared
-name.  For a coinductive definition, the component \isa{coinduct} is the
-coinduction rule, replacing the \isa{induct} component.
-
-Recall that the \ttindex{inductive\_cases} declaration generates
-simplified instances of the case analysis rule.  It is as useful for
-inductive definitions as it is for datatypes.  There are many examples
-in the theory
-\isa{Induct/Comb}, which is discussed at length
-elsewhere~\cite{paulson-generic}.  The theory first defines the
-datatype
-\isa{comb} of combinators:
-\begin{alltt*}\isastyleminor
-consts comb :: i
-datatype  "comb" = K
-                 | S
-                 | "#" ("p \isasymin comb", "q \isasymin comb")   (infixl 90)
-\end{alltt*}
-The theory goes on to define contraction and parallel contraction
-inductively.  Then the theory \isa{Induct/Comb.thy} defines special
-cases of contraction, such as this one:
-\begin{isabelle}
-\isacommand{inductive\_cases}\ K\_contractE [elim!]:\ "K -1-> r"
-\end{isabelle}
-The theorem just created is \isa{K -1-> r \ \isasymLongrightarrow \ Q},
-which expresses that the combinator \isa{K} cannot reduce to
-anything.  (From the assumption \isa{K-1->r}, we can conclude any desired
-formula \isa{Q}\@.)  Similar elimination rules for \isa{S} and application are also
-generated. The attribute \isa{elim!}\ shown above supplies the generated
-theorem to the classical reasoner.  This mode of working allows
-effective reasoning about operational semantics.
-
-\index{*coinductive|)} \index{*inductive|)}
-
-
-
-\section{The outer reaches of set theory}
-
-The constructions of the natural numbers and lists use a suite of
-operators for handling recursive function definitions.  I have described
-the developments in detail elsewhere~\cite{paulson-set-II}.  Here is a brief
-summary:
-\begin{itemize}
-  \item Theory \isa{Trancl} defines the transitive closure of a relation
-    (as a least fixedpoint).
-
-  \item Theory \isa{WF} proves the well-founded recursion theorem, using an
-    elegant approach of Tobias Nipkow.  This theorem permits general
-    recursive definitions within set theory.
-
-  \item Theory \isa{Ord} defines the notions of transitive set and ordinal
-    number.  It derives transfinite induction.  A key definition is {\bf
-      less than}: $i<j$ if and only if $i$ and $j$ are both ordinals and
-    $i\in j$.  As a special case, it includes less than on the natural
-    numbers.
-    
-  \item Theory \isa{Epsilon} derives $\varepsilon$-induction and
-    $\varepsilon$-recursion, which are generalisations of transfinite
-    induction and recursion.  It also defines \cdx{rank}$(x)$, which is the
-    least ordinal $\alpha$ such that $x$ is constructed at stage $\alpha$ of
-    the cumulative hierarchy (thus $x\in V@{\alpha+1}$).
-\end{itemize}
-
-Other important theories lead to a theory of cardinal numbers.  They have
-not yet been written up anywhere.  Here is a summary:
-\begin{itemize}
-\item Theory \isa{Rel} defines the basic properties of relations, such as
-  reflexivity, symmetry and transitivity.
-
-\item Theory \isa{EquivClass} develops a theory of equivalence
-  classes, not using the Axiom of Choice.
-
-\item Theory \isa{Order} defines partial orderings, total orderings and
-  wellorderings.
-
-\item Theory \isa{OrderArith} defines orderings on sum and product sets.
-  These can be used to define ordinal arithmetic and have applications to
-  cardinal arithmetic.
-
-\item Theory \isa{OrderType} defines order types.  Every wellordering is
-  equivalent to a unique ordinal, which is its order type.
-
-\item Theory \isa{Cardinal} defines equipollence and cardinal numbers.
- 
-\item Theory \isa{CardinalArith} defines cardinal addition and
-  multiplication, and proves their elementary laws.  It proves that there
-  is no greatest cardinal.  It also proves a deep result, namely
-  $\kappa\otimes\kappa=\kappa$ for every infinite cardinal~$\kappa$; see
-  Kunen~\cite[page 29]{kunen80}.  None of these results assume the Axiom of
-  Choice, which complicates their proofs considerably.  
-\end{itemize}
-
-The following developments involve the Axiom of Choice (AC):
-\begin{itemize}
-\item Theory \isa{AC} asserts the Axiom of Choice and proves some simple
-  equivalent forms.
-
-\item Theory \isa{Zorn} proves Hausdorff's Maximal Principle, Zorn's Lemma
-  and the Wellordering Theorem, following Abrial and
-  Laffitte~\cite{abrial93}.
-
-\item Theory \isa{Cardinal\_AC} uses AC to prove simplified theorems about
-  the cardinals.  It also proves a theorem needed to justify
-  infinitely branching datatype declarations: if $\kappa$ is an infinite
-  cardinal and $|X(\alpha)| \le \kappa$ for all $\alpha<\kappa$ then
-  $|\union\sb{\alpha<\kappa} X(\alpha)| \le \kappa$.
-
-\item Theory \isa{InfDatatype} proves theorems to justify infinitely
-  branching datatypes.  Arbitrary index sets are allowed, provided their
-  cardinalities have an upper bound.  The theory also justifies some
-  unusual cases of finite branching, involving the finite powerset operator
-  and the finite function space operator.
-\end{itemize}
-
-
-
-\section{The examples directories}
-Directory \isa{ZF/IMP} contains a mechanised version of a semantic
-equivalence proof taken from Winskel~\cite{winskel93}.  It formalises the
-denotational and operational semantics of a simple while-language, then
-proves the two equivalent.  It contains several datatype and inductive
-definitions, and demonstrates their use.
-
-The directory \isa{ZF/ex} contains further developments in ZF set theory.
-Here is an overview; see the files themselves for more details.  I describe
-much of this material in other
-publications~\cite{paulson-set-I,paulson-set-II,paulson-fixedpt-milner}.
-\begin{itemize}
-\item File \isa{misc.ML} contains miscellaneous examples such as
-  Cantor's Theorem, the Schr\"oder-Bernstein Theorem and the `Composition
-  of homomorphisms' challenge~\cite{boyer86}.
-
-\item Theory \isa{Ramsey} proves the finite exponent 2 version of
-  Ramsey's Theorem, following Basin and Kaufmann's
-  presentation~\cite{basin91}.
-
-\item Theory \isa{Integ} develops a theory of the integers as
-  equivalence classes of pairs of natural numbers.
-
-\item Theory \isa{Primrec} develops some computation theory.  It
-  inductively defines the set of primitive recursive functions and presents a
-  proof that Ackermann's function is not primitive recursive.
-
-\item Theory \isa{Primes} defines the Greatest Common Divisor of two
-  natural numbers and the ``divides'' relation.
-
-\item Theory \isa{Bin} defines a datatype for two's complement binary
-  integers, then proves rewrite rules to perform binary arithmetic.  For
-  instance, $1359\times {-}2468 = {-}3354012$ takes 0.3 seconds.
-
-\item Theory \isa{BT} defines the recursive data structure $\isa{bt}(A)$, labelled binary trees.
-
-\item Theory \isa{Term} defines a recursive data structure for terms
-  and term lists.  These are simply finite branching trees.
-
-\item Theory \isa{TF} defines primitives for solving mutually
-  recursive equations over sets.  It constructs sets of trees and forests
-  as an example, including induction and recursion rules that handle the
-  mutual recursion.
-
-\item Theory \isa{Prop} proves soundness and completeness of
-  propositional logic~\cite{paulson-set-II}.  This illustrates datatype
-  definitions, inductive definitions, structural induction and rule
-  induction.
-
-\item Theory \isa{ListN} inductively defines the lists of $n$
-  elements~\cite{paulin-tlca}.
-
-\item Theory \isa{Acc} inductively defines the accessible part of a
-  relation~\cite{paulin-tlca}.
-
-\item Theory \isa{Comb} defines the datatype of combinators and
-  inductively defines contraction and parallel contraction.  It goes on to
-  prove the Church-Rosser Theorem.  This case study follows Camilleri and
-  Melham~\cite{camilleri92}.
-
-\item Theory \isa{LList} defines lazy lists and a coinduction
-  principle for proving equations between them.
-\end{itemize}
-
-
-\section{A proof about powersets}\label{sec:ZF-pow-example}
-To demonstrate high-level reasoning about subsets, let us prove the
-equation ${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.  Compared
-with first-order logic, set theory involves a maze of rules, and theorems
-have many different proofs.  Attempting other proofs of the theorem might
-be instructive.  This proof exploits the lattice properties of
-intersection.  It also uses the monotonicity of the powerset operation,
-from \isa{ZF/mono.ML}:
-\begin{isabelle}
-\tdx{Pow_mono}:     A \isasymsubseteq B ==> Pow(A) \isasymsubseteq Pow(B)
-\end{isabelle}
-We enter the goal and make the first step, which breaks the equation into
-two inclusions by extensionality:\index{*equalityI theorem}
-\begin{isabelle}
-\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ =\ Pow(A)\ \isasyminter \ Pow(B)\isanewline
-\isacommand{apply}\ (rule\ equalityI)\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\ \isasyminter \ Pow(B)\isanewline
-\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-Both inclusions could be tackled straightforwardly using \isa{subsetI}.
-A shorter proof results from noting that intersection forms the greatest
-lower bound:\index{*Int_greatest theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\isanewline
-\ 2.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
-\ 3.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-Subgoal~1 follows by applying the monotonicity of \isa{Pow} to $A\int
-B\subseteq A$; subgoal~2 follows similarly:
-\index{*Int_lower1 theorem}\index{*Int_lower2 theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ Int\_lower1\ [THEN\ Pow\_mono])\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
-\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\isanewline
-\isacommand{apply}\ (rule\ Int\_lower2\ [THEN\ Pow\_mono])\isanewline
-\ 1.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-We are left with the opposite inclusion, which we tackle in the
-straightforward way:\index{*subsetI theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ subsetI)\isanewline
-\ 1.\ \isasymAnd x.\ x\ \isasymin \ Pow(A)\ \isasyminter \ Pow(B)\ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-The subgoal is to show $x\in \isa{Pow}(A\cap B)$ assuming $x\in\isa{Pow}(A)\cap \isa{Pow}(B)$; eliminating this assumption replaces it by the two
-assumptions $x\in\isa{Pow}(A)$ and $x\in\isa{Pow}(B)$.  The rule \tdx{IntE}
-treats the intersection like a conjunction instead of unfolding its
-definition.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ IntE)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-The next step replaces the \isa{Pow} by the subset
-relation~($\subseteq$).\index{*PowI theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ PowI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
-\end{isabelle}
-We perform the same replacement in the assumptions.  This is a good
-demonstration of the method \ttindex{drule}:\index{*PowD theorem}
-\begin{isabelle}
-\isacommand{apply}\ (drule\ PowD)+\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
-\end{isabelle}
-The assumptions are that $x$ is a lower bound of both $A$ and~$B$, but
-$A\int B$ is the greatest lower bound:\index{*Int_greatest theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\isanewline
-\ 2.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ B%
-\end{isabelle}
-To conclude the proof, we clear up the trivial subgoals:
-\begin{isabelle}
-\isacommand{apply}\ (assumption+)\isanewline
-\isacommand{done}%
-\end{isabelle}
-
-We could have performed this proof instantly by calling
-\ttindex{blast}:
-\begin{isabelle}
-\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
-\isacommand{by}
-\end{isabelle}
-Past researchers regarded this as a difficult proof, as indeed it is if all
-the symbols are replaced by their definitions.
-\goodbreak
-
-\section{Monotonicity of the union operator}
-For another example, we prove that general union is monotonic:
-${C\subseteq D}$ implies $\bigcup(C)\subseteq \bigcup(D)$.  To begin, we
-tackle the inclusion using \tdx{subsetI}:
-\begin{isabelle}
-\isacommand{lemma}\ "C\isasymsubseteq D\ ==>\ Union(C)\
-\isasymsubseteq \ Union(D)"\isanewline
-\isacommand{apply}\ (rule\ subsetI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ \isasymUnion C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
-\end{isabelle}
-Big union is like an existential quantifier --- the occurrence in the
-assumptions must be eliminated early, since it creates parameters.
-\index{*UnionE theorem}
-\begin{isabelle}
-\isacommand{apply}\ (erule\ UnionE)\isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
-\end{isabelle}
-Now we may apply \tdx{UnionI}, which creates an unknown involving the
-parameters.  To show \isa{x\ \isasymin \ \isasymUnion D} it suffices to show that~\isa{x} belongs
-to some element, say~\isa{?B2(x,B)}, of~\isa{D}\@.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ UnionI)\isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ D\isanewline
-\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
-\end{isabelle}
-Combining the rule \tdx{subsetD} with the assumption \isa{C\ \isasymsubseteq \ D} yields 
-$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1.  Note that
-\isa{erule} removes the subset assumption.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ subsetD)\isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ C\isanewline
-\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
-\end{isabelle}
-The rest is routine.  Observe how the first call to \isa{assumption}
-instantiates \isa{?B2(x,B)} to~\isa{B}\@.
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ B%
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-No\ subgoals!\isanewline
-\isacommand{done}%
-\end{isabelle}
-Again, \isa{blast} can prove this theorem in one step.
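-For instance, restating the lemma and letting \isa{blast} do all the work:
-\begin{isabelle}
-\isacommand{lemma}\ "C\isasymsubseteq D\ ==>\ Union(C)\ \isasymsubseteq \ Union(D)"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}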
-
-The theory \isa{ZF/equalities.thy} has many similar proofs.  Reasoning about
-general intersection can be difficult because of its anomalous behaviour on
-the empty set.  However, \isa{blast} copes well with such goals.  Here is
-a typical example, borrowed from Devlin~\cite[page 12]{devlin79}:
-\[ a\in C \,\Imp\, \inter@{x\in C} \Bigl(A(x) \int B(x)\Bigr) =        
-       \Bigl(\inter@{x\in C} A(x)\Bigr)  \int  
-       \Bigl(\inter@{x\in C} B(x)\Bigr)  \]
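-In Isar such a goal can be stated and discharged in a single step.  The
-following is only a sketch; the corresponding lemma in
-\isa{ZF/equalities.thy} may be stated slightly differently:
-% illustrative sketch; the exact form and name of the library lemma may differ
-\begin{isabelle}
-\isacommand{lemma}\ "a\ \isasymin \ C\ ==>\ (\isasymInter x\isasymin C.\ A(x)\ Int\ B(x))\ =\isanewline
-\ \ \ \ \ \ \ \ (\isasymInter x\isasymin C.\ A(x))\ Int\ (\isasymInter x\isasymin C.\ B(x))"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}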
-
-\section{Low-level reasoning about functions}
-The derived rules \isa{lamI}, \isa{lamE}, \isa{lam_type}, \isa{beta}
-and \isa{eta} support reasoning about functions in a
-$\lambda$-calculus style.  This is generally easier than regarding
-functions as sets of ordered pairs.  But sometimes we must look at the
-underlying representation, as in the following proof
-of~\tdx{fun_disjoint_apply1}.  This states that if $f$ and~$g$ are
-functions with disjoint domains~$A$ and~$C$, and if $a\in A$, then
-$(f\un g)`a = f`a$:
-\begin{isabelle}
-\isacommand{lemma}\ "[|\ a\ \isasymin \ A;\ \ f\ \isasymin \ A->B;\ \ g\ \isasymin \ C->D;\ \ A\ \isasyminter \ C\ =\ 0\ |]
-\isanewline
-\ \ \ \ \ \ \ \ ==>\ (f\ \isasymunion \ g)`a\ =\ f`a"
-\end{isabelle}
-Using \tdx{apply_equality}, we reduce the equality to reasoning about
-ordered pairs.  The second subgoal is to verify that \isa{f\ \isasymunion \ g} is a function, since
-\isa{Pi(?A,?B)} denotes a dependent function space.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ apply\_equality)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 1.\ }\isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\ \isasymunion \ g\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-We must show that the pair belongs to~$f$ or~$g$; by~\tdx{UnI1} we
-choose~$f$:
-\begin{isabelle}
-\isacommand{apply}\ (rule\ UnI1)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-To show $\pair{a,f`a}\in f$ we use \tdx{apply_Pair}, which is
-essentially the converse of \tdx{apply_equality}:
-\begin{isabelle}
-\isacommand{apply}\ (rule\ apply\_Pair)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ Pi(?A2,?B2)\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ ?A2\isanewline
-\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 3.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-Using the assumptions $f\in A\to B$ and $a\in A$, we solve the two subgoals
-from \tdx{apply_Pair}.  Recall that a $\Pi$-set is merely a generalized
-function space, and observe that~{\tt?A2} gets instantiated to~\isa{A}.
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ A\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 1.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-To construct functions of the form $f\un g$, we apply
-\tdx{fun_disjoint_Un}:
-\begin{isabelle}
-\isacommand{apply}\ (rule\ fun\_disjoint\_Un)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ ?A3\ \isasymrightarrow \ ?B3\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
-\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ ?A3\ \isasyminter \ ?C3\ =\ 0
-\end{isabelle}
-The remaining subgoals are instances of the assumptions.  Again, observe how
-unknowns become instantiated:
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ ?C3\ =\ 0
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ C\ =\ 0
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-See the theories \isa{ZF/func.thy} and \isa{ZF/WF.thy} for more
-examples of reasoning about functions.
-
-\index{set theory|)}
--- a/src/Doc/ZF/document/build	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-FORMAT="$1"
-VARIANT="$2"
-
-"$ISABELLE_TOOL" logo ZF
-
-cp "$ISABELLE_HOME/src/Doc/isar.sty" .
-cp "$ISABELLE_HOME/src/Doc/ttbox.sty" .
-cp "$ISABELLE_HOME/src/Doc/manual.bib" .
-cp "$ISABELLE_HOME/src/Doc/Logics/document/syntax.tex" .
-
-"$ISABELLE_HOME/src/Doc/prepare_document" "$FORMAT"
-
--- a/src/Doc/ZF/document/logics.sty	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,181 +0,0 @@
-% logics.sty : Logics Manuals Page Layout
-%
-\typeout{Document Style logics. Released 18 August 2003}
-
-\hyphenation{Isa-belle man-u-script man-u-scripts ap-pen-dix mut-u-al-ly}
-\hyphenation{data-type data-types co-data-type co-data-types }
-
-%usage: \iflabelundefined{LABEL}{if not defined}{if defined}
-\newcommand{\iflabelundefined}[1]{\@ifundefined{r@#1}}
-
-
-%%%INDEXING  use isa-index to process the index
-
-\newcommand\seealso[2]{\emph{see also} #1}
-\usepackage{makeidx}
-
-%index, putting page numbers of definitions in boldface
-\def\bold#1{\textbf{#1}}
-\newcommand\fnote[1]{#1n}
-\newcommand\indexbold[1]{\index{#1|bold}}
-
-% The alternative to \protect\isa in the indexing macros is
-% \noexpand\noexpand \noexpand\isa
-% need TWO levels of \noexpand to delay the expansion of \isa:
-%  the \noexpand\noexpand will leave one \noexpand, to be given to the
-%  (still unexpanded) \isa token.  See TeX by Topic, page 122.
-
-%%%% for indexing constants, symbols, theorems, ...
-\newcommand\cdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (constant)}}
-\newcommand\sdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (symbol)}}
-
-\newcommand\tdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)}}
-\newcommand\tdxbold[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)|bold}}
-
-\newcommand\cldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (class)}}
-\newcommand\tydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (type)}}
-\newcommand\thydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theory)}}
-
-\newcommand\attrdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (attribute)}}
-\newcommand\cmmdx[1]{\index{#1@\protect\isacommand{#1} (command)}}
-\newcommand\commdx[1]{\isacommand{#1}\index{#1@\protect\isacommand{#1} (command)}}
-\newcommand\methdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (method)}}
-\newcommand\tooldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (tool)}}
-\newcommand\settdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (setting)}}
-
-%set argument in \bf font and index in ROMAN font (for definitions in text!)
-\newcommand\bfindex[1]{{\bf#1}\index{#1|bold}\@}
-
-\newcommand\rmindex[1]{{#1}\index{#1}\@}
-\newcommand\ttindex[1]{\texttt{#1}\index{#1@\texttt{#1}}\@}
-\newcommand\ttindexbold[1]{\texttt{#1}\index{#1@\texttt{#1}|bold}\@}
-
-\newcommand{\indexboldpos}[2]{#1\@}
-\newcommand{\ttindexboldpos}[2]{\isa{#1}\@}
-
-%\newtheorem{theorem}{Theorem}[section]
-\newtheorem{Exercise}{Exercise}[section]
-\newenvironment{exercise}{\begin{Exercise}\rm}{\end{Exercise}}
-\newcommand{\ttlbr}{\texttt{[|}}
-\newcommand{\ttrbr}{\texttt{|]}}
-\newcommand{\ttor}{\texttt{|}}
-\newcommand{\ttall}{\texttt{!}}
-\newcommand{\ttuniquex}{\texttt{?!}}
-\newcommand{\ttEXU}{\texttt{EX!}}
-\newcommand{\ttAnd}{\texttt{!!}}
-
-\newcommand{\isasymignore}{}
-\newcommand{\isasymimp}{\isasymlongrightarrow}
-\newcommand{\isasymImp}{\isasymLongrightarrow}
-\newcommand{\isasymFun}{\isasymRightarrow}
-\newcommand{\isasymuniqex}{\isamath{\exists!\,}}
-\renewcommand{\S}{Sect.\ts}
-
-\renewenvironment{isamarkuptxt}{\begin{isamarkuptext}}{\end{isamarkuptext}}
-
-\newif\ifremarks
-\newcommand{\REMARK}[1]{\ifremarks\marginpar{\raggedright\footnotesize#1}\fi}
-
-%names of Isabelle rules
-\newcommand{\rulename}[1]{\hfill(#1)}
-\newcommand{\rulenamedx}[1]{\hfill(#1\index{#1@\protect\isa{#1} (theorem)|bold})}
-
-%%%% meta-logical connectives
-
-\let\Forall=\bigwedge
-\let\Imp=\Longrightarrow
-\let\To=\Rightarrow
-\newcommand{\Var}[1]{{?\!#1}}
-
-%%% underscores as ordinary characters, not for subscripting
-%%  use @ or \sb for subscripting; use \at for @
-%%  only works in \tt font
-%%  must not make _ an active char; would make \ttindex fail!
-\gdef\underscoreoff{\catcode`\@=8\catcode`\_=\other}
-\gdef\underscoreon{\catcode`\_=8\makeatother}
-\chardef\other=12
-\chardef\at=`\@
-
-% alternative underscore
-\def\_{\leavevmode\kern.06em\vbox{\hrule height.2ex width.3em}\hskip0.1em}
-
-
-%%%% ``WARNING'' environment
-\def\dbend{\vtop to 0pt{\vss\hbox{\Huge\bf!}\vss}}
-\newenvironment{warn}{\medskip\medbreak\begingroup \clubpenalty=10000 
-         \small %%WAS\baselineskip=0.9\baselineskip
-         \noindent \hangindent\parindent \hangafter=-2 
-         \hbox to0pt{\hskip-\hangindent\dbend\hfill}\ignorespaces}%
-        {\par\endgroup\medbreak}
-
-
-%%%% Standard logical symbols
-\let\turn=\vdash
-\let\conj=\wedge
-\let\disj=\vee
-\let\imp=\rightarrow
-\let\bimp=\leftrightarrow
-\newcommand\all[1]{\forall#1.}  %quantification
-\newcommand\ex[1]{\exists#1.}
-\newcommand{\pair}[1]{\langle#1\rangle}
-
-\newcommand{\lparr}{\mathopen{(\!|}}
-\newcommand{\rparr}{\mathclose{|\!)}}
-\newcommand{\fs}{\mathpunct{,\,}}
-\newcommand{\ty}{\mathrel{::}}
-\newcommand{\asn}{\mathrel{:=}}
-\newcommand{\more}{\ldots}
-\newcommand{\record}[1]{\lparr #1 \rparr}
-\newcommand{\dtt}{\mathord.}
-
-\newcommand\lbrakk{\mathopen{[\![}}
-\newcommand\rbrakk{\mathclose{]\!]}}
-\newcommand\List[1]{\lbrakk#1\rbrakk}  %was \obj
-\newcommand\vpile[1]{\begin{array}{c}#1\end{array}}
-\newenvironment{matharray}[1]{\[\begin{array}{#1}}{\end{array}\]}
-\newcommand{\Text}[1]{\mbox{#1}}
-
-\DeclareMathSymbol{\dshsym}{\mathalpha}{letters}{"2D}
-\newcommand{\dsh}{\mathit{\dshsym}}
-
-\let\int=\cap
-\let\un=\cup
-\let\inter=\bigcap
-\let\union=\bigcup
-
-\def\ML{{\sc ml}}
-\def\AST{{\sc ast}}
-
-%macros to change the treatment of symbols
-\def\relsemicolon{\mathcode`\;="303B}   %treat ; like a relation
-\def\binperiod{\mathcode`\.="213A}   %treat . like a binary operator
-\def\binvert{\mathcode`\|="226A}     %treat | like a binary operator
-
-%redefinition of \sloppy and \fussy to use \emergencystretch
-\def\sloppy{\tolerance2000 \hfuzz.5pt \vfuzz.5pt \emergencystretch=15pt}
-\def\fussy{\tolerance200 \hfuzz.1pt \vfuzz.1pt \emergencystretch=0pt}
-
-%non-bf version of description
-\def\descrlabel#1{\hspace\labelsep #1}
-\def\descr{\list{}{\labelwidth\z@ \itemindent-\leftmargin\let\makelabel\descrlabel}}
-\let\enddescr\endlist
-
-% The mathcodes for the letters A, ..., Z, a, ..., z are changed to
-% generate text italic rather than math italic by default. This makes
-% multi-letter identifiers look better. The mathcode for character c
-% is set to |"7000| (variable family) + |"400| (text italic) + |c|.
-%
-\DeclareSymbolFont{italics}{\encodingdefault}{\rmdefault}{m}{it}%
-\def\@setmcodes#1#2#3{{\count0=#1 \count1=#3
-        \loop \global\mathcode\count0=\count1 \ifnum \count0<#2
-        \advance\count0 by1 \advance\count1 by1 \repeat}}
-\@setmcodes{`A}{`Z}{"7\hexnumber@\symitalics41}
-\@setmcodes{`a}{`z}{"7\hexnumber@\symitalics61}
-
-%%% \dquotes permits usage of "..." for \hbox{...} 
-%%%   also taken from under.sty
-{\catcode`\"=\active
-\gdef\dquotes{\catcode`\"=\active  \let"=\@mathText}%
-\gdef\@mathText#1"{\hbox{\mathTextFont #1\/}}}
-\def\mathTextFont{\frenchspacing\tt}
-\def\dquotesoff{\catcode`\"=\other}
\ No newline at end of file
--- a/src/Doc/ZF/document/root.tex	Sat Apr 05 17:52:29 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-\documentclass[11pt,a4paper]{report}
-\usepackage{isabelle,isabellesym,railsetup}
-\usepackage{graphicx,logics,ttbox,proof,latexsym}
-\usepackage{isar}
-
-\usepackage{pdfsetup}   
-%last package!
-
-\remarkstrue 
-
-%%% to index derived rls: ^\([a-zA-Z0-9][a-zA-Z0-9_]*\)        \\tdx{\1}  
-%%% to index rulenames:   ^ *(\([a-zA-Z0-9][a-zA-Z0-9_]*\),     \\tdx{\1}  
-%%% to index constants:   \\tt \([a-zA-Z0-9][a-zA-Z0-9_]*\)     \\cdx{\1}  
-%%% to deverbify:         \\verb|\([^|]*\)|     \\ttindex{\1}  
-
-\title{\includegraphics[scale=0.5]{isabelle_zf} \\[4ex] 
-       Isabelle's Logics: FOL and ZF}
-
-\author{{\em Lawrence C. Paulson}\\
-        Computer Laboratory \\ University of Cambridge \\
-        \texttt{lcp@cl.cam.ac.uk}\\[3ex] 
-        With Contributions by Tobias Nipkow and Markus Wenzel}
-
-\newcommand\subcaption[1]{\par {\centering\normalsize\sc#1\par}\bigskip
-  \hrule\bigskip}
-\newenvironment{constants}{\begin{center}\small\begin{tabular}{rrrr}}{\end{tabular}\end{center}}
-
-\let\ts=\thinspace
-
-\makeindex
-
-\underscoreoff
-
-\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}  %% {secnumdepth}{2}???
-
-\pagestyle{headings}
-\sloppy
-\binperiod     %%%treat . like a binary operator
-
-
-\isadroptag{theory}
-
-\railtermfont{\isabellestyle{tt}}
-\railnontermfont{\isabellestyle{literal}}
-\railnamefont{\isabellestyle{literal}}
-
-
-\begin{document}
-\maketitle 
-
-\begin{abstract}
-This manual describes Isabelle's formalizations of many-sorted first-order
-logic (\texttt{FOL}) and Zermelo-Fraenkel set theory (\texttt{ZF}).  See the
-\emph{Reference Manual} for general Isabelle commands, and \emph{Introduction
-  to Isabelle} for an overall tutorial.
-
-This manual is part of the earlier Isabelle documentation, 
-which is somewhat superseded by the Isabelle/HOL
-\emph{Tutorial}~\cite{isa-tutorial}. However, the present document is the
-only available documentation for Isabelle's versions of first-order logic
-and set theory. Much of it is concerned with 
-the primitives for conducting proofs 
-using the ML top level.  It has been rewritten to use the Isar proof
-language, but evidence of the old \ML{} orientation remains.
-\end{abstract}
-
-
-\subsubsection*{Acknowledgements} 
-Markus Wenzel made numerous improvements.
-    Philippe de Groote contributed to~ZF.  Philippe No\"el and
-    Martin Coen made many contributions to~ZF.  The research has 
-    been funded by the EPSRC (grants GR/G53279, GR/H40570, GR/K57381,
-    GR/K77051, GR/M75440) and by ESPRIT (projects 3245:
-    Logical Frameworks, and 6453: Types) and by the DFG Schwerpunktprogramm
-    \emph{Deduktion}.
-    
-\pagenumbering{roman} \tableofcontents \cleardoublepage
-\pagenumbering{arabic} 
-\setcounter{page}{1} 
-\input{syntax}
-\input{FOL}
-\input{ZF}
-
-\isabellestyle{literal}
-\input{ZF_Isar}
-\isabellestyle{tt}
-
-\bibliographystyle{plain}
-\bibliography{manual}
-\printindex
-\end{document}