even more standardized doc session names after #b266e7a86485
author haftmann
Tue, 08 Apr 2014 12:46:38 +0200
changeset 56451 856492b0f755
parent 56450 16d4213d4cbc
child 56452 0c98c9118407
even more standardized doc session names after #b266e7a86485
src/Doc/Isar-Ref/Base.thy
src/Doc/Isar-Ref/Document_Preparation.thy
src/Doc/Isar-Ref/First_Order_Logic.thy
src/Doc/Isar-Ref/Framework.thy
src/Doc/Isar-Ref/Generic.thy
src/Doc/Isar-Ref/HOL_Specific.thy
src/Doc/Isar-Ref/Inner_Syntax.thy
src/Doc/Isar-Ref/ML_Tactic.thy
src/Doc/Isar-Ref/Misc.thy
src/Doc/Isar-Ref/Outer_Syntax.thy
src/Doc/Isar-Ref/Preface.thy
src/Doc/Isar-Ref/Proof.thy
src/Doc/Isar-Ref/Quick_Reference.thy
src/Doc/Isar-Ref/Spec.thy
src/Doc/Isar-Ref/Symbols.thy
src/Doc/Isar-Ref/Synopsis.thy
src/Doc/Isar-Ref/document/build
src/Doc/Isar-Ref/document/isar-vm.pdf
src/Doc/Isar-Ref/document/isar-vm.svg
src/Doc/Isar-Ref/document/root.tex
src/Doc/Isar-Ref/document/showsymbols
src/Doc/Isar-Ref/document/style.sty
src/Doc/Isar_Ref/Base.thy
src/Doc/Isar_Ref/Document_Preparation.thy
src/Doc/Isar_Ref/First_Order_Logic.thy
src/Doc/Isar_Ref/Framework.thy
src/Doc/Isar_Ref/Generic.thy
src/Doc/Isar_Ref/HOL_Specific.thy
src/Doc/Isar_Ref/Inner_Syntax.thy
src/Doc/Isar_Ref/ML_Tactic.thy
src/Doc/Isar_Ref/Misc.thy
src/Doc/Isar_Ref/Outer_Syntax.thy
src/Doc/Isar_Ref/Preface.thy
src/Doc/Isar_Ref/Proof.thy
src/Doc/Isar_Ref/Quick_Reference.thy
src/Doc/Isar_Ref/Spec.thy
src/Doc/Isar_Ref/Symbols.thy
src/Doc/Isar_Ref/Synopsis.thy
src/Doc/Isar_Ref/document/build
src/Doc/Isar_Ref/document/isar-vm.pdf
src/Doc/Isar_Ref/document/isar-vm.svg
src/Doc/Isar_Ref/document/root.tex
src/Doc/Isar_Ref/document/showsymbols
src/Doc/Isar_Ref/document/style.sty
src/Doc/JEdit/document/build
src/Doc/Logics-ZF/FOL_examples.thy
src/Doc/Logics-ZF/IFOL_examples.thy
src/Doc/Logics-ZF/If.thy
src/Doc/Logics-ZF/ZF_Isar.thy
src/Doc/Logics-ZF/ZF_examples.thy
src/Doc/Logics-ZF/document/FOL.tex
src/Doc/Logics-ZF/document/ZF.tex
src/Doc/Logics-ZF/document/build
src/Doc/Logics-ZF/document/logics.sty
src/Doc/Logics-ZF/document/root.tex
src/Doc/Logics_ZF/FOL_examples.thy
src/Doc/Logics_ZF/IFOL_examples.thy
src/Doc/Logics_ZF/If.thy
src/Doc/Logics_ZF/ZF_Isar.thy
src/Doc/Logics_ZF/ZF_examples.thy
src/Doc/Logics_ZF/document/FOL.tex
src/Doc/Logics_ZF/document/ZF.tex
src/Doc/Logics_ZF/document/build
src/Doc/Logics_ZF/document/logics.sty
src/Doc/Logics_ZF/document/root.tex
src/Doc/Prog-Prove/Basics.thy
src/Doc/Prog-Prove/Bool_nat_list.thy
src/Doc/Prog-Prove/Isar.thy
src/Doc/Prog-Prove/LaTeXsugar.thy
src/Doc/Prog-Prove/Logic.thy
src/Doc/Prog-Prove/MyList.thy
src/Doc/Prog-Prove/Types_and_funs.thy
src/Doc/Prog-Prove/document/bang.pdf
src/Doc/Prog-Prove/document/build
src/Doc/Prog-Prove/document/intro-isabelle.tex
src/Doc/Prog-Prove/document/mathpartir.sty
src/Doc/Prog-Prove/document/prelude.tex
src/Doc/Prog-Prove/document/root.bib
src/Doc/Prog-Prove/document/root.tex
src/Doc/Prog-Prove/document/svmono.cls
src/Doc/Prog_Prove/Basics.thy
src/Doc/Prog_Prove/Bool_nat_list.thy
src/Doc/Prog_Prove/Isar.thy
src/Doc/Prog_Prove/LaTeXsugar.thy
src/Doc/Prog_Prove/Logic.thy
src/Doc/Prog_Prove/MyList.thy
src/Doc/Prog_Prove/Types_and_funs.thy
src/Doc/Prog_Prove/document/bang.pdf
src/Doc/Prog_Prove/document/build
src/Doc/Prog_Prove/document/intro-isabelle.tex
src/Doc/Prog_Prove/document/mathpartir.sty
src/Doc/Prog_Prove/document/prelude.tex
src/Doc/Prog_Prove/document/root.bib
src/Doc/Prog_Prove/document/root.tex
src/Doc/Prog_Prove/document/svmono.cls
src/Doc/ROOT
src/Doc/System/document/build
--- a/src/Doc/Isar-Ref/Base.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-theory Base
-imports Pure
-begin
-
-ML_file "../antiquote_setup.ML"
-
-end
--- a/src/Doc/Isar-Ref/Document_Preparation.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,589 +0,0 @@
-theory Document_Preparation
-imports Base Main
-begin
-
-chapter {* Document preparation \label{ch:document-prep} *}
-
-text {* Isabelle/Isar provides a simple document preparation system
-  based on {PDF-\LaTeX}, with support for hyperlinks and bookmarks
-  within that format.  This makes it possible to produce papers,
-  books, theses etc.\ from Isabelle theory sources.
-
-  {\LaTeX} output is generated while processing a \emph{session} in
-  batch mode, as explained in \emph{The Isabelle System Manual}
-  \cite{isabelle-sys}.  The main Isabelle tools to get started with
-  document preparation are @{tool_ref mkroot} and @{tool_ref build}.
-
-  The classic Isabelle/HOL tutorial \cite{isabelle-hol-book} also
-  explains some aspects of theory presentation.  *}
-
-
-section {* Markup commands \label{sec:markup} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "header"} & : & @{text "toplevel \<rightarrow> toplevel"} \\[0.5ex]
-    @{command_def "chapter"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "section"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "subsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "subsubsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "text"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "text_raw"} & : & @{text "local_theory \<rightarrow> local_theory"} \\[0.5ex]
-    @{command_def "sect"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "subsect"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "subsubsect"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "txt"} & : & @{text "proof \<rightarrow> proof"} \\
-    @{command_def "txt_raw"} & : & @{text "proof \<rightarrow> proof"} \\
-  \end{matharray}
-
-  Markup commands provide a structured way to insert text into the
-  document generated from a theory.  Each markup command takes a
-  single @{syntax text} argument, which is passed as argument to a
-  corresponding {\LaTeX} macro.  The default macros provided by
-  @{file "~~/lib/texinputs/isabelle.sty"} can be redefined according
-  to the needs of the underlying document and {\LaTeX} styles.
-
-  Note that formal comments (\secref{sec:comments}) are similar to
-  markup commands, but have a different status within Isabelle/Isar
-  syntax.
-
-  @{rail \<open>
-    (@@{command chapter} | @@{command section} | @@{command subsection} |
-      @@{command subsubsection} | @@{command text}) @{syntax target}? @{syntax text}
-    ;
-    (@@{command header} | @@{command text_raw} | @@{command sect} | @@{command subsect} |
-      @@{command subsubsect} | @@{command txt} | @@{command txt_raw}) @{syntax text}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command header} provides plain text markup just preceding
-  the formal beginning of a theory.  The corresponding {\LaTeX} macro
-  is @{verbatim "\\isamarkupheader"}, which acts like @{command
-  section} by default.
-  
-  \item @{command chapter}, @{command section}, @{command subsection},
-  and @{command subsubsection} mark chapter and section headings
-  within the main theory body or local theory targets.  The
-  corresponding {\LaTeX} macros are @{verbatim "\\isamarkupchapter"},
-  @{verbatim "\\isamarkupsection"}, @{verbatim
-  "\\isamarkupsubsection"} etc.
-
-  \item @{command sect}, @{command subsect}, and @{command subsubsect}
-  mark section headings within proofs.  The corresponding {\LaTeX}
-  macros are @{verbatim "\\isamarkupsect"}, @{verbatim
-  "\\isamarkupsubsect"} etc.
-
-  \item @{command text} and @{command txt} specify paragraphs of plain
-  text.  This corresponds to a {\LaTeX} environment @{verbatim
-  "\\begin{isamarkuptext}"} @{text "\<dots>"} @{verbatim
-  "\\end{isamarkuptext}"} etc.
-
-  \item @{command text_raw} and @{command txt_raw} insert {\LaTeX}
-  source into the output, without additional markup.  Thus the full
-  range of document manipulations becomes available, at the risk of
-  messing up document output.
-
-  \end{description}
-
-  Except for @{command "text_raw"} and @{command "txt_raw"}, the text
-  passed to any of the above markup commands may refer to formal
-  entities via \emph{document antiquotations}; see also
-  \secref{sec:antiq}.  These are interpreted in the present theory or
-  proof context, or the named @{text "target"}.
-
-  \medskip The proof markup commands closely resemble those for theory
-  specifications, but have a different formal status and produce
-  different {\LaTeX} macros.  The default definitions coincide for
-  analogous commands such as @{command section} and @{command sect}.
-*}
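
  For illustration, a minimal theory fragment using these commands might
  read as follows (a hypothetical sketch, not taken from this changeset):

    theory Scratch
    imports Main
    begin

    section {* An example section *}

    text {* Informal prose, optionally referring to formal entities
      via antiquotations such as @{term "x + y"}. *}

    text_raw {* \bigskip *}  (* raw LaTeX, inserted verbatim *)

    end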
-
-
-section {* Document Antiquotations \label{sec:antiq} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{antiquotation_def "theory"} & : & @{text antiquotation} \\
-    @{antiquotation_def "thm"} & : & @{text antiquotation} \\
-    @{antiquotation_def "lemma"} & : & @{text antiquotation} \\
-    @{antiquotation_def "prop"} & : & @{text antiquotation} \\
-    @{antiquotation_def "term"} & : & @{text antiquotation} \\
-    @{antiquotation_def term_type} & : & @{text antiquotation} \\
-    @{antiquotation_def typeof} & : & @{text antiquotation} \\
-    @{antiquotation_def const} & : & @{text antiquotation} \\
-    @{antiquotation_def abbrev} & : & @{text antiquotation} \\
-    @{antiquotation_def typ} & : & @{text antiquotation} \\
-    @{antiquotation_def type} & : & @{text antiquotation} \\
-    @{antiquotation_def class} & : & @{text antiquotation} \\
-    @{antiquotation_def "text"} & : & @{text antiquotation} \\
-    @{antiquotation_def goals} & : & @{text antiquotation} \\
-    @{antiquotation_def subgoals} & : & @{text antiquotation} \\
-    @{antiquotation_def prf} & : & @{text antiquotation} \\
-    @{antiquotation_def full_prf} & : & @{text antiquotation} \\
-    @{antiquotation_def ML} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_op} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_type} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_structure} & : & @{text antiquotation} \\
-    @{antiquotation_def ML_functor} & : & @{text antiquotation} \\
-    @{antiquotation_def "file"} & : & @{text antiquotation} \\
-    @{antiquotation_def "url"} & : & @{text antiquotation} \\
-  \end{matharray}
-
-  The overall content of an Isabelle/Isar theory may alternate between
-  formal and informal text.  The main body consists of formal
-  specification and proof commands, interspersed with markup commands
-  (\secref{sec:markup}) or document comments (\secref{sec:comments}).
-  The argument of markup commands quotes informal text to be printed
-  in the resulting document, but may again refer to formal entities
-  via \emph{document antiquotations}.
-
-  For example, embedding of ``@{text [source=false] "@{term [show_types] \"f x = a + x\"}"}''
-  within a text block makes
-  \isa{{\isacharparenleft}f{\isasymColon}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isacharparenright}\ {\isacharparenleft}x{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}a{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharplus}\ x} appear in the final {\LaTeX} document.
-
-  Antiquotations usually spare the author tedious typing of logical
-  entities in full detail.  Even more importantly, some degree of
-  consistency-checking between the main body of formal text and its
-  informal explanation is achieved, since terms and types appearing in
-  antiquotations are checked within the current theory or proof
-  context.
-
-  %% FIXME less monolithic presentation, move to individual sections!?
-  @{rail \<open>
-    '@{' antiquotation '}'
-    ;
-    @{syntax_def antiquotation}:
-      @@{antiquotation theory} options @{syntax name} |
-      @@{antiquotation thm} options styles @{syntax thmrefs} |
-      @@{antiquotation lemma} options @{syntax prop} @'by' @{syntax method} @{syntax method}? |
-      @@{antiquotation prop} options styles @{syntax prop} |
-      @@{antiquotation term} options styles @{syntax term} |
-      @@{antiquotation (HOL) value} options styles @{syntax term} |
-      @@{antiquotation term_type} options styles @{syntax term} |
-      @@{antiquotation typeof} options styles @{syntax term} |
-      @@{antiquotation const} options @{syntax term} |
-      @@{antiquotation abbrev} options @{syntax term} |
-      @@{antiquotation typ} options @{syntax type} |
-      @@{antiquotation type} options @{syntax name} |
-      @@{antiquotation class} options @{syntax name} |
-      @@{antiquotation text} options @{syntax name}
-    ;
-    @{syntax antiquotation}:
-      @@{antiquotation goals} options |
-      @@{antiquotation subgoals} options |
-      @@{antiquotation prf} options @{syntax thmrefs} |
-      @@{antiquotation full_prf} options @{syntax thmrefs} |
-      @@{antiquotation ML} options @{syntax name} |
-      @@{antiquotation ML_op} options @{syntax name} |
-      @@{antiquotation ML_type} options @{syntax name} |
-      @@{antiquotation ML_structure} options @{syntax name} |
-      @@{antiquotation ML_functor} options @{syntax name} |
-      @@{antiquotation "file"} options @{syntax name} |
-      @@{antiquotation file_unchecked} options @{syntax name} |
-      @@{antiquotation url} options @{syntax name}
-    ;
-    options: '[' (option * ',') ']'
-    ;
-    option: @{syntax name} | @{syntax name} '=' @{syntax name}
-    ;
-    styles: '(' (style + ',') ')'
-    ;
-    style: (@{syntax name} +)
-  \<close>}
-
-  Note that the syntax of antiquotations may \emph{not} include source
-  comments @{verbatim "(*"}~@{text "\<dots>"}~@{verbatim "*)"} nor verbatim
-  text @{verbatim "{"}@{verbatim "*"}~@{text "\<dots>"}~@{verbatim
-  "*"}@{verbatim "}"}.
-
-  \begin{description}
-  
-  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
-  guaranteed to refer to a valid ancestor theory in the current
-  context.
-
-  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
-  Full fact expressions are allowed here, including attributes
-  (\secref{sec:syn-att}).
-
-  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
-  "\<phi>"}.
-
-  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
-  @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
-
-  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
-  
-  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
-  its result, see also @{command_ref (HOL) value}.
-
-  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
-  annotated with its type.
-
-  \item @{text "@{typeof t}"} prints the type of a well-typed term
-  @{text "t"}.
-
-  \item @{text "@{const c}"} prints a logical or syntactic constant
-  @{text "c"}.
-  
-  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
-  @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
-
-  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
-
-  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
-    constructor @{text "\<kappa>"}.
-
-  \item @{text "@{class c}"} prints a class @{text c}.
-
-  \item @{text "@{text s}"} prints uninterpreted source text @{text
-  s}.  This is particularly useful for printing portions of text according
-  to the Isabelle document style, without demanding well-formedness,
-  e.g.\ small pieces of terms that should not be parsed or
-  type-checked yet.
-
-  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
-  state.  This is mainly for support of tactic-emulation scripts
-  within Isar.  Presentation of goal states does not conform to the
-  idea of human-readable proof documents!
-
-  When explaining proofs in detail it is usually better to spell out
-  the reasoning via proper Isar proof commands, instead of peeking at
-  the internal machine configuration.
-  
-  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
-  does not print the main goal.
-  
-  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
-  corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
-  requires proof terms to be switched on for the current logic
-  session.
-  
-  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
-  a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
-  information omitted in the compact proof term, which is denoted by
-  ``@{text _}'' placeholders there.
-  
-  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
-  s}"}, @{text "@{ML_structure s}"}, and @{text "@{ML_functor s}"}
-  check text @{text s} as ML value, infix operator, type, structure,
-  and functor respectively.  The source is printed verbatim.
-
-  \item @{text "@{file path}"} checks that @{text "path"} refers to a
-  file (or directory) and prints it verbatim.
-
-  \item @{text "@{file_unchecked path}"} is like @{text "@{file
-  path}"}, but does not check the existence of the @{text "path"}
-  within the file-system.
-
-  \item @{text "@{url name}"} produces markup for the given URL, which
-  results in an active hyperlink within the text.
-
-  \end{description}
-*}
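
  For example, a single text block may combine several of these
  antiquotations (a hypothetical sketch; the fact name @{text
  "add.commute"} is assumed to exist in the context):

    text {* The fact @{thm add.commute} justifies the proposition
      @{prop "a + b = b + a"}; a small checked proof can be embedded
      directly via @{lemma "x = x" by (rule refl)}. *}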
-
-
-subsection {* Styled antiquotations *}
-
-text {* The antiquotations @{text thm}, @{text prop} and @{text
-  term} admit an extra \emph{style} specification to modify the
-  printed result.  A style is specified by a name with a possibly
-  empty list of arguments; multiple styles can be sequenced with
-  commas.  The following standard styles are available:
-
-  \begin{description}
-  
-  \item @{text lhs} extracts the first argument of any application
-  form with at least two arguments --- typically meta-level or
-  object-level equality, or any other binary relation.
-  
-  \item @{text rhs} is like @{text lhs}, but extracts the second
-  argument.
-  
-  \item @{text "concl"} extracts the conclusion @{text C} from a rule
-  in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
-  
-  \item @{text "prem"} @{text n} extract premise number
-  @{text "n"} from from a rule in Horn-clause
-  normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
-
-  \end{description}
-*}
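
  For instance, given a hypothetical fact @{text "eq: x = y"} in the
  current context, these styled antiquotations would print the
  indicated parts:

    @{thm (lhs) eq}        (* prints x *)
    @{thm (rhs) eq}        (* prints y *)
    @{thm (concl) conjI}   (* prints the conclusion A \<and> B *)
    @{thm (prem 1) conjI}  (* prints the first premise A *)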
-
-
-subsection {* General options *}
-
-text {* The following options are available to tune the printed output
-  of antiquotations.  Note that many of these coincide with system and
-  configuration options of the same names.
-
-  \begin{description}
-
-  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
-  @{antiquotation_option_def show_sorts}~@{text "= bool"} control
-  printing of explicit type and sort constraints.
-
-  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
-  controls printing of implicit structures.
-
-  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
-  controls folding of abbreviations.
-
-  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
-  names of types and constants etc.\ to be printed in their fully
-  qualified internal form.
-
-  \item @{antiquotation_option_def names_short}~@{text "= bool"}
-  forces names of types and constants etc.\ to be printed unqualified.
-  Note that internalizing the output again in the current context may
-  well yield a different result.
-
-  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
-  determines whether the printed version of qualified names should be
-  made sufficiently long to avoid overlap with names declared further
-  back.  Set to @{text false} for more concise output.
-
-  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
-  prints terms in @{text \<eta>}-contracted form.
-
-  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
-  if the text is to be output as multi-line ``display material'',
-  rather than a small piece of text without line breaks (which is the
-  default).
-
-  In this mode the embedded entities are printed in the same style as
-  the main theory text.
-
-  \item @{antiquotation_option_def break}~@{text "= bool"} controls
-  line breaks in non-display material.
-
-  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
-  if the output should be enclosed in double quotes.
-
-  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
-  name} to the print mode to be used for presentation.  Note that the
-  standard setup for {\LaTeX} output is already present by default,
-  including the modes @{text latex} and @{text xsymbols}.
-
-  \item @{antiquotation_option_def margin}~@{text "= nat"} and
-  @{antiquotation_option_def indent}~@{text "= nat"} change the margin
-  or indentation for pretty printing of display material.
-
-  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
-  determines the maximum number of subgoals to be printed (for goal-based
-  antiquotations).
-
-  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
-  original source text of the antiquotation arguments, rather than their
-  internal representation.  Note that formal checking of
-  @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
-  enabled; use the @{antiquotation "text"} antiquotation for unchecked
-  output.
-
-  Regular @{text "term"} and @{text "typ"} antiquotations with @{text
-  "source = false"} involve a full round-trip from the original source
-  to an internalized logical entity back to a source form, according
-  to the syntax of the current context.  Thus the printed output is
-  not under direct control of the author; it may even fluctuate a bit
-  as the underlying theory is changed later on.
-
-  In contrast, @{antiquotation_option source}~@{text "= true"}
-  admits direct printing of the given source text, with the desirable
-  well-formedness check in the background, but without modification of
-  the printed text.
-
-  \end{description}
-
-  For boolean flags, ``@{text "name = true"}'' may be abbreviated as
-  ``@{text name}''.  All of the above flags are disabled by default,
-  unless changed specifically for a logic session in the corresponding
-  @{verbatim "ROOT"} file.  *}
-
-
-section {* Markup via command tags \label{sec:tags} *}
-
-text {* Each Isabelle/Isar command may be decorated by additional
-  presentation tags, to indicate some modification in the way it is
-  printed in the document.
-
-  @{rail \<open>
-    @{syntax_def tags}: ( tag * )
-    ;
-    tag: '%' (@{syntax ident} | @{syntax string})
-  \<close>}
-
-  Some tags are pre-declared for certain classes of commands, serving
-  as default markup if no tags are given in the text:
-
-  \medskip
-  \begin{tabular}{ll}
-    @{text "theory"} & theory begin/end \\
-    @{text "proof"} & all proof commands \\
-    @{text "ML"} & all commands involving ML code \\
-  \end{tabular}
-
-  \medskip The Isabelle document preparation system
-  \cite{isabelle-sys} allows tagged command regions to be presented
-  specifically, e.g.\ to fold proof texts, or drop parts of the text
-  completely.
-
-  For example, ``@{command "by"}~@{text "%invisible auto"}'' causes
-  that piece of proof to be treated as @{text invisible} instead of
-  @{text "proof"} (the default), which may be shown or hidden
-  depending on the document setup.  In contrast, ``@{command
-  "by"}~@{text "%visible auto"}'' forces this text to be shown
-  invariably.
-
-  Explicit tag specifications within a proof apply to all subsequent
-  commands of the same level of nesting.  For example, ``@{command
-  "proof"}~@{text "%visible \<dots>"}~@{command "qed"}'' forces the whole
-  sub-proof to be typeset as @{text visible} (unless some of its parts
-  are tagged differently).
-
-  \medskip Command tags merely produce certain markup environments for
-  type-setting.  The meaning of these is determined by {\LaTeX}
-  macros, as defined in @{file "~~/lib/texinputs/isabelle.sty"} or
-  by the document author.  The Isabelle document preparation tools
-  also provide some high-level options to specify the meaning of
-  arbitrary tags to ``keep'', ``drop'', or ``fold'' the corresponding
-  parts of the text.  Logic sessions may also specify ``document
-  versions'', where given tags are interpreted in some particular way.
-  Again see \cite{isabelle-sys} for further details.
-*}
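
  A hypothetical proof fragment using such a tag:

    lemma "A \<longrightarrow> A"
      by %invisible simp  (* folded or dropped, depending on document setup *)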
-
-
-section {* Railroad diagrams *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{antiquotation_def "rail"} & : & @{text antiquotation} \\
-  \end{matharray}
-
-  @{rail \<open>
-    'rail' (@{syntax string} | @{syntax cartouche})
-  \<close>}
-
-  The @{antiquotation rail} antiquotation allows syntax diagrams to be
-  included in Isabelle documents.  {\LaTeX} requires the style file
-  @{file "~~/lib/texinputs/pdfsetup.sty"}, which can be used via
-  @{verbatim "\\usepackage{pdfsetup}"} in @{verbatim "root.tex"}, for
-  example.
-
-  The rail specification language is quoted here as Isabelle @{syntax
-  string} or text @{syntax "cartouche"}; it has its own grammar given
-  below.
-
-  \begingroup
-  \def\isasymnewline{\isatext{\tt\isacharbackslash<newline>}}
-  @{rail \<open>
-  rule? + ';'
-  ;
-  rule: ((identifier | @{syntax antiquotation}) ':')? body
-  ;
-  body: concatenation + '|'
-  ;
-  concatenation: ((atom '?'?) +) (('*' | '+') atom?)?
-  ;
-  atom: '(' body? ')' | identifier |
-    '@'? (string | @{syntax antiquotation}) |
-    '\<newline>'
-  \<close>}
-  \endgroup
-
-  The lexical syntax of @{text "identifier"} coincides with that of
-  @{syntax ident} in regular Isabelle syntax, but @{text string} uses
-  single quotes instead of the double quotes of the standard @{syntax
-  string} category.
-
-  Each @{text rule} defines a formal language (with optional name),
-  using a notation that is similar to EBNF or regular expressions with
-  recursion.  The meaning and visual appearance of these rail language
-  elements are illustrated by the following representative examples.
-
-  \begin{itemize}
-
-  \item Empty @{verbatim "()"}
-
-  @{rail \<open>()\<close>}
-
-  \item Nonterminal @{verbatim "A"}
-
-  @{rail \<open>A\<close>}
-
-  \item Nonterminal via Isabelle antiquotation
-  @{verbatim "@{syntax method}"}
-
-  @{rail \<open>@{syntax method}\<close>}
-
-  \item Terminal @{verbatim "'xyz'"}
-
-  @{rail \<open>'xyz'\<close>}
-
-  \item Terminal in keyword style @{verbatim "@'xyz'"}
-
-  @{rail \<open>@'xyz'\<close>}
-
-  \item Terminal via Isabelle antiquotation
-  @{verbatim "@@{method rule}"}
-
-  @{rail \<open>@@{method rule}\<close>}
-
-  \item Concatenation @{verbatim "A B C"}
-
-  @{rail \<open>A B C\<close>}
-
-  \item Newline inside concatenation
-  @{verbatim "A B C \<newline> D E F"}
-
-  @{rail \<open>A B C \<newline> D E F\<close>}
-
-  \item Variants @{verbatim "A | B | C"}
-
-  @{rail \<open>A | B | C\<close>}
-
-  \item Option @{verbatim "A ?"}
-
-  @{rail \<open>A ?\<close>}
-
-  \item Repetition @{verbatim "A *"}
-
-  @{rail \<open>A *\<close>}
-
-  \item Repetition with separator @{verbatim "A * sep"}
-
-  @{rail \<open>A * sep\<close>}
-
-  \item Strict repetition @{verbatim "A +"}
-
-  @{rail \<open>A +\<close>}
-
-  \item Strict repetition with separator @{verbatim "A + sep"}
-
-  @{rail \<open>A + sep\<close>}
-
-  \end{itemize}
-*}
-
-
-section {* Draft presentation *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "display_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command display_drafts} (@{syntax name} +)
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "display_drafts"}~@{text paths} performs simple output of a
-  given list of raw source files. Only those symbols that do not require
-  additional {\LaTeX} packages are displayed properly; everything else is left
-  verbatim.
-
-  \end{description}
-*}
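
  A hypothetical invocation, printing a raw draft of a single source
  file:

    display_drafts "Scratch.thy"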
-
-end
--- a/src/Doc/Isar-Ref/First_Order_Logic.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,520 +0,0 @@
-
-header {* Example: First-Order Logic *}
-
-theory %visible First_Order_Logic
-imports Base  (* FIXME Pure!? *)
-begin
-
-text {*
-  \noindent In order to commence a new object-logic within
-  Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
-  for individuals and @{text "o"} for object-propositions.  The latter
-  is embedded into the language of Pure propositions by means of a
-  separate judgment.
-*}
-
-typedecl i
-typedecl o
-
-judgment
-  Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
-
-text {*
-  \noindent Note that the object-logic judgment is implicit in the
-  syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
-  From the Pure perspective this means ``@{prop A} is derivable in the
-  object-logic''.
-*}
-
-
-subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
-
-text {*
-  Equality is axiomatized as a binary predicate on individuals, with
-  reflexivity as introduction, and substitution as elimination
-  principle.  Note that the latter is particularly convenient in a
-  framework like Isabelle, because syntactic congruences are
-  implicitly produced by unification of @{term "B x"} against
-  expressions containing occurrences of @{term x}.
-*}
-
-axiomatization
-  equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infix "=" 50)
-where
-  refl [intro]: "x = x" and
-  subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
-
-text {*
-  \noindent Substitution is very powerful, but also hard to control in
-  full generality.  We derive some common symmetry~/ transitivity
-  schemes of @{term equal} as particular consequences.
-*}
-
-theorem sym [sym]:
-  assumes "x = y"
-  shows "y = x"
-proof -
-  have "x = x" ..
-  with `x = y` show "y = x" ..
-qed
-
-theorem forw_subst [trans]:
-  assumes "y = x" and "B x"
-  shows "B y"
-proof -
-  from `y = x` have "x = y" ..
-  from this and `B x` show "B y" ..
-qed
-
-theorem back_subst [trans]:
-  assumes "B x" and "x = y"
-  shows "B y"
-proof -
-  from `x = y` and `B x`
-  show "B y" ..
-qed
-
-theorem trans [trans]:
-  assumes "x = y" and "y = z"
-  shows "x = z"
-proof -
-  from `y = z` and `x = y`
-  show "x = z" ..
-qed
-
-
-subsection {* Basic group theory *}
-
-text {*
-  As an example for equational reasoning we consider some bits of
-  group theory.  The subsequent locale definition postulates group
-  operations and axioms; we also derive some consequences of this
-  specification.
-*}
-
-locale group =
-  fixes prod :: "i \<Rightarrow> i \<Rightarrow> i"  (infix "\<circ>" 70)
-    and inv :: "i \<Rightarrow> i"  ("(_\<inverse>)" [1000] 999)
-    and unit :: i  ("1")
-  assumes assoc: "(x \<circ> y) \<circ> z = x \<circ> (y \<circ> z)"
-    and left_unit:  "1 \<circ> x = x"
-    and left_inv: "x\<inverse> \<circ> x = 1"
-begin
-
-theorem right_inv: "x \<circ> x\<inverse> = 1"
-proof -
-  have "x \<circ> x\<inverse> = 1 \<circ> (x \<circ> x\<inverse>)" by (rule left_unit [symmetric])
-  also have "\<dots> = (1 \<circ> x) \<circ> x\<inverse>" by (rule assoc [symmetric])
-  also have "1 = (x\<inverse>)\<inverse> \<circ> x\<inverse>" by (rule left_inv [symmetric])
-  also have "\<dots> \<circ> x = (x\<inverse>)\<inverse> \<circ> (x\<inverse> \<circ> x)" by (rule assoc)
-  also have "x\<inverse> \<circ> x = 1" by (rule left_inv)
-  also have "((x\<inverse>)\<inverse> \<circ> \<dots>) \<circ> x\<inverse> = (x\<inverse>)\<inverse> \<circ> (1 \<circ> x\<inverse>)" by (rule assoc)
-  also have "1 \<circ> x\<inverse> = x\<inverse>" by (rule left_unit)
-  also have "(x\<inverse>)\<inverse> \<circ> \<dots> = 1" by (rule left_inv)
-  finally show "x \<circ> x\<inverse> = 1" .
-qed
-
-theorem right_unit: "x \<circ> 1 = x"
-proof -
-  have "1 = x\<inverse> \<circ> x" by (rule left_inv [symmetric])
-  also have "x \<circ> \<dots> = (x \<circ> x\<inverse>) \<circ> x" by (rule assoc [symmetric])
-  also have "x \<circ> x\<inverse> = 1" by (rule right_inv)
-  also have "\<dots> \<circ> x = x" by (rule left_unit)
-  finally show "x \<circ> 1 = x" .
-qed
-
-text {*
-  \noindent Reasoning from basic axioms is often tedious.  Our proofs
-  work by producing various instances of the given rules (potentially
-  the symmetric form) using the pattern ``@{command have}~@{text
-  eq}~@{command "by"}~@{text "(rule r)"}'' and composing the chain of
-  results via @{command also}/@{command finally}.  These steps may
-  involve any of the transitivity rules declared in
-  \secref{sec:framework-ex-equal}, namely @{thm trans} in combining
-  the first two results in @{thm right_inv} and in the final steps of
-  both proofs, @{thm forw_subst} in the first combination of @{thm
-  right_unit}, and @{thm back_subst} in all other calculational steps.
-
-  Occasional substitutions in calculations are adequate, but should
-  not be over-emphasized.  The other extreme is to compose a chain by
-  plain transitivity only, with replacements always occurring in
-  topmost position. For example:
-*}
-
-(*<*)
-theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
-proof -
-  assume [symmetric, defn]: "\<And>x y. (x \<equiv> y) \<equiv> Trueprop (x = y)"
-(*>*)
-  have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" unfolding left_inv ..
-  also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" unfolding assoc ..
-  also have "\<dots> = 1 \<circ> x" unfolding right_inv ..
-  also have "\<dots> = x" unfolding left_unit ..
-  finally have "x \<circ> 1 = x" .
-(*<*)
-qed
-(*>*)
-
-text {*
-  \noindent Here we have re-used the built-in mechanism for unfolding
-  definitions in order to normalize each equational problem.  A more
-  realistic object-logic would include proper setup for the Simplifier
-  (\secref{sec:simplifier}), the main automated tool for equational
-  reasoning in Isabelle.  Then ``@{command unfolding}~@{thm
-  left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
-  "(simp only: left_inv)"}'' etc.
-*}
-
-end
-
-
-subsection {* Propositional logic \label{sec:framework-ex-prop} *}
-
-text {*
-  We axiomatize basic connectives of propositional logic: implication,
-  disjunction, and conjunction.  The associated rules are modeled
-  after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.
-*}
-
-axiomatization
-  imp :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<longrightarrow>" 25) where
-  impI [intro]: "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B" and
-  impD [dest]: "(A \<longrightarrow> B) \<Longrightarrow> A \<Longrightarrow> B"
-
-axiomatization
-  disj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<or>" 30) where
-  disjI\<^sub>1 [intro]: "A \<Longrightarrow> A \<or> B" and
-  disjI\<^sub>2 [intro]: "B \<Longrightarrow> A \<or> B" and
-  disjE [elim]: "A \<or> B \<Longrightarrow> (A \<Longrightarrow> C) \<Longrightarrow> (B \<Longrightarrow> C) \<Longrightarrow> C"
-
-axiomatization
-  conj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<and>" 35) where
-  conjI [intro]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" and
-  conjD\<^sub>1: "A \<and> B \<Longrightarrow> A" and
-  conjD\<^sub>2: "A \<and> B \<Longrightarrow> B"
-
-text {*
-  \noindent The conjunctive destructions have the disadvantage that
-  decomposing @{prop "A \<and> B"} involves an immediate decision which
-  component should be projected.  The more convenient simultaneous
-  elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
-  follows:
-*}
-
-theorem conjE [elim]:
-  assumes "A \<and> B"
-  obtains A and B
-proof
-  from `A \<and> B` show A by (rule conjD\<^sub>1)
-  from `A \<and> B` show B by (rule conjD\<^sub>2)
-qed
-
-text {*
-  \noindent Here is an example of swapping conjuncts with a single
-  intermediate elimination step:
-*}
-
-(*<*)
-lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
-proof -
-(*>*)
-  assume "A \<and> B"
-  then obtain B and A ..
-  then have "B \<and> A" ..
-(*<*)
-qed
-(*>*)
-
-text {*
-  \noindent Note that the analogous elimination rule for disjunction
-  ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
-  the original axiomatization of @{thm disjE}.
-
-  \medskip We continue propositional logic by introducing absurdity
-  with its characteristic elimination.  Plain truth may then be
-  defined as a proposition that is trivially true.
-*}
-
-axiomatization
-  false :: o  ("\<bottom>") where
-  falseE [elim]: "\<bottom> \<Longrightarrow> A"
-
-definition
-  true :: o  ("\<top>") where
-  "\<top> \<equiv> \<bottom> \<longrightarrow> \<bottom>"
-
-theorem trueI [intro]: \<top>
-  unfolding true_def ..
-
-text {*
-  \medskip\noindent Now negation represents an implication towards
-  absurdity:
-*}
-
-definition
-  not :: "o \<Rightarrow> o"  ("\<not> _" [40] 40) where
-  "\<not> A \<equiv> A \<longrightarrow> \<bottom>"
-
-theorem notI [intro]:
-  assumes "A \<Longrightarrow> \<bottom>"
-  shows "\<not> A"
-unfolding not_def
-proof
-  assume A
-  then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
-qed
-
-theorem notE [elim]:
-  assumes "\<not> A" and A
-  shows B
-proof -
-  from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
-  from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
-  then show B ..
-qed
-
-
-subsection {* Classical logic *}
-
-text {*
-  Subsequently we state the principle of classical contradiction as a
-  local assumption.  Thus we refrain from forcing the object-logic
-  into the classical perspective.  Within that context, we may derive
-  well-known consequences of the classical principle.
-*}
-
-locale classical =
-  assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
-begin
-
-theorem double_negation:
-  assumes "\<not> \<not> C"
-  shows C
-proof (rule classical)
-  assume "\<not> C"
-  with `\<not> \<not> C` show C ..
-qed
-
-theorem tertium_non_datur: "C \<or> \<not> C"
-proof (rule double_negation)
-  show "\<not> \<not> (C \<or> \<not> C)"
-  proof
-    assume "\<not> (C \<or> \<not> C)"
-    have "\<not> C"
-    proof
-      assume C then have "C \<or> \<not> C" ..
-      with `\<not> (C \<or> \<not> C)` show \<bottom> ..
-    qed
-    then have "C \<or> \<not> C" ..
-    with `\<not> (C \<or> \<not> C)` show \<bottom> ..
-  qed
-qed
-
-text {*
-  \noindent These examples illustrate both classical reasoning and
-  non-trivial propositional proofs in general.  All three rules
-  characterize classical logic independently, but the original rule is
-  already the most convenient to use, because it leaves the conclusion
-  unchanged.  Note that @{prop "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"} fits again into our
-  format for eliminations, despite the additional twist that the
-  context refers to the main conclusion.  So we may write @{thm
-  classical} as the Isar statement ``@{text "\<OBTAINS> \<not> thesis"}''.
-  This also explains nicely how classical reasoning really works:
-  whatever the main @{text thesis} might be, we may always assume its
-  negation!
-*}
-
-end
-
-
-subsection {* Quantifiers \label{sec:framework-ex-quant} *}
-
-text {*
-  Representing quantifiers is easy, thanks to the higher-order nature
-  of the underlying framework.  According to the well-known technique
-  introduced by Church \cite{church40}, quantifiers are operators on
-  predicates, which are syntactically represented as @{text "\<lambda>"}-terms
-  of type @{typ "i \<Rightarrow> o"}.  Binder notation turns @{text "All (\<lambda>x. B
-  x)"} into @{text "\<forall>x. B x"} etc.
-*}
-
-axiomatization
-  All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) where
-  allI [intro]: "(\<And>x. B x) \<Longrightarrow> \<forall>x. B x" and
-  allD [dest]: "(\<forall>x. B x) \<Longrightarrow> B a"
-
-axiomatization
-  Ex :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<exists>" 10) where
-  exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
-  exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
-
-text {*
-  \noindent The statement of @{thm exE} corresponds to ``@{text
-  "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar.  In the
-  subsequent example we illustrate quantifier reasoning involving all
-  four rules:
-*}
-
-theorem
-  assumes "\<exists>x. \<forall>y. R x y"
-  shows "\<forall>y. \<exists>x. R x y"
-proof    -- {* @{text "\<forall>"} introduction *}
-  obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` ..    -- {* @{text "\<exists>"} elimination *}
-  fix y have "R x y" using `\<forall>y. R x y` ..    -- {* @{text "\<forall>"} destruction *}
-  then show "\<exists>x. R x y" ..    -- {* @{text "\<exists>"} introduction *}
-qed
-
-
-subsection {* Canonical reasoning patterns *}
-
-text {*
-  The main rules of first-order predicate logic from
-  \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
-  can now be summarized as follows, using the native Isar statement
-  format of \secref{sec:framework-stmt}.
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
-  @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
-
-  @{text "disjI\<^sub>1: \<ASSUMES> A \<SHOWS> A \<or> B"} \\
-  @{text "disjI\<^sub>2: \<ASSUMES> B \<SHOWS> A \<or> B"} \\
-  @{text "disjE: \<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"} \\[1ex]
-
-  @{text "conjI: \<ASSUMES> A \<AND> B \<SHOWS> A \<and> B"} \\
-  @{text "conjE: \<ASSUMES> A \<and> B \<OBTAINS> A \<AND> B"} \\[1ex]
-
-  @{text "falseE: \<ASSUMES> \<bottom> \<SHOWS> A"} \\
-  @{text "trueI: \<SHOWS> \<top>"} \\[1ex]
-
-  @{text "notI: \<ASSUMES> A \<Longrightarrow> \<bottom> \<SHOWS> \<not> A"} \\
-  @{text "notE: \<ASSUMES> \<not> A \<AND> A \<SHOWS> B"} \\[1ex]
-
-  @{text "allI: \<ASSUMES> \<And>x. B x \<SHOWS> \<forall>x. B x"} \\
-  @{text "allE: \<ASSUMES> \<forall>x. B x \<SHOWS> B a"} \\[1ex]
-
-  @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
-  @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
-  \end{tabular}
-  \medskip
-
-  \noindent This essentially provides a declarative reading of Pure
-  rules as Isar reasoning patterns: the rule statements tell what a
-  canonical proof outline should look like.  Since the above rules have
-  already been declared as @{attribute (Pure) intro}, @{attribute
-  (Pure) elim}, @{attribute (Pure) dest} --- each according to its
-  particular shape --- we can immediately write Isar proof texts as
-  follows:
-*}
-
-(*<*)
-theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
-proof -
-(*>*)
-
-  txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<longrightarrow> B"
-  proof
-    assume A
-    show B sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<longrightarrow> B" and A sorry %noproof
-  then have B ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have A sorry %noproof
-  then have "A \<or> B" ..
-
-  have B sorry %noproof
-  then have "A \<or> B" ..
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<or> B" sorry %noproof
-  then have C
-  proof
-    assume A
-    then show C sorry %noproof
-  next
-    assume B
-    then show C sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have A and B sorry %noproof
-  then have "A \<and> B" ..
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "A \<and> B" sorry %noproof
-  then obtain A and B ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<bottom>" sorry %noproof
-  then have A ..
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<top>" ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<not> A"
-  proof
-    assume A
-    then show "\<bottom>" sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<not> A" and A sorry %noproof
-  then have B ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<forall>x. B x"
-  proof
-    fix x
-    show "B x" sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<forall>x. B x" sorry %noproof
-  then have "B a" ..
-
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<exists>x. B x"
-  proof
-    show "B a" sorry %noproof
-  qed
-
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
-
-  have "\<exists>x. B x" sorry %noproof
-  then obtain a where "B a" ..
-
-  txt_raw {*\end{minipage}*}
-
-(*<*)
-qed
-(*>*)
-
-text {*
-  \bigskip\noindent Of course, these proofs are merely examples.  As
-  sketched in \secref{sec:framework-subproof}, there is a fair amount
-  of flexibility in expressing Pure deductions in Isar.  Here the user
-  is asked to express himself adequately, aiming at proof texts of
-  literary quality.
-*}
-
-end %visible
--- a/src/Doc/Isar-Ref/Framework.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1016 +0,0 @@
-theory Framework
-imports Base Main
-begin
-
-chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
-
-text {*
-  Isabelle/Isar
-  \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
-  is intended as a generic framework for developing formal
-  mathematical documents with full proof checking.  Definitions and
-  proofs are organized as theories.  An assembly of theory sources may
-  be presented as a printed document; see also
-  \chref{ch:document-prep}.
-
-  The main objective of Isar is the design of a human-readable
-  structured proof language, which is called the ``primary proof
-  format'' in Isar terminology.  Such a primary proof language is
-  somewhere in the middle between the extremes of primitive proof
-  objects and actual natural language.  In this respect, Isar is a bit
-  more formalistic than Mizar
-  \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
-  using logical symbols for certain reasoning schemes where Mizar
-  would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
-  further comparisons of these systems.
-
-  So Isar challenges the traditional way of recording informal proofs
-  in mathematical prose, as well as the common tendency to see fully
-  formal proofs directly as objects of some logical calculus (e.g.\
-  @{text "\<lambda>"}-terms in a version of type theory).  In fact, Isar is
-  better understood as an interpreter of a simple block-structured
-  language for describing the data flow of local facts and goals,
-  interspersed with occasional invocations of proof methods.
-  Everything is reduced to logical inferences internally, but these
-  steps are somewhat marginal compared to the overall bookkeeping of
-  the interpretation process.  Thanks to careful design of the syntax
-  and semantics of Isar language elements, a formal record of Isar
-  instructions may later appear as an intelligible text to the
-  attentive reader.
-
-  The Isar proof language has emerged from careful analysis of some
-  inherent virtues of the existing logical framework of Isabelle/Pure
-  \cite{paulson-found,paulson700}, notably composition of higher-order
-  natural deduction rules, which is a generalization of Gentzen's
-  original calculus \cite{Gentzen:1935}.  The approach of generic
-  inference systems in Pure is continued by Isar towards actual proof
-  texts.
-
-  Concrete applications require another intermediate layer: an
-  object-logic.  Isabelle/HOL \cite{isa-tutorial} (simply-typed
-  set-theory) is used most of the time; Isabelle/ZF
-  \cite{isabelle-ZF} is less extensively developed, although it would
-  probably fit better for classical mathematics.
-
-  \medskip In order to illustrate natural deduction in Isar, we shall
-  refer to the background theory and library of Isabelle/HOL.  This
-  includes common notions of predicate logic, naive set-theory etc.\
-  using fairly standard mathematical notation.  From the perspective
-  of generic natural deduction there is nothing special about the
-  logical connectives of HOL (@{text "\<and>"}, @{text "\<or>"}, @{text "\<forall>"},
-  @{text "\<exists>"}, etc.), only the resulting reasoning principles are
-  relevant to the user.  There are similar rules available for
-  set-theory operators (@{text "\<inter>"}, @{text "\<union>"}, @{text "\<Inter>"}, @{text
-  "\<Union>"}, etc.), or any other theory developed in the library (lattice
-  theory, topology etc.).
-
-  Subsequently we briefly review fragments of Isar proof texts
-  corresponding directly to such general deduction schemes.  The
-  examples shall refer to set-theory, to minimize the danger of
-  understanding connectives of predicate logic as something special.
-
-  \medskip The following deduction performs @{text "\<inter>"}-introduction,
-  working forwards from assumptions towards the conclusion.  We give
-  both the Isar text, and depict the primitive rule involved, as
-  determined by unification of the problem against rules that are
-  declared in the library context.
-*}
-
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> A" and "x \<in> B"
-    then have "x \<in> A \<inter> B" ..
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
-
-text {*
-  \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
-*}
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Note that @{command assume} augments the proof
-  context, @{command then} indicates that the current fact shall be
-  used in the next step, and @{command have} states an intermediate
-  goal.  The two dots ``@{command ".."}'' refer to a complete proof of
-  this claim, using the indicated facts and a canonical rule from the
-  context.  We could have been more explicit here by spelling out the
-  final proof step via the @{command "by"} command:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> A" and "x \<in> B"
-    then have "x \<in> A \<inter> B" by (rule IntI)
-(*<*)
-end
-(*>*)
-
-text {*
-  \noindent The format of the @{text "\<inter>"}-introduction rule represents
-  the most basic inference, which proceeds from given premises to a
-  conclusion, without any nested proof context involved.
-
-  The next example performs backwards introduction on @{term "\<Inter>\<A>"},
-  the intersection of all sets within a given set.  This requires a
-  nested proof of set membership within a local context, where @{term
-  A} is an arbitrary-but-fixed member of the collection:
-*}
-
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    have "x \<in> \<Inter>\<A>"
-    proof
-      fix A
-      assume "A \<in> \<A>"
-      show "x \<in> A" sorry %noproof
-    qed
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
-
-text {*
-  \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
-*}
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent This Isar reasoning pattern again refers to the
-  primitive rule depicted above.  The system determines it in the
-  ``@{command proof}'' step, which could have been spelt out more
-  explicitly as ``@{command proof}~@{text "(rule InterI)"}''.  Note
-  that the rule involves both a local parameter @{term "A"} and an
-  assumption @{prop "A \<in> \<A>"} in the nested reasoning.  This kind of
-  compound rule typically demands a genuine sub-proof in Isar, working
-  backwards rather than forwards as seen before.  In the proof body we
-  encounter the @{command fix}-@{command assume}-@{command show}
-  outline of nested sub-proofs that is typical for Isar.  The final
-  @{command show} is like @{command have} followed by an additional
-  refinement of the enclosing claim, using the rule derived from the
-  proof body.
-
-  \medskip The next example involves @{term "\<Union>\<A>"}, which can be
-  characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
-  \<in> A \<and> A \<in> \<A>"}.  The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
-  not mention @{text "\<exists>"} and @{text "\<and>"} at all, but allows us to obtain
-  directly a local @{term "A"} such that @{prop "x \<in> A"} and @{prop "A
-  \<in> \<A>"} hold.  This corresponds to the following Isar proof and
-  inference rule, respectively:
-*}
-
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> \<Union>\<A>"
-    then have C
-    proof
-      fix A
-      assume "x \<in> A" and "A \<in> \<A>"
-      show C sorry %noproof
-    qed
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
-
-text {*
-  \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
-*}
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Although the Isar proof follows the natural
-  deduction rule closely, the text does not read as naturally as
-  anticipated.  There is a double occurrence of an arbitrary
-  conclusion @{prop "C"}, which represents the final result, but is
-  irrelevant for now.  This issue arises for any elimination rule
-  involving local parameters.  Isar provides the derived language
-  element @{command obtain}, which is able to perform the same
-  elimination proof more conveniently:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-    assume "x \<in> \<Union>\<A>"
-    then obtain A where "x \<in> A" and "A \<in> \<A>" ..
-(*<*)
-end
-(*>*)
-
-text {*
-  \noindent Here we avoid mentioning the final conclusion @{prop "C"}
-  and return to plain forward reasoning.  The rule involved in the
-  ``@{command ".."}'' proof is the same as before.
-*}
-
-
-section {* The Pure framework \label{sec:framework-pure} *}
-
-text {*
-  The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
-  fragment of higher-order logic \cite{church40}.  In type-theoretic
-  parlance, there are three levels of @{text "\<lambda>"}-calculus with
-  corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
-
-  \medskip
-  \begin{tabular}{ll}
-  @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
-  @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
-  @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
-  \end{tabular}
-  \medskip
-
-  \noindent Here only the types of syntactic terms and the
-  propositions of proof terms have been shown.  The @{text
-  "\<lambda>"}-structure of proofs can be recorded as an optional feature of
-  the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
-  the formal system can never depend on them due to \emph{proof
-  irrelevance}.
-
-  On top of this most primitive layer of proofs, Pure implements a
-  generic calculus for nested natural deduction rules, similar to
-  \cite{Schroeder-Heister:1984}.  Here object-logic inferences are
-  internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
-  Combining such rule statements may involve higher-order unification
-  \cite{paulson-natural}.
-*}
-
-
-subsection {* Primitive inferences *}
-
-text {*
-  Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
-  \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
-  implicit thanks to type-inference; terms of type @{text "prop"} are
-  called propositions.  Logical statements are composed via @{text "\<And>x
-  :: \<alpha>. B(x)"} and @{text "A \<Longrightarrow> B"}.  Primitive reasoning operates on
-  judgments of the form @{text "\<Gamma> \<turnstile> \<phi>"}, with standard introduction
-  and elimination rules for @{text "\<And>"} and @{text "\<Longrightarrow>"} that refer to
-  fixed parameters @{text "x\<^sub>1, \<dots>, x\<^sub>m"} and hypotheses
-  @{text "A\<^sub>1, \<dots>, A\<^sub>n"} from the context @{text "\<Gamma>"};
-  the corresponding proof terms are left implicit.  The subsequent
-  inference rules define @{text "\<Gamma> \<turnstile> \<phi>"} inductively, relative to a
-  collection of axioms:
-
-  \[
-  \infer{@{text "\<turnstile> A"}}{(@{text "A"} \text{~axiom})}
-  \qquad
-  \infer{@{text "A \<turnstile> A"}}{}
-  \]
-
-  \[
-  \infer{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}{@{text "\<Gamma> \<turnstile> B(x)"} & @{text "x \<notin> \<Gamma>"}}
-  \qquad
-  \infer{@{text "\<Gamma> \<turnstile> B(a)"}}{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}
-  \]
-
-  \[
-  \infer{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
-  \qquad
-  \infer{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
-  \]
-
-  Furthermore, Pure provides a built-in equality @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow>
-  prop"} with axioms for reflexivity, substitution, extensionality,
-  and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
-
-  \medskip An object-logic introduces another layer on top of Pure,
-  e.g.\ with types @{text "i"} for individuals and @{text "o"} for
-  propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
-  (implicit) derivability judgment and connectives like @{text "\<and> :: o
-  \<Rightarrow> o \<Rightarrow> o"} or @{text "\<forall> :: (i \<Rightarrow> o) \<Rightarrow> o"}, and axioms for object-level
-  rules such as @{text "conjI: A \<Longrightarrow> B \<Longrightarrow> A \<and> B"} or @{text "allI: (\<And>x. B
-  x) \<Longrightarrow> \<forall>x. B x"}.  Derived object rules are represented as theorems of
-  Pure.  After the initial object-logic setup, further axiomatizations
-  are usually avoided; plain definitions and derived principles are
-  used exclusively.
-*}
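-
-text {*
-  \medskip For illustration, with Isabelle/HOL as object-logic, such
-  derived object rules may be inspected directly as theorems of Pure
-  (the @{command thm} command merely prints them):
-*}
-
-thm conjI  (* ?P \<Longrightarrow> ?Q \<Longrightarrow> ?P \<and> ?Q *)
-thm allI   (* (\<And>x. ?P x) \<Longrightarrow> \<forall>x. ?P x *)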
-
-
-subsection {* Reasoning with rules \label{sec:framework-resolution} *}
-
-text {*
-  Primitive inferences mostly serve foundational purposes.  The main
-  reasoning mechanisms of Pure operate on nested natural deduction
-  rules expressed as formulae, using @{text "\<And>"} to bind local
-  parameters and @{text "\<Longrightarrow>"} to express entailment.  Multiple
-  parameters and premises are represented by repeating these
-  connectives in a right-associative manner.
-
-  Since @{text "\<And>"} and @{text "\<Longrightarrow>"} commute thanks to the theorem
-  @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}, we may assume w.l.o.g.\
-  that rule statements always observe the normal form where
-  quantifiers are pulled in front of implications at each level of
-  nesting.  This means that any Pure proposition may be presented as a
-  \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
-  form @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. H\<^sub>1 \<Longrightarrow> \<dots> H\<^sub>n \<Longrightarrow>
-  A"} for @{text "m, n \<ge> 0"}, and @{text "A"} atomic, and @{text
-  "H\<^sub>1, \<dots>, H\<^sub>n"} being recursively of the same format.
-  Following the convention that outermost quantifiers are implicit,
-  Horn clauses @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"} are a special
-  case of this.
-
-  For example, the @{text "\<inter>"}-introduction rule encountered before is
-  represented as a Pure theorem as follows:
-  \[
-  @{text "IntI:"}~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
-  \]
-
-  \noindent This is a plain Horn clause, since no further nesting on
-  the left is involved.  The general @{text "\<Inter>"}-introduction
-  corresponds to a Hereditary Harrop Formula with one additional level
-  of nesting:
-  \[
-  @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
-  \]
-
-  \medskip Goals are also represented as rules: @{text "A\<^sub>1 \<Longrightarrow>
-  \<dots> A\<^sub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^sub>1, \<dots>,
-  A\<^sub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
-  goal is finished.  To allow @{text "C"} to be a rule statement
-  itself, we introduce the protective marker @{text "# :: prop \<Rightarrow>
-  prop"}, which is defined as identity and hidden from the user.  We
-  initialize and finish goal states as follows:
-
-  \[
-  \begin{array}{c@ {\qquad}c}
-  \infer[(@{inference_def init})]{@{text "C \<Longrightarrow> #C"}}{} &
-  \infer[(@{inference_def finish})]{@{text C}}{@{text "#C"}}
-  \end{array}
-  \]
-
-  \noindent Goal states are refined in intermediate proof steps until
-  a finished form is achieved.  Here the two main reasoning principles
-  are @{inference resolution}, for back-chaining a rule against a
-  sub-goal (replacing it by zero or more sub-goals), and @{inference
-  assumption}, for solving a sub-goal (finding a short-circuit with
-  local assumptions).  Below @{text "\<^vec>x"} stands for @{text
-  "x\<^sub>1, \<dots>, x\<^sub>n"} (@{text "n \<ge> 0"}).
-
-  \[
-  \infer[(@{inference_def resolution})]
-  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
-  {\begin{tabular}{rl}
-    @{text "rule:"} &
-    @{text "\<^vec>A \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
-    @{text "goal:"} &
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "goal unifier:"} &
-    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
-   \end{tabular}}
-  \]
-
-  \medskip
-
-  \[
-  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
-  {\begin{tabular}{rl}
-    @{text "goal:"} &
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "assm unifier:"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text "H\<^sub>i"})} \\
-   \end{tabular}}
-  \]
-
-  The following trace illustrates goal-oriented reasoning in
-  Isabelle/Pure:
-
-  {\footnotesize
-  \medskip
-  \begin{tabular}{r@ {\quad}l}
-  @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
-  @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
-  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> B)"} \\
-  @{text "(A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(assumption)"} \\
-  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> A)"} \\
-  @{text "#\<dots>"} & @{text "(assumption)"} \\
-  @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
-  \end{tabular}
-  \medskip
-  }
-
-  Compositions of @{inference assumption} after @{inference
-  resolution} occur quite often, typically in elimination steps.
-  Traditional Isabelle tactics accommodate this by a combined
-  @{inference_def elim_resolution} principle.  In contrast, Isar uses
-  a slightly more refined combination, where the assumptions to be
-  closed are marked explicitly, using again the protective marker
-  @{text "#"}:
-
-  \[
-  \infer[(@{inference refinement})]
-  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>G' (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
-  {\begin{tabular}{rl}
-    @{text "sub\<hyphen>proof:"} &
-    @{text "\<^vec>G \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
-    @{text "goal:"} &
-    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
-    @{text "goal unifier:"} &
-    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
-    @{text "assm unifiers:"} &
-    @{text "(\<lambda>\<^vec>x. G\<^sub>j (\<^vec>a \<^vec>x))\<vartheta> = #H\<^sub>i\<vartheta>"} \\
-    & \quad (for each marked @{text "G\<^sub>j"} some @{text "#H\<^sub>i"}) \\
-   \end{tabular}}
-  \]
-
-  \noindent Here the @{text "sub\<hyphen>proof"} rule stems from the
-  main @{command fix}-@{command assume}-@{command show} outline of
-  Isar (cf.\ \secref{sec:framework-subproof}): each assumption
-  indicated in the text results in a marked premise @{text "G"} above.
-  The marking enforces resolution against one of the sub-goal's
-  premises.  Consequently, @{command fix}-@{command assume}-@{command
-  show} makes it possible to fit the result of a sub-proof quite
-  robustly into a pending sub-goal, while maintaining a good measure
-  of flexibility.
-*}
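-
-text {*
-  \medskip For illustration, the trace above may be recast as a
-  structured Isar proof.  This is merely a sketch, using the standard
-  HOL rules @{text conjunct1}, @{text conjunct2}, and @{text conjI}:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  assume ab: "A \<and> B"
-  from ab have b: B by (rule conjunct2)
-  from ab have a: A by (rule conjunct1)
-  from b and a have "B \<and> A" by (rule conjI)
-(*<*)
-end
-(*>*)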
-
-
-section {* The Isar proof language \label{sec:framework-isar} *}
-
-text {*
-  Structured proofs are presented as high-level expressions for
-  composing entities of Pure (propositions, facts, and goals).  The
-  Isar proof language allows reasoning to be organized within the
-  underlying rule calculus of Pure, but Isar is not another logical
-  calculus!
-
-  Isar is an exercise in sound minimalism.  Approximately half of the
-  language is introduced as primitive, while the rest is defined as
-  derived concepts.  The following grammar describes the core language
-  (category @{text "proof"}), which is embedded into theory
-  specification elements such as @{command theorem}; see also
-  \secref{sec:framework-stmt} for the separate category @{text
-  "statement"}.
-
-  \medskip
-  \begin{tabular}{rcl}
-    @{text "theory\<hyphen>stmt"} & = & @{command "theorem"}~@{text "statement proof  |"}~~@{command "definition"}~@{text "\<dots>  |  \<dots>"} \\[1ex]
-
-    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\[1ex]
-
-    @{text prfx} & = & @{command "using"}~@{text "facts"} \\
-    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
-
-    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
-    & @{text "|"} & @{command "next"} \\
-    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
-    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
-    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
-    & @{text "|"} & @{command assume}~@{text "\<guillemotleft>inference\<guillemotright> name: props"} \\
-    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
-    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
-    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
-  \end{tabular}
-
-  \medskip Simultaneous propositions or facts may be separated by the
-  @{keyword "and"} keyword.
-
-  \medskip The syntax for terms and propositions is inherited from
-  Pure (and the object-logic).  A @{text "pattern"} is a @{text
-  "term"} with schematic variables, to be bound by higher-order
-  matching.
-
-  \medskip Facts may be referenced by name or proposition.  For
-  example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
-  becomes available both as @{text "a"} and
-  \isacharbackquoteopen@{text "A"}\isacharbackquoteclose.  Moreover,
-  fact expressions may involve attributes that modify either the
-  theorem or the background context.  For example, the expression
-  ``@{text "a [OF b]"}'' refers to the composition of two facts
-  according to the @{inference resolution} inference of
-  \secref{sec:framework-resolution}, while ``@{text "a [intro]"}''
-  declares a fact as introduction rule in the context.
-
-  The special fact called ``@{fact this}'' always refers to the last
-  result, as produced by @{command note}, @{command assume}, @{command
-  have}, or @{command show}.  Since @{command note} occurs
-  frequently together with @{command then} we provide some
-  abbreviations:
-
-  \medskip
-  \begin{tabular}{rcl}
-    @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
-    @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
-  \end{tabular}
-  \medskip
-
-  The @{text "method"} category is essentially a parameter and may be
-  populated later.  Methods use the facts indicated by @{command
-  "then"} or @{command using}, and then operate on the goal state.
-  Some basic methods are predefined: ``@{method "-"}'' leaves the goal
-  unchanged, ``@{method this}'' applies the facts as rules to the
-  goal, ``@{method (Pure) "rule"}'' applies the facts to another rule and the
-  result to the goal (both ``@{method this}'' and ``@{method (Pure) rule}''
-  refer to @{inference resolution} of
-  \secref{sec:framework-resolution}).  The secondary arguments to
-  ``@{method (Pure) rule}'' may be specified explicitly as in ``@{text "(rule
-  a)"}'', or picked from the context.  In the latter case, the system
-  first tries rules declared as @{attribute (Pure) elim} or
-  @{attribute (Pure) dest}, followed by those declared as @{attribute
-  (Pure) intro}.
-
-  The default method for @{command proof} is ``@{method (Pure) rule}''
-  (arguments picked from the context), for @{command qed} it is
-  ``@{method "-"}''.  Further abbreviations for terminal proof steps
-  are ``@{command "by"}~@{text "method\<^sub>1 method\<^sub>2"}'' for
-  ``@{command proof}~@{text "method\<^sub>1"}~@{command qed}~@{text
-  "method\<^sub>2"}'', and ``@{command ".."}'' for ``@{command
-  "by"}~@{method (Pure) rule}, and ``@{command "."}'' for ``@{command
-  "by"}~@{method this}''.  The @{command unfolding} element operates
-  directly on the current facts and goal by applying equalities.
-
-  \medskip Block structure can be indicated explicitly by ``@{command
-  "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
-  already involves implicit nesting.  In any case, @{command next}
-  jumps into the next section of a block, i.e.\ it acts like closing
-  an implicit block scope and opening another one; there is no direct
-  correspondence to subgoals here.
-
-  The remaining elements @{command fix} and @{command assume} build up
-  a local context (see \secref{sec:framework-context}), while
-  @{command show} refines a pending sub-goal by the rule resulting
-  from a nested sub-proof (see \secref{sec:framework-subproof}).
-  Further derived concepts will support calculational reasoning (see
-  \secref{sec:framework-calc}).
-*}
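-
-text {*
-  \medskip For illustration, here is a minimal sketch of the terminal
-  proof steps discussed above, with a hypothetical fact @{text a}:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  assume a: A
-  from a have A .  (* "." abbreviates "by this" *)
-  have A by (rule a)  (* explicit rule argument *)
-(*<*)
-end
-(*>*)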
-
-
-subsection {* Context elements \label{sec:framework-context} *}
-
-text {*
-  In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
-  essentially acts like a proof context.  Isar elaborates this idea
-  towards a higher-level notion, with additional information for
-  type-inference, term abbreviations, local facts, hypotheses etc.
-
-  The element @{command fix}~@{text "x :: \<alpha>"} declares a local
-  parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
-  results exported from the context, @{text "x"} may become anything.
-  The @{command assume}~@{text "\<guillemotleft>inference\<guillemotright>"} element provides a
-  general interface to hypotheses: ``@{command assume}~@{text
-  "\<guillemotleft>inference\<guillemotright> A"}'' produces @{text "A \<turnstile> A"} locally, while the
-  included inference tells how to discharge @{text A} from results
-  @{text "A \<turnstile> B"} later on.  There is no user-syntax for @{text
-  "\<guillemotleft>inference\<guillemotright>"}, i.e.\ it may only occur internally when derived
-  commands are defined in ML.
-
-  At the user-level, the default inference for @{command assume} is
-  @{inference discharge} as given below.  The additional variants
-  @{command presume} and @{command def} are defined as follows:
-
-  \medskip
-  \begin{tabular}{rcl}
-    @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<hyphen>discharge\<guillemotright> A"} \\
-    @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
-  \end{tabular}
-  \medskip
-
-  \[
-  \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
-  \]
-  \[
-  \infer[(@{inference_def "weak\<hyphen>discharge"})]{@{text "\<strut>\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
-  \]
-  \[
-  \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
-  \]
-
-  \medskip Note that @{inference discharge} and @{inference
-  "weak\<hyphen>discharge"} differ in the marker for @{prop A}, which is
-  relevant when the result of a @{command fix}-@{command
-  assume}-@{command show} outline is composed with a pending goal,
-  cf.\ \secref{sec:framework-subproof}.
-
-  The most interesting derived context element in Isar is @{command
-  obtain} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
-  elimination steps in a purely forward manner.  The @{command obtain}
-  command takes a specification of parameters @{text "\<^vec>x"} and
-  assumptions @{text "\<^vec>A"} to be added to the context, together
-  with a proof of a case rule stating that this extension is
-  conservative (i.e.\ may be removed from closed results later on):
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x  \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
-  \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis\<rangle>"} \\
-  \quad @{command proof}~@{method "-"} \\
-  \qquad @{command fix}~@{text thesis} \\
-  \qquad @{command assume}~@{text "[intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis"} \\
-  \qquad @{command show}~@{text thesis}~@{command using}~@{text "\<langle>facts\<rangle> \<langle>proof\<rangle>"} \\
-  \quad @{command qed} \\
-  \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
-  \end{tabular}
-  \medskip
-
-  \[
-  \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
-    \begin{tabular}{rl}
-    @{text "case:"} &
-    @{text "\<Gamma> \<turnstile> \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\[0.2ex]
-    @{text "result:"} &
-    @{text "\<Gamma> \<union> \<^vec>A \<^vec>y \<turnstile> B"} \\[0.2ex]
-    \end{tabular}}
-  \]
-
-  \noindent Here the name ``@{text thesis}'' is a specific convention
-  for an arbitrary-but-fixed proposition; in the primitive natural
-  deduction rules shown before we have occasionally used @{text C}.
-  The whole statement of ``@{command obtain}~@{text x}~@{keyword
-  "where"}~@{text "A x"}'' may be read as a claim that @{text "A x"}
-  may be assumed for some arbitrary-but-fixed @{text "x"}.  Also note
-  that ``@{command obtain}~@{text "A \<AND> B"}'' without parameters
-  is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
-  latter involves multiple sub-goals.
-
-  \medskip The subsequent Isar proof texts explain all context
-  elements introduced above using the formal proof language itself.
-  After finishing a local proof within a block, we indicate the
-  exported result via @{command note}.
-*}
-
-(*<*)
-theorem True
-proof
-(*>*)
-  txt_raw {* \begin{minipage}[t]{0.45\textwidth} *}
-  {
-    fix x
-    have "B x" sorry %noproof
-  }
-  note `\<And>x. B x`
-  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
-  {
-    assume A
-    have B sorry %noproof
-  }
-  note `A \<Longrightarrow> B`
-  txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
-  {
-    def x \<equiv> a
-    have "B x" sorry %noproof
-  }
-  note `B a`
-  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
-  {
-    obtain x where "A x" sorry %noproof
-    have B sorry %noproof
-  }
-  note `B`
-  txt_raw {* \end{minipage} *}
-(*<*)
-qed
-(*>*)
-
-text {*
-  \bigskip\noindent This illustrates the meaning of Isar context
-  elements without goals getting in between.
-*}
-
-subsection {* Structured statements \label{sec:framework-stmt} *}
-
-text {*
-  The category @{text "statement"} of top-level theorem specifications
-  is defined as follows:
-
-  \medskip
-  \begin{tabular}{rcl}
-  @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
-  & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
-
-  @{text "context"} & @{text "\<equiv>"} & @{text "\<FIXES> vars \<AND> \<dots>"} \\
-  & @{text "|"} & @{text "\<ASSUMES> name: props \<AND> \<dots>"} \\
-
-  @{text "conclusion"} & @{text "\<equiv>"} & @{text "\<SHOWS> name: props \<AND> \<dots>"} \\
-  & @{text "|"} & @{text "\<OBTAINS> vars \<AND> \<dots> \<WHERE> name: props \<AND> \<dots>"} \\
-  & & \quad @{text "\<BBAR> \<dots>"} \\
-  \end{tabular}
-
-  \medskip\noindent A simple @{text "statement"} consists of named
-  propositions.  The full form admits local context elements followed
-  by the actual conclusions, such as ``@{keyword "fixes"}~@{text
-  x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
-  x"}''.  The final result emerges as a Pure rule after discharging
-  the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
-
-  The @{keyword "obtains"} variant is another abbreviation defined
-  below; unlike @{command obtain} (cf.\
-  \secref{sec:framework-context}) there may be several ``cases''
-  separated by ``@{text "\<BBAR>"}'', each consisting of several
-  parameters (@{text "vars"}) and several premises (@{text "props"}).
-  This specifies multi-branch elimination rules.
-
-  \medskip
-  \begin{tabular}{l}
-  @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x   \<BBAR>   \<dots>   \<equiv>"} \\[0.5ex]
-  \quad @{text "\<FIXES> thesis"} \\
-  \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis  \<AND>  \<dots>"} \\
-  \quad @{text "\<SHOWS> thesis"} \\
-  \end{tabular}
-  \medskip
-
-  Presenting structured statements in such an ``open'' format usually
-  simplifies the subsequent proof, because the outer structure of the
-  problem is already laid out directly.  E.g.\ consider the following
-  canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
-  respectively:
-*}
-
-text_raw {*\begin{minipage}{0.5\textwidth}*}
-
-theorem
-  fixes x and y
-  assumes "A x" and "B y"
-  shows "C x y"
-proof -
-  from `A x` and `B y`
-  show "C x y" sorry %noproof
-qed
-
-text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
-
-theorem
-  obtains x and y
-  where "A x" and "B y"
-proof -
-  have "A a" and "B b" sorry %noproof
-  then show thesis ..
-qed
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
-  x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
-  y"}\isacharbackquoteclose\ are referenced immediately; there is no
-  need to decompose the logical rule structure again.  In the second
-  proof the final ``@{command then}~@{command show}~@{text
-  thesis}~@{command ".."}''  involves the local rule case @{text "\<And>x
-  y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
-  "a"} and @{text "b"} produced in the body.
-*}
-
-
-subsection {* Structured proof refinement \label{sec:framework-subproof} *}
-
-text {*
-  By breaking up the grammar for the Isar proof language, we may
-  understand a proof text as a linear sequence of individual proof
-  commands.  These are interpreted as transitions of the Isar virtual
-  machine (Isar/VM), which operates on a block-structured
-  configuration in single steps.  This allows users to write proof
-  texts in an incremental manner, and inspect intermediate
-  configurations for debugging.
-
-  The basic idea is analogous to evaluating algebraic expressions on a
-  stack machine: @{text "(a + b) \<cdot> c"} then corresponds to a sequence
-  of single transitions for each symbol @{text "(, a, +, b, ), \<cdot>, c"}.
-  In Isar the algebraic values are facts or goals, and the operations
-  are inferences.
-
-  \medskip The Isar/VM state maintains a stack of nodes; each node
-  contains the local proof context, the linguistic mode, and a pending
-  goal (optional).  The mode determines the type of transition that
-  may be performed next; it essentially alternates between forward and
-  backward reasoning, with an intermediate stage for chained facts
-  (see \figref{fig:isar-vm}).
-
-  \begin{figure}[htb]
-  \begin{center}
-  \includegraphics[width=0.8\textwidth]{isar-vm}
-  \end{center}
-  \caption{Isar/VM modes}\label{fig:isar-vm}
-  \end{figure}
-
-  For example, in @{text "state"} mode Isar acts like a mathematical
-  scratch-pad, accepting declarations like @{command fix}, @{command
-  assume}, and claims like @{command have}, @{command show}.  A goal
-  statement changes the mode to @{text "prove"}, which means that we
-  may now refine the problem via @{command unfolding} or @{command
-  proof}.  Then we are again in @{text "state"} mode of a proof body,
-  which may issue @{command show} statements to solve pending
-  sub-goals.  A concluding @{command qed} will return to the original
-  @{text "state"} mode one level upwards.  The subsequent Isar/VM
-  trace indicates block structure, linguistic mode, goal state, and
-  inferences:
-*}
-
-text_raw {* \begingroup\footnotesize *}
-(*<*)notepad begin
-(*>*)
-  txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
-  have "A \<longrightarrow> B"
-  proof
-    assume A
-    show B
-      sorry %noproof
-  qed
-  txt_raw {* \end{minipage}\quad
-\begin{minipage}[t]{0.06\textwidth}
-@{text "begin"} \\
-\\
-\\
-@{text "begin"} \\
-@{text "end"} \\
-@{text "end"} \\
-\end{minipage}
-\begin{minipage}[t]{0.08\textwidth}
-@{text "prove"} \\
-@{text "state"} \\
-@{text "state"} \\
-@{text "prove"} \\
-@{text "state"} \\
-@{text "state"} \\
-\end{minipage}\begin{minipage}[t]{0.35\textwidth}
-@{text "(A \<longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
-@{text "(A \<Longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
-\\
-\\
-@{text "#(A \<longrightarrow> B)"} \\
-@{text "A \<longrightarrow> B"} \\
-\end{minipage}\begin{minipage}[t]{0.4\textwidth}
-@{text "(init)"} \\
-@{text "(resolution impI)"} \\
-\\
-\\
-@{text "(refinement #A \<Longrightarrow> B)"} \\
-@{text "(finish)"} \\
-\end{minipage} *}
-(*<*)
-end
-(*>*)
-text_raw {* \endgroup *}
-
-text {*
-  \noindent Here the @{inference refinement} inference from
-  \secref{sec:framework-resolution} mediates composition of Isar
-  sub-proofs nicely.  Observe that this principle incorporates some
-  degree of freedom in proof composition.  In particular, the proof
-  body allows parameters and assumptions to be re-ordered, or commuted
-  according to Hereditary Harrop Form.  Moreover, context elements
-  that are not used in a sub-proof may be omitted altogether.  For
-  example:
-*}
-
-text_raw {*\begin{minipage}{0.5\textwidth}*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix x and y
-    assume "A x" and "B y"
-    show "C x y" sorry %noproof
-  qed
-
-txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
-
-(*<*)
-next
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix x assume "A x"
-    fix y assume "B y"
-    show "C x y" sorry %noproof
-  qed
-
-txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
-
-(*<*)
-next
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix y assume "B y"
-    fix x assume "A x"
-    show "C x y" sorry
-  qed
-
-txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
-(*<*)
-next
-(*>*)
-  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
-  proof -
-    fix y assume "B y"
-    fix x
-    show "C x y" sorry
-  qed
-(*<*)
-end
-(*>*)
-
-text_raw {*\end{minipage}*}
-
-text {*
-  \medskip\noindent Such ``peephole optimizations'' of Isar texts are
-  practically important to improve readability, by rearranging
-  context elements according to the natural flow of reasoning in the
-  body, while still observing the overall scoping rules.
-
-  \medskip This illustrates the basic idea of structured proof
-  processing in Isar.  The main mechanisms are based on natural
-  deduction rule composition within the Pure framework.  In
-  particular, there are no direct operations on goal states within the
-  proof body.  Moreover, there is no hidden automated reasoning
-  involved, just plain unification.
-*}
-
-
-subsection {* Calculational reasoning \label{sec:framework-calc} *}
-
-text {*
-  The existing Isar infrastructure is sufficiently flexible to support
-  calculational reasoning (chains of transitivity steps) as a derived
-  concept.  The generic proof elements introduced below depend on
-  rules declared as @{attribute trans} in the context.  It is left to
-  the object-logic to provide a suitable rule collection for mixed
-  relations of @{text "="}, @{text "<"}, @{text "\<le>"}, @{text "\<subset>"},
-  @{text "\<subseteq>"} etc.  Due to the flexibility of rule composition
-  (\secref{sec:framework-resolution}), substitution of equals by
-  equals is covered as well, even substitution of inequalities
-  involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
-  and \cite{Bauer-Wenzel:2001}.
-
-  The generic calculational mechanism is based on the observation that
-  rules such as @{text "trans:"}~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"}
-  proceed from the premises towards the conclusion in a deterministic
-  fashion.  Thus we may reason in forward mode, feeding intermediate
-  results into rules selected from the context.  The course of
-  reasoning is organized by maintaining a secondary fact called
-  ``@{fact calculation}'', apart from the primary ``@{fact this}''
-  already provided by the Isar primitives.  In the definitions below,
-  @{attribute OF} refers to @{inference resolution}
-  (\secref{sec:framework-resolution}) with multiple rule arguments,
-  and @{text "trans"} represents to a suitable rule from the context:
-
-  \begin{matharray}{rcl}
-    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
-    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
-    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\
-  \end{matharray}
-
-  \noindent The start of a calculation is determined implicitly in the
-  text: here @{command also} sets @{fact calculation} to the current
-  result; any subsequent occurrence will update @{fact calculation} by
-  combination with the next result and a transitivity rule.  The
-  calculational sequence is concluded via @{command finally}, where
-  the final result is exposed for use in a concluding claim.
-
-  Here is a canonical proof pattern, using @{command have} to
-  establish the intermediate results:
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  have "a = b" sorry
-  also have "\<dots> = c" sorry
-  also have "\<dots> = d" sorry
-  finally have "a = d" .
-(*<*)
-end
-(*>*)
-
-text {*
-  \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
-  provided by the Isabelle/Isar syntax layer: it statically refers to
-  the right-hand side argument of the previous statement given in the
-  text.  Thus it happens to coincide with relevant sub-expressions in
-  the calculational chain, but the exact correspondence depends on
-  the transitivity rules involved.
-
-  \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
-  transitivities with only one premise.  Isar maintains a separate
-  rule collection declared via the @{attribute sym} attribute, to be
-  used in fact expressions ``@{text "a [symmetric]"}'', or single-step
-  proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
-  have}~@{text "y = x"}~@{command ".."}''.
-*}
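-
-text {*
-  \medskip The latter single-step form may be rendered literally as
-  follows (a minimal sketch, relying on the @{attribute sym}
-  declarations of the object-logic):
-*}
-
-(*<*)
-notepad
-begin
-(*>*)
-  assume "x = y"
-  then have "y = x" ..
-(*<*)
-end
-(*>*)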
-
-end
\ No newline at end of file
--- a/src/Doc/Isar-Ref/Generic.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2016 +0,0 @@
-theory Generic
-imports Base Main
-begin
-
-chapter {* Generic tools and packages \label{ch:gen-tools} *}
-
-section {* Configuration options \label{sec:config} *}
-
-text {* Isabelle/Pure maintains a record of named configuration
-  options within the theory or proof context, with values of type
-  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
-  string}.  Tools may declare options in ML, and then refer to these
-  values (relative to the context).  Thus global reference variables
-  are easily avoided.  The user may change the value of a
-  configuration option by means of an associated attribute of the same
-  name.  This form of context declaration works particularly well with
-  commands such as @{command "declare"} or @{command "using"} like
-  this:
-*}
-
-declare [[show_main_goal = false]]
-
-notepad
-begin
-  note [[show_main_goal = true]]
-end
-
-text {* For historical reasons, some tools cannot take the full proof
-  context into account and merely refer to the background theory.
-  This is accommodated by configuration options being declared as
-  ``global'', which may not be changed within a local context.
-
-  \begin{matharray}{rcll}
-    @{command_def "print_options"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "print_options"} prints the available configuration
-  options, with names, types, and current values.
-  
-  \item @{text "name = value"} as an attribute expression modifies the
-  named option, with the syntax of the value depending on the option's
-  type.  For @{ML_type bool} the default value is @{text true}.  Any
-  attempt to change a global option in a local context is ignored.
-
-  \end{description}
-*}
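-
-text {* For example, an option may be changed for a single goal
-  refinement only, by citing it like a fact within @{command "using"}
-  (a small sketch, reusing the @{text show_main_goal} option from
-  above):
-*}
-
-lemma "x = x"
-  using [[show_main_goal = true]]
-  by (rule refl)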
-
-
-section {* Basic proof tools *}
-
-subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def unfold} & : & @{text method} \\
-    @{method_def fold} & : & @{text method} \\
-    @{method_def insert} & : & @{text method} \\[0.5ex]
-    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def intro} & : & @{text method} \\
-    @{method_def elim} & : & @{text method} \\
-    @{method_def succeed} & : & @{text method} \\
-    @{method_def fail} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
-    ;
-    (@@{method erule} | @@{method drule} | @@{method frule})
-      ('(' @{syntax nat} ')')? @{syntax thmrefs}
-    ;
-    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
-  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
-  all goals; any chained facts provided are inserted into the goal and
-  subject to rewriting as well.
-
-  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
-  into all goals of the proof state.  Note that current facts
-  indicated for forward chaining are ignored.
-
-  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
-  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
-  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
-  method (see \secref{sec:pure-meth-att}), but apply rules by
-  elim-resolution, destruct-resolution, and forward-resolution,
-  respectively \cite{isabelle-implementation}.  The optional natural
-  number argument (default 0) specifies additional assumption steps to
-  be performed here.
-
-  Note that these are improper methods, mainly serving for
-  experimentation and tactic script emulation.  Different modes of
-  basic rule application are usually expressed in Isar at the proof
-  language level, rather than via implicit proof state manipulations.
-  For example, a proper single-step elimination would be done using
-  the plain @{method rule} method, with forward chaining of current
-  facts.
-
-  \item @{method intro} and @{method elim} repeatedly refine some goal
-  by intro- or elim-resolution, after having inserted any chained
-  facts.  Exactly the rules given as arguments are taken into account;
-  this allows fine-tuned decomposition of a proof problem, in contrast
-  to common automated tools.
-
-  \item @{method succeed} yields a single (unchanged) result; it is
-  the identity of the ``@{text ","}'' method combinator (cf.\
-  \secref{sec:proof-meth}).
-
-  \item @{method fail} yields an empty result sequence; it is the
-  identity of the ``@{text "|"}'' method combinator (cf.\
-  \secref{sec:proof-meth}).
-
-  \end{description}
-
-  \begin{matharray}{rcl}
-    @{attribute_def tagged} & : & @{text attribute} \\
-    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
-    @{attribute_def THEN} & : & @{text attribute} \\
-    @{attribute_def unfolded} & : & @{text attribute} \\
-    @{attribute_def folded} & : & @{text attribute} \\
-    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
-    @{attribute_def rotated} & : & @{text attribute} \\
-    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
-    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute tagged} @{syntax name} @{syntax name}
-    ;
-    @@{attribute untagged} @{syntax name}
-    ;
-    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
-    ;
-    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
-    ;
-    @@{attribute rotated} @{syntax int}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute tagged}~@{text "name value"} and @{attribute
-  untagged}~@{text name} add and remove \emph{tags} of some theorem.
-  Tags may be any list of string pairs that serve as formal comment.
-  The first string is considered the tag name, the second its value.
-  Note that @{attribute untagged} removes any tags of the same name.
-
-  \item @{attribute THEN}~@{text a} composes rules by resolution; it
-  resolves with the first premise of @{text a} (an alternative
-  position may be also specified).  See also @{ML_op "RS"} in
-  \cite{isabelle-implementation}.
-  
-  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
-  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
-  definitions throughout a rule.
-
-  \item @{attribute abs_def} turns an equation of the form @{prop "f x
-  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
-  simp} or @{method unfold} steps always expand it.  This also works
-  for object-logic equality.
-
-  \item @{attribute rotated}~@{text n} rotates the premises of a
-  theorem by @{text n} (default 1).
-
-  \item @{attribute (Pure) elim_format} turns a destruction rule into
-  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
-  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
-  
-  Note that the Classical Reasoner (\secref{sec:classical}) provides
-  its own version of this operation.
-
-  \item @{attribute no_vars} replaces schematic variables by free
-  ones; this is mainly for tuning output of pretty printed theorems.
-
-  \end{description}
-*}
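-
-text {*
-  \medskip A small sketch of such attribute expressions, using the
-  standard HOL rules @{text conjI} and @{text conjunct1}; the
-  @{command thm} command merely prints the modified theorems:
-*}
-
-thm conjI  (* ?P \<Longrightarrow> ?Q \<Longrightarrow> ?P \<and> ?Q *)
-thm conjI [rotated]  (* ?Q \<Longrightarrow> ?P \<Longrightarrow> ?P \<and> ?Q *)
-thm conjI [THEN conjunct1]  (* ?P \<Longrightarrow> ?Q \<Longrightarrow> ?P *)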
-
-
-subsection {* Low-level equational reasoning *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def subst} & : & @{text method} \\
-    @{method_def hypsubst} & : & @{text method} \\
-    @{method_def split} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thmref}
-    ;
-    @@{method split} @{syntax thmrefs}
-  \<close>}
-
-  These methods provide low-level facilities for equational reasoning
-  that are intended for specialized applications only.  Normally,
-  single step calculations would be performed in a structured text
-  (see also \secref{sec:calculation}), while the Simplifier methods
-  provide the canonical way for automated normalization (see
-  \secref{sec:simplifier}).
-
-  \begin{description}
-
-  \item @{method subst}~@{text eq} performs a single substitution step
-  using rule @{text eq}, which may be either a meta or object
-  equality.
-
-  \item @{method subst}~@{text "(asm) eq"} substitutes in an
-  assumption.
-
-  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
-  substitutions in the conclusion. The numbers @{text i} to @{text j}
-  indicate the positions to substitute at.  Positions are ordered from
-  the top of the term tree moving down from left to right. For
-  example, in @{text "(a + b) + (c + d)"} there are three positions
-  where commutativity of @{text "+"} is applicable: 1 refers to @{text
-  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.
-
-  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
-  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
-  assume all substitutions are performed simultaneously.  Otherwise
-  the behavior of @{text subst} is not specified.
-
-  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
-  substitutions in the assumptions. The positions refer to the
-  assumptions in order from left to right.  For example, given in a
-  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
-  commutativity of @{text "+"} is the subterm @{text "a + b"} and
-  position 2 is the subterm @{text "c + d"}.
-
-  \item @{method hypsubst} performs substitution using some
-  assumption; this only works for equations of the form @{text "x =
-  t"} where @{text x} is a free or bound variable.
-
-  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
-  splitting using the given rules.  Splitting is performed in the
-  conclusion or some assumption of the subgoal, depending on the
-  structure of the rule.
-  
-  Note that the @{method simp} method already involves repeated
-  application of split rules as declared in the current context, using
-  @{attribute split}, for example.
-
-  \end{description}
-*}
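-
-text {*
-  \medskip For example, a single @{method subst} step with a local
-  equation may look like this (a minimal sketch with hypothetical
-  names):
-*}
-
-notepad
-begin
-  fix f :: "nat \<Rightarrow> nat" and a b :: nat  (* hypothetical entities *)
-  assume eq: "a = b"
-  have "f a = f b" by (subst eq) (rule refl)
-end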
-
-
-subsection {* Further tactic emulations \label{sec:tactics} *}
-
-text {*
-  The following improper proof methods emulate traditional tactics.
-  These admit direct access to the goal state, which is normally
-  considered harmful!  In particular, this may involve both numbered
-  goal addressing (default 1), and dynamic instantiation within the
-  scope of some subgoal.
-
-  \begin{warn}
-    Dynamic instantiations refer to universally quantified parameters
-    of a subgoal (the dynamic context) rather than fixed variables and
-    term abbreviations of a (static) Isar context.
-  \end{warn}
-
-  Tactic emulation methods, unlike their ML counterparts, admit
-  simultaneous instantiation from both dynamic and static contexts.
-  If names occur in both contexts, goal parameters hide locally fixed
-  variables.  Likewise, schematic variables refer to term
-  abbreviations, if present in the static context.  Otherwise the
-  schematic variable is left to be solved by unification with certain
-  parts of the subgoal.
-
-  Note that the tactic emulation proof methods in Isabelle/Isar are
-  consistently named @{text foo_tac}.  Note also that variable names
-  occurring on left hand sides of instantiations must be preceded by a
-  question mark if they coincide with a keyword or contain dots.  This
-  is consistent with the attribute @{attribute "where"} (see
-  \secref{sec:pure-meth-att}).
-
-  \begin{matharray}{rcl}
-    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
-      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \<newline>
-    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
-    ;
-    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
-    ;
-    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
-    ;
-    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
-    ;
-    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
-    ;
-
-    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
-  \<close>}
-
-\begin{description}
-
-  \item @{method rule_tac} etc. do resolution of rules with explicit
-  instantiation.  This works the same way as the ML tactics @{ML
-  res_inst_tac} etc.\ (see \cite{isabelle-implementation}).
-
-  Multiple rules may only be given if there is no instantiation; then
-  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
-  \cite{isabelle-implementation}).
-
-  \item @{method cut_tac} inserts facts into the proof state as
-  assumption of a subgoal; instantiations may be given as well.  Note
-  that the scope of schematic variables is spread over the main goal
-  statement, and rule premises are turned into new subgoals.  This is
-  in contrast to the regular method @{method insert} which inserts
-  closed rule statements.
-
-  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
-  from a subgoal.  Note that @{text \<phi>} may contain schematic
-  variables, to abbreviate the intended proposition; the first
-  matching subgoal premise will be deleted.  Removing useless premises
-  from a subgoal increases its readability and can make search tactics
-  run faster.
-
-  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
-  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
-  as new subgoals (in the original context).
-
-  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
-  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
-  \emph{suffix} of variables.
-
-  \item @{method rotate_tac}~@{text n} rotates the premises of a
-  subgoal by @{text n} positions: from right to left if @{text n} is
-  positive, and from left to right if @{text n} is negative; the
-  default value is 1.
-
-  \item @{method tactic}~@{text "text"} produces a proof method from
-  any ML text of type @{ML_type tactic}.  Apart from the usual ML
-  environment and the current proof context, the ML code may refer to
-  the locally bound value @{ML_text facts}, which indicates any
-  current facts used for forward-chaining.
-
-  \item @{method raw_tactic} is similar to @{method tactic}, but
-  presents the goal state in its raw internal form, where simultaneous
-  subgoals appear as conjunction of the logical framework instead of
-  the usual split into several subgoals.  While this feature is useful
-  for debugging complex method definitions, it should never appear in
-  production theories.
-
-  \end{description}
-*}
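-
-text {*
-  \medskip A typical sketch of explicit instantiation with @{method
-  rule_tac}, using the standard HOL rule @{text exI}:
-*}
-
-lemma "\<exists>x::nat. x = a"
-  apply (rule_tac x = a in exI)
-  apply (rule refl)
-  done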
-
-
-section {* The Simplifier \label{sec:simplifier} *}
-
-text {* The Simplifier performs conditional and unconditional
-  rewriting and uses contextual information: rule declarations in the
-  background theory or local proof context are taken into account, as
-  well as chained facts and subgoal premises (``local assumptions'').
-  There are several general hooks for modifying the simplification
-  strategy, or for incorporating other proof tools that solve
-  sub-problems, produce rewrite rules on demand etc.
-
-  The rewriting strategy is always strictly bottom up, except for
-  congruence rules, which are applied while descending into a term.
-  Conditions in conditional rewrite rules are solved recursively
-  before the rewrite rule is applied.
-
-  The default Simplifier setup of major object logics (HOL, HOLCF,
-  FOL, ZF) makes the Simplifier ready for immediate use, without
-  delving into the internal structures.  Thus it serves as a
-  general-purpose proof tool with the main focus on equational
-  reasoning, and a bit more than that.
-*}
-
-
-subsection {* Simplification methods \label{sec:simp-meth} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def simp} & : & @{text method} \\
-    @{method_def simp_all} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
-    ;
-
-    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
-    ;
-    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | 'add' | 'del') |
-      'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method simp} invokes the Simplifier on the first subgoal,
-  after inserting chained facts as additional goal premises; further
-  rule declarations may be included via @{text "(simp add: facts)"}.
-  The proof method fails if the subgoal remains unchanged after
-  simplification.
-
-  Note that the original goal premises and chained facts are subject
-  to simplification themselves, while declarations via @{text
-  "add"}/@{text "del"} merely follow the policies of the object-logic
-  to extract rewrite rules from theorems, without further
-  simplification.  This may lead to slightly different behavior in
-  either case, which might be exactly what is required in some
-  boundary situations to perform the intended simplification step!
-
-  \medskip The @{text only} modifier first removes all other rewrite
-  rules, looper tactics (including split rules), and congruence
-  rules, and then behaves like @{text add}.  Implicit solvers remain, which means
-  that trivial rules like reflexivity or introduction of @{text
-  "True"} are available to solve the simplified subgoals, but also
-  non-trivial tools like linear arithmetic in HOL.  The latter may
-  lead to some surprise about the meaning of ``only'' in Isabelle/HOL
-  compared to English!
-
-  \medskip The @{text split} modifiers add or delete rules for the
-  Splitter (see also \secref{sec:simp-strategies} on the looper).
-  This works only if the Simplifier method has been properly setup to
-  include the Splitter (all major object logics such as HOL, HOLCF, FOL,
-  ZF do this already).
-
-  There is also a separate @{method_ref split} method available for
-  single-step case splitting.  The effect of repeatedly applying
-  @{text "(split thms)"} can be imitated by ``@{text "(simp only:
-  split: thms)"}''.
-
-  \medskip The @{text cong} modifiers add or delete Simplifier
-  congruence rules (see also \secref{sec:simp-rules}); the default is
-  to add.
-
-  \item @{method simp_all} is similar to @{method simp}, but acts on
-  all goals, working backwards from the last to the first one as usual
-  in Isabelle.\footnote{The order is irrelevant for goals without
-  schematic variables, so simplification might actually be performed
-  in parallel here.}
-
-  Chained facts are inserted into all subgoals, before the
-  simplification process starts.  Further rule declarations are the
-  same as for @{method simp}.
-
-  The proof method fails if all subgoals remain unchanged after
-  simplification.
-
-  \end{description}
-
-  By default the Simplifier methods above take local assumptions fully
-  into account, using equational assumptions in the subsequent
-  normalization process, or simplifying assumptions themselves.
-  Further options allow the behavior of the Simplifier to be fine-tuned
-  in this respect, corresponding to a variety of ML tactics as
-  follows.\footnote{Unlike the corresponding Isar proof methods, the
-  ML tactics do not insist on changing the goal state.}
-
-  \begin{center}
-  \small
-  \begin{supertabular}{|l|l|p{0.3\textwidth}|}
-  \hline
-  Isar method & ML tactic & behavior \\\hline
-
-  @{text "(simp (no_asm))"} & @{ML simp_tac} & assumptions are ignored
-  completely \\\hline
-
-  @{text "(simp (no_asm_simp))"} & @{ML asm_simp_tac} & assumptions
-  are used in the simplification of the conclusion but are not
-  themselves simplified \\\hline
-
-  @{text "(simp (no_asm_use))"} & @{ML full_simp_tac} & assumptions
-  are simplified but are not used in the simplification of each other
-  or the conclusion \\\hline
-
-  @{text "(simp)"} & @{ML asm_full_simp_tac} & assumptions are used in
-  the simplification of the conclusion and to simplify other
-  assumptions \\\hline
-
-  @{text "(simp (asm_lr))"} & @{ML asm_lr_simp_tac} & compatibility
-  mode: an assumption is only used for simplifying assumptions which
-  are to the right of it \\\hline
-
-  \end{supertabular}
-  \end{center}
-*}
-
-
-subsubsection {* Examples *}
-
-text {* We consider basic algebraic simplifications in Isabelle/HOL.
-  The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
-  a good candidate to be solved by a single call of @{method simp}:
-*}
-
-lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops
-
-text {* The above attempt \emph{fails}, because @{term "0"} and @{term
-  "op +"} in the HOL library are declared as generic type class
-  operations, without stating any algebraic laws yet.  More specific
-  types are required to get access to certain standard simplifications
-  of the theory context, e.g.\ like this: *}
-
-lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
-lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
-lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp
-
-text {*
-  \medskip In many cases, assumptions of a subgoal are also needed in
-  the simplification process.  For example:
-*}
-
-lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
-lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
-lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp
-
-text {* As seen above, local assumptions that shall contribute to
-  simplification need to be part of the subgoal already, or indicated
-  explicitly for use by the subsequent method invocation.  Either too
-  little or too much information can make simplification fail, for
-  different reasons.
-
-  In the next example the malicious assumption @{prop "\<And>x::nat. f x =
-  g (f (g x))"} does not contribute to solve the problem, but makes
-  the default @{method simp} method loop: the rewrite rule @{text "f
-  ?x \<equiv> g (f (g ?x))"} extracted from the assumption does not
-  terminate.  The Simplifier notices certain simple forms of
-  nontermination, but not this one.  The problem can be solved
-  nonetheless, by ignoring assumptions via special options as
-  explained before:
-*}
-
-lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
-  by (simp (no_asm))
-
-text {* The latter form is typical for long unstructured proof
-  scripts, where the control over the goal content is limited.  In
-  structured proofs it is usually better to avoid pushing too many
-  facts into the goal state in the first place.  Assumptions in the
-  Isar proof context do not intrude on the reasoning unless used
-  explicitly.  This is illustrated for a toplevel statement and a
-  local proof body as follows:
-*}
-
-lemma
-  assumes "\<And>x::nat. f x = g (f (g x))"
-  shows "f 0 = f 0 + 0" by simp
-
-notepad
-begin
-  assume "\<And>x::nat. f x = g (f (g x))"
-  have "f 0 = f 0 + 0" by simp
-end
-
-text {* \medskip Because assumptions may simplify each other, there
-  can be very subtle cases of nontermination. For example, the regular
-  @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
-  \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
-  \[
-  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto}
-  @{text "P (f y)"} \stackrel{@{text "y \<equiv> x"}}{\longmapsto}
-  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto} \cdots
-  \]
-  whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
-  Q"} terminates (without solving the goal):
-*}
-
-lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
-  apply simp
-  oops
-
-text {* See also \secref{sec:simp-config} for options to enable
-  Simplifier trace mode, which often helps to diagnose problems with
-  rewrite systems.
-*}
-
-
-subsection {* Declaring rules \label{sec:simp-rules} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def simp} & : & @{text attribute} \\
-    @{attribute_def split} & : & @{text attribute} \\
-    @{attribute_def cong} & : & @{text attribute} \\
-    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{attribute simp} | @@{attribute split} | @@{attribute cong})
-      (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute simp} declares rewrite rules, by adding or
-  deleting them from the simpset within the theory or proof context.
-  Rewrite rules are theorems expressing some form of equality, for
-  example:
-
-  @{text "Suc ?m + ?n = ?m + Suc ?n"} \\
-  @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
-  @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}
-
-  \smallskip
-  Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
-  also permitted; the conditions can be arbitrary formulas.
-
-  \medskip Internally, all rewrite rules are translated into Pure
-  equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
-  simpset contains a function for extracting equalities from arbitrary
-  theorems, which is usually installed when the object-logic is
-  configured initially. For example, @{text "\<not> ?x \<in> {}"} could be
-  turned into @{text "?x \<in> {} \<equiv> False"}. Theorems that are declared as
-  @{attribute simp} and local assumptions within a goal are treated
-  uniformly in this respect.
-
-  The Simplifier accepts the following formats for the @{text "lhs"}
-  term:
-
-  \begin{enumerate}
-
-  \item First-order patterns, considering the sublanguage of
-  application of constant operators to variable operands, without
-  @{text "\<lambda>"}-abstractions or functional variables.
-  For example:
-
-  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
-  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
-
-  \item Higher-order patterns in the sense of \cite{nipkow-patterns}.
-  These are terms in @{text "\<beta>"}-normal form (this will always be the
-  case unless you have done something strange) where each occurrence
-  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
-  @{text "x\<^sub>i"} are distinct bound variables.
-
-  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
-  or its symmetric form, since the @{text "rhs"} is also a
-  higher-order pattern.
-
-  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
-  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
-  variables are treated like quasi-constant term material.
-
-  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
-  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
-  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
-  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
-  be replaced by adding new variables and conditions like this: @{text
-  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
-  rewrite rule of the second category since conditions can be
-  arbitrary terms.
-
-  \end{enumerate}
-
-  \item @{attribute split} declares case split rules.
-
-  \item @{attribute cong} declares congruence rules to the Simplifier
-  context.
-
-  Congruence rules are equalities of the form @{text [display]
-  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}
-
-  This controls the simplification of the arguments of @{text f}.  For
-  example, some arguments can be simplified under additional
-  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
-  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}
-
-  Given this rule, the simplifier assumes @{text "?Q\<^sub>1"} and extracts
-  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
-  assumptions are effective for rewriting formulae such as @{text "x =
-  0 \<longrightarrow> y + x = y"}.
-
-  %FIXME
-  %The local assumptions are also provided as theorems to the solver;
-  %see \secref{sec:simp-solver} below.
-
-  \medskip The following congruence rule for bounded quantifiers also
-  supplies contextual information --- about the bound variable:
-  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
-    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}
-
-  \medskip This congruence rule for conditional expressions can
-  supply contextual information for simplifying the arms:
-  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
-    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}
-
-  A congruence rule can also \emph{prevent} simplification of some
-  arguments.  Here is an alternative congruence rule for conditional
-  expressions that conforms to non-strict functional evaluation:
-  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}
-
-  Only the first argument is simplified; the others remain unchanged.
-  This can make simplification much faster, but may require an extra
-  case split over the condition @{text "?q"} to prove the goal.
-
-  \item @{command "print_simpset"} prints the collection of rules
-  declared to the Simplifier, which is also known as ``simpset''
-  internally.
-
-  For historical reasons, simpsets may occur independently from the
-  current context, but are conceptually dependent on it.  When the
-  Simplifier is invoked via one of its main entry points in the Isar
-  source language (as proof method \secref{sec:simp-meth} or rule
-  attribute \secref{sec:simp-forward}), its simpset is derived from the
-  current proof context, and carries a back-reference to that for
-  other tools that might get invoked internally (e.g.\ simplification
-  procedures \secref{sec:simproc}).  A mismatch of the context of the
-  simpset and the context of the problem being simplified may lead to
-  unexpected results.
-
-  \end{description}
-
-  The implicit simpset of the theory context is propagated
-  monotonically through the theory hierarchy: when forming a new
-  theory, the union of the simpsets of its imports is taken as the
-  starting point.  Also note that definitional packages like @{command
-  "datatype"}, @{command "primrec"}, @{command "fun"} routinely
-  declare Simplifier rules to the target context, while plain
-  @{command "definition"} is an exception in \emph{not} declaring
-  anything.
-
-  \medskip It is up to the user to manipulate the current simpset further
-  by explicitly adding or deleting theorems as simplification rules,
-  or installing other tools via simplification procedures
-  (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
-  that obviously simplify, like @{text "?n + 0 \<equiv> ?n"} are good
-  candidates for the implicit simpset, unless a special
-  non-normalizing behavior of certain operations is intended.  More
-  specific rules (such as distributive laws, which duplicate subterms)
-  should be added only for specific proof steps.  Conversely,
-  sometimes a rule needs to be deleted just for some part of a proof.
-  The need of frequent additions or deletions may indicate a poorly
-  designed simpset.
-
-  \begin{warn}
-  The union of simpsets from theory imports (as described above) is
-  not always a good starting point for the new theory.  If some
-  ancestors have deleted simplification rules because they are no
-  longer wanted, while others have left those rules in, then the union
-  will contain the unwanted rules, which thus have to be deleted again
-  in the theory body.
-  \end{warn}
-*}
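-
-text {* \medskip The following is a minimal sketch of such
-  declarations within Isabelle/HOL; the rule @{text add_0_mid} is made
-  up for illustration (it is subsumed by the default simpset anyway):
-*}
-
-lemma add_0_mid [simp]: "m + (0 + n) = m + (n::nat)" by simp
-
-lemma "a + (0 + b) = a + (b::nat)" by (simp del: add_0_mid)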
-
-
-subsection {* Ordered rewriting with permutative rules *}
-
-text {* A rewrite rule is \emph{permutative} if the left-hand side and
-  right-hand side are equal up to renaming of variables.  The most
-  common permutative rule is commutativity: @{text "?x + ?y = ?y +
-  ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
-  ?y"} in arithmetic and @{text "insert ?x (insert ?y ?A) = insert ?y
-  (insert ?x ?A)"} for sets.  Such rules are common enough to merit
-  special attention.
-
-  Because ordinary rewriting loops given such rules, the Simplifier
-  employs a special strategy, called \emph{ordered rewriting}.
-  Permutative rules are detected and only applied if the rewriting
-  step decreases the redex wrt.\ a given term ordering.  For example,
-  commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then
-  stops, because the redex cannot be decreased further in the sense of
-  the term ordering.
-
-  The default is lexicographic ordering of term structure, but this
-  can also be changed locally for special applications via
-  @{index_ML Simplifier.set_termless} in Isabelle/ML.
-
-  \medskip Permutative rewrite rules are declared to the Simplifier
-  just like other rewrite rules.  Their special status is recognized
-  automatically, and their application is guarded by the term ordering
-  accordingly. *}
-
-
-subsubsection {* Rewriting with AC operators *}
-
-text {* Ordered rewriting is particularly effective in the case of
-  associative-commutative operators.  (Associativity by itself is not
-  permutative.)  When dealing with an AC-operator @{text "f"}, keep
-  the following points in mind:
-
-  \begin{itemize}
-
-  \item The associative law must always be oriented from left to
-  right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
-  orientation, if used with commutativity, leads to looping in
-  conjunction with the standard term order.
-
-  \item To complete your set of rewrite rules, you must add not just
-  associativity (A) and commutativity (C) but also a derived rule
-  \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.
-
-  \end{itemize}
-
-  Ordered rewriting with the combination of A, C, and LC sorts a term
-  lexicographically --- the rewriting engine imitates bubble-sort.
-*}
-
-locale AC_example =
-  fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
-  assumes assoc: "(x \<bullet> y) \<bullet> z = x \<bullet> (y \<bullet> z)"
-  assumes commute: "x \<bullet> y = y \<bullet> x"
-begin
-
-lemma left_commute: "x \<bullet> (y \<bullet> z) = y \<bullet> (x \<bullet> z)"
-proof -
-  have "(x \<bullet> y) \<bullet> z = (y \<bullet> x) \<bullet> z" by (simp only: commute)
-  then show ?thesis by (simp only: assoc)
-qed
-
-lemmas AC_rules = assoc commute left_commute
-
-text {* Thus the Simplifier is able to establish equalities with
-  arbitrary permutations of subterms, by normalizing to a common
-  standard form.  For example: *}
-
-lemma "(b \<bullet> c) \<bullet> a = xxx"
-  apply (simp only: AC_rules)
-  txt {* @{subgoals} *}
-  oops
-
-lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
-lemma "(b \<bullet> c) \<bullet> a = c \<bullet> (b \<bullet> a)" by (simp only: AC_rules)
-lemma "(b \<bullet> c) \<bullet> a = (c \<bullet> b) \<bullet> a" by (simp only: AC_rules)
-
-end
-
-text {* Martin and Nipkow \cite{martin-nipkow} discuss the theory and
-  give many examples; other algebraic structures are amenable to
-  ordered rewriting, such as boolean rings.  The Boyer-Moore theorem
-  prover \cite{bm88book} also employs ordered rewriting.
-*}
-
-
-subsubsection {* Re-orienting equalities *}
-
-text {* Another application of ordered rewriting uses the derived rule
-  @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
-  reverse equations.
-
-  This is occasionally useful to re-orient local assumptions according
-  to the term ordering, when other built-in mechanisms of
-  reorientation and mutual simplification fail to apply.  *}
-
-
-subsection {* Configuration options \label{sec:simp-config} *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
-    @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
-    @{attribute_def simp_debug} & : & @{text attribute} & default @{text false} \\
-  \end{tabular}
-  \medskip
-
-  These configuration options control further aspects of the Simplifier.
-  See also \secref{sec:config}.
-
-  \begin{description}
-
-  \item @{attribute simp_depth_limit} limits the number of recursive
-  invocations of the Simplifier during conditional rewriting.
-
-  \item @{attribute simp_trace} makes the Simplifier output internal
-  operations.  This includes rewrite steps, but also bookkeeping like
-  modifications of the simpset.
-
-  \item @{attribute simp_trace_depth_limit} limits the effect of
-  @{attribute simp_trace} to the given depth of recursive Simplifier
-  invocations (when solving conditions of rewrite rules).
-
-  \item @{attribute simp_debug} makes the Simplifier output some extra
-  information about internal operations.  This includes any attempted
-  invocation of simplification procedures.
-
-  \end{description}
-*}
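-
-text {* For example, tracing can be enabled locally for a single
-  proof, by using the configuration option as declaration attribute:
-*}
-
-lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0"
-  using [[simp_trace]] by simp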
-
-
-subsection {* Simplification procedures \label{sec:simproc} *}
-
-text {* Simplification procedures are ML functions that produce proven
-  rewrite rules on demand.  They are associated with higher-order
-  patterns that approximate the left-hand sides of equations.  The
-  Simplifier first matches the current redex against one of the LHS
-  patterns; if this succeeds, the corresponding ML function is
-  invoked, passing the Simplifier context and redex term.  Thus rules
-  may be specifically fashioned for particular situations, resulting
-  in a more powerful mechanism than term rewriting by a fixed set of
-  rules.
-
-  Any successful result needs to be a (possibly conditional) rewrite
-  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
-  rule will be applied just as any ordinary rewrite rule.  It is
-  expected to be already in \emph{internal form}, bypassing the
-  automatic preprocessing of object-level equivalences.
-
-  \begin{matharray}{rcl}
-    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    simproc & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
-      @{syntax text} \<newline> (@'identifier' (@{syntax nameref}+))?
-    ;
-
-    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "simproc_setup"} defines a named simplification
-  procedure that is invoked by the Simplifier whenever any of the
-  given term patterns match the current redex.  The implementation,
-  which is provided as ML source text, needs to be of type @{ML_type
-  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
-  cterm} represents the current redex @{text r} and the result is
-  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
-  generalized version), or @{ML NONE} to indicate failure.  The
-  @{ML_type simpset} argument holds the full context of the current
-  Simplifier invocation, including the actual Isar proof context.  The
-  @{ML_type morphism} informs about the difference of the original
-  compilation context wrt.\ the one of the actual application later
-  on.  The optional @{keyword "identifier"} specifies theorems that
-  represent the logical content of the abstract theory of this
-  simproc.
-
-  Morphisms and identifiers are only relevant for simprocs that are
-  defined within a local target context, e.g.\ in a locale.
-
-  \item @{text "simproc add: name"} and @{text "simproc del: name"}
-  add or delete named simprocs in the current Simplifier context.  The
-  default is to add a simproc.  Note that @{command "simproc_setup"}
-  already adds the new simproc to the subsequent context.
-
-  \end{description}
-*}
-
-
-subsubsection {* Example *}
-
-text {* The following simplification procedure for @{thm
-  [source=false, show_types] unit_eq} in HOL performs fine-grained
-  control over rule application, beyond higher-order pattern matching.
-  Declaring @{thm unit_eq} as @{attribute simp} directly would make
-  the simplifier loop!  Note that a version of this simplification
-  procedure is already active in Isabelle/HOL.  *}
-
-simproc_setup unit ("x::unit") = {*
-  fn _ => fn _ => fn ct =>
-    if HOLogic.is_unit (term_of ct) then NONE
-    else SOME (mk_meta_eq @{thm unit_eq})
-*}
-
-text {* Since the Simplifier applies simplification procedures
-  frequently, it is important to make the failure check in ML
-  reasonably fast. *}
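-
-text {* The @{attribute simproc} attribute can remove the simproc
-  defined above from the context again, e.g.: *}
-
-declare [[simproc del: unit]]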
-
-
-subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
-
-text {* The core term-rewriting engine of the Simplifier is normally
-  used in combination with some add-on components that modify the
-  strategy and allow the integration of other non-Simplifier proof tools.
-  These may be reconfigured in ML as explained below.  Even if the
-  default strategies of object-logics like Isabelle/HOL are used
-  unchanged, it helps to understand how the standard Simplifier
-  strategies work. *}
-
-
-subsubsection {* The subgoaler *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
-  Proof.context -> Proof.context"} \\
-  @{index_ML Simplifier.prems_of: "Proof.context -> thm list"} \\
-  \end{mldecls}
-
-  The subgoaler is the tactic used to solve subgoals arising out of
-  conditional rewrite rules or congruence rules.  The default should
-  be simplification itself.  In rare situations, this strategy may
-  need to be changed.  For example, if the premise of a conditional
-  rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
-  ?m < ?n"}, the default strategy could loop.  % FIXME !??
-
-  \begin{description}
-
-  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
-  subgoaler of the context to @{text "tac"}.  The tactic will
-  be applied to the context of the running Simplifier instance.
-
-  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
-  set of premises from the context.  This may be non-empty only if
-  the Simplifier has been told to utilize local assumptions in the
-  first place (cf.\ the options in \secref{sec:simp-meth}).
-
-  \end{description}
-
-  As an example, consider the following alternative subgoaler:
-*}
-
-ML {*
-  fun subgoaler_tac ctxt =
-    assume_tac ORELSE'
-    resolve_tac (Simplifier.prems_of ctxt) ORELSE'
-    asm_simp_tac ctxt
-*}
-
-text {* This tactic first tries to solve the subgoal by assumption or
-  by resolving with one of the premises, calling simplification
-  only if that fails. *}
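-
-text {* It could be installed, for instance, as part of a bespoke
-  simplification tactic like this (a sketch in Isabelle/ML, without
-  further tuning): *}
-
-ML {*
-  (*simplify with the alternative subgoaler from above*)
-  fun my_simp_tac ctxt =
-    asm_full_simp_tac (Simplifier.set_subgoaler subgoaler_tac ctxt)
-*}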
-
-
-subsubsection {* The solver *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML_type solver} \\
-  @{index_ML Simplifier.mk_solver: "string ->
-  (Proof.context -> int -> tactic) -> solver"} \\
-  @{index_ML_op setSolver: "Proof.context * solver -> Proof.context"} \\
-  @{index_ML_op addSolver: "Proof.context * solver -> Proof.context"} \\
-  @{index_ML_op setSSolver: "Proof.context * solver -> Proof.context"} \\
-  @{index_ML_op addSSolver: "Proof.context * solver -> Proof.context"} \\
-  \end{mldecls}
-
-  A solver is a tactic that attempts to solve a subgoal after
-  simplification.  Its core functionality is to prove trivial subgoals
-  such as @{prop "True"} and @{text "t = t"}, but object-logics might
-  be more ambitious.  For example, Isabelle/HOL performs a restricted
-  version of linear arithmetic here.
-
-  Solvers are packaged up in abstract type @{ML_type solver}, with
-  @{ML Simplifier.mk_solver} as the only operation to create a solver.
-
-  \medskip Rewriting does not instantiate unknowns.  For example,
-  rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
-  instantiating @{text "?A"}.  The solver, however, is an arbitrary
-  tactic and may instantiate unknowns as it pleases.  This is the only
-  way the Simplifier can handle a conditional rewrite rule whose
-  condition contains extra variables.  When a simplification tactic is
-  to be combined with other provers, especially with the Classical
-  Reasoner, it is important whether it can be considered safe or not.
-  For this reason a simpset contains two solvers: safe and unsafe.
-
-  The standard simplification strategy solely uses the unsafe solver,
-  which is appropriate in most cases.  For special applications where
-  the simplification process is not allowed to instantiate unknowns
-  within the goal, simplification starts with the safe solver, but may
-  still apply the ordinary unsafe one in nested simplifications for
-  conditional rules or congruences. Note that in this way the overall
-  tactic is not totally safe: it may instantiate unknowns that also
-  appear in other subgoals.
-
-  \begin{description}
-
-  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
-  "tac"} into a solver; the @{text "name"} is only attached as a
-  comment and has no further significance.
-
-  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
-  the safe solver of @{text "ctxt"}.
-
-  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
-  additional safe solver; it will be tried after the solvers which had
-  already been present in @{text "ctxt"}.
-
-  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
-  unsafe solver of @{text "ctxt"}.
-
-  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
-  additional unsafe solver; it will be tried after the solvers which
-  had already been present in @{text "ctxt"}.
-
-  \end{description}
-
-  \medskip The solver tactic is invoked with the context of the
-  running Simplifier.  Further operations
-  may be used to retrieve relevant information, such as the list of
-  local Simplifier premises via @{ML Simplifier.prems_of} --- this
-  list may be non-empty only if the Simplifier runs in a mode that
-  utilizes local assumptions (see also \secref{sec:simp-meth}).  The
-  solver is also presented the full goal including its assumptions in
-  any case.  Thus it can use these (e.g.\ by calling @{ML
-  assume_tac}), even if the Simplifier proper happens to ignore local
-  premises at the moment.
-
-  \medskip As explained before, the subgoaler is also used to solve
-  the premises of congruence rules.  These are usually of the form
-  @{text "s = ?x"}, where @{text "s"} needs to be simplified and
-  @{text "?x"} needs to be instantiated with the result.  Typically,
-  the subgoaler will invoke the Simplifier at some point, which will
-  eventually call the solver.  For this reason, solver tactics must be
-  prepared to solve goals of the form @{text "t = ?x"}, usually by
-  reflexivity.  In particular, reflexivity should be tried before any
-  of the fancy automated proof tools.
-
-  It may even happen that due to simplification the subgoal is no
-  longer an equality.  For example, @{text "False \<longleftrightarrow> ?Q"} could be
-  rewritten to @{text "\<not> ?Q"}.  To cover this case, the solver could
-  try resolving with the theorem @{text "\<not> False"} of the
-  object-logic.
-
-  \medskip
-
-  \begin{warn}
-  If a premise of a congruence rule cannot be proved, then the
-  congruence is ignored.  This should only happen if the rule is
-  \emph{conditional} --- that is, contains premises not of the form
-  @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
-  or possibly the subgoaler or solver, is faulty.
-  \end{warn}
-*}
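-
-text {* The following minimal sketch defines and installs an unsafe
-  solver that merely tries reflexivity and the local premises ---
-  realistic solvers of object-logics are considerably more ambitious:
-*}
-
-ML {*
-  (*try reflexivity first, then the local Simplifier premises*)
-  val trivial_solver =
-    Simplifier.mk_solver "trivial" (fn ctxt =>
-      resolve_tac (@{thm refl} :: Simplifier.prems_of ctxt));
-
-  fun add_trivial_solver ctxt = ctxt addSolver trivial_solver;
-*}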
-
-
-subsubsection {* The looper *}
-
-text {*
-  \begin{mldecls}
-  @{index_ML_op setloop: "Proof.context *
-  (Proof.context -> int -> tactic) -> Proof.context"} \\
-  @{index_ML_op addloop: "Proof.context *
-  (string * (Proof.context -> int -> tactic))
-  -> Proof.context"} \\
-  @{index_ML_op delloop: "Proof.context * string -> Proof.context"} \\
-  @{index_ML Splitter.add_split: "thm -> Proof.context -> Proof.context"} \\
-  @{index_ML Splitter.del_split: "thm -> Proof.context -> Proof.context"} \\
-  \end{mldecls}
-
-  The looper is a list of tactics that are applied after
-  simplification, in case the solver failed to solve the simplified
-  goal.  If the looper succeeds, the simplification process is started
-  all over again.  Each of the subgoals generated by the looper is
-  attacked in turn, in reverse order.
-
-  A typical looper is \emph{case splitting}: the expansion of a
-  conditional.  Another possibility is to apply an elimination rule on
-  the assumptions.  More adventurous loopers could start an induction.
-
-  \begin{description}
-
-  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
-  looper tactic of @{text "ctxt"}.
-
-  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
-  additional looper tactic with name @{text "name"}, which is
-  significant for managing the collection of loopers.  The tactic will
-  be tried after the looper tactics that had already been present in
-  @{text "ctxt"}.
-
-  \item @{text "ctxt delloop name"} deletes the looper tactic that was
-  associated with @{text "name"} from @{text "ctxt"}.
-
-  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
-  for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
-
-  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
-  tactic corresponding to @{text thm} from the looper tactics of
-  @{text "ctxt"}.
-
-  \end{description}
-
-  The splitter replaces applications of a given function; the
-  right-hand side of the replacement can be anything.  For example,
-  here is a splitting rule for conditional expressions:
-
-  @{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
-
-  Another example is the elimination operator for Cartesian products
-  (which happens to be called @{text split} in Isabelle/HOL):
-
-  @{text [display] "?P (split ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (f a b))"}
-
-  For technical reasons, there is a distinction between case splitting
-  in the conclusion and in the premises of a subgoal.  The former is
-  done by @{ML Splitter.split_tac} with rules like @{thm [source]
-  split_if} or @{thm [source] option.split}, which do not split the
-  subgoal, while the latter is done by @{ML Splitter.split_asm_tac}
-  with rules like @{thm [source] split_if_asm} or @{thm [source]
-  option.split_asm}, which split the subgoal.  The function @{ML
-  Splitter.add_split} automatically takes care of which tactic to
-  call, analyzing the form of the rules given as argument; it is the
-  same operation behind the @{text "split"} attribute or method modifier
-  syntax in the Isar source language.
-
-  Case splits should be allowed only when necessary; they are
-  expensive and hard to control.  Case-splitting on if-expressions in
-  the conclusion is usually beneficial, so it is enabled by default in
-  Isabelle/HOL and Isabelle/FOL/ZF.
-
-  \begin{warn}
-  With @{ML Splitter.split_asm_tac} as looper component, the
-  Simplifier may split subgoals!  This might cause unexpected problems
-  in tactic expressions that silently assume 0 or 1 subgoals after
-  simplification.
-  \end{warn}
-*}
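-
-text {* For example, the @{text "split"} method modifier adds split
-  rules as looper tactics; the declaration below is redundant in
-  Isabelle/HOL, where @{thm [source] split_if} is active by default,
-  but it illustrates the syntax: *}
-
-lemma "P (if b then x else y) \<longleftrightarrow> (b \<longrightarrow> P x) \<and> (\<not> b \<longrightarrow> P y)"
-  by (simp split: split_if)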
-
-
-subsection {* Forward simplification \label{sec:simp-forward} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def simplified} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute simplified} opt? @{syntax thmrefs}?
-    ;
-
-    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
-  \<close>}
-
-  \begin{description}
-  
-  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
-  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
-  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
-  The result is fully simplified by default, including assumptions and
-  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
-  the same way as for the @{text simp} method.
-
-  Note that forward simplification restricts the simplifier to its
-  most basic operation of term rewriting; solver and looper tactics
-  (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
-  @{attribute simplified} attribute should be only rarely required
-  under normal circumstances.
-
-  \end{description}
-*}
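-
-text {* For example, a local fact may be simplified on the spot before
-  it is used: *}
-
-notepad
-begin
-  assume a: "0 + (x::nat) = y"
-  note a [simplified]  -- {* result: @{text "x = y"} *}
-end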
-
-
-section {* The Classical Reasoner \label{sec:classical} *}
-
-subsection {* Basic concepts *}
-
-text {* Although Isabelle is generic, many users will be working in
-  some extension of classical first-order logic.  Isabelle/ZF is built
-  upon theory FOL, while Isabelle/HOL conceptually contains
-  first-order logic as a fragment.  Theorem-proving in predicate logic
-  is undecidable, but many automated strategies have been developed to
-  assist in this task.
-
-  Isabelle's classical reasoner is a generic package that accepts
-  certain information about a logic and delivers a suite of automatic
-  proof tools, based on rules that are classified and declared in the
-  context.  These proof procedures are slow and simplistic compared
-  with high-end automated theorem provers, but they can save
-  considerable time and effort in practice.  They can prove theorems
-  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
-  milliseconds (including full proof reconstruction): *}
-
-lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
-  by blast
-
-lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
-  by blast
-
-text {* The proof tools are generic.  They are not restricted to
-  first-order logic, and have been heavily used in the development of
-  the Isabelle/HOL library and applications.  The tactics can be
-  traced, and their components can be called directly; in this manner,
-  any proof can be viewed interactively.  *}
-
-
-subsubsection {* The sequent calculus *}
-
-text {* Isabelle supports natural deduction, which is easy to use for
-  interactive proof.  But natural deduction does not easily lend
-  itself to automation, and has a bias towards intuitionism.  For
-  certain proofs in classical logic, it cannot be called natural.
-  The \emph{sequent calculus}, a generalization of natural deduction,
-  is easier to automate.
-
-  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
-  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
-  logic, sequents can equivalently be made from lists or multisets of
-  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
-  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
-  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
-  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
-  sequent is \textbf{basic} if its left and right sides have a common
-  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
-  valid.
-
-  Sequent rules are classified as \textbf{right} or \textbf{left},
-  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
-  Rules that operate on the right side are analogous to natural
-  deduction's introduction rules, and left rules are analogous to
-  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
-  is the rule
-  \[
-  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
-  \]
-  Applying the rule backwards, this breaks down some implication on
-  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
-  the sets of formulae that are unaffected by the inference.  The
-  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
-  single rule
-  \[
-  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
-  \]
-  This breaks down some disjunction on the right side, replacing it by
-  both disjuncts.  Thus, the sequent calculus is a kind of
-  multiple-conclusion logic.
-
-  To illustrate the use of multiple formulae on the right, let us
-  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
-  backwards, we reduce this formula to a basic sequent:
-  \[
-  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
-    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
-      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
-        {@{text "P, Q \<turnstile> Q, P"}}}}
-  \]
-
-  This example is typical of the sequent calculus: start with the
-  desired theorem and apply rules backwards in a fairly arbitrary
-  manner.  This yields a surprisingly effective proof procedure.
-  Quantifiers add only a few complications, since Isabelle handles
-  parameters and schematic variables.  See \cite[Chapter
-  10]{paulson-ml2} for further discussion.  *}
-
-
-subsubsection {* Simulating sequents by natural deduction *}
-
-text {* Isabelle can represent sequents directly, as in the
-  object-logic LK.  But natural deduction is easier to work with, and
-  most object-logics employ it.  Fortunately, we can simulate the
-  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
-  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> ... \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
-  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
-  Elim-resolution plays a key role in simulating sequent proofs.
-
-  We can easily handle reasoning on the left.  Elim-resolution with
-  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
-  a similar effect as the corresponding sequent rules.  For the other
-  connectives, we use sequent-style elimination rules instead of
-  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
-  But note that the rule @{text "(\<not>L)"} has no effect under our
-  representation of sequents!
-  \[
-  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
-  \]
-
-  What about reasoning on the right?  Introduction rules can only
-  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
-  other right-side formulae are represented as negated assumptions,
-  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
-  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
-  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
-
-  To ensure that swaps occur only when necessary, each introduction
-  rule is converted into a swapped form: it is resolved with the
-  second premise of @{text "(swap)"}.  The swapped form of @{text
-  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
-  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
-
-  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
-  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
-
-  Swapped introduction rules are applied using elim-resolution, which
-  deletes the negated formula.  Our representation of sequents also
-  requires the use of ordinary introduction rules.  If we had no
-  regard for readability of intermediate goal states, we could treat
-  the right side more uniformly by representing sequents as @{text
-  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
-*}
-
-
-subsubsection {* Extra rules for the sequent calculus *}
-
-text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
-  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
-  In addition, we need rules to embody the classical equivalence
-  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
-  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
-  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
-
-  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
-  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
-
-  Quantifier replication also requires special rules.  In classical
-  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
-  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
-  \[
-  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
-  \qquad
-  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
-  \]
-  Thus both kinds of quantifier may be replicated.  Theorems requiring
-  multiple uses of a universal formula are easy to invent; consider
-  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
-  @{text "n > 1"}.  Natural examples of the multiple use of an
-  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
-  \<longrightarrow> P y"}.
-
-  Forgoing quantifier replication loses completeness, but gains
-  decidability, since the search space becomes finite.  Many useful
-  theorems can be proved without replication, and the search generally
-  delivers its verdict in a reasonable time.  To adopt this approach,
-  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
-  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
-  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
-  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
-
-  Elim-resolution with this rule will delete the universal formula
-  after a single use.  To replicate universal quantifiers, replace the
-  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
-
-  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
-  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
-
-  All introduction rules mentioned above are also useful in swapped
-  form.
-
-  Replication makes the search space infinite; we must apply the rules
-  with care.  The classical reasoner distinguishes between safe and
-  unsafe rules, applying the latter only when there is no alternative.
-  Depth-first search may well go down a blind alley; best-first search
-  is better behaved in an infinite search space.  However, quantifier
-  replication is too expensive to prove any but the simplest theorems.
-*}
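-
-text {* The examples of quantifier replication mentioned above can be
-  reproduced directly; @{method blast} replicates quantifiers as
-  required: *}
-
-lemma "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f (f a))"
-  by blast
-
-lemma "\<exists>x. \<forall>y. P x \<longrightarrow> P y"
-  by blast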
-
-
-subsection {* Rule declarations *}
-
-text {* The proof tools of the Classical Reasoner depend on
-  collections of rules declared in the context, which are classified
-  as introduction, elimination or destruction and as \emph{safe} or
-  \emph{unsafe}.  In general, safe rules can be attempted blindly,
-  while unsafe rules must be used with care.  A safe rule must never
-  reduce a provable goal to an unprovable set of subgoals.
-
-  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
-  \<or> Q"} to @{text "P"}, which might turn out as premature choice of an
-  unprovable subgoal.  Any rule is unsafe whose premises contain new
-  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
-  unsafe, since it is applied via elim-resolution, which discards the
-  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
-  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
-  unsafe for similar reasons.  The quantifier duplication rule @{text
-  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
-  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
-  looping.  In classical first-order logic, all rules are safe except
-  those mentioned above.
-
-  The safe~/ unsafe distinction is vague, and may be regarded merely
-  as a way of giving some rules priority over others.  One could argue
-  that @{text "(\<or>E)"} is unsafe, because repeated application of it
-  could generate exponentially many subgoals.  Induction rules are
-  unsafe because inductive proofs are difficult to set up
-  automatically.  Any inference is unsafe that instantiates an unknown
-  in the proof state --- thus matching must be used, rather than
-  unification.  Even proof by assumption is unsafe if it instantiates
-  unknowns shared with other subgoals.
-
-  \begin{matharray}{rcl}
-    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def intro} & : & @{text attribute} \\
-    @{attribute_def elim} & : & @{text attribute} \\
-    @{attribute_def dest} & : & @{text attribute} \\
-    @{attribute_def rule} & : & @{text attribute} \\
-    @{attribute_def iff} & : & @{text attribute} \\
-    @{attribute_def swapped} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
-    ;
-    @@{attribute rule} 'del'
-    ;
-    @@{attribute iff} (((() | 'add') '?'?) | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "print_claset"} prints the collection of rules
-  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
-  within the context.
-
-  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
-  declare introduction, elimination, and destruction rules,
-  respectively.  By default, rules are considered as \emph{unsafe}
-  (i.e.\ not applied blindly, but subject to backtracking), while ``@{text
-  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
-  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
-  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
-  of the @{method rule} method).  The optional natural number
-  specifies an explicit weight argument, which is ignored by the
-  automated reasoning tools, but determines the search order of single
-  rule steps.
-
-  Introduction rules are those that can be applied using ordinary
-  resolution.  Their swapped forms are generated internally, which
-  will be applied using elim-resolution.  Elimination rules are
-  applied using elim-resolution.  Rules are sorted by the number of
-  new subgoals they will yield; rules that generate the fewest
-  subgoals will be tried first.  Otherwise, later declarations take
-  precedence over earlier ones.
-
-  Rules already present in the context with the same classification
-  are ignored.  A warning is printed if the rule has already been
-  added with some other classification, but the rule is added anyway
-  as requested.
-
-  \item @{attribute rule}~@{text del} deletes all occurrences of a
-  rule from the classical context, regardless of its classification as
-  introduction~/ elimination~/ destruction and safe~/ unsafe.
-
-  \item @{attribute iff} declares logical equivalences to the
-  Simplifier and the Classical reasoner at the same time.
-  Non-conditional rules result in a safe introduction and elimination
-  pair; conditional ones are considered unsafe.  Rules with negative
-  conclusion are automatically inverted (using @{text "\<not>"}-elimination
-  internally).
-
-  The ``@{text "?"}'' version of @{attribute iff} declares rules to
-  the Isabelle/Pure context only, and omits the Simplifier
-  declaration.
-
-  \item @{attribute swapped} turns an introduction rule into an
-  elimination, by resolving with the classical swap principle @{text
-  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
-  illustrative purposes: the Classical Reasoner already swaps rules
-  internally as explained above.
-
-  \end{description}
-*}
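-
-text {* A minimal sketch of such declarations follows; the rule name
-  is made up for illustration, and the @{text "dest!"} classification
-  is adequate, since the rule preserves logical equivalence: *}
-
-lemma not_disjD [dest!]: "\<not> (P \<or> Q) \<Longrightarrow> \<not> P \<and> \<not> Q"
-  by blast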
-
-
-subsection {* Structured methods *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def rule} & : & @{text method} \\
-    @{method_def contradiction} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method rule} @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{method rule} as offered by the Classical Reasoner is a
-  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
-  versions work the same, but the classical version observes the
-  classical rule context in addition to that of Isabelle/Pure.
-
-  Common object logics (HOL, ZF, etc.) declare a rich collection of
-  classical rules (even if these would qualify as intuitionistic
-  ones), but only few declarations to the rule context of
-  Isabelle/Pure (\secref{sec:pure-meth-att}).
-
-  \item @{method contradiction} solves some goal by contradiction,
-  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
-  facts, which are guaranteed to participate, may appear in either
-  order.
-
-  \end{description}
-*}
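-
-text {* For example, within a local proof body: *}
-
-notepad
-begin
-  assume a: "\<not> A" and b: "A"
-  from a b have "B" by contradiction
-  from b a have "B" by contradiction
-end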
-
-
-subsection {* Fully automated methods *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def blast} & : & @{text method} \\
-    @{method_def auto} & : & @{text method} \\
-    @{method_def force} & : & @{text method} \\
-    @{method_def fast} & : & @{text method} \\
-    @{method_def slow} & : & @{text method} \\
-    @{method_def best} & : & @{text method} \\
-    @{method_def fastforce} & : & @{text method} \\
-    @{method_def slowsimp} & : & @{text method} \\
-    @{method_def bestsimp} & : & @{text method} \\
-    @{method_def deepen} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
-    ;
-    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
-    ;
-    @@{method force} (@{syntax clasimpmod} * )
-    ;
-    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
-    ;
-    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
-      (@{syntax clasimpmod} * )
-    ;
-    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
-    ;
-    @{syntax_def clamod}:
-      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
-    ;
-    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
-      ('cong' | 'split') (() | 'add' | 'del') |
-      'iff' (((() | 'add') '?'?) | 'del') |
-      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method blast} is a separate classical tableau prover that
-  uses the same classical rule declarations as explained before.
-
-  Proof search is coded directly in ML using special data structures.
-  A successful proof is then reconstructed using regular Isabelle
-  inferences.  It is faster and more powerful than the other classical
-  reasoning tools, but has major limitations too.
-
-  \begin{itemize}
-
-  \item It does not use the classical wrapper tacticals, such as the
-  integration with the Simplifier of @{method fastforce}.
-
-  \item It does not perform higher-order unification, as needed by the
-  rule @{thm [source=false] rangeI} in HOL.  There are often
-  alternatives to such rules, for example @{thm [source=false]
-  range_eqI}.
-
-  \item Function variables may only be applied to parameters of the
-  subgoal.  (This restriction arises because the prover does not use
-  higher-order unification.)  If other function variables are present
-  then the prover will fail with the message \texttt{Function Var's
-  argument not a bound variable}.
-
-  \item Its proof strategy is more general than @{method fast} but can
-  be slower.  If @{method blast} fails or seems to be running forever,
-  try @{method fast} and the other proof tools described below.
-
-  \end{itemize}
-
-  The optional integer argument specifies a bound for the number of
-  unsafe steps used in a proof.  By default, @{method blast} starts
-  with a bound of 0 and increases it successively to 20.  In contrast,
-  @{text "(blast lim)"} tries to prove the goal using a search bound
-  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
-  be made much faster by supplying the successful search bound to this
-  proof method instead.
-
-  \item @{method auto} combines classical reasoning with
-  simplification.  It is intended for situations where there are a lot
-  of mostly trivial subgoals; it proves all the easy ones, leaving the
-  ones it cannot prove.  Occasionally, attempting to prove the hard
-  ones may take a long time.
-
-  The optional depth arguments in @{text "(auto m n)"} refer to its
-  builtin classical reasoning procedures: @{text m} (default 4) is for
-  @{method blast}, which is tried first, and @{text n} (default 2) is
-  for a slower but more general alternative that also takes wrappers
-  into account.
-
-  \item @{method force} is intended to prove the first subgoal
-  completely, using many fancy proof tools and performing a rather
-  exhaustive search.  As a result, proof attempts may take rather long
-  or diverge easily.
-
-  \item @{method fast}, @{method best}, @{method slow} attempt to
-  prove the first subgoal using sequent-style reasoning as explained
-  before.  Unlike @{method blast}, they construct proofs directly in
-  Isabelle.
-
-  There is a difference in search strategy and back-tracking: @{method
-  fast} uses depth-first search and @{method best} uses best-first
-  search (guided by a heuristic function: normally the total size of
-  the proof state).
-
-  Method @{method slow} is like @{method fast}, but conducts a broader
-  search: it may, when backtracking from a failed proof attempt, undo
-  even the step of proving a subgoal by assumption.
-
-  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
-  are like @{method fast}, @{method slow}, @{method best},
-  respectively, but use the Simplifier as additional wrapper. The name
-  @{method fastforce} reflects the behaviour of this popular method
-  better without requiring an understanding of its implementation.
-
-  \item @{method deepen} works by exhaustive search up to a certain
-  depth.  The start depth is 4 (unless specified explicitly), and the
-  depth is increased iteratively up to 10.  Unsafe rules are modified
-  to preserve the formula they act on, so that it can be used repeatedly.
-  This method can prove more goals than @{method fast}, but is much
-  slower, for example if the assumptions have many universal
-  quantifiers.
-
-  \end{description}
-
-  All of the above methods support additional modifiers of the context
-  of classical (and simplifier) rules, but the ones related to the
-  Simplifier are explicitly prefixed by @{text simp} here.  The
-  semantics of these ad-hoc rule declarations is analogous to the
-  attributes given before.  Facts provided by forward chaining are
-  inserted into the goal before commencing proof search.
-*}
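-
-text {* For example, the @{text "simp add"} modifier supplies
-  additional rewrite rules to the simplification part of @{method
-  auto}.  The definition below is made up for illustration; recall
-  that plain @{command "definition"} does not declare its defining
-  equation to the Simplifier: *}
-
-definition "sq (n::nat) = n * n"
-
-lemma "sq 0 = 0 \<and> sq 1 = 1"
-  by (auto simp add: sq_def)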
-
-
-subsection {* Partially automated methods *}
-
-text {* These proof methods may help in situations when the
-  fully-automated tools fail.  The result is a simpler subgoal that
-  can be tackled by other means, such as by manual instantiation of
-  quantifiers.
-
-  \begin{matharray}{rcl}
-    @{method_def safe} & : & @{text method} \\
-    @{method_def clarify} & : & @{text method} \\
-    @{method_def clarsimp} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
-    ;
-    @@{method clarsimp} (@{syntax clasimpmod} * )
-  \<close>}
-
-  \begin{description}
-
-  \item @{method safe} repeatedly performs safe steps on all subgoals.
-  It is deterministic, with at most one outcome.
-
-  \item @{method clarify} performs a series of safe steps without
-  splitting subgoals; see also @{method clarify_step}.
-
-  \item @{method clarsimp} acts like @{method clarify}, but also does
-  simplification.  Note that if the Simplifier context includes a
-  splitter for the premises, the subgoal may still be split.
-
-  \end{description}
-*}
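-
-text {* For example, @{method clarify} reduces the following goal to
-  its essential core, leaving the unsafe existential introduction to
-  the user: *}
-
-lemma "(\<forall>x. P x \<longrightarrow> Q x) \<and> (\<exists>x. P x) \<longrightarrow> (\<exists>x. Q x)"
-  apply clarify
-  txt {* @{subgoals} *}
-  oops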
-
-
-subsection {* Single-step tactics *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def safe_step} & : & @{text method} \\
-    @{method_def inst_step} & : & @{text method} \\
-    @{method_def step} & : & @{text method} \\
-    @{method_def slow_step} & : & @{text method} \\
-    @{method_def clarify_step} & : &  @{text method} \\
-  \end{matharray}
-
-  These are the primitive tactics behind the automated proof methods
-  of the Classical Reasoner.  By calling them yourself, you can
-  execute these procedures one step at a time.
-
-  \begin{description}
-
-  \item @{method safe_step} performs a safe step on the first subgoal.
-  The safe wrapper tacticals are applied to a tactic that may include
-  proof by assumption or Modus Ponens (taking care not to instantiate
-  unknowns), or substitution.
-
-  \item @{method inst_step} is like @{method safe_step}, but allows
-  unknowns to be instantiated.
-
-  \item @{method step} is the basic step of the proof procedure; it
-  operates on the first subgoal.  The unsafe wrapper tacticals are
-  applied to a tactic that tries @{method safe}, @{method inst_step},
-  or applies an unsafe rule from the context.
-
-  \item @{method slow_step} resembles @{method step}, but allows
-  backtracking between using safe rules with instantiation (@{method
-  inst_step}) and using unsafe rules.  The resulting search space is
-  larger.
-
-  \item @{method clarify_step} performs a safe step on the first
-  subgoal; no splitting step is applied.  For example, the subgoal
-  @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
-  Modus Ponens, etc., may be performed provided they do not
-  instantiate unknowns.  Assumptions of the form @{text "x = t"} may
-  be eliminated.  The safe wrapper tactical is applied.
-
-  \end{description}
-*}
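-
-text {* For illustration, a canonical example can be attacked by
-  single steps; here the first @{method safe_step} applies @{text
-  "(\<longrightarrow>I)"}, the second eliminates the conjunction in the assumptions:
-*}
-
-lemma "A \<and> B \<longrightarrow> B \<and> A"
-  apply safe_step
-  apply safe_step
-  txt {* @{subgoals} *}
-  oops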
-
-
-subsection {* Modifying the search step *}
-
-text {*
-  \begin{mldecls}
-    @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
-    @{index_ML_op addSWrapper: "Proof.context *
-  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
-    @{index_ML_op addSbefore: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op addSafter: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op delSWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
-    @{index_ML_op addWrapper: "Proof.context *
-  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
-    @{index_ML_op addbefore: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op addafter: "Proof.context *
-  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
-    @{index_ML_op delWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
-    @{index_ML addSss: "Proof.context -> Proof.context"} \\
-    @{index_ML addss: "Proof.context -> Proof.context"} \\
-  \end{mldecls}
-
-  The proof strategy of the Classical Reasoner is simple.  Perform as
-  many safe inferences as possible; or else, apply certain safe rules,
-  allowing instantiation of unknowns; or else, apply an unsafe rule.
-  The tactics also eliminate assumptions of the form @{text "x = t"}
-  by substitution if they have been set up to do so.  They may perform
-  a form of Modus Ponens: if there are assumptions @{text "P \<longrightarrow> Q"} and
-  @{text "P"}, then replace @{text "P \<longrightarrow> Q"} by @{text "Q"}.
-
-  The classical reasoning tools --- except @{method blast} --- allow
-  this basic proof strategy to be modified by applying two lists of
-  arbitrary \emph{wrapper tacticals} to it.  The first wrapper list,
-  which is considered to contain safe wrappers only, affects @{method
-  safe_step} and all the tactics that call it.  The second one, which
-  may contain unsafe wrappers, affects the unsafe parts of @{method
-  step}, @{method slow_step}, and the tactics that call them.  A
-  wrapper transforms each step of the search, for example by
-  attempting other tactics before or after the original step tactic.
-  All members of a wrapper list are applied in turn to the respective
-  step tactic.
-
-  Initially the two wrapper lists are empty, which means no
-  modification of the step tactics. Safe and unsafe wrappers are added
-  to a claset with the functions given below, supplying them with
-  wrapper names.  These names may be used to selectively delete
-  wrappers; a hypothetical usage sketch follows the list below.
-
-  \begin{description}
-
-  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
-  which should yield a safe tactic, to modify the existing safe step
-  tactic.
-
-  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
-  safe wrapper, such that it is tried \emph{before} each safe step of
-  the search.
-
-  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
-  safe wrapper, such that it is tried when a safe step of the search
-  would fail.
-
-  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
-  the given name.
-
-  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
-  modify the existing (unsafe) step tactic.
-
-  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
-  unsafe wrapper, such that its result is concatenated
-  \emph{before} the result of each unsafe step.
-
-  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
-  unsafe wrapper, such that its result is concatenated \emph{after}
-  the result of each unsafe step.
-
-  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
-  the given name.
-
-  \item @{text "addSss"} adds the simpset of the context to its
-  classical set. The assumptions and goal will be simplified, in a
-  rather safe way, after each safe step of the search.
-
-  \item @{text "addss"} adds the simpset of the context to its
-  classical set. The assumptions and goal will be simplified before
-  each unsafe step of the search.
-
-  \end{description}
-*}
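-
-text {* For illustration, here is a hypothetical safe wrapper (the
-  name @{text "my_wrapper"} and the setup are made up for this
-  sketch) that tries plain proof by assumption before each safe
-  step; the combinator @{ML K} discards the context argument: *}
-
-ML {*
-  (*add a safe wrapper: try assume_tac before each safe step*)
-  fun add_my_wrapper ctxt =
-    ctxt addSbefore ("my_wrapper", K assume_tac);
-
-  (*the wrapper can be removed again by its name*)
-  fun del_my_wrapper ctxt =
-    ctxt delSWrapper "my_wrapper";
-*}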
-
-
-section {* Object-logic setup \label{sec:object-logic} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{method_def atomize} & : & @{text method} \\
-    @{attribute_def atomize} & : & @{text attribute} \\
-    @{attribute_def rule_format} & : & @{text attribute} \\
-    @{attribute_def rulify} & : & @{text attribute} \\
-  \end{matharray}
-
-  The very starting point for any Isabelle object-logic is a ``truth
-  judgment'' that links object-level statements to the meta-logic
-  (with its minimal language of @{text prop} that covers universal
-  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).
-
-  Common object-logics are sufficiently expressive to internalize rule
-  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
-  language.  This is useful in certain situations where a rule needs
-  to be viewed as an atomic statement from the meta-level perspective,
-  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.
-
-  From the following language elements, only the @{method atomize}
-  method and @{attribute rule_format} attribute are occasionally
-  required by end-users; the rest is for those who need to set up
-  their own object-logic.  In the latter case existing formulations of
-  Isabelle/FOL or Isabelle/HOL may be taken as realistic examples.
-
-  Generic tools may refer to the information provided by object-logic
-  declarations internally.
-
-  @{rail \<open>
-    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
-    ;
-    @@{attribute atomize} ('(' 'full' ')')?
-    ;
-    @@{attribute rule_format} ('(' 'noasm' ')')?
-  \<close>}
-
-  \begin{description}
-  
-  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
-  @{text c} as the truth judgment of the current object-logic.  Its
-  type @{text \<sigma>} should specify a coercion of the category of
-  object-level propositions to @{text prop} of the Pure meta-logic;
-  the mixfix annotation @{text "(mx)"} would typically just link the
-  object language (internally of syntactic category @{text logic})
-  with that of @{text prop}.  Only one @{command "judgment"}
-  declaration may be given in any theory development.
-  
-  \item @{method atomize} (as a method) rewrites any non-atomic
-  premises of a sub-goal, using the meta-level equations declared via
-  @{attribute atomize} (as an attribute) beforehand.  As a result,
-  heavily nested goals become amenable to fundamental operations such
-  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
-  "(full)"}'' option here means to turn the whole subgoal into an
-  object-statement (if possible), including the outermost parameters
-  and assumptions as well.
-
-  A typical collection of @{attribute atomize} rules for a particular
-  object-logic would provide an internalization for each of the
-  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
-  Meta-level conjunction should be covered as well (this is
-  particularly important for locales, see \secref{sec:locale}).
-
-  \item @{attribute rule_format} rewrites a theorem by the equalities
-  declared as @{attribute rulify} rules in the current object-logic.
-  By default, the result is fully normalized, including assumptions
-  and conclusions at any depth.  The @{text "(noasm)"} option
-  restricts the transformation to the conclusion of a rule; see also
-  the toy example below.
-
-  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
-  rule_format} is to replace (bounded) universal quantification
-  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
-  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
-
-  \end{description}
-*}
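-
-text {* As a toy example (the fact and its name @{text my_rule} are
-  made up for this sketch), the @{attribute rule_format} attribute
-  turns a proven statement over @{text "\<forall>"} and @{text "\<longrightarrow>"} into the
-  corresponding rule over @{text "\<And>"} and @{text "\<Longrightarrow>"}: *}
-
-lemma my_rule [rule_format]: "\<forall>x. x \<in> A \<longrightarrow> x \<in> A \<union> B"
-  by blast
-
-text {* The stored fact @{text my_rule} has the form @{text "x \<in> A \<Longrightarrow>
-  x \<in> A \<union> B"} (with schematic variables), ready for use with
-  @{method (Pure) rule}. *}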
-
-
-section {* Tracing higher-order unification *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
-    @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
-    @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
-    @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
-  \end{tabular}
-  \medskip
-
-  Higher-order unification works well in most practical situations,
-  but sometimes needs extra care to identify problems.  These tracing
-  options may help; a usage example appears at the end of this section.
-
-  \begin{description}
-
-  \item @{attribute unify_trace_simp} controls tracing of the
-  simplification phase of higher-order unification.
-
-  \item @{attribute unify_trace_types} controls warnings of
-  incompleteness, when unification is not considering all possible
-  instantiations of schematic type variables.
-
-  \item @{attribute unify_trace_bound} determines the depth at which
-  unification starts to print tracing information; 0 means full
-  tracing.  At the default value, tracing information is almost never
-  printed in practice.
-
-  \item @{attribute unify_search_bound} prevents unification from
-  searching past the given depth.  Because of this bound, higher-order
-  unification cannot return an infinite sequence, though it can return
-  an exponentially long one.  The search rarely approaches the default
-  value in practice.  If the search is cut off, unification prints a
-  warning ``Unification bound exceeded''.
-
-  \end{description}
-
-  \begin{warn}
-  Options for unification cannot be modified in a local context.  Only
-  the global theory content is taken into account.
-  \end{warn}
-*}
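-
-text {* Since these options only take effect for the global theory
-  (see the warning above), they may be adjusted by a toplevel
-  declaration like the following; the threshold values are arbitrary
-  choices for illustration. *}
-
-declare [[unify_trace_bound = 10, unify_search_bound = 100]]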
-
-end
--- a/src/Doc/Isar-Ref/HOL_Specific.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2677 +0,0 @@
-theory HOL_Specific
-imports Base Main "~~/src/HOL/Library/Old_Recdef" "~~/src/Tools/Adhoc_Overloading"
-begin
-
-chapter {* Higher-Order Logic *}
-
-text {* Isabelle/HOL is based on Higher-Order Logic, a polymorphic
-  version of Church's Simple Theory of Types.  HOL can be best
-  understood as a simply-typed version of classical set theory.  The
-  logic was first implemented in Gordon's HOL system
-  \cite{mgordon-hol}.  It extends Church's original logic
-  \cite{church40} by explicit type variables (naive polymorphism) and
-  a sound axiomatization scheme for new types based on subsets of
-  existing types.
-
-  Andrews's book \cite{andrews86} is a full description of the
-  original Church-style higher-order logic, with proofs of correctness
-  and completeness wrt.\ certain set-theoretic interpretations.  The
-  particular extensions of Gordon-style HOL are explained semantically
-  in two chapters of the 1993 HOL book \cite{pitts93}.
-
-  Experience with HOL over decades has demonstrated that higher-order
-  logic is widely applicable in many areas of mathematics and computer
-  science.  In a sense, Higher-Order Logic is simpler than First-Order
-  Logic, because there are fewer restrictions and special cases.  Note
-  that HOL is \emph{weaker} than FOL with axioms for ZF set theory,
-  which is traditionally considered the standard foundation of regular
-  mathematics, but for most applications this does not matter.  If you
-  prefer ML to Lisp, you will probably prefer HOL to ZF.
-
-  \medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
-  functional programming.  Function application is curried.  To apply
-  the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
-  arguments @{text a} and @{text b} in HOL, you simply write @{text "f
-  a b"} (as in ML or Haskell).  There is no ``apply'' operator; the
-  existing application of the Pure @{text "\<lambda>"}-calculus is re-used.
-  Note that in HOL @{text "f (a, b)"} means ``@{text "f"} applied to
-  the pair @{text "(a, b)"} (which is notation for @{text "Pair a
-  b"}).  The latter typically introduces extra formal efforts that can
-  be avoided by currying functions by default.  Explicit tuples are as
-  infrequent in HOL formalizations as in good ML or Haskell programs.
-
-  \medskip Isabelle/HOL has a distinct feel, compared to other
-  object-logics like Isabelle/ZF.  It identifies object-level types
-  with meta-level types, taking advantage of the default
-  type-inference mechanism of Isabelle/Pure.  HOL fully identifies
-  object-level functions with meta-level functions, with native
-  abstraction and application.
-
-  These identifications allow Isabelle to support HOL particularly
-  nicely, but they also mean that HOL requires some sophistication
-  from the user.  In particular, it presupposes an understanding of
-  Hindley-Milner type-inference with type-classes, both of which are
-  used extensively in the standard libraries and applications.
-  Beginners can set @{attribute show_types} or even @{attribute
-  show_sorts} to get more explicit information about the result of
-  type-inference.  *}
-
-
-chapter {* Derived specification elements *}
-
-section {* Inductive and coinductive definitions \label{sec:hol-inductive} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "coinductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "coinductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "print_inductives"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def (HOL) mono} & : & @{text attribute} \\
-  \end{matharray}
-
-  An \emph{inductive definition} specifies the least predicate or set
-  @{text R} closed under given rules: applying a rule to elements of
-  @{text R} yields a result within @{text R}.  For example, a
-  structural operational semantics is an inductive definition of an
-  evaluation relation.
-
-  Dually, a \emph{coinductive definition} specifies the greatest
-  predicate or set @{text R} that is consistent with given rules:
-  every element of @{text R} can be seen as arising by applying a rule
-  to elements of @{text R}.  An important example is using
-  bisimulation relations to formalise equivalence of processes and
-  infinite data structures.
-
-  Both inductive and coinductive definitions are based on the
-  Knaster-Tarski fixed-point theorem for complete lattices.  The
-  collection of introduction rules given by the user determines a
-  functor on subsets of set-theoretic relations.  The required
-  monotonicity of the recursion scheme is proven as a prerequisite to
-  the fixed-point definition and the resulting consequences.  This
-  works by pushing inclusion through logical connectives and any other
-  operator that might be wrapped around recursive occurrences of the
-  defined relation: there must be a monotonicity theorem of the form
-  @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for each premise @{text "\<M> R t"} in an
-  introduction rule.  The default rule declarations of Isabelle/HOL
-  already take care of most common situations.
-
-  @{rail \<open>
-    (@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
-      @@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
-    @{syntax target}? \<newline>
-    @{syntax "fixes"} (@'for' @{syntax "fixes"})? (@'where' clauses)? \<newline>
-    (@'monos' @{syntax thmrefs})?
-    ;
-    clauses: (@{syntax thmdecl}? @{syntax prop} + '|')
-    ;
-    @@{attribute (HOL) mono} (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "inductive"} and @{command (HOL)
-  "coinductive"} define (co)inductive predicates from the introduction
-  rules.
-
-  The propositions given as @{text "clauses"} in the @{keyword
-  "where"} part are either rules of the usual @{text "\<And>/\<Longrightarrow>"} format
-  (with arbitrary nesting), or equalities using @{text "\<equiv>"}.  The
-  latter specifies extra-logical abbreviations in the sense of
-  @{command_ref abbreviation}.  Introducing abstract syntax
-  simultaneously with the actual introduction rules is occasionally
-  useful for complex specifications.
-
-  The optional @{keyword "for"} part contains a list of parameters of
-  the (co)inductive predicates that remain fixed throughout the
-  definition, in contrast to arguments of the relation that may vary
-  in each occurrence within the given @{text "clauses"}.
-
-  The optional @{keyword "monos"} declaration contains additional
-  \emph{monotonicity theorems}, which are required for each operator
-  applied to a recursive set in the introduction rules.
-
-  \item @{command (HOL) "inductive_set"} and @{command (HOL)
-  "coinductive_set"} are wrappers for to the previous commands for
-  native HOL predicates.  This allows to define (co)inductive sets,
-  where multiple arguments are simulated via tuples.
-
-  \item @{command "print_inductives"} prints (co)inductive definitions and
-  monotonicity rules.
-
-  \item @{attribute (HOL) mono} declares monotonicity rules in the
-  context.  These rules are involved in the automated monotonicity
-  proof of the above inductive and coinductive definitions.
-
-  \end{description}
-*}
-
-
-subsection {* Derived rules *}
-
-text {* A (co)inductive definition of @{text R} provides the following
-  main theorems:
-
-  \begin{description}
-
-  \item @{text R.intros} is the list of introduction rules as proven
-  theorems, for the recursive predicates (or sets).  The rules are
-  also available individually, using the names given them in the
-  theory file;
-
-  \item @{text R.cases} is the case analysis (or elimination) rule;
-
-  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
-  rule;
-
-  \item @{text R.simps} is the equation unrolling the fixpoint of the
-  predicate one step.
-
-  \end{description}
-
-  When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are
-  defined simultaneously, the list of introduction rules is called
-  @{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the case analysis rules are
-  called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
-  of mutual induction rules is called @{text
-  "R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
-*}
-
-
-subsection {* Monotonicity theorems *}
-
-text {* The context maintains a default set of theorems that are used
-  in monotonicity proofs.  New rules can be declared via the
-  @{attribute (HOL) mono} attribute.  See the main Isabelle/HOL
-  sources for some examples.  The general format of such monotonicity
-  theorems is as follows:
-
-  \begin{itemize}
-
-  \item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
-  monotonicity of inductive definitions whose introduction rules have
-  premises involving terms such as @{text "\<M> R t"}.
-
-  \item Monotonicity theorems for logical operators, which are of the
-  general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}.  For example, in
-  the case of the operator @{text "\<or>"}, the corresponding theorem is
-  \[
-  \infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
-  \]
-
-  \item De Morgan style equations for reasoning about the ``polarity''
-  of expressions, e.g.
-  \[
-  @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
-  @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
-  \]
-
-  \item Equations for reducing complex operators to more primitive
-  ones whose monotonicity can easily be proved, e.g.
-  \[
-  @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
-  @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
-  \]
-
-  \end{itemize}
-*}
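-
-text {* For instance, the following toy theorem (not part of the
-  default setup) fits the first format above, with @{text "\<M> \<equiv> \<lambda>X. X \<inter> C"},
-  and may be declared via the @{attribute (HOL) mono} attribute: *}
-
-lemma inter_mono_example [mono]: "A \<le> B \<Longrightarrow> A \<inter> C \<le> B \<inter> C"
-  by auto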
-
-subsubsection {* Examples *}
-
-text {* The finite powerset operator can be defined inductively like this: *}
-
-inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
-where
-  empty: "{} \<in> Fin A"
-| insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
-
-text {* The accessible part of a relation is defined as follows: *}
-
-inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
-  for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infix "\<prec>" 50)
-where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
-
-text {* Common logical connectives can be easily characterized as
-non-recursive inductive definitions with parameters, but without
-arguments. *}
-
-inductive AND for A B :: bool
-where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
-
-inductive OR for A B :: bool
-where "A \<Longrightarrow> OR A B"
-  | "B \<Longrightarrow> OR A B"
-
-inductive EXISTS for B :: "'a \<Rightarrow> bool"
-where "B a \<Longrightarrow> EXISTS B"
-
-text {* Here the @{text "cases"} or @{text "induct"} rules produced by
-  the @{command inductive} package coincide with the expected
-  elimination rules for Natural Deduction.  Already in the original
-  article by Gerhard Gentzen \cite{Gentzen:1935} there is a hint that
-  each connective can be characterized by its introductions, and the
-  elimination can be constructed systematically. *}
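-
-text {* The derived rules are directly usable; for instance, the
-  induction rule @{text "Fin.induct"} of the earlier example is
-  picked up automatically by the @{method induct} method when the
-  defining fact is chained in: *}
-
-lemma
-  assumes "B \<in> Fin A"
-  shows "B \<subseteq> A"
-  using assms by induct auto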
-
-
-section {* Recursive functions \label{sec:recursion} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def (HOL) "function"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def (HOL) "termination"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-    @{command_def (HOL) "fun_cases"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) primrec} @{syntax target}? @{syntax "fixes"} @'where' equations
-    ;
-    (@@{command (HOL) fun} | @@{command (HOL) function}) @{syntax target}? functionopts?
-      @{syntax "fixes"} \<newline> @'where' equations
-    ;
-
-    equations: (@{syntax thmdecl}? @{syntax prop} + '|')
-    ;
-    functionopts: '(' (('sequential' | 'domintros') + ',') ')'
-    ;
-    @@{command (HOL) termination} @{syntax term}?
-    ;
-    @@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "primrec"} defines primitive recursive
-  functions over datatypes (see also @{command_ref (HOL) datatype} and
-  @{command_ref (HOL) rep_datatype}).  The given @{text equations}
-  specify reduction rules that are produced by instantiating the
-  generic combinator for primitive recursion that is available for
-  each datatype.
-
-  Each equation needs to be of the form:
-
-  @{text [display] "f x\<^sub>1 \<dots> x\<^sub>m (C y\<^sub>1 \<dots> y\<^sub>k) z\<^sub>1 \<dots> z\<^sub>n = rhs"}
-
-  such that @{text C} is a datatype constructor, @{text rhs} contains
-  only the free variables on the left-hand side (or from the context),
-  and all recursive occurrences of @{text "f"} in @{text "rhs"} are of
-  the form @{text "f \<dots> y\<^sub>i \<dots>"} for some @{text i}.  At most one
-  reduction rule for each constructor can be given.  The order does
-  not matter.  For missing constructors, the function is defined to
-  return a default value, but this equation is made difficult for
-  users to access.
-
-  The reduction rules are declared as @{attribute simp} by default,
-  which enables standard proof methods like @{method simp} and
-  @{method auto} to normalize expressions of @{text "f"} applied to
-  datatype constructions, by simulating symbolic computation via
-  rewriting.
-
-  \item @{command (HOL) "function"} defines functions by general
-  wellfounded recursion. A detailed description with examples can be
-  found in \cite{isabelle-function}. The function is specified by a
-  set of (possibly conditional) recursive equations with arbitrary
-  pattern matching. The command generates proof obligations for the
-  completeness and the compatibility of patterns.
-
-  The defined function is considered partial, and the resulting
-  simplification rules (named @{text "f.psimps"}) and induction rule
-  (named @{text "f.pinduct"}) are guarded by a generated domain
-  predicate @{text "f_dom"}. The @{command (HOL) "termination"}
-  command can then be used to establish that the function is total.
-
-  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command
-  (HOL) "function"}~@{text "(sequential)"}, followed by automated
-  proof attempts regarding pattern matching and termination.  See
-  \cite{isabelle-function} for further details.
-
-  \item @{command (HOL) "termination"}~@{text f} commences a
-  termination proof for the previously defined function @{text f}.  If
-  this is omitted, the command refers to the most recent function
-  definition.  After the proof is closed, the recursive equations and
-  the induction principle are established.
-
-  \item @{command (HOL) "fun_cases"} generates specialized elimination
-  rules for function equations. It expects one or more function equations
-  and produces rules that eliminate the given equalities, following the cases
-  given in the function definition.
-  \end{description}
-
-  Recursive definitions introduced by the @{command (HOL) "function"}
-  command accommodate reasoning by induction (cf.\ @{method induct}):
-  rule @{text "f.induct"} refers to a specific induction rule, with
-  parameters named according to the user-specified equations. Cases
-  are numbered starting from 1.  For @{command (HOL) "primrec"}, the
-  induction principle coincides with structural induction on the
-  datatype where the recursion is carried out.
-
-  The equations provided by these packages may be referred to later as
-  the theorem list @{text "f.simps"}, where @{text f} is the (collective)
-  name of the functions defined.  Individual equations may be named
-  explicitly as well.
-
-  The @{command (HOL) "function"} command accepts the following
-  options.
-
-  \begin{description}
-
-  \item @{text sequential} enables a preprocessor which disambiguates
-  overlapping patterns by making them mutually disjoint.  Earlier
-  equations take precedence over later ones.  This allows the
-  specification to be given in a format very similar to functional
-  programming.
-  Note that the resulting simplification and induction rules
-  correspond to the transformed specification, not the one given
-  originally. This usually means that each equation given by the user
-  may result in several theorems.  Also note that this automatic
-  transformation only works for ML-style datatype patterns.
-
-  \item @{text domintros} enables the automated generation of
-  introduction rules for the domain predicate. While mostly not
-  needed, they can be helpful in some proofs about partial functions.
-
-  \end{description}
-*}
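-
-text {* As a toy illustration (not taken from the Isabelle sources),
-  @{command (HOL) "fun"} covers the common case where pattern
-  completeness and termination are established automatically: *}
-
-fun fib :: "nat \<Rightarrow> nat"
-where
-  "fib 0 = 0"
-| "fib (Suc 0) = 1"
-| "fib (Suc (Suc n)) = fib n + fib (Suc n)"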
-
-subsubsection {* Example: evaluation of expressions *}
-
-text {* Subsequently, we define mutual datatypes for arithmetic and
-  boolean expressions, and use @{command primrec} for evaluation
-  functions that follow the same recursive structure. *}
-
-datatype 'a aexp =
-    IF "'a bexp"  "'a aexp"  "'a aexp"
-  | Sum "'a aexp"  "'a aexp"
-  | Diff "'a aexp"  "'a aexp"
-  | Var 'a
-  | Num nat
-and 'a bexp =
-    Less "'a aexp"  "'a aexp"
-  | And "'a bexp"  "'a bexp"
-  | Neg "'a bexp"
-
-
-text {* \medskip Evaluation of arithmetic and boolean expressions *}
-
-primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
-  and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
-where
-  "evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)"
-| "evala env (Sum a1 a2) = evala env a1 + evala env a2"
-| "evala env (Diff a1 a2) = evala env a1 - evala env a2"
-| "evala env (Var v) = env v"
-| "evala env (Num n) = n"
-| "evalb env (Less a1 a2) = (evala env a1 < evala env a2)"
-| "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
-| "evalb env (Neg b) = (\<not> evalb env b)"
-
-text {* Since the value of an expression depends on the value of its
-  variables, the functions @{const evala} and @{const evalb} take an
-  additional parameter, an \emph{environment} that maps variables to
-  their values.
-
-  \medskip Substitution on expressions can be defined similarly.  The
-  mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
-  parameter is lifted canonically on the types @{typ "'a aexp"} and
-  @{typ "'a bexp"}, respectively.
-*}
-
-primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
-  and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
-where
-  "substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)"
-| "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)"
-| "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)"
-| "substa f (Var v) = f v"
-| "substa f (Num n) = Num n"
-| "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)"
-| "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
-| "substb f (Neg b) = Neg (substb f b)"
-
-text {* In textbooks about semantics one often finds substitution
-  theorems, which express the relationship between substitution and
-  evaluation.  For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
-  such a theorem by mutual induction, followed by simplification.
-*}
-
-lemma subst_one:
-  "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
-  "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
-  by (induct a and b) simp_all
-
-lemma subst_all:
-  "evala env (substa s a) = evala (\<lambda>x. evala env (s x)) a"
-  "evalb env (substb s b) = evalb (\<lambda>x. evala env (s x)) b"
-  by (induct a and b) simp_all
-
-
-subsubsection {* Example: a substitution function for terms *}
-
-text {* Functions on datatypes with nested recursion are also defined
-  by mutual primitive recursion. *}
-
-datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
-
-text {* A substitution function on type @{typ "('a, 'b) term"} can be
-  defined as follows, by working simultaneously on @{typ "('a, 'b)
-  term list"}: *}
-
-primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
-  subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
-where
-  "subst_term f (Var a) = f a"
-| "subst_term f (App b ts) = App b (subst_term_list f ts)"
-| "subst_term_list f [] = []"
-| "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
-
-text {* The recursion scheme follows the structure of the unfolded
-  definition of type @{typ "('a, 'b) term"}.  To prove properties of this
-  substitution function, mutual induction is needed:
-*}
-
-lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
-  "subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
-  by (induct t and ts) simp_all
-
-
-subsubsection {* Example: a map function for infinitely branching trees *}
-
-text {* Defining functions on infinitely branching datatypes by
-  primitive recursion is just as easy.
-*}
-
-datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
-
-primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
-where
-  "map_tree f (Atom a) = Atom (f a)"
-| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
-
-text {* Note that all occurrences of functions such as @{text ts}
-  above must be applied to an argument.  In particular, @{term
-  "map_tree f \<circ> ts"} is not allowed here. *}
-
-text {* Here is a simple composition lemma for @{term map_tree}: *}
-
-lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
-  by (induct t) simp_all
-
-
-subsection {* Proof methods related to recursive definitions *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) pat_completeness} & : & @{text method} \\
-    @{method_def (HOL) relation} & : & @{text method} \\
-    @{method_def (HOL) lexicographic_order} & : & @{text method} \\
-    @{method_def (HOL) size_change} & : & @{text method} \\
-    @{method_def (HOL) induction_schema} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) relation} @{syntax term}
-    ;
-    @@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
-    ;
-    @@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) )
-    ;
-    @@{method (HOL) induction_schema}
-    ;
-    orders: ( 'max' | 'min' | 'ms' ) *
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) pat_completeness} is a specialized method to
-  solve goals regarding the completeness of pattern matching, as
-  required by the @{command (HOL) "function"} package (cf.\
-  \cite{isabelle-function}).
-
-  \item @{method (HOL) relation}~@{text R} introduces a termination
-  proof using the relation @{text R}.  The resulting proof state will
-  contain goals expressing that @{text R} is wellfounded, and that the
-  arguments of recursive calls decrease with respect to @{text R}.
-  Usually, this method is used as the initial proof step of manual
-  termination proofs.
-
-  \item @{method (HOL) "lexicographic_order"} attempts a fully
-  automated termination proof by searching for a lexicographic
-  combination of size measures on the arguments of the function. The
-  method accepts the same arguments as the @{method auto} method,
-  which it uses internally to prove local descents.  The @{syntax
-  clasimpmod} modifiers are accepted (as for @{method auto}).
-
-  In case of failure, extensive information is printed, which can help
-  to analyse the situation (cf.\ \cite{isabelle-function}).
-
-  \item @{method (HOL) "size_change"} also works on termination goals,
-  using a variation of the size-change principle, together with a
-  graph decomposition technique (see \cite{krauss_phd} for details).
-  Three kinds of orders are used internally: @{text max}, @{text min},
-  and @{text ms} (multiset), which is only available when the theory
-  @{text Multiset} is loaded. When no order kinds are given, they are
-  tried in order. The search for a termination proof uses SAT solving
-  internally.
-
-  For local descent proofs, the @{syntax clasimpmod} modifiers are
-  accepted (as for @{method auto}).
-
-  \item @{method (HOL) induction_schema} derives user-specified
-  induction rules from well-founded induction and completeness of
-  patterns. This factors out some operations that are done internally
-  by the function package and makes them available separately. See
-  @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
-
-  \end{description}
-*}
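-
-text {* The following contrived definition (made up for this sketch)
-  illustrates @{method (HOL) pat_completeness} for the initial proof
-  obligations, and a manual termination proof via @{method (HOL)
-  relation} with a measure function: *}
-
-function countdown :: "nat \<Rightarrow> nat list"
-where
-  "countdown 0 = []"
-| "countdown (Suc n) = Suc n # countdown n"
-  by pat_completeness auto
-
-termination
-  by (relation "measure id") auto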
-
-
-subsection {* Functions with explicit partiality *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) partial_function} @{syntax target}?
-      '(' @{syntax nameref} ')' @{syntax "fixes"} \<newline>
-      @'where' @{syntax thmdecl}? @{syntax prop}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
-  recursive functions based on fixpoints in complete partial
-  orders. No termination proof is required from the user or
-  constructed internally. Instead, the possibility of non-termination
-  is modelled in the result type, which contains an explicit bottom
-  element.
-
-  Pattern matching and mutual recursion are currently not supported.
-  Thus, the specification consists of a single function described by a
-  single recursive equation.
-
-  There are no fixed syntactic restrictions on the body of the
-  function, but the induced functional must be provably monotonic
-  wrt.\ the underlying order.  The monotonicity proof is performed
-  internally, and the definition is rejected when it fails. The proof
-  can be influenced by declaring hints using the
-  @{attribute (HOL) partial_function_mono} attribute.
-
-  The mandatory @{text mode} argument specifies the mode of operation
-  of the command, which directly corresponds to a complete partial
-  order on the result type. By default, the following modes are
-  defined:
-
-  \begin{description}
-
-  \item @{text option} defines functions that map into the @{type
-  option} type. Here, the value @{term None} is used to model a
-  non-terminating computation. Monotonicity requires that if @{term
-  None} is returned by a recursive call, then the overall result must
-  also be @{term None}. This is best achieved through the use of the
-  monadic operator @{const "Option.bind"}.
-
-  \item @{text tailrec} defines functions with an arbitrary result
-  type and uses the slightly degenerate partial order where @{term
-  "undefined"} is the bottom element.  Now, monotonicity requires that
-  if @{term undefined} is returned by a recursive call, then the
-  overall result must also be @{term undefined}. In practice, this is
-  only satisfied when each recursive call is a tail call, whose result
-  is directly returned. Thus, this mode of operation allows the
-  definition of arbitrary tail-recursive functions.
-
-  \end{description}
-
-  Experienced users may define new modes by instantiating the locale
-  @{const "partial_function_definitions"} appropriately.
-
-  \item @{attribute (HOL) partial_function_mono} declares rules for
-  use in the internal monotonicity proofs of partial function
-  definitions.
-
-  \end{description}
-
-*}
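-
-text {* As a hypothetical example (not from the Isabelle sources),
-  the @{text option} mode can express a computation whose termination
-  is unknown; every recursive call occurs in tail position, so the
-  internal monotonicity proof succeeds automatically: *}
-
-partial_function (option) collatz :: "nat \<Rightarrow> nat option"
-where
-  "collatz n =
-    (if n \<le> 1 then Some n
-     else if n mod 2 = 0 then collatz (n div 2)
-     else collatz (3 * n + 1))"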
-
-
-subsection {* Old-style recursive function definitions (TFL) *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory)"} \\
-    @{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  The old TFL commands @{command (HOL) "recdef"} and @{command (HOL)
-  "recdef_tc"} for defining recursive are mostly obsolete; @{command
-  (HOL) "function"} or @{command (HOL) "fun"} should be used instead.
-
-  @{rail \<open>
-    @@{command (HOL) recdef} ('(' @'permissive' ')')? \<newline>
-      @{syntax name} @{syntax term} (@{syntax prop} +) hints?
-    ;
-    @@{command (HOL) recdef_tc} @{syntax thmdecl}? tc
-    ;
-    hints: '(' @'hints' ( recdefmod * ) ')'
-    ;
-    recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
-      (() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
-    ;
-    tc: @{syntax nameref} ('(' @{syntax nat} ')')?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "recdef"} defines general well-founded
-  recursive functions (using the TFL package), see also
-  \cite{isabelle-HOL}.  The ``@{text "(permissive)"}'' option tells
-  TFL to recover from failed proof attempts, returning unfinished
-  results.  The @{text recdef_simp}, @{text recdef_cong}, and @{text
-  recdef_wf} hints refer to auxiliary rules to be used in the internal
-  automated proof process of TFL.  Additional @{syntax clasimpmod}
-  declarations may be given to tune the context of the Simplifier
-  (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
-  \secref{sec:classical}).
-
-  \item @{command (HOL) "recdef_tc"}~@{text "c (i)"} recommences the
-  proof for leftover termination condition number @{text i} (default
-  1) as generated by a @{command (HOL) "recdef"} definition of
-  constant @{text c}.
-
-  Note that in most cases, @{command (HOL) "recdef"} is able to finish
-  its internal proofs without manual intervention.
-
-  \end{description}
-
-  \medskip Hints for @{command (HOL) "recdef"} may be also declared
-  globally, using the following attributes.
-
-  \begin{matharray}{rcl}
-    @{attribute_def (HOL) recdef_simp} & : & @{text attribute} \\
-    @{attribute_def (HOL) recdef_cong} & : & @{text attribute} \\
-    @{attribute_def (HOL) recdef_wf} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
-      @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
-  \<close>}
-*}
-
-
-section {* Datatypes \label{sec:hol-datatype} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) datatype} (spec + @'and')
-    ;
-    @@{command (HOL) rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
-    ;
-
-    spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
-    ;
-    cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "datatype"} defines inductive datatypes in
-  HOL.
-
-  \item @{command (HOL) "rep_datatype"} represents existing types as
-  datatypes.
-
-  For foundational reasons, some basic types such as @{typ nat}, @{typ
-  "'a \<times> 'b"}, @{typ "'a + 'b"}, @{typ bool} and @{typ unit} are
-  introduced by more primitive means using @{command_ref typedef}.  To
-  recover the rich infrastructure of @{command datatype} (e.g.\ rules
-  for @{method cases} and @{method induct} and the primitive recursion
-  combinators), such types may be represented as actual datatypes
-  later.  This is done by specifying the constructors of the desired
-  type, and giving a proof of the induction rule, distinctness and
-  injectivity of constructors.
-
-  For example, see @{file "~~/src/HOL/Sum_Type.thy"} for the
-  representation of the primitive sum type as fully-featured datatype.
-
-  \end{description}
-
-  The generated rules for @{method induct} and @{method cases} provide
-  case names according to the given constructors, while parameters are
-  named after the types (see also \secref{sec:cases-induct}).
-
-  See \cite{isabelle-HOL} for more details on datatypes, but beware of
-  the old-style theory syntax being used there!  Apart from proper
-  proof methods for case-analysis and induction, there are also
-  emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL)
-  induct_tac} available, see \secref{sec:hol-induct-tac}; these allow
-  direct reference to the internal structure of subgoals (including
-  internally bound parameters).
-*}
-
-
-subsubsection {* Examples *}
-
-text {* We define a type of finite sequences, with slightly different
-  names than the existing @{typ "'a list"} that is already in @{theory
-  Main}: *}
-
-datatype 'a seq = Empty | Seq 'a "'a seq"
-
-text {* We can now prove a simple lemma by structural induction: *}
-
-lemma "Seq x xs \<noteq> xs"
-proof (induct xs arbitrary: x)
-  case Empty
-  txt {* This case can be proved using the simplifier: the freeness
-    properties of the datatype are already declared as @{attribute
-    simp} rules. *}
-  show "Seq x Empty \<noteq> Empty"
-    by simp
-next
-  case (Seq y ys)
-  txt {* The step case is proved similarly. *}
-  show "Seq x (Seq y ys) \<noteq> Seq y ys"
-    using `Seq y ys \<noteq> ys` by simp
-qed
-
-text {* Here is a more succinct version of the same proof: *}
-
-lemma "Seq x xs \<noteq> xs"
-  by (induct xs arbitrary: x) simp_all
-
-
-section {* Records \label{sec:hol-record} *}
-
-text {*
-  In principle, records merely generalize the concept of tuples, where
-  components may be addressed by labels instead of just position.  The
-  logical infrastructure of records in Isabelle/HOL is slightly more
-  advanced, though, supporting truly extensible record schemes.  This
-  admits operations that are polymorphic with respect to record
-  extension, yielding ``object-oriented'' effects like (single)
-  inheritance.  See also \cite{NaraschewskiW-TPHOLs98} for more
-  details on object-oriented verification and record subtyping in HOL.
-*}
-
-
-subsection {* Basic concepts *}
-
-text {*
-  Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
-  at the level of terms and types.  The notation is as follows:
-
-  \begin{center}
-  \begin{tabular}{l|l|l}
-    & record terms & record types \\ \hline
-    fixed & @{text "\<lparr>x = a, y = b\<rparr>"} & @{text "\<lparr>x :: A, y :: B\<rparr>"} \\
-    schematic & @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} &
-      @{text "\<lparr>x :: A, y :: B, \<dots> :: M\<rparr>"} \\
-  \end{tabular}
-  \end{center}
-
-  \noindent The ASCII representation of @{text "\<lparr>x = a\<rparr>"} is @{text
-  "(| x = a |)"}.
-
-  A fixed record @{text "\<lparr>x = a, y = b\<rparr>"} has field @{text x} of value
-  @{text a} and field @{text y} of value @{text b}.  The corresponding
-  type is @{text "\<lparr>x :: A, y :: B\<rparr>"}, assuming that @{text "a :: A"}
-  and @{text "b :: B"}.
-
-  A record scheme like @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} contains fields
-  @{text x} and @{text y} as before, but also possibly further fields
-  as indicated by the ``@{text "\<dots>"}'' notation (which is actually part
-  of the syntax).  The improper field ``@{text "\<dots>"}'' of a record
-  scheme is called the \emph{more part}.  Logically it is just a free
-  variable, which is occasionally referred to as ``row variable'' in
-  the literature.  The more part of a record scheme may be
-  instantiated by zero or more further components.  For example, the
-  previous scheme may get instantiated to @{text "\<lparr>x = a, y = b, z =
-  c, \<dots> = m'\<rparr>"}, where @{text m'} refers to a different more part.
-  Fixed records are special instances of record schemes, where
-  ``@{text "\<dots>"}'' is properly terminated by the @{text "() :: unit"}
-  element.  In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
-  for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
-
-  \medskip Two key observations make extensible records in a simply
-  typed language like HOL work out:
-
-  \begin{enumerate}
-
-  \item the more part is internalized, as a free term or type
-  variable,
-
-  \item field names are externalized, they cannot be accessed within
-  the logic as first-class values.
-
-  \end{enumerate}
-
-  \medskip In Isabelle/HOL record types have to be defined explicitly,
-  fixing their field names and types, and their (optional) parent
-  record.  Afterwards, records may be formed using above syntax, while
-  obeying the canonical order of fields as given by their declaration.
-  The record package provides several standard operations like
-  selectors and updates.  The common setup for various generic proof
-  tools enables succinct reasoning patterns.  See also the Isabelle/HOL
-  tutorial \cite{isabelle-hol-book} for further instructions on using
-  records in practice.
-*}
-
-
-subsection {* Record specifications *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) record} @{syntax typespec_sorts} '=' \<newline>
-      (@{syntax type} '+')? (constdecl +)
-    ;
-    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
-  \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
-  derived from the optional parent record @{text "\<tau>"} by adding new
-  field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
-
-  The type variables of @{text "\<tau>"} and @{text "\<sigma>\<^sub>i"} need to be
-  covered by the (distinct) parameters @{text "\<alpha>\<^sub>1, \<dots>,
-  \<alpha>\<^sub>m"}.  Type constructor @{text t} has to be new, while @{text
-  \<tau>} needs to specify an instance of an existing record type.  At
-  least one new field @{text "c\<^sub>i"} has to be specified.
-  Basically, field names need to belong to a unique record.  This is
-  not a real restriction in practice, since fields are qualified by
-  the record name internally.
-
-  The parent record specification @{text \<tau>} is optional; if omitted
-  @{text t} becomes a root record.  The hierarchy of all records
-  declared within a theory context forms a forest structure, i.e.\ a
-  set of trees starting with a root record each.  There is no way to
-  merge multiple parent records!
-
-  For convenience, @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is made a
-  type abbreviation for the fixed record type @{text "\<lparr>c\<^sub>1 ::
-  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n\<rparr>"}, likewise is @{text
-  "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
-  @{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
-  \<zeta>\<rparr>"}.
-
-  \end{description}
-*}
-
-
-subsection {* Record operations *}
-
-text {*
-  Any record definition of the form presented above produces certain
-  standard operations.  Selectors and updates are provided for any
-  field, including the improper one ``@{text more}''.  There are also
-  cumulative record constructor functions.  To simplify the
-  presentation below, we assume for now that @{text "(\<alpha>\<^sub>1, \<dots>,
-  \<alpha>\<^sub>m) t"} is a root record with fields @{text "c\<^sub>1 ::
-  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
-
-  \medskip \textbf{Selectors} and \textbf{updates} are available for
-  any field (including ``@{text more}''):
-
-  \begin{matharray}{lll}
-    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
-    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
-  \end{matharray}
-
-  There is special syntax for application of updates: @{text "r\<lparr>x :=
-  a\<rparr>"} abbreviates term @{text "x_update a r"}.  Further notation for
-  repeated updates is also available: @{text "r\<lparr>x := a\<rparr>\<lparr>y := b\<rparr>\<lparr>z :=
-  c\<rparr>"} may be written @{text "r\<lparr>x := a, y := b, z := c\<rparr>"}.  Note that
-  because of postfix notation the order of fields shown here is the
-  reverse of that in the actual term.  Since repeated updates are just
-  function applications, fields may be freely permuted in @{text "\<lparr>x
-  := a, y := b, z := c\<rparr>"}, as far as logical equality is concerned.
-  Thus commutativity of independent updates can be proven within the
-  logic for any two fields, but not as a general theorem.
-
-  \medskip The \textbf{make} operation provides a cumulative record
-  constructor function:
-
-  \begin{matharray}{lll}
-    @{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-  \end{matharray}
-
-  \medskip We now reconsider the case of non-root records, which are
-  derived from some parent.  In general, the latter may depend on
-  another parent as well, resulting in a list of \emph{ancestor
-  records}.  Appending the lists of fields of all ancestors results in
-  a certain field prefix.  The record package automatically takes care
-  of this by lifting operations over this context of ancestor fields.
-  Assuming that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} has ancestor
-  fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"},
-  the above record operations will get the following types:
-
-  \medskip
-  \begin{tabular}{lll}
-    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
-    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
-      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow>
-      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
-    @{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
-      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-  \end{tabular}
-  \medskip
-
-  \noindent Some further operations address the extension aspect of a
-  derived record scheme specifically: @{text "t.fields"} produces a
-  record fragment consisting of exactly the new fields introduced here
-  (the result may serve as a more part elsewhere); @{text "t.extend"}
-  takes a fixed record and adds a given more part; @{text
-  "t.truncate"} restricts a record scheme to a fixed record.
-
-  \medskip
-  \begin{tabular}{lll}
-    @{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-    @{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
-      \<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
-    @{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
-  \end{tabular}
-  \medskip
-
-  \noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
-  for root records.
-*}
-
-
-subsection {* Derived rules and proof tools *}
-
-text {*
-  The record package proves several results internally, declaring
-  these facts to appropriate proof tools.  This enables users to
-  reason about record structures quite conveniently.  Assume that
-  @{text t} is a record type as specified above.
-
-  \begin{enumerate}
-
-  \item Standard conversions for selectors or updates applied to
-  record constructor terms are made part of the default Simplifier
-  context; thus proofs by reduction of basic operations merely require
-  the @{method simp} method without further arguments.  These rules
-  are available as @{text "t.simps"}, too.
-
-  \item Selectors applied to updated records are automatically reduced
-  by an internal simplification procedure, which is also part of the
-  standard Simplifier setup.
-
-  \item Inject equations of a form analogous to @{prop "(x, y) = (x',
-  y') \<equiv> x = x' \<and> y = y'"} are declared to the Simplifier and Classical
-  Reasoner as @{attribute iff} rules.  These rules are available as
-  @{text "t.iffs"}.
-
-  \item The introduction rule for record equality analogous to @{text
-  "x r = x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier,
-  and to the basic rule context as ``@{attribute intro}@{text "?"}''.
-  The rule is called @{text "t.equality"}.
-
-  \item Representations of arbitrary record expressions as canonical
-  constructor terms are provided both in @{method cases} and @{method
-  induct} format (cf.\ the generic proof methods of the same name,
-  \secref{sec:cases-induct}).  Several variations are available, for
-  fixed records, record schemes, more parts etc.
-
-  The generic proof methods are sufficiently smart to pick the most
-  sensible rule according to the type of the indicated record
-  expression: users just need to apply something like ``@{text "(cases
-  r)"}'' to a certain proof problem.
-
-  \item The derived record operations @{text "t.make"}, @{text
-  "t.fields"}, @{text "t.extend"}, @{text "t.truncate"} are \emph{not}
-  treated automatically, but usually need to be expanded by hand,
-  using the collective fact @{text "t.defs"}.
-
-  \end{enumerate}
-*}
-
-
-subsubsection {* Examples *}
-
-text {* See @{file "~~/src/HOL/ex/Records.thy"}, for example. *}
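-
-text {* A minimal inline illustration (made up for this sketch): *}
-
-record point =
-  x :: int
-  y :: int
-
-text {* Selectors and updates applied to record constructor terms are
-  simplified automatically: *}
-
-lemma "x \<lparr>x = 1, y = 2\<rparr> = 1"
-  by simp
-
-lemma "\<lparr>x = 1, y = 2\<rparr>\<lparr>x := 3\<rparr> = \<lparr>x = 3, y = 2\<rparr>"
-  by simp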
-
-section {* Typedef axiomatization \label{sec:hol-typedef} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  A Gordon/HOL-style type definition is a certain axiom scheme that
-  identifies a new type with a subset of an existing type.  More
-  precisely, the new type is defined by exhibiting an existing type
-  @{text \<tau>}, a set @{text "A :: \<tau> set"}, and a theorem that proves
-  @{prop "\<exists>x. x \<in> A"}.  Thus @{text A} is a non-empty subset of @{text
-  \<tau>}, and the new type denotes this subset.  New functions are
-  postulated that establish an isomorphism between the new type and
-  the subset.  In general, the type @{text \<tau>} may involve type
-  variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} which means that the type definition
-  produces a type constructor @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} depending on
-  those type arguments.
-
-  The axiomatization can be considered a ``definition'' in the sense
-  of the particular set-theoretic interpretation of HOL
-  \cite{pitts93}, where the universe of types is required to be
-  downwards-closed wrt.\ arbitrary non-empty subsets.  Thus genuinely
-  new types introduced by @{command "typedef"} stay within the range
-  of HOL models by construction.  Note that @{command_ref
-  type_synonym} from Isabelle/Pure merely introduces syntactic
-  abbreviations, without any logical significance.
-
-  @{rail \<open>
-    @@{command (HOL) typedef} abs_type '=' rep_set
-    ;
-    abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
-    ;
-    rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"}
-  axiomatizes a type definition in the background theory of the
-  current context, depending on a non-emptiness result of the set
-  @{text A} that needs to be proven here.  The set @{text A} may
-  contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} as specified on the LHS,
-  but no term variables.
-
-  Even though the command is formally a local theory specification,
-  the newly introduced type constructor cannot depend on parameters
-  or assumptions of the context: this is structurally impossible in
-  HOL.  In contrast, the non-emptiness proof may use local
-  assumptions in unusual situations, which could result in different
-  interpretations in target contexts: the meaning of the bijection
-  between the representing set @{text A} and the new type @{text t}
-  may then change in different application contexts.
-
-  For @{command (HOL) "typedef"}~@{text "t = A"} the newly introduced
-  type @{text t} is accompanied by a pair of morphisms to relate it to
-  the representing set over the old type.  By default, the injection
-  from type to set is called @{text Rep_t} and its inverse @{text
-  Abs_t}; an explicit @{keyword (HOL) "morphisms"} specification
-  makes it possible to provide alternative names.
-
-  The core axiomatization uses the locale predicate @{const
-  type_definition} as defined in Isabelle/HOL.  Various basic
-  consequences of that are instantiated accordingly, re-using the
-  locale facts with names derived from the new type constructor.  Thus
-  the generic @{thm type_definition.Rep} is turned into the specific
-  @{text "Rep_t"}, for example.
-
-  Theorems @{thm type_definition.Rep}, @{thm
-  type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse}
-  provide the most basic characterization as a corresponding
-  injection/surjection pair (in both directions).  The derived rules
-  @{thm type_definition.Rep_inject} and @{thm
-  type_definition.Abs_inject} provide a more convenient version of
-  injectivity, suitable for automated proof tools (e.g.\ in
-  declarations involving @{attribute simp} or @{attribute iff}).
-  Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm
-  type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/
-  @{thm type_definition.Abs_induct} provide alternative views on
-  surjectivity.  These rules are already declared as set or type rules
-  for the generic @{method cases} and @{method induct} methods,
-  respectively.
-
-  \end{description}
-
-  \begin{warn}
-  If you introduce a new type axiomatically, i.e.\ via @{command_ref
-  typedecl} and @{command_ref axiomatization}, the minimum requirement
-  is that it has a non-empty model, to avoid immediate collapse of the
-  HOL logic.  Moreover, one needs to demonstrate that the
-  interpretation of such free-form axiomatizations can coexist with
-  that of the regular @{command_def typedef} scheme, and any extension
-  that other people might have introduced elsewhere.
-  \end{warn}
-*}
-
-subsubsection {* Examples *}
-
-text {* Type definitions permit the introduction of abstract data
-  types in a safe way, namely by providing models based on already
-  existing types.  Given some abstract axiomatic description @{text P}
-  of a type, this involves two steps:
-
-  \begin{enumerate}
-
-  \item Find an appropriate type @{text \<tau>} and subset @{text A} which
-  has the desired properties @{text P}, and make a type definition
-  based on this representation.
-
-  \item Prove that @{text P} holds for the new type by lifting
-  @{text P} from the representation.
-
-  \end{enumerate}
-
-  You can later forget about the representation and work solely in
-  terms of the abstract properties @{text P}.
-
-  \medskip The following trivial example pulls a three-element type
-  into existence within the formal logical environment of HOL. *}
-
-typedef three = "{(True, True), (True, False), (False, True)}"
-  by blast
-
-definition "One = Abs_three (True, True)"
-definition "Two = Abs_three (True, False)"
-definition "Three = Abs_three (False, True)"
-
-lemma three_distinct: "One \<noteq> Two"  "One \<noteq> Three"  "Two \<noteq> Three"
-  by (simp_all add: One_def Two_def Three_def Abs_three_inject)
-
-lemma three_cases:
-  fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
-  by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject)
-
-text {* Note that such trivial constructions are better done with
-  derived specification mechanisms such as @{command datatype}: *}
-
-datatype three' = One' | Two' | Three'
-
-text {* This avoids re-doing basic definitions and proofs from the
-  primitive @{command typedef} above. *}
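-
-text {* As a further hypothetical variant (purely for illustration),
-  an explicit @{keyword (HOL) "morphisms"} specification provides
-  alternative names for the injection and its inverse: *}
-
-typedef three'' = "{(True, True), (True, False), (False, True)}"
-  morphisms to_pair of_pair
-  by blast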
-
-
-
-section {* Functorial structure of types *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "functor"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "functor"}~@{text "prefix: m"} allows to
-  prove and register properties about the functorial structure of type
-  constructors.  These properties then can be used by other packages
-  to deal with those type constructors in certain type constructions.
-  Characteristic theorems are noted in the current local theory.  By
-  default, they are prefixed with the base name of the type
-  constructor, an explicit prefix can be given alternatively.
-
-  The given term @{text "m"} is considered as \emph{mapper} for the
-  corresponding type constructor and must conform to the following
-  type pattern:
-
-  \begin{matharray}{lll}
-    @{text "m"} & @{text "::"} &
-      @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>k \<Rightarrow> (\<^vec>\<alpha>\<^sub>n) t \<Rightarrow> (\<^vec>\<beta>\<^sub>n) t"} \\
-  \end{matharray}
-
-  \noindent where @{text t} is the type constructor, @{text
-  "\<^vec>\<alpha>\<^sub>n"} and @{text "\<^vec>\<beta>\<^sub>n"} are distinct
-  type variables free in the local theory and @{text "\<sigma>\<^sub>1"},
-  \ldots, @{text "\<sigma>\<^sub>k"} is a subsequence of @{text "\<alpha>\<^sub>1 \<Rightarrow>
-  \<beta>\<^sub>1"}, @{text "\<beta>\<^sub>1 \<Rightarrow> \<alpha>\<^sub>1"}, \ldots,
-  @{text "\<alpha>\<^sub>n \<Rightarrow> \<beta>\<^sub>n"}, @{text "\<beta>\<^sub>n \<Rightarrow>
-  \<alpha>\<^sub>n"}.
-
-  \end{description}
-*}
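-
-text {* For example, the list mapper @{const map} fits this pattern
-  with @{text "k = 1"} and @{text "\<sigma>\<^sub>1 = \<alpha>\<^sub>1 \<Rightarrow> \<beta>\<^sub>1"}.  A corresponding
-  declaration (hypothetical here; the library already provides one)
-  would read
-
-  @{text [display] "functor map: map"}
-
-  \noindent and opens a proof with obligations roughly of the form
-  @{text "map f \<circ> map g = map (f \<circ> g)"} and @{text "map id = id"}. *}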
-
-
-section {* Quotient types *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
-    @{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
-    @{command_def (HOL) "print_quotmapsQ3"} & : & @{text "context \<rightarrow>"}\\
-    @{command_def (HOL) "print_quotientsQ3"} & : & @{text "context \<rightarrow>"}\\
-    @{command_def (HOL) "print_quotconsts"} & : & @{text "context \<rightarrow>"}\\
-    @{method_def (HOL) "lifting"} & : & @{text method} \\
-    @{method_def (HOL) "lifting_setup"} & : & @{text method} \\
-    @{method_def (HOL) "descending"} & : & @{text method} \\
-    @{method_def (HOL) "descending_setup"} & : & @{text method} \\
-    @{method_def (HOL) "partiality_descending"} & : & @{text method} \\
-    @{method_def (HOL) "partiality_descending_setup"} & : & @{text method} \\
-    @{method_def (HOL) "regularize"} & : & @{text method} \\
-    @{method_def (HOL) "injection"} & : & @{text method} \\
-    @{method_def (HOL) "cleaning"} & : & @{text method} \\
-    @{attribute_def (HOL) "quot_thm"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_lifted"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_respect"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_preserve"} & : & @{text attribute} \\
-  \end{matharray}
-
-  The quotient package defines a new quotient type given a raw type
-  and a partial equivalence relation.  The package also historically
-  includes automation for transporting definitions and theorems, but
-  most of this automation has been superseded by the Lifting and
-  Transfer packages.  Users should consider using these two newer
-  packages for lifting definitions and transporting theorems.
-
-  @{rail \<open>
-    @@{command (HOL) quotient_type} (spec)
-    ;
-    spec: @{syntax typespec} @{syntax mixfix}? '=' \<newline>
-     @{syntax type} '/' ('partial' ':')? @{syntax term} \<newline>
-     (@'morphisms' @{syntax name} @{syntax name})? (@'parametric' @{syntax thmref})?
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \<newline>
-    @{syntax term} 'is' @{syntax term}
-    ;
-    constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
-  \<close>}
-
-  @{rail \<open>
-    @@{method (HOL) lifting} @{syntax thmrefs}?
-    ;
-    @@{method (HOL) lifting_setup} @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "quotient_type"} defines a new quotient type @{text \<tau>}. The
-  injection from a quotient type to a raw type is called @{text
-  rep_\<tau>}, its inverse @{text abs_\<tau>} unless explicit @{keyword (HOL)
-  "morphisms"} specification provides alternative names. @{command
-  (HOL) "quotient_type"} requires the user to prove that the relation
-  is an equivalence relation (predicate @{text equivp}), unless the
-  user specifies explicitly @{text partial} in which case the
-  obligation is @{text part_equivp}.  A quotient defined with @{text
-  partial} is weaker in the sense that less things can be proved
-  automatically.
-
-  The command internally proves a Quotient theorem and sets up the
-  Lifting package by the command @{command (HOL) setup_lifting}.
-  Thus the Lifting and Transfer packages can also be used with
-  quotient types defined by @{command (HOL) "quotient_type"} without
-  any extra set-up.  The parametricity theorem for the equivalence
-  relation R can be provided as an extra argument of the command and
-  is passed to the corresponding internal call of @{command (HOL)
-  setup_lifting}.  This theorem allows the Lifting package to
-  generate a stronger transfer rule for equality.
-  
-  \end{description}
-
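-  For example, the integers can be constructed (schematically,
-  mirroring the construction in @{file "~~/src/HOL/Int.thy"}) as a
-  quotient of pairs of naturals:
-
-  @{text [display] "quotient_type int = nat \<times> nat / intrel
-    morphisms Rep_Integ Abs_Integ
-    by (rule intrel_equivp)"}
-
-  \noindent Here @{text intrel} stands for the relation
-  @{text "\<lambda>(x, y) (u, v). x + v = u + y"} and @{text intrel_equivp}
-  for a previously established fact @{text "equivp intrel"} (both
-  names are hypothetical in this sketch).
-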
-  Most of the rest of the package has been superseded by the Lifting
-  and Transfer packages.  Users should consider using these two newer
-  packages for lifting definitions and transporting theorems.
-
-  \begin{description}  
-
-  \item @{command (HOL) "quotient_definition"} defines a constant on
-  the quotient type.
-
-  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map
-  functions.
-
-  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
-
-  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
-
-  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
-    methods match the current goal with the given raw theorem to be
-    lifted producing three new subgoals: regularization, injection and
-    cleaning subgoals. @{method (HOL) "lifting"} tries to apply the
-    heuristics for automatically solving these three subgoals and
-    leaves only the subgoals unsolved by the heuristics to the user as
-    opposed to @{method (HOL) "lifting_setup"} which leaves the three
-    subgoals unsolved.
-
-  \item @{method (HOL) "descending"} and @{method (HOL)
-    "descending_setup"} try to guess a raw statement that would lift
-    to the current subgoal. Such statement is assumed as a new subgoal
-    and @{method (HOL) "descending"} continues in the same way as
-    @{method (HOL) "lifting"} does. @{method (HOL) "descending"} tries
-    to solve the arising regularization, injection and cleaning
-    subgoals with the analogous method @{method (HOL)
-    "descending_setup"} which leaves the four unsolved subgoals.
-
-  \item @{method (HOL) "partiality_descending"} finds the regularized
-    theorem that would lift to the current subgoal, lifts it and
-    leaves as a subgoal. This method can be used with partial
-    equivalence quotients where the non regularized statements would
-    not be true. @{method (HOL) "partiality_descending_setup"} leaves
-    the injection and cleaning subgoals unchanged.
-
-  \item @{method (HOL) "regularize"} applies the regularization
-    heuristics to the current subgoal.
-
-  \item @{method (HOL) "injection"} applies the injection heuristics
-    to the current goal using the stored quotient respectfulness
-    theorems.
-
-  \item @{method (HOL) "cleaning"} applies the injection cleaning
-    heuristics to the current subgoal using the stored quotient
-    preservation theorems.
-
-  \item The @{attribute (HOL) quot_lifted} attribute tries to
-    automatically transport the theorem to the quotient type.
-    The attribute uses all the defined quotient types and quotient
-    constants, often producing undesired results or theorems that
-    cannot be lifted.
-
-  \item The @{attribute (HOL) quot_respect} and @{attribute (HOL)
-    quot_preserve} attributes declare a theorem as a respectfulness
-    or preservation theorem, respectively.  These are stored in the
-    local theory store and used by the @{method (HOL) "injection"}
-    and @{method (HOL) "cleaning"} methods, respectively.
-
-  \item @{attribute (HOL) quot_thm} declares that a certain theorem
-    is a quotient extension theorem.  Quotient extension theorems
-    allow for quotienting inside container types.  Given a
-    polymorphic type that serves as a container, a map function
-    defined for this container using @{command (HOL) "functor"} and a
-    relation map defined for the container type, the quotient
-    extension theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3
-    (rel_map R) (map Abs) (map Rep)"}.  Quotient extension theorems
-    are stored in a database and are used in all the steps of lifting
-    theorems.
-
-  \end{description}
-*}
-
-
-section {* Definition by specification \label{sec:hol-specification} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) specification} '(' (decl +) ')' \<newline>
-      (@{syntax thmdecl}? @{syntax prop} +)
-    ;
-    decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
-  goal stating the existence of terms with the properties specified to
-  hold for the constants given in @{text decls}.  After finishing the
-  proof, the theory will be augmented with definitions for the given
-  constants, as well as with theorems stating the properties for these
-  constants.
-
-  @{text decl} declares a constant to be defined by the
-  specification given.  The definition for the constant @{text c} is
-  bound to the name @{text c_def} unless a theorem name is given in
-  the declaration.  Overloaded constants should be declared as such.
-
-  \end{description}
-*}
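-
-text {* The following hypothetical example (not part of the library)
-  specifies a constant by one of its properties; the existence proof
-  is discharged with a trivial witness: *}
-
-consts w :: nat
-
-specification (w)
-  w_idem: "w + w = w"
-  by (rule exI [of _ 0]) simp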
-
-
-section {* Adhoc overloading of constants *}
-
-text {*
-  \begin{tabular}{rcll}
-  @{command_def "adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  @{command_def "no_adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  @{attribute_def "show_variants"} & : & @{text "attribute"} & default @{text false} \\
-  \end{tabular}
-
-  \medskip
-
-  Adhoc overloading allows the user to overload a constant depending
-  on its type. Typically this involves the introduction of an
-  uninterpreted constant (used for input and output) and the addition
-  of some variants (used internally). For examples see
-  @{file "~~/src/HOL/ex/Adhoc_Overloading_Examples.thy"} and
-  @{file "~~/src/HOL/Library/Monad_Syntax.thy"}.
-
-  @{rail \<open>
-    (@@{command adhoc_overloading} | @@{command no_adhoc_overloading})
-      (@{syntax nameref} (@{syntax term} + ) + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
-  associates variants with an existing constant.
-
-  \item @{command "no_adhoc_overloading"} is similar to
-  @{command "adhoc_overloading"}, but removes the specified variants
-  from the present context.
-  
-  \item @{attribute "show_variants"} controls printing of variants
-  of overloaded constants. If enabled, the internally used variants
-  are printed instead of their respective overloaded constants. This
-  is occasionally useful to check whether the system agrees with a
-  user's expectations about derived variants.
-
-  \end{description}
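-
-  For example (schematically, in the spirit of @{file
-  "~~/src/HOL/Library/Monad_Syntax.thy"}), an uninterpreted constant
-  @{text bind} may be associated with @{const Option.bind} as one of
-  its variants:
-
-  @{text [display] "adhoc_overloading bind Option.bind"}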
-*}
-
-chapter {* Proof tools *}
-
-section {* Adhoc tuples *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{attribute (HOL) split_format} ('(' 'complete' ')')?
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
-  arguments in function applications to be represented canonically
-  according to their tuple type structure.
-
-  Note that this operation tends to invent funny names for new local
-  parameters introduced.
-
-  \end{description}
-*}
-
-
-section {* Transfer package *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "transfer"} & : & @{text method} \\
-    @{method_def (HOL) "transfer'"} & : & @{text method} \\
-    @{method_def (HOL) "transfer_prover"} & : & @{text method} \\
-    @{attribute_def (HOL) "Transfer.transferred"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "untransferred"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "transfer_rule"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "transfer_domain_rule"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_eq"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_domain"} & : & @{text attribute} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{method (HOL) "transfer"} method replaces the current subgoal
-    with a logically equivalent one that uses different types and
-    constants. The replacement of types and constants is guided by the
-    database of transfer rules. Goals are generalized over all free
-    variables by default; this is necessary for variables whose types
-    change, but can be overridden for specific variables with e.g.
-    @{text "transfer fixing: x y z"}.
-
-  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL)
-    transfer} that allows replacing a subgoal with one that is
-    logically stronger (rather than equivalent). For example, a
-    subgoal involving equality on a quotient type could be replaced
-    with a subgoal involving equality (instead of the corresponding
-    equivalence relation) on the underlying raw type.
-
-  \item @{method (HOL) "transfer_prover"} method assists with proving
-    a transfer rule for a new constant, provided the constant is
-    defined in terms of other constants that already have transfer
-    rules. It should be applied after unfolding the constant
-    definitions.
-
-  \item @{attribute (HOL) "untransferred"} proves the same equivalent theorem
-     as @{method (HOL) "transfer"} internally does.
-
-  \item @{attribute (HOL) Transfer.transferred} works in the opposite
-    direction to @{method (HOL) "transfer'"}.  E.g., given the
-    transfer relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding
-    transfer rules and the theorem @{text "\<forall>x::int \<in> {0..}. x < x + 1"},
-    the attribute would prove @{text "\<forall>n::nat. n < n + 1"}.  The
-    attribute is still in an experimental phase of development.
-
-  \item @{attribute (HOL) "transfer_rule"} attribute maintains a
-    collection of transfer rules, which relate constants at two
-    different types. Typical transfer rules may relate different type
-    instances of the same polymorphic constant, or they may relate an
-    operation on a raw type to a corresponding operation on an
-    abstract type (quotient or subtype). For example:
-
-    @{text "((A ===> B) ===> list_all2 A ===> list_all2 B) map map"}\\
-    @{text "(cr_int ===> cr_int ===> cr_int) (\<lambda>(x,y) (u,v). (x+u, y+v)) plus"}
-
-    Lemmas involving predicates on relations can also be registered
-    using the same attribute. For example:
-
-    @{text "bi_unique A \<Longrightarrow> (list_all2 A ===> op =) distinct distinct"}\\
-    @{text "\<lbrakk>bi_unique A; bi_unique B\<rbrakk> \<Longrightarrow> bi_unique (rel_prod A B)"}
-
-  \item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a collection
-    of rules, which specify a domain of a transfer relation by a predicate.
-    E.g., given the transfer relation @{text "ZN x n \<equiv> (x = int n)"}, 
-    one can register the following transfer domain rule: 
-    @{text "Domainp ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce
-    more readable transferred goals, e.g., when quantifiers are transferred.
-
-  \item The @{attribute (HOL) relator_eq} attribute collects identity laws
-    for relators of various type constructors, e.g. @{text "list_all2
-    (op =) = (op =)"}. The @{method (HOL) transfer} method uses these
-    lemmas to infer transfer rules for non-polymorphic constants on
-    the fly.
-
-  \item @{attribute_def (HOL) "relator_domain"} attribute collects rules 
-    describing domains of relators by predicators. E.g., @{text "Domainp A = P \<Longrightarrow>
-    Domainp (list_all2 A) = (list_all P)"}. This allows the package to lift transfer
-    domain rules through type constructors.
-
-  \end{description}
-
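-  For example, given the relation @{text ZN} above, a transfer rule
-  relating integer and natural addition has the form
-
-  @{text [display] "(ZN ===> ZN ===> ZN) (op +) (op +)"}
-
-  \noindent Such rules for newly defined constants are conveniently
-  established with @{method (HOL) "transfer_prover"} after unfolding
-  the definitions of the constants involved.
-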
-  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
-*}
-
-
-section {* Lifting package *}
-
-text {*
-  The Lifting package allows users to lift terms of the raw type to the abstract type, which is 
-  a necessary step in building a library for an abstract type. Lifting defines a new constant 
-  by combining coercion functions (Abs and Rep) with the raw term. It also proves an appropriate 
-  transfer rule for the Transfer package and, if possible, an equation for the code generator.
-
-  The Lifting package provides two main commands: @{command (HOL) "setup_lifting"} for initializing 
-  the package to work with a new type, and @{command (HOL) "lift_definition"} for lifting constants. 
-  The Lifting package works with all four kinds of type abstraction: type copies, subtypes, 
-  total quotients and partial quotients.
-
-  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "setup_lifting"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
-    @{command_def (HOL) "lift_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
-    @{command_def (HOL) "lifting_forget"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
-    @{command_def (HOL) "lifting_update"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
-    @{command_def (HOL) "print_quot_maps"} & : & @{text "context \<rightarrow>"}\\
-    @{command_def (HOL) "print_quotients"} & : & @{text "context \<rightarrow>"}\\
-    @{attribute_def (HOL) "quot_map"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "invariant_commute"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "reflexivity_rule"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_mono"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "relator_distr"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "quot_del"} & : & @{text attribute} \\
-    @{attribute_def (HOL) "lifting_restore"} & : & @{text attribute} \\   
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) setup_lifting} ('(' 'no_code' ')')? \<newline>
-      @{syntax thmref} @{syntax thmref}? (@'parametric' @{syntax thmref})?;
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) lift_definition} @{syntax name} '::' @{syntax type}  @{syntax mixfix}? \<newline>
-      'is' @{syntax term} (@'parametric' @{syntax thmref})?;
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) lifting_forget} @{syntax nameref};
-  \<close>}
-
-  @{rail \<open>
-    @@{command (HOL) lifting_update} @{syntax nameref};
-  \<close>}
-
-  @{rail \<open>
-    @@{attribute (HOL) lifting_restore} @{syntax thmref} (@{syntax thmref} @{syntax thmref})?;
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package
-    to work with a user-defined type. 
-    The command supports two modes. The first one is a low-level mode when 
-    the user must provide as a first
-    argument of @{command (HOL) "setup_lifting"} a
-    quotient theorem @{text "Quotient R Abs Rep T"}. The
-    package configures a transfer rule for equality, a domain transfer
-    rules and sets up the @{command_def (HOL) "lift_definition"}
-    command to work with the abstract type. An optional theorem @{text "reflp R"}, which certifies that 
-    the equivalence relation R is total,
-    can be provided as a second argument. This allows the package to generate stronger transfer
-    rules. And finally, the parametricity theorem for R can be provided as a third argument.
-    This allows the package to generate a stronger transfer rule for equality.
-
-    Users generally will not prove the @{text Quotient} theorem manually for 
-    new types, as special commands exist to automate the process.
-    
-    When a new subtype is defined by @{command (HOL) typedef},
-    @{command (HOL) "setup_lifting"} can be used in its second mode,
-    where only the type_definition theorem @{text "type_definition
-    Rep Abs A"} is used as an argument of the command.  The command
-    internally proves the corresponding Quotient theorem and
-    registers it using its first mode.
-
-    For quotients, the command @{command (HOL) quotient_type} can be
-    used.  The command defines a new quotient type and, similarly to
-    the previous case, the corresponding Quotient theorem is proved
-    and registered by @{command (HOL) setup_lifting}.
-    
-    The command @{command (HOL) "setup_lifting"} also sets up the code generator
-    for the new type. Later on, when a new constant is defined by @{command (HOL) "lift_definition"},
-    the Lifting package proves and registers a code equation (if there is one) for the new constant.
-    If the option @{text "no_code"} is specified, the Lifting package does not set up the code
-    generator and as a consequence no code equations involving an abstract type are registered
-    by @{command (HOL) "lift_definition"}.
-
-  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL) "is"} @{text t}
-    Defines a new function @{text f} with an abstract type @{text \<tau>}
-    in terms of a corresponding operation @{text t} on a
-    representation type. More formally, if @{text "t :: \<sigma>"}, then
-    the command builds a term @{text "F"} as a corresponding combination of abstraction 
-    and representation functions such that @{text "F :: \<sigma> \<Rightarrow> \<tau>" } and 
-    defines @{text f} is as @{text "f \<equiv> F t"}.
-    The term @{text t} does not have to be necessarily a constant but it can be any term.
-
-    The command opens a proof environment and the user must discharge 
-    a respectfulness proof obligation. For a type copy, i.e., a typedef with @{text
-    UNIV}, the obligation is discharged automatically. The proof goal is
-    presented in a user-friendly, readable form. A respectfulness
-    theorem in the standard format @{text f.rsp} and a transfer rule
-    @{text f.transfer} for the Transfer package are generated by the
-    package.
-
-    The user can specify a parametricity theorem for @{text t} after the keyword 
-    @{keyword "parametric"}, which allows the command
-    to generate a parametric transfer rule for @{text f}.
-
-    For each constant defined through trivial quotients (type copies or
-    subtypes) @{text f.rep_eq} is generated. The equation is a code certificate
-    that defines @{text f} using the representation function.
-
-    For each constant @{text f.abs_eq} is generated. The equation is unconditional
-    for total quotients. The equation defines @{text f} using
-    the abstraction function.
-
-    Integration with [@{attribute code} abstract]: For subtypes (e.g.,
-    corresponding to a datatype invariant, such as dlist), @{command
-    (HOL) "lift_definition"} uses a code certificate theorem
-    @{text f.rep_eq} as a code equation.
-
-    Integration with [@{attribute code} equation]: For total quotients, @{command
-    (HOL) "lift_definition"} uses @{text f.abs_eq} as a code equation.
-
-  \item @{command (HOL) lifting_forget} and @{command (HOL)
-    lifting_update} serve for storing and deleting the set-up of the
-    Lifting package and the corresponding transfer rules defined by
-    this package.  This is useful for hiding the type construction
-    details of an abstract type once the construction is finished,
-    while still allowing additions to the construction later when
-    necessary.
-
-    Whenever the Lifting package is set up with a new abstract type
-    @{text "\<tau>"} by @{command (HOL) "setup_lifting"}, the package
-    defines a new bundle called @{text "\<tau>.lifting"}.  This bundle
-    already includes the set-up for the Lifting package.  The new
-    transfer rules introduced by @{command (HOL) "lift_definition"}
-    can be stored in the bundle by the command @{command (HOL)
-    "lifting_update"} @{text "\<tau>.lifting"}.
-
-    The command @{command (HOL) "lifting_forget"} @{text "\<tau>.lifting"} deletes set-up of the Lifting 
-    package
-    for @{text \<tau>} and deletes all the transfer rules that were introduced
-    by @{command (HOL) "lift_definition"} using @{text \<tau>} as an abstract type.
-
-    The stored set-up in a bundle can be reintroduced by the Isar commands for including a bundle
-    (@{command "include"}, @{keyword "includes"} and @{command "including"}).
-
-  \item @{command (HOL) "print_quot_maps"} prints stored quotient map
-    theorems.
-
-  \item @{command (HOL) "print_quotients"} prints stored quotient
-    theorems.
-
-  \item @{attribute (HOL) quot_map} registers a quotient map
-    theorem. E.g., @{text "Quotient R Abs Rep T \<Longrightarrow> 
-    Quotient (list_all2 R) (map Abs) (map Rep) (list_all2 T)"}. 
-    For examples see @{file
-    "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files
-    in the same directory.
-
-  \item @{attribute (HOL) invariant_commute} registers a theorem that
-    shows a relationship between the constant @{text
-    Lifting.invariant} (used for internal encoding of proper
-    subtypes) and a relator.  Such theorems allow the package to hide
-    @{text Lifting.invariant} from the user, presenting
-    respectfulness theorems in a readable form.  For examples see
-    @{file "~~/src/HOL/List.thy"} or Lifting_*.thy files in the same
-    directory.
-
-  \item @{attribute (HOL) reflexivity_rule} registers a theorem that
-    shows that a relator respects left-totality and left-uniqueness.
-    For examples see @{file "~~/src/HOL/List.thy"} or @{file
-    "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files in the same
-    directory.  The property is used by a reflexivity prover, which
-    discharges respectfulness theorems for type copies and also
-    assumptions of abstraction function equations.
-
-  \item @{attribute (HOL) "relator_mono"} registers a property describing a monotonicity of a relator.
-    E.g., @{text "A \<le> B \<Longrightarrow> list_all2 A \<le> list_all2 B"}. For examples 
-    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
-    or Lifting_*.thy files in the same directory.
-    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
-    when a parametricity theorem for the raw term is specified.
-
-  \item @{attribute (HOL) "relator_distr"} registers a property describing a distributivity
-    of the relation composition and a relator. E.g., 
-    @{text "list_all2 R \<circ>\<circ> list_all2 S = list_all2 (R \<circ>\<circ> S)"}. 
-    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
-    when a parametricity theorem for the raw term is specified.
-    When this equality does not hold unconditionally (e.g., for the function type), the user can specified
-    each direction separately and also register multiple theorems with different set of assumptions.
-    This attribute can be used only after the monotonicity property was already registered by
-    @{attribute (HOL) "relator_mono"}. For examples 
-    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
-    or Lifting_*.thy files in the same directory.
-
-  \item @{attribute (HOL) quot_del} deletes the corresponding
-    Quotient theorem from the Lifting infrastructure and thus
-    de-registers the corresponding quotient.  This effectively
-    prevents @{command (HOL) lift_definition} from doing any lifting
-    for the corresponding type.  This attribute is intended for
-    low-level manipulation of the Lifting package set-up; @{command
-    (HOL) lifting_forget} is preferred for normal usage.
-
-  \item @{attribute (HOL) lifting_restore} @{text "Quotient_thm
-    pcr_def pcr_cr_eq_thm"} registers the Quotient theorem @{text
-    Quotient_thm} in the Lifting infrastructure and thus sets up
-    lifting for an abstract type @{text \<tau>} (that is defined by @{text
-    Quotient_thm}).  Optional theorems @{text pcr_def} and @{text
-    pcr_cr_eq_thm} can be specified to register the parametrized
-    correspondence relation for @{text \<tau>}.  E.g., for @{text "'a
-    dlist"}, @{text pcr_def} is @{text "pcr_dlist A \<equiv> list_all2 A \<circ>\<circ>
-    cr_dlist"} and @{text pcr_cr_eq_thm} is @{text "pcr_dlist op= =
-    op="}.  This attribute is intended for low-level manipulation of
-    the Lifting package set-up; using the bundle @{text \<tau>.lifting}
-    together with the commands @{command (HOL) lifting_forget} and
-    @{command (HOL) lifting_update} is preferred for normal usage.
-
-  \end{description}
-*}
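-
-subsubsection {* Example *}
-
-text {* The following sketch illustrates the interplay of @{command
-  (HOL) typedef}, @{command (HOL) "setup_lifting"} and @{command
-  (HOL) "lift_definition"} on a hypothetical type @{text pos} of
-  positive natural numbers (not part of the library).  The
-  respectfulness obligation of @{text add_pos} states that the sum of
-  positive numbers is positive; it is discharged by @{method simp}: *}
-
-typedef pos = "{n::nat. 0 < n}"
-  by (rule exI [of _ 1]) simp
-
-setup_lifting type_definition_pos
-
-lift_definition add_pos :: "pos \<Rightarrow> pos \<Rightarrow> pos"  is "op +"
-  by simp
-
-text {* Besides the definition of @{text add_pos}, the package
-  provides the transfer rule @{text add_pos.transfer} and the code
-  certificate @{text add_pos.rep_eq}. *}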
-
-
-section {* Coercive subtyping *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{attribute_def (HOL) coercion} & : & @{text attribute} \\
-    @{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
-    @{attribute_def (HOL) coercion_map} & : & @{text attribute} \\
-  \end{matharray}
-
-  Coercive subtyping allows the user to omit explicit type
-  conversions, also called \emph{coercions}.  Type inference will add
-  them as necessary when parsing a term. See
-  \cite{traytel-berghofer-nipkow-2011} for details.
-
-  @{rail \<open>
-    @@{attribute (HOL) coercion} (@{syntax term})?
-    ;
-    @@{attribute (HOL) coercion_map} (@{syntax term})?
-  \<close>}
-
-  \begin{description}
-
-  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
-  coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
-  @{text "\<sigma>\<^sub>2"} are type constructors without arguments.  Coercions are
-  composed by the inference algorithm if needed.  Note that the type
-  inference algorithm is complete only if the registered coercions
-  form a lattice.
-
-  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
-  new map function to lift coercions through type constructors. The
-  function @{text "map"} must conform to the following type pattern
-
-  \begin{matharray}{lll}
-    @{text "map"} & @{text "::"} &
-      @{text "f\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> f\<^sub>n \<Rightarrow> (\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t \<Rightarrow> (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t"} \\
-  \end{matharray}
-
-  where @{text "t"} is a type constructor and @{text "f\<^sub>i"} is of type
-  @{text "\<alpha>\<^sub>i \<Rightarrow> \<beta>\<^sub>i"} or @{text "\<beta>\<^sub>i \<Rightarrow> \<alpha>\<^sub>i"}.  Registering a map function
-  overwrites any existing map function for this particular type
-  constructor.
-
-  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
-  inference algorithm.
-
-  \end{description}
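-
-  For example (schematically), after enabling @{attribute (HOL)
-  coercion_enabled} and declaring the embedding @{text "int :: nat \<Rightarrow>
-  int"} as a @{attribute (HOL) coercion}, a mixed expression such as
-  @{text "(i::int) + (n::nat)"} is accepted and elaborated to @{text
-  "i + int n"}.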
-*}
-
-
-section {* Arithmetic proof support *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) arith} & : & @{text method} \\
-    @{attribute_def (HOL) arith} & : & @{text attribute} \\
-    @{attribute_def (HOL) arith_split} & : & @{text attribute} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{method (HOL) arith} decides linear arithmetic problems (on
-  types @{text nat}, @{text int}, @{text real}).  Any current facts
-  are inserted into the goal before running the procedure.
-
-  \item @{attribute (HOL) arith} declares facts that are supplied to
-  the arithmetic provers implicitly.
-
-  \item The @{attribute (HOL) arith_split} attribute declares case
-  split rules to be expanded before @{method (HOL) arith} is invoked.
-
-  \end{description}
-
-  Note that a simpler (but faster) arithmetic prover is already
-  invoked by the Simplifier.
-*}
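-
-text {* A minimal example of a goal within the scope of @{method
-  (HOL) arith}: *}
-
-lemma "(i::int) \<le> j \<Longrightarrow> i - 1 < j"
-  by arith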
-
-
-section {* Intuitionistic proof search *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) iprover} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) iprover} (@{syntax rulemod} *)
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) iprover} performs intuitionistic proof search,
-  depending on specifically declared rules from the context, or given
-  as explicit arguments.  Chained facts are inserted into the goal
-  before commencing proof search.
-
-  Rules need to be classified as @{attribute (Pure) intro},
-  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
-  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
-  applied aggressively (without considering back-tracking later).
-  Rules declared with ``@{text "?"}'' are ignored in proof search (the
-  single-step @{method (Pure) rule} method still observes these).  An
-  explicit weight annotation may be given as well; otherwise the
-  number of rule premises will be taken into account here.
-
-  \end{description}
-*}
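-
-text {* For example, a simple intuitionistic tautology can be proved
-  by supplying the relevant rules explicitly (a minimal sketch): *}
-
-lemma "(P \<longrightarrow> Q) \<longrightarrow> (Q \<longrightarrow> R) \<longrightarrow> P \<longrightarrow> R"
-  by (iprover intro: impI dest: mp)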
-
-
-section {* Model Elimination and Resolution *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "meson"} & : & @{text method} \\
-    @{method_def (HOL) "metis"} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) meson} @{syntax thmrefs}?
-    ;
-    @@{method (HOL) metis}
-      ('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
-      @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) meson} implements Loveland's model elimination
-  procedure \cite{loveland-78}.  See @{file
-  "~~/src/HOL/ex/Meson_Test.thy"} for examples.
-
-  \item @{method (HOL) metis} combines ordered resolution and ordered
-  paramodulation to find first-order (or mildly higher-order) proofs.
-  The first optional argument specifies a type encoding; see the
-  Sledgehammer manual \cite{isabelle-sledgehammer} for details.  The
-  directory @{file "~~/src/HOL/Metis_Examples"} contains several small
-  theories developed to a large extent using @{method (HOL) metis}.
-
-  \end{description}
-*}
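-
-text {* For illustration, two small first-order problems, well within
-  the scope of both methods: *}
-
-lemma "(\<exists>x. \<forall>y. R x y) \<longrightarrow> (\<forall>y. \<exists>x. R x y)"
-  by meson
-
-lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> P a \<Longrightarrow> Q a"
-  by metis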
-
-
-section {* Algebraic reasoning via Gr\"obner bases *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "algebra"} & : & @{text method} \\
-    @{attribute_def (HOL) algebra} & : & @{text attribute} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) algebra}
-      ('add' ':' @{syntax thmrefs})?
-      ('del' ':' @{syntax thmrefs})?
-    ;
-    @@{attribute (HOL) algebra} (() | 'add' | 'del')
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) algebra} performs algebraic reasoning via
-  Gr\"obner bases, see also \cite{Chaieb-Wenzel:2007} and
-  \cite[\S3.2]{Chaieb-thesis}. The method deals with two main
-  classes of problems:
-
-  \begin{enumerate}
-
-  \item Universal problems over multivariate polynomials in a
-  (semi)-ring/field/idom; the capabilities of the method are augmented
-  according to properties of these structures. For this problem class
-  the method is only complete for algebraically closed fields, since
-  the underlying method is based on Hilbert's Nullstellensatz, where
-  the equivalence only holds for algebraically closed fields.
-
-  The problems can contain equations @{text "p = 0"} or inequations
-  @{text "q \<noteq> 0"} anywhere within a universal problem statement.
-
-  \item All-exists problems of the following restricted (but useful)
-  form:
-
-  @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
-    e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
-    (\<exists>y\<^sub>1 \<dots> y\<^sub>k.
-      p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
-      \<dots> \<and>
-      p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
-
-  Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
-  polynomials only in the variables mentioned as arguments.
-
-  \end{enumerate}
-
-  The proof method is preceded by a simplification step, which may be
-  modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}.
-  This acts like declarations for the Simplifier
-  (\secref{sec:simplifier}) on a private simpset for this tool.
-
-  \item @{attribute algebra} (as attribute) manages the default
-  collection of pre-simplification rules of the above proof method.
-
-  \end{description}
-*}
-
-
-subsubsection {* Example *}
-
-text {* The subsequent example is from geometry: collinearity is
-  invariant under rotation.  *}
-
-type_synonym point = "int \<times> int"
-
-fun collinear :: "point \<Rightarrow> point \<Rightarrow> point \<Rightarrow> bool" where
-  "collinear (Ax, Ay) (Bx, By) (Cx, Cy) \<longleftrightarrow>
-    (Ax - Bx) * (By - Cy) = (Ay - By) * (Bx - Cx)"
-
-lemma collinear_inv_rotation:
-  assumes "collinear (Ax, Ay) (Bx, By) (Cx, Cy)" and "c\<^sup>2 + s\<^sup>2 = 1"
-  shows "collinear (Ax * c - Ay * s, Ay * c + Ax * s)
-    (Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)"
-  using assms by (algebra add: collinear.simps)
-
-text {*
- See also @{file "~~/src/HOL/ex/Groebner_Examples.thy"}.
-*}
-
-
-section {* Coherent Logic *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{method_def (HOL) "coherent"} & : & @{text method} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) coherent} @{syntax thmrefs}?
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) coherent} solves problems of \emph{Coherent
-  Logic} \cite{Bezem-Coquand:2005}, which covers applications in
-  confluence theory, lattice theory and projective geometry.  See
-  @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
-
-  \end{description}
-*}
-
-
-section {* Proving propositions *}
-
-text {*
-  In addition to the standard proof methods, a number of diagnostic
-  tools search for proofs and provide an Isar proof snippet on
-  success.  These tools are available via the following commands.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "solve_direct"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "try"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "try0"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "sledgehammer_params"} & : & @{text "theory \<rightarrow> theory"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) try}
-    ;
-
-    @@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thmrefs} ) + ) ?
-      @{syntax nat}?
-    ;
-
-    @@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}?
-    ;
-
-    @@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? )
-    ;
-    args: ( @{syntax name} '=' value + ',' )
-    ;
-    facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
-  \<close>} % FIXME check args "value"
-
-  \begin{description}
-
-  \item @{command (HOL) "solve_direct"} checks whether the current
-  subgoals can be solved directly by an existing theorem. Duplicate
-  lemmas can be detected in this way.
-
-  \item @{command (HOL) "try0"} attempts to prove a subgoal
-  using a combination of standard proof methods (@{method auto},
-  @{method simp}, @{method blast}, etc.).  Additional facts supplied
-  via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
-  "dest:"} are passed to the appropriate proof methods.
-
-  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
-  using a combination of provers and disprovers (@{command (HOL)
-  "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
-  "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
-  "nitpick"}).
-
-  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
-  using external automatic provers (resolution provers and SMT
-  solvers). See the Sledgehammer manual \cite{isabelle-sledgehammer}
-  for details.
-
-  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
-  "sledgehammer"} configuration options persistently.
-
-  \end{description}
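-
-  For example, ``@{text "sledgehammer [timeout = 60]"}'' invokes the
-  default provers with a one-minute time limit; the available options
-  are documented in the Sledgehammer manual
-  \cite{isabelle-sledgehammer}.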
-*}
-
-
-section {* Checking and refuting propositions *}
-
-text {*
-  Identifying incorrect propositions usually involves evaluation of
-  particular assignments and systematic counterexample search.  This
-  is supported by the following commands.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "value"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "values"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "quickcheck"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "nitpick"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
-    @{command_def (HOL) "quickcheck_params"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "nitpick_params"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "quickcheck_generator"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "find_unused_assms"} & : & @{text "context \<rightarrow>"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
-    ;
-
-    @@{command (HOL) values} modes? @{syntax nat}? @{syntax term}
-    ;
-
-    (@@{command (HOL) quickcheck} | @@{command (HOL) nitpick})
-      ( '[' args ']' )? @{syntax nat}?
-    ;
-
-    (@@{command (HOL) quickcheck_params} |
-      @@{command (HOL) nitpick_params}) ( '[' args ']' )?
-    ;
-
-    @@{command (HOL) quickcheck_generator} @{syntax nameref} \<newline>
-      'operations:' ( @{syntax term} +)
-    ;
-
-    @@{command (HOL) find_unused_assms} @{syntax name}?
-    ;
-    modes: '(' (@{syntax name} +) ')'
-    ;
-    args: ( @{syntax name} '=' value + ',' )
-  \<close>} % FIXME check "value"
-
-  \begin{description}
-
-  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
-  term; optionally @{text modes} can be specified, which are appended
-  to the current print mode; see \secref{sec:print-modes}.
-  Internally, the evaluation is performed by registered evaluators,
-  which are invoked sequentially until a result is returned.
-  Alternatively a specific evaluator can be selected using square
-  brackets; typical evaluators use the current set of code equations
-  to normalize and include @{text simp} for fully symbolic evaluation
-  using the simplifier, @{text nbe} for \emph{normalization by
-  evaluation} and @{text code} for code generation in SML.
-
-  \item @{command (HOL) "values"}~@{text t} enumerates a set
-  comprehension by evaluation and prints its values up to the given
-  number of solutions; optionally @{text modes} can be specified,
-  which are appended to the current print mode; see
-  \secref{sec:print-modes}.
-
-  \item @{command (HOL) "quickcheck"} tests the current goal for
-  counterexamples using a series of assignments for its free
-  variables; by default the first subgoal is tested, an other can be
-  selected explicitly using an optional goal index.  Assignments can
-  be chosen exhausting the search space up to a given size, or using a
-  fixed number of random assignments in the search space, or exploring
-  the search space symbolically using narrowing.  By default,
-  quickcheck uses exhaustive testing.  A number of configuration
-  options are supported for @{command (HOL) "quickcheck"}, notably:
-
-    \begin{description}
-
-    \item[@{text tester}] specifies which testing approach to apply.
-    There are three testers, @{text exhaustive}, @{text random}, and
-    @{text narrowing}.  An unknown configuration option is treated as
-    an argument to tester, making @{text "tester ="} optional.  When
-    multiple testers are given, these are applied in parallel.  If no
-    tester is specified, quickcheck uses the testers that are set
-    active, i.e., configurations @{attribute
-    quickcheck_exhaustive_active}, @{attribute
-    quickcheck_random_active}, @{attribute
-    quickcheck_narrowing_active} are set to true.
-
-    \item[@{text size}] specifies the maximum size of the search space
-    for assignment values.
-
-    \item[@{text genuine_only}] sets quickcheck only to return
-    genuine counterexamples, but not potentially spurious
-    counterexamples due to underspecified functions.
-
-    \item[@{text abort_potential}] sets quickcheck to abort once it
-    finds a potentially spurious counterexample, without continuing
-    to search for a further genuine counterexample.
-    For this option to be effective, the @{text genuine_only} option
-    must be set to false.
-
-    \item[@{text eval}] takes a term or a list of terms and evaluates
-    these terms under the variable assignment found by quickcheck.
-    This option is currently only supported by the default
-    (exhaustive) tester.
-
-    \item[@{text iterations}] sets how many sets of assignments are
-    generated for each particular size.
-
-    \item[@{text no_assms}] specifies whether assumptions in
-    structured proofs should be ignored.
-
-    \item[@{text locale}] specifies how to process conjectures in
-    a locale context, i.e., they can be interpreted or expanded.
-    The option is a whitespace-separated list of the two words
-    @{text interpret} and @{text expand}. The list determines the
-    order in which they are employed. The default setting is to
-    first use interpretations and then test the expanded conjecture.
-    The option is only provided as an attribute declaration, but not
-    as a parameter to the command.
-
-    \item[@{text timeout}] sets the time limit in seconds.
-
-    \item[@{text default_type}] sets the type(s) generally used to
-    instantiate type variables.
-
-    \item[@{text report}] if set, quickcheck reports how many tests
-    fulfilled the preconditions.
-
-    \item[@{text use_subtype}] if set, quickcheck automatically lifts
-    conjectures to registered subtypes if possible, and tests the
-    lifted conjecture.
-
-    \item[@{text quiet}] if set, quickcheck does not output anything
-    while testing.
-
-    \item[@{text verbose}] if set, quickcheck informs about the
-    current size and cardinality while testing.
-
-    \item[@{text expect}] can be used to check if the user's
-    expectation was met (@{text no_expectation}, @{text
-    no_counterexample}, or @{text counterexample}).
-
-    \end{description}
-
-  These options can be given within square brackets.
-
-  Using the following type classes, the testers generate values and convert
-  them back into Isabelle terms for displaying counterexamples.
-    \begin{description}
-    \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
-      and @{class full_exhaustive} implement the testing. They take a 
-      testing function as a parameter, which takes a value of type @{typ "'a"}
-      and optionally produces a counterexample, and a size parameter for the test values.
-      In @{class full_exhaustive}, the testing function parameter additionally 
-      expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
-      of the tested value.
-
-      The canonical implementation for @{text exhaustive} testers calls the given
-      testing function on all values up to the given size and stops as soon
-      as a counterexample is found.
-
-    \item[@{text random}] The operation @{const Quickcheck_Random.random}
-      of the type class @{class random} generates a pseudo-random
-      value of the given size and a lazy term reconstruction of the value
-      in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
-      is defined in theory @{theory Random}.
-      
-    \item[@{text narrowing}] implements Haskell's Lazy Smallcheck~\cite{runciman-naylor-lindblad}
-      using the type classes @{class narrowing} and @{class partial_term_of}.
-      Variables in the current goal are initially represented as symbolic variables.
-      If the execution of the goal tries to evaluate one of them, the test engine
-      replaces it with refinements provided by @{const narrowing}.
-      Narrowing views every value as a sum-of-products which is expressed using the operations
-      @{const Quickcheck_Narrowing.cons} (embedding a value),
-      @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
-      The refinement should enable further evaluation of the goal.
-
-      For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
-      can be recursively defined as
-      @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
-                (Quickcheck_Narrowing.apply
-                  (Quickcheck_Narrowing.apply
-                    (Quickcheck_Narrowing.cons (op #))
-                    narrowing)
-                  narrowing)"}.
-      If a symbolic variable of type @{typ "_ list"} is evaluated, it
-      is replaced by (i)~the empty list @{term "[]"} and (ii)~a
-      non-empty list whose head and tail can then be recursively
-      refined if needed.
-
-      To reconstruct counterexamples, the operation @{const partial_term_of} transforms
-      @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
-      The deep representation models symbolic variables as
-      @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
-      @{const Code_Evaluation.Free}, and refined values as
-      @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
-      denotes the index in the sum of refinements. In the above example for lists,
-      @{term "0"} corresponds to @{term "[]"} and @{term "1"}
-      to @{term "op #"}.
-
-      The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
-      such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
-      but it does not ensure consistency with @{const narrowing}.
-    \end{description}
-
-  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
-  "quickcheck"} configuration options persistently.
-
-  \item @{command (HOL) "quickcheck_generator"} creates random and
-  exhaustive value generators for a given type and operations.  It
-  generates values by using the operations as if they were
-  constructors of that type.
-
-  \item @{command (HOL) "nitpick"} tests the current goal for
-  counterexamples using a reduction to first-order relational
-  logic. See the Nitpick manual \cite{isabelle-nitpick} for details.
-
-  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
-  "nitpick"} configuration options persistently.
-
-  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
-  assumptions in theorems using quickcheck.
-  It takes the theory name to be checked for superfluous assumptions as
-  an optional argument.  If not provided, it checks the current theory.
-  Options to the internal quickcheck invocations can be changed with
-  common configuration declarations.
-
-  \end{description}
-*}
-
-
-section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *}
-
-text {*
-  The following tools of Isabelle/HOL support case analysis and
-  induction in unstructured tactic scripts; see also
-  \secref{sec:cases-induct} for proper Isar versions of similar ideas.
-
-  \begin{matharray}{rcl}
-    @{method_def (HOL) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (HOL) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (HOL) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
-    @{command_def (HOL) "inductive_cases"}@{text "\<^sup>*"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  \end{matharray}
-
-  @{rail \<open>
-    @@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
-    ;
-    @@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
-    ;
-    @@{method (HOL) ind_cases} (@{syntax prop}+) (@'for' (@{syntax name}+))?
-    ;
-    @@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
-    ;
-    rule: 'rule' ':' @{syntax thmref}
-  \<close>}
-
-  \begin{description}
-
-  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac} allow
-  reasoning about inductive types.  Rules are selected according to
-  the declarations by the @{attribute cases} and @{attribute induct}
-  attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
-  datatype} package already takes care of this.
-
-  These unstructured tactics feature both goal addressing and dynamic
-  instantiation.  Note that named rule cases are \emph{not} provided
-  as would be by the proper @{method cases} and @{method induct} proof
-  methods (see \secref{sec:cases-induct}).  Unlike the @{method
-  induct} method, @{method induct_tac} does not handle structured rule
-  statements, only the compact object-logic conclusion of the subgoal
-  being addressed.  A minimal script example is given after this
-  list.
-
-  \item @{method (HOL) ind_cases} and @{command (HOL)
-  "inductive_cases"} provide an interface to the internal @{ML_text
-  mk_cases} operation.  Rules are simplified in an unrestricted
-  forward manner.
-
-  While @{method (HOL) ind_cases} is a proof method to apply the
-  result immediately as elimination rules, @{command (HOL)
-  "inductive_cases"} provides case split theorems at the theory level
-  for later use.  The @{keyword "for"} argument of the @{method (HOL)
-  ind_cases} method allows to specify a list of variables that should
-  be generalized before applying the resulting rule.
-
-  \end{description}
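-
-  \medskip As a minimal sketch, an unstructured script using @{method
-  (HOL) induct_tac} may look like this:
-
-  \begin{verbatim}
-  lemma "length (xs @ ys) = length xs + length ys"
-    apply (induct_tac xs)
-    apply auto
-    done
-  \end{verbatim}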
-*}
-
-
-chapter {* Executable code *}
-
-text {* For validation purposes, it is often useful to \emph{execute}
-  specifications.  In principle, execution could be simulated by
-  Isabelle's inference kernel, i.e. by a combination of resolution and
-  simplification.  Unfortunately, this approach is rather inefficient.
-  A more efficient way of executing specifications is to translate
-  them into a functional programming language such as ML.
-
-  Isabelle provides a generic framework to support code generation
-  from executable specifications.  Isabelle/HOL instantiates these
-  mechanisms in a way that is amenable to end-user applications.  Code
-  can be generated for functional programs (including overloading
-  using type classes) targeting SML \cite{SML}, OCaml \cite{OCaml},
-  Haskell \cite{haskell-revised-report} and Scala
-  \cite{scala-overview-tech-report}.  Conceptually, code generation is
-  split up into three steps: \emph{selection} of code theorems,
-  \emph{translation} into an abstract executable view and
-  \emph{serialization} to a specific \emph{target language}.
-  Inductive specifications can be executed using the predicate
-  compiler which operates within HOL.  See \cite{isabelle-codegen} for
-  an introduction.
-
-  \begin{matharray}{rcl}
-    @{command_def (HOL) "export_code"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def (HOL) code} & : & @{text attribute} \\
-    @{command_def (HOL) "code_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "print_codesetup"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{attribute_def (HOL) code_unfold} & : & @{text attribute} \\
-    @{attribute_def (HOL) code_post} & : & @{text attribute} \\
-    @{attribute_def (HOL) code_abbrev} & : & @{text attribute} \\
-    @{command_def (HOL) "print_codeproc"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "code_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "code_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def (HOL) "code_reserved"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_printing"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_identifier"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_monad"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_reflect"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (HOL) "code_pred"} & : & @{text "theory \<rightarrow> proof(prove)"}
-  \end{matharray}
-
-  @{rail \<open>
-    @@{command (HOL) export_code} ( @'open' ) ? ( constexpr + ) \<newline>
-       ( ( @'in' target ( @'module_name' @{syntax string} ) ? \<newline>
-        ( @'file' @{syntax string} ) ? ( '(' args ')' ) ?) + ) ?
-    ;
-
-    const: @{syntax term}
-    ;
-
-    constexpr: ( const | 'name._' | '_' )
-    ;
-
-    typeconstructor: @{syntax nameref}
-    ;
-
-    class: @{syntax nameref}
-    ;
-
-    target: 'SML' | 'OCaml' | 'Haskell' | 'Scala' | 'Eval'
-    ;
-
-    @@{attribute (HOL) code} ( 'del' | 'equation' | 'abstype' | 'abstract'
-      | 'drop:' ( const + ) | 'abort:' ( const + ) )?
-    ;
-
-    @@{command (HOL) code_datatype} ( const + )
-    ;
-
-    @@{attribute (HOL) code_unfold} ( 'del' ) ?
-    ;
-
-    @@{attribute (HOL) code_post} ( 'del' ) ?
-    ;
-
-    @@{attribute (HOL) code_abbrev}
-    ;
-
-    @@{command (HOL) code_thms} ( constexpr + ) ?
-    ;
-
-    @@{command (HOL) code_deps} ( constexpr + ) ?
-    ;
-
-    @@{command (HOL) code_reserved} target ( @{syntax string} + )
-    ;
-
-    symbol_const: ( @'constant' const )
-    ;
-
-    symbol_typeconstructor: ( @'type_constructor' typeconstructor )
-    ;
-
-    symbol_class: ( @'type_class' class )
-    ;
-
-    symbol_class_relation: ( @'class_relation' class ( '<' | '\<subseteq>' ) class )
-    ;
-
-    symbol_class_instance: ( @'class_instance' typeconstructor @'::' class )
-    ;
-
-    symbol_module: ( @'code_module' name )
-    ;
-
-    syntax: @{syntax string} | ( @'infix' | @'infixl' | @'infixr' ) @{syntax nat} @{syntax string}
-    ;
-
-    printing_const: symbol_const ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' syntax ? + @'and' )
-    ;
-
-    printing_typeconstructor: symbol_typeconstructor ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' syntax ? + @'and' )
-    ;
-
-    printing_class: symbol_class ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' @{syntax string} ? + @'and' )
-    ;
-
-    printing_class_relation: symbol_class_relation ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' @{syntax string} ? + @'and' )
-    ;
-
-    printing_class_instance: symbol_class_instance ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' '-' ? + @'and' )
-    ;
-
-    printing_module: symbol_module ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' ( @{syntax string} ( @'attach' ( const + ) ) ? ) ? + @'and' )
-    ;
-
-    @@{command (HOL) code_printing} ( ( printing_const | printing_typeconstructor
-      | printing_class | printing_class_relation | printing_class_instance
-      | printing_module ) + '|' )
-    ;
-
-    @@{command (HOL) code_identifier} ( ( symbol_const | symbol_typeconstructor
-      | symbol_class | symbol_class_relation | symbol_class_instance
-      | symbol_module ) ( '\<rightharpoonup>' | '=>' ) \<newline>
-      ( '(' target ')' @{syntax string} ? + @'and' ) + '|' )
-    ;
-
-    @@{command (HOL) code_monad} const const target
-    ;
-
-    @@{command (HOL) code_reflect} @{syntax string} \<newline>
-      ( @'datatypes' ( @{syntax string} '=' ( '_' | ( @{syntax string} + '|' ) + @'and' ) ) ) ? \<newline>
-      ( @'functions' ( @{syntax string} + ) ) ? ( @'file' @{syntax string} ) ?
-    ;
-
-    @@{command (HOL) code_pred} \<newline> ('(' @'modes' ':' modedecl ')')? \<newline> const
-    ;
-
-    modedecl: (modes | ((const ':' modes) \<newline>
-        (@'and' ((const ':' modes @'and') +))?))
-    ;
-
-    modes: mode @'as' const
-  \<close>}
-
-  \begin{description}
-
-  \item @{command (HOL) "export_code"} generates code for a given list
-  of constants in the specified target language(s).  If no
-  serialization instruction is given, only abstract code is generated
-  internally.
-
-  Constants may be specified by giving them literally, referring to
-  all executable constants within a certain theory by giving @{text
-  "name._"}, or referring to \emph{all} executable constants currently
-  available by giving @{text "_"}.
-
-  By default, exported identifiers are minimized per module.  This
-  can be suppressed by prepending @{keyword "open"} to the list
-  of constants.
-
-  By default, for each involved theory one corresponding name space
-  module is generated.  Alternatively, a module name may be specified
-  after the @{keyword "module_name"} keyword; then \emph{all} code is
-  placed in this module.
-
-  For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification
-  refers to a single file; for \emph{Haskell}, it refers to a whole
-  directory, where code is generated in multiple files reflecting the
-  module hierarchy.  Omitting the file specification denotes standard
-  output.
-
-  Serializers take an optional list of arguments in parentheses.
-  For \emph{Haskell} a module name prefix may be given using the
-  ``@{text "root:"}'' argument; ``@{text string_classes}'' adds a
-  ``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate
-  datatype declaration.  Examples are given after this list.
-
-  \item @{attribute (HOL) code} declares code equations for code
-  generation.  Variant @{text "code equation"} declares a conventional
-  equation as code equation.  Variants @{text "code abstype"} and
-  @{text "code abstract"} declare abstract datatype certificates or
-  code equations on abstract datatype representations respectively.
-  Vanilla @{text "code"} falls back to @{text "code equation"}
-  or @{text "code abstype"} depending on the syntactic shape
-  of the underlying equation.  Variant @{text "code del"}
-  deselects a code equation for code generation.
-
-  Variants @{text "code drop:"} and @{text "code abort:"} take
-  a list of constant as arguments and drop all code equations declared
-  for them.  In the case of {text abort}, these constants then are
-  are not required to have a definition by means of code equations;
-  if needed these are implemented by program abort (exception) instead.
-
-  Usually packages introducing code equations provide a reasonable
-  default setup for selection.  
-
-  \item @{command (HOL) "code_datatype"} specifies a constructor set
-  for a logical type.
-
-  \item @{command (HOL) "print_codesetup"} gives an overview on
-  selected code equations and code generator datatypes.
-
-  \item @{attribute (HOL) code_unfold} declares (or with option
-  ``@{text "del"}'' removes) theorems which during preprocessing
-  are applied as rewrite rules to any code equation or evaluation
-  input.
-
-  \item @{attribute (HOL) code_post} declares (or with option ``@{text
-  "del"}'' removes) theorems which are applied as rewrite rules to any
-  result of an evaluation.
-
-  \item @{attribute (HOL) code_abbrev} declares equations which are
-  applied as rewrite rules to any result of an evaluation and
-  symmetrically during preprocessing to any code equation or evaluation
-  input.
-
-  \item @{command (HOL) "print_codeproc"} prints the setup of the code
-  generator preprocessor.
-
-  \item @{command (HOL) "code_thms"} prints a list of theorems
-  representing the corresponding program containing all given
-  constants after preprocessing.
-
-  \item @{command (HOL) "code_deps"} visualizes dependencies of
-  theorems representing the corresponding program containing all given
-  constants after preprocessing.
-
-  \item @{command (HOL) "code_reserved"} declares a list of names as
-  reserved for a given target, preventing them from being shadowed by any
-  generated code.
-
-  \item @{command (HOL) "code_printing"} associates a series of symbols
-  (constants, type constructors, classes, class relations, instances,
-  module names) with target-specific serializations; omitting a serialization
-  deletes an existing serialization.  An example is given after this
-  list.
-
-  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism
-  to generate monadic code for Haskell.
-
-  \item @{command (HOL) "code_identifier"} associates a a series of symbols
-  (constants, type constructors, classes, class relations, instances,
-  module names) with target-specific hints how these symbols shall be named.
-  These hints gain precedence over names for symbols with no hints at all.
-  Conflicting hints are subject to name disambiguation.
-  \emph{Warning:} It is at the discretion
-  of the user to ensure that name prefixes of identifiers in compound
-  statements like type classes or datatypes are still the same.
-
-  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
-  argument compiles code into the system runtime environment and
-  modifies the code generator setup such that future invocations of system
-  runtime code generation referring to one of the ``@{text
-  "datatypes"}'' or ``@{text "functions"}'' entities use these
-  precompiled entities.  With a ``@{text "file"}'' argument, the
-  corresponding code is generated into that specified file without
-  modifying the code generator setup.
-
-  \item @{command (HOL) "code_pred"} creates code equations for a
-    predicate given a set of introduction rules. Optional mode
-    annotations determine which arguments are supposed to be input or
-    output. If alternative introduction rules are declared, one must
-    prove a corresponding elimination rule.
-
-  \end{description}
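-
-  \medskip As minimal sketches (with merely illustrative module and
-  file names), code for the list function @{text rev} may be exported
-  like this:
-
-  \begin{verbatim}
-  export_code rev in SML module_name Example file "example.ML"
-  \end{verbatim}
-
-  Custom serializations may be declared in a similar spirit, e.g.\
-  for the HOL booleans:
-
-  \begin{verbatim}
-  code_printing
-    type_constructor bool => (SML) "bool"
-  | constant True => (SML) "true"
-  | constant False => (SML) "false"
-  \end{verbatim}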
-*}
-
-end
--- a/src/Doc/Isar-Ref/Inner_Syntax.thy	Mon Apr 07 16:37:57 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1709 +0,0 @@
-theory Inner_Syntax
-imports Base Main
-begin
-
-chapter {* Inner syntax --- the term language \label{ch:inner-syntax} *}
-
-text {* The inner syntax of Isabelle provides concrete notation for
-  the main entities of the logical framework, notably @{text
-  "\<lambda>"}-terms with types and type classes.  Applications may either
-  extend existing syntactic categories by additional notation, or
-  define new sub-languages that are linked to the standard term
-  language via some explicit markers.  For example @{verbatim
-  FOO}~@{text "foo"} could embed the syntax corresponding for some
-  user-defined nonterminal @{text "foo"} --- within the bounds of the
-  given lexical syntax of Isabelle/Pure.
-
-  The most basic way to specify concrete syntax for logical entities
-  works via mixfix annotations (\secref{sec:mixfix}), which may
-  usually be given as part of the original declaration or via explicit
-  notation commands later on (\secref{sec:notation}).  This already
-  covers many needs of concrete syntax without having to understand
-  the full complexity of inner syntax layers.
-
-  Further details of the syntax engine involve the classical
-  distinction of lexical language versus context-free grammar (see
-  \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax
-  transformations} (see \secref{sec:syntax-transformations}).
-*}
-
-
-section {* Printing logical entities *}
-
-subsection {* Diagnostic commands \label{sec:print-diag} *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "typ"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "term"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "prop"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "thm"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "full_prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{command_def "print_state"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
-  \end{matharray}
-
-  These diagnostic commands assist interactive development by printing
-  internal logical entities in a human-readable fashion.
-
-  @{rail \<open>
-    @@{command typ} @{syntax modes}? @{syntax type} ('::' @{syntax sort})?
-    ;
-    @@{command term} @{syntax modes}? @{syntax term}
-    ;
-    @@{command prop} @{syntax modes}? @{syntax prop}
-    ;
-    @@{command thm} @{syntax modes}? @{syntax thmrefs}
-    ;
-    ( @@{command prf} | @@{command full_prf} ) @{syntax modes}? @{syntax thmrefs}?
-    ;
-    @@{command print_state} @{syntax modes}?
-    ;
-    @{syntax_def modes}: '(' (@{syntax name} + ) ')'
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
-  according to the current context.
-
-  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
-  determine the most general way to make @{text "\<tau>"} conform to sort
-  @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
-  belongs to that sort.  Dummy type parameters ``@{text "_"}''
-  (underscore) are assigned to fresh type variables with most general
-  sorts, according to the principles of type-inference.
-
-  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
-  read, type-check and print terms or propositions according to the
-  current theory or proof context; the inferred type of @{text t} is
-  output as well.  Note that these commands are also useful in
-  inspecting the current environment of term abbreviations.
-
-  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
-  theorems from the current theory or proof context.  Note that any
-  attributes included in the theorem specifications are applied to a
-  temporary context derived from the current theory or proof; the
-  result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
-  \<dots>, a\<^sub>n"} do not have any permanent effect.
-
-  \item @{command "prf"} displays the (compact) proof term of the
-  current proof state (if present), or of the given theorems. Note
-  that this requires proof terms to be switched on for the current
-  object logic (see the ``Proof terms'' section of the Isabelle
-  reference manual for information on how to do this).
-
-  \item @{command "full_prf"} is like @{command "prf"}, but displays
-  the full proof term, i.e.\ also displays information omitted in the
-  compact proof term, which is denoted by ``@{text _}'' placeholders
-  there.
-
-  \item @{command "print_state"} prints the current proof state (if
-  present), including current facts and goals.
-
-  \end{description}
-
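-  For example, these commands may be used like this within some
-  theory or proof context (a minimal sketch):
-
-  \begin{verbatim}
-  typ "'a => 'a list"
-  term "rev (rev xs)"
-  thm conjI conjunct1
-  \end{verbatim}
-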
-  All of the diagnostic commands above admit a list of @{text modes}
-  to be specified, which is appended to the current print mode; see
-  also \secref{sec:print-modes}.  Thus the output behavior may be
-  modified according to particular print mode features.  For example,
-  @{command "print_state"}~@{text "(latex xsymbols)"} prints the
-  current proof state with mathematical symbols and special characters
-  represented in {\LaTeX} source, according to the Isabelle style
-  \cite{isabelle-sys}.
-
-  Note that antiquotations (cf.\ \secref{sec:antiq}) provide a more
-  systematic way to include formal items into the printed text
-  document.
-*}
-
-
-subsection {* Details of printed content *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def show_markup} & : & @{text attribute} \\
-    @{attribute_def show_types} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_sorts} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_consts} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_abbrevs} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def show_brackets} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def names_long} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def names_short} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def names_unique} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def eta_contract} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def goals_limit} & : & @{text attribute} & default @{text 10} \\
-    @{attribute_def show_main_goal} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_hyps} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_tags} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def show_question_marks} & : & @{text attribute} & default @{text true} \\
-  \end{tabular}
-  \medskip
-
-  These configuration options control the detail of information that
-  is displayed for types, terms, theorems, goals etc.  See also
-  \secref{sec:config}.
-
-  \begin{description}
-
-  \item @{attribute show_markup} controls direct inlining of markup
-  into the printed representation of formal entities --- notably type
-  and sort constraints.  This enables Prover IDE users to retrieve
-  that information via tooltips or popups while hovering with the
-  mouse over the output window, for example.  Consequently, this
-  option is enabled by default for Isabelle/jEdit, but disabled for
-  TTY and Proof~General~/Emacs where document markup would not work.
-
-  \item @{attribute show_types} and @{attribute show_sorts} control
-  printing of type constraints for term variables, and sort
-  constraints for type variables.  By default, neither of these are
-  shown in output.  If @{attribute show_sorts} is enabled, types are
-  always shown as well.  In Isabelle/jEdit, manual setting of these
-  options is normally not required thanks to @{attribute show_markup}
-  above.
-
-  Note that displaying types and sorts may explain why a polymorphic
-  inference rule fails to resolve with some goal, or why a rewrite
-  rule does not apply as expected; see also the example after this
-  list.
-
-  \item @{attribute show_consts} controls printing of types of
-  constants when displaying a goal state.
-
-  Note that the output can be enormous, because polymorphic constants
-  often occur at several different type instances.
-
-  \item @{attribute show_abbrevs} controls folding of constant
-  abbreviations.
-
-  \item @{attribute show_brackets} controls bracketing in pretty
-  printed output.  If enabled, all sub-expressions of the pretty
-  printing tree will be parenthesized, even if this produces malformed
-  term syntax!  This crude way of showing the internal structure of
-  pretty printed entities may occasionally help to diagnose problems
-  with operator priorities, for example.
-
-  \item @{attribute names_long}, @{attribute names_short}, and
-  @{attribute names_unique} control the way of printing fully
-  qualified internal names in external form.  See also
-  \secref{sec:antiq} for the document antiquotation options of the
-  same names.
-
-  \item @{attribute eta_contract} controls @{text "\<eta>"}-contracted
-  printing of terms.
-
-  The @{text \<eta>}-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"},
-  provided @{text x} is not free in @{text f}.  It asserts
-  \emph{extensionality} of functions: @{prop "f \<equiv> g"} if @{prop "f x \<equiv>
-  g x"} for all @{text x}.  Higher-order unification frequently puts
-  terms into a fully @{text \<eta>}-expanded form.  For example, if @{text
-  F} has type @{text "(\<tau> \<Rightarrow> \<tau>) \<Rightarrow> \<tau>"} then its expanded form is @{term
-  "\<lambda>h. F (\<lambda>x. h x)"}.
-
-  Enabling @{attribute eta_contract} makes Isabelle perform @{text
-  \<eta>}-contractions before printing, so that @{term "\<lambda>h. F (\<lambda>x. h x)"}
-  appears simply as @{text F}.
-
-  Note that the distinction between a term and its @{text \<eta>}-expanded
-  form occasionally matters.  While higher-order resolution and
-  rewriting operate modulo @{text "\<alpha>\<beta>\<eta>"}-conversion, some other tools
-  might look at terms more discretely.
-
-  \item @{attribute goals_limit} controls the maximum number of
-  subgoals to be printed.
-
-  \item @{attribute show_main_goal} controls whether the main result
-  to be proven should be displayed.  This information might be
-  relevant for schematic goals, to inspect the current claim that has
-  been synthesized so far.
-
-  \item @{attribute show_hyps} controls printing of implicit
-  hypotheses of local facts.  Normally, only those hypotheses are
-  displayed that are \emph{not} covered by the assumptions of the
-  current context: this situation indicates a fault in some tool being
-  used.
-
-  By enabling @{attribute show_hyps}, output of \emph{all} hypotheses
-  can be enforced, which is occasionally useful for diagnostic
-  purposes.
-
-  \item @{attribute show_tags} controls printing of extra annotations
-  within theorems, such as internal position information, or the case
-  names being attached by the attribute @{attribute case_names}.
-
-  Note that the @{attribute tagged} and @{attribute untagged}
-  attributes provide low-level access to the collection of tags
-  associated with a theorem.
-
-  \item @{attribute show_question_marks} controls printing of question
-  marks for schematic variables, such as @{text ?x}.  Only the leading
-  question mark is affected, the remaining text is unchanged
-  (including proper markup for schematic variables that might be
-  relevant for user interfaces).
-
-  \end{description}
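-
-  For example, @{attribute show_types} may be enabled via the usual
-  syntax for configuration options (cf.\ \secref{sec:config}), as a
-  minimal sketch:
-
-  \begin{verbatim}
-  declare [[show_types = true]]
-  \end{verbatim}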
-*}
-
-
-subsection {* Alternative print modes \label{sec:print-modes} *}
-
-text {*
-  \begin{mldecls}
-    @{index_ML print_mode_value: "unit -> string list"} \\
-    @{index_ML Print_Mode.with_modes: "string list -> ('a -> 'b) -> 'a -> 'b"} \\
-  \end{mldecls}
-
-  The \emph{print mode} facility allows to modify various operations
-  for printing.  Commands like @{command typ}, @{command term},
-  @{command thm} (see \secref{sec:print-diag}) take additional print
-  modes as optional argument.  The underlying ML operations are as
-  follows.
-
-  \begin{description}
-
-  \item @{ML "print_mode_value ()"} yields the list of currently
-  active print mode names.  This should be understood as symbolic
-  representation of certain individual features for printing (with
-  precedence from left to right).
-
-  \item @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
-  @{text "f x"} in an execution context where the print mode is
-  prepended by the given @{text "modes"}.  This provides a thread-safe
-  way to augment print modes.  It is also monotonic in the set of mode
-  names: it retains the default print mode that certain
-  user-interfaces might have installed for their proper functioning!
-
-  \end{description}
-
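-  For example, the following ML expression (a minimal sketch,
-  assuming a context @{text ctxt} and a term @{text t} given in ML)
-  produces output with proper mathematical symbols:
-
-  \begin{verbatim}
-  Print_Mode.with_modes ["xsymbols"]
-    (Syntax.string_of_term ctxt) t
-  \end{verbatim}
-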
-  \begin{warn}
-  The old global reference @{ML print_mode} should never be used
-  directly in applications.  Its main reason for being publicly
-  accessible is to support historic versions of Proof~General.
-  \end{warn}
-
-  \medskip The pretty printer for inner syntax maintains alternative
-  mixfix productions for any print mode name invented by the user, say
-  in commands like @{command notation} or @{command abbreviation}.
-  Mode names can be arbitrary, but the following ones have a specific
-  meaning by convention:
-
-  \begin{itemize}
-
-  \item @{verbatim "\"\""} (the empty string): default mode;
-  implicitly active as last element in the list of modes.
-
-  \item @{verbatim input}: dummy print mode that is never active; may
-  be used to specify notation that is only available for input.
-
-  \item @{verbatim internal}: dummy print mode that is never active;
-  used internally in Isabelle/Pure.
-
-  \item @{verbatim xsymbols}: enable proper mathematical symbols
-  instead of ASCII art.\footnote{This traditional mode name stems from
-  the ``X-Symbol'' package for old versions of Proof~General with XEmacs,
-  although that package has been superseded by Unicode in recent
-  years.}
-
-  \item @{verbatim HTML}: additional mode that is active in HTML
-  presentation of Isabelle theory sources; allows to provide
-  alternative output notation.
-
-  \item @{verbatim latex}: additional mode that is active in {\LaTeX}
-  document preparation of Isabelle theory sources; allows to provide
-  alternative output notation.
-
-  \end{itemize}
-*}
-
-
-subsection {* Printing limits *}
-
-text {*
-  \begin{mldecls}
-    @{index_ML Pretty.margin_default: "int Unsynchronized.ref"} \\
-  \end{mldecls}
-
-  \begin{tabular}{rcll}
-    @{attribute_def ML_print_depth} & : & @{text attribute} & default 10 \\ %FIXME move?
-  \end{tabular}
-
-  \begin{description}
-
-  \item @{ML Pretty.margin_default} indicates the global default for
-  the right margin of the built-in pretty printer, with initial value
-  76.  Note that user-interfaces typically control margins
-  automatically when resizing windows, or even bypass the formatting
-  engine of Isabelle/ML altogether and do it within the front end via
-  Isabelle/Scala.
-
-  \item @{attribute ML_print_depth} limits the printing depth of the
-  ML toplevel pretty printer; the precise effect depends on the ML
-  compiler and run-time system.  Typically the limit should be less
-  than 10.  Bigger values such as 100--1000 are useful for debugging.
-
-  \end{description}
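-
-  For example, the depth limit may be raised temporarily for a
-  debugging session via the usual syntax for configuration options:
-
-  \begin{verbatim}
-  declare [[ML_print_depth = 100]]
-  \end{verbatim}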
-*}
-
-
-section {* Mixfix annotations \label{sec:mixfix} *}
-
-text {* Mixfix annotations specify concrete \emph{inner syntax} of
-  Isabelle types and terms.  Locally fixed parameters in toplevel
-  theorem statements, locale and class specifications also admit
-  mixfix annotations in a fairly uniform manner.  A mixfix annotation
-  describes the concrete syntax, the translation to abstract
-  syntax, and the pretty printing.  Special case annotations provide a
-  simple means of specifying infix operators and binders.
-
-  Isabelle mixfix syntax is inspired by {\OBJ} \cite{OBJ}.  It allows
-  to specify any context-free priority grammar, which is more general
-  than the fixity declarations of ML and Prolog.
-
-  @{rail \<open>
-    @{syntax_def mixfix}: '('
-      @{syntax template} prios? @{syntax nat}? |
-      (@'infix' | @'infixl' | @'infixr') @{syntax template} @{syntax nat} |
-      @'binder' @{syntax template} prios? @{syntax nat} |
-      @'structure'
-    ')'
-    ;
-    template: string
-    ;
-    prios: '[' (@{syntax nat} + ',') ']'
-  \<close>}
-
-  The string given as @{text template} may include literal text,
-  spacing, blocks, and arguments (denoted by ``@{text _}''); the
-  special symbol ``@{verbatim "\<index>"}'' (printed as ``@{text "\<index>"}'')
-  represents an index argument that specifies an implicit @{keyword
-  "structure"} reference (see also \secref{sec:locale}).  Only locally
-  fixed variables may be declared as @{keyword "structure"}.
-
-  Infix and binder declarations provide common abbreviations for
-  particular mixfix declarations.  So in practice, mixfix templates
-  mostly degenerate to literal text for concrete syntax, such as
-  ``@{verbatim "++"}'' for an infix symbol.  *}
-
-
-subsection {* The general mixfix form *}
-
-text {* In full generality, mixfix declarations work as follows.
-  Suppose a constant @{text "c :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} is annotated by
-  @{text "(mixfix [p\<^sub>1, \<dots>, p\<^sub>n] p)"}, where @{text "mixfix"} is a string
-  @{text "d\<^sub>0 _ d\<^sub>1 _ \<dots> _ d\<^sub>n"} consisting of delimiters that surround
-  argument positions as indicated by underscores.
-
-  Altogether this determines a production for a context-free priority
-  grammar, where for each argument @{text "i"} the syntactic category
-  is determined by @{text "\<tau>\<^sub>i"} (with priority @{text "p\<^sub>i"}), and the
-  result category is determined from @{text "\<tau>"} (with priority @{text
-  "p"}).  Priority specifications are optional, with default 0 for
-  arguments and 1000 for the result.\footnote{Omitting priorities is
-  prone to syntactic ambiguities unless the delimiter tokens determine
-  fully bracketed notation, as in @{text "if _ then _ else _ fi"}.}
-
-  Since @{text "\<tau>"} may be again a function type, the constant
-  type scheme may have more argument positions than the mixfix
-  pattern.  Printing a nested application @{text "c t\<^sub>1 \<dots> t\<^sub>m"} for
-  @{text "m > n"} works by attaching concrete notation only to the
-  innermost part, essentially by printing @{text "(c t\<^sub>1 \<dots> t\<^sub>n) \<dots> t\<^sub>m"}
-  instead.  If a term has fewer arguments than specified in the mixfix
-  template, the concrete syntax is ignored.
-
-  \medskip A mixfix template may also contain additional directives
-  for pretty printing, notably spaces, blocks, and breaks.  The
-  general template format is a sequence over any of the following
-  entities.
-
-  \begin{description}
-
-  \item @{text "d"} is a delimiter, namely a non-empty sequence of
-  characters other than the following special characters:
-
-  \smallskip
-  \begin{tabular}{ll}
-    @{verbatim "'"} & single quote \\
-    @{verbatim "_"} & underscore \\
-    @{text "\<index>"} & index symbol \\
-    @{verbatim "("} & open parenthesis \\
-    @{verbatim ")"} & close parenthesis \\
-    @{verbatim "/"} & slash \\
-  \end{tabular}
-  \medskip
-
-  \item @{verbatim "'"} escapes the special meaning of these
-  meta-characters, producing a literal version of the following
-  character, unless that is a blank.
-
-  A single quote followed by a blank separates delimiters, without
-  affecting printing, but input tokens may have additional white space
-  here.
-
-  \item @{verbatim "_"} is an argument position, which stands for a
-  certain syntactic category in the underlying grammar.
-
-  \item @{text "\<index>"} is an indexed argument position; this is the place
-  where implicit structure arguments can be attached.
-
-  \item @{text "s"} is a non-empty sequence of spaces for printing.
-  This and the following specifications do not affect parsing at all.
-
-  \item @{verbatim "("}@{text n} opens a pretty printing block.  The
-  optional number specifies how much indentation to add when a line
-  break occurs within the block.  If the parenthesis is not followed
-  by digits, the indentation defaults to 0.  A block specified via
-  @{verbatim "(00"} is unbreakable.
-
-  \item @{verbatim ")"} closes a pretty printing block.
-
-  \item @{verbatim "//"} forces a line break.
-
-  \item @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
-  stands for the string of spaces (zero or more) right after the
-  slash.  These spaces are printed if the break is \emph{not} taken.
-
-  \end{description}
-
-  The general idea of pretty printing with blocks and breaks is also
-  described in \cite{paulson-ml2}; it goes back to \cite{Oppen:1980}.
-*}
-
-
-subsection {* Infixes *}
-
-text {* Infix operators are specified by convenient short forms that
-  abbreviate general mixfix annotations as follows:
-
-  \begin{center}
-  \begin{tabular}{lll}
-
-  @{verbatim "("}@{keyword_def "infix"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
-  & @{text "\<mapsto>"} &
-  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
-  @{verbatim "("}@{keyword_def "infixl"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
-  & @{text "\<mapsto>"} &
-  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
-  @{verbatim "("}@{keyword_def "infixr"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
-  & @{text "\<mapsto>"} &
-  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
-
-  \end{tabular}
-  \end{center}
-
-  The mixfix template @{verbatim "\"(_ "}@{text sy}@{verbatim "/ _)\""}
-  specifies two argument positions; the delimiter is preceded by a
-  space and followed by a space or line break; the entire phrase is a
-  pretty printing block.
-
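-  For example, an infix operator for exclusive-or on booleans may be
-  declared like this (a minimal sketch):
-
-  \begin{verbatim}
-  definition xor :: "bool => bool => bool"  (infixl "[+]" 60)
-    where "A [+] B == (A & ~ B) | (~ A & B)"
-  \end{verbatim}
-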
-  The alternative notation @{verbatim "op"}~@{text sy} is introduced
-  in addition.  Thus any infix operator may be written in prefix form
-  (as in ML), independently of the number of arguments in the term.
-*}
-
-
-subsection {* Binders *}
-
-text {* A \emph{binder} is a variable-binding construct such as a
-  quantifier.  The idea to formalize @{text "\<forall>x. b"} as @{text "All
-  (\<lambda>x. b)"} for @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow> bool"} already goes back
-  to \cite{church40}.  Isabelle declarations of certain higher-order
-  operators may be annotated with @{keyword_def "binder"} annotations
-  as follows:
-
-  \begin{center}
-  @{text "c :: "}@{verbatim "\""}@{text "(\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2) \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  ("}@{keyword "binder"}@{verbatim " \""}@{text "sy"}@{verbatim "\" ["}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
-  \end{center}
-
-  This introduces concrete binder syntax @{text "sy x. b"}, where
-  @{text x} is a bound variable of type @{text "\<tau>\<^sub>1"}, the body @{text
-  b} has type @{text "\<tau>\<^sub>2"} and the whole term has type @{text "\<tau>\<^sub>3"}.
-  The optional integer @{text p} specifies the syntactic priority of
-  the body; the default is @{text "q"}, which is also the priority of
-  the whole construct.
-
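-  For example, a hypothetical declaration of a universal quantifier
-  in the manner of HOL could look like this:
-
-  \begin{verbatim}
-  All :: "('a => bool) => bool"  (binder "ALL " 10)
-  \end{verbatim}
-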
-  Internally, the binder syntax is expanded to something like this:
-  \begin{center}
-  @{text "c_binder :: "}@{verbatim "\""}@{text "idts \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  (\"(3"}@{text sy}@{verbatim "_./ _)\" [0, "}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
-  \end{center}
-
-  Here @{syntax (inner) idts} is the nonterminal symbol for a list of
-  identifiers with optional type constraints (see also
-  \secref{sec:pure-grammar}).  The mixfix template @{verbatim
-  "\"(3"}@{text sy}@{verbatim "_./ _)\""} defines argument positions
-  for the bound identifiers and the body, separated by a dot with
-  optional line break; the entire phrase is a pretty printing block of
-  indentation level 3.  Note that there is no extra space after @{text
-  "sy"}, so it needs to be included user specification if the binder
-  syntax ends with a token that may be continued by an identifier
-  token at the start of @{syntax (inner) idts}.
-
-  Furthermore, a syntax translation transforms @{text "c_binder x\<^sub>1
-  \<dots> x\<^sub>n b"} into iterated application @{text "c (\<lambda>x\<^sub>1. \<dots> c (\<lambda>x\<^sub>n. b)\<dots>)"}.
-  This works in both directions, for parsing and printing.  *}
-
-
-section {* Explicit notation \label{sec:notation} *}
-
-text {*
-  \begin{matharray}{rcll}
-    @{command_def "type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "no_type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "no_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-    @{command_def "write"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
-  \end{matharray}
-
-  Commands that introduce new logical entities (terms or types)
-  usually allow to provide mixfix annotations on the spot, which is
-  convenient for default notation.  Nonetheless, the syntax may be
-  modified later on by declarations for explicit notation.  This
-  allows to add or delete mixfix annotations for existing logical
-  entities within the current context.
-
-  @{rail \<open>
-    (@@{command type_notation} | @@{command no_type_notation}) @{syntax target}?
-      @{syntax mode}? \<newline> (@{syntax nameref} @{syntax mixfix} + @'and')
-    ;
-    (@@{command notation} | @@{command no_notation}) @{syntax target}? @{syntax mode}? \<newline>
-      (@{syntax nameref} @{syntax mixfix} + @'and')
-    ;
-    @@{command write} @{syntax mode}? (@{syntax nameref} @{syntax mixfix} + @'and')
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "type_notation"}~@{text "c (mx)"} associates mixfix
-  syntax with an existing type constructor.  The arity of the
-  constructor is retrieved from the context.
-
-  \item @{command "no_type_notation"} is similar to @{command
-  "type_notation"}, but removes the specified syntax annotation from
-  the present context.
-
-  \item @{command "notation"}~@{text "c (mx)"} associates mixfix
-  syntax with an existing constant or fixed variable.  The type
-  declaration of the given entity is retrieved from the context.
-
-  \item @{command "no_notation"} is similar to @{command "notation"},
-  but removes the specified syntax annotation from the present
-  context.
-
-  \item @{command "write"} is similar to @{command "notation"}, but
-  works within an Isar proof body.
-
-  \end{description}
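-
-  For example, reusing the hypothetical @{text xor} operator of
-  \secref{sec:mixfix}, its notation may be adjusted like this (a
-  minimal sketch):
-
-  \begin{verbatim}
-  notation xor (infixl "XOR" 60)
-  no_notation xor (infixl "[+]" 60)
-  \end{verbatim}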
-*}
-
-
-section {* The Pure syntax \label{sec:pure-syntax} *}
-
-subsection {* Lexical matters \label{sec:inner-lex} *}
-
-text {* The inner lexical syntax vaguely resembles the outer one
-  (\secref{sec:outer-lex}), but some details are different.  There are
-  two main categories of inner syntax tokens:
-
-  \begin{enumerate}
-
-  \item \emph{delimiters} --- the literal tokens occurring in
-  productions of the given priority grammar (cf.\
-  \secref{sec:priority-grammar});
-
-  \item \emph{named tokens} --- various categories of identifiers etc.
-
-  \end{enumerate}
-
-  Delimiters override named tokens and may thus render certain
-  identifiers inaccessible.  Sometimes the logical context admits
-  alternative ways to refer to the same entity, potentially via
-  qualified names.
-
-  \medskip The categories for named tokens are defined once and for
-  all as follows, reusing some categories of the outer token syntax
-  (\secref{sec:outer-lex}).
-
-  \begin{center}
-  \begin{supertabular}{rcl}
-    @{syntax_def (inner) id} & = & @{syntax_ref ident} \\
-    @{syntax_def (inner) longid} & = & @{syntax_ref longident} \\
-    @{syntax_def (inner) var} & = & @{syntax_ref var} \\
-    @{syntax_def (inner) tid} & = & @{syntax_ref typefree} \\
-    @{syntax_def (inner) tvar} & = & @{syntax_ref typevar} \\
-    @{syntax_def (inner) num_token} & = & @{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat} \\
-    @{syntax_def (inner) float_token} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
-    @{syntax_def (inner) xnum_token} & = & @{verbatim "#"}@{syntax_ref nat}@{text "  |  "}@{verbatim "#-"}@{syntax_ref nat} \\
-    @{syntax_def (inner) str_token} & = & @{verbatim "''"} @{text "\<dots>"} @{verbatim "''"} \\
-    @{syntax_def (inner) string_token} & = & @{verbatim "\""} @{text "\<dots>"} @{verbatim "\""} \\
-    @{syntax_def (inner) cartouche} & = & @{verbatim "\<open>"} @{text "\<dots>"} @{verbatim "\<close>"} \\
-  \end{supertabular}
-  \end{center}
-
-  The token categories @{syntax (inner) num_token}, @{syntax (inner)
-  float_token}, @{syntax (inner) xnum_token}, @{syntax (inner)
-  str_token}, @{syntax (inner) string_token}, and @{syntax (inner)
-  cartouche} are not used in Pure. Object-logics may implement
-  numerals and string literals by adding appropriate syntax
-  declarations, together with some translation functions (e.g.\ see
-  @{file "~~/src/HOL/Tools/string_syntax.ML"}).
-
-  The derived categories @{syntax_def (inner) num_const}, @{syntax_def
-  (inner) float_const}, and @{syntax_def (inner) xnum_const} provide
-  robust access to the respective tokens: the syntax tree holds a
-  syntactic constant instead of a free variable.
-*}
-
-
-subsection {* Priority grammars \label{sec:priority-grammar} *}
-
-text {* A context-free grammar consists of a set of \emph{terminal
-  symbols}, a set of \emph{nonterminal symbols} and a set of
-  \emph{productions}.  Productions have the form @{text "A = \<gamma>"},
-  where @{text A} is a nonterminal and @{text \<gamma>} is a string of
-  terminals and nonterminals.  One designated nonterminal is called
-  the \emph{root symbol}.  The language defined by the grammar
-  consists of all strings of terminals that can be derived from the
-  root symbol by applying productions as rewrite rules.
-
-  The standard Isabelle parser for inner syntax uses a \emph{priority
-  grammar}.  Each nonterminal is decorated by an integer priority:
-  @{text "A\<^sup>(\<^sup>p\<^sup>)"}.  In a derivation, @{text "A\<^sup>(\<^sup>p\<^sup>)"} may be rewritten
-  using a production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} only if @{text "p \<le> q"}.  Any
-  priority grammar can be translated into a normal context-free
-  grammar by introducing new nonterminals and productions.
-
-  \medskip Formally, a set of context free productions @{text G}
-  induces a derivation relation @{text "\<longrightarrow>\<^sub>G"} as follows.  Let @{text
-  \<alpha>} and @{text \<beta>} denote strings of terminal or nonterminal symbols.
-  Then @{text "\<alpha> A\<^sup>(\<^sup>p\<^sup>) \<beta> \<longrightarrow>\<^sub>G \<alpha> \<gamma> \<beta>"} holds if and only if @{text G}
-  contains some production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} for @{text "p \<le> q"}.
-
-  \medskip The following grammar for arithmetic expressions
-  demonstrates how binding power and associativity of operators can be
-  enforced by priorities.
-
-  \begin{center}
-  \begin{tabular}{rclr}
-  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim "("} @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim ")"} \\
-  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim 0} \\
-  @{text "A\<^sup>(\<^sup>0\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} \\
-  @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} \\
-  @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "="} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} \\
-  \end{tabular}
-  \end{center}
-  The choice of priorities determines that @{verbatim "-"} binds
-  tighter than @{verbatim "*"}, which binds tighter than @{verbatim
-  "+"}.  Furthermore @{verbatim "+"} associates to the left and
-  @{verbatim "*"} to the right.
-
-  \medskip For clarity, grammars obey these conventions:
-  \begin{itemize}
-
-  \item All priorities must lie between 0 and 1000.
-
-  \item Priority 0 on the right-hand side and priority 1000 on the
-  left-hand side may be omitted.
-
-  \item The production @{text "A\<^sup>(\<^sup>p\<^sup>) = \<alpha>"} is written as @{text "A = \<alpha>
-  (p)"}, i.e.\ the priority of the left-hand side actually appears in
-  a column on the far right.
-
-  \item Alternatives are separated by @{text "|"}.
-
-  \item Repetition is indicated by dots @{text "(\<dots>)"} in an informal
-  but obvious way.
-
-  \end{itemize}
-
-  Using these conventions, the example grammar specification above
-  takes the form:
-  \begin{center}
-  \begin{tabular}{rclc}
-    @{text A} & @{text "="} & @{verbatim "("} @{text A} @{verbatim ")"} \\
-              & @{text "|"} & @{verbatim 0} & \qquad\qquad \\
-              & @{text "|"} & @{text A} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} & @{text "(0)"} \\
-              & @{text "|"} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
-              & @{text "|"} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
-  \end{tabular}
-  \end{center}
-*}
-
-
-subsection {* The Pure grammar \label{sec:pure-grammar} *}
-
-text {* The priority grammar of the @{text "Pure"} theory is defined
-  approximately like this:
-
-  \begin{center}
-  \begin{supertabular}{rclr}
-
-  @{syntax_def (inner) any} & = & @{text "prop  |  logic"} \\\\
-
-  @{syntax_def (inner) prop} & = & @{verbatim "("} @{text prop} @{verbatim ")"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
-    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "=="} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(2)"} \\
-    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{text "\<equiv>"} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(2)"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "&&&"} @{text "prop\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{verbatim "[|"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{verbatim "|]"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{text "\<lbrakk>"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{text "\<rbrakk>"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
-    & @{text "|"} & @{verbatim "!!"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
-    & @{text "|"} & @{text "\<And>"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim OFCLASS} @{verbatim "("} @{text type} @{verbatim ","} @{text logic} @{verbatim ")"} \\
-    & @{text "|"} & @{verbatim SORT_CONSTRAINT} @{verbatim "("} @{text type} @{verbatim ")"} \\
-    & @{text "|"} & @{verbatim TERM} @{text logic} \\
-    & @{text "|"} & @{verbatim PROP} @{text aprop} \\\\
-
-  @{syntax_def (inner) aprop} & = & @{verbatim "("} @{text aprop} @{verbatim ")"} \\
-    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
-    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
-    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
-    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\\\
-
-  @{syntax_def (inner) logic} & = & @{verbatim "("} @{text logic} @{verbatim ")"} \\
-    & @{text "|"} & @{text "logic\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
-    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
-    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
-    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
-    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\
-    & @{text "|"} & @{text "\<struct> index\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} \\
-    & @{text "|"} & @{verbatim "%"} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
-    & @{text "|"} & @{text \<lambda>} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
-    & @{text "|"} & @{verbatim op} @{verbatim "=="}@{text "  |  "}@{verbatim op} @{text "\<equiv>"}@{text "  |  "}@{verbatim op} @{verbatim "&&&"} \\
-    & @{text "|"} & @{verbatim op} @{verbatim "==>"}@{text "  |  "}@{verbatim op} @{text "\<Longrightarrow>"} \\
-    & @{text "|"} & @{verbatim TYPE} @{verbatim "("} @{text type} @{verbatim ")"} \\\\
-
-  @{syntax_def (inner) idt} & = & @{verbatim "("} @{text idt} @{verbatim ")"}@{text "  |  id  |  "}@{verbatim "_"} \\
-    & @{text "|"} & @{text id} @{verbatim "::"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim "_"} @{verbatim "::"} @{text type} & @{text "(0)"} \\\\
-
-  @{syntax_def (inner) index} & = & @{verbatim "\<^bsub>"} @{text "logic\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "\<^esub>"}@{text "  |  |  \<index>"} \\\\
-
-  @{syntax_def (inner) idts} & = & @{text "idt  |  idt\<^sup>(\<^sup>1\<^sup>) idts"} & @{text "(0)"} \\\\
-
-  @{syntax_def (inner) pttrn} & = & @{text idt} \\\\
-
-  @{syntax_def (inner) pttrns} & = & @{text "pttrn  |  pttrn\<^sup>(\<^sup>1\<^sup>) pttrns"} & @{text "(0)"} \\\\
-
-  @{syntax_def (inner) type} & = & @{verbatim "("} @{text type} @{verbatim ")"} \\
-    & @{text "|"} & @{text "tid  |  tvar  |  "}@{verbatim "_"} \\
-    & @{text "|"} & @{text "tid"} @{verbatim "::"} @{text "sort  |  tvar  "}@{verbatim "::"} @{text "sort  |  "}@{verbatim "_"} @{verbatim "::"} @{text "sort"} \\
-    & @{text "|"} & @{text "type_name  |  type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) type_name"} \\
-    & @{text "|"} & @{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text type_name} \\
-    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
-    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
-  @{syntax_def (inner) type_name} & = & @{text "id  |  longid"} \\\\
-
-  @{syntax_def (inner) sort} & = & @{syntax class_name}~@{text "  |  "}@{verbatim "{}"} \\
-    & @{text "|"} & @{verbatim "{"} @{syntax class_name} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{syntax class_name} @{verbatim "}"} \\
-  @{syntax_def (inner) class_name} & = & @{text "id  |  longid"} \\
-  \end{supertabular}
-  \end{center}
-
-  \medskip Here literal terminals are printed @{verbatim "verbatim"};
-  see also \secref{sec:inner-lex} for further token categories of the
-  inner syntax.  The meaning of the nonterminals defined by the above
-  grammar is as follows:
-
-  \begin{description}
-
-  \item @{syntax_ref (inner) any} denotes any term.
-
-  \item @{syntax_ref (inner) prop} denotes meta-level propositions,
-  which are terms of type @{typ prop}.  The syntax of such formulae of
-  the meta-logic is carefully distinguished from usual conventions for
-  object-logics.  In particular, plain @{text "\<lambda>"}-term notation is
-  \emph{not} recognized as @{syntax (inner) prop}.
-
-  \item @{syntax_ref (inner) aprop} denotes atomic propositions, which
-  are embedded into regular @{syntax (inner) prop} by means of an
-  explicit @{verbatim PROP} token.
-
-  Terms of type @{typ prop} with non-constant head, e.g.\ a plain
-  variable, are printed in this form.  Constants that yield type @{typ
-  prop} are expected to provide their own concrete syntax; otherwise
-  the printed version will appear like @{syntax (inner) logic} and
-  cannot be parsed again as @{syntax (inner) prop}.
-
-  \item @{syntax_ref (inner) logic} denotes arbitrary terms of a
-  logical type, excluding type @{typ prop}.  This is the main
-  syntactic category of object-logic entities, covering plain @{text
-  \<lambda>}-term notation (variables, abstraction, application), plus
-  anything defined by the user.
-
-  When specifying notation for logical entities, all logical types
-  (excluding @{typ prop}) are \emph{collapsed} to this single category
-  of @{syntax (inner) logic}.
-
-  \item @{syntax_ref (inner) index} denotes an optional index term for
-  indexed syntax.  If omitted, it refers to the first @{keyword_ref
-  "structure"} variable in the context.  The special dummy ``@{text
-  "\<index>"}'' serves as pattern variable in mixfix annotations that
-  introduce indexed notation.
-
-  \item @{syntax_ref (inner) idt} denotes identifiers, possibly
-  constrained by types.
-
-  \item @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
-  (inner) idt}.  This is the most basic category for variables in
-  iterated binders, such as @{text "\<lambda>"} or @{text "\<And>"}.
-
-  \item @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
-  denote patterns for abstraction, case bindings etc.  In Pure, these
-  categories start as a mere copy of @{syntax (inner) idt} and
-  @{syntax (inner) idts}, respectively.  Object-logics may add
-  additional productions for binding forms.
-
-  \item @{syntax_ref (inner) type} denotes types of the meta-logic.
-
-  \item @{syntax_ref (inner) sort} denotes meta-level sorts.
-
-  \end{description}
-
-  Here are some further explanations of certain syntax features.
-
-  \begin{itemize}
-
-  \item In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
-  parsed as @{text "x :: (nat y)"}, treating @{text y} like a type
-  constructor applied to @{text nat}.  To avoid this interpretation,
-  write @{text "(x :: nat) y"} with explicit parentheses.
-
-  \item Similarly, @{text "x :: nat y :: nat"} is parsed as @{text "x ::
-  (nat y :: nat)"}.  The correct form is @{text "(x :: nat) (y ::
-  nat)"}, or @{text "(x :: nat) y :: nat"} if @{text y} is last in the
-  sequence of identifiers.
-
-  \item Type constraints for terms bind very weakly.  For example,
-  @{text "x < y :: nat"} is normally parsed as @{text "(x < y) ::
-  nat"}, unless @{text "<"} has a very low priority, in which case the
-  input is likely to be ambiguous.  The correct form is @{text "x < (y
-  :: nat)"}.
-
-  \item Constraints may be either written with two literal colons
-  ``@{verbatim "::"}'' or the double-colon symbol @{verbatim "\<Colon>"},
-  which actually looks exactly the same in some {\LaTeX} styles.
-
-  \item Dummy variables (written as underscore) may occur in different
-  roles.
-
-  \begin{description}
-
-  \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
-  anonymous inference parameter, which is filled in according to the
-  most general type produced by the type-checking phase.
-
-  \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where
-  the body does not refer to the binding introduced here, as in the
-  term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
-  "\<lambda>x y. x"}.
-
-  \item A free ``@{text "_"}'' refers to an implicit outer binding.
-  Higher-level definitional packages usually allow forms like @{text "f x _
-  = x"}.
-
-  \item A schematic ``@{text "_"}'' (within a term pattern, see
-  \secref{sec:term-decls}) refers to an anonymous variable that is
-  implicitly abstracted over its context of locally bound variables.
-  For example, this allows pattern matching of @{text "{x. f x = g
-  x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by
-  using both bound and schematic dummies.
-
-  \end{description}
-
-  \item The three literal dots ``@{verbatim "..."}'' may also be
-  written as the ellipsis symbol @{verbatim "\<dots>"}.  In both cases this
-  refers to a special schematic variable, which is bound in the
-  context.  This special term abbreviation works nicely with
-  calculational reasoning (\secref{sec:calculation}).
-
-  \item @{verbatim CONST} ensures that the given identifier is treated
-  as a constant term, and passed through the parse tree in fully
-  internalized form.  This is particularly relevant for translation
-  rules (\secref{sec:syn-trans}), notably on the RHS.
-
-  \item @{verbatim XCONST} is similar to @{verbatim CONST}, but
-  retains the constant name as given.  This is only relevant to
-  translation rules (\secref{sec:syn-trans}), notably on the LHS.
-
-  \end{itemize}
-*}
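-
-text {* For instance, the constraint pitfalls above can be explored
-  with the diagnostic command @{command_ref term}.  The following is
-  a minimal sketch, assuming Isabelle/HOL for the type @{text "nat"}
-  and the ordering @{text "<"}:
-  \begin{ttbox}
-  term "%(x :: nat) y. x"   (*two variables, y of inferred type*)
-  term "x < (y :: nat)"     (*constraint attached to y only*)
-  \end{ttbox}
-  In contrast, input like @{verbatim "%x :: nat y. x"} would treat
-  @{verbatim "nat y"} as a type application and fail.  *}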
-
-
-subsection {* Inspecting the syntax *}
-
-text {*
-  \begin{matharray}{rcl}
-    @{command_def "print_syntax"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-  \end{matharray}
-
-  \begin{description}
-
-  \item @{command "print_syntax"} prints the inner syntax of the
-  current context.  The output can be quite large; the most important
-  sections are explained below.
-
-  \begin{description}
-
-  \item @{text "lexicon"} lists the delimiters of the inner token
-  language; see \secref{sec:inner-lex}.
-
-  \item @{text "prods"} lists the productions of the underlying
-  priority grammar; see \secref{sec:priority-grammar}.
-
-  The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
-  "A[p]"}; delimiters are quoted.  Many productions have an extra
-  @{text "\<dots> => name"}.  These names later become the heads of parse
-  trees; they also guide the pretty printer.
-
-  Productions without such parse tree names are called \emph{copy
-  productions}.  Their right-hand side must have exactly one
-  nonterminal symbol (or named token).  The parser does not create a
-  new parse tree node for copy productions, but simply returns the
-  parse tree of the right-hand symbol.
-
-  If the right-hand side of a copy production consists of a single
-  nonterminal without any delimiters, then it is called a \emph{chain
-  production}.  Chain productions act as abbreviations: conceptually,
-  they are removed from the grammar by adding new productions.
-  Priority information attached to chain productions is ignored; only
-  the dummy value @{text "-1"} is displayed.
-
-  \item @{text "print modes"} lists the alternative print modes
-  provided by this grammar; see \secref{sec:print-modes}.
-
-  \item @{text "parse_rules"} and @{text "print_rules"} relate to
-  syntax translations (macros); see \secref{sec:syn-trans}.
-
-  \item @{text "parse_ast_translation"} and @{text
-  "print_ast_translation"} list sets of constants that invoke
-  translation functions for abstract syntax trees, which are only
-  required in very special situations; see \secref{sec:tr-funs}.
-
-  \item @{text "parse_translation"} and @{text "print_translation"}
-  list the sets of constants that invoke regular translation
-  functions; see \secref{sec:tr-funs}.
-
-  \end{description}
-
-  \end{description}
-*}
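-
-text {* A minimal usage sketch, assuming Isabelle/HOL as the base:
-  \begin{ttbox}
-  theory Demo imports Main begin
-  print_syntax
-  end
-  \end{ttbox}
-  The output covers the full syntax of the current context, i.e.\
-  everything inherited from @{text Main} as well.  *}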
-
-
-subsection {* Ambiguity of parsed expressions *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{attribute_def syntax_ambiguity_warning} & : & @{text attribute} & default @{text true} \\
-    @{attribute_def syntax_ambiguity_limit} & : & @{text attribute} & default @{text 10} \\
-  \end{tabular}
-
-  Depending on the grammar and the given input, parsing may be
-  ambiguous.  Isabelle lets the Earley parser enumerate all possible
-  parse trees, and then tries to make the best out of the situation.
-  Terms that cannot be type-checked are filtered out, which often
-  leads to a unique result in the end.  Unlike regular type
-  reconstruction, which is applied to the whole collection of input
-  terms simultaneously, the filtering stage only treats each given
-  term in isolation.  Filtering is also not attempted for individual
-  types or raw ASTs (as required for @{command translations}).
-
-  Certain warning or error messages are printed, depending on the
-  situation and the given configuration options.  Parsing ultimately
-  fails if multiple results remain after the filtering phase.
-
-  \begin{description}
-
-  \item @{attribute syntax_ambiguity_warning} controls output of
-  explicit warning messages about syntax ambiguity.
-
-  \item @{attribute syntax_ambiguity_limit} determines the number of
-  resulting parse trees that are shown as part of the printed message
-  in case of an ambiguity.
-
-  \end{description}
-*}
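-
-text {* Both options are ordinary configuration options and may be
-  changed in the usual way; a minimal sketch:
-  \begin{ttbox}
-  declare [[syntax_ambiguity_warning = false]]
-  declare [[syntax_ambiguity_limit = 3]]
-  \end{ttbox}
-*}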
-
-
-section {* Syntax transformations \label{sec:syntax-transformations} *}
-
-text {* The inner syntax engine of Isabelle provides separate
-  mechanisms to transform parse trees either via rewrite systems on
-  first-order ASTs (\secref{sec:syn-trans}), or ML functions on ASTs
-  or syntactic @{text "\<lambda>"}-terms (\secref{sec:tr-funs}).  This works
-  both for parsing and printing, as outlined in
-  \figref{fig:parse-print}.
-
-  \begin{figure}[htbp]
-  \begin{center}
-  \begin{tabular}{cl}
-  string          & \\
-  @{text "\<down>"}     & lexer + parser \\
-  parse tree      & \\
-  @{text "\<down>"}     & parse AST translation \\
-  AST             & \\
-  @{text "\<down>"}     & AST rewriting (macros) \\
-  AST             & \\
-  @{text "\<down>"}     & parse translation \\
-  --- pre-term ---    & \\
-  @{text "\<down>"}     & print translation \\
-  AST             & \\
-  @{text "\<down>"}     & AST rewriting (macros) \\
-  AST             & \\
-  @{text "\<down>"}     & print AST translation \\
-  string          &
-  \end{tabular}
-  \end{center}
-  \caption{Parsing and printing with translations}\label{fig:parse-print}
-  \end{figure}
-
-  These intermediate syntax tree formats eventually lead to a pre-term
-  with all names and binding scopes resolved, but most type
-  information still missing.  Explicit type constraints might be given by
-  the user, or implicit position information by the system --- both
-  need to be passed through carefully by syntax transformations.
-
-  Pre-terms are further processed by the so-called \emph{check} and
-  \emph{uncheck} phases that are intertwined with type-inference (see
-  also \cite{isabelle-implementation}).  These phases allow one to operate
-  on higher-order abstract syntax with proper binding and type
-  information already available.
-
-  As a rule of thumb, anything that manipulates bindings of variables
-  or constants needs to be implemented as syntax transformation (see
-  below).  Anything else is better done via check/uncheck: a prominent
-  example application is the @{command abbreviation} concept of
-  Isabelle/Pure. *}
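-
-text {* For illustration, the following @{command abbreviation}
-  sketch (assuming Isabelle/HOL) involves neither grammar productions
-  nor translation rules:
-  \begin{ttbox}
-  abbreviation sq :: "nat => nat"
-    where "sq x == x * x"
-  \end{ttbox}
-  Instances of @{text "sq a"} are expanded to @{text "a * a"} during
-  the check phase, and contracted again by uncheck when printing.  *}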
-
-
-subsection {* Abstract syntax trees \label{sec:ast} *}
-
-text {* The ML datatype @{ML_type Ast.ast} explicitly represents the
-  intermediate AST format that is used for syntax rewriting
-  (\secref{sec:syn-trans}).  It is defined in ML as follows:
-  \begin{ttbox}
-  datatype ast =
-    Constant of string |
-    Variable of string |
-    Appl of ast list
-  \end{ttbox}
-
-  An AST is either an atom (constant or variable) or a list of (at
-  least two) subtrees.  Occasional diagnostic output of ASTs uses
-  notation that resembles the S-expressions of LISP.  Constant atoms are
-  shown as quoted strings, variable atoms as non-quoted strings and
-  applications as a parenthesized list of subtrees.  For example, the
-  AST
-  @{ML [display] "Ast.Appl
-  [Ast.Constant \"_abs\", Ast.Variable \"x\", Ast.Variable \"t\"]"}
-  is pretty-printed as @{verbatim "(\"_abs\" x t)"}.  Note that
-  @{verbatim "()"} and @{verbatim "(x)"} are excluded as ASTs, because
-  they have too few subtrees.
-
-  \medskip AST application is merely a pro-forma mechanism to indicate
-  certain syntactic structures.  Thus @{verbatim "(c a b)"} could mean
-  either term application or type application, depending on the
-  syntactic context.
-
-  Nested application like @{verbatim "((\"_abs\" x t) u)"} is also
-  possible, but ASTs are definitely first-order: the syntax constant
-  @{verbatim "\"_abs\""} does not bind the @{verbatim x} in any way.
-  Proper bindings are introduced in later stages of the term syntax,
-  where @{verbatim "(\"_abs\" x t)"} becomes an @{ML Abs} node and
-  occurrences of @{verbatim x} in @{verbatim t} are replaced by bound
-  variables (represented as de-Bruijn indices).
-*}
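-
-text {* Such trees can be constructed and inspected directly in
-  Isabelle/ML; a small sketch, using @{ML Ast.pretty_ast} for the
-  diagnostic notation described above:
-  \begin{ttbox}
-  val ast =
-    Ast.Appl [Ast.Constant "_abs", Ast.Variable "x", Ast.Variable "t"];
-  writeln (Pretty.string_of (Ast.pretty_ast ast));
-  (*prints ("_abs" x t)*)
-  \end{ttbox}
-*}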
-
-
-subsubsection {* AST constants versus variables *}
-
-text {* Depending on the situation --- input syntax, output syntax,
-  translation patterns --- the distinction of atomic ASTs as @{ML
-  Ast.Constant} versus @{ML Ast.Variable} serves slightly different
-  purposes.
-
-  Input syntax of a term such as @{text "f a b = c"} does not yet
-  indicate the scopes of atomic entities @{text "f, a, b, c"}: they
-  could be global constants or local variables, even bound ones
-  depending on the context of the term.  @{ML Ast.Variable} leaves
-  this choice still open: later syntax layers (or translation
-  functions) may capture such a variable to determine its role
-  specifically, to make it a constant, bound variable, free variable
-  etc.  In contrast, syntax translations that introduce already known
-  constants would rather do it via @{ML Ast.Constant} to prevent
-  accidental re-interpretation later on.
-
-  Output syntax turns term constants into @{ML Ast.Constant} and
-  variables (free or schematic) into @{ML Ast.Variable}.  This
-  information is precise when printing fully formal @{text "\<lambda>"}-terms.
-
-  \medskip AST translation patterns (\secref{sec:syn-trans}) that
-  represent terms cannot distinguish constants and variables
-  syntactically.  Explicit indication of @{text "CONST c"} inside the
-  term language is required, unless @{text "c"} is known as a special
-  \emph{syntax constant} (see also @{command syntax}).  It is also
-  possible to use @{command syntax} declarations (without mixfix
-  annotation) to enforce that certain unqualified names are always
-  treated as constant within the syntax machinery.
-
-  The situation is simpler for ASTs that represent types or sorts,
-  since the concrete syntax already distinguishes type variables from
-  type constants (constructors).  So @{text "('a, 'b) foo"}
-  corresponds to an AST application of some constant for @{text foo}
-  and variable arguments for @{text "'a"} and @{text "'b"}.  Note that
-  the postfix application is merely a feature of the concrete syntax,
-  while in the AST the constructor occurs in head position.  *}
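-
-text {* Thus the AST for the type @{text "('a, 'b) foo"} has roughly
-  the following shape (a sketch that ignores the name-space prefixing
-  discussed below):
-  \begin{ttbox}
-  Ast.Appl [Ast.Constant "foo", Ast.Variable "'a", Ast.Variable "'b"]
-  \end{ttbox}
-*}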
-
-
-subsubsection {* Authentic syntax names *}
-
-text {* Naming constant entities within ASTs is another delicate
-  issue.  Unqualified names are resolved in the name space tables in
-  the last stage of parsing, after all translations have been applied.
-  Since syntax transformations do not know about this later name
-  resolution, there can be surprises in boundary cases.
-
-  \emph{Authentic syntax names} for @{ML Ast.Constant} avoid this
-  problem: the fully-qualified constant name with a special prefix for
-  its formal category (@{text "class"}, @{text "type"}, @{text
-  "const"}, @{text "fixed"}) represents the information faithfully
-  within the untyped AST format.  Accidental overlap with free or
-  bound variables is excluded as well.  Authentic syntax names work
-  implicitly in the following situations:
-
-  \begin{itemize}
-
-  \item Input of term constants (or fixed variables) that are
-  introduced by concrete syntax via @{command notation}: the
-  correspondence of a particular grammar production to some known term
-  entity is preserved.
-
-  \item Input of type constants (constructors) and type classes ---
-  thanks to explicit syntactic distinction, independently of the
-  context.
-
-  \item Output of term constants, type constants, type classes ---
-  this information is already available from the internal term to be
-  printed.
-
-  \end{itemize}
-
-  In other words, syntax transformations that operate on input terms
-  written as prefix applications are difficult to make robust.
-  Luckily, this case rarely occurs in practice, because syntax forms
-  to be translated usually correspond to some concrete notation. *}
-
-
-subsection {* Raw syntax and translations \label{sec:syn-trans} *}
-
-text {*
-  \begin{tabular}{rcll}
-    @{command_def "nonterminal"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "syntax"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "no_syntax"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "translations"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def "no_translations"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{attribute_def syntax_ast_trace} & : & @{text attribute} & default @{text false} \\
-    @{attribute_def syntax_ast_stats} & : & @{text attribute} & default @{text false} \\
-  \end{tabular}
-
-  Unlike mixfix notation for existing formal entities
-  (\secref{sec:notation}), raw syntax declarations provide full access
-  to the priority grammar of the inner syntax, without any sanity
-  checks.  This includes additional syntactic categories (via
-  @{command nonterminal}) and free-form grammar productions (via
-  @{command syntax}).  Additional syntax translations (or macros, via
-  @{command translations}) are required to turn resulting parse trees
-  into proper representations of formal entities again.
-
-  @{rail \<open>
-    @@{command nonterminal} (@{syntax name} + @'and')
-    ;
-    (@@{command syntax} | @@{command no_syntax}) @{syntax mode}? (constdecl +)
-    ;
-    (@@{command translations} | @@{command no_translations})
-      (transpat ('==' | '=>' | '<=' | '\<rightleftharpoons>' | '\<rightharpoonup>' | '\<leftharpoondown>') transpat +)
-    ;
-
-    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
-    ;
-    mode: ('(' ( @{syntax name} | @'output' | @{syntax name} @'output' ) ')')
-    ;
-    transpat: ('(' @{syntax nameref} ')')? @{syntax string}
-  \<close>}
-
-  \begin{description}
-
-  \item @{command "nonterminal"}~@{text c} declares a type
-  constructor @{text c} (without arguments) to act as a purely syntactic
-  type: a nonterminal symbol of the inner syntax.
-
-  \item @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
-  priority grammar and the pretty printer table for the given print
-  mode (default @{verbatim "\"\""}). An optional keyword @{keyword_ref
-  "output"} means that only the pretty printer table is affected.
-
-  Following \secref{sec:mixfix}, the mixfix annotation @{text "mx =
-  template ps q"} together with type @{text "\<sigma> = \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} and
-  specify a grammar production.  The @{text template} contains
-  delimiter tokens that surround @{text "n"} argument positions
-  (@{verbatim "_"}).  The latter correspond to nonterminal symbols
-  @{text "A\<^sub>i"} derived from the argument types @{text "\<tau>\<^sub>i"} as
-  follows:
-  \begin{itemize}
-
-  \item @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
-
-  \item @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
-  constructor @{text "\<kappa> \<noteq> prop"}
-
-  \item @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
-
-  \item @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
-  (syntactic type constructor)
-
-  \end{itemize}
-
-  Each @{text "A\<^sub>i"} is decorated by priority @{text "p\<^sub>i"} from the
-  given list @{text "ps"}; misssing priorities default to 0.
-
-  The resulting nonterminal of the production is determined similarly
-  from type @{text "\<tau>"}, with priority @{text "q"} and default 1000.
-
-  \medskip Parsing via this production produces parse trees @{text
-  "t\<^sub>1, \<dots>, t\<^sub>n"} for the argument slots.  The resulting parse tree is
-  composed as @{text "c t\<^sub>1 \<dots> t\<^sub>n"}, by using the syntax constant @{text
-  "c"} of the syntax declaration.
-
-  Such syntactic constants are invented on the spot, without formal
-  check wrt.\ existing declarations.  It is conventional to use plain
-  identifiers prefixed by a single underscore (e.g.\ @{text
-  "_foobar"}).  Names should be chosen with care, to avoid clashes
-  with other syntax declarations.
-
-  \medskip The special case of a copy production is specified by @{text
-  "c = "}@{verbatim "\"\""} (empty string).  It means that the
-  resulting parse tree @{text "t"} is copied directly, without any
-  further decoration.
-
-  \item @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
-  declarations (and translations) resulting from @{text decls}, which
-  are interpreted in the same manner as for @{command "syntax"} above.
-
-  \item @{command "translations"}~@{text rules} specifies syntactic
-  translation rules (i.e.\ macros) as first-order rewrite rules on
-  ASTs (\secref{sec:ast}).  The theory context maintains two
-  independent lists of translation rules: parse rules (@{verbatim "=>"}
-  or @{text "\<rightharpoonup>"}) and print rules (@{verbatim "<="} or @{text "\<leftharpoondown>"}).
-  For convenience, both can be specified simultaneously as parse~/
-  print rules (@{verbatim "=="} or @{text "\<rightleftharpoons>"}).
-
-  Translation patterns may be prefixed by the syntactic category to be
-  used for parsing; the default is @{text logic} which means that
-  regular term syntax is used.  Both sides of the syntax translation
-  rule undergo parsing and parse AST translations
-  (\secref{sec:tr-funs}), in order to perform some fundamental
-  normalization like @{text "\<lambda>x y. b \<leadsto> \<lambda>x. \<lambda>y. b"}, but other AST
-  translation rules are \emph{not} applied recursively here.
-
-  When processing AST patterns, the inner syntax lexer runs in a
-  different mode that allows identifiers to start with underscore.
-  This accommodates the usual naming convention for auxiliary syntax
-  constants --- those that do not have a logical counterpart --- by
-  making it possible to specify arbitrary AST applications within the term
-  syntax, independently of the corresponding concrete syntax.
-
-  Atomic ASTs are distinguished as @{ML Ast.Constant} versus @{ML
-  Ast.Variable} as follows: a qualified name or syntax constant
-  declared via @{command syntax}, or the parse tree head of concrete
-  notation becomes @{ML Ast.Constant}; anything else becomes @{ML
-  Ast.Variable}.  Note that @{text CONST} and @{text XCONST} within
-  the term language (\secref{sec:pure-grammar}) can be used to enforce
-  treatment as constants.
-
-  AST rewrite rules @{text "(lhs, rhs)"} need to obey the following
-  side-conditions:
-
-  \begin{itemize}
-
-  \item Rules must be left-linear: @{text "lhs"} must not contain
-  repeated variables.\footnote{The deeper reason for this is that AST
-  equality is not well-defined: different occurrences of the ``same''
-  AST could be decorated differently by accidental type-constraints or
-  source position information, for example.}
-
-  \item Every variable in @{text "rhs"} must also occur in @{text
-  "lhs"}.
-
-  \end{itemize}
-
-  \item @{command "no_translations"}~@{text rules} removes syntactic
-  translation rules, which are interpreted in the same manner as for
-  @{command "translations"} above.
-
-  \item @{attribute syntax_ast_trace} and @{attribute
-  syntax_ast_stats} control diagnostic output in the AST normalization
-  process, when translation rules are applied to concrete input or
-  output.
-
-  \end{description}
-
-  Raw syntax and translations provide slightly lower-level access
-  to the grammar and the form of resulting parse trees.  It is
-  often possible to avoid this untyped macro mechanism, and use
-  type-safe @{command abbreviation} or @{command notation} instead.
-  Some important situations where @{command syntax} and @{command
-  translations} are really needed are as follows:
-
-  \begin{itemize}
-
-  \item Iterated replacement via recursive @{command translations}.
-  For example, consider list enumeration @{term "[a, b, c, d]"} as
-  defined in theory @{theory List} in Isabelle/HOL (see the sketch
-  following this list).
-
-  \item Change of binding status of variables: anything beyond the
-  built-in @{keyword "binder"} mixfix annotation requires explicit
-  syntax translations.  For example, consider list filter
-  comprehension @{term "[x \<leftarrow> xs . P]"} as defined in theory @{theory
-  List} in Isabelle/HOL.
-
-  \end{itemize}
-*}
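-
-text {* The first situation is illustrated by the following sketch of
-  list enumeration, simplified from the actual declarations in theory
-  @{theory List} (the constants @{text Cons} and @{text Nil} are
-  assumed from Isabelle/HOL):
-  \begin{ttbox}
-  syntax
-    "_list" :: "args => 'a list"    ("[(_)]")
-  translations
-    "[x, xs]" == "CONST Cons x [xs]"
-    "[x]" == "CONST Cons x (CONST Nil)"
-  \end{ttbox}
-  Parsing @{text "[a, b, c]"} applies the first rule twice and the
-  second rule once, producing iterated applications of @{text Cons}
-  terminated by @{text Nil}.  *}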
-
-subsubsection {* Applying translation rules *}
-
-text {* As a term is being parsed or printed, an AST is generated as
-  an intermediate form according to \figref{fig:parse-print}.  The AST
-  is normalized by applying translation rules in the manner of a
-  first-order term rewriting system.  We first examine how a single
-  rule is applied.
-
-  Let @{text "t"} be the abstract syntax tree to be normalized and
-  @{text "(lhs, rhs)"} some translation rule.  A subtree @{text "u"}
-  of @{text "t"} is called \emph{redex} if it is an instance of @{text
-  "lhs"}; in this case the pattern @{text "lhs"} is said to match the
-  object @{text "u"}.  A redex matched by @{text "lhs"} may be
-  replaced by the corresponding instance of @{text "rhs"}, thus
-  \emph{rewriting} the AST @{text "t"}.  Matching requires some notion
-  of \emph{place-holders} in rule patterns: @{ML Ast.Variable} serves
-  this purpose.
-
-  More precisely, the matching of the object @{text "u"} against the
-  pattern @{text "lhs"} is performed as follows:
-
-  \begin{itemize}
-
-  \item Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
-  Ast.Constant}~@{text "x"} are matched by pattern @{ML
-  Ast.Constant}~@{text "x"}.  Thus all atomic ASTs in the object are
-  treated as (potential) constants, and a successful match makes them
-  actual constants even before name space resolution (see also
-  \secref{sec:ast}).
-
-  \item Object @{text "u"} is matched by pattern @{ML
-  Ast.Variable}~@{text "x"}, binding @{text "x"} to @{text "u"}.
-
-  \item Object @{ML Ast.Appl}~@{text "us"} is matched by @{ML
-  Ast.Appl}~@{text "ts"} if @{text "us"} and @{text "ts"} have the
-  same length and each corresponding subtree matches.
-
-  \item In every other case, matching fails.
-
-  \end{itemize}
-
-  A successful match yields a substitution that is applied to @{text
-  "rhs"}, generating the instance that replaces @{text "u"}.
-
-  Normalizing an AST involves repeatedly applying translation rules
-  until none are applicable.  This works yoyo-like: top-down,
-  bottom-up, top-down, etc.  At each subtree position, rules are
-  chosen in order of appearance in the theory definitions.
-
-  The configuration options @{attribute syntax_ast_trace} and
-  @{attribute syntax_ast_stats} might help to understand this process
-  and diagnose problems.
-
-  \begin{warn}
-  If syntax translation rules work incorrectly, the output of
-  @{command_ref print_syntax} with its \emph{rules} sections reveals the
-  actual internal forms of AST patterns, without potentially confusing
-  concrete syntax.  Recall that AST constants appear as quoted strings
-  and variables without quotes.
-  \end{warn}
-
-  \begin{warn}
-  If @{attribute_ref eta_contract} is set to @{text "true"}, terms
-  will be @{text "\<eta>"}-contracted \emph{before