changeset   48958:12afbf6eb7f9
parent      48957:c04001b3a753
child       48959:d7e36be3eb60
author      wenzelm
date        Tue, 28 Aug 2012 12:45:49 +0200

more standard document preparation within session context;
doc-src/IsarRef/Base.thy
doc-src/IsarRef/Document_Preparation.thy
doc-src/IsarRef/First_Order_Logic.thy
doc-src/IsarRef/Framework.thy
doc-src/IsarRef/Generic.thy
doc-src/IsarRef/HOL_Specific.thy
doc-src/IsarRef/Inner_Syntax.thy
doc-src/IsarRef/ML_Tactic.thy
doc-src/IsarRef/Misc.thy
doc-src/IsarRef/Outer_Syntax.thy
doc-src/IsarRef/Preface.thy
doc-src/IsarRef/Proof.thy
doc-src/IsarRef/Quick_Reference.thy
doc-src/IsarRef/Spec.thy
doc-src/IsarRef/Symbols.thy
doc-src/IsarRef/Synopsis.thy
doc-src/IsarRef/Thy/Base.thy
doc-src/IsarRef/Thy/Document_Preparation.thy
doc-src/IsarRef/Thy/First_Order_Logic.thy
doc-src/IsarRef/Thy/Framework.thy
doc-src/IsarRef/Thy/Generic.thy
doc-src/IsarRef/Thy/HOL_Specific.thy
doc-src/IsarRef/Thy/Inner_Syntax.thy
doc-src/IsarRef/Thy/ML_Tactic.thy
doc-src/IsarRef/Thy/Misc.thy
doc-src/IsarRef/Thy/Outer_Syntax.thy
doc-src/IsarRef/Thy/Preface.thy
doc-src/IsarRef/Thy/Proof.thy
doc-src/IsarRef/Thy/Quick_Reference.thy
doc-src/IsarRef/Thy/Spec.thy
doc-src/IsarRef/Thy/Symbols.thy
doc-src/IsarRef/Thy/Synopsis.thy
doc-src/IsarRef/Thy/document/Document_Preparation.tex
doc-src/IsarRef/Thy/document/First_Order_Logic.tex
doc-src/IsarRef/Thy/document/Framework.tex
doc-src/IsarRef/Thy/document/Generic.tex
doc-src/IsarRef/Thy/document/HOLCF_Specific.tex
doc-src/IsarRef/Thy/document/HOL_Specific.tex
doc-src/IsarRef/Thy/document/Inner_Syntax.tex
doc-src/IsarRef/Thy/document/ML_Tactic.tex
doc-src/IsarRef/Thy/document/Misc.tex
doc-src/IsarRef/Thy/document/Outer_Syntax.tex
doc-src/IsarRef/Thy/document/Preface.tex
doc-src/IsarRef/Thy/document/Proof.tex
doc-src/IsarRef/Thy/document/Quick_Reference.tex
doc-src/IsarRef/Thy/document/Spec.tex
doc-src/IsarRef/Thy/document/Symbols.tex
doc-src/IsarRef/Thy/document/Synopsis.tex
doc-src/IsarRef/Thy/document/isar-vm.eps
doc-src/IsarRef/Thy/document/isar-vm.pdf
doc-src/IsarRef/Thy/document/isar-vm.svg
doc-src/IsarRef/document/isar-vm.eps
doc-src/IsarRef/document/isar-vm.pdf
doc-src/IsarRef/document/isar-vm.svg
doc-src/IsarRef/document/root.tex
doc-src/IsarRef/document/showsymbols
doc-src/IsarRef/document/style.sty
doc-src/IsarRef/isar-ref.tex
doc-src/IsarRef/showsymbols
doc-src/IsarRef/style.sty
doc-src/ROOT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Base.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,8 @@
+theory Base
+imports Pure
+begin
+
+ML_file "../antiquote_setup.ML"
+setup Antiquote_Setup.setup
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Document_Preparation.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,611 @@
+theory Document_Preparation
+imports Base Main
+begin
+
+chapter {* Document preparation \label{ch:document-prep} *}
+
+text {* Isabelle/Isar provides a simple document preparation system
+  based on regular {PDF-\LaTeX} technology, with full support for
+  hyper-links and bookmarks.  Thus the results are well suited both
+  for WWW browsing and for printed copies.
+
+  \medskip Isabelle generates {\LaTeX} output while running a
+  \emph{logic session} (see also \cite{isabelle-sys}).  Getting
+  started with a working configuration for common situations is quite
+  easy using the Isabelle @{verbatim mkdir} and @{verbatim make}
+  tools.  First invoke
+\begin{ttbox}
+  isabelle mkdir Foo
+\end{ttbox}
+  to initialize a separate directory for session @{verbatim Foo} (it
+  is safe to experiment, since @{verbatim "isabelle mkdir"} never
+  overwrites existing files).  Ensure that @{verbatim "Foo/ROOT.ML"}
+  holds ML commands to load all theories required for this session;
+  furthermore @{verbatim "Foo/document/root.tex"} should include any
+  special {\LaTeX} macro packages required for your document (the
+  default is usually sufficient as a start).
+
+  The session is controlled by a separate @{verbatim IsaMakefile}
+  (with crude source dependencies by default).  This file is located
+  one level up from the @{verbatim Foo} directory.  Now
+  invoke
+\begin{ttbox}
+  isabelle make Foo
+\end{ttbox}
+  to run the @{verbatim Foo} session, with browser information and
+  document preparation enabled.  Unless any errors are reported by
+  Isabelle or {\LaTeX}, the output will appear inside the directory
+  defined by the @{verbatim ISABELLE_BROWSER_INFO} setting (as
+  reported by the batch job in verbose mode).
+
+  \medskip You may also consider tuning the @{verbatim usedir}
+  options in @{verbatim IsaMakefile}, for example to switch the output
+  format between @{verbatim pdf} and @{verbatim dvi}, or activate the
+  @{verbatim "-D"} option to retain a second copy of the generated
+  {\LaTeX} sources (for manual inspection or separate runs of
+  @{executable latex}).
+
+  \medskip See \emph{The Isabelle System Manual} \cite{isabelle-sys}
+  for further details on Isabelle logic sessions and theory
+  presentation.  The Isabelle/HOL tutorial \cite{isabelle-hol-book}
+  also covers theory presentation to some extent.
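+
+  \medskip For orientation, the @{verbatim IsaMakefile} produced by
+  @{verbatim "isabelle mkdir"} contains an entry roughly of the
+  following shape (an illustrative sketch; the generated file depends
+  on the Isabelle version and the chosen parent logic):
+\begin{ttbox}
+  Foo: $(LOG)/HOL-Foo.gz
+
+  $(LOG)/HOL-Foo.gz: Foo/ROOT.ML Foo/document/root.tex Foo/*.thy
+        @$(USEDIR) HOL Foo
+\end{ttbox}
+  Here @{verbatim USEDIR} abbreviates an invocation of @{verbatim
+  "isabelle usedir"}, whose options may be tuned as described below.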
+*}
+
+
+section {* Markup commands \label{sec:markup} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "header"} & : & @{text "toplevel \<rightarrow> toplevel"} \\[0.5ex]
+    @{command_def "chapter"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "section"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "subsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "subsubsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "text"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "text_raw"} & : & @{text "local_theory \<rightarrow> local_theory"} \\[0.5ex]
+    @{command_def "sect"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "subsect"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "subsubsect"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "txt"} & : & @{text "proof \<rightarrow> proof"} \\
+    @{command_def "txt_raw"} & : & @{text "proof \<rightarrow> proof"} \\
+  \end{matharray}
+
+  Markup commands provide a structured way to insert text into the
+  document generated from a theory.  Each markup command takes a
+  single @{syntax text} argument, which is passed as argument to a
+  corresponding {\LaTeX} macro.  The default macros provided by
+  @{file "~~/lib/texinputs/isabelle.sty"} can be redefined according
+  to the needs of the underlying document and {\LaTeX} styles.
+
+  Note that formal comments (\secref{sec:comments}) are similar to
+  markup commands, but have a different status within Isabelle/Isar
+  syntax.
+
+  @{rail "
+    (@@{command chapter} | @@{command section} | @@{command subsection} |
+      @@{command subsubsection} | @@{command text}) @{syntax target}? @{syntax text}
+    ;
+    (@@{command header} | @@{command text_raw} | @@{command sect} | @@{command subsect} |
+      @@{command subsubsect} | @@{command txt} | @@{command txt_raw}) @{syntax text}
+  "}
+
+  \begin{description}
+
+  \item @{command header} provides plain text markup just preceding
+  the formal beginning of a theory.  The corresponding {\LaTeX} macro
+  is @{verbatim "\\isamarkupheader"}, which acts like @{command
+  section} by default.
+  
+  \item @{command chapter}, @{command section}, @{command subsection},
+  and @{command subsubsection} mark chapter and section headings
+  within the main theory body or local theory targets.  The
+  corresponding {\LaTeX} macros are @{verbatim "\\isamarkupchapter"},
+  @{verbatim "\\isamarkupsection"}, @{verbatim
+  "\\isamarkupsubsection"} etc.
+
+  \item @{command sect}, @{command subsect}, and @{command subsubsect}
+  mark section headings within proofs.  The corresponding {\LaTeX}
+  macros are @{verbatim "\\isamarkupsect"}, @{verbatim
+  "\\isamarkupsubsect"} etc.
+
+  \item @{command text} and @{command txt} specify paragraphs of plain
+  text.  This corresponds to a {\LaTeX} environment @{verbatim
+  "\\begin{isamarkuptext}"} @{text "\<dots>"} @{verbatim
+  "\\end{isamarkuptext}"} etc.
+
+  \item @{command text_raw} and @{command txt_raw} insert {\LaTeX}
+  source into the output, without additional markup.  Thus the full
+  range of document manipulations becomes available, at the risk of
+  messing up document output.
+
+  \end{description}
+
+  Except for @{command "text_raw"} and @{command "txt_raw"}, the text
+  passed to any of the above markup commands may refer to formal
+  entities via \emph{document antiquotations}, see also
+  \secref{sec:antiq}.  These are interpreted in the present theory or
+  proof context, or the named @{text "target"}.
+
+  \medskip The proof markup commands closely resemble those for theory
+  specifications, but have a different formal status and produce
+  different {\LaTeX} macros.  The default definitions coincide for
+  analogous commands such as @{command section} and @{command sect}.
+*}
+
+
+section {* Document Antiquotations \label{sec:antiq} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{antiquotation_def "theory"} & : & @{text antiquotation} \\
+    @{antiquotation_def "thm"} & : & @{text antiquotation} \\
+    @{antiquotation_def "lemma"} & : & @{text antiquotation} \\
+    @{antiquotation_def "prop"} & : & @{text antiquotation} \\
+    @{antiquotation_def "term"} & : & @{text antiquotation} \\
+    @{antiquotation_def term_type} & : & @{text antiquotation} \\
+    @{antiquotation_def typeof} & : & @{text antiquotation} \\
+    @{antiquotation_def const} & : & @{text antiquotation} \\
+    @{antiquotation_def abbrev} & : & @{text antiquotation} \\
+    @{antiquotation_def typ} & : & @{text antiquotation} \\
+    @{antiquotation_def type} & : & @{text antiquotation} \\
+    @{antiquotation_def class} & : & @{text antiquotation} \\
+    @{antiquotation_def "text"} & : & @{text antiquotation} \\
+    @{antiquotation_def goals} & : & @{text antiquotation} \\
+    @{antiquotation_def subgoals} & : & @{text antiquotation} \\
+    @{antiquotation_def prf} & : & @{text antiquotation} \\
+    @{antiquotation_def full_prf} & : & @{text antiquotation} \\
+    @{antiquotation_def ML} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_op} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_type} & : & @{text antiquotation} \\
+    @{antiquotation_def ML_struct} & : & @{text antiquotation} \\
+    @{antiquotation_def "file"} & : & @{text antiquotation} \\
+  \end{matharray}
+
+  The overall content of an Isabelle/Isar theory may alternate between
+  formal and informal text.  The main body consists of formal
+  specification and proof commands, interspersed with markup commands
+  (\secref{sec:markup}) or document comments (\secref{sec:comments}).
+  The argument of markup commands quotes informal text to be printed
+  in the resulting document, but may again refer to formal entities
+  via \emph{document antiquotations}.
+
+  For example, embedding of ``@{text [source=false] "@{term [show_types] \"f x = a + x\"}"}''
+  within a text block makes
+  \isa{{\isacharparenleft}f{\isasymColon}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isacharparenright}\ {\isacharparenleft}x{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}a{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharplus}\ x} appear in the final {\LaTeX} document.
+
+  Antiquotations usually spare the author tedious typing of logical
+  entities in full detail.  Even more importantly, some degree of
+  consistency-checking between the main body of formal text and its
+  informal explanation is achieved, since terms and types appearing in
+  antiquotations are checked within the current theory or proof
+  context.
+
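+  For example, ``@{text "@{lemma \"rev [a, b] = [b, a]\" by simp}"}''
+  (an illustrative use of the @{antiquotation lemma} antiquotation)
+  checks the given proposition by actually running the stated proof
+  method and prints the proposition itself.
+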
+  %% FIXME less monolithic presentation, move to individual sections!?
+  @{rail "
+    '@{' antiquotation '}'
+    ;
+    @{syntax_def antiquotation}:
+      @@{antiquotation theory} options @{syntax name} |
+      @@{antiquotation thm} options styles @{syntax thmrefs} |
+      @@{antiquotation lemma} options @{syntax prop} @'by' @{syntax method} @{syntax method}? |
+      @@{antiquotation prop} options styles @{syntax prop} |
+      @@{antiquotation term} options styles @{syntax term} |
+      @@{antiquotation (HOL) value} options styles @{syntax term} |
+      @@{antiquotation term_type} options styles @{syntax term} |
+      @@{antiquotation typeof} options styles @{syntax term} |
+      @@{antiquotation const} options @{syntax term} |
+      @@{antiquotation abbrev} options @{syntax term} |
+      @@{antiquotation typ} options @{syntax type} |
+      @@{antiquotation type} options @{syntax name} |
+      @@{antiquotation class} options @{syntax name} |
+      @@{antiquotation text} options @{syntax name}
+    ;
+    @{syntax antiquotation}:
+      @@{antiquotation goals} options |
+      @@{antiquotation subgoals} options |
+      @@{antiquotation prf} options @{syntax thmrefs} |
+      @@{antiquotation full_prf} options @{syntax thmrefs} |
+      @@{antiquotation ML} options @{syntax name} |
+      @@{antiquotation ML_op} options @{syntax name} |
+      @@{antiquotation ML_type} options @{syntax name} |
+      @@{antiquotation ML_struct} options @{syntax name} |
+      @@{antiquotation \"file\"} options @{syntax name}
+    ;
+    options: '[' (option * ',') ']'
+    ;
+    option: @{syntax name} | @{syntax name} '=' @{syntax name}
+    ;
+    styles: '(' (style + ',') ')'
+    ;
+    style: (@{syntax name} +)
+  "}
+
+  Note that the syntax of antiquotations may \emph{not} include source
+  comments @{verbatim "(*"}~@{text "\<dots>"}~@{verbatim "*)"} nor verbatim
+  text @{verbatim "{"}@{verbatim "*"}~@{text "\<dots>"}~@{verbatim
+  "*"}@{verbatim "}"}.
+
+  \begin{description}
+  
+  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
+  guaranteed to refer to a valid ancestor theory in the current
+  context.
+
+  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
+  Full fact expressions are allowed here, including attributes
+  (\secref{sec:syn-att}).
+
+  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
+  "\<phi>"}.
+
+  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
+  @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
+
+  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
+  
+  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
+  its result, see also @{command_ref (HOL) value}.
+
+  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
+  annotated with its type.
+
+  \item @{text "@{typeof t}"} prints the type of a well-typed term
+  @{text "t"}.
+
+  \item @{text "@{const c}"} prints a logical or syntactic constant
+  @{text "c"}.
+  
+  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
+  @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
+
+  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
+
+  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
+    constructor @{text "\<kappa>"}.
+
+  \item @{text "@{class c}"} prints a class @{text c}.
+
+  \item @{text "@{text s}"} prints uninterpreted source text @{text
+  s}.  This is particularly useful to print portions of text according
+  to the Isabelle document style, without demanding well-formedness,
+  e.g.\ small pieces of terms that should not be parsed or
+  type-checked yet.
+
+  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
+  state.  This is mainly for support of tactic-emulation scripts
+  within Isar.  Presentation of goal states does not conform to the
+  idea of human-readable proof documents!
+
+  When explaining proofs in detail it is usually better to spell out
+  the reasoning via proper Isar proof commands, instead of peeking at
+  the internal machine configuration.
+  
+  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
+  does not print the main goal.
+  
+  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
+  corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
+  requires proof terms to be switched on for the current logic
+  session.
+  
+  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
+  a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
+  information omitted in the compact proof term, which is denoted by
+  ``@{text _}'' placeholders there.
+  
+  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
+  s}"}, and @{text "@{ML_struct s}"} check text @{text s} as ML value,
+  infix operator, type, and structure, respectively.  The source is
+  printed verbatim.
+
+  \item @{text "@{file path}"} checks that @{text "path"} refers to a
+  file (or directory) and prints it verbatim.
+
+  \end{description}
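+
+  For example, ``@{text "@{ML_type \"int -> int\"}"}'' checks the
+  given text as an ML type and prints it verbatim, while
+  ``@{text "@{file \"~~/src/Pure/thm.ML\"}"}'' ensures that this path
+  refers to an existing file (both are illustrative uses).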
+*}
+
+
+subsection {* Styled antiquotations *}
+
+text {* The antiquotations @{text thm}, @{text prop} and @{text
+  term} admit an extra \emph{style} specification to modify the
+  printed result.  A style is specified by a name with a possibly
+  empty list of arguments; multiple styles can be sequenced with
+  commas.  The following standard styles are available:
+
+  \begin{description}
+  
+  \item @{text lhs} extracts the first argument of any application
+  form with at least two arguments --- typically meta-level or
+  object-level equality, or any other binary relation.
+  
+  \item @{text rhs} is like @{text lhs}, but extracts the second
+  argument.
+  
+  \item @{text "concl"} extracts the conclusion @{text C} from a rule
+  in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
+  
+  \item @{text "prem"} @{text n} extract premise number
+  @{text "n"} from from a rule in Horn-clause
+  normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
+
+  \end{description}
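+
+  For example, ``@{text "@{thm (concl) conjI}"}'' prints just the
+  conclusion @{text "A \<and> B"} of the @{text conjI} rule, while
+  ``@{text "@{thm (prem 1) conjI}"}'' prints its first premise
+  (illustrative uses of the styles above).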
+*}
+
+
+subsection {* General options *}
+
+text {* The following options are available to tune the printed output
+  of antiquotations.  Note that many of these coincide with global ML
+  flags of the same names.
+
+  \begin{description}
+
+  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
+  @{antiquotation_option_def show_sorts}~@{text "= bool"} control
+  printing of explicit type and sort constraints.
+
+  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
+  controls printing of implicit structures.
+
+  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
+  controls folding of abbreviations.
+
+  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
+  names of types and constants etc.\ to be printed in their fully
+  qualified internal form.
+
+  \item @{antiquotation_option_def names_short}~@{text "= bool"}
+  forces names of types and constants etc.\ to be printed unqualified.
+  Note that internalizing the output again in the current context may
+  well yield a different result.
+
+  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
+  determines whether the printed version of qualified names should be
+  made sufficiently long to avoid overlap with names declared further
+  back.  Set to @{text false} for more concise output.
+
+  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
+  prints terms in @{text \<eta>}-contracted form.
+
+  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
+  if the text is to be output as multi-line ``display material'',
+  rather than a small piece of text without line breaks (which is the
+  default).
+
+  In this mode the embedded entities are printed in the same style as
+  the main theory text.
+
+  \item @{antiquotation_option_def break}~@{text "= bool"} controls
+  line breaks in non-display material.
+
+  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
+  if the output should be enclosed in double quotes.
+
+  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
+  name} to the print mode to be used for presentation.  Note that the
+  standard setup for {\LaTeX} output is already present by default,
+  including the modes @{text latex} and @{text xsymbols}.
+
+  \item @{antiquotation_option_def margin}~@{text "= nat"} and
+  @{antiquotation_option_def indent}~@{text "= nat"} change the margin
+  or indentation for pretty printing of display material.
+
+  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
+  determines the maximum number of goals to be printed (for goal-based
+  antiquotation).
+
+  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
+  original source text of the antiquotation arguments, rather than their
+  internal representation.  Note that formal checking of
+  @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
+  enabled; use the @{antiquotation "text"} antiquotation for unchecked
+  output.
+
+  Regular @{text "term"} and @{text "typ"} antiquotations with @{text
+  "source = false"} involve a full round-trip from the original source
+  to an internalized logical entity back to a source form, according
+  to the syntax of the current context.  Thus the printed output is
+  not under direct control of the author; it may even fluctuate a bit
+  as the underlying theory is changed later on.
+
+  In contrast, @{antiquotation_option source}~@{text "= true"}
+  admits direct printing of the given source text, with the desirable
+  well-formedness check in the background, but without modification of
+  the printed text.
+
+  \end{description}
+
+  For boolean flags, ``@{text "name = true"}'' may be abbreviated as
+  ``@{text name}''.  All of the above flags are disabled by default,
+  unless changed from ML, say in the @{verbatim "ROOT.ML"} of the
+  logic session.
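+
+  For example, ``@{text "@{term [show_types, display] \"map f xs\"}"}''
+  (an illustrative combination) prints the given term with explicit
+  type constraints as multi-line display material.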
+*}
+
+
+section {* Markup via command tags \label{sec:tags} *}
+
+text {* Each Isabelle/Isar command may be decorated by additional
+  presentation tags, to indicate some modification in the way it is
+  printed in the document.
+
+  @{rail "
+    @{syntax_def tags}: ( tag * )
+    ;
+    tag: '%' (@{syntax ident} | @{syntax string})
+  "}
+
+  Some tags are pre-declared for certain classes of commands, serving
+  as default markup if no tags are given in the text:
+
+  \medskip
+  \begin{tabular}{ll}
+    @{text "theory"} & theory begin/end \\
+    @{text "proof"} & all proof commands \\
+    @{text "ML"} & all commands involving ML code \\
+  \end{tabular}
+
+  \medskip The Isabelle document preparation system
+  \cite{isabelle-sys} allows tagged command regions to be presented
+  specifically, e.g.\ to fold proof texts, or drop parts of the text
+  completely.
+
+  For example ``@{command "by"}~@{text "%invisible auto"}'' causes
+  that piece of proof to be treated as @{text invisible} instead of
+  @{text "proof"} (the default), which may be shown or hidden
+  depending on the document setup.  In contrast, ``@{command
+  "by"}~@{text "%visible auto"}'' forces this text to be shown
+  invariably.
+
+  Explicit tag specifications within a proof apply to all subsequent
+  commands of the same level of nesting.  For example, ``@{command
+  "proof"}~@{text "%visible \<dots>"}~@{command "qed"}'' forces the whole
+  sub-proof to be typeset as @{text visible} (unless some of its parts
+  are tagged differently).
+
+  \medskip Command tags merely produce certain markup environments for
+  type-setting.  The meaning of these is determined by {\LaTeX}
+  macros, as defined in @{file "~~/lib/texinputs/isabelle.sty"} or
+  by the document author.  The Isabelle document preparation tools
+  also provide some high-level options to specify the meaning of
+  arbitrary tags to ``keep'', ``drop'', or ``fold'' the corresponding
+  parts of the text.  Logic sessions may also specify ``document
+  versions'', where given tags are interpreted in some particular way.
+  Again see \cite{isabelle-sys} for further details.
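+
+  For example, a @{verbatim usedir} option of roughly the following
+  form (an illustrative sketch of the version syntax, see
+  \cite{isabelle-sys}) would define a document version @{verbatim
+  outline} where proofs and ML sections are folded:
+\begin{ttbox}
+  -V outline=/proof,/ML
+\end{ttbox}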
+*}
+
+
+section {* Railroad diagrams *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{antiquotation_def "rail"} & : & @{text antiquotation} \\
+  \end{matharray}
+
+  @{rail "'rail' string"}
+
+  The @{antiquotation rail} antiquotation allows syntax diagrams to
+  be included in Isabelle documents.  {\LaTeX} requires the style file
+  @{"file" "~~/lib/texinputs/pdfsetup.sty"}, which can be used via
+  @{verbatim "\\usepackage{pdfsetup}"} in @{verbatim "root.tex"}, for
+  example.
+
+  The rail specification language is quoted here as Isabelle @{syntax
+  string}; it has its own grammar given below.
+
+  @{rail "
+  rule? + ';'
+  ;
+  rule: ((identifier | @{syntax antiquotation}) ':')? body
+  ;
+  body: concatenation + '|'
+  ;
+  concatenation: ((atom '?'?) +) (('*' | '+') atom?)?
+  ;
+  atom: '(' body? ')' | identifier |
+    '@'? (string | @{syntax antiquotation}) |
+    '\\\\\\\\'
+  "}
+
+  The lexical syntax of @{text "identifier"} coincides with that of
+  @{syntax ident} in regular Isabelle syntax, but @{text string} uses
+  single quotes instead of double quotes of the standard @{syntax
+  string} category, to avoid extra escapes.
+
+  Each @{text rule} defines a formal language (with optional name),
+  using a notation that is similar to EBNF or regular expressions with
+  recursion.  The meaning and visual appearance of these rail language
+  elements are illustrated by the following representative examples.
+
+  \begin{itemize}
+
+  \item Empty @{verbatim "()"}
+
+  @{rail "()"}
+
+  \item Nonterminal @{verbatim "A"}
+
+  @{rail "A"}
+
+  \item Nonterminal via Isabelle antiquotation
+  @{verbatim "@{syntax method}"}
+
+  @{rail "@{syntax method}"}
+
+  \item Terminal @{verbatim "'xyz'"}
+
+  @{rail "'xyz'"}
+
+  \item Terminal in keyword style @{verbatim "@'xyz'"}
+
+  @{rail "@'xyz'"}
+
+  \item Terminal via Isabelle antiquotation
+  @{verbatim "@@{method rule}"}
+
+  @{rail "@@{method rule}"}
+
+  \item Concatenation @{verbatim "A B C"}
+
+  @{rail "A B C"}
+
+  \item Linebreak @{verbatim "\\\\"} inside
+  concatenation\footnote{Strictly speaking, this is only a single
+  backslash, but the enclosing @{syntax string} syntax requires a
+  second one for escaping.} @{verbatim "A B C \\\\ D E F"}
+
+  @{rail "A B C \\ D E F"}
+
+  \item Variants @{verbatim "A | B | C"}
+
+  @{rail "A | B | C"}
+
+  \item Option @{verbatim "A ?"}
+
+  @{rail "A ?"}
+
+  \item Repetition @{verbatim "A *"}
+
+  @{rail "A *"}
+
+  \item Repetition with separator @{verbatim "A * sep"}
+
+  @{rail "A * sep"}
+
+  \item Strict repetition @{verbatim "A +"}
+
+  @{rail "A +"}
+
+  \item Strict repetition with separator @{verbatim "A + sep"}
+
+  @{rail "A + sep"}
+
+  \end{itemize}
+*}
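+
+text {* As a combined sketch (an illustrative example with a
+  hypothetical command @{verbatim foo}), a realistic rule may mix
+  several of these elements:
+
+  @{rail "@'foo' @{syntax name} ('(' (@{syntax term} + ',') ')')?"}
+
+  \noindent i.e.\ the command keyword, a name, and an optional
+  parenthesized list of terms separated by commas.
+*}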
+
+
+section {* Draft presentation *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "display_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "print_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{command display_drafts} | @@{command print_drafts}) (@{syntax name} +)
+
+  "}
+
+  \begin{description}
+
+  \item @{command "display_drafts"}~@{text paths} and @{command
+  "print_drafts"}~@{text paths} perform simple output of a given list
+  of raw source files.  Only those symbols that do not require
+  additional {\LaTeX} packages are displayed properly; everything else
+  is left verbatim.
+
+  \end{description}
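+
+  For example, ``@{command "display_drafts"}~@{verbatim "Foo.thy"}''
+  (an illustrative invocation) produces a draft document of that
+  single source file.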
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/First_Order_Logic.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,520 @@
+
+header {* Example: First-Order Logic *}
+
+theory %visible First_Order_Logic
+imports Base  (* FIXME Pure!? *)
+begin
+
+text {*
+  \noindent In order to commence a new object-logic within
+  Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
+  for individuals and @{text "o"} for object-propositions.  The latter
+  is embedded into the language of Pure propositions by means of a
+  separate judgment.
+*}
+
+typedecl i
+typedecl o
+
+judgment
+  Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
+
+text {*
+  \noindent Note that the object-logic judgment is implicit in the
+  syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
+  From the Pure perspective this means ``@{prop A} is derivable in the
+  object-logic''.
+*}
+
+
+subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
+
+text {*
+  Equality is axiomatized as a binary predicate on individuals, with
+  reflexivity as introduction, and substitution as elimination
+  principle.  Note that the latter is particularly convenient in a
+  framework like Isabelle, because syntactic congruences are
+  implicitly produced by unification of @{term "B x"} against
+  expressions containing occurrences of @{term x}.
+*}
+
+axiomatization
+  equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infix "=" 50)
+where
+  refl [intro]: "x = x" and
+  subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
+
+text {*
+  \noindent Substitution is very powerful, but also hard to control in
+  full generality.  We derive some common symmetry and transitivity
+  schemes of @{term equal} as particular consequences.
+*}
+
+theorem sym [sym]:
+  assumes "x = y"
+  shows "y = x"
+proof -
+  have "x = x" ..
+  with `x = y` show "y = x" ..
+qed
+
+theorem forw_subst [trans]:
+  assumes "y = x" and "B x"
+  shows "B y"
+proof -
+  from `y = x` have "x = y" ..
+  from this and `B x` show "B y" ..
+qed
+
+theorem back_subst [trans]:
+  assumes "B x" and "x = y"
+  shows "B y"
+proof -
+  from `x = y` and `B x`
+  show "B y" ..
+qed
+
+theorem trans [trans]:
+  assumes "x = y" and "y = z"
+  shows "x = z"
+proof -
+  from `y = z` and `x = y`
+  show "x = z" ..
+qed
+
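+text {*
+  \noindent For illustration (an added example, not part of the
+  original text), these @{attribute trans} declarations already
+  enable calculational chaining:
+*}
+
+theorem
+  assumes "a = b" and "b = c" and "c = d"
+  shows "a = d"
+proof -
+  note `a = b`
+  also note `b = c`
+  also note `c = d`
+  finally show "a = d" .
+qed
+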
+
+subsection {* Basic group theory *}
+
+text {*
+  As an example for equational reasoning we consider some bits of
+  group theory.  The subsequent locale definition postulates group
+  operations and axioms; we also derive some consequences of this
+  specification.
+*}
+
+locale group =
+  fixes prod :: "i \<Rightarrow> i \<Rightarrow> i"  (infix "\<circ>" 70)
+    and inv :: "i \<Rightarrow> i"  ("(_\<inverse>)" [1000] 999)
+    and unit :: i  ("1")
+  assumes assoc: "(x \<circ> y) \<circ> z = x \<circ> (y \<circ> z)"
+    and left_unit:  "1 \<circ> x = x"
+    and left_inv: "x\<inverse> \<circ> x = 1"
+begin
+
+theorem right_inv: "x \<circ> x\<inverse> = 1"
+proof -
+  have "x \<circ> x\<inverse> = 1 \<circ> (x \<circ> x\<inverse>)" by (rule left_unit [symmetric])
+  also have "\<dots> = (1 \<circ> x) \<circ> x\<inverse>" by (rule assoc [symmetric])
+  also have "1 = (x\<inverse>)\<inverse> \<circ> x\<inverse>" by (rule left_inv [symmetric])
+  also have "\<dots> \<circ> x = (x\<inverse>)\<inverse> \<circ> (x\<inverse> \<circ> x)" by (rule assoc)
+  also have "x\<inverse> \<circ> x = 1" by (rule left_inv)
+  also have "((x\<inverse>)\<inverse> \<circ> \<dots>) \<circ> x\<inverse> = (x\<inverse>)\<inverse> \<circ> (1 \<circ> x\<inverse>)" by (rule assoc)
+  also have "1 \<circ> x\<inverse> = x\<inverse>" by (rule left_unit)
+  also have "(x\<inverse>)\<inverse> \<circ> \<dots> = 1" by (rule left_inv)
+  finally show "x \<circ> x\<inverse> = 1" .
+qed
+
+theorem right_unit: "x \<circ> 1 = x"
+proof -
+  have "1 = x\<inverse> \<circ> x" by (rule left_inv [symmetric])
+  also have "x \<circ> \<dots> = (x \<circ> x\<inverse>) \<circ> x" by (rule assoc [symmetric])
+  also have "x \<circ> x\<inverse> = 1" by (rule right_inv)
+  also have "\<dots> \<circ> x = x" by (rule left_unit)
+  finally show "x \<circ> 1 = x" .
+qed
+
+text {*
+  \noindent Reasoning from basic axioms is often tedious.  Our proofs
+  work by producing various instances of the given rules (potentially
+  the symmetric form) using the pattern ``@{command have}~@{text
+  eq}~@{command "by"}~@{text "(rule r)"}'' and composing the chain of
+  results via @{command also}/@{command finally}.  These steps may
+  involve any of the transitivity rules declared in
+  \secref{sec:framework-ex-equal}, namely @{thm trans} in combining
+  the first two results in @{thm right_inv} and in the final steps of
+  both proofs, @{thm forw_subst} in the first combination of @{thm
+  right_unit}, and @{thm back_subst} in all other calculational steps.
+
+  Occasional substitutions in calculations are adequate, but should
+  not be over-emphasized.  The other extreme is to compose a chain by
+  plain transitivity only, with replacements occurring always in
+  topmost position. For example:
+*}
+
+(*<*)
+theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+  assume [symmetric, defn]: "\<And>x y. (x \<equiv> y) \<equiv> Trueprop (x = y)"
+(*>*)
+  have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" unfolding left_inv ..
+  also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" unfolding assoc ..
+  also have "\<dots> = 1 \<circ> x" unfolding right_inv ..
+  also have "\<dots> = x" unfolding left_unit ..
+  finally have "x \<circ> 1 = x" .
+(*<*)
+qed
+(*>*)
+
+text {*
+  \noindent Here we have re-used the built-in mechanism for unfolding
+  definitions in order to normalize each equational problem.  A more
+  realistic object-logic would include proper setup for the Simplifier
+  (\secref{sec:simplifier}), the main automated tool for equational
+  reasoning in Isabelle.  Then ``@{command unfolding}~@{thm
+  left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
+  "(simp only: left_inv)"}'' etc.
+*}
+
+end
+
+
+subsection {* Propositional logic \label{sec:framework-ex-prop} *}
+
+text {*
+  We axiomatize basic connectives of propositional logic: implication,
+  disjunction, and conjunction.  The associated rules are modeled
+  after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.
+*}
+
+axiomatization
+  imp :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<longrightarrow>" 25) where
+  impI [intro]: "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B" and
+  impD [dest]: "(A \<longrightarrow> B) \<Longrightarrow> A \<Longrightarrow> B"
+
+axiomatization
+  disj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<or>" 30) where
+  disjI\<^isub>1 [intro]: "A \<Longrightarrow> A \<or> B" and
+  disjI\<^isub>2 [intro]: "B \<Longrightarrow> A \<or> B" and
+  disjE [elim]: "A \<or> B \<Longrightarrow> (A \<Longrightarrow> C) \<Longrightarrow> (B \<Longrightarrow> C) \<Longrightarrow> C"
+
+axiomatization
+  conj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<and>" 35) where
+  conjI [intro]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" and
+  conjD\<^isub>1: "A \<and> B \<Longrightarrow> A" and
+  conjD\<^isub>2: "A \<and> B \<Longrightarrow> B"
+
+text {*
+  \noindent The conjunctive destructions have the disadvantage that
+  decomposing @{prop "A \<and> B"} involves an immediate decision which
+  component should be projected.  The more convenient simultaneous
+  elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
+  follows:
+*}
+
+theorem conjE [elim]:
+  assumes "A \<and> B"
+  obtains A and B
+proof
+  from `A \<and> B` show A by (rule conjD\<^isub>1)
+  from `A \<and> B` show B by (rule conjD\<^isub>2)
+qed
+
+text {*
+  \noindent Here is an example of swapping conjuncts with a single
+  intermediate elimination step:
+*}
+
+(*<*)
+lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+(*>*)
+  assume "A \<and> B"
+  then obtain B and A ..
+  then have "B \<and> A" ..
+(*<*)
+qed
+(*>*)
+
+text {*
+  \noindent Note that the analogous elimination rule for disjunction
+  ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
+  the original axiomatization of @{thm disjE}.
+
+  \medskip We continue propositional logic by introducing absurdity
+  with its characteristic elimination.  Plain truth may then be
+  defined as a proposition that is trivially true.
+*}
+
+axiomatization
+  false :: o  ("\<bottom>") where
+  falseE [elim]: "\<bottom> \<Longrightarrow> A"
+
+definition
+  true :: o  ("\<top>") where
+  "\<top> \<equiv> \<bottom> \<longrightarrow> \<bottom>"
+
+theorem trueI [intro]: \<top>
+  unfolding true_def ..
+
+text {*
+  \medskip\noindent Now negation represents an implication towards
+  absurdity:
+*}
+
+definition
+  not :: "o \<Rightarrow> o"  ("\<not> _" [40] 40) where
+  "\<not> A \<equiv> A \<longrightarrow> \<bottom>"
+
+theorem notI [intro]:
+  assumes "A \<Longrightarrow> \<bottom>"
+  shows "\<not> A"
+unfolding not_def
+proof
+  assume A
+  then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
+qed
+
+theorem notE [elim]:
+  assumes "\<not> A" and A
+  shows B
+proof -
+  from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
+  from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
+  then show B ..
+qed
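+
+text {*
+  \noindent As an added (illustrative) example, implication and
+  negation combine into the familiar contraposition principle:
+*}
+
+theorem contrapos:
+  assumes "A \<longrightarrow> B" and "\<not> B"
+  shows "\<not> A"
+proof
+  assume A
+  with `A \<longrightarrow> B` have B ..
+  with `\<not> B` show \<bottom> ..
+qed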
+
+
+subsection {* Classical logic *}
+
+text {*
+  Subsequently we state the principle of classical contradiction as a
+  local assumption.  Thus we refrain from forcing the object-logic
+  into the classical perspective.  Within that context, we may derive
+  well-known consequences of the classical principle.
+*}
+
+locale classical =
+  assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
+begin
+
+theorem double_negation:
+  assumes "\<not> \<not> C"
+  shows C
+proof (rule classical)
+  assume "\<not> C"
+  with `\<not> \<not> C` show C ..
+qed
+
+theorem tertium_non_datur: "C \<or> \<not> C"
+proof (rule double_negation)
+  show "\<not> \<not> (C \<or> \<not> C)"
+  proof
+    assume "\<not> (C \<or> \<not> C)"
+    have "\<not> C"
+    proof
+      assume C then have "C \<or> \<not> C" ..
+      with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+    qed
+    then have "C \<or> \<not> C" ..
+    with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+  qed
+qed
+
+text {*
+  \noindent These examples illustrate both classical reasoning and
+  non-trivial propositional proofs in general.  All three rules
+  characterize classical logic independently, but the original rule is
+  already the most convenient to use, because it leaves the conclusion
+  unchanged.  Note that @{prop "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"} fits again into our
+  format for eliminations, despite the additional twist that the
+  context refers to the main conclusion.  So we may write @{thm
+  classical} as the Isar statement ``@{text "\<OBTAINS> \<not> thesis"}''.
+  This also explains nicely how classical reasoning really works:
+  whatever the main @{text thesis} might be, we may always assume its
+  negation!
+*}
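+
+text {*
+  \noindent As a further (illustrative) addition, Peirce's law can be
+  derived within the same locale:
+*}
+
+theorem peirce:
+  assumes "(C \<longrightarrow> D) \<longrightarrow> C"
+  shows C
+proof (rule classical)
+  assume "\<not> C"
+  have "C \<longrightarrow> D"
+  proof
+    assume C
+    with `\<not> C` show D ..
+  qed
+  with `(C \<longrightarrow> D) \<longrightarrow> C` show C ..
+qed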
+
+end
+
+
+subsection {* Quantifiers \label{sec:framework-ex-quant} *}
+
+text {*
+  Representing quantifiers is easy, thanks to the higher-order nature
+  of the underlying framework.  According to the well-known technique
+  introduced by Church \cite{church40}, quantifiers are operators on
+  predicates, which are syntactically represented as @{text "\<lambda>"}-terms
+  of type @{typ "i \<Rightarrow> o"}.  Binder notation turns @{text "All (\<lambda>x. B
+  x)"} into @{text "\<forall>x. B x"} etc.
+*}
+
+axiomatization
+  All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) where
+  allI [intro]: "(\<And>x. B x) \<Longrightarrow> \<forall>x. B x" and
+  allD [dest]: "(\<forall>x. B x) \<Longrightarrow> B a"
+
+axiomatization
+  Ex :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<exists>" 10) where
+  exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
+  exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
+
+text {*
+  \noindent The statement of @{thm exE} corresponds to ``@{text
+  "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar.  In the
+  subsequent example we illustrate quantifier reasoning involving all
+  four rules:
+*}
+
+theorem
+  assumes "\<exists>x. \<forall>y. R x y"
+  shows "\<forall>y. \<exists>x. R x y"
+proof    -- {* @{text "\<forall>"} introduction *}
+  obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` ..    -- {* @{text "\<exists>"} elimination *}
+  fix y have "R x y" using `\<forall>y. R x y` ..    -- {* @{text "\<forall>"} destruction *}
+  then show "\<exists>x. R x y" ..    -- {* @{text "\<exists>"} introduction *}
+qed
+
+
+subsection {* Canonical reasoning patterns *}
+
+text {*
+  The main rules of first-order predicate logic from
+  \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
+  can now be summarized as follows, using the native Isar statement
+  format of \secref{sec:framework-stmt}.
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
+  @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
+
+  @{text "disjI\<^isub>1: \<ASSUMES> A \<SHOWS> A \<or> B"} \\
+  @{text "disjI\<^isub>2: \<ASSUMES> B \<SHOWS> A \<or> B"} \\
+  @{text "disjE: \<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"} \\[1ex]
+
+  @{text "conjI: \<ASSUMES> A \<AND> B \<SHOWS> A \<and> B"} \\
+  @{text "conjE: \<ASSUMES> A \<and> B \<OBTAINS> A \<AND> B"} \\[1ex]
+
+  @{text "falseE: \<ASSUMES> \<bottom> \<SHOWS> A"} \\
+  @{text "trueI: \<SHOWS> \<top>"} \\[1ex]
+
+  @{text "notI: \<ASSUMES> A \<Longrightarrow> \<bottom> \<SHOWS> \<not> A"} \\
+  @{text "notE: \<ASSUMES> \<not> A \<AND> A \<SHOWS> B"} \\[1ex]
+
+  @{text "allI: \<ASSUMES> \<And>x. B x \<SHOWS> \<forall>x. B x"} \\
+  @{text "allE: \<ASSUMES> \<forall>x. B x \<SHOWS> B a"} \\[1ex]
+
+  @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
+  @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
+  \end{tabular}
+  \medskip
+
+  \noindent This essentially provides a declarative reading of Pure
+  rules as Isar reasoning patterns: the rule statements tell what a
+  canonical proof outline should look like.  Since the above rules have
+  already been declared as @{attribute (Pure) intro}, @{attribute
+  (Pure) elim}, @{attribute (Pure) dest} --- each according to its
+  particular shape --- we can immediately write Isar proof texts as
+  follows:
+*}
+
+(*<*)
+theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+(*>*)
+
+  txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<longrightarrow> B"
+  proof
+    assume A
+    show B sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<longrightarrow> B" and A sorry %noproof
+  then have B ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have A sorry %noproof
+  then have "A \<or> B" ..
+
+  have B sorry %noproof
+  then have "A \<or> B" ..
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<or> B" sorry %noproof
+  then have C
+  proof
+    assume A
+    then show C sorry %noproof
+  next
+    assume B
+    then show C sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have A and B sorry %noproof
+  then have "A \<and> B" ..
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "A \<and> B" sorry %noproof
+  then obtain A and B ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<bottom>" sorry %noproof
+  then have A ..
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<top>" ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<not> A"
+  proof
+    assume A
+    then show "\<bottom>" sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<not> A" and A sorry %noproof
+  then have B ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<forall>x. B x"
+  proof
+    fix x
+    show "B x" sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<forall>x. B x" sorry %noproof
+  then have "B a" ..
+
+  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<exists>x. B x"
+  proof
+    show "B a" sorry %noproof
+  qed
+
+  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+  have "\<exists>x. B x" sorry %noproof
+  then obtain a where "B a" ..
+
+  txt_raw {*\end{minipage}*}
+
+(*<*)
+qed
+(*>*)
+
+text {*
+  \bigskip\noindent Of course, these proofs are merely examples.  As
+  sketched in \secref{sec:framework-subproof}, there is a fair amount
+  of flexibility in expressing Pure deductions in Isar.  Here the user
+  is asked to express himself adequately, aiming at proof texts of
+  literary quality.
+*}
+
+end %visible
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Framework.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,1016 @@
+theory Framework
+imports Base Main
+begin
+
+chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
+
+text {*
+  Isabelle/Isar
+  \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
+  is intended as a generic framework for developing formal
+  mathematical documents with full proof checking.  Definitions and
+  proofs are organized as theories.  An assembly of theory sources may
+  be presented as a printed document; see also
+  \chref{ch:document-prep}.
+
+  The main objective of Isar is the design of a human-readable
+  structured proof language, which is called the ``primary proof
+  format'' in Isar terminology.  Such a primary proof language is
+  somewhere in the middle between the extremes of primitive proof
+  objects and actual natural language.  In this respect, Isar is a bit
+  more formalistic than Mizar
+  \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
+  using logical symbols for certain reasoning schemes where Mizar
+  would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
+  further comparisons of these systems.
+
+  So Isar challenges the traditional way of recording informal proofs
+  in mathematical prose, as well as the common tendency to see fully
+  formal proofs directly as objects of some logical calculus (e.g.\
+  @{text "\<lambda>"}-terms in a version of type theory).  In fact, Isar is
+  better understood as an interpreter of a simple block-structured
+  language for describing the data flow of local facts and goals,
+  interspersed with occasional invocations of proof methods.
+  Everything is reduced to logical inferences internally, but these
+  steps are somewhat marginal compared to the overall bookkeeping of
+  the interpretation process.  Thanks to careful design of the syntax
+  and semantics of Isar language elements, a formal record of Isar
+  instructions may later appear as an intelligible text to the
+  attentive reader.
+
+  The Isar proof language has emerged from careful analysis of some
+  inherent virtues of the existing logical framework of Isabelle/Pure
+  \cite{paulson-found,paulson700}, notably composition of higher-order
+  natural deduction rules, which is a generalization of Gentzen's
+  original calculus \cite{Gentzen:1935}.  The approach of generic
+  inference systems in Pure is continued by Isar towards actual proof
+  texts.
+
+  Concrete applications require another intermediate layer: an
+  object-logic.  Isabelle/HOL \cite{isa-tutorial} (simply-typed
+  set-theory) is used most of the time; Isabelle/ZF
+  \cite{isabelle-ZF} is less extensively developed, although it would
+  probably fit better for classical mathematics.
+
+  \medskip In order to illustrate natural deduction in Isar, we shall
+  refer to the background theory and library of Isabelle/HOL.  This
+  includes common notions of predicate logic, naive set-theory etc.\
+  using fairly standard mathematical notation.  From the perspective
+  of generic natural deduction there is nothing special about the
+  logical connectives of HOL (@{text "\<and>"}, @{text "\<or>"}, @{text "\<forall>"},
+  @{text "\<exists>"}, etc.), only the resulting reasoning principles are
+  relevant to the user.  There are similar rules available for
+  set-theory operators (@{text "\<inter>"}, @{text "\<union>"}, @{text "\<Inter>"}, @{text
+  "\<Union>"}, etc.), or any other theory developed in the library (lattice
+  theory, topology etc.).
+
+  Subsequently we briefly review fragments of Isar proof texts
+  corresponding directly to such general deduction schemes.  The
+  examples shall refer to set-theory, to minimize the danger of
+  understanding connectives of predicate logic as something special.
+
+  \medskip The following deduction performs @{text "\<inter>"}-introduction,
+  working forwards from assumptions towards the conclusion.  We give
+  both the Isar text, and depict the primitive rule involved, as
+  determined by unification of the problem against rules that are
+  declared in the library context.
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> A" and "x \<in> B"
+    then have "x \<in> A \<inter> B" ..
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+  \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Note that @{command assume} augments the proof
+  context, @{command then} indicates that the current fact shall be
+  used in the next step, and @{command have} states an intermediate
+  goal.  The two dots ``@{command ".."}'' refer to a complete proof of
+  this claim, using the indicated facts and a canonical rule from the
+  context.  We could have been more explicit here by spelling out the
+  final proof step via the @{command "by"} command:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> A" and "x \<in> B"
+    then have "x \<in> A \<inter> B" by (rule IntI)
+(*<*)
+end
+(*>*)
+
+text {*
+  \noindent The format of the @{text "\<inter>"}-introduction rule represents
+  the most basic inference, which proceeds from given premises to a
+  conclusion, without any nested proof context involved.
+
+  The next example performs backwards introduction on @{term "\<Inter>\<A>"},
+  the intersection of all sets within a given set.  This requires a
+  nested proof of set membership within a local context, where @{term
+  A} is an arbitrary-but-fixed member of the collection:
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    have "x \<in> \<Inter>\<A>"
+    proof
+      fix A
+      assume "A \<in> \<A>"
+      show "x \<in> A" sorry %noproof
+    qed
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+  \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent This Isar reasoning pattern again refers to the
+  primitive rule depicted above.  The system determines it in the
+  ``@{command proof}'' step, which could have been spelt out more
+  explicitly as ``@{command proof}~@{text "(rule InterI)"}''.  Note
+  that the rule involves both a local parameter @{term "A"} and an
+  assumption @{prop "A \<in> \<A>"} in the nested reasoning.  This kind of
+  compound rule typically demands a genuine sub-proof in Isar, working
+  backwards rather than forwards as seen before.  In the proof body we
+  encounter the @{command fix}-@{command assume}-@{command show}
+  outline of nested sub-proofs that is typical for Isar.  The final
+  @{command show} is like @{command have} followed by an additional
+  refinement of the enclosing claim, using the rule derived from the
+  proof body.
+
+  \medskip The next example involves @{term "\<Union>\<A>"}, which can be
+  characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
+  \<in> A \<and> A \<in> \<A>"}.  The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
+  not mention @{text "\<exists>"} and @{text "\<and>"} at all, but admits to obtain
+  directly a local @{term "A"} such that @{prop "x \<in> A"} and @{prop "A
+  \<in> \<A>"} hold.  This corresponds to the following Isar proof and
+  inference rule, respectively:
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> \<Union>\<A>"
+    then have C
+    proof
+      fix A
+      assume "x \<in> A" and "A \<in> \<A>"
+      show C sorry %noproof
+    qed
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+  \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Although the Isar proof follows the natural
+  deduction rule closely, the text does not read as naturally as
+  anticipated.  There is a double occurrence of an arbitrary
+  conclusion @{prop "C"}, which represents the final result, but is
+  irrelevant for now.  This issue arises for any elimination rule
+  involving local parameters.  Isar provides the derived language
+  element @{command obtain}, which is able to perform the same
+  elimination proof more conveniently:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+    assume "x \<in> \<Union>\<A>"
+    then obtain A where "x \<in> A" and "A \<in> \<A>" ..
+(*<*)
+end
+(*>*)
+
+text {*
+  \noindent Here we avoid mentioning the final conclusion @{prop "C"}
+  and return to plain forward reasoning.  The rule involved in the
+  ``@{command ".."}'' proof is the same as before.
+*}
+
+
+section {* The Pure framework \label{sec:framework-pure} *}
+
+text {*
+  The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
+  fragment of higher-order logic \cite{church40}.  In type-theoretic
+  parlance, there are three levels of @{text "\<lambda>"}-calculus with
+  corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
+  @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
+  @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
+  \end{tabular}
+  \medskip
+
+  \noindent Here only the types of syntactic terms and the
+  propositions of proof terms have been shown.  The @{text
+  "\<lambda>"}-structure of proofs can be recorded as an optional feature of
+  the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
+  the formal system can never depend on them due to \emph{proof
+  irrelevance}.
+
+  On top of this most primitive layer of proofs, Pure implements a
+  generic calculus for nested natural deduction rules, similar to
+  \cite{Schroeder-Heister:1984}.  Here object-logic inferences are
+  internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
+  Combining such rule statements may involve higher-order unification
+  \cite{paulson-natural}.
+*}
+
+
+subsection {* Primitive inferences *}
+
+text {*
+  Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
+  \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
+  implicit thanks to type-inference; terms of type @{text "prop"} are
+  called propositions.  Logical statements are composed via @{text "\<And>x
+  :: \<alpha>. B(x)"} and @{text "A \<Longrightarrow> B"}.  Primitive reasoning operates on
+  judgments of the form @{text "\<Gamma> \<turnstile> \<phi>"}, with standard introduction
+  and elimination rules for @{text "\<And>"} and @{text "\<Longrightarrow>"} that refer to
+  fixed parameters @{text "x\<^isub>1, \<dots>, x\<^isub>m"} and hypotheses
+  @{text "A\<^isub>1, \<dots>, A\<^isub>n"} from the context @{text "\<Gamma>"};
+  the corresponding proof terms are left implicit.  The subsequent
+  inference rules define @{text "\<Gamma> \<turnstile> \<phi>"} inductively, relative to a
+  collection of axioms:
+
+  \[
+  \infer{@{text "\<turnstile> A"}}{(@{text "A"} \text{~axiom})}
+  \qquad
+  \infer{@{text "A \<turnstile> A"}}{}
+  \]
+
+  \[
+  \infer{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}{@{text "\<Gamma> \<turnstile> B(x)"} & @{text "x \<notin> \<Gamma>"}}
+  \qquad
+  \infer{@{text "\<Gamma> \<turnstile> B(a)"}}{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}
+  \]
+
+  \[
+  \infer{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+  \qquad
+  \infer{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+  \]
+
+  Furthermore, Pure provides a built-in equality @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow>
+  prop"} with axioms for reflexivity, substitution, extensionality,
+  and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
+
+  \medskip An object-logic introduces another layer on top of Pure,
+  e.g.\ with types @{text "i"} for individuals and @{text "o"} for
+  propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
+  (implicit) derivability judgment and connectives like @{text "\<and> :: o
+  \<Rightarrow> o \<Rightarrow> o"} or @{text "\<forall> :: (i \<Rightarrow> o) \<Rightarrow> o"}, and axioms for object-level
+  rules such as @{text "conjI: A \<Longrightarrow> B \<Longrightarrow> A \<and> B"} or @{text "allI: (\<And>x. B
+  x) \<Longrightarrow> \<forall>x. B x"}.  Derived object rules are represented as theorems of
+  Pure.  After the initial object-logic setup, further axiomatizations
+  are usually avoided; plain definitions and derived principles are
+  used exclusively.
+*}
+
+
+subsection {* Reasoning with rules \label{sec:framework-resolution} *}
+
+text {*
+  Primitive inferences mostly serve foundational purposes.  The main
+  reasoning mechanisms of Pure operate on nested natural deduction
+  rules expressed as formulae, using @{text "\<And>"} to bind local
+  parameters and @{text "\<Longrightarrow>"} to express entailment.  Multiple
+  parameters and premises are represented by repeating these
+  connectives in a right-associative manner.
+
+  Since @{text "\<And>"} and @{text "\<Longrightarrow>"} commute thanks to the theorem
+  @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}, we may assume w.l.o.g.\
+  that rule statements always observe the normal form where
+  quantifiers are pulled in front of implications at each level of
+  nesting.  This means that any Pure proposition may be presented as a
+  \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
+  form @{text "\<And>x\<^isub>1 \<dots> x\<^isub>m. H\<^isub>1 \<Longrightarrow> \<dots> H\<^isub>n \<Longrightarrow>
+  A"} with @{text "m, n \<ge> 0"}, @{text "A"} atomic, and @{text
+  "H\<^isub>1, \<dots>, H\<^isub>n"} recursively of the same format.
+  Following the convention that outermost quantifiers are implicit,
+  Horn clauses @{text "A\<^isub>1 \<Longrightarrow> \<dots> A\<^isub>n \<Longrightarrow> A"} are a special
+  case of this.
+
+  For example, the @{text "\<inter>"}-introduction rule encountered before is
+  represented as a Pure theorem as follows:
+  \[
+  @{text "IntI:"}~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
+  \]
+
+  \noindent This is a plain Horn clause, since no further nesting on
+  the left is involved.  The general @{text "\<Inter>"}-introduction
+  corresponds to a Hereditary Harrop Formula with one additional level
+  of nesting:
+  \[
+  @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
+  \]
+
+  \medskip Goals are also represented as rules: @{text "A\<^isub>1 \<Longrightarrow>
+  \<dots> A\<^isub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^isub>1, \<dots>,
+  A\<^isub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
+  goal is finished.  To allow @{text "C"} to be a rule statement
+  itself, we introduce the protective marker @{text "# :: prop \<Rightarrow>
+  prop"}, which is defined as identity and hidden from the user.  We
+  initialize and finish goal states as follows:
+
+  \[
+  \begin{array}{c@ {\qquad}c}
+  \infer[(@{inference_def init})]{@{text "C \<Longrightarrow> #C"}}{} &
+  \infer[(@{inference_def finish})]{@{text C}}{@{text "#C"}}
+  \end{array}
+  \]
+
+  \noindent Goal states are refined in intermediate proof steps until
+  a finished form is achieved.  Here the two main reasoning principles
+  are @{inference resolution}, for back-chaining a rule against a
+  sub-goal (replacing it by zero or more sub-goals), and @{inference
+  assumption}, for solving a sub-goal (finding a short-circuit with
+  local assumptions).  Below @{text "\<^vec>x"} stands for @{text
+  "x\<^isub>1, \<dots>, x\<^isub>n"} (@{text "n \<ge> 0"}).
+
+  \[
+  \infer[(@{inference_def resolution})]
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {\begin{tabular}{rl}
+    @{text "rule:"} &
+    @{text "\<^vec>A \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
+    @{text "goal:"} &
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "goal unifier:"} &
+    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+   \end{tabular}}
+  \]
+
+  \medskip
+
+  \[
+  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
+  {\begin{tabular}{rl}
+    @{text "goal:"} &
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "assm unifier:"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text "H\<^sub>i"})} \\
+   \end{tabular}}
+  \]
+
+  The following trace illustrates goal-oriented reasoning in
+  Isabelle/Pure:
+
+  {\footnotesize
+  \medskip
+  \begin{tabular}{r@ {\quad}l}
+  @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
+  @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
+  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> B)"} \\
+  @{text "(A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(assumption)"} \\
+  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> A)"} \\
+  @{text "#\<dots>"} & @{text "(assumption)"} \\
+  @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
+  \end{tabular}
+  \medskip
+  }
+
+  Compositions of @{inference assumption} after @{inference
+  resolution} occur quite often, typically in elimination steps.
+  Traditional Isabelle tactics accommodate this by a combined
+  @{inference_def elim_resolution} principle.  In contrast, Isar uses
+  a slightly more refined combination, where the assumptions to be
+  closed are marked explicitly, using again the protective marker
+  @{text "#"}:
+
+  \[
+  \infer[(@{inference refinement})]
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>G' (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {\begin{tabular}{rl}
+    @{text "sub\<hyphen>proof:"} &
+    @{text "\<^vec>G \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
+    @{text "goal:"} &
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "goal unifier:"} &
+    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+    @{text "assm unifiers:"} &
+    @{text "(\<lambda>\<^vec>x. G\<^sub>j (\<^vec>a \<^vec>x))\<vartheta> = #H\<^sub>i\<vartheta>"} \\
+    & \quad (for each marked @{text "G\<^sub>j"} some @{text "#H\<^sub>i"}) \\
+   \end{tabular}}
+  \]
+
+  \noindent Here the @{text "sub\<hyphen>proof"} rule stems from the
+  main @{command fix}-@{command assume}-@{command show} outline of
+  Isar (cf.\ \secref{sec:framework-subproof}): each assumption
+  indicated in the text results in a marked premise @{text "G"} above.
+  The marking enforces resolution against one of the sub-goal's
+  premises.  Consequently, @{command fix}-@{command assume}-@{command
+  show} makes it possible to fit the result of a sub-proof quite
+  robustly into a pending sub-goal, while maintaining a good measure
+  of flexibility.
+*}
+
+
+section {* The Isar proof language \label{sec:framework-isar} *}
+
+text {*
+  Structured proofs are presented as high-level expressions for
+  composing entities of Pure (propositions, facts, and goals).  The
+  Isar proof language allows reasoning to be organized within the
+  underlying rule calculus of Pure, but Isar is not another logical
+  calculus!
+
+  Isar is an exercise in sound minimalism.  Approximately half of the
+  language is introduced as primitive, the rest defined as derived
+  concepts.  The following grammar describes the core language
+  (category @{text "proof"}), which is embedded into theory
+  specification elements such as @{command theorem}; see also
+  \secref{sec:framework-stmt} for the separate category @{text
+  "statement"}.
+
+  \medskip
+  \begin{tabular}{rcl}
+    @{text "theory\<hyphen>stmt"} & = & @{command "theorem"}~@{text "statement proof  |"}~~@{command "definition"}~@{text "\<dots>  |  \<dots>"} \\[1ex]
+
+    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\[1ex]
+
+    @{text prfx} & = & @{command "using"}~@{text "facts"} \\
+    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
+
+    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
+    & @{text "|"} & @{command "next"} \\
+    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
+    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
+    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
+    & @{text "|"} & @{command assume}~@{text "\<guillemotleft>inference\<guillemotright> name: props"} \\
+    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
+    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
+    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
+  \end{tabular}
+
+  \medskip Simultaneous propositions or facts may be separated by the
+  @{keyword "and"} keyword.
+
+  \medskip The syntax for terms and propositions is inherited from
+  Pure (and the object-logic).  A @{text "pattern"} is a @{text
+  "term"} with schematic variables, to be bound by higher-order
+  matching.
+
+  \medskip Facts may be referenced by name or proposition.  For
+  example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
+  becomes available both as @{text "a"} and
+  \isacharbackquoteopen@{text "A"}\isacharbackquoteclose.  Moreover,
+  fact expressions may involve attributes that modify either the
+  theorem or the background context.  For example, the expression
+  ``@{text "a [OF b]"}'' refers to the composition of two facts
+  according to the @{inference resolution} inference of
+  \secref{sec:framework-resolution}, while ``@{text "a [intro]"}''
+  declares a fact as introduction rule in the context.
+
+  The special fact called ``@{fact this}'' always refers to the last
+  result, as produced by @{command note}, @{command assume}, @{command
+  have}, or @{command show}.  Since @{command note} occurs
+  frequently together with @{command then} we provide some
+  abbreviations:
+
+  \medskip
+  \begin{tabular}{rcl}
+    @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
+    @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
+  \end{tabular}
+  \medskip
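+
+  For example, the following sketch (with arbitrary propositions
+  @{text A} and @{text B}) chains two assumptions into a claim via
+  @{command from}; the terminal ``@{command ".."}'' picks a standard
+  introduction rule from the context:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  assume "A" and "B"
+  from `A` and `B` have "A \<and> B" ..
+(*<*)
+end
+(*>*)
+
+text {*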
+
+  The @{text "method"} category is essentially a parameter and may be
+  populated later.  Methods use the facts indicated by @{command
+  "then"} or @{command using}, and then operate on the goal state.
+  Some basic methods are predefined: ``@{method "-"}'' leaves the goal
+  unchanged, ``@{method this}'' applies the facts as rules to the
+  goal, ``@{method (Pure) "rule"}'' applies the facts to another rule and the
+  result to the goal (both ``@{method this}'' and ``@{method (Pure) rule}''
+  refer to @{inference resolution} of
+  \secref{sec:framework-resolution}).  The secondary arguments to
+  ``@{method (Pure) rule}'' may be specified explicitly as in ``@{text "(rule
+  a)"}'', or picked from the context.  In the latter case, the system
+  first tries rules declared as @{attribute (Pure) elim} or
+  @{attribute (Pure) dest}, followed by those declared as @{attribute
+  (Pure) intro}.
+
+  The default method for @{command proof} is ``@{method (Pure) rule}''
+  (arguments picked from the context), for @{command qed} it is
+  ``@{method "-"}''.  Further abbreviations for terminal proof steps
+  are ``@{command "by"}~@{text "method\<^sub>1 method\<^sub>2"}'' for
+  ``@{command proof}~@{text "method\<^sub>1"}~@{command qed}~@{text
+  "method\<^sub>2"}'', and ``@{command ".."}'' for ``@{command
+  "by"}~@{method (Pure) rule}, and ``@{command "."}'' for ``@{command
+  "by"}~@{method this}''.  The @{command unfolding} element operates
+  directly on the current facts and goal by applying equalities.
+
+  \medskip Block structure can be indicated explicitly by ``@{command
+  "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
+  already involves implicit nesting.  In any case, @{command next}
+  jumps into the next section of a block, i.e.\ it acts like closing
+  an implicit block scope and opening another one; there is no direct
+  correspondence to subgoals here.
+
+  The remaining elements @{command fix} and @{command assume} build up
+  a local context (see \secref{sec:framework-context}), while
+  @{command show} refines a pending sub-goal by the rule resulting
+  from a nested sub-proof (see \secref{sec:framework-subproof}).
+  Further derived concepts will support calculational reasoning (see
+  \secref{sec:framework-calc}).
+*}
+
+
+subsection {* Context elements \label{sec:framework-context} *}
+
+text {*
+  In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
+  essentially acts like a proof context.  Isar elaborates this idea
+  towards a higher-level notion, with additional information for
+  type-inference, term abbreviations, local facts, hypotheses etc.
+
+  The element @{command fix}~@{text "x :: \<alpha>"} declares a local
+  parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
+  results exported from the context, @{text "x"} may become anything.
+  The @{command assume}~@{text "\<guillemotleft>inference\<guillemotright>"} element provides a
+  general interface to hypotheses: ``@{command assume}~@{text
+  "\<guillemotleft>inference\<guillemotright> A"}'' produces @{text "A \<turnstile> A"} locally, while the
+  included inference tells how to discharge @{text A} from results
+  @{text "A \<turnstile> B"} later on.  There is no user-syntax for @{text
+  "\<guillemotleft>inference\<guillemotright>"}, i.e.\ it may only occur internally when derived
+  commands are defined in ML.
+
+  At the user-level, the default inference for @{command assume} is
+  @{inference discharge} as given below.  The additional variants
+  @{command presume} and @{command def} are defined as follows:
+
+  \medskip
+  \begin{tabular}{rcl}
+    @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<hyphen>discharge\<guillemotright> A"} \\
+    @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
+  \end{tabular}
+  \medskip
+
+  \[
+  \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
+  \]
+  \[
+  \infer[(@{inference_def "weak\<hyphen>discharge"})]{@{text "\<strut>\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
+  \]
+  \[
+  \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
+  \]
+
+  \medskip Note that @{inference discharge} and @{inference
+  "weak\<hyphen>discharge"} differ in the marker for @{prop A}, which is
+  relevant when the result of a @{command fix}-@{command
+  assume}-@{command show} outline is composed with a pending goal,
+  cf.\ \secref{sec:framework-subproof}.
+
+  The most interesting derived context element in Isar is @{command
+  obtain} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
+  elimination steps in a purely forward manner.  The @{command obtain}
+  command takes a specification of parameters @{text "\<^vec>x"} and
+  assumptions @{text "\<^vec>A"} to be added to the context, together
+  with a proof of a case rule stating that this extension is
+  conservative (i.e.\ may be removed from closed results later on):
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x  \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
+  \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
+  \quad @{command proof}~@{method "-"} \\
+  \qquad @{command fix}~@{text thesis} \\
+  \qquad @{command assume}~@{text "[intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis"} \\
+  \qquad @{command show}~@{text thesis}~@{command using}~@{text "\<langle>facts\<rangle> \<langle>proof\<rangle>"} \\
+  \quad @{command qed} \\
+  \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
+  \end{tabular}
+  \medskip
+
+  \[
+  \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
+    \begin{tabular}{rl}
+    @{text "case:"} &
+    @{text "\<Gamma> \<turnstile> \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\[0.2ex]
+    @{text "result:"} &
+    @{text "\<Gamma> \<union> \<^vec>A \<^vec>y \<turnstile> B"} \\[0.2ex]
+    \end{tabular}}
+  \]
+
+  \noindent Here the name ``@{text thesis}'' is a specific convention
+  for an arbitrary-but-fixed proposition; in the primitive natural
+  deduction rules shown before we have occasionally used @{text C}.
+  The whole statement of ``@{command obtain}~@{text x}~@{keyword
+  "where"}~@{text "A x"}'' may be read as a claim that @{text "A x"}
+  may be assumed for some arbitrary-but-fixed @{text "x"}.  Also note
+  that ``@{command obtain}~@{text "A \<AND> B"}'' without parameters
+  is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
+  latter involves multiple sub-goals.
+
+  \medskip The subsequent Isar proof texts explain all context
+  elements introduced above using the formal proof language itself.
+  After finishing a local proof within a block, we indicate the
+  exported result via @{command note}.
+*}
+
+(*<*)
+theorem True
+proof
+(*>*)
+  txt_raw {* \begin{minipage}[t]{0.45\textwidth} *}
+  {
+    fix x
+    have "B x" sorry %noproof
+  }
+  note `\<And>x. B x`
+  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  {
+    assume A
+    have B sorry %noproof
+  }
+  note `A \<Longrightarrow> B`
+  txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  {
+    def x \<equiv> a
+    have "B x" sorry %noproof
+  }
+  note `B a`
+  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  {
+    obtain x where "A x" sorry %noproof
+    have B sorry %noproof
+  }
+  note `B`
+  txt_raw {* \end{minipage} *}
+(*<*)
+qed
+(*>*)
+
+text {*
+  \bigskip\noindent This illustrates the meaning of Isar context
+  elements without goals getting in between.
+*}
+
+subsection {* Structured statements \label{sec:framework-stmt} *}
+
+text {*
+  The category @{text "statement"} of top-level theorem specifications
+  is defined as follows:
+
+  \medskip
+  \begin{tabular}{rcl}
+  @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
+  & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
+
+  @{text "context"} & @{text "\<equiv>"} & @{text "\<FIXES> vars \<AND> \<dots>"} \\
+  & @{text "|"} & @{text "\<ASSUMES> name: props \<AND> \<dots>"} \\
+
+  @{text "conclusion"} & @{text "\<equiv>"} & @{text "\<SHOWS> name: props \<AND> \<dots>"} \\
+  & @{text "|"} & @{text "\<OBTAINS> vars \<AND> \<dots> \<WHERE> name: props \<AND> \<dots>"} \\
+  & & \quad @{text "\<BBAR> \<dots>"} \\
+  \end{tabular}
+
+  \medskip\noindent A simple @{text "statement"} consists of named
+  propositions.  The full form admits local context elements followed
+  by the actual conclusions, such as ``@{keyword "fixes"}~@{text
+  x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
+  x"}''.  The final result emerges as a Pure rule after discharging
+  the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
+
+  The @{keyword "obtains"} variant is another abbreviation defined
+  below; unlike @{command obtain} (cf.\
+  \secref{sec:framework-context}) there may be several ``cases''
+  separated by ``@{text "\<BBAR>"}'', each consisting of several
+  parameters (@{text "vars"}) and several premises (@{text "props"}).
+  This specifies multi-branch elimination rules.
+
+  \medskip
+  \begin{tabular}{l}
+  @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x   \<BBAR>   \<dots>   \<equiv>"} \\[0.5ex]
+  \quad @{text "\<FIXES> thesis"} \\
+  \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis  \<AND>  \<dots>"} \\
+  \quad @{text "\<SHOWS> thesis"} \\
+  \end{tabular}
+  \medskip
+
+  Presenting structured statements in such an ``open'' format usually
+  simplifies the subsequent proof, because the outer structure of the
+  problem is already laid out directly.  E.g.\ consider the following
+  canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
+  respectively:
+*}
+
+text_raw {*\begin{minipage}{0.5\textwidth}*}
+
+theorem
+  fixes x and y
+  assumes "A x" and "B y"
+  shows "C x y"
+proof -
+  from `A x` and `B y`
+  show "C x y" sorry %noproof
+qed
+
+text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+
+theorem
+  obtains x and y
+  where "A x" and "B y"
+proof -
+  have "A a" and "B b" sorry %noproof
+  then show thesis ..
+qed
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
+  x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
+  y"}\isacharbackquoteclose\ are referenced immediately; there is no
+  need to decompose the logical rule structure again.  In the second
+  proof the final ``@{command then}~@{command show}~@{text
+  thesis}~@{command ".."}''  involves the local rule case @{text "\<And>x
+  y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
+  "a"} and @{text "b"} produced in the body.
+*}
+
+
+subsection {* Structured proof refinement \label{sec:framework-subproof} *}
+
+text {*
+  By breaking up the grammar for the Isar proof language, we may
+  understand a proof text as a linear sequence of individual proof
+  commands.  These are interpreted as transitions of the Isar virtual
+  machine (Isar/VM), which operates on a block-structured
+  configuration in single steps.  This allows users to write proof
+  texts in an incremental manner, and inspect intermediate
+  configurations for debugging.
+
+  The basic idea is analogous to evaluating algebraic expressions on a
+  stack machine: @{text "(a + b) \<cdot> c"} then corresponds to a sequence
+  of single transitions for each symbol @{text "(, a, +, b, ), \<cdot>, c"}.
+  In Isar the algebraic values are facts or goals, and the operations
+  are inferences.
+
+  \medskip The Isar/VM state maintains a stack of nodes; each node
+  contains the local proof context, the linguistic mode, and a pending
+  goal (optional).  The mode determines the type of transition that
+  may be performed next; it essentially alternates between forward and
+  backward reasoning, with an intermediate stage for chained facts
+  (see \figref{fig:isar-vm}).
+
+  \begin{figure}[htb]
+  \begin{center}
+  \includegraphics[width=0.8\textwidth]{isar-vm}
+  \end{center}
+  \caption{Isar/VM modes}\label{fig:isar-vm}
+  \end{figure}
+
+  For example, in @{text "state"} mode Isar acts like a mathematical
+  scratch-pad, accepting declarations like @{command fix}, @{command
+  assume}, and claims like @{command have}, @{command show}.  A goal
+  statement changes the mode to @{text "prove"}, which means that we
+  may now refine the problem via @{command unfolding} or @{command
+  proof}.  Then we are again in @{text "state"} mode of a proof body,
+  which may issue @{command show} statements to solve pending
+  sub-goals.  A concluding @{command qed} will return to the original
+  @{text "state"} mode one level upwards.  The subsequent Isar/VM
+  trace indicates block structure, linguistic mode, goal state, and
+  inferences:
+*}
+
+text_raw {* \begingroup\footnotesize *}
+(*<*)notepad begin
+(*>*)
+  txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
+  have "A \<longrightarrow> B"
+  proof
+    assume A
+    show B
+      sorry %noproof
+  qed
+  txt_raw {* \end{minipage}\quad
+\begin{minipage}[t]{0.06\textwidth}
+@{text "begin"} \\
+\\
+\\
+@{text "begin"} \\
+@{text "end"} \\
+@{text "end"} \\
+\end{minipage}
+\begin{minipage}[t]{0.08\textwidth}
+@{text "prove"} \\
+@{text "state"} \\
+@{text "state"} \\
+@{text "prove"} \\
+@{text "state"} \\
+@{text "state"} \\
+\end{minipage}\begin{minipage}[t]{0.35\textwidth}
+@{text "(A \<longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
+@{text "(A \<Longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
+\\
+\\
+@{text "#(A \<longrightarrow> B)"} \\
+@{text "A \<longrightarrow> B"} \\
+\end{minipage}\begin{minipage}[t]{0.4\textwidth}
+@{text "(init)"} \\
+@{text "(resolution impI)"} \\
+\\
+\\
+@{text "(refinement #A \<Longrightarrow> B)"} \\
+@{text "(finish)"} \\
+\end{minipage} *}
+(*<*)
+end
+(*>*)
+text_raw {* \endgroup *}
+
+text {*
+  \noindent Here the @{inference refinement} inference from
+  \secref{sec:framework-resolution} mediates composition of Isar
+  sub-proofs nicely.  Observe that this principle incorporates some
+  degree of freedom in proof composition.  In particular, the proof
+  body allows parameters and assumptions to be re-ordered, or commuted
+  according to Hereditary Harrop Form.  Moreover, context elements
+  that are not used in a sub-proof may be omitted altogether.  For
+  example:
+*}
+
+text_raw {*\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix x and y
+    assume "A x" and "B y"
+    show "C x y" sorry %noproof
+  qed
+
+txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+next
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix x assume "A x"
+    fix y assume "B y"
+    show "C x y" sorry %noproof
+  qed
+
+txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+next
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix y assume "B y"
+    fix x assume "A x"
+    show "C x y" sorry
+  qed
+
+txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+(*<*)
+next
+(*>*)
+  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+  proof -
+    fix y assume "B y"
+    fix x
+    show "C x y" sorry
+  qed
+(*<*)
+end
+(*>*)
+
+text_raw {*\end{minipage}*}
+
+text {*
+  \medskip\noindent Such ``peephole optimizations'' of Isar texts are
+  practically important to improve readability, by rearranging
+  context elements according to the natural flow of reasoning in the
+  body, while still observing the overall scoping rules.
+
+  \medskip This illustrates the basic idea of structured proof
+  processing in Isar.  The main mechanisms are based on natural
+  deduction rule composition within the Pure framework.  In
+  particular, there are no direct operations on goal states within the
+  proof body.  Moreover, there is no hidden automated reasoning
+  involved, just plain unification.
+*}
+
+
+subsection {* Calculational reasoning \label{sec:framework-calc} *}
+
+text {*
+  The existing Isar infrastructure is sufficiently flexible to support
+  calculational reasoning (chains of transitivity steps) as derived
+  concept.  The generic proof elements introduced below depend on
+  rules declared as @{attribute trans} in the context.  It is left to
+  the object-logic to provide a suitable rule collection for mixed
+  relations of @{text "="}, @{text "<"}, @{text "\<le>"}, @{text "\<subset>"},
+  @{text "\<subseteq>"} etc.  Due to the flexibility of rule composition
+  (\secref{sec:framework-resolution}), substitution of equals by
+  equals is covered as well, even substitution of inequalities
+  involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
+  and \cite{Bauer-Wenzel:2001}.
+
+  The generic calculational mechanism is based on the observation that
+  rules such as @{text "trans:"}~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"}
+  proceed from the premises towards the conclusion in a deterministic
+  fashion.  Thus we may reason in forward mode, feeding intermediate
+  results into rules selected from the context.  The course of
+  reasoning is organized by maintaining a secondary fact called
+  ``@{fact calculation}'', apart from the primary ``@{fact this}''
+  already provided by the Isar primitives.  In the definitions below,
+  @{attribute OF} refers to @{inference resolution}
+  (\secref{sec:framework-resolution}) with multiple rule arguments,
+  and @{text "trans"} represents to a suitable rule from the context:
+
+  \begin{matharray}{rcl}
+    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
+    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
+    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\
+  \end{matharray}
+
+  \noindent The start of a calculation is determined implicitly in the
+  text: here @{command also} sets @{fact calculation} to the current
+  result; any subsequent occurrence will update @{fact calculation} by
+  combination with the next result and a transitivity rule.  The
+  calculational sequence is concluded via @{command finally}, where
+  the final result is exposed for use in a concluding claim.
+
+  Here is a canonical proof pattern, using @{command have} to
+  establish the intermediate results:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  have "a = b" sorry
+  also have "\<dots> = c" sorry
+  also have "\<dots> = d" sorry
+  finally have "a = d" .
+(*<*)
+end
+(*>*)
+
+text {*
+  \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
+  provided by the Isabelle/Isar syntax layer: it statically refers to
+  the right-hand side argument of the previous statement given in the
+  text.  Thus it happens to coincide with relevant sub-expressions in
+  the calculational chain, but the exact correspondence depends on
+  the transitivity rules involved.
+
+  \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
+  transitivities with only one premise.  Isar maintains a separate
+  rule collection declared via the @{attribute sym} attribute, to be
+  used in fact expressions ``@{text "a [symmetric]"}'', or single-step
+  proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
+  have}~@{text "y = x"}~@{command ".."}''.
+*}
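+
+text {*
+  For instance, the single-step symmetric derivation just described
+  reads as follows (a minimal sketch):
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  fix x y :: 'a
+  assume "x = y"
+  then have "y = x" ..
+(*<*)
+end
+(*>*)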
+
+end
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Generic.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,1279 @@
+theory Generic
+imports Base Main
+begin
+
+chapter {* Generic tools and packages \label{ch:gen-tools} *}
+
+section {* Configuration options \label{sec:config} *}
+
+text {* Isabelle/Pure maintains a record of named configuration
+  options within the theory or proof context, with values of type
+  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
+  string}.  Tools may declare options in ML, and then refer to these
+  values (relative to the context).  Thus global reference variables
+  are easily avoided.  The user may change the value of a
+  configuration option by means of an associated attribute of the same
+  name.  This form of context declaration works particularly well with
+  commands such as @{command "declare"} or @{command "using"} like
+  this:
+*}
+
+declare [[show_main_goal = false]]
+
+notepad
+begin
+  note [[show_main_goal = true]]
+end
+
+text {* For historical reasons, some tools cannot take the full proof
+  context into account and merely refer to the background theory.
+  This is accommodated by configuration options being declared as
+  ``global'', which may not be changed within a local context.
+
+  \begin{matharray}{rcll}
+    @{command_def "print_configs"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail "
+    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
+  "}
+
+  \begin{description}
+  
+  \item @{command "print_configs"} prints the available configuration
+  options, with names, types, and current values.
+  
+  \item @{text "name = value"} as an attribute expression modifies the
+  named option, with the syntax of the value depending on the option's
+  type.  For @{ML_type bool} the default value is @{text true}.  Any
+  attempt to change a global option in a local context is ignored.
+
+  \end{description}
+*}
+
+
+section {* Basic proof tools *}
+
+subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def unfold} & : & @{text method} \\
+    @{method_def fold} & : & @{text method} \\
+    @{method_def insert} & : & @{text method} \\[0.5ex]
+    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def intro} & : & @{text method} \\
+    @{method_def elim} & : & @{text method} \\
+    @{method_def succeed} & : & @{text method} \\
+    @{method_def fail} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
+    ;
+    (@@{method erule} | @@{method drule} | @@{method frule})
+      ('(' @{syntax nat} ')')? @{syntax thmrefs}
+    ;
+    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
+  "}
+
+  \begin{description}
+  
+  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
+  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
+  all goals; any chained facts provided are inserted into the goal and
+  subject to rewriting as well.
+
+  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
+  into all goals of the proof state.  Note that current facts
+  indicated for forward chaining are ignored.
+
+  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
+  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
+  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
+  method (see \secref{sec:pure-meth-att}), but apply rules by
+  elim-resolution, destruct-resolution, and forward-resolution,
+  respectively \cite{isabelle-implementation}.  The optional natural
+  number argument (default 0) specifies additional assumption steps to
+  be performed here.
+
+  Note that these methods are improper ones, mainly serving for
+  experimentation and tactic script emulation.  Different modes of
+  basic rule application are usually expressed in Isar at the proof
+  language level, rather than via implicit proof state manipulations.
+  For example, a proper single-step elimination would be done using
+  the plain @{method rule} method, with forward chaining of current
+  facts.
+
+  \item @{method intro} and @{method elim} repeatedly refine some goal
+  by intro- or elim-resolution, after having inserted any chained
+  facts.  Exactly the rules given as arguments are taken into account;
+  this allows fine-tuned decomposition of a proof problem, in contrast
+  to common automated tools.
+
+  \item @{method succeed} yields a single (unchanged) result; it is
+  the identity of the ``@{text ","}'' method combinator (cf.\
+  \secref{sec:proof-meth}).
+
+  \item @{method fail} yields an empty result sequence; it is the
+  identity of the ``@{text "|"}'' method combinator (cf.\
+  \secref{sec:proof-meth}).
+
+  \end{description}
+
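+  For example, a small sketch of improper rule application in apply
+  style, using @{method erule} with the HOL elimination rule @{text
+  conjE}:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  have "A \<and> B \<Longrightarrow> B"
+    apply (erule conjE)
+    apply assumption
+    done
+(*<*)
+end
+(*>*)
+
+text {*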
+  \begin{matharray}{rcl}
+    @{attribute_def tagged} & : & @{text attribute} \\
+    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
+    @{attribute_def THEN} & : & @{text attribute} \\
+    @{attribute_def unfolded} & : & @{text attribute} \\
+    @{attribute_def folded} & : & @{text attribute} \\
+    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
+    @{attribute_def rotated} & : & @{text attribute} \\
+    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
+    @{attribute_def standard}@{text "\<^sup>*"} & : & @{text attribute} \\
+    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{attribute tagged} @{syntax name} @{syntax name}
+    ;
+    @@{attribute untagged} @{syntax name}
+    ;
+    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
+    ;
+    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
+    ;
+    @@{attribute rotated} @{syntax int}?
+  "}
+
+  \begin{description}
+
+  \item @{attribute tagged}~@{text "name value"} and @{attribute
+  untagged}~@{text name} add and remove \emph{tags} of some theorem.
+  Tags may be any list of string pairs that serve as formal comment.
+  The first string is considered the tag name, the second its value.
+  Note that @{attribute untagged} removes any tags of the same name.
+
+  \item @{attribute THEN}~@{text a} composes rules by resolution; it
+  resolves with the first premise of @{text a} (an alternative
+  position may be also specified).  See also @{ML_op "RS"} in
+  \cite{isabelle-implementation}.
+  
+  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
+  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
+  definitions throughout a rule.
+
+  \item @{attribute abs_def} turns an equation of the form @{prop "f x
+  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
+  simp} or @{method unfold} steps always expand it.  This also works
+  for object-logic equality.
+
+  \item @{attribute rotated}~@{text n} rotates the premises of a
+  theorem by @{text n} (default 1).
+
+  \item @{attribute (Pure) elim_format} turns a destruction rule into
+  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
+  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
+  
+  Note that the Classical Reasoner (\secref{sec:classical}) provides
+  its own version of this operation.
+
+  \item @{attribute standard} puts a theorem into the standard form of
+  object-rules at the outermost theory level.  Note that this
+  operation violates the local proof context (including active
+  locales).
+
+  \item @{attribute no_vars} replaces schematic variables by free
+  ones; this is mainly for tuning output of pretty printed theorems.
+
+  \end{description}
+*}
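+
+text {*
+  For example, the effect of @{attribute THEN} and @{attribute
+  rotated} may be observed by composing HOL rules and inspecting the
+  result (a minimal sketch):
+*}
+
+thm conjunct1 [THEN sym]  -- {* a rule of the form @{text "(s = t) \<and> Q \<Longrightarrow> t = s"} *}
+thm conjI [rotated]  -- {* premises swapped: @{text "Q \<Longrightarrow> P \<Longrightarrow> P \<and> Q"} *}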
+
+
+subsection {* Low-level equational reasoning *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def subst} & : & @{text method} \\
+    @{method_def hypsubst} & : & @{text method} \\
+    @{method_def split} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method subst} ('(' 'asm' ')')? \\ ('(' (@{syntax nat}+) ')')? @{syntax thmref}
+    ;
+    @@{method split} @{syntax thmrefs}
+  "}
+
+  These methods provide low-level facilities for equational reasoning
+  that are intended for specialized applications only.  Normally,
+  single step calculations would be performed in a structured text
+  (see also \secref{sec:calculation}), while the Simplifier methods
+  provide the canonical way for automated normalization (see
+  \secref{sec:simplifier}).
+
+  \begin{description}
+
+  \item @{method subst}~@{text eq} performs a single substitution step
+  using rule @{text eq}, which may be either a meta or object
+  equality.
+
+  \item @{method subst}~@{text "(asm) eq"} substitutes in an
+  assumption.
+
+  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
+  substitutions in the conclusion. The numbers @{text i} to @{text j}
+  indicate the positions to substitute at.  Positions are ordered from
+  the top of the term tree moving down from left to right. For
+  example, in @{text "(a + b) + (c + d)"} there are three positions
+  where commutativity of @{text "+"} is applicable: 1 refers to @{text
+  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.
+
+  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
+  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
+  assume all substitutions are performed simultaneously.  Otherwise
+  the behaviour of @{text subst} is not specified.
+
+  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
+  substitutions in the assumptions. The positions refer to the
+  assumptions in order from left to right.  For example, given in a
+  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
+  commutativity of @{text "+"} is the subterm @{text "a + b"} and
+  position 2 is the subterm @{text "c + d"}.
+
+  \item @{method hypsubst} performs substitution using some
+  assumption; this only works for equations of the form @{text "x =
+  t"} where @{text x} is a free or bound variable.
+
+  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
+  splitting using the given rules.  Splitting is performed in the
+  conclusion or some assumption of the subgoal, depending on the
+  structure of the rule.
+  
+  Note that the @{method simp} method already involves repeated
+  application of split rules as declared in the current context, using
+  @{attribute split}, for example.
+
+  \end{description}
+*}
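+
+text {*
+  For example (a minimal sketch; the rewrite rule @{text add_0_right}
+  has exactly one matching position in the conclusion, so no position
+  number is needed):
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  fix n :: nat
+  have "n + 0 = n"
+    by (subst add_0_right) (rule refl)
+(*<*)
+end
+(*>*)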
+
+
+subsection {* Further tactic emulations \label{sec:tactics} *}
+
+text {*
+  The following improper proof methods emulate traditional tactics.
+  These admit direct access to the goal state, which is normally
+  considered harmful!  In particular, this may involve both numbered
+  goal addressing (default 1), and dynamic instantiation within the
+  scope of some subgoal.
+
+  \begin{warn}
+    Dynamic instantiations refer to universally quantified parameters
+    of a subgoal (the dynamic context) rather than fixed variables and
+    term abbreviations of a (static) Isar context.
+  \end{warn}
+
+  Tactic emulation methods, unlike their ML counterparts, admit
+  simultaneous instantiation from both dynamic and static contexts.
+  If names occur in both contexts, goal parameters hide locally fixed
+  variables.  Likewise, schematic variables refer to term
+  abbreviations, if present in the static context.  Otherwise the
+  schematic variable is left to be solved by unification with certain
+  parts of the subgoal.
+
+  Note that the tactic emulation proof methods in Isabelle/Isar are
+  consistently named @{text foo_tac}.  Note also that variable names
+  occurring on left hand sides of instantiations must be preceded by a
+  question mark if they coincide with a keyword or contain dots.  This
+  is consistent with the attribute @{attribute "where"} (see
+  \secref{sec:pure-meth-att}).
+
+  \begin{matharray}{rcl}
+    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
+      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \\
+    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
+    ;
+    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
+    ;
+    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
+    ;
+    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
+    ;
+    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
+    ;
+
+    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
+  "}
+
+\begin{description}
+
+  \item @{method rule_tac} etc. do resolution of rules with explicit
+  instantiation.  This works the same way as the ML tactics @{ML
+  res_inst_tac} etc.\ (see \cite{isabelle-implementation}).
+
+  Multiple rules may only be given if there is no instantiation; then
+  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
+  \cite{isabelle-implementation}).
+
+  \item @{method cut_tac} inserts facts into the proof state as
+  assumption of a subgoal; instantiations may be given as well.  Note
+  that the scope of schematic variables is spread over the main goal
+  statement, and rule premises are turned into new subgoals.  This is
+  in contrast to the regular method @{method insert} which inserts
+  closed rule statements.
+
+  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
+  from a subgoal.  Note that @{text \<phi>} may contain schematic
+  variables, to abbreviate the intended proposition; the first
+  matching subgoal premise will be deleted.  Removing useless premises
+  from a subgoal increases its readability and can make search tactics
+  run faster.
+
+  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
+  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
+  as new subgoals (in the original context).
+
+  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
+  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
+  \emph{suffix} of variables.
+
+  \item @{method rotate_tac}~@{text n} rotates the premises of a
+  subgoal by @{text n} positions: from right to left if @{text n} is
+  positive, and from left to right if @{text n} is negative; the
+  default value is 1.
+
+  \item @{method tactic}~@{text "text"} produces a proof method from
+  any ML text of type @{ML_type tactic}.  Apart from the usual ML
+  environment and the current proof context, the ML code may refer to
+  the locally bound value @{ML_text facts}, which indicates any
+  current facts used for forward-chaining.
+
+  \item @{method raw_tactic} is similar to @{method tactic}, but
+  presents the goal state in its raw internal form, where simultaneous
+  subgoals appear as conjunction of the logical framework instead of
+  the usual split into several subgoals.  While this feature is useful
+  for debugging complex method definitions, it should never
+  appear in production theories.
+
+  \end{description}
+*}
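+
+text {*
+  For example, a typical use of @{method rule_tac} with explicit
+  instantiation reads as follows (a minimal sketch in apply style):
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  fix a :: nat
+  have "\<exists>x. x = a"
+    apply (rule_tac x = a in exI)
+    apply (rule refl)
+    done
+(*<*)
+end
+(*>*)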
+
+
+section {* The Simplifier \label{sec:simplifier} *}
+
+subsection {* Simplification methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def simp} & : & @{text method} \\
+    @{method_def simp_all} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
+    ;
+
+    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
+    ;
+    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'cong' (() | 'add' | 'del') |
+      'split' (() | 'add' | 'del')) ':' @{syntax thmrefs}
+  "}
+
+  \begin{description}
+
+  \item @{method simp} invokes the Simplifier, after declaring
+  additional rules according to the arguments given.  Note that the
+  @{text only} modifier first removes all other rewrite rules,
+  congruences, and looper tactics (including splits), and then behaves
+  like @{text add}.
+
+  \medskip The @{text cong} modifiers add or delete Simplifier
+  congruence rules (see also \secref{sec:simp-cong}), the default is
+  to add.
+
+  \medskip The @{text split} modifiers add or delete rules for the
+  Splitter (see also \cite{isabelle-ref}), the default is to add.
+  This works only if the Simplifier method has been properly setup to
+  include the Splitter (all major object-logics such as HOL, HOLCF,
+  FOL, and ZF do this already).
+
+  \item @{method simp_all} is similar to @{method simp}, but acts on
+  all goals (backwards from the last to the first one).
+
+  \end{description}
+
+  By default the Simplifier methods take local assumptions fully into
+  account, using equational assumptions in the subsequent
+  normalization process, or simplifying assumptions themselves (cf.\
+  @{ML asm_full_simp_tac} in \cite{isabelle-ref}).  In structured
+  proofs this is usually quite well behaved in practice: just the
+  local premises of the actual goal are involved, additional facts may
+  be inserted via explicit forward-chaining (via @{command "then"},
+  @{command "from"}, @{command "using"} etc.).
+
+  Additional Simplifier options may be specified to tune the behavior
+  further (mostly for unstructured scripts with many accidental local
+  facts): ``@{text "(no_asm)"}'' means assumptions are ignored
+  completely (cf.\ @{ML simp_tac}), ``@{text "(no_asm_simp)"}'' means
+  assumptions are used in the simplification of the conclusion but are
+  not themselves simplified (cf.\ @{ML asm_simp_tac}), and ``@{text
+  "(no_asm_use)"}'' means assumptions are simplified but are not used
+  in the simplification of each other or the conclusion (cf.\ @{ML
+  full_simp_tac}).  For compatibility reasons, there is also an option
+  ``@{text "(asm_lr)"}'', which means that an assumption is only used
+  for simplifying assumptions which are to the right of it (cf.\ @{ML
+  asm_lr_simp_tac}).
+
+  The configuration option @{text "simp_depth_limit"} limits the number of
+  recursive invocations of the simplifier during conditional
+  rewriting.
+
+  \medskip The Splitter package is usually configured to work as part
+  of the Simplifier.  The effect of repeatedly applying @{ML
+  split_tac} can be simulated by ``@{text "(simp only: split:
+  a\<^sub>1 \<dots> a\<^sub>n)"}''.  There is also a separate @{text split}
+  method available for single-step case splitting.
+*}
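+
+text {*
+  For example, the following sketch contrasts full @{method simp}
+  with the restrictive @{text only} modifier, using the HOL fact
+  @{text add_0_right}:
+*}
+
+(*<*)
+notepad
+begin
+(*>*)
+  fix n :: nat
+  have "n + 0 = n" by simp
+  have "n + 0 = n" by (simp only: add_0_right)
+(*<*)
+end
+(*>*)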
+
+
+subsection {* Declaring rules *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def simp} & : & @{text attribute} \\
+    @{attribute_def split} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{attribute simp} | @@{attribute split}) (() | 'add' | 'del')
+  "}
+
+  \begin{description}
+
+  \item @{command "print_simpset"} prints the collection of rules
+  declared to the Simplifier, which is also known as ``simpset''
+  internally \cite{isabelle-ref}.
+
+  \item @{attribute simp} declares simplification rules.
+
+  \item @{attribute split} declares case split rules.
+
+  \end{description}
+*}
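+
+text {*
+  For example, a derived equation may be turned into a rewrite rule
+  and retracted again (a minimal sketch; the fresh name @{text
+  rev_rev} merely restates the existing HOL fact @{text
+  rev_rev_ident}):
+*}
+
+lemma rev_rev: "rev (rev xs) = xs" by simp
+
+declare rev_rev [simp]
+declare rev_rev [simp del]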
+
+
+subsection {* Congruence rules\label{sec:simp-cong} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def cong} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{attribute cong} (() | 'add' | 'del')
+  "}
+
+  \begin{description}
+
+  \item @{attribute cong} declares congruence rules to the Simplifier
+  context.
+
+  \end{description}
+
+  Congruence rules are equalities of the form @{text [display]
+  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}
+
+  This controls the simplification of the arguments of @{text f}.  For
+  example, some arguments can be simplified under additional
+  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
+  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}
+
+  Given this rule, the simplifier assumes @{text "?Q\<^sub>1"} and extracts
+  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
+  assumptions are effective for rewriting formulae such as @{text "x =
+  0 \<longrightarrow> y + x = y"}.
+
+  %FIXME
+  %The local assumptions are also provided as theorems to the solver;
+  %see \secref{sec:simp-solver} below.
+
+  \medskip The following congruence rule for bounded quantifiers also
+  supplies contextual information --- about the bound variable:
+  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
+    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}
+
+  \medskip This congruence rule for conditional expressions can
+  supply contextual information for simplifying the arms:
+  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
+    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}
+
+  A congruence rule can also \emph{prevent} simplification of some
+  arguments.  Here is an alternative congruence rule for conditional
+  expressions that conforms to non-strict functional evaluation:
+  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}
+
+  Only the first argument is simplified; the others remain unchanged.
+  This can make simplification much faster, but may require an extra
+  case split over the condition @{text "?q"} to prove the goal.
+*}
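+
+text {*
+  For example, Isabelle/HOL provides the strong variant above as
+  @{text if_cong}, while the non-strict @{text if_weak_cong} is
+  active by default; switching is a plain declaration (a minimal
+  sketch):
+*}
+
+declare if_cong [cong]
+declare if_cong [cong del]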
+
+
+subsection {* Simplification procedures *}
+
+text {* Simplification procedures are ML functions that produce proven
+  rewrite rules on demand.  They are associated with higher-order
+  patterns that approximate the left-hand sides of equations.  The
+  Simplifier first matches the current redex against one of the LHS
+  patterns; if this succeeds, the corresponding ML function is
+  invoked, passing the Simplifier context and redex term.  Thus rules
+  may be specifically fashioned for particular situations, resulting
+  in a more powerful mechanism than term rewriting by a fixed set of
+  rules.
+
+  Any successful result needs to be a (possibly conditional) rewrite
+  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
+  rule will be applied just as any ordinary rewrite rule.  It is
+  expected to be already in \emph{internal form}, bypassing the
+  automatic preprocessing of object-level equivalences.
+
+  \begin{matharray}{rcl}
+    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    simproc & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
+      @{syntax text} \\ (@'identifier' (@{syntax nameref}+))?
+    ;
+
+    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
+  "}
+
+  \begin{description}
+
+  \item @{command "simproc_setup"} defines a named simplification
+  procedure that is invoked by the Simplifier whenever any of the
+  given term patterns match the current redex.  The implementation,
+  which is provided as ML source text, needs to be of type @{ML_type
+  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
+  cterm} represents the current redex @{text r} and the result is
+  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
+  generalized version), or @{ML NONE} to indicate failure.  The
+  @{ML_type simpset} argument holds the full context of the current
+  Simplifier invocation, including the actual Isar proof context.  The
+  @{ML_type morphism} informs about the difference of the original
+  compilation context wrt.\ the one of the actual application later
+  on.  The optional @{keyword "identifier"} specifies theorems that
+  represent the logical content of the abstract theory of this
+  simproc.
+
+  Morphisms and identifiers are only relevant for simprocs that are
+  defined within a local target context, e.g.\ in a locale.
+
+  \item @{text "simproc add: name"} and @{text "simproc del: name"}
+  add or delete named simprocs to the current Simplifier context.  The
+  default is to add a simproc.  Note that @{command "simproc_setup"}
+  already adds the new simproc to the subsequent context.
+
+  \end{description}
+*}
+
+
+subsubsection {* Example *}
+
+text {* The following simplification procedure for @{thm
+  [source=false, show_types] unit_eq} in HOL performs fine-grained
+  control over rule application, beyond higher-order pattern matching.
+  Declaring @{thm unit_eq} as @{attribute simp} directly would make
+  the simplifier loop!  Note that a version of this simplification
+  procedure is already active in Isabelle/HOL.  *}
+
+simproc_setup unit ("x::unit") = {*
+  fn _ => fn _ => fn ct =>
+    if HOLogic.is_unit (term_of ct) then NONE
+    else SOME (mk_meta_eq @{thm unit_eq})
+*}
+
+text {* Since the Simplifier applies simplification procedures
+  frequently, it is important to make the failure check in ML
+  reasonably fast. *}
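+
+text {* For instance, with the simproc active, goals that merely
+  differ in the name of a @{typ unit} variable are finished by a
+  single @{method simp} step: *}
+
+lemma "f (x::unit) = f ()"
+  by simp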
+
+
+subsection {* Forward simplification *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def simplified} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{attribute simplified} opt? @{syntax thmrefs}?
+    ;
+
+    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
+  "}
+
+  \begin{description}
+  
+  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
+  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
+  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
+  The result is fully simplified by default, including assumptions and
+  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
+  the same way as for the @{text simp} method.
+
+  Note that forward simplification restricts the simplifier to its
+  most basic operation of term rewriting; solver and looper tactics
+  \cite{isabelle-ref} are \emph{not} involved here.  The @{text
+  simplified} attribute should be only rarely required under normal
+  circumstances.
+
+  \end{description}
+*}
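+
+text {* As a small illustration, consider a (hypothetical) constant
+  @{text double} introduced by a plain definition.  Simplifying a
+  proven result by exactly that definition unfolds it within the
+  stated theorem, without any further rewriting: *}
+
+definition double :: "nat \<Rightarrow> nat"
+  where "double n = n + n"
+
+lemma double_0: "double 0 = 0"
+  by (simp add: double_def)
+
+lemmas double_0_unfolded = double_0 [simplified double_def]
+
+text {* The resulting fact @{text double_0_unfolded} states @{text "0
+  + 0 = 0"}: only @{text double_def} has been used for rewriting. *}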
+
+
+section {* The Classical Reasoner \label{sec:classical} *}
+
+subsection {* Basic concepts *}
+
+text {* Although Isabelle is generic, many users will be working in
+  some extension of classical first-order logic.  Isabelle/ZF is built
+  upon theory FOL, while Isabelle/HOL conceptually contains
+  first-order logic as a fragment.  Theorem-proving in predicate logic
+  is undecidable, but many automated strategies have been developed to
+  assist in this task.
+
+  Isabelle's classical reasoner is a generic package that accepts
+  certain information about a logic and delivers a suite of automatic
+  proof tools, based on rules that are classified and declared in the
+  context.  These proof procedures are slow and simplistic compared
+  with high-end automated theorem provers, but they can save
+  considerable time and effort in practice.  They can prove theorems
+  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
+  milliseconds (including full proof reconstruction): *}
+
+lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
+  by blast
+
+lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
+  by blast
+
+text {* The proof tools are generic.  They are not restricted to
+  first-order logic, and have been heavily used in the development of
+  the Isabelle/HOL library and applications.  The tactics can be
+  traced, and their components can be called directly; in this manner,
+  any proof can be viewed interactively.  *}
+
+
+subsubsection {* The sequent calculus *}
+
+text {* Isabelle supports natural deduction, which is easy to use for
+  interactive proof.  But natural deduction does not easily lend
+  itself to automation, and has a bias towards intuitionism.  For
+  certain proofs in classical logic, it cannot be called natural.
+  The \emph{sequent calculus}, a generalization of natural deduction,
+  is easier to automate.
+
+  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
+  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
+  logic, sequents can equivalently be made from lists or multisets of
+  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
+  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
+  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
+  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
+  sequent is \textbf{basic} if its left and right sides have a common
+  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
+  valid.
+
+  Sequent rules are classified as \textbf{right} or \textbf{left},
+  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
+  Rules that operate on the right side are analogous to natural
+  deduction's introduction rules, and left rules are analogous to
+  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
+  is the rule
+  \[
+  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
+  \]
+  Applying the rule backwards, this breaks down some implication on
+  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
+  the sets of formulae that are unaffected by the inference.  The
+  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
+  single rule
+  \[
+  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
+  \]
+  This breaks down some disjunction on the right side, replacing it by
+  both disjuncts.  Thus, the sequent calculus is a kind of
+  multiple-conclusion logic.
+
+  To illustrate the use of multiple formulae on the right, let us
+  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
+  backwards, we reduce this formula to a basic sequent:
+  \[
+  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
+    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
+      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
+        {@{text "P, Q \<turnstile> Q, P"}}}}
+  \]
+
+  This example is typical of the sequent calculus: start with the
+  desired theorem and apply rules backwards in a fairly arbitrary
+  manner.  This yields a surprisingly effective proof procedure.
+  Quantifiers add only a few complications, since Isabelle handles
+  parameters and schematic variables.  See \cite[Chapter
+  10]{paulson-ml2} for further discussion.  *}
+
+
+subsubsection {* Simulating sequents by natural deduction *}
+
+text {* Isabelle can represent sequents directly, as in the
+  object-logic LK.  But natural deduction is easier to work with, and
+  most object-logics employ it.  Fortunately, we can simulate the
+  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
+  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> ... \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
+  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
+  Elim-resolution plays a key role in simulating sequent proofs.
+
+  We can easily handle reasoning on the left.  Elim-resolution with
+  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
+  a similar effect as the corresponding sequent rules.  For the other
+  connectives, we use sequent-style elimination rules instead of
+  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
+  But note that the rule @{text "(\<not>L)"} has no effect under our
+  representation of sequents!
+  \[
+  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
+  \]
+
+  What about reasoning on the right?  Introduction rules can only
+  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
+  other right-side formulae are represented as negated assumptions,
+  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
+  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
+  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
+
+  To ensure that swaps occur only when necessary, each introduction
+  rule is converted into a swapped form: it is resolved with the
+  second premise of @{text "(swap)"}.  The swapped form of @{text
+  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
+  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
+
+  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
+  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
+
+  Swapped introduction rules are applied using elim-resolution, which
+  deletes the negated formula.  Our representation of sequents also
+  requires the use of ordinary introduction rules.  If we had no
+  regard for readability of intermediate goal states, we could treat
+  the right side more uniformly by representing sequents as @{text
+  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
+*}
+
+
+subsubsection {* Extra rules for the sequent calculus *}
+
+text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
+  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
+  In addition, we need rules to embody the classical equivalence
+  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
+  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
+  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
+
+  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
+  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
+
+  Quantifier replication also requires special rules.  In classical
+  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
+  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
+  \[
+  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
+  \qquad
+  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
+  \]
+  Thus both kinds of quantifier may be replicated.  Theorems requiring
+  multiple uses of a universal formula are easy to invent; consider
+  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
+  @{text "n > 1"}.  Natural examples of the multiple use of an
+  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
+  \<longrightarrow> P y"}.
+
+  Forgoing quantifier replication loses completeness, but gains
+  decidability, since the search space becomes finite.  Many useful
+  theorems can be proved without replication, and the search generally
+  delivers its verdict in a reasonable time.  To adopt this approach,
+  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
+  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
+  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
+  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
+
+  Elim-resolution with this rule will delete the universal formula
+  after a single use.  To replicate universal quantifiers, replace the
+  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
+
+  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
+  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
+
+  All introduction rules mentioned above are also useful in swapped
+  form.
+
+  Replication makes the search space infinite; we must apply the rules
+  with care.  The classical reasoner distinguishes between safe and
+  unsafe rules, applying the latter only when there is no alternative.
+  Depth-first search may well go down a blind alley; best-first search
+  is better behaved in an infinite search space.  However, quantifier
+  replication is too expensive to prove any but the simplest theorems.
+*}
+
+
+subsection {* Rule declarations *}
+
+text {* The proof tools of the Classical Reasoner depend on
+  collections of rules declared in the context, which are classified
+  as introduction, elimination or destruction and as \emph{safe} or
+  \emph{unsafe}.  In general, safe rules can be attempted blindly,
+  while unsafe rules must be used with care.  A safe rule must never
+  reduce a provable goal to an unprovable set of subgoals.
+
+  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
+  \<or> Q"} to @{text "P"}, which might turn out as premature choice of an
+  unprovable subgoal.  Any rule is unsafe whose premises contain new
+  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
+  unsafe, since it is applied via elim-resolution, which discards the
+  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
+  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
+  unsafe for similar reasons.  The quantifier duplication rule @{text
+  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
+  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
+  looping.  In classical first-order logic, all rules are safe except
+  those mentioned above.
+
+  The safe~/ unsafe distinction is vague, and may be regarded merely
+  as a way of giving some rules priority over others.  One could argue
+  that @{text "(\<or>E)"} is unsafe, because repeated application of it
+  could generate exponentially many subgoals.  Induction rules are
+  unsafe because inductive proofs are difficult to set up
+  automatically.  Any inference is unsafe that instantiates an unknown
+  in the proof state --- thus matching must be used, rather than
+  unification.  Even proof by assumption is unsafe if it instantiates
+  unknowns shared with other subgoals.
+
+  \begin{matharray}{rcl}
+    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def intro} & : & @{text attribute} \\
+    @{attribute_def elim} & : & @{text attribute} \\
+    @{attribute_def dest} & : & @{text attribute} \\
+    @{attribute_def rule} & : & @{text attribute} \\
+    @{attribute_def iff} & : & @{text attribute} \\
+    @{attribute_def swapped} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
+    ;
+    @@{attribute rule} 'del'
+    ;
+    @@{attribute iff} (((() | 'add') '?'?) | 'del')
+  "}
+
+  \begin{description}
+
+  \item @{command "print_claset"} prints the collection of rules
+  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
+  within the context.
+
+  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
+  declare introduction, elimination, and destruction rules,
+  respectively.  By default, rules are considered as \emph{unsafe}
+  (i.e.\ not applied blindly without backtracking), while ``@{text
+  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
+  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
+  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
+  of the @{method rule} method).  The optional natural number
+  specifies an explicit weight argument, which is ignored by the
+  automated reasoning tools, but determines the search order of single
+  rule steps.
+
+  Introduction rules are those that can be applied using ordinary
+  resolution.  Their swapped forms are generated internally, which
+  will be applied using elim-resolution.  Elimination rules are
+  applied using elim-resolution.  Rules are sorted by the number of
+  new subgoals they will yield; rules that generate the fewest
+  subgoals will be tried first.  Otherwise, later declarations take
+  precedence over earlier ones.
+
+  Rules already present in the context with the same classification
+  are ignored.  A warning is printed if the rule has already been
+  added with some other classification, but the rule is added anyway
+  as requested.
+
+  \item @{attribute rule}~@{text del} deletes all occurrences of a
+  rule from the classical context, regardless of its classification as
+  introduction~/ elimination~/ destruction and safe~/ unsafe.
+
+  \item @{attribute iff} declares logical equivalences to the
+  Simplifier and the Classical reasoner at the same time.
+  Non-conditional rules result in a safe introduction and elimination
+  pair; conditional ones are considered unsafe.  Rules with negative
+  conclusion are automatically inverted (using @{text "\<not>"}-elimination
+  internally).
+
+  The ``@{text "?"}'' version of @{attribute iff} declares rules to
+  the Isabelle/Pure context only, and omits the Simplifier
+  declaration.
+
+  \item @{attribute swapped} turns an introduction rule into an
+  elimination, by resolving with the classical swap principle @{text
+  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
+  illustrative purposes: the Classical Reasoner already swaps rules
+  internally as explained above.
+
+  \end{description}
+*}
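+
+text {* For example, the standard setup of Isabelle/HOL contains
+  declarations like the following ones; repeating them here is
+  harmless, since duplicate declarations with the same classification
+  are ignored: *}
+
+declare conjI [intro!] and disjE [elim!]
+declare exI [intro] and exE [elim!]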
+
+
+subsection {* Structured methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def rule} & : & @{text method} \\
+    @{method_def contradiction} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method rule} @{syntax thmrefs}?
+  "}
+
+  \begin{description}
+
+  \item @{method rule} as offered by the Classical Reasoner is a
+  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
+  versions work the same, but the classical version observes the
+  classical rule context in addition to that of Isabelle/Pure.
+
+  Common object logics (HOL, ZF, etc.) declare a rich collection of
+  classical rules (even if these would qualify as intuitionistic
+  ones), but only a few declarations to the rule context of
+  Isabelle/Pure (\secref{sec:pure-meth-att}).
+
+  \item @{method contradiction} solves some goal by contradiction,
+  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
+  facts, which are guaranteed to participate, may appear in either
+  order.
+
+  \end{description}
+*}
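+
+text {* A typical use of @{method contradiction} chains the two
+  contradictory facts into the method: *}
+
+lemma
+  assumes "\<not> A" and "A"
+  shows "B"
+  using assms by contradiction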
+
+
+subsection {* Automated methods *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def blast} & : & @{text method} \\
+    @{method_def auto} & : & @{text method} \\
+    @{method_def force} & : & @{text method} \\
+    @{method_def fast} & : & @{text method} \\
+    @{method_def slow} & : & @{text method} \\
+    @{method_def best} & : & @{text method} \\
+    @{method_def fastforce} & : & @{text method} \\
+    @{method_def slowsimp} & : & @{text method} \\
+    @{method_def bestsimp} & : & @{text method} \\
+    @{method_def deepen} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
+    ;
+    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
+    ;
+    @@{method force} (@{syntax clasimpmod} * )
+    ;
+    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
+    ;
+    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
+      (@{syntax clasimpmod} * )
+    ;
+    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
+    ;
+    @{syntax_def clamod}:
+      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
+    ;
+    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
+      ('cong' | 'split') (() | 'add' | 'del') |
+      'iff' (((() | 'add') '?'?) | 'del') |
+      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
+  "}
+
+  \begin{description}
+
+  \item @{method blast} is a separate classical tableau prover that
+  uses the same classical rule declarations as explained before.
+
+  Proof search is coded directly in ML using special data structures.
+  A successful proof is then reconstructed using regular Isabelle
+  inferences.  It is faster and more powerful than the other classical
+  reasoning tools, but has major limitations too.
+
+  \begin{itemize}
+
+  \item It does not use the classical wrapper tacticals, such as the
+  integration with the Simplifier of @{method fastforce}.
+
+  \item It does not perform higher-order unification, as needed by the
+  rule @{thm [source=false] rangeI} in HOL.  There are often
+  alternatives to such rules, for example @{thm [source=false]
+  range_eqI}.
+
+  \item Function variables may only be applied to parameters of the
+  subgoal.  (This restriction arises because the prover does not use
+  higher-order unification.)  If other function variables are present
+  then the prover will fail with the message \texttt{Function Var's
+  argument not a bound variable}.
+
+  \item Its proof strategy is more general than @{method fast} but can
+  be slower.  If @{method blast} fails or seems to be running forever,
+  try @{method fast} and the other proof tools described below.
+
+  \end{itemize}
+
+  The optional integer argument specifies a bound for the number of
+  unsafe steps used in a proof.  By default, @{method blast} starts
+  with a bound of 0 and increases it successively to 20.  In contrast,
+  @{text "(blast lim)"} tries to prove the goal using a search bound
+  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
+  be made much faster by supplying the successful search bound to this
+  proof method instead.
+
+  \item @{method auto} combines classical reasoning with
+  simplification.  It is intended for situations where there are a lot
+  of mostly trivial subgoals; it proves all the easy ones, leaving the
+  ones it cannot prove.  Occasionally, attempting to prove the hard
+  ones may take a long time.
+
+  The optional depth arguments in @{text "(auto m n)"} refer to its
+  builtin classical reasoning procedures: @{text m} (default 4) is for
+  @{method blast}, which is tried first, and @{text n} (default 2) is
+  for a slower but more general alternative that also takes wrappers
+  into account.
+
+  \item @{method force} is intended to prove the first subgoal
+  completely, using many fancy proof tools and performing a rather
+  exhaustive search.  As a result, proof attempts may take rather long
+  or diverge easily.
+
+  \item @{method fast}, @{method best}, @{method slow} attempt to
+  prove the first subgoal using sequent-style reasoning as explained
+  before.  Unlike @{method blast}, they construct proofs directly in
+  Isabelle.
+
+  There is a difference in search strategy and back-tracking: @{method
+  fast} uses depth-first search and @{method best} uses best-first
+  search (guided by a heuristic function: normally the total size of
+  the proof state).
+
+  Method @{method slow} is like @{method fast}, but conducts a broader
+  search: it may, when backtracking from a failed proof attempt, undo
+  even the step of proving a subgoal by assumption.
+
+  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
+  are like @{method fast}, @{method slow}, @{method best},
+  respectively, but use the Simplifier as additional wrapper.  The
+  name @{method fastforce} reflects the behaviour of this popular
+  method better, without requiring an understanding of its
+  implementation.
+
+  \item @{method deepen} works by exhaustive search up to a certain
+  depth.  The start depth is 4 (unless specified explicitly), and the
+  depth is increased iteratively up to 10.  Unsafe rules are modified
+  to preserve the formula they act on, so that it can be used repeatedly.
+  This method can prove more goals than @{method fast}, but is much
+  slower, for example if the assumptions have many universal
+  quantifiers.
+
+  \end{description}
+
+  All of the above methods support additional modifiers of the context
+  of classical (and simplifier) rules, but the ones related to the
+  Simplifier are explicitly prefixed by @{text simp} here.  The
+  semantics of these ad-hoc rule declarations is analogous to the
+  attributes given before.  Facts provided by forward chaining are
+  inserted into the goal before commencing proof search.
+*}
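+
+text {* The modifier syntax is illustrated below: the @{text "dest:"}
+  declaration is actually redundant here, since @{text subsetD} is
+  already known to the Classical Reasoner of Isabelle/HOL, but it
+  shows how ad-hoc rules are supplied: *}
+
+lemma "a \<in> A \<Longrightarrow> A \<subseteq> B \<Longrightarrow> B \<subseteq> C \<Longrightarrow> a \<in> C"
+  by (blast dest: subsetD)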
+
+
+subsection {* Semi-automated methods *}
+
+text {* These proof methods may help in situations when the
+  fully-automated tools fail.  The result is a simpler subgoal that
+  can be tackled by other means, such as by manual instantiation of
+  quantifiers.
+
+  \begin{matharray}{rcl}
+    @{method_def safe} & : & @{text method} \\
+    @{method_def clarify} & : & @{text method} \\
+    @{method_def clarsimp} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
+    ;
+    @@{method clarsimp} (@{syntax clasimpmod} * )
+  "}
+
+  \begin{description}
+
+  \item @{method safe} repeatedly performs safe steps on all subgoals.
+  It is deterministic, with at most one outcome.
+
+  \item @{method clarify} performs a series of safe steps without
+  splitting subgoals; see also @{ML clarify_step_tac}.
+
+  \item @{method clarsimp} acts like @{method clarify}, but also does
+  simplification.  Note that if the Simplifier context includes a
+  splitter for the premises, the subgoal may still be split.
+
+  \end{description}
+*}
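+
+text {* For example, @{method clarify} strips away the outer logical
+  structure of the following goal, leaving a single subgoal with
+  fixed @{text x} and the quantified premise exposed; the remaining
+  step is then finished by other means: *}
+
+lemma "(\<exists>x. P x) \<and> (\<forall>x. P x \<longrightarrow> Q x) \<longrightarrow> (\<exists>x. Q x)"
+  apply clarify
+  by blast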
+
+
+subsection {* Single-step tactics *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{index_ML safe_step_tac: "Proof.context -> int -> tactic"} \\
+    @{index_ML inst_step_tac: "Proof.context -> int -> tactic"} \\
+    @{index_ML step_tac: "Proof.context -> int -> tactic"} \\
+    @{index_ML slow_step_tac: "Proof.context -> int -> tactic"} \\
+    @{index_ML clarify_step_tac: "Proof.context -> int -> tactic"} \\
+  \end{matharray}
+
+  These are the primitive tactics behind the (semi)automated proof
+  methods of the Classical Reasoner.  By calling them yourself, you
+  can execute these procedures one step at a time.
+
+  \begin{description}
+
+  \item @{ML safe_step_tac}~@{text "ctxt i"} performs a safe step on
+  subgoal @{text i}.  The safe wrapper tacticals are applied to a
+  tactic that may include proof by assumption or Modus Ponens (taking
+  care not to instantiate unknowns), or substitution.
+
+  \item @{ML inst_step_tac} is like @{ML safe_step_tac}, but allows
+  unknowns to be instantiated.
+
+  \item @{ML step_tac}~@{text "ctxt i"} is the basic step of the proof
+  procedure.  The unsafe wrapper tacticals are applied to a tactic
+  that tries @{ML safe_tac}, @{ML inst_step_tac}, or applies an unsafe
+  rule from the context.
+
+  \item @{ML slow_step_tac} resembles @{ML step_tac}, but allows
+  backtracking between using safe rules with instantiation (@{ML
+  inst_step_tac}) and using unsafe rules.  The resulting search space
+  is larger.
+
+  \item @{ML clarify_step_tac}~@{text "ctxt i"} performs a safe step
+  on subgoal @{text i}.  No splitting step is applied; for example,
+  the subgoal @{text "A \<and> B"} is left as a conjunction.  Proof by
+  assumption, Modus Ponens, etc., may be performed provided they do
+  not instantiate unknowns.  Assumptions of the form @{text "x = t"}
+  may be eliminated.  The safe wrapper tactical is applied.
+
+  \end{description}
+*}
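+
+text {* As a small example, iterating @{ML safe_step_tac} via the
+  @{method tactic} method is sufficient for purely safe goals: *}
+
+lemma "A \<and> B \<Longrightarrow> B \<and> A"
+  by (tactic {* REPEAT (safe_step_tac @{context} 1) *})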
+
+
+section {* Object-logic setup \label{sec:object-logic} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{method_def atomize} & : & @{text method} \\
+    @{attribute_def atomize} & : & @{text attribute} \\
+    @{attribute_def rule_format} & : & @{text attribute} \\
+    @{attribute_def rulify} & : & @{text attribute} \\
+  \end{matharray}
+
+  The very starting point for any Isabelle object-logic is a ``truth
+  judgment'' that links object-level statements to the meta-logic
+  (with its minimal language of @{text prop} that covers universal
+  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).
+
+  Common object-logics are sufficiently expressive to internalize rule
+  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
+  language.  This is useful in certain situations where a rule needs
+  to be viewed as an atomic statement from the meta-level perspective,
+  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.
+
+  From the following language elements, only the @{method atomize}
+  method and @{attribute rule_format} attribute are occasionally
+  required by end-users; the rest is for those who need to set up
+  their own object-logic.  In the latter case existing formulations of
+  Isabelle/FOL or Isabelle/HOL may be taken as realistic examples.
+
+  Generic tools may refer to the information provided by object-logic
+  declarations internally.
+
+  @{rail "
+    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
+    ;
+    @@{attribute atomize} ('(' 'full' ')')?
+    ;
+    @@{attribute rule_format} ('(' 'noasm' ')')?
+  "}
+
+  \begin{description}
+  
+  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
+  @{text c} as the truth judgment of the current object-logic.  Its
+  type @{text \<sigma>} should specify a coercion of the category of
+  object-level propositions to @{text prop} of the Pure meta-logic;
+  the mixfix annotation @{text "(mx)"} would typically just link the
+  object language (internally of syntactic category @{text logic})
+  with that of @{text prop}.  Only one @{command "judgment"}
+  declaration may be given in any theory development.
+  
+  \item @{method atomize} (as a method) rewrites any non-atomic
+  premises of a sub-goal, using the meta-level equations declared via
+  @{attribute atomize} (as an attribute) beforehand.  As a result,
+  heavily nested goals become amenable to fundamental operations such
+  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
+  "(full)"}'' option here means to turn the whole subgoal into an
+  object-statement (if possible), including the outermost parameters
+  and assumptions as well.
+
+  A typical collection of @{attribute atomize} rules for a particular
+  object-logic would provide an internalization for each of the
+  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
+  Meta-level conjunction should be covered as well (this is
+  particularly important for locales, see \secref{sec:locale}).
+
+  \item @{attribute rule_format} rewrites a theorem by the equalities
+  declared as @{attribute rulify} rules in the current object-logic.
+  By default, the result is fully normalized, including assumptions
+  and conclusions at any depth.  The @{text "(noasm)"} option
+  restricts the transformation to the conclusion of a rule.
+
+  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
+  rule_format} is to replace (bounded) universal quantification
+  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
+  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
+
+  \end{description}
+*}
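+
+text {* The effect of @{attribute rule_format} can be illustrated by
+  a (hypothetical) fact that is stated with object-level connectives,
+  but intended to be used as a rule: *}
+
+lemma example_rule [rule_format]: "\<forall>x. x \<in> A \<inter> B \<longrightarrow> x \<in> A"
+  by blast
+
+text {* The resulting fact @{text example_rule} reads @{text "?x \<in> A
+  \<inter> B \<Longrightarrow> ?x \<in> A"}, with @{text "\<forall>"} and @{text "\<longrightarrow>"} replaced by
+  their meta-level counterparts. *}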
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/HOL_Specific.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,2366 @@
+theory HOL_Specific
+imports Base Main "~~/src/HOL/Library/Old_Recdef"
+begin
+
+chapter {* Isabelle/HOL \label{ch:hol} *}
+
+section {* Higher-Order Logic *}
+
+text {* Isabelle/HOL is based on Higher-Order Logic, a polymorphic
+  version of Church's Simple Theory of Types.  HOL can be best
+  understood as a simply-typed version of classical set theory.  The
+  logic was first implemented in Gordon's HOL system
+  \cite{mgordon-hol}.  It extends Church's original logic
+  \cite{church40} by explicit type variables (naive polymorphism) and
+  a sound axiomatization scheme for new types based on subsets of
+  existing types.
+
+  Andrews's book \cite{andrews86} is a full description of the
+  original Church-style higher-order logic, with proofs of correctness
+  and completeness wrt.\ certain set-theoretic interpretations.  The
+  particular extensions of Gordon-style HOL are explained semantically
+  in two chapters of the 1993 HOL book \cite{pitts93}.
+
+  Experience with HOL over decades has demonstrated that higher-order
+  logic is widely applicable in many areas of mathematics and computer
+  science.  In a sense, Higher-Order Logic is simpler than First-Order
+  Logic, because there are fewer restrictions and special cases.  Note
+  that HOL is \emph{weaker} than FOL with axioms for ZF set theory,
+  which is traditionally considered the standard foundation of regular
+  mathematics, but for most applications this does not matter.  If you
+  prefer ML to Lisp, you will probably prefer HOL to ZF.
+
+  \medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
+  functional programming.  Function application is curried.  To apply
+  the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
+  arguments @{text a} and @{text b} in HOL, you simply write @{text "f
+  a b"} (as in ML or Haskell).  There is no ``apply'' operator; the
+  existing application of the Pure @{text "\<lambda>"}-calculus is re-used.
+  Note that in HOL @{text "f (a, b)"} means ``@{text "f"} applied to
+  the pair @{text "(a, b)"}'' (which is notation for @{text "Pair a
+  b"}).  The latter typically introduces extra formal effort that can
+  be avoided by currying functions by default.  Explicit tuples are as
+  infrequent in HOL formalizations as in good ML or Haskell programs.
+
+  \medskip Isabelle/HOL has a distinct feel, compared to other
+  object-logics like Isabelle/ZF.  It identifies object-level types
+  with meta-level types, taking advantage of the default
+  type-inference mechanism of Isabelle/Pure.  HOL fully identifies
+  object-level functions with meta-level functions, with native
+  abstraction and application.
+
+  These identifications allow Isabelle to support HOL particularly
+  nicely, but they also mean that HOL requires some sophistication
+  from the user.  In particular, it requires an understanding of
+  Hindley-Milner type-inference with type-classes, both of which are
+  used extensively in the standard libraries and applications.
+  Beginners can set
+  @{attribute show_types} or even @{attribute show_sorts} to get more
+  explicit information about the result of type-inference.  *}
+
+
+section {* Inductive and coinductive definitions \label{sec:hol-inductive} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "coinductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "coinductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{attribute_def (HOL) mono} & : & @{text attribute} \\
+  \end{matharray}
+
+  An \emph{inductive definition} specifies the least predicate or set
+  @{text R} closed under given rules: applying a rule to elements of
+  @{text R} yields a result within @{text R}.  For example, a
+  structural operational semantics is an inductive definition of an
+  evaluation relation.
+
+  Dually, a \emph{coinductive definition} specifies the greatest
+  predicate or set @{text R} that is consistent with given rules:
+  every element of @{text R} can be seen as arising by applying a rule
+  to elements of @{text R}.  An important example is using
+  bisimulation relations to formalise equivalence of processes and
+  infinite data structures.
+
+  Both inductive and coinductive definitions are based on the
+  Knaster-Tarski fixed-point theorem for complete lattices.  The
+  collection of introduction rules given by the user determines a
+  functor on subsets of set-theoretic relations.  The required
+  monotonicity of the recursion scheme is proven as a prerequisite to
+  the fixed-point definition and the resulting consequences.  This
+  works by pushing inclusion through logical connectives and any other
+  operator that might be wrapped around recursive occurrences of the
+  defined relation: there must be a monotonicity theorem of the form
+  @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for each premise @{text "\<M> R t"} in an
+  introduction rule.  The default rule declarations of Isabelle/HOL
+  already take care of most common situations.
+
+  @{rail "
+    (@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
+      @@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
+    @{syntax target}? \\
+    @{syntax \"fixes\"} (@'for' @{syntax \"fixes\"})? (@'where' clauses)? \\
+    (@'monos' @{syntax thmrefs})?
+    ;
+    clauses: (@{syntax thmdecl}? @{syntax prop} + '|')
+    ;
+    @@{attribute (HOL) mono} (() | 'add' | 'del')
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "inductive"} and @{command (HOL)
+  "coinductive"} define (co)inductive predicates from the introduction
+  rules.
+
+  The propositions given as @{text "clauses"} in the @{keyword
+  "where"} part are either rules of the usual @{text "\<And>/\<Longrightarrow>"} format
+  (with arbitrary nesting), or equalities using @{text "\<equiv>"}.  The
+  latter specifies extra-logical abbreviations in the sense of
+  @{command_ref abbreviation}.  Introducing abstract syntax
+  simultaneously with the actual introduction rules is occasionally
+  useful for complex specifications.
+
+  The optional @{keyword "for"} part contains a list of parameters of
+  the (co)inductive predicates that remain fixed throughout the
+  definition, in contrast to arguments of the relation that may vary
+  in each occurrence within the given @{text "clauses"}.
+
+  The optional @{keyword "monos"} declaration contains additional
+  \emph{monotonicity theorems}, which are required for each operator
+  applied to a recursive set in the introduction rules.
+
+  \item @{command (HOL) "inductive_set"} and @{command (HOL)
+  "coinductive_set"} are wrappers for to the previous commands for
+  native HOL predicates.  This allows to define (co)inductive sets,
+  where multiple arguments are simulated via tuples.
+
+  \item @{attribute (HOL) mono} declares monotonicity rules in the
+  context.  These rules are involved in the automated monotonicity
+  proof of the above inductive and coinductive definitions.
+
+  \end{description}
+*}
+
+
+subsection {* Derived rules *}
+
+text {* A (co)inductive definition of @{text R} provides the following
+  main theorems:
+
+  \begin{description}
+
+  \item @{text R.intros} is the list of introduction rules as proven
+  theorems, for the recursive predicates (or sets).  The rules are
+  also available individually, using the names given them in the
+  theory file;
+
+  \item @{text R.cases} is the case analysis (or elimination) rule;
+
+  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
+  rule;
+
+  \item @{text R.simps} is the equation unrolling the fixpoint of the
+  predicate one step.
+
+  \end{description}
+
+  When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are
+  defined simultaneously, the list of introduction rules is called
+  @{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the case analysis rules are
+  called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
+  of mutual induction rules is called @{text
+  "R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
+*}
+
+
+subsection {* Monotonicity theorems *}
+
+text {* The context maintains a default set of theorems that are used
+  in monotonicity proofs.  New rules can be declared via the
+  @{attribute (HOL) mono} attribute.  See the main Isabelle/HOL
+  sources for some examples.  The general format of such monotonicity
+  theorems is as follows:
+
+  \begin{itemize}
+
+  \item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
+  monotonicity of inductive definitions whose introduction rules have
+  premises involving terms such as @{text "\<M> R t"}.
+
+  \item Monotonicity theorems for logical operators, which are of the
+  general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}.  For example, in
+  the case of the operator @{text "\<or>"}, the corresponding theorem is
+  \[
+  \infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
+  \]
+
+  \item De Morgan style equations for reasoning about the ``polarity''
+  of expressions, e.g.
+  \[
+  @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
+  @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
+  \]
+
+  \item Equations for reducing complex operators to more primitive
+  ones whose monotonicity can easily be proved, e.g.
+  \[
+  @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
+  @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
+  \]
+
+  \end{itemize}
+*}
+
+subsubsection {* Examples *}
+
+text {* The finite powerset operator can be defined inductively like this: *}
+
+inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
+where
+  empty: "{} \<in> Fin A"
+| insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
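+
+text {* The derived rules are available immediately; e.g.\ rule
+  @{text Fin.induct} proves the following elementary consequence of
+  the definition: *}
+
+lemma "B \<in> Fin A \<Longrightarrow> B \<subseteq> A"
+  by (erule Fin.induct) blast+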
+
+text {* The accessible part of a relation is defined as follows: *}
+
+inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
+  for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infix "\<prec>" 50)
+where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
+
+text {* Common logical connectives can be easily characterized as
+non-recursive inductive definitions with parameters, but without
+arguments. *}
+
+inductive AND for A B :: bool
+where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
+
+inductive OR for A B :: bool
+where "A \<Longrightarrow> OR A B"
+  | "B \<Longrightarrow> OR A B"
+
+inductive EXISTS for B :: "'a \<Rightarrow> bool"
+where "B a \<Longrightarrow> EXISTS B"
+
+text {* Here the @{text "cases"} or @{text "induct"} rules produced by
+  the @{command inductive} package coincide with the expected
+  elimination rules for Natural Deduction.  Already in the original
+  article by Gerhard Gentzen \cite{Gentzen:1935} there is a hint that
+  each connective can be characterized by its introductions, and the
+  elimination can be constructed systematically. *}
+
+
+section {* Recursive functions \label{sec:recursion} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def (HOL) "function"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+    @{command_def (HOL) "termination"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) primrec} @{syntax target}? @{syntax \"fixes\"} @'where' equations
+    ;
+    (@@{command (HOL) fun} | @@{command (HOL) function}) @{syntax target}? functionopts?
+      @{syntax \"fixes\"} \\ @'where' equations
+    ;
+
+    equations: (@{syntax thmdecl}? @{syntax prop} + '|')
+    ;
+    functionopts: '(' (('sequential' | 'domintros') + ',') ')'
+    ;
+    @@{command (HOL) termination} @{syntax term}?
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "primrec"} defines primitive recursive
+  functions over datatypes (see also @{command_ref (HOL) datatype} and
+  @{command_ref (HOL) rep_datatype}).  The given @{text equations}
+  specify reduction rules that are produced by instantiating the
+  generic combinator for primitive recursion that is available for
+  each datatype.
+
+  Each equation needs to be of the form:
+
+  @{text [display] "f x\<^sub>1 \<dots> x\<^sub>m (C y\<^sub>1 \<dots> y\<^sub>k) z\<^sub>1 \<dots> z\<^sub>n = rhs"}
+
+  such that @{text C} is a datatype constructor, @{text rhs} contains
+  only the free variables on the left-hand side (or from the context),
+  and all recursive occurrences of @{text "f"} in @{text "rhs"} are of
+  the form @{text "f \<dots> y\<^sub>i \<dots>"} for some @{text i}.  At most one
+  reduction rule for each constructor can be given.  The order does
+  not matter.  For missing constructors, the function is defined to
+  return a default value, but this equation is made difficult to
+  access for users.
+
+  The reduction rules are declared as @{attribute simp} by default,
+  which enables standard proof methods like @{method simp} and
+  @{method auto} to normalize expressions of @{text "f"} applied to
+  datatype constructions, by simulating symbolic computation via
+  rewriting.
+
+  \item @{command (HOL) "function"} defines functions by general
+  wellfounded recursion. A detailed description with examples can be
+  found in \cite{isabelle-function}. The function is specified by a
+  set of (possibly conditional) recursive equations with arbitrary
+  pattern matching. The command generates proof obligations for the
+  completeness and the compatibility of patterns.
+
+  The defined function is considered partial, and the resulting
+  simplification rules (named @{text "f.psimps"}) and induction rule
+  (named @{text "f.pinduct"}) are guarded by a generated domain
+  predicate @{text "f_dom"}. The @{command (HOL) "termination"}
+  command can then be used to establish that the function is total.
+
+  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command
+  (HOL) "function"}~@{text "(sequential)"}, followed by automated
+  proof attempts regarding pattern matching and termination.  See
+  \cite{isabelle-function} for further details.
+
+  \item @{command (HOL) "termination"}~@{text f} commences a
+  termination proof for the previously defined function @{text f}.  If
+  this is omitted, the command refers to the most recent function
+  definition.  After the proof is closed, the recursive equations and
+  the induction principle are established.
+
+  \end{description}
+
+  Recursive definitions introduced by the @{command (HOL) "function"}
+  command accommodate reasoning by induction (cf.\ @{method induct}):
+  rule @{text "f.induct"} refers to a specific induction rule, with
+  parameters named according to the user-specified equations. Cases
+  are numbered starting from 1.  For @{command (HOL) "primrec"}, the
+  induction principle coincides with structural recursion on the
+  datatype where the recursion is carried out.
+
+  The equations provided by these packages may be referred to later
+  as the theorem list @{text "f.simps"}, where @{text f} is the (collective)
+  name of the functions defined.  Individual equations may be named
+  explicitly as well.
+
+  The @{command (HOL) "function"} command accepts the following
+  options.
+
+  \begin{description}
+
+  \item @{text sequential} enables a preprocessor which disambiguates
+  overlapping patterns by making them mutually disjoint.  Earlier
+  equations take precedence over later ones.  This makes it possible
+  to give the specification in a format very similar to functional
+  programming.
+  Note that the resulting simplification and induction rules
+  correspond to the transformed specification, not the one given
+  originally. This usually means that each equation given by the user
+  may result in several theorems.  Also note that this automatic
+  transformation only works for ML-style datatype patterns.
+
+  \item @{text domintros} enables the automated generation of
+  introduction rules for the domain predicate. While mostly not
+  needed, they can be helpful in some proofs about partial functions.
+
+  \end{description}
+*}
+
+subsubsection {* Example: evaluation of expressions *}
+
+text {* Subsequently, we define mutual datatypes for arithmetic and
+  boolean expressions, and use @{command primrec} for evaluation
+  functions that follow the same recursive structure. *}
+
+datatype 'a aexp =
+    IF "'a bexp"  "'a aexp"  "'a aexp"
+  | Sum "'a aexp"  "'a aexp"
+  | Diff "'a aexp"  "'a aexp"
+  | Var 'a
+  | Num nat
+and 'a bexp =
+    Less "'a aexp"  "'a aexp"
+  | And "'a bexp"  "'a bexp"
+  | Neg "'a bexp"
+
+
+text {* \medskip Evaluation of arithmetic and boolean expressions *}
+
+primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
+  and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
+where
+  "evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)"
+| "evala env (Sum a1 a2) = evala env a1 + evala env a2"
+| "evala env (Diff a1 a2) = evala env a1 - evala env a2"
+| "evala env (Var v) = env v"
+| "evala env (Num n) = n"
+| "evalb env (Less a1 a2) = (evala env a1 < evala env a2)"
+| "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
+| "evalb env (Neg b) = (\<not> evalb env b)"
+
+text {* Since the value of an expression depends on the value of its
+  variables, the functions @{const evala} and @{const evalb} take an
+  additional parameter, an \emph{environment} that maps variables to
+  their values.
+
+  \medskip Substitution on expressions can be defined similarly.  The
+  mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
+  parameter is lifted canonically on the types @{typ "'a aexp"} and
+  @{typ "'a bexp"}, respectively.
+*}
+
+primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
+  and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
+where
+  "substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)"
+| "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)"
+| "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)"
+| "substa f (Var v) = f v"
+| "substa f (Num n) = Num n"
+| "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)"
+| "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
+| "substb f (Neg b) = Neg (substb f b)"
+
+text {* In textbooks about semantics one often finds substitution
+  theorems, which express the relationship between substitution and
+  evaluation.  For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
+  such a theorem by mutual induction, followed by simplification.
+*}
+
+lemma subst_one:
+  "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
+  "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
+  by (induct a and b) simp_all
+
+lemma subst_all:
+  "evala env (substa s a) = evala (\<lambda>x. evala env (s x)) a"
+  "evalb env (substb s b) = evalb (\<lambda>x. evala env (s x)) b"
+  by (induct a and b) simp_all
+
+
+subsubsection {* Example: a substitution function for terms *}
+
+text {* Functions on datatypes with nested recursion are also defined
+  by mutual primitive recursion. *}
+
+datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
+
+text {* A substitution function on type @{typ "('a, 'b) term"} can be
+  defined as follows, by working simultaneously on @{typ "('a, 'b)
+  term list"}: *}
+
+primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
+  subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
+where
+  "subst_term f (Var a) = f a"
+| "subst_term f (App b ts) = App b (subst_term_list f ts)"
+| "subst_term_list f [] = []"
+| "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
+
+text {* The recursion scheme follows the structure of the unfolded
+  definition of type @{typ "('a, 'b) term"}.  To prove properties of this
+  substitution function, mutual induction is needed:
+*}
+
+lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
+  "subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
+  by (induct t and ts) simp_all
+
+
+subsubsection {* Example: a map function for infinitely branching trees *}
+
+text {* Defining functions on infinitely branching datatypes by
+  primitive recursion is just as easy.
+*}
+
+datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
+
+primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
+where
+  "map_tree f (Atom a) = Atom (f a)"
+| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
+
+text {* Note that all occurrences of functions such as @{text ts}
+  above must be applied to an argument.  In particular, @{term
+  "map_tree f \<circ> ts"} is not allowed here. *}
+
+text {* Here is a simple composition lemma for @{term map_tree}: *}
+
+lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
+  by (induct t) simp_all
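+
+
+subsubsection {* Example: a manual termination proof *}
+
+text {* The following definition (a variant of an example from
+  \cite{isabelle-function}) sums up the natural numbers from @{text
+  i} to @{text N}.  Since the recursion increases its first argument,
+  no lexicographic combination of argument sizes decreases, so the
+  automated termination proof of @{command (HOL) "fun"} fails here;
+  instead we use @{command (HOL) "function"} and establish
+  termination manually with a suitable measure: *}
+
+function sum_upto :: "nat \<Rightarrow> nat \<Rightarrow> nat"
+where
+  "sum_upto i N = (if i > N then 0 else i + sum_upto (Suc i) N)"
+  by pat_completeness auto
+
+termination
+  by (relation "measure (\<lambda>(i, N). N + 1 - i)") auto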
+
+
+subsection {* Proof methods related to recursive definitions *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) pat_completeness} & : & @{text method} \\
+    @{method_def (HOL) relation} & : & @{text method} \\
+    @{method_def (HOL) lexicographic_order} & : & @{text method} \\
+    @{method_def (HOL) size_change} & : & @{text method} \\
+    @{method_def (HOL) induction_schema} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method (HOL) relation} @{syntax term}
+    ;
+    @@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
+    ;
+    @@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) )
+    ;
+    @@{method (HOL) induction_schema}
+    ;
+    orders: ( 'max' | 'min' | 'ms' ) *
+  "}
+
+  \begin{description}
+
+  \item @{method (HOL) pat_completeness} is a specialized method to
+  solve goals regarding the completeness of pattern matching, as
+  required by the @{command (HOL) "function"} package (cf.\
+  \cite{isabelle-function}).
+
+  \item @{method (HOL) relation}~@{text R} introduces a termination
+  proof using the relation @{text R}.  The resulting proof state will
+  contain goals expressing that @{text R} is wellfounded, and that the
+  arguments of recursive calls decrease with respect to @{text R}.
+  Usually, this method is used as the initial proof step of manual
+  termination proofs.
+
+  \item @{method (HOL) "lexicographic_order"} attempts a fully
+  automated termination proof by searching for a lexicographic
+  combination of size measures on the arguments of the function. The
+  method accepts the same arguments as @{method auto}, which it uses
+  internally to prove local descents; in particular, the @{syntax
+  clasimpmod} modifiers are accepted.
+
+  In case of failure, extensive information is printed, which can help
+  to analyse the situation (cf.\ \cite{isabelle-function}).
+
+  \item @{method (HOL) "size_change"} also works on termination goals,
+  using a variation of the size-change principle, together with a
+  graph decomposition technique (see \cite{krauss_phd} for details).
+  Three kinds of orders are used internally: @{text max}, @{text min},
+  and @{text ms} (multiset), which is only available when the theory
+  @{text Multiset} is loaded. When no order kinds are given, they are
+  tried in order. The search for a termination proof uses SAT solving
+  internally.
+
+  For local descent proofs, the @{syntax clasimpmod} modifiers are
+  accepted (as for @{method auto}).
+
+  \item @{method (HOL) induction_schema} derives user-specified
+  induction rules from well-founded induction and completeness of
+  patterns. This factors out some operations that are done internally
+  by the function package and makes them available separately. See
+  @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
+
+  \end{description}
+*}
+
+
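+subsubsection {* Examples *}
+
+text {* The following sketch illustrates a manual termination proof,
+  using the well-known Ackermann function.  Pattern completeness and
+  compatibility are discharged by @{method (HOL) pat_completeness},
+  and termination by @{method (HOL) lexicographic_order}: *}
+
+function ack :: "nat \<Rightarrow> nat \<Rightarrow> nat"
+where
+  "ack 0 n = Suc n"
+| "ack (Suc m) 0 = ack m 1"
+| "ack (Suc m) (Suc n) = ack m (ack (Suc m) n)"
+  by pat_completeness auto
+
+termination by lexicographic_order
+
+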
+subsection {* Functions with explicit partiality *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) partial_function} @{syntax target}?
+      '(' @{syntax nameref} ')' @{syntax \"fixes\"} \\
+      @'where' @{syntax thmdecl}? @{syntax prop}
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
+  recursive functions based on fixpoints in complete partial
+  orders. No termination proof is required from the user or
+  constructed internally. Instead, the possibility of non-termination
+  is modelled explicitly in the result type, which contains an
+  explicit bottom element.
+
+  Pattern matching and mutual recursion are currently not supported.
+  Thus, the specification consists of a single function described by a
+  single recursive equation.
+
+  There are no fixed syntactic restrictions on the body of the
+  function, but the induced functional must be provably monotonic
+  wrt.\ the underlying order.  The monotonicity proof is performed
+  internally, and the definition is rejected when it fails. The proof
+  can be influenced by declaring hints using the
+  @{attribute (HOL) partial_function_mono} attribute.
+
+  The mandatory @{text mode} argument specifies the mode of operation
+  of the command, which directly corresponds to a complete partial
+  order on the result type. By default, the following modes are
+  defined:
+
+  \begin{description}
+
+  \item @{text option} defines functions that map into the @{type
+  option} type. Here, the value @{term None} is used to model a
+  non-terminating computation. Monotonicity requires that if @{term
+  None} is returned by a recursive call, then the overall result must
+  also be @{term None}. This is best achieved through the use of the
+  monadic operator @{const "Option.bind"}.
+
+  \item @{text tailrec} defines functions with an arbitrary result
+  type and uses the slightly degenerate partial order where @{term
+  "undefined"} is the bottom element.  Now, monotonicity requires that
+  if @{term undefined} is returned by a recursive call, then the
+  overall result must also be @{term undefined}. In practice, this is
+  only satisfied when each recursive call is a tail call, whose result
+  is directly returned. Thus, this mode of operation allows the
+  definition of arbitrary tail-recursive functions.
+
+  \end{description}
+
+  Experienced users may define new modes by instantiating the locale
+  @{const "partial_function_definitions"} appropriately.
+
+  \item @{attribute (HOL) partial_function_mono} declares rules for
+  use in the internal monotonicity proofs of partial function
+  definitions.
+
+  \end{description}
+
+*}
+
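+
+subsubsection {* Examples *}
+
+text {* As a small illustrative sketch of the @{text option} mode,
+  consider a function whose termination is not known (the name
+  @{text collatz} and its formulation are chosen merely for
+  illustration).  All recursive calls are in tail position, so the
+  monotonicity requirement is discharged internally: *}
+
+partial_function (option) collatz :: "nat \<Rightarrow> nat option"
+where
+  "collatz n =
+    (if n \<le> 1 then Some n
+     else if even n then collatz (n div 2)
+     else collatz (3 * n + 1))"
+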
+
+subsection {* Old-style recursive function definitions (TFL) *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory)"} \\
+    @{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  The old TFL commands @{command (HOL) "recdef"} and @{command (HOL)
+  "recdef_tc"} for defining recursive are mostly obsolete; @{command
+  (HOL) "function"} or @{command (HOL) "fun"} should be used instead.
+
+  @{rail "
+    @@{command (HOL) recdef} ('(' @'permissive' ')')? \\
+      @{syntax name} @{syntax term} (@{syntax prop} +) hints?
+    ;
+    @@{command (HOL) recdef_tc} @{syntax thmdecl}? tc
+    ;
+    hints: '(' @'hints' ( recdefmod * ) ')'
+    ;
+    recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
+      (() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
+    ;
+    tc: @{syntax nameref} ('(' @{syntax nat} ')')?
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "recdef"} defines general well-founded
+  recursive functions (using the TFL package), see also
+  \cite{isabelle-HOL}.  The ``@{text "(permissive)"}'' option tells
+  TFL to recover from failed proof attempts, returning unfinished
+  results.  The @{text recdef_simp}, @{text recdef_cong}, and @{text
+  recdef_wf} hints refer to auxiliary rules to be used in the internal
+  automated proof process of TFL.  Additional @{syntax clasimpmod}
+  declarations may be given to tune the context of the Simplifier
+  (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
+  \secref{sec:classical}).
+
+  \item @{command (HOL) "recdef_tc"}~@{text "c (i)"} recommences the
+  proof for leftover termination condition number @{text i} (default
+  1) as generated by a @{command (HOL) "recdef"} definition of
+  constant @{text c}.
+
+  Note that in most cases, @{command (HOL) "recdef"} is able to finish
+  its internal proofs without manual intervention.
+
+  \end{description}
+
+  \medskip Hints for @{command (HOL) "recdef"} may be also declared
+  globally, using the following attributes.
+
+  \begin{matharray}{rcl}
+    @{attribute_def (HOL) recdef_simp} & : & @{text attribute} \\
+    @{attribute_def (HOL) recdef_cong} & : & @{text attribute} \\
+    @{attribute_def (HOL) recdef_wf} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
+      @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
+  "}
+*}
+
+
+section {* Datatypes \label{sec:hol-datatype} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) datatype} (spec + @'and')
+    ;
+    @@{command (HOL) rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
+    ;
+
+    spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
+    ;
+    cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "datatype"} defines inductive datatypes in
+  HOL.
+
+  \item @{command (HOL) "rep_datatype"} represents existing types as
+  datatypes.
+
+  For foundational reasons, some basic types such as @{typ nat}, @{typ
+  "'a \<times> 'b"}, @{typ "'a + 'b"}, @{typ bool} and @{typ unit} are
+  introduced by more primitive means using @{command_ref typedef}.  To
+  recover the rich infrastructure of @{command datatype} (e.g.\ rules
+  for @{method cases} and @{method induct} and the primitive recursion
+  combinators), such types may be represented as actual datatypes
+  later.  This is done by specifying the constructors of the desired
+  type, and giving a proof of the induction rule, distinctness and
+  injectivity of constructors.
+
+  For example, see @{file "~~/src/HOL/Sum_Type.thy"} for the
+  representation of the primitive sum type as fully-featured datatype.
+
+  \end{description}
+
+  The generated rules for @{method induct} and @{method cases} provide
+  case names according to the given constructors, while parameters are
+  named after the types (see also \secref{sec:cases-induct}).
+
+  See \cite{isabelle-HOL} for more details on datatypes, but beware of
+  the old-style theory syntax being used there!  Apart from proper
+  proof methods for case-analysis and induction, there are also
+  emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL)
+  induct_tac} available, see \secref{sec:hol-induct-tac}; these allow
+  referring directly to the internal structure of subgoals (including
+  internally bound parameters).
+*}
+
+
+subsubsection {* Examples *}
+
+text {* We define a type of finite sequences, with slightly different
+  names from the existing @{typ "'a list"} that is already in @{theory
+  Main}: *}
+
+datatype 'a seq = Empty | Seq 'a "'a seq"
+
+text {* We can now prove a simple lemma by structural induction: *}
+
+lemma "Seq x xs \<noteq> xs"
+proof (induct xs arbitrary: x)
+  case Empty
+  txt {* This case can be proved using the simplifier: the freeness
+    properties of the datatype are already declared as @{attribute
+    simp} rules. *}
+  show "Seq x Empty \<noteq> Empty"
+    by simp
+next
+  case (Seq y ys)
+  txt {* The step case is proved similarly. *}
+  show "Seq x (Seq y ys) \<noteq> Seq y ys"
+    using `Seq y ys \<noteq> ys` by simp
+qed
+
+text {* Here is a more succinct version of the same proof: *}
+
+lemma "Seq x xs \<noteq> xs"
+  by (induct xs arbitrary: x) simp_all
+
+
+section {* Records \label{sec:hol-record} *}
+
+text {*
+  In principle, records merely generalize the concept of tuples, where
+  components may be addressed by labels instead of just position.  The
+  logical infrastructure of records in Isabelle/HOL is slightly more
+  advanced, though, supporting truly extensible record schemes.  This
+  admits operations that are polymorphic with respect to record
+  extension, yielding ``object-oriented'' effects like (single)
+  inheritance.  See also \cite{NaraschewskiW-TPHOLs98} for more
+  details on object-oriented verification and record subtyping in HOL.
+*}
+
+
+subsection {* Basic concepts *}
+
+text {*
+  Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
+  at the level of terms and types.  The notation is as follows:
+
+  \begin{center}
+  \begin{tabular}{l|l|l}
+    & record terms & record types \\ \hline
+    fixed & @{text "\<lparr>x = a, y = b\<rparr>"} & @{text "\<lparr>x :: A, y :: B\<rparr>"} \\
+    schematic & @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} &
+      @{text "\<lparr>x :: A, y :: B, \<dots> :: M\<rparr>"} \\
+  \end{tabular}
+  \end{center}
+
+  \noindent The ASCII representation of @{text "\<lparr>x = a\<rparr>"} is @{text
+  "(| x = a |)"}.
+
+  A fixed record @{text "\<lparr>x = a, y = b\<rparr>"} has field @{text x} of value
+  @{text a} and field @{text y} of value @{text b}.  The corresponding
+  type is @{text "\<lparr>x :: A, y :: B\<rparr>"}, assuming that @{text "a :: A"}
+  and @{text "b :: B"}.
+
+  A record scheme like @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} contains fields
+  @{text x} and @{text y} as before, but also possibly further fields
+  as indicated by the ``@{text "\<dots>"}'' notation (which is actually part
+  of the syntax).  The improper field ``@{text "\<dots>"}'' of a record
+  scheme is called the \emph{more part}.  Logically it is just a free
+  variable, which is occasionally referred to as ``row variable'' in
+  the literature.  The more part of a record scheme may be
+  instantiated by zero or more further components.  For example, the
+  previous scheme may get instantiated to @{text "\<lparr>x = a, y = b, z =
+  c, \<dots> = m'\<rparr>"}, where @{text m'} refers to a different more part.
+  Fixed records are special instances of record schemes, where
+  ``@{text "\<dots>"}'' is properly terminated by the @{text "() :: unit"}
+  element.  In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
+  for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
+
+  \medskip Two key observations make extensible records in a simply
+  typed language like HOL work out:
+
+  \begin{enumerate}
+
+  \item the more part is internalized, as a free term or type
+  variable,
+
+  \item field names are externalized, they cannot be accessed within
+  the logic as first-class values.
+
+  \end{enumerate}
+
+  \medskip In Isabelle/HOL record types have to be defined explicitly,
+  fixing their field names and types, and their (optional) parent
+  record.  Afterwards, records may be formed using above syntax, while
+  obeying the canonical order of fields as given by their declaration.
+  The record package provides several standard operations like
+  selectors and updates.  The common setup for various generic proof
+  tools enables succinct reasoning patterns.  See also the Isabelle/HOL
+  tutorial \cite{isabelle-hol-book} for further instructions on using
+  records in practice.
+*}
+
+
+subsection {* Record specifications *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) record} @{syntax typespec_sorts} '=' \\
+      (@{syntax type} '+')? (constdecl +)
+    ;
+    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
+  \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
+  derived from the optional parent record @{text "\<tau>"} by adding new
+  field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
+
+  The type variables of @{text "\<tau>"} and @{text "\<sigma>\<^sub>i"} need to be
+  covered by the (distinct) parameters @{text "\<alpha>\<^sub>1, \<dots>,
+  \<alpha>\<^sub>m"}.  Type constructor @{text t} has to be new, while @{text
+  \<tau>} needs to specify an instance of an existing record type.  At
+  least one new field @{text "c\<^sub>i"} has to be specified.
+  Basically, field names need to belong to a unique record.  This is
+  not a real restriction in practice, since fields are qualified by
+  the record name internally.
+
+  The parent record specification @{text \<tau>} is optional; if omitted
+  @{text t} becomes a root record.  The hierarchy of all records
+  declared within a theory context forms a forest structure, i.e.\ a
+  set of trees starting with a root record each.  There is no way to
+  merge multiple parent records!
+
+  For convenience, @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is made a
+  type abbreviation for the fixed record type @{text "\<lparr>c\<^sub>1 ::
+  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n\<rparr>"}, likewise is @{text
+  "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
+  @{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
+  \<zeta>\<rparr>"}.
+
+  \end{description}
+*}
+
+
+subsection {* Record operations *}
+
+text {*
+  Any record definition of the form presented above produces certain
+  standard operations.  Selectors and updates are provided for any
+  field, including the improper one ``@{text more}''.  There are also
+  cumulative record constructor functions.  To simplify the
+  presentation below, we assume for now that @{text "(\<alpha>\<^sub>1, \<dots>,
+  \<alpha>\<^sub>m) t"} is a root record with fields @{text "c\<^sub>1 ::
+  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
+
+  \medskip \textbf{Selectors} and \textbf{updates} are available for
+  any field (including ``@{text more}''):
+
+  \begin{matharray}{lll}
+    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
+    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
+  \end{matharray}
+
+  There is special syntax for application of updates: @{text "r\<lparr>x :=
+  a\<rparr>"} abbreviates term @{text "x_update a r"}.  Further notation for
+  repeated updates is also available: @{text "r\<lparr>x := a\<rparr>\<lparr>y := b\<rparr>\<lparr>z :=
+  c\<rparr>"} may be written @{text "r\<lparr>x := a, y := b, z := c\<rparr>"}.  Note that
+  because of postfix notation the order of fields shown here is the
+  reverse of that in the actual term.  Since repeated updates are just
+  function applications, fields may be freely permuted in @{text "\<lparr>x
+  := a, y := b, z := c\<rparr>"}, as far as logical equality is concerned.
+  Thus commutativity of independent updates can be proven within the
+  logic for any two fields, but not as a general theorem.
+
+  \medskip The \textbf{make} operation provides a cumulative record
+  constructor function:
+
+  \begin{matharray}{lll}
+    @{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+  \end{matharray}
+
+  \medskip We now reconsider the case of non-root records, which are
+  derived from some parent.  In general, the latter may depend on
+  another parent as well, resulting in a list of \emph{ancestor
+  records}.  Appending the lists of fields of all ancestors results in
+  a certain field prefix.  The record package automatically takes care
+  of this by lifting operations over this context of ancestor fields.
+  Assuming that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} has ancestor
+  fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"},
+  the above record operations will get the following types:
+
+  \medskip
+  \begin{tabular}{lll}
+    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
+    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
+      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow>
+      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
+    @{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
+      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+  \end{tabular}
+  \medskip
+
+  \noindent Some further operations address the extension aspect of a
+  derived record scheme specifically: @{text "t.fields"} produces a
+  record fragment consisting of exactly the new fields introduced here
+  (the result may serve as a more part elsewhere); @{text "t.extend"}
+  takes a fixed record and adds a given more part; @{text
+  "t.truncate"} restricts a record scheme to a fixed record.
+
+  \medskip
+  \begin{tabular}{lll}
+    @{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+    @{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
+      \<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
+    @{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
+  \end{tabular}
+  \medskip
+
+  \noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
+  for root records.
+*}
+
+
+subsection {* Derived rules and proof tools *}
+
+text {*
+  The record package proves several results internally, declaring
+  these facts to appropriate proof tools.  This enables users to
+  reason about record structures quite conveniently.  Assume that
+  @{text t} is a record type as specified above.
+
+  \begin{enumerate}
+
+  \item Standard conversions for selectors or updates applied to
+  record constructor terms are made part of the default Simplifier
+  context; thus proofs by reduction of basic operations merely require
+  the @{method simp} method without further arguments.  These rules
+  are available as @{text "t.simps"}, too.
+
+  \item Selectors applied to updated records are automatically reduced
+  by an internal simplification procedure, which is also part of the
+  standard Simplifier setup.
+
+  \item Inject equations of a form analogous to @{prop "(x, y) = (x',
+  y') \<equiv> x = x' \<and> y = y'"} are declared to the Simplifier and Classical
+  Reasoner as @{attribute iff} rules.  These rules are available as
+  @{text "t.iffs"}.
+
+  \item The introduction rule for record equality analogous to @{text
+  "x r = x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier,
+  and to the basic rule context as ``@{attribute intro}@{text "?"}''.
+  The rule is called @{text "t.equality"}.
+
+  \item Representations of arbitrary record expressions as canonical
+  constructor terms are provided both in @{method cases} and @{method
+  induct} format (cf.\ the generic proof methods of the same name,
+  \secref{sec:cases-induct}).  Several variations are available, for
+  fixed records, record schemes, more parts etc.
+
+  The generic proof methods are sufficiently smart to pick the most
+  sensible rule according to the type of the indicated record
+  expression: users just need to apply something like ``@{text "(cases
+  r)"}'' to a certain proof problem.
+
+  \item The derived record operations @{text "t.make"}, @{text
+  "t.fields"}, @{text "t.extend"}, @{text "t.truncate"} are \emph{not}
+  treated automatically, but usually need to be expanded by hand,
+  using the collective fact @{text "t.defs"}.
+
+  \end{enumerate}
+*}
+
+
+subsubsection {* Examples *}
+
+text {* See @{file "~~/src/HOL/ex/Records.thy"}, for example. *}
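+
+text {* The following minimal sketch (using a hypothetical record
+  @{text point} that is not part of the library) shows how selectors
+  and updates are simplified automatically: *}
+
+record point =
+  xpos :: nat
+  ypos :: nat
+
+definition "origin = \<lparr>xpos = 0, ypos = 0\<rparr>"
+
+lemma "xpos (origin\<lparr>xpos := Suc 0\<rparr>) = Suc 0"
+  by simp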
+
+
+section {* Adhoc tuples *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{attribute (HOL) split_format} ('(' 'complete' ')')?
+  "}
+
+  \begin{description}
+
+  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
+  arguments in function applications to be represented canonically
+  according to their tuple type structure.
+
+  Note that this operation tends to invent funny names for new local
+  parameters introduced.
+
+  \end{description}
+*}
+
+
+section {* Typedef axiomatization \label{sec:hol-typedef} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  A Gordon/HOL-style type definition is a certain axiom scheme that
+  identifies a new type with a subset of an existing type.  More
+  precisely, the new type is defined by exhibiting an existing type
+  @{text \<tau>}, a set @{text "A :: \<tau> set"}, and a theorem that proves
+  @{prop "\<exists>x. x \<in> A"}.  Thus @{text A} is a non-empty subset of @{text
+  \<tau>}, and the new type denotes this subset.  New functions are
+  postulated that establish an isomorphism between the new type and
+  the subset.  In general, the type @{text \<tau>} may involve type
+  variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} which means that the type definition
+  produces a type constructor @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} depending on
+  those type arguments.
+
+  The axiomatization can be considered a ``definition'' in the sense
+  of the particular set-theoretic interpretation of HOL
+  \cite{pitts93}, where the universe of types is required to be
+  downwards-closed wrt.\ arbitrary non-empty subsets.  Thus genuinely
+  new types introduced by @{command "typedef"} stay within the range
+  of HOL models by construction.  Note that @{command_ref
+  type_synonym} from Isabelle/Pure merely introduces syntactic
+  abbreviations, without any logical significance.
+
+  @{rail "
+    @@{command (HOL) typedef} alt_name? abs_type '=' rep_set
+    ;
+
+    alt_name: '(' (@{syntax name} | @'open' | @'open' @{syntax name}) ')'
+    ;
+    abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
+    ;
+    rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"}
+  axiomatizes a type definition in the background theory of the
+  current context, depending on a non-emptiness result of the set
+  @{text A} that needs to be proven here.  The set @{text A} may
+  contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} as specified on the LHS,
+  but no term variables.
+
+  Even though this is a local theory specification, the newly introduced type
+  constructor cannot depend on parameters or assumptions of the
+  context: this is structurally impossible in HOL.  In contrast, the
+  non-emptiness proof may use local assumptions in unusual situations,
+  which could result in different interpretations in target contexts:
+  the meaning of the bijection between the representing set @{text A}
+  and the new type @{text t} may then change in different application
+  contexts.
+
+  By default, @{command (HOL) "typedef"} defines both a type
+  constructor @{text t} for the new type, and a term constant @{text
+  t} for the representing set within the old type.  Use the ``@{text
+  "(open)"}'' option to suppress a separate constant definition
+  altogether.  The injection from type to set is called @{text Rep_t},
+  its inverse @{text Abs_t}, unless an explicit @{keyword (HOL)
+  "morphisms"} specification provides alternative names.
+
+  The core axiomatization uses the locale predicate @{const
+  type_definition} as defined in Isabelle/HOL.  Various basic
+  consequences of that are instantiated accordingly, re-using the
+  locale facts with names derived from the new type constructor.  Thus
+  the generic @{thm type_definition.Rep} is turned into the specific
+  @{text "Rep_t"}, for example.
+
+  Theorems @{thm type_definition.Rep}, @{thm
+  type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse}
+  provide the most basic characterization as a corresponding
+  injection/surjection pair (in both directions).  The derived rules
+  @{thm type_definition.Rep_inject} and @{thm
+  type_definition.Abs_inject} provide a more convenient version of
+  injectivity, suitable for automated proof tools (e.g.\ in
+  declarations involving @{attribute simp} or @{attribute iff}).
+  Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm
+  type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/
+  @{thm type_definition.Abs_induct} provide alternative views on
+  surjectivity.  These rules are already declared as set or type rules
+  for the generic @{method cases} and @{method induct} methods,
+  respectively.
+
+  An alternative name for the set definition (and other derived
+  entities) may be specified in parentheses; the default is to use
+  @{text t} directly.
+
+  \end{description}
+
+  \begin{warn}
+  If you introduce a new type axiomatically, i.e.\ via @{command_ref
+  typedecl} and @{command_ref axiomatization}, the minimum requirement
+  is that it has a non-empty model, to avoid immediate collapse of the
+  HOL logic.  Moreover, one needs to demonstrate that the
+  interpretation of such free-form axiomatizations can coexist with
+  that of the regular @{command_def typedef} scheme, and any extension
+  that other people might have introduced elsewhere (e.g.\ in HOLCF
+  \cite{MuellerNvOS99}).
+  \end{warn}
+*}
+
+subsubsection {* Examples *}
+
+text {* Type definitions permit the introduction of abstract data
+  types in a safe way, namely by providing models based on already
+  existing types.  Given some abstract axiomatic description @{text P}
+  of a type, this involves two steps:
+
+  \begin{enumerate}
+
+  \item Find an appropriate type @{text \<tau>} and subset @{text A} which
+  has the desired properties @{text P}, and make a type definition
+  based on this representation.
+
+  \item Prove that @{text P} holds for the new type by lifting @{text P}
+  from the representation.
+
+  \end{enumerate}
+
+  You can later forget about the representation and work solely in
+  terms of the abstract properties @{text P}.
+
+  \medskip The following trivial example pulls a three-element type
+  into existence within the formal logical environment of HOL. *}
+
+typedef three = "{(True, True), (True, False), (False, True)}"
+  by blast
+
+definition "One = Abs_three (True, True)"
+definition "Two = Abs_three (True, False)"
+definition "Three = Abs_three (False, True)"
+
+lemma three_distinct: "One \<noteq> Two"  "One \<noteq> Three"  "Two \<noteq> Three"
+  by (simp_all add: One_def Two_def Three_def Abs_three_inject three_def)
+
+lemma three_cases:
+  fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
+  by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject three_def)
+
+text {* Note that such trivial constructions are better done with
+  derived specification mechanisms such as @{command datatype}: *}
+
+datatype three' = One' | Two' | Three'
+
+text {* This avoids re-doing basic definitions and proofs from the
+  primitive @{command typedef} above. *}
+
+
+section {* Functorial structure of types *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "enriched_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) enriched_type} (@{syntax name} ':')? @{syntax term}
+    ;
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "enriched_type"}~@{text "prefix: m"} allows to
+  prove and register properties about the functorial structure of type
+  constructors.  These properties then can be used by other packages
+  to deal with those type constructors in certain type constructions.
+  Characteristic theorems are noted in the current local theory.  By
+  default, they are prefixed with the base name of the type
+  constructor; an explicit prefix can be given alternatively.
+
+  The given term @{text "m"} is considered as \emph{mapper} for the
+  corresponding type constructor and must conform to the following
+  type pattern:
+
+  \begin{matharray}{lll}
+    @{text "m"} & @{text "::"} &
+      @{text "\<sigma>\<^isub>1 \<Rightarrow> \<dots> \<sigma>\<^isub>k \<Rightarrow> (\<^vec>\<alpha>\<^isub>n) t \<Rightarrow> (\<^vec>\<beta>\<^isub>n) t"} \\
+  \end{matharray}
+
+  \noindent where @{text t} is the type constructor, @{text
+  "\<^vec>\<alpha>\<^isub>n"} and @{text "\<^vec>\<beta>\<^isub>n"} are distinct
+  type variables free in the local theory and @{text "\<sigma>\<^isub>1"},
+  \ldots, @{text "\<sigma>\<^isub>k"} is a subsequence of @{text "\<alpha>\<^isub>1 \<Rightarrow>
+  \<beta>\<^isub>1"}, @{text "\<beta>\<^isub>1 \<Rightarrow> \<alpha>\<^isub>1"}, \ldots,
+  @{text "\<alpha>\<^isub>n \<Rightarrow> \<beta>\<^isub>n"}, @{text "\<beta>\<^isub>n \<Rightarrow>
+  \<alpha>\<^isub>n"}.
+
+  \end{description}
+*}
+
+
+section {* Transfer package *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "transfer"} & : & @{text method} \\
+    @{method_def (HOL) "transfer'"} & : & @{text method} \\
+    @{method_def (HOL) "transfer_prover"} & : & @{text method} \\
+    @{attribute_def (HOL) "transfer_rule"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "relator_eq"} & : & @{text attribute} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{method (HOL) "transfer"} method replaces the current subgoal
+    with a logically equivalent one that uses different types and
+    constants. The replacement of types and constants is guided by the
+    database of transfer rules. Goals are generalized over all free
+    variables by default; this is necessary for variables whose types
+    change, but can be overridden for specific variables with e.g.
+    @{text "transfer fixing: x y z"}.
+
+  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL)
+    transfer} that allows replacing a subgoal with one that is
+    logically stronger (rather than equivalent). For example, a
+    subgoal involving equality on a quotient type could be replaced
+    with a subgoal involving equality (instead of the corresponding
+    equivalence relation) on the underlying raw type.
+
+  \item @{method (HOL) "transfer_prover"} method assists with proving
+    a transfer rule for a new constant, provided the constant is
+    defined in terms of other constants that already have transfer
+    rules. It should be applied after unfolding the constant
+    definitions.
+
+  \item @{attribute (HOL) "transfer_rule"} attribute maintains a
+    collection of transfer rules, which relate constants at two
+    different types. Typical transfer rules may relate different type
+    instances of the same polymorphic constant, or they may relate an
+    operation on a raw type to a corresponding operation on an
+    abstract type (quotient or subtype). For example:
+
+    @{text "((A ===> B) ===> list_all2 A ===> list_all2 B) map map"}\\
+    @{text "(cr_int ===> cr_int ===> cr_int) (\<lambda>(x,y) (u,v). (x+u, y+v)) plus"}
+
+    Lemmas involving predicates on relations can also be registered
+    using the same attribute. For example:
+
+    @{text "bi_unique A \<Longrightarrow> (list_all2 A ===> op =) distinct distinct"}\\
+    @{text "\<lbrakk>bi_unique A; bi_unique B\<rbrakk> \<Longrightarrow> bi_unique (prod_rel A B)"}
+
+  \item @{attribute (HOL) relator_eq} collects identity laws
+    for relators of various type constructors, e.g. @{text "list_all2
+    (op =) = (op =)"}. The @{method (HOL) transfer} method uses these
+    lemmas to infer transfer rules for non-polymorphic constants on
+    the fly.
+
+  \end{description}
+
+*}
+
+
+section {* Lifting package *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "setup_lifting"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
+    @{command_def (HOL) "lift_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
+    @{command_def (HOL) "print_quotmaps"} & : & @{text "context \<rightarrow>"}\\
+    @{command_def (HOL) "print_quotients"} & : & @{text "context \<rightarrow>"}\\
+    @{attribute_def (HOL) "quot_map"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "invariant_commute"} & : & @{text attribute} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) setup_lifting} ('(' 'no_abs_code' ')')? \\
+      @{syntax thmref} @{syntax thmref}?;
+  "}
+
+  @{rail "
+    @@{command (HOL) lift_definition} @{syntax name} '::' @{syntax type}  @{syntax mixfix}? \\
+      'is' @{syntax term};
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package
+    to work with a user-defined type. The user must provide either a
+    quotient theorem @{text "Quotient R Abs Rep T"} or a
+    type_definition theorem @{text "type_definition Rep Abs A"}.  The
+    package configures transfer rules for equality and quantifiers on
+    the type, and sets up the @{command_def (HOL) "lift_definition"}
+    command to work with the type.  In the case of a quotient theorem,
+    an optional theorem @{text "reflp R"} can be provided as a second
+    argument.  This allows the package to generate stronger transfer
+    rules.
+
+    @{command (HOL) "setup_lifting"} is called automatically if a
+    quotient type is defined by the command @{command (HOL)
+    "quotient_type"} from the Quotient package.
+
+    If @{command (HOL) "setup_lifting"} is called with a
+    type_definition theorem, the abstract type implicitly defined by
+    the theorem is declared as an abstract type in the code
+    generator. This allows @{command (HOL) "lift_definition"} to
+    register (generated) code certificate theorems as abstract code
+    equations in the code generator.  The option @{text "no_abs_code"}
+    of the command @{command (HOL) "setup_lifting"} turns off this
+    behavior: code certificate theorems generated by @{command (HOL)
+    "lift_definition"} are then not registered as abstract code
+    equations.
+
+  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau> is t"}
+    Defines a new function @{text f} with an abstract type @{text \<tau>}
+    in terms of a corresponding operation @{text t} on a
+    representation type.  The term @{text t} doesn't have to be
+    necessarily a constant but it can be any term.
+
+    Users must discharge a respectfulness proof obligation when each
+    constant is defined. For a type copy, i.e. a typedef with @{text
+    UNIV}, the proof is discharged automatically. The obligation is
+    presented in a user-friendly, readable form. A respectfulness
+    theorem in the standard format @{text f.rsp} and a transfer rule
+    @{text f.transfer} for the Transfer package are generated by the
+    package.
+
+    Integration with code_abstype: For typedefs (e.g. subtypes
+    corresponding to a datatype invariant, such as dlist), @{command
+    (HOL) "lift_definition"} generates a code certificate theorem
+    @{text f.rep_eq} and sets up code generation for each constant.
+
+  \item @{command (HOL) "print_quotmaps"} prints stored quotient map
+    theorems.
+
+  \item @{command (HOL) "print_quotients"} prints stored quotient
+    theorems.
+
+  \item @{attribute (HOL) quot_map} registers a quotient map
+    theorem. For examples see @{file
+    "~~/src/HOL/Library/Quotient_List.thy"} or other Quotient_*.thy
+    files.
+
+  \item @{attribute (HOL) invariant_commute} registers a theorem which
+    shows a relationship between the constant @{text
+    Lifting.invariant} (used for internal encoding of proper subtypes)
+    and a relator.  Such theorems allow the package to hide @{text
+    Lifting.invariant} from the user when presenting a respectfulness
+    theorem in readable form. For examples see @{file
+    "~~/src/HOL/Library/Quotient_List.thy"} or other Quotient_*.thy
+    files.
+
+  \end{description}
+*}
+
+
+section {* Quotient types *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
+    @{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
+    @{command_def (HOL) "print_quotmapsQ3"} & : & @{text "context \<rightarrow>"}\\
+    @{command_def (HOL) "print_quotientsQ3"} & : & @{text "context \<rightarrow>"}\\
+    @{command_def (HOL) "print_quotconsts"} & : & @{text "context \<rightarrow>"}\\
+    @{method_def (HOL) "lifting"} & : & @{text method} \\
+    @{method_def (HOL) "lifting_setup"} & : & @{text method} \\
+    @{method_def (HOL) "descending"} & : & @{text method} \\
+    @{method_def (HOL) "descending_setup"} & : & @{text method} \\
+    @{method_def (HOL) "partiality_descending"} & : & @{text method} \\
+    @{method_def (HOL) "partiality_descending_setup"} & : & @{text method} \\
+    @{method_def (HOL) "regularize"} & : & @{text method} \\
+    @{method_def (HOL) "injection"} & : & @{text method} \\
+    @{method_def (HOL) "cleaning"} & : & @{text method} \\
+    @{attribute_def (HOL) "quot_thm"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_lifted"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_respect"} & : & @{text attribute} \\
+    @{attribute_def (HOL) "quot_preserve"} & : & @{text attribute} \\
+  \end{matharray}
+
+  The quotient package defines a new quotient type given a raw type
+  and a partial equivalence relation.  It also includes automation for
+  transporting definitions and theorems.  It can automatically produce
+  definitions and theorems on the quotient type, given the
+  corresponding constants and facts on the raw type.
+
+  @{rail "
+    @@{command (HOL) quotient_type} (spec + @'and');
+
+    spec: @{syntax typespec} @{syntax mixfix}? '=' \\
+     @{syntax type} '/' ('partial' ':')? @{syntax term} \\
+     (@'morphisms' @{syntax name} @{syntax name})?;
+  "}
+
+  @{rail "
+    @@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \\
+    @{syntax term} 'is' @{syntax term};
+
+    constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
+  "}
+
+  @{rail "
+    @@{method (HOL) lifting} @{syntax thmrefs}?
+    ;
+
+    @@{method (HOL) lifting_setup} @{syntax thmrefs}?
+    ;
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "quotient_type"} defines quotient types. The
+  injection from a quotient type to a raw type is called @{text
+  rep_t}, its inverse @{text abs_t}, unless an explicit @{keyword (HOL)
+  "morphisms"} specification provides alternative names. @{command
+  (HOL) "quotient_type"} requires the user to prove that the relation
+  is an equivalence relation (predicate @{text equivp}), unless the
+  user explicitly specifies @{text partial}, in which case the
+  obligation is @{text part_equivp}.  A quotient defined with @{text
+  partial} is weaker in the sense that fewer things can be proved
+  automatically.
+
+  \item @{command (HOL) "quotient_definition"} defines a constant on
+  the quotient type.
+
+  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map
+  functions.
+
+  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
+
+  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
+
+  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
+    match the current goal with the given raw theorem to be lifted,
+    producing three new subgoals: regularization, injection and
+    cleaning. @{method (HOL) "lifting"} tries to apply heuristics for
+    solving these three subgoals automatically and leaves to the user
+    only those subgoals that the heuristics cannot solve, whereas
+    @{method (HOL) "lifting_setup"} leaves all three subgoals
+    unsolved.
+
+  \item @{method (HOL) "descending"} and @{method (HOL)
+    "descending_setup"} try to guess a raw statement that would lift
+    to the current subgoal. Such a statement is assumed as a new
+    subgoal, and @{method (HOL) "descending"} continues in the same
+    way as @{method (HOL) "lifting"} does: it tries to solve the
+    arising regularization, injection and cleaning subgoals, whereas
+    the analogous method @{method (HOL) "descending_setup"} leaves
+    these four subgoals unsolved.
+
+  \item @{method (HOL) "partiality_descending"} finds the regularized
+    theorem that would lift to the current subgoal, lifts it and
+    leaves it as a subgoal. This method can be used with partial
+    equivalence quotients where the non-regularized statements would
+    not be true. @{method (HOL) "partiality_descending_setup"} leaves
+    the injection and cleaning subgoals unchanged.
+
+  \item @{method (HOL) "regularize"} applies the regularization
+    heuristics to the current subgoal.
+
+  \item @{method (HOL) "injection"} applies the injection heuristics
+    to the current goal using the stored quotient respectfulness
+    theorems.
+
+  \item @{method (HOL) "cleaning"} applies the injection cleaning
+    heuristics to the current subgoal using the stored quotient
+    preservation theorems.
+
+  \item @{attribute (HOL) quot_lifted} tries to automatically
+    transport the theorem to the quotient type.  The attribute uses
+    all the defined quotient types and quotient constants, often
+    producing undesired results or theorems that cannot be lifted.
+
+  \item @{attribute (HOL) quot_respect} and @{attribute (HOL)
+    quot_preserve} attributes declare a theorem as a respectfulness
+    and preservation theorem respectively.  These are stored in the
+    local theory store and used by the @{method (HOL) "injection"}
+    and @{method (HOL) "cleaning"} methods respectively.
+
+  \item @{attribute (HOL) quot_thm} declares that a certain theorem
+    is a quotient extension theorem. Quotient extension theorems
+    allow for quotienting inside container types. Given a polymorphic
+    type that serves as a container, a map function defined for this
+    container using @{command (HOL) "enriched_type"} and a relation
+    map defined for the container type, the quotient extension
+    theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3
+    (rel_map R) (map Abs) (map Rep)"}. Quotient extension theorems
+    are stored in a database and are used in all the steps of lifting
+    theorems.
+
+  \end{description}
+*}
+
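+
+subsubsection {* Examples *}
+
+text {* A minimal sketch, along the lines of the integer construction
+  in @{file "~~/src/HOL/Quotient_Examples"}: a copy of the integers is
+  obtained as a quotient of pairs of naturals. *}
+
+fun intrel :: "(nat \<times> nat) \<Rightarrow> (nat \<times> nat) \<Rightarrow> bool"
+where
+  "intrel (x, y) (u, v) = (x + v = u + y)"
+
+quotient_type my_int = "nat \<times> nat" / intrel
+  by (auto simp add: equivp_def fun_eq_iff)
+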
+
+section {* Coercive subtyping *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{attribute_def (HOL) coercion} & : & @{text attribute} \\
+    @{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
+    @{attribute_def (HOL) coercion_map} & : & @{text attribute} \\
+  \end{matharray}
+
+  Coercive subtyping allows the user to omit explicit type
+  conversions, also called \emph{coercions}.  Type inference will add
+  them as necessary when parsing a term. See
+  \cite{traytel-berghofer-nipkow-2011} for details.
+
+  @{rail "
+    @@{attribute (HOL) coercion} (@{syntax term})?
+    ;
+  "}
+  @{rail "
+    @@{attribute (HOL) coercion_map} (@{syntax term})?
+    ;
+  "}
+
+  \begin{description}
+
+  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
+  coercion function @{text "f :: \<sigma>\<^isub>1 \<Rightarrow> \<sigma>\<^isub>2"} where @{text "\<sigma>\<^isub>1"} and
+  @{text "\<sigma>\<^isub>2"} are type constructors without arguments.  Coercions are
+  composed by the inference algorithm if needed.  Note that the type
+  inference algorithm is complete only if the registered coercions
+  form a lattice.
+
+  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
+  new map function to lift coercions through type constructors. The
+  function @{text "map"} must conform to the following type pattern
+
+  \begin{matharray}{lll}
+    @{text "map"} & @{text "::"} &
+      @{text "f\<^isub>1 \<Rightarrow> \<dots> \<Rightarrow> f\<^isub>n \<Rightarrow> (\<alpha>\<^isub>1, \<dots>, \<alpha>\<^isub>n) t \<Rightarrow> (\<beta>\<^isub>1, \<dots>, \<beta>\<^isub>n) t"} \\
+  \end{matharray}
+
+  where @{text "t"} is a type constructor and @{text "f\<^isub>i"} is of type
+  @{text "\<alpha>\<^isub>i \<Rightarrow> \<beta>\<^isub>i"} or @{text "\<beta>\<^isub>i \<Rightarrow> \<alpha>\<^isub>i"}.  Registering a map function
+  overwrites any existing map function for this particular type
+  constructor.
+
+  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
+  inference algorithm.
+
+  \end{description}
+*}
+
+
+section {* Arithmetic proof support *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) arith} & : & @{text method} \\
+    @{attribute_def (HOL) arith} & : & @{text attribute} \\
+    @{attribute_def (HOL) arith_split} & : & @{text attribute} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{method (HOL) arith} decides linear arithmetic problems (on
+  types @{text nat}, @{text int}, @{text real}).  Any current facts
+  are inserted into the goal before running the procedure.
+
+  \item @{attribute (HOL) arith} declares facts that are supplied to
+  the arithmetic provers implicitly.
+
+  \item @{attribute (HOL) arith_split} declares case split
+  rules to be expanded before @{method (HOL) arith} is invoked.
+
+  \end{description}
+
+  Note that a simpler (but faster) arithmetic prover is already
+  invoked by the Simplifier.
+*}
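+
+text {* For example, the following simple linear fact over type
+  @{typ int} is settled immediately: *}
+
+lemma "\<lbrakk>(i::int) < j; j < k\<rbrakk> \<Longrightarrow> i + 1 < k"
+  by arith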
+
+
+section {* Intuitionistic proof search *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) iprover} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method (HOL) iprover} ( @{syntax rulemod} * )
+  "}
+
+  \begin{description}
+
+  \item @{method (HOL) iprover} performs intuitionistic proof search,
+  depending on specifically declared rules from the context, or given
+  as explicit arguments.  Chained facts are inserted into the goal
+  before commencing proof search.
+
+  Rules need to be classified as @{attribute (Pure) intro},
+  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
+  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
+  applied aggressively (without considering back-tracking later).
+  Rules declared with ``@{text "?"}'' are ignored in proof search (the
+  single-step @{method (Pure) rule} method still observes these).  An
+  explicit weight annotation may be given as well; otherwise the
+  number of rule premises will be taken into account here.
+
+  \end{description}
+*}
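+
+text {* For example, the following purely intuitionistic statement is
+  found by proof search (one plausible invocation, passing suitable
+  introduction and destruction rules explicitly): *}
+
+lemma "(\<forall>x. P x \<longrightarrow> Q x) \<longrightarrow> (\<forall>x. P x) \<longrightarrow> (\<forall>x. Q x)"
+  by (iprover intro: allI impI dest: spec mp)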
+
+
+section {* Model Elimination and Resolution *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "meson"} & : & @{text method} \\
+    @{method_def (HOL) "metis"} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method (HOL) meson} @{syntax thmrefs}?
+    ;
+
+    @@{method (HOL) metis}
+      ('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
+      @{syntax thmrefs}?
+  "}
+
+  \begin{description}
+
+  \item @{method (HOL) meson} implements Loveland's model elimination
+  procedure \cite{loveland-78}.  See @{file
+  "~~/src/HOL/ex/Meson_Test.thy"} for examples.
+
+  \item @{method (HOL) metis} combines ordered resolution and ordered
+  paramodulation to find first-order (or mildly higher-order) proofs.
+  The first optional argument specifies a type encoding; see the
+  Sledgehammer manual \cite{isabelle-sledgehammer} for details.  The
+  directory @{file "~~/src/HOL/Metis_Examples"} contains several small
+  theories developed to a large extent using @{method (HOL) metis}.
+
+  \end{description}
+*}
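+
+text {* For example, a simple first-order consequence that both
+  methods settle directly: *}
+
+lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> P a \<Longrightarrow> Q a"
+  by metis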
+
+
+section {* Coherent Logic *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{method_def (HOL) "coherent"} & : & @{text method} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method (HOL) coherent} @{syntax thmrefs}?
+  "}
+
+  \begin{description}
+
+  \item @{method (HOL) coherent} solves problems of \emph{Coherent
+  Logic} \cite{Bezem-Coquand:2005}, which covers applications in
+  confluence theory, lattice theory and projective geometry.  See
+  @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
+
+  \end{description}
+*}
+
+
+section {* Proving propositions *}
+
+text {*
+  In addition to the standard proof methods, a number of diagnostic
+  tools search for proofs and provide an Isar proof snippet on success.
+  These tools are available via the following commands.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "solve_direct"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "try"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "try0"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "sledgehammer_params"} & : & @{text "theory \<rightarrow> theory"}
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) try}
+    ;
+
+    @@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thmrefs} ) + ) ?
+      @{syntax nat}?
+    ;
+
+    @@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}?
+    ;
+
+    @@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? )
+    ;
+
+    args: ( @{syntax name} '=' value + ',' )
+    ;
+
+    facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
+    ;
+  "} % FIXME check args "value"
+
+  \begin{description}
+
+  \item @{command (HOL) "solve_direct"} checks whether the current
+  subgoals can be solved directly by an existing theorem. Duplicate
+  lemmas can be detected in this way.
+
+  \item @{command (HOL) "try0"} attempts to prove a subgoal
+  using a combination of standard proof methods (@{method auto},
+  @{method simp}, @{method blast}, etc.).  Additional facts supplied
+  via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
+  "dest:"} are passed to the appropriate proof methods.
+
+  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
+  using a combination of provers and disprovers (@{command (HOL)
+  "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
+  "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
+  "nitpick"}).
+
+  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
+  using external automatic provers (resolution provers and SMT
+  solvers). See the Sledgehammer manual \cite{isabelle-sledgehammer}
+  for details.
+
+  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
+  "sledgehammer"} configuration options persistently.
+
+  \end{description}
+*}
+
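+text {* For example, @{command (HOL) "solve_direct"} points out that
+  the following tentative lemma is subsumed by an existing library
+  fact; the proof is then finished conventionally: *}
+
+lemma "xs @ [] = xs"
+  solve_direct
+  by simp
+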
+
+section {* Checking and refuting propositions *}
+
+text {*
+  Identifying incorrect propositions usually involves evaluation of
+  particular assignments and systematic counterexample search.  This
+  is supported by the following commands.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "value"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "values"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "quickcheck"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "refute"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "nitpick"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
+    @{command_def (HOL) "quickcheck_params"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "refute_params"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "nitpick_params"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "quickcheck_generator"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "find_unused_assms"} & : & @{text "context \<rightarrow>"}
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
+    ;
+
+    @@{command (HOL) values} modes? @{syntax nat}? @{syntax term}
+    ;
+
+    (@@{command (HOL) quickcheck} | @@{command (HOL) refute} | @@{command (HOL) nitpick})
+      ( '[' args ']' )? @{syntax nat}?
+    ;
+
+    (@@{command (HOL) quickcheck_params} | @@{command (HOL) refute_params} |
+      @@{command (HOL) nitpick_params}) ( '[' args ']' )?
+    ;
+
+    @@{command (HOL) quickcheck_generator} @{syntax nameref} \\
+      'operations:' ( @{syntax term} +)
+    ;
+
+    @@{command (HOL) find_unused_assms} @{syntax name}?
+    ;
+
+    modes: '(' (@{syntax name} +) ')'
+    ;
+
+    args: ( @{syntax name} '=' value + ',' )
+    ;
+  "} % FIXME check "value"
+
+  \begin{description}
+
+  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
+  term; optionally @{text modes} can be specified, which are appended
+  to the current print mode; see \secref{sec:print-modes}.
+  Internally, the evaluation is performed by registered evaluators,
+  which are invoked sequentially until a result is returned.
+  Alternatively a specific evaluator can be selected using square
+  brackets; typical evaluators use the current set of code equations
+  to normalize and include @{text simp} for fully symbolic evaluation
+  using the simplifier, @{text nbe} for \emph{normalization by
+  evaluation} and @{text code} for code generation in SML.
+
+  \item @{command (HOL) "values"}~@{text t} enumerates a set
+  comprehension by evaluation and prints its values up to the given
+  number of solutions; optionally @{text modes} can be specified,
+  which are appended to the current print mode; see
+  \secref{sec:print-modes}.
+
+  \item @{command (HOL) "quickcheck"} tests the current goal for
+  counterexamples using a series of assignments for its free
+  variables; by default the first subgoal is tested, an other can be
+  selected explicitly using an optional goal index.  Assignments can
+  be chosen exhausting the search space upto a given size, or using a
+  fixed number of random assignments in the search space, or exploring
+  the search space symbolically using narrowing.  By default,
+  quickcheck uses exhaustive testing.  A number of configuration
+  options are supported for @{command (HOL) "quickcheck"}, notably:
+
+    \begin{description}
+
+    \item[@{text tester}] specifies which testing approach to apply.
+    There are three testers, @{text exhaustive}, @{text random}, and
+    @{text narrowing}.  An unknown configuration option is treated as
+    an argument to @{text tester}, making @{text "tester ="} optional.
+    When multiple testers are given, these are applied in parallel.
+    If no tester is specified, quickcheck uses the testers that are
+    set active, i.e.\ those whose configuration options @{attribute
+    quickcheck_exhaustive_active}, @{attribute
+    quickcheck_random_active}, or @{attribute
+    quickcheck_narrowing_active} are set to true.
+
+    \item[@{text size}] specifies the maximum size of the search space
+    for assignment values.
+
+    \item[@{text genuine_only}] sets quickcheck only to return genuine
+    counterexamples, but not potentially spurious counterexamples due
+    to underspecified functions.
+
+    \item[@{text abort_potential}] sets quickcheck to abort once it
+    finds a potentially spurious counterexample, without continuing
+    the search for a further genuine counterexample.
+    For this option to be effective, the @{text genuine_only} option
+    must be set to false.
+
+    \item[@{text eval}] takes a term or a list of terms and evaluates
+    these terms under the variable assignment found by quickcheck.
+    This option is currently only supported by the default
+    (exhaustive) tester.
+
+    \item[@{text iterations}] sets how many sets of assignments are
+    generated for each particular size.
+
+    \item[@{text no_assms}] specifies whether assumptions in
+    structured proofs should be ignored.
+
+    \item[@{text locale}] specifies how to process conjectures in
+    a locale context, i.e.\ whether they are interpreted or expanded.
+    The option is a whitespace-separated list of the two words
+    @{text interpret} and @{text expand}.  The list determines the
+    order in which they are employed.  The default setting is to first
+    use interpretations and then test the expanded conjecture.
+    The option is only provided as an attribute declaration, not
+    as a parameter to the command.
+
+    \item[@{text timeout}] sets the time limit in seconds.
+
+    \item[@{text default_type}] sets the type(s) generally used to
+    instantiate type variables.
+
+    \item[@{text report}] if set, quickcheck reports how many tests
+    fulfilled the preconditions.
+
+    \item[@{text use_subtype}] if set, quickcheck automatically lifts
+    conjectures to registered subtypes if possible, and tests the
+    lifted conjecture.
+
+    \item[@{text quiet}] if set, quickcheck does not output anything
+    while testing.
+
+    \item[@{text verbose}] if set, quickcheck informs about the
+    current size and cardinality while testing.
+
+    \item[@{text expect}] can be used to check if the user's
+    expectation was met (@{text no_expectation}, @{text
+    no_counterexample}, or @{text counterexample}).
+
+    \end{description}
+
+  These options can be given within square brackets; see also the
+  example at the end of this section.
+
+  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
+  "quickcheck"} configuration options persistently.
+
+  \item @{command (HOL) "quickcheck_generator"} creates random and
+  exhaustive value generators for a given type and operations.  It
+  generates values by using the operations as if they were
+  constructors of that type.
+
+  \item @{command (HOL) "refute"} tests the current goal for
+  counterexamples using a reduction to SAT. The following
+  configuration options are supported:
+
+    \begin{description}
+
+    \item[@{text minsize}] specifies the minimum size (cardinality) of
+    the models to search for.
+
+    \item[@{text maxsize}] specifies the maximum size (cardinality) of
+    the models to search for. Nonpositive values mean @{text "\<infinity>"}.
+
+    \item[@{text maxvars}] specifies the maximum number of Boolean
+    variables to use when transforming the term into a propositional
+    formula.  Nonpositive values mean @{text "\<infinity>"}.
+
+    \item[@{text satsolver}] specifies the SAT solver to use.
+
+    \item[@{text no_assms}] specifies whether assumptions in
+    structured proofs should be ignored.
+
+    \item[@{text maxtime}] sets the time limit in seconds.
+
+    \item[@{text expect}] can be used to check if the user's
+    expectation was met (@{text genuine}, @{text potential}, @{text
+    none}, or @{text unknown}).
+
+    \end{description}
+
+  These options can be given within square brackets.
+
+  \item @{command (HOL) "refute_params"} changes @{command (HOL)
+  "refute"} configuration options persistently.
+
+  \item @{command (HOL) "nitpick"} tests the current goal for
+  counterexamples using a reduction to first-order relational
+  logic. See the Nitpick manual \cite{isabelle-nitpick} for details.
+
+  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
+  "nitpick"} configuration options persistently.
+
+  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
+  assumptions in theorems using quickcheck.
+  It takes the theory name to be checked for superfluous assumptions
+  as an optional argument.  If not provided, it checks the current theory.
+  Options to the internal quickcheck invocations can be changed with
+  common configuration declarations.
+
+  \end{description}
+*}
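+
+text {*
+  The following snippets illustrate typical usage of these commands;
+  the reported results depend on the configuration and are only
+  indicated informally here:
+
+\begin{ttbox}
+  value [simp] "rev [a, b, c]"
+
+  lemma "rev xs = xs"
+    quickcheck [tester = exhaustive, size = 4]
+    oops
+\end{ttbox}
+
+  Here @{command (HOL) "value"} prints the reversed list, while
+  @{command (HOL) "quickcheck"} reports a counterexample consisting of
+  a list with two distinct elements.  The same (false) conjecture
+  could be given to @{command (HOL) "refute"} or @{command (HOL)
+  "nitpick"} instead.
+*}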
+
+
+section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *}
+
+text {*
+  The following tools of Isabelle/HOL support case analysis and
+  induction in unstructured tactic scripts; see also
+  \secref{sec:cases-induct} for proper Isar versions of similar ideas.
+
+  \begin{matharray}{rcl}
+    @{method_def (HOL) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def (HOL) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
+    @{method_def (HOL) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
+    @{command_def (HOL) "inductive_cases"}@{text "\<^sup>*"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+  \end{matharray}
+
+  @{rail "
+    @@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
+    ;
+    @@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
+    ;
+    @@{method (HOL) ind_cases} (@{syntax prop}+) (@'for' (@{syntax name}+))?
+    ;
+    @@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
+    ;
+
+    rule: 'rule' ':' @{syntax thmref}
+  "}
+
+  \begin{description}
+
+  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac}
+  support reasoning about inductive types.  Rules are selected
+  according to
+  the declarations by the @{attribute cases} and @{attribute induct}
+  attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
+  datatype} package already takes care of this.
+
+  These unstructured tactics feature both goal addressing and dynamic
+  instantiation.  Note that named rule cases are \emph{not} provided
+  as would be by the proper @{method cases} and @{method induct} proof
+  methods (see \secref{sec:cases-induct}).  Unlike the @{method
+  induct} method, @{method induct_tac} does not handle structured rule
+  statements, only the compact object-logic conclusion of the subgoal
+  being addressed.
+
+  \item @{method (HOL) ind_cases} and @{command (HOL)
+  "inductive_cases"} provide an interface to the internal @{ML_text
+  mk_cases} operation.  Rules are simplified in an unrestricted
+  forward manner.
+
+  While @{method (HOL) ind_cases} is a proof method to apply the
+  result immediately as elimination rules, @{command (HOL)
+  "inductive_cases"} provides case split theorems at the theory level
+  for later use.  The @{keyword "for"} argument of the @{method (HOL)
+  ind_cases} method allows to specify a list of variables that should
+  be generalized before applying the resulting rule.
+
+  \end{description}
+*}
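+
+text {*
+  For example, unstructured induction over lists works like this:
+
+\begin{ttbox}
+  lemma "rev (rev xs) = xs"
+    apply (induct_tac xs)
+    apply simp_all
+    done
+\end{ttbox}
+
+  Similarly, given a hypothetical inductive predicate @{text ev} on
+  natural numbers, case split theorems could be recorded as follows:
+
+\begin{ttbox}
+  inductive_cases ev_Suc_case: "ev (Suc n)"   (* ev is hypothetical *)
+\end{ttbox}
+*}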
+
+
+section {* Executable code *}
+
+text {* For validation purposes, it is often useful to \emph{execute}
+  specifications.  In principle, execution could be simulated by
+  Isabelle's inference kernel, i.e.\ by a combination of resolution and
+  simplification.  Unfortunately, this approach is rather inefficient.
+  A more efficient way of executing specifications is to translate
+  them into a functional programming language such as ML.
+
+  Isabelle provides a generic framework to support code generation
+  from executable specifications.  Isabelle/HOL instantiates these
+  mechanisms in a way that is amenable to end-user applications.  Code
+  can be generated for functional programs (including overloading
+  using type classes) targeting SML \cite{SML}, OCaml \cite{OCaml},
+  Haskell \cite{haskell-revised-report} and Scala
+  \cite{scala-overview-tech-report}.  Conceptually, code generation is
+  split up into three steps: \emph{selection} of code theorems,
+  \emph{translation} into an abstract executable view and
+  \emph{serialization} to a specific \emph{target language}.
+  Inductive specifications can be executed using the predicate
+  compiler which operates within HOL.  See \cite{isabelle-codegen} for
+  an introduction.
+
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "export_code"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def (HOL) code} & : & @{text attribute} \\
+    @{command_def (HOL) "code_abort"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "print_codesetup"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{attribute_def (HOL) code_unfold} & : & @{text attribute} \\
+    @{attribute_def (HOL) code_post} & : & @{text attribute} \\
+    @{attribute_def (HOL) code_abbrev} & : & @{text attribute} \\
+    @{command_def (HOL) "print_codeproc"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "code_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "code_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def (HOL) "code_const"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_type"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_class"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_instance"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_reserved"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_monad"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_include"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_modulename"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_reflect"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (HOL) "code_pred"} & : & @{text "theory \<rightarrow> proof(prove)"}
+  \end{matharray}
+
+  @{rail "
+    @@{command (HOL) export_code} ( constexpr + ) \\
+       ( ( @'in' target ( @'module_name' @{syntax string} ) ? \\
+        ( @'file' ( @{syntax string} | '-' ) ) ? ( '(' args ')' ) ?) + ) ?
+    ;
+
+    const: @{syntax term}
+    ;
+
+    constexpr: ( const | 'name._' | '_' )
+    ;
+
+    typeconstructor: @{syntax nameref}
+    ;
+
+    class: @{syntax nameref}
+    ;
+
+    target: 'SML' | 'OCaml' | 'Haskell' | 'Scala'
+    ;
+
+    @@{attribute (HOL) code} ( 'del' | 'abstype' | 'abstract' )?
+    ;
+
+    @@{command (HOL) code_abort} ( const + )
+    ;
+
+    @@{command (HOL) code_datatype} ( const + )
+    ;
+
+    @@{attribute (HOL) code_unfold} ( 'del' ) ?
+    ;
+
+    @@{attribute (HOL) code_post} ( 'del' ) ?
+    ;
+
+    @@{attribute (HOL) code_abbrev}
+    ;
+
+    @@{command (HOL) code_thms} ( constexpr + ) ?
+    ;
+
+    @@{command (HOL) code_deps} ( constexpr + ) ?
+    ;
+
+    @@{command (HOL) code_const} (const + @'and') \\
+      ( ( '(' target ( syntax ? + @'and' ) ')' ) + )
+    ;
+
+    @@{command (HOL) code_type} (typeconstructor + @'and') \\
+      ( ( '(' target ( syntax ? + @'and' ) ')' ) + )
+    ;
+
+    @@{command (HOL) code_class} (class + @'and') \\
+      ( ( '(' target \\ ( @{syntax string} ? + @'and' ) ')' ) + )
+    ;
+
+    @@{command (HOL) code_instance} (( typeconstructor '::' class ) + @'and') \\
+      ( ( '(' target ( '-' ? + @'and' ) ')' ) + )
+    ;
+
+    @@{command (HOL) code_reserved} target ( @{syntax string} + )
+    ;
+
+    @@{command (HOL) code_monad} const const target
+    ;
+
+    @@{command (HOL) code_include} target ( @{syntax string} ( @{syntax string} | '-') )
+    ;
+
+    @@{command (HOL) code_modulename} target ( ( @{syntax string} @{syntax string} ) + )
+    ;
+
+    @@{command (HOL) code_reflect} @{syntax string} \\
+      ( @'datatypes' ( @{syntax string} '=' ( '_' | ( @{syntax string} + '|' ) + @'and' ) ) ) ? \\
+      ( @'functions' ( @{syntax string} + ) ) ? ( @'file' @{syntax string} ) ?
+    ;
+
+    @@{command (HOL) code_pred} \\( '(' @'modes' ':' modedecl ')')? \\ const
+    ;
+
+    syntax: @{syntax string} | ( @'infix' | @'infixl' | @'infixr' ) @{syntax nat} @{syntax string}
+    ;
+
+    modedecl: (modes | ((const ':' modes) \\
+         (@'and' ((const ':' modes @'and') +))?))
+    ;
+
+    modes: mode @'as' const
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "export_code"} generates code for a given list
+  of constants in the specified target language(s).  If no
+  serialization instruction is given, only abstract code is generated
+  internally.
+
+  Constants may be specified by giving them literally, referring to
+  all executable constants within a certain theory by giving @{text
+  "name._"}, or referring to \emph{all} executable constants currently
+  available by giving @{text "_"}.
+
+  By default, for each involved theory one corresponding name space
+  module is generated.  Alternatively, a module name may be specified
+  after the @{keyword "module_name"} keyword; then \emph{all} code is
+  placed in this module.
+
+  For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification
+  refers to a single file; for \emph{Haskell}, it refers to a whole
+  directory, where code is generated in multiple files reflecting the
+  module hierarchy.  Omitting the file specification denotes standard
+  output.
+
+  Serializers take an optional list of arguments in parentheses.  For
+  \emph{SML} and \emph{OCaml}, ``@{text no_signatures}'' omits
+  explicit module signatures.
+
+  For \emph{Haskell} a module name prefix may be given using the
+  ``@{text "root:"}'' argument; ``@{text string_classes}'' adds a
+  ``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate
+  datatype declaration.
+
+  \item @{attribute (HOL) code} explicitly selects (or with option
+  ``@{text "del"}'' deselects) a code equation for code generation.
+  Usually packages introducing code equations provide a reasonable
+  default setup for selection.  Variants @{text "code abstype"} and
+  @{text "code abstract"} declare abstract datatype certificates or
+  code equations on abstract datatype representations respectively.
+
+  \item @{command (HOL) "code_abort"} declares constants which are not
+  required to have a definition by means of code equations; if needed
+  these are implemented by program abort instead.
+
+  \item @{command (HOL) "code_datatype"} specifies a constructor set
+  for a logical type.
+
+  \item @{command (HOL) "print_codesetup"} gives an overview on
+  selected code equations and code generator datatypes.
+
+  \item @{attribute (HOL) code_unfold} declares (or with option
+  ``@{text "del"}'' removes) theorems which during preprocessing
+  are applied as rewrite rules to any code equation or evaluation
+  input.
+
+  \item @{attribute (HOL) code_post} declares (or with option ``@{text
+  "del"}'' removes) theorems which are applied as rewrite rules to any
+  result of an evaluation.
+
+  \item @{attribute (HOL) code_abbrev} declares equations which are
+  applied as rewrite rules to any result of an evaluation and
+  symmetrically during preprocessing to any code equation or evaluation
+  input.
+
+  \item @{command (HOL) "print_codeproc"} prints the setup of the code
+  generator preprocessor.
+
+  \item @{command (HOL) "code_thms"} prints a list of theorems
+  representing the corresponding program containing all given
+  constants after preprocessing.
+
+  \item @{command (HOL) "code_deps"} visualizes dependencies of
+  theorems representing the corresponding program containing all given
+  constants after preprocessing.
+
+  \item @{command (HOL) "code_const"} associates a list of constants
+  with target-specific serializations; omitting a serialization
+  deletes an existing serialization.
+
+  \item @{command (HOL) "code_type"} associates a list of type
+  constructors with target-specific serializations; omitting a
+  serialization deletes an existing serialization.
+
+  \item @{command (HOL) "code_class"} associates a list of classes
+  with target-specific class names; omitting a serialization deletes
+  an existing serialization.  This applies only to \emph{Haskell}.
+
+  \item @{command (HOL) "code_instance"} declares a list of type
+  constructor / class instance relations as ``already present'' for a
+  given target.  Omitting a ``@{text "-"}'' deletes an existing
+  ``already present'' declaration.  This applies only to
+  \emph{Haskell}.
+
+  \item @{command (HOL) "code_reserved"} declares a list of names as
+  reserved for a given target, preventing it to be shadowed by any
+  generated code.
+
+  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism
+  to generate monadic code for Haskell.
+
+  \item @{command (HOL) "code_include"} adds arbitrary named content
+  (``include'') to generated code.  A ``@{text "-"}'' as the last
+  argument will remove an already added ``include''.
+
+  \item @{command (HOL) "code_modulename"} declares aliasings from one
+  module name onto another.
+
+  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
+  argument compiles code into the system runtime environment and
+  modifies the code generator setup such that future invocations of
+  system runtime code generation referring to one of the ``@{text
+  "datatypes"}'' or ``@{text "functions"}'' entities use these
+  precompiled entities.  With a ``@{text "file"}'' argument, the
+  corresponding code is generated into the specified file without
+  modifying the code generator setup.
+
+  \item @{command (HOL) "code_pred"} creates code equations for a
+    predicate given a set of introduction rules. Optional mode
+    annotations determine which arguments are supposed to be input or
+    output. If alternative introduction rules are declared, one must
+    prove a corresponding elimination rule.
+
+  \end{description}
+*}
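+
+text {*
+  For example, code for the list-reversal function @{const rev} may be
+  exported as follows; the module and file names are arbitrary:
+
+\begin{ttbox}
+  export_code rev in SML module_name Example file "example.ML"
+  export_code rev in Haskell module_name Example file "code"
+    (* "Example", "example.ML", "code" are arbitrary names *)
+\end{ttbox}
+*}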
+
+
+section {* Definition by specification \label{sec:hol-specification} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+    @{command_def (HOL) "ax_specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  @{rail "
+  (@@{command (HOL) specification} | @@{command (HOL) ax_specification})
+    '(' (decl +) ')' \\ (@{syntax thmdecl}? @{syntax prop} +)
+  ;
+  decl: ((@{syntax name} ':')? @{syntax term} '(' @'overloaded' ')'?)
+  "}
+
+  \begin{description}
+
+  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
+  goal stating the existence of terms with the properties specified to
+  hold for the constants given in @{text decls}.  After finishing the
+  proof, the theory will be augmented with definitions for the given
+  constants, as well as with theorems stating the properties for these
+  constants.
+
+  \item @{command (HOL) "ax_specification"}~@{text "decls \<phi>"} sets up
+  a goal stating the existence of terms with the properties specified
+  to hold for the constants given in @{text decls}.  After finishing
+  the proof, the theory will be augmented with axioms expressing the
+  properties given in the first place.
+
+  \item @{text decl} declares a constant to be defined by the
+  specification given.  The definition for the constant @{text c} is
+  bound to the name @{text c_def} unless a theorem name is given in
+  the declaration.  Overloaded constants should be declared as such.
+
+  \end{description}
+
+  Whether to use @{command (HOL) "specification"} or @{command (HOL)
+  "ax_specification"} is to some extent a matter of style.  @{command
+  (HOL) "specification"} introduces no new axioms, and so by
+  construction cannot introduce inconsistencies, whereas @{command
+  (HOL) "ax_specification"} does introduce axioms, but only after the
+  user has explicitly proven them to be safe.  A practical issue must be
+  considered, though: After introducing two constants with the same
+  properties using @{command (HOL) "specification"}, one can prove
+  that the two constants are, in fact, equal.  If this might be a
+  problem, one should use @{command (HOL) "ax_specification"}.
+*}
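+
+text {*
+  A minimal example: the following introduces an underspecified
+  constant that is merely known to be positive (the names are
+  hypothetical):
+
+\begin{ttbox}
+  consts positive :: nat   (* hypothetical, underspecified constant *)
+
+  specification (positive)
+    positive_pos: "positive > 0"
+    by (rule exI [of _ "1::nat"]) simp
+\end{ttbox}
+
+  Afterwards the theorem @{text positive_pos} is available, but
+  nothing beyond the specified property is known about @{text
+  positive}.
+*}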
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Inner_Syntax.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,1574 @@
+theory Inner_Syntax
+imports Base Main
+begin
+
+chapter {* Inner syntax --- the term language \label{ch:inner-syntax} *}
+
+text {* The inner syntax of Isabelle provides concrete notation for
+  the main entities of the logical framework, notably @{text
+  "\<lambda>"}-terms with types and type classes.  Applications may either
+  extend existing syntactic categories by additional notation, or
+  define new sub-languages that are linked to the standard term
+  language via some explicit markers.  For example @{verbatim
+  FOO}~@{text "foo"} could embed the syntax corresponding for some
+  user-defined nonterminal @{text "foo"} --- within the bounds of the
+  given lexical syntax of Isabelle/Pure.
+
+  The most basic way to specify concrete syntax for logical entities
+  works via mixfix annotations (\secref{sec:mixfix}), which may
+  usually be given as part of the original declaration or via explicit
+  notation commands later on (\secref{sec:notation}).  This already
+  covers many needs of concrete syntax without having to understand
+  the full complexity of inner syntax layers.
+
+  Further details of the syntax engine involve the classical
+  distinction between lexical language and context-free grammar (see
+  \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax
+  transformations} (see \secref{sec:syntax-transformations}).
+*}
+
+
+section {* Printing logical entities *}
+
+subsection {* Diagnostic commands \label{sec:print-diag} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "typ"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "term"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "prop"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "thm"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "full_prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "pr"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  These diagnostic commands assist interactive development by printing
+  internal logical entities in a human-readable fashion.
+
+  @{rail "
+    @@{command typ} @{syntax modes}? @{syntax type} ('::' @{syntax sort})?
+    ;
+    @@{command term} @{syntax modes}? @{syntax term}
+    ;
+    @@{command prop} @{syntax modes}? @{syntax prop}
+    ;
+    @@{command thm} @{syntax modes}? @{syntax thmrefs}
+    ;
+    ( @@{command prf} | @@{command full_prf} ) @{syntax modes}? @{syntax thmrefs}?
+    ;
+    @@{command pr} @{syntax modes}? @{syntax nat}?
+    ;
+
+    @{syntax_def modes}: '(' (@{syntax name} + ) ')'
+  "}
+
+  \begin{description}
+
+  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
+  according to the current context.
+
+  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
+  determine the most general way to make @{text "\<tau>"} conform to sort
+  @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
+  belongs to that sort.  Dummy type parameters ``@{text "_"}''
+  (underscore) are assigned to fresh type variables with most general
+  sorts, according to the principles of type-inference.
+
+  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
+  read, type-check and print terms or propositions according to the
+  current theory or proof context; the inferred type of @{text t} is
+  output as well.  Note that these commands are also useful in
+  inspecting the current environment of term abbreviations.
+
+  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
+  theorems from the current theory or proof context.  Note that any
+  attributes included in the theorem specifications are applied to a
+  temporary context derived from the current theory or proof; the
+  result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
+  \<dots>, a\<^sub>n"} do not have any permanent effect.
+
+  \item @{command "prf"} displays the (compact) proof term of the
+  current proof state (if present), or of the given theorems. Note
+  that this requires proof terms to be switched on for the current
+  object logic (see the ``Proof terms'' section of the Isabelle
+  reference manual for information on how to do this).
+
+  \item @{command "full_prf"} is like @{command "prf"}, but displays
+  the full proof term, i.e.\ also displays information omitted in the
+  compact proof term, which is denoted by ``@{text _}'' placeholders
+  there.
+
+  \item @{command "pr"}~@{text "goals"} prints the current proof state
+  (if present), including current facts and goals.  The optional limit
+  arguments affect the number of goals to be displayed, which is
+  initially 10.  Omitting limit value leaves the current setting
+  unchanged.
+
+  \end{description}
+
+  All of the diagnostic commands above admit a list of @{text modes}
+  to be specified, which is appended to the current print mode; see
+  also \secref{sec:print-modes}.  Thus the output behavior may be
+  modified according to particular print mode features.  For example,
+  @{command "pr"}~@{text "(latex xsymbols)"} would print the current
+  proof state with mathematical symbols and special characters
+  represented in {\LaTeX} source, according to the Isabelle style
+  \cite{isabelle-sys}.
+
+  Note that antiquotations (cf.\ \secref{sec:antiq}) provide a more
+  systematic way to include formal items into the printed text
+  document.
+*}
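+
+text {*
+  For example (the output depends on the current context):
+
+\begin{ttbox}
+  typ "('a => 'b) => 'a => 'b"
+  term "%x. x"
+  prop "x == y ==> y == x"
+  thm refl
+\end{ttbox}
+*}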
+
+
+subsection {* Details of printed content *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{attribute_def show_types} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_sorts} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_consts} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_abbrevs} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def show_brackets} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def names_long} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def names_short} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def names_unique} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def eta_contract} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def goals_limit} & : & @{text attribute} & default @{text 10} \\
+    @{attribute_def show_main_goal} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_hyps} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_tags} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def show_question_marks} & : & @{text attribute} & default @{text true} \\
+  \end{tabular}
+  \medskip
+
+  These configuration options control the detail of information that
+  is displayed for types, terms, theorems, goals etc.  See also
+  \secref{sec:config}.
+
+  \begin{description}
+
+  \item @{attribute show_types} and @{attribute show_sorts} control
+  printing of type constraints for term variables, and sort
+  constraints for type variables.  By default, neither of these are
+  shown in output.  If @{attribute show_sorts} is enabled, types are
+  always shown as well.
+
+  Note that displaying types and sorts may explain why a polymorphic
+  inference rule fails to resolve with some goal, or why a rewrite
+  rule does not apply as expected.
+
+  \item @{attribute show_consts} controls printing of types of
+  constants when displaying a goal state.
+
+  Note that the output can be enormous, because polymorphic constants
+  often occur at several different type instances.
+
+  \item @{attribute show_abbrevs} controls folding of constant
+  abbreviations.
+
+  \item @{attribute show_brackets} controls bracketing in pretty
+  printed output.  If enabled, all sub-expressions of the pretty
+  printing tree will be parenthesized, even if this produces malformed
+  term syntax!  This crude way of showing the internal structure of
+  pretty printed entities may occasionally help to diagnose problems
+  with operator priorities, for example.
+
+  \item @{attribute names_long}, @{attribute names_short}, and
+  @{attribute names_unique} control the way of printing fully
+  qualified internal names in external form.  See also
+  \secref{sec:antiq} for the document antiquotation options of the
+  same names.
+
+  \item @{attribute eta_contract} controls @{text "\<eta>"}-contracted
+  printing of terms.
+
+  The @{text \<eta>}-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"},
+  provided @{text x} is not free in @{text f}.  It asserts
+  \emph{extensionality} of functions: @{prop "f \<equiv> g"} if @{prop "f x \<equiv>
+  g x"} for all @{text x}.  Higher-order unification frequently puts
+  terms into a fully @{text \<eta>}-expanded form.  For example, if @{text
+  F} has type @{text "(\<tau> \<Rightarrow> \<tau>) \<Rightarrow> \<tau>"} then its expanded form is @{term
+  "\<lambda>h. F (\<lambda>x. h x)"}.
+
+  Enabling @{attribute eta_contract} makes Isabelle perform @{text
+  \<eta>}-contractions before printing, so that @{term "\<lambda>h. F (\<lambda>x. h x)"}
+  appears simply as @{text F}.
+
+  Note that the distinction between a term and its @{text \<eta>}-expanded
+  form occasionally matters.  While higher-order resolution and
+  rewriting operate modulo @{text "\<alpha>\<beta>\<eta>"}-conversion, some other tools
+  might look at terms more discretely.
+
+  \item @{attribute goals_limit} controls the maximum number of
+  subgoals to be shown in goal output.
+
+  \item @{attribute show_main_goal} controls whether the main result
+  to be proven should be displayed.  This information might be
+  relevant for schematic goals, to inspect the current claim that has
+  been synthesized so far.
+
+  \item @{attribute show_hyps} controls printing of implicit
+  hypotheses of local facts.  Normally, only those hypotheses are
+  displayed that are \emph{not} covered by the assumptions of the
+  current context: this situation indicates a fault in some tool being
+  used.
+
+  By enabling @{attribute show_hyps}, output of \emph{all} hypotheses
+  can be enforced, which is occasionally useful for diagnostic
+  purposes.
+
+  \item @{attribute show_tags} controls printing of extra annotations
+  within theorems, such as internal position information, or the case
+  names being attached by the attribute @{attribute case_names}.
+
+  Note that the @{attribute tagged} and @{attribute untagged}
+  attributes provide low-level access to the collection of tags
+  associated with a theorem.
+
+  \item @{attribute show_question_marks} controls printing of question
+  marks for schematic variables, such as @{text ?x}.  Only the leading
+  question mark is affected, the remaining text is unchanged
+  (including proper markup for schematic variables that might be
+  relevant for user interfaces).
+
+  \end{description}
+*}
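+
+text {*
+  Like other configuration options (\secref{sec:config}), these may
+  be changed via @{command "declare"} in a theory, e.g.
+
+\begin{ttbox}
+  declare [[show_types = true, show_sorts = false]]
+\end{ttbox}
+
+  or locally within a proof via @{text "using [[show_types = true]]"}.
+*}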
+
+
+subsection {* Alternative print modes \label{sec:print-modes} *}
+
+text {*
+  \begin{mldecls}
+    @{index_ML print_mode_value: "unit -> string list"} \\
+    @{index_ML Print_Mode.with_modes: "string list -> ('a -> 'b) -> 'a -> 'b"} \\
+  \end{mldecls}
+
+  The \emph{print mode} facility allows modifying various operations
+  for printing.  Commands like @{command typ}, @{command term},
+  @{command thm} (see \secref{sec:print-diag}) take additional print
+  modes as optional argument.  The underlying ML operations are as
+  follows.
+
+  \begin{description}
+
+  \item @{ML "print_mode_value ()"} yields the list of currently
+  active print mode names.  This should be understood as a symbolic
+  representation of certain individual features for printing (with
+  precedence from left to right).
+
+  \item @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
+  @{text "f x"} in an execution context where the print mode is
+  prepended by the given @{text "modes"}.  This provides a thread-safe
+  way to augment print modes.  It is also monotonic in the set of mode
+  names: it retains the default print mode that certain
+  user-interfaces might have installed for their proper functioning!
+
+  \end{description}
+
+  \begin{warn}
+  The old global reference @{ML print_mode} should never be used
+  directly in applications.  Its main reason for being publicly
+  accessible is to support historic versions of Proof~General.
+  \end{warn}
+
+  \medskip The pretty printer for inner syntax maintains alternative
+  mixfix productions for any print mode name invented by the user, say
+  in commands like @{command notation} or @{command abbreviation}.
+  Mode names can be arbitrary, but the following ones have a specific
+  meaning by convention:
+
+  \begin{itemize}
+
+  \item @{verbatim "\"\""} (the empty string): default mode;
+  implicitly active as last element in the list of modes.
+
+  \item @{verbatim input}: dummy print mode that is never active; may
+  be used to specify notation that is only available for input.
+
+  \item @{verbatim internal}: dummy print mode that is never active;
+  used internally in Isabelle/Pure.
+
+  \item @{verbatim xsymbols}: enable proper mathematical symbols
+  instead of ASCII art.\footnote{This traditional mode name stems from
+  the ``X-Symbol'' package for old versions of Proof~General with XEmacs,
+  although that package has been superseded by Unicode in recent
+  years.}
+
+  \item @{verbatim HTML}: additional mode that is active in HTML
+  presentation of Isabelle theory sources; allows providing
+  alternative output notation.
+
+  \item @{verbatim latex}: additional mode that is active in {\LaTeX}
+  document preparation of Isabelle theory sources; allows providing
+  alternative output notation.
+
+  \end{itemize}
+*}
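+
+text {*
+  For example, the effect of particular print modes may be explored
+  with diagnostic commands (\secref{sec:print-diag}) like this:
+
+\begin{ttbox}
+  term (latex) "x + y"
+  thm (latex xsymbols) refl
+\end{ttbox}
+
+  The given modes are active for the output of that single command
+  only; the global print mode remains unchanged.
+*}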
+
+
+subsection {* Printing limits *}
+
+text {*
+  \begin{mldecls}
+    @{index_ML Pretty.margin_default: "int Unsynchronized.ref"} \\
+    @{index_ML print_depth: "int -> unit"} \\
+  \end{mldecls}
+
+  These ML functions set limits for pretty printed text.
+
+  \begin{description}
+
+  \item @{ML Pretty.margin_default} indicates the global default for
+  the right margin of the built-in pretty printer, with initial value
+  76.  Note that user-interfaces typically control margins
+  automatically when resizing windows, or even bypass the formatting
+  engine of Isabelle/ML altogether and do it within the front end via
+  Isabelle/Scala.
+
+  \item @{ML print_depth}~@{text n} limits the printing depth of the
+  ML toplevel pretty printer; the precise effect depends on the ML
+  compiler and run-time system.  Typically @{text n} should be less
+  than 10.  Bigger values such as 100--1000 are useful for debugging.
+
+  \end{description}
+*}
+
+
+section {* Mixfix annotations \label{sec:mixfix} *}
+
+text {* Mixfix annotations specify concrete \emph{inner syntax} of
+  Isabelle types and terms.  Locally fixed parameters in toplevel
+  theorem statements, locale and class specifications also admit
+  mixfix annotations in a fairly uniform manner.  A mixfix annotation
+  describes the concrete syntax, the translation to abstract
+  syntax, and the pretty printing.  Special case annotations provide a
+  simple means of specifying infix operators and binders.
+
+  Isabelle mixfix syntax is inspired by {\OBJ} \cite{OBJ}.  It allows
+  specifying any context-free priority grammar, which is more general
+  than the fixity declarations of ML and Prolog.
+
+  @{rail "
+    @{syntax_def mixfix}: '(' mfix ')'
+    ;
+    @{syntax_def struct_mixfix}: '(' ( mfix | @'structure' ) ')'
+    ;
+
+    mfix: @{syntax template} prios? @{syntax nat}? |
+      (@'infix' | @'infixl' | @'infixr') @{syntax template} @{syntax nat} |
+      @'binder' @{syntax template} prios? @{syntax nat}
+    ;
+    template: string
+    ;
+    prios: '[' (@{syntax nat} + ',') ']'
+  "}
+
+  The string given as @{text template} may include literal text,
+  spacing, blocks, and arguments (denoted by ``@{text _}''); the
+  special symbol ``@{verbatim "\<index>"}'' (printed as ``@{text "\<index>"}'')
+  represents an index argument that specifies an implicit structure
+  reference (see also \secref{sec:locale}).  Infix and binder
+  declarations provide common abbreviations for particular mixfix
+  declarations.  So in practice, mixfix templates mostly degenerate to
+  literal text for concrete syntax, such as ``@{verbatim "++"}'' for
+  an infix symbol.
+*}
+
+
+subsection {* The general mixfix form *}
+
+text {* In full generality, mixfix declarations work as follows.
+  Suppose a constant @{text "c :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} is annotated by
+  @{text "(mixfix [p\<^sub>1, \<dots>, p\<^sub>n] p)"}, where @{text "mixfix"} is a string
+  @{text "d\<^sub>0 _ d\<^sub>1 _ \<dots> _ d\<^sub>n"} consisting of delimiters that surround
+  argument positions as indicated by underscores.
+
+  Altogether this determines a production for a context-free priority
+  grammar, where for each argument @{text "i"} the syntactic category
+  is determined by @{text "\<tau>\<^sub>i"} (with priority @{text "p\<^sub>i"}), and the
+  result category is determined from @{text "\<tau>"} (with priority @{text
+  "p"}).  Priority specifications are optional, with default 0 for
+  arguments and 1000 for the result.\footnote{Omitting priorities is
+  prone to syntactic ambiguities unless the delimiter tokens determine
+  fully bracketed notation, as in @{text "if _ then _ else _ fi"}.}
+
+  Since @{text "\<tau>"} may be again a function type, the constant
+  type scheme may have more argument positions than the mixfix
+  pattern.  Printing a nested application @{text "c t\<^sub>1 \<dots> t\<^sub>m"} for
+  @{text "m > n"} works by attaching concrete notation only to the
+  innermost part, essentially by printing @{text "(c t\<^sub>1 \<dots> t\<^sub>n) \<dots> t\<^sub>m"}
+  instead.  If a term has fewer arguments than specified in the mixfix
+  template, the concrete syntax is ignored.
+
+  \medskip A mixfix template may also contain additional directives
+  for pretty printing, notably spaces, blocks, and breaks.  The
+  general template format is a sequence over any of the following
+  entities.
+
+  \begin{description}
+
+  \item @{text "d"} is a delimiter, namely a non-empty sequence of
+  characters other than the following special characters:
+
+  \smallskip
+  \begin{tabular}{ll}
+    @{verbatim "'"} & single quote \\
+    @{verbatim "_"} & underscore \\
+    @{text "\<index>"} & index symbol \\
+    @{verbatim "("} & open parenthesis \\
+    @{verbatim ")"} & close parenthesis \\
+    @{verbatim "/"} & slash \\
+  \end{tabular}
+  \medskip
+
+  \item @{verbatim "'"} escapes the special meaning of these
+  meta-characters, producing a literal version of the following
+  character, unless that is a blank.
+
+  A single quote followed by a blank separates delimiters, without
+  affecting printing, but input tokens may have additional white space
+  here.
+
+  \item @{verbatim "_"} is an argument position, which stands for a
+  certain syntactic category in the underlying grammar.
+
+  \item @{text "\<index>"} is an indexed argument position; this is the place
+  where implicit structure arguments can be attached.
+
+  \item @{text "s"} is a non-empty sequence of spaces for printing.
+  This and the following specifications do not affect parsing at all.
+
+  \item @{verbatim "("}@{text n} opens a pretty printing block.  The
+  optional number specifies how much indentation to add when a line
+  break occurs within the block.  If the parenthesis is not followed
+  by digits, the indentation defaults to 0.  A block specified via
+  @{verbatim "(00"} is unbreakable.
+
+  \item @{verbatim ")"} closes a pretty printing block.
+
+  \item @{verbatim "//"} forces a line break.
+
+  \item @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
+  stands for the string of spaces (zero or more) right after the
+  slash.  These spaces are printed if the break is \emph{not} taken.
+
+  \end{description}
+
+  The general idea of pretty printing with blocks and breaks is also
+  described in \cite{paulson-ml2}; it goes back to \cite{Oppen:1980}.
+*}
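+
+text {*
+  As a concrete example (with hypothetical names), a fully-bracketed
+  conditional may be declared with a mixfix template that uses a
+  pretty printing block and potential line breaks:
+
+\begin{ttbox}
+  definition If2 :: "bool => 'a => 'a => 'a"   (* If2 is hypothetical *)
+      ("(IF _/ THEN _/ ELSE _/ FI)")
+    where "If2 b x y = (if b then x else y)"
+\end{ttbox}
+
+  Priorities are omitted here, since the delimiters already determine
+  fully bracketed notation.
+*}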
+
+
+subsection {* Infixes *}
+
+text {* Infix operators are specified by convenient short forms that
+  abbreviate general mixfix annotations as follows:
+
+  \begin{center}
+  \begin{tabular}{lll}
+
+  @{verbatim "("}@{keyword_def "infix"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
+  & @{text "\<mapsto>"} &
+  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
+  @{verbatim "("}@{keyword_def "infixl"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
+  & @{text "\<mapsto>"} &
+  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p"}@{verbatim ", "}@{text "p + 1"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
+  @{verbatim "("}@{keyword_def "infixr"}~@{verbatim "\""}@{text sy}@{verbatim "\""} @{text "p"}@{verbatim ")"}
+  & @{text "\<mapsto>"} &
+  @{verbatim "(\"(_ "}@{text sy}@{verbatim "/ _)\" ["}@{text "p + 1"}@{verbatim ", "}@{text "p"}@{verbatim "]"}@{text " p"}@{verbatim ")"} \\
+
+  \end{tabular}
+  \end{center}
+
+  The mixfix template @{verbatim "\"(_ "}@{text sy}@{verbatim "/ _)\""}
+  specifies two argument positions; the delimiter is preceded by a
+  space and followed by a space or line break; the entire phrase is a
+  pretty printing block.
+
+  The alternative notation @{verbatim "op"}~@{text sy} is introduced
+  in addition.  Thus any infix operator may be written in prefix form
+  (as in ML), independently of the number of arguments in the term.
+*}
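+
+text {*
+  For instance, a hypothetical exclusive-or connective may be
+  introduced as follows; afterwards it is also available in prefix
+  form as @{verbatim "op [+]"}:
+
+\begin{ttbox}
+  definition xor :: "bool => bool => bool"  (infixr "[+]" 60)
+    where "x [+] y = (x ~= y)"   (* xor is hypothetical *)
+\end{ttbox}
+*}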
+
+
+subsection {* Binders *}
+
+text {* A \emph{binder} is a variable-binding construct such as a
+  quantifier.  The idea of formalizing @{text "\<forall>x. b"} as @{text "All
+  (\<lambda>x. b)"} for @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow> bool"} already goes back
+  to \cite{church40}.  Isabelle declarations of certain higher-order
+  operators may be annotated with @{keyword_def "binder"} annotations
+  as follows:
+
+  \begin{center}
+  @{text "c :: "}@{verbatim "\""}@{text "(\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2) \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  ("}@{keyword "binder"}@{verbatim " \""}@{text "sy"}@{verbatim "\" ["}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
+  \end{center}
+
+  This introduces concrete binder syntax @{text "sy x. b"}, where
+  @{text x} is a bound variable of type @{text "\<tau>\<^sub>1"}, the body @{text
+  b} has type @{text "\<tau>\<^sub>2"} and the whole term has type @{text "\<tau>\<^sub>3"}.
+  The optional integer @{text p} specifies the syntactic priority of
+  the body; the default is @{text "q"}, which is also the priority of
+  the whole construct.
+
+  Internally, the binder syntax is expanded to something like this:
+  \begin{center}
+  @{text "c_binder :: "}@{verbatim "\""}@{text "idts \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"}@{verbatim "\"  (\"(3"}@{text sy}@{verbatim "_./ _)\" [0, "}@{text "p"}@{verbatim "] "}@{text "q"}@{verbatim ")"}
+  \end{center}
+
+  Here @{syntax (inner) idts} is the nonterminal symbol for a list of
+  identifiers with optional type constraints (see also
+  \secref{sec:pure-grammar}).  The mixfix template @{verbatim
+  "\"(3"}@{text sy}@{verbatim "_./ _)\""} defines argument positions
+  for the bound identifiers and the body, separated by a dot with
+  optional line break; the entire phrase is a pretty printing block of
+  indentation level 3.  Note that there is no extra space after @{text
+  "sy"}, so it needs to be included user specification if the binder
+  syntax ends with a token that may be continued by an identifier
+  token at the start of @{syntax (inner) idts}.
+
+  Furthermore, a syntax translation transforms @{text "c_binder x\<^sub>1
+  \<dots> x\<^sub>n b"} into iterated application @{text "c (\<lambda>x\<^sub>1. \<dots> c (\<lambda>x\<^sub>n. b)\<dots>)"}.
+  This works in both directions, for parsing and printing.  *}
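+
+text {*
+  For example, a universal quantifier of an object-logic could be
+  declared along these lines (a sketch only; cf.\ the actual @{text
+  All} of Isabelle/HOL):
+
+\begin{ttbox}
+  consts All :: "('a => bool) => bool"  (binder "ALL " 10)
+    (* sketch, not the actual HOL declaration *)
+\end{ttbox}
+
+  Note the trailing space in the template: since @{verbatim ALL} may
+  be followed directly by an identifier, the delimiter requires
+  explicit separation, as explained above.
+*}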
+
+
+section {* Explicit notation \label{sec:notation} *}
+
+text {*
+  \begin{matharray}{rcll}
+    @{command_def "type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "no_type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "no_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
+    @{command_def "write"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  Commands that introduce new logical entities (terms or types)
+  usually allow providing mixfix annotations on the spot, which is
+  convenient for default notation.  Nonetheless, the syntax may be
+  modified later on by declarations for explicit notation.  This
+  allows adding or deleting mixfix annotations of existing logical
+  entities within the current context.
+
+  @{rail "
+    (@@{command type_notation} | @@{command no_type_notation}) @{syntax target}?
+      @{syntax mode}? \\ (@{syntax nameref} @{syntax mixfix} + @'and')
+    ;
+    (@@{command notation} | @@{command no_notation}) @{syntax target}? @{syntax mode}? \\
+      (@{syntax nameref} @{syntax struct_mixfix} + @'and')
+    ;
+    @@{command write} @{syntax mode}? (@{syntax nameref} @{syntax struct_mixfix} + @'and')
+  "}
+
+  \begin{description}
+
+  \item @{command "type_notation"}~@{text "c (mx)"} associates mixfix
+  syntax with an existing type constructor.  The arity of the
+  constructor is retrieved from the context.
+
+  \item @{command "no_type_notation"} is similar to @{command
+  "type_notation"}, but removes the specified syntax annotation from
+  the present context.
+
+  \item @{command "notation"}~@{text "c (mx)"} associates mixfix
+  syntax with an existing constant or fixed variable.  The type
+  declaration of the given entity is retrieved from the context.
+
+  \item @{command "no_notation"} is similar to @{command "notation"},
+  but removes the specified syntax annotation from the present
+  context.
+
+  \item @{command "write"} is similar to @{command "notation"}, but
+  works within an Isar proof body.
+
+  \end{description}
+*}
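+
+text {*
+  For example (reusing the hypothetical @{text xor} constant from
+  \secref{sec:mixfix}):
+
+\begin{ttbox}
+  notation xor  (infixr "XOR" 60)      (* xor is hypothetical *)
+  no_notation xor  (infixr "XOR" 60)
+\end{ttbox}
+
+  Note that @{command "no_notation"} has to repeat the original
+  mixfix annotation in order to remove it.
+*}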
+
+
+section {* The Pure syntax \label{sec:pure-syntax} *}
+
+subsection {* Lexical matters \label{sec:inner-lex} *}
+
+text {* The inner lexical syntax vaguely resembles the outer one
+  (\secref{sec:outer-lex}), but some details are different.  There are
+  two main categories of inner syntax tokens:
+
+  \begin{enumerate}
+
+  \item \emph{delimiters} --- the literal tokens occurring in
+  productions of the given priority grammar (cf.\
+  \secref{sec:priority-grammar});
+
+  \item \emph{named tokens} --- various categories of identifiers etc.
+
+  \end{enumerate}
+
+  Delimiters override named tokens and may thus render certain
+  identifiers inaccessible.  Sometimes the logical context admits
+  alternative ways to refer to the same entity, potentially via
+  qualified names.
+
+  \medskip The categories for named tokens are defined once and for
+  all as follows, reusing some categories of the outer token syntax
+  (\secref{sec:outer-lex}).
+
+  \begin{center}
+  \begin{supertabular}{rcl}
+    @{syntax_def (inner) id} & = & @{syntax_ref ident} \\
+    @{syntax_def (inner) longid} & = & @{syntax_ref longident} \\
+    @{syntax_def (inner) var} & = & @{syntax_ref var} \\
+    @{syntax_def (inner) tid} & = & @{syntax_ref typefree} \\
+    @{syntax_def (inner) tvar} & = & @{syntax_ref typevar} \\
+    @{syntax_def (inner) num_token} & = & @{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat} \\
+    @{syntax_def (inner) float_token} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
+    @{syntax_def (inner) xnum_token} & = & @{verbatim "#"}@{syntax_ref nat}@{text "  |  "}@{verbatim "#-"}@{syntax_ref nat} \\
+
+    @{syntax_def (inner) str_token} & = & @{verbatim "''"} @{text "\<dots>"} @{verbatim "''"} \\
+  \end{supertabular}
+  \end{center}
+
+  The token categories @{syntax (inner) num_token}, @{syntax (inner)
+  float_token}, @{syntax (inner) xnum_token}, and @{syntax (inner)
+  str_token} are not used in Pure.  Object-logics may implement numerals
+  and string constants by adding appropriate syntax declarations,
+  together with some translation functions (e.g.\ see Isabelle/HOL).
+
+  The derived categories @{syntax_def (inner) num_const}, @{syntax_def
+  (inner) float_const}, and @{syntax_def (inner) xnum_const} provide
+  robust access to the respective tokens: the syntax tree holds a
+  syntactic constant instead of a free variable.
+*}
+
+
+subsection {* Priority grammars \label{sec:priority-grammar} *}
+
+text {* A context-free grammar consists of a set of \emph{terminal
+  symbols}, a set of \emph{nonterminal symbols} and a set of
+  \emph{productions}.  Productions have the form @{text "A = \<gamma>"},
+  where @{text A} is a nonterminal and @{text \<gamma>} is a string of
+  terminals and nonterminals.  One designated nonterminal is called
+  the \emph{root symbol}.  The language defined by the grammar
+  consists of all strings of terminals that can be derived from the
+  root symbol by applying productions as rewrite rules.
+
+  The standard Isabelle parser for inner syntax uses a \emph{priority
+  grammar}.  Each nonterminal is decorated by an integer priority:
+  @{text "A\<^sup>(\<^sup>p\<^sup>)"}.  In a derivation, @{text "A\<^sup>(\<^sup>p\<^sup>)"} may be rewritten
+  using a production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} only if @{text "p \<le> q"}.  Any
+  priority grammar can be translated into a normal context-free
+  grammar by introducing new nonterminals and productions.
+
+  \medskip Formally, a set of context-free productions @{text G}
+  induces a derivation relation @{text "\<longrightarrow>\<^sub>G"} as follows.  Let @{text
+  \<alpha>} and @{text \<beta>} denote strings of terminal or nonterminal symbols.
+  Then @{text "\<alpha> A\<^sup>(\<^sup>p\<^sup>) \<beta> \<longrightarrow>\<^sub>G \<alpha> \<gamma> \<beta>"} holds if and only if @{text G}
+  contains some production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} for @{text "p \<le> q"}.
+
+  \medskip The following grammar for arithmetic expressions
+  demonstrates how binding power and associativity of operators can be
+  enforced by priorities.
+
+  \begin{center}
+  \begin{tabular}{rclr}
+  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim "("} @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim ")"} \\
+  @{text "A\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "="} & @{verbatim 0} \\
+  @{text "A\<^sup>(\<^sup>0\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} \\
+  @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "="} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} \\
+  @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "="} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} \\
+  \end{tabular}
+  \end{center}
+  The choice of priorities determines that @{verbatim "-"} binds
+  tighter than @{verbatim "*"}, which binds tighter than @{verbatim
+  "+"}.  Furthermore @{verbatim "+"} associates to the left and
+  @{verbatim "*"} to the right.
+
+  \medskip For clarity, grammars obey these conventions:
+  \begin{itemize}
+
+  \item All priorities must lie between 0 and 1000.
+
+  \item Priority 0 on the right-hand side and priority 1000 on the
+  left-hand side may be omitted.
+
+  \item The production @{text "A\<^sup>(\<^sup>p\<^sup>) = \<alpha>"} is written as @{text "A = \<alpha>
+  (p)"}, i.e.\ the priority of the left-hand side actually appears in
+  a column on the far right.
+
+  \item Alternatives are separated by @{text "|"}.
+
+  \item Repetition is indicated by dots @{text "(\<dots>)"} in an informal
+  but obvious way.
+
+  \end{itemize}
+
+  Using these conventions, the example grammar specification above
+  takes the form:
+  \begin{center}
+  \begin{tabular}{rclc}
+    @{text A} & @{text "="} & @{verbatim "("} @{text A} @{verbatim ")"} \\
+              & @{text "|"} & @{verbatim 0} & \qquad\qquad \\
+              & @{text "|"} & @{text A} @{verbatim "+"} @{text "A\<^sup>(\<^sup>1\<^sup>)"} & @{text "(0)"} \\
+              & @{text "|"} & @{text "A\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "*"} @{text "A\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
+              & @{text "|"} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
+  \end{tabular}
+  \end{center}
+*}
+
+
+subsection {* The Pure grammar \label{sec:pure-grammar} *}
+
+text {* The priority grammar of the @{text "Pure"} theory is defined
+  approximately like this:
+
+  \begin{center}
+  \begin{supertabular}{rclr}
+
+  @{syntax_def (inner) any} & = & @{text "prop  |  logic"} \\\\
+
+  @{syntax_def (inner) prop} & = & @{verbatim "("} @{text prop} @{verbatim ")"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
+    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "=="} @{text "any\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
+    & @{text "|"} & @{text "any\<^sup>(\<^sup>3\<^sup>)"} @{text "\<equiv>"} @{text "any\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>3\<^sup>)"} @{verbatim "&&&"} @{text "prop\<^sup>(\<^sup>2\<^sup>)"} & @{text "(2)"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{text "prop\<^sup>(\<^sup>2\<^sup>)"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{verbatim "[|"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{verbatim "|]"} @{verbatim "==>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{text "\<lbrakk>"} @{text prop} @{verbatim ";"} @{text "\<dots>"} @{verbatim ";"} @{text prop} @{text "\<rbrakk>"} @{text "\<Longrightarrow>"} @{text "prop\<^sup>(\<^sup>1\<^sup>)"} & @{text "(1)"} \\
+    & @{text "|"} & @{verbatim "!!"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
+    & @{text "|"} & @{text "\<And>"} @{text idts} @{verbatim "."} @{text prop} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim OFCLASS} @{verbatim "("} @{text type} @{verbatim ","} @{text logic} @{verbatim ")"} \\
+    & @{text "|"} & @{verbatim SORT_CONSTRAINT} @{verbatim "("} @{text type} @{verbatim ")"} \\
+    & @{text "|"} & @{verbatim TERM} @{text logic} \\
+    & @{text "|"} & @{verbatim PROP} @{text aprop} \\\\
+
+  @{syntax_def (inner) aprop} & = & @{verbatim "("} @{text aprop} @{verbatim ")"} \\
+    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
+    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
+    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
+    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\\\
+
+  @{syntax_def (inner) logic} & = & @{verbatim "("} @{text logic} @{verbatim ")"} \\
+    & @{text "|"} & @{text "logic\<^sup>(\<^sup>4\<^sup>)"} @{verbatim "::"} @{text type} & @{text "(3)"} \\
+    & @{text "|"} & @{text "id  |  longid  |  var  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "..."} \\
+    & @{text "|"} & @{verbatim CONST} @{text "id  |  "}@{verbatim CONST} @{text "longid"} \\
+    & @{text "|"} & @{verbatim XCONST} @{text "id  |  "}@{verbatim XCONST} @{text "longid"} \\
+    & @{text "|"} & @{text "logic\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)  any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) \<dots> any\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} & @{text "(999)"} \\
+    & @{text "|"} & @{text "\<struct> index\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>)"} \\
+    & @{text "|"} & @{verbatim "%"} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
+    & @{text "|"} & @{text \<lambda>} @{text pttrns} @{verbatim "."} @{text "any\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
+    & @{text "|"} & @{verbatim op} @{verbatim "=="}@{text "  |  "}@{verbatim op} @{text "\<equiv>"}@{text "  |  "}@{verbatim op} @{verbatim "&&&"} \\
+    & @{text "|"} & @{verbatim op} @{verbatim "==>"}@{text "  |  "}@{verbatim op} @{text "\<Longrightarrow>"} \\
+    & @{text "|"} & @{verbatim TYPE} @{verbatim "("} @{text type} @{verbatim ")"} \\\\
+
+  @{syntax_def (inner) idt} & = & @{verbatim "("} @{text idt} @{verbatim ")"}@{text "  |  id  |  "}@{verbatim "_"} \\
+    & @{text "|"} & @{text id} @{verbatim "::"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim "_"} @{verbatim "::"} @{text type} & @{text "(0)"} \\\\
+
+  @{syntax_def (inner) index} & = & @{verbatim "\<^bsub>"} @{text "logic\<^sup>(\<^sup>0\<^sup>)"} @{verbatim "\<^esub>"}@{text "  |  |  \<index>"} \\\\
+
+  @{syntax_def (inner) idts} & = & @{text "idt  |  idt\<^sup>(\<^sup>1\<^sup>) idts"} & @{text "(0)"} \\\\
+
+  @{syntax_def (inner) pttrn} & = & @{text idt} \\\\
+
+  @{syntax_def (inner) pttrns} & = & @{text "pttrn  |  pttrn\<^sup>(\<^sup>1\<^sup>) pttrns"} & @{text "(0)"} \\\\
+
+  @{syntax_def (inner) type} & = & @{verbatim "("} @{text type} @{verbatim ")"} \\
+    & @{text "|"} & @{text "tid  |  tvar  |  "}@{verbatim "_"} \\
+    & @{text "|"} & @{text "tid"} @{verbatim "::"} @{text "sort  |  tvar  "}@{verbatim "::"} @{text "sort  |  "}@{verbatim "_"} @{verbatim "::"} @{text "sort"} \\
+    & @{text "|"} & @{text "type_name  |  type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) type_name"} \\
+    & @{text "|"} & @{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text type_name} \\
+    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
+    & @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
+  @{syntax_def (inner) type_name} & = & @{text "id  |  longid"} \\\\
+
+  @{syntax_def (inner) sort} & = & @{syntax class_name}~@{text "  |  "}@{verbatim "{}"} \\
+    & @{text "|"} & @{verbatim "{"} @{syntax class_name} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{syntax class_name} @{verbatim "}"} \\
+  @{syntax_def (inner) class_name} & = & @{text "id  |  longid"} \\
+  \end{supertabular}
+  \end{center}
+
+  \medskip Here literal terminals are printed @{verbatim "verbatim"};
+  see also \secref{sec:inner-lex} for further token categories of the
+  inner syntax.  The meaning of the nonterminals defined by the above
+  grammar is as follows:
+
+  \begin{description}
+
+  \item @{syntax_ref (inner) any} denotes any term.
+
+  \item @{syntax_ref (inner) prop} denotes meta-level propositions,
+  which are terms of type @{typ prop}.  The syntax of such formulae of
+  the meta-logic is carefully distinguished from usual conventions for
+  object-logics.  In particular, plain @{text "\<lambda>"}-term notation is
+  \emph{not} recognized as @{syntax (inner) prop}.
+
+  \item @{syntax_ref (inner) aprop} denotes atomic propositions, which
+  are embedded into regular @{syntax (inner) prop} by means of an
+  explicit @{verbatim PROP} token.
+
+  Terms of type @{typ prop} with non-constant head, e.g.\ a plain
+  variable, are printed in this form.  Constants that yield type @{typ
+  prop} are expected to provide their own concrete syntax; otherwise
+  the printed version will appear like @{syntax (inner) logic} and
+  cannot be parsed again as @{syntax (inner) prop}.
+
+  \item @{syntax_ref (inner) logic} denotes arbitrary terms of a
+  logical type, excluding type @{typ prop}.  This is the main
+  syntactic category of object-logic entities, covering plain @{text
+  \<lambda>}-term notation (variables, abstraction, application), plus
+  anything defined by the user.
+
+  When specifying notation for logical entities, all logical types
+  (excluding @{typ prop}) are \emph{collapsed} to this single category
+  of @{syntax (inner) logic}.
+
+  \item @{syntax_ref (inner) index} denotes an optional index term for
+  indexed syntax.  If omitted, it refers to the first @{keyword
+  "structure"} variable in the context.  The special dummy ``@{text
+  "\<index>"}'' serves as pattern variable in mixfix annotations that
+  introduce indexed notation.
+
+  \item @{syntax_ref (inner) idt} denotes identifiers, possibly
+  constrained by types.
+
+  \item @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
+  (inner) idt}.  This is the most basic category for variables in
+  iterated binders, such as @{text "\<lambda>"} or @{text "\<And>"}.
+
+  \item @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
+  denote patterns for abstraction, case bindings etc.  In Pure, these
+  categories start as a mere copy of @{syntax (inner) idt} and
+  @{syntax (inner) idts}, respectively.  Object-logics may add
+  additional productions for binding forms.
+
+  \item @{syntax_ref (inner) type} denotes types of the meta-logic.
+
+  \item @{syntax_ref (inner) sort} denotes meta-level sorts.
+
+  \end{description}
+
+  Here are some further explanations of certain syntax features.
+
+  \begin{itemize}
+
+  \item In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
+  parsed as @{text "x :: (nat y)"}, treating @{text y} like a type
+  constructor applied to @{text nat}.  To avoid this interpretation,
+  write @{text "(x :: nat) y"} with explicit parentheses.
+
+  \item Similarly, @{text "x :: nat y :: nat"} is parsed as @{text "x ::
+  (nat y :: nat)"}.  The correct form is @{text "(x :: nat) (y ::
+  nat)"}, or @{text "(x :: nat) y :: nat"} if @{text y} is last in the
+  sequence of identifiers.
+
+  \item Type constraints for terms bind very weakly.  For example,
+  @{text "x < y :: nat"} is normally parsed as @{text "(x < y) ::
+  nat"}, unless @{text "<"} has a very low priority, in which case the
+  input is likely to be ambiguous.  The correct form is @{text "x < (y
+  :: nat)"}.
+
+  \item Constraints may be written either with two literal colons
+  ``@{verbatim "::"}'' or with the double-colon symbol @{verbatim "\<Colon>"},
+  which actually looks exactly the same in some {\LaTeX} styles.
+
+  \item Dummy variables (written as underscore) may occur in different
+  roles.
+
+  \begin{description}
+
+  \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
+  anonymous inference parameter, which is filled-in according to the
+  most general type produced by the type-checking phase.
+
+  \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where
+  the body does not refer to the binding introduced here.  As in the
+  term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
+  "\<lambda>x y. x"}.
+
+  \item A free ``@{text "_"}'' refers to an implicit outer binding.
+  Higher definitional packages usually allow forms like @{text "f x _
+  = x"}.
+
+  \item A schematic ``@{text "_"}'' (within a term pattern, see
+  \secref{sec:term-decls}) refers to an anonymous variable that is
+  implicitly abstracted over its context of locally bound variables.
+  For example, this allows pattern matching of @{text "{x. f x = g
+  x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by
+  using both bound and schematic dummies.
+
+  \end{description}
+
+  \item The three literal dots ``@{verbatim "..."}'' may also be
+  written as the ellipsis symbol @{verbatim "\<dots>"}.  In both cases this
+  refers to a special schematic variable, which is bound in the
+  context.  This special term abbreviation works nicely with
+  calculational reasoning (\secref{sec:calculation}).
+
+  \item @{verbatim CONST} ensures that the given identifier is treated
+  as a constant term, and passed through the parse tree in fully
+  internalized form.  This is particularly relevant for translation
+  rules (\secref{sec:syn-trans}), notably on the RHS.
+
+  \item @{verbatim XCONST} is similar to @{verbatim CONST}, but
+  retains the constant name as given.  This is only relevant to
+  translation rules (\secref{sec:syn-trans}), notably on the LHS.
+
+  \end{itemize}
+*}
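+
+text {* To see the effect of the parenthesized forms recommended
+  above, the following sketch may be checked in any HOL context such
+  as theory @{verbatim Main} (the concrete terms are invented for
+  illustration):
+\begin{ttbox}
+  term "%(x::nat) y. x"    (*binder with constrained x*)
+  term "x < (y::nat)"      (*constrain y, not the comparison*)
+\end{ttbox}
+  Without the parentheses, @{verbatim "%x::nat y. x"} would treat
+  @{verbatim "nat y"} as a type, as explained above. *}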
+
+
+subsection {* Inspecting the syntax *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_syntax"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  \begin{description}
+
+  \item @{command "print_syntax"} prints the inner syntax of the
+  current context.  The output can be quite large; the most important
+  sections are explained below.
+
+  \begin{description}
+
+  \item @{text "lexicon"} lists the delimiters of the inner token
+  language; see \secref{sec:inner-lex}.
+
+  \item @{text "prods"} lists the productions of the underlying
+  priority grammar; see \secref{sec:priority-grammar}.
+
+  The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
+  "A[p]"}; delimiters are quoted.  Many productions have an extra
+  @{text "\<dots> => name"}.  These names later become the heads of parse
+  trees; they also guide the pretty printer.
+
+  Productions without such parse tree names are called \emph{copy
+  productions}.  Their right-hand side must have exactly one
+  nonterminal symbol (or named token).  The parser does not create a
+  new parse tree node for copy productions, but simply returns the
+  parse tree of the right-hand symbol.
+
+  If the right-hand side of a copy production consists of a single
+  nonterminal without any delimiters, then it is called a \emph{chain
+  production}.  Chain productions act as abbreviations: conceptually,
+  they are removed from the grammar by adding new productions.
+  Priority information attached to chain productions is ignored; only
+  the dummy value @{text "-1"} is displayed.
+
+  \item @{text "print modes"} lists the alternative print modes
+  provided by this grammar; see \secref{sec:print-modes}.
+
+  \item @{text "parse_rules"} and @{text "print_rules"} relate to
+  syntax translations (macros); see \secref{sec:syn-trans}.
+
+  \item @{text "parse_ast_translation"} and @{text
+  "print_ast_translation"} list sets of constants that invoke
+  translation functions for abstract syntax trees, which are only
+  required in very special situations; see \secref{sec:tr-funs}.
+
+  \item @{text "parse_translation"} and @{text "print_translation"}
+  list the sets of constants that invoke regular translation
+  functions; see \secref{sec:tr-funs}.
+
+  \end{description}
+
+  \end{description}
+*}
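+
+text {* A minimal usage sketch (the actual output is large and
+  theory-dependent, and thus elided here):
+\begin{ttbox}
+  theory Scratch imports Main begin
+  print_syntax
+  end
+\end{ttbox}
+*}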
+
+
+subsection {* Ambiguity of parsed expressions *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{attribute_def syntax_ambiguity_warning} & : & @{text attribute} & default @{text true} \\
+    @{attribute_def syntax_ambiguity_limit} & : & @{text attribute} & default @{text 10} \\
+  \end{tabular}
+
+  Depending on the grammar and the given input, parsing may be
+  ambiguous.  Isabelle lets the Earley parser enumerate all possible
+  parse trees, and then tries to make the best out of the situation.
+  Terms that cannot be type-checked are filtered out, which often
+  leads to a unique result in the end.  Unlike regular type
+  reconstruction, which is applied to the whole collection of input
+  terms simultaneously, the filtering stage only treats each given
+  term in isolation.  Filtering is also not attempted for individual
+  types or raw ASTs (as required for @{command translations}).
+
+  Certain warning or error messages are printed, depending on the
+  situation and the given configuration options.  Parsing ultimately
+  fails if multiple results remain after the filtering phase.
+
+  \begin{description}
+
+  \item @{attribute syntax_ambiguity_warning} controls output of
+  explicit warning messages about syntax ambiguity.
+
+  \item @{attribute syntax_ambiguity_limit} determines the number of
+  resulting parse trees that are shown as part of the printed message
+  in case of an ambiguity.
+
+  \end{description}
+*}
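+
+text {* Both options may be adjusted like any other configuration
+  option, e.g.\ as follows (a sketch; the concrete values are
+  arbitrary):
+\begin{ttbox}
+  declare [[syntax_ambiguity_warning = false]]
+  declare [[syntax_ambiguity_limit = 2]]
+\end{ttbox}
+*}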
+
+
+section {* Syntax transformations \label{sec:syntax-transformations} *}
+
+text {* The inner syntax engine of Isabelle provides separate
+  mechanisms to transform parse trees either as rewrite systems on
+  first-order ASTs (\secref{sec:syn-trans}), or ML functions on ASTs
+  or syntactic @{text "\<lambda>"}-terms (\secref{sec:tr-funs}).  This works
+  both for parsing and printing, as outlined in
+  \figref{fig:parse-print}.
+
+  \begin{figure}[htbp]
+  \begin{center}
+  \begin{tabular}{cl}
+  string          & \\
+  @{text "\<down>"}     & lexer + parser \\
+  parse tree      & \\
+  @{text "\<down>"}     & parse AST translation \\
+  AST             & \\
+  @{text "\<down>"}     & AST rewriting (macros) \\
+  AST             & \\
+  @{text "\<down>"}     & parse translation \\
+  --- pre-term ---    & \\
+  @{text "\<down>"}     & print translation \\
+  AST             & \\
+  @{text "\<down>"}     & AST rewriting (macros) \\
+  AST             & \\
+  @{text "\<down>"}     & print AST translation \\
+  string          &
+  \end{tabular}
+  \end{center}
+  \caption{Parsing and printing with translations}\label{fig:parse-print}
+  \end{figure}
+
+  These intermediate syntax tree formats eventually lead to a pre-term
+  with all names and binding scopes resolved, but most type
+  information still missing.  Explicit type constraints might be given by
+  the user, or implicit position information by the system --- both
+  need to be passed through carefully by syntax transformations.
+
+  Pre-terms are further processed by the so-called \emph{check} and
+  \emph{uncheck} phases that are intertwined with type-inference (see
+  also \cite{isabelle-implementation}).  The latter allows one to
+  operate on higher-order abstract syntax with proper binding and type
+  information already available.
+
+  As a rule of thumb, anything that manipulates bindings of variables
+  or constants needs to be implemented as syntax transformation (see
+  below).  Anything else is better done via check/uncheck: a prominent
+  example application is the @{command abbreviation} concept of
+  Isabelle/Pure. *}
+
+
+subsection {* Abstract syntax trees \label{sec:ast} *}
+
+text {* The ML datatype @{ML_type Ast.ast} explicitly represents the
+  intermediate AST format that is used for syntax rewriting
+  (\secref{sec:syn-trans}).  It is defined in ML as follows:
+  \begin{ttbox}
+  datatype ast =
+    Constant of string |
+    Variable of string |
+    Appl of ast list
+  \end{ttbox}
+
+  An AST is either an atom (constant or variable) or a list of (at
+  least two) subtrees.  Occasional diagnostic output of ASTs uses
+  notation that resembles the S-expressions of LISP.  Constant atoms are
+  shown as quoted strings, variable atoms as non-quoted strings and
+  applications as a parenthesized list of subtrees.  For example, the
+  AST
+  @{ML [display] "Ast.Appl
+  [Ast.Constant \"_abs\", Ast.Variable \"x\", Ast.Variable \"t\"]"}
+  is pretty-printed as @{verbatim "(\"_abs\" x t)"}.  Note that
+  @{verbatim "()"} and @{verbatim "(x)"} are excluded as ASTs, because
+  they have too few subtrees.
+
+  \medskip AST application is merely a pro-forma mechanism to indicate
+  certain syntactic structures.  Thus @{verbatim "(c a b)"} could mean
+  either term application or type application, depending on the
+  syntactic context.
+
+  Nested application like @{verbatim "((\"_abs\" x t) u)"} is also
+  possible, but ASTs are definitely first-order: the syntax constant
+  @{verbatim "\"_abs\""} does not bind the @{verbatim x} in any way.
+  Proper bindings are introduced in later stages of the term syntax,
+  where @{verbatim "(\"_abs\" x t)"} becomes an @{ML Abs} node and
+  occurrences of @{verbatim x} in @{verbatim t} are replaced by bound
+  variables (represented as de-Bruijn indices).
+*}
+
+
+subsubsection {* AST constants versus variables *}
+
+text {* Depending on the situation --- input syntax, output syntax,
+  translation patterns --- the distinction of atomic ASTs as @{ML
+  Ast.Constant} versus @{ML Ast.Variable} serves slightly different
+  purposes.
+
+  Input syntax of a term such as @{text "f a b = c"} does not yet
+  indicate the scopes of atomic entities @{text "f, a, b, c"}: they
+  could be global constants or local variables, even bound ones
+  depending on the context of the term.  @{ML Ast.Variable} leaves
+  this choice still open: later syntax layers (or translation
+  functions) may capture such a variable to determine its role
+  specifically, to make it a constant, bound variable, free variable
+  etc.  In contrast, syntax translations that introduce already known
+  constants would rather do it via @{ML Ast.Constant} to prevent
+  accidental re-interpretation later on.
+
+  Output syntax turns term constants into @{ML Ast.Constant} and
+  variables (free or schematic) into @{ML Ast.Variable}.  This
+  information is precise when printing fully formal @{text "\<lambda>"}-terms.
+
+  In AST translation patterns (\secref{sec:syn-trans}) the system
+  guesses from the current theory context which atoms should be
+  treated as constant versus variable for the matching process.
+  Sometimes this needs to be indicated more explicitly using @{text
+  "CONST c"} inside the term language.  It is also possible to use
+  @{command syntax} declarations (without mixfix annotation) to
+  enforce that certain unqualified names are always treated as
+  constant within the syntax machinery.
+
+  \medskip For ASTs that represent the language of types or sorts, the
+  situation is much simpler, since the concrete syntax already
+  distinguishes type variables from type constants (constructors).  So
+  @{text "('a, 'b) foo"} corresponds to an AST application of some
+  constant for @{text foo} and variable arguments for @{text "'a"} and
+  @{text "'b"}.  Note that the postfix application is merely a feature
+  of the concrete syntax, while in the AST the constructor occurs in
+  head position.  *}
+
+
+subsubsection {* Authentic syntax names *}
+
+text {* Naming constant entities within ASTs is another delicate
+  issue.  Unqualified names are looked up in the name space tables in
+  the last stage of parsing, after all translations have been applied.
+  Since syntax transformations do not know about this later name
+  resolution yet, there can be surprises in boundary cases.
+
+  \emph{Authentic syntax names} for @{ML Ast.Constant} avoid this
+  problem: the fully-qualified constant name with a special prefix for
+  its formal category (@{text "class"}, @{text "type"}, @{text
+  "const"}, @{text "fixed"}) represents the information faithfully
+  within the untyped AST format.  Accidental overlap with free or
+  bound variables is excluded as well.  Authentic syntax names work
+  implicitly in the following situations:
+
+  \begin{itemize}
+
+  \item Input of term constants (or fixed variables) that are
+  introduced by concrete syntax via @{command notation}: the
+  correspondence of a particular grammar production to some known term
+  entity is preserved.
+
+  \item Input of type constants (constructors) and type classes ---
+  thanks to explicit syntactic distinction independently of the
+  context.
+
+  \item Output of term constants, type constants, type classes ---
+  this information is already available from the internal term to be
+  printed.
+
+  \end{itemize}
+
+  In other words, syntax transformations that operate on input terms
+  written as prefix applications are difficult to make robust.
+  Luckily, this case rarely occurs in practice, because syntax forms
+  to be translated usually correspond to some bits of concrete
+  notation. *}
+
+
+subsection {* Raw syntax and translations \label{sec:syn-trans} *}
+
+text {*
+  \begin{tabular}{rcll}
+    @{command_def "nonterminal"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "syntax"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "no_syntax"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "translations"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "no_translations"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{attribute_def syntax_ast_trace} & : & @{text attribute} & default @{text false} \\
+    @{attribute_def syntax_ast_stats} & : & @{text attribute} & default @{text false} \\
+  \end{tabular}
+
+  Unlike mixfix notation for existing formal entities
+  (\secref{sec:notation}), raw syntax declarations provide full access
+  to the priority grammar of the inner syntax, without any sanity
+  checks.  This includes additional syntactic categories (via
+  @{command nonterminal}) and free-form grammar productions (via
+  @{command syntax}).  Additional syntax translations (or macros, via
+  @{command translations}) are required to turn resulting parse trees
+  into proper representations of formal entities again.
+
+  @{rail "
+    @@{command nonterminal} (@{syntax name} + @'and')
+    ;
+    (@@{command syntax} | @@{command no_syntax}) @{syntax mode}? (constdecl +)
+    ;
+    (@@{command translations} | @@{command no_translations})
+      (transpat ('==' | '=>' | '<=' | '\<rightleftharpoons>' | '\<rightharpoonup>' | '\<leftharpoondown>') transpat +)
+    ;
+
+    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
+    ;
+    mode: ('(' ( @{syntax name} | @'output' | @{syntax name} @'output' ) ')')
+    ;
+    transpat: ('(' @{syntax nameref} ')')? @{syntax string}
+  "}
+
+  \begin{description}
+
+  \item @{command "nonterminal"}~@{text c} declares a type
+  constructor @{text c} (without arguments) to act as purely syntactic
+  type: a nonterminal symbol of the inner syntax.
+
+  \item @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
+  priority grammar and the pretty printer table for the given print
+  mode (default @{verbatim "\"\""}). An optional keyword @{keyword_ref
+  "output"} means that only the pretty printer table is affected.
+
+  Following \secref{sec:mixfix}, the mixfix annotation @{text "mx =
+  template ps q"} together with type @{text "\<sigma> = \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"}
+  specifies a grammar production.  The @{text template} contains
+  delimiter tokens that surround @{text "n"} argument positions
+  (@{verbatim "_"}).  The latter correspond to nonterminal symbols
+  @{text "A\<^sub>i"} derived from the argument types @{text "\<tau>\<^sub>i"} as
+  follows:
+  \begin{itemize}
+
+  \item @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
+
+  \item @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
+  constructor @{text "\<kappa> \<noteq> prop"}
+
+  \item @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
+
+  \item @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
+  (syntactic type constructor)
+
+  \end{itemize}
+
+  Each @{text "A\<^sub>i"} is decorated by priority @{text "p\<^sub>i"} from the
+  given list @{text "ps"}; misssing priorities default to 0.
+
+  The resulting nonterminal of the production is determined similarly
+  from type @{text "\<tau>"}, with priority @{text "q"} and default 1000.
+
+  \medskip Parsing via this production produces parse trees @{text
+  "t\<^sub>1, \<dots>, t\<^sub>n"} for the argument slots.  The resulting parse tree is
+  composed as @{text "c t\<^sub>1 \<dots> t\<^sub>n"}, by using the syntax constant @{text
+  "c"} of the syntax declaration.
+
+  Such syntactic constants are invented on the spot, without formal
+  check wrt.\ existing declarations.  It is conventional to use plain
+  identifiers prefixed by a single underscore (e.g.\ @{text
+  "_foobar"}).  Names should be chosen with care, to avoid clashes
+  with other syntax declarations.
+
+  \medskip The special case of a copy production is specified by
+  @{text "c = "}@{verbatim "\"\""} (empty string).  It means that the
+  resulting parse tree @{text "t"} is copied directly, without any
+  further decoration.
+
+  \item @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
+  declarations (and translations) resulting from @{text decls}, which
+  are interpreted in the same manner as for @{command "syntax"} above.
+
+  \item @{command "translations"}~@{text rules} specifies syntactic
+  translation rules (i.e.\ macros) as first-order rewrite rules on
+  ASTs (\secref{sec:ast}).  The theory context maintains two
+  independent lists of translation rules: parse rules (@{verbatim "=>"}
+  or @{text "\<rightharpoonup>"}) and print rules (@{verbatim "<="} or @{text "\<leftharpoondown>"}).
+  For convenience, both can be specified simultaneously as parse~/
+  print rules (@{verbatim "=="} or @{text "\<rightleftharpoons>"}).
+
+  Translation patterns may be prefixed by the syntactic category to be
+  used for parsing; the default is @{text logic} which means that
+  regular term syntax is used.  Both sides of the syntax translation
+  rule undergo parsing and parse AST translations
+  (\secref{sec:tr-funs}), in order to perform some fundamental
+  normalization like @{text "\<lambda>x y. b \<leadsto> \<lambda>x. \<lambda>y. b"}, but other AST
+  translation rules are \emph{not} applied recursively here.
+
+  When processing AST patterns, the inner syntax lexer runs in a
+  different mode that allows identifiers to start with underscore.
+  This accommodates the usual naming convention for auxiliary syntax
+  constants --- those that do not have a logical counterpart --- by
+  allowing arbitrary AST applications to be specified within the term
+  syntax, independently of the corresponding concrete syntax.
+
+  Atomic ASTs are distinguished as @{ML Ast.Constant} versus @{ML
+  Ast.Variable} as follows: a qualified name, a syntax constant
+  declared via @{command syntax}, or the parse tree head of concrete
+  notation becomes @{ML Ast.Constant}; anything else becomes @{ML
+  Ast.Variable}.  Note that @{text CONST} and @{text XCONST} within
+  the term language (\secref{sec:pure-grammar}) allow one to enforce
+  treatment as constants.
+
+  AST rewrite rules @{text "(lhs, rhs)"} need to obey the following
+  side-conditions:
+
+  \begin{itemize}
+
+  \item Rules must be left-linear: @{text "lhs"} must not contain
+  repeated variables.\footnote{The deeper reason for this is that AST
+  equality is not well-defined: different occurrences of the ``same''
+  AST could be decorated differently by accidental type-constraints or
+  source position information, for example.}
+
+  \item Every variable in @{text "rhs"} must also occur in @{text
+  "lhs"}.
+
+  \end{itemize}
+
+  \item @{command "no_translations"}~@{text rules} removes syntactic
+  translation rules, which are interpreted in the same manner as for
+  @{command "translations"} above.
+
+  \item @{attribute syntax_ast_trace} and @{attribute
+  syntax_ast_stats} control diagnostic output in the AST normalization
+  process, when translation rules are applied to concrete input or
+  output.
+
+  \end{description}
+
+  Raw syntax and translations provide slightly lower-level access to
+  the grammar and the form of resulting parse trees.  It is often
+  possible to avoid this untyped macro mechanism, and use type-safe
+  @{command abbreviation} or @{command notation} instead.  Some
+  important situations where @{command syntax} and @{command
+  translations} are really needed are as follows:
+
+  \begin{itemize}
+
+  \item Iterated replacement via recursive @{command translations}.
+  For example, consider list enumeration @{term "[a, b, c, d]"} as
+  defined in theory @{theory List} in Isabelle/HOL (see also the
+  sketch after this list).
+
+  \item Change of binding status of variables: anything beyond the
+  built-in @{keyword "binder"} mixfix annotation requires explicit
+  syntax translations.  For example, consider list filter
+  comprehension @{term "[x \<leftarrow> xs . P]"} as defined in theory @{theory
+  List} in Isabelle/HOL.
+
+  \end{itemize}
+*}
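+
+text {* As a sketch of the first situation, list enumeration may be
+  defined by recursive @{command translations} roughly like this
+  (simplified from the actual sources of theory @{theory List}):
+\begin{ttbox}
+  syntax
+    "_list" :: "args => 'a list"    ("[(_)]")
+  translations
+    "[x, xs]" == "x # [xs]"
+    "[x]" == "x # []"
+\end{ttbox}
+  Here @{verbatim args} is the pre-existing nonterminal for
+  comma-separated arguments, so the first rule peels off one element
+  at a time until the singleton rule applies. *}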
+
+subsubsection {* Applying translation rules *}
+
+text {* As a term is being parsed or printed, an AST is generated as
+  an intermediate form according to \figref{fig:parse-print}.  The AST
+  is normalized by applying translation rules in the manner of a
+  first-order term rewriting system.  We first examine how a single
+  rule is applied.
+
+  Let @{text "t"} be the abstract syntax tree to be normalized and
+  @{text "(lhs, rhs)"} some translation rule.  A subtree @{text "u"}
+  of @{text "t"} is called \emph{redex} if it is an instance of @{text
+  "lhs"}; in this case the pattern @{text "lhs"} is said to match the
+  object @{text "u"}.  A redex matched by @{text "lhs"} may be
+  replaced by the corresponding instance of @{text "rhs"}, thus
+  \emph{rewriting} the AST @{text "t"}.  Matching requires some notion
+  of \emph{place-holders} in rule patterns: @{ML Ast.Variable} serves
+  this purpose.
+
+  More precisely, the matching of the object @{text "u"} against the
+  pattern @{text "lhs"} is performed as follows:
+
+  \begin{itemize}
+
+  \item Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
+  Ast.Constant}~@{text "x"} are matched by pattern @{ML
+  Ast.Constant}~@{text "x"}.  Thus all atomic ASTs in the object are
+  treated as (potential) constants, and a successful match makes them
+  actual constants even before name space resolution (see also
+  \secref{sec:ast}).
+
+  \item Object @{text "u"} is matched by pattern @{ML
+  Ast.Variable}~@{text "x"}, binding @{text "x"} to @{text "u"}.
+
+  \item Object @{ML Ast.Appl}~@{text "us"} is matched by @{ML
+  Ast.Appl}~@{text "ts"} if @{text "us"} and @{text "ts"} have the
+  same length and each corresponding subtree matches.
+
+  \item In every other case, matching fails.
+
+  \end{itemize}
+
+  A successful match yields a substitution that is applied to @{text
+  "rhs"}, generating the instance that replaces @{text "u"}.
+
+  Normalizing an AST involves repeatedly applying translation rules
+  until none are applicable.  This works yoyo-like: top-down,
+  bottom-up, top-down, etc.  At each subtree position, rules are
+  chosen in order of appearance in the theory definitions.
+
+  The configuration options @{attribute syntax_ast_trace} and
+  @{attribute syntax_ast_stats} might help to understand this process
+  and diagnose problems.
+
+  \begin{warn}
+  If syntax translation rules work incorrectly, the output of
+  @{command_ref print_syntax} with its \emph{rules} sections reveals the
+  actual internal forms of AST patterns, without potentially confusing
+  concrete syntax.  Recall that AST constants appear as quoted strings
+  and variables without quotes.
+  \end{warn}
+
+  \begin{warn}
+  If @{attribute_ref eta_contract} is set to @{text "true"}, terms
+  will be @{text "\<eta>"}-contracted \emph{before} the AST rewriter sees
+  them.  Thus some abstraction nodes needed for print rules to match
+  may vanish.  For example, @{text "Ball A (\<lambda>x. P x)"} would contract
+  to @{text "Ball A P"} and the standard print rule would fail to
+  apply.  This problem can be avoided by hand-written ML translation
+  functions (see also \secref{sec:tr-funs}), which is in fact the same
+  mechanism used in built-in @{keyword "binder"} declarations.
+  \end{warn}
+*}
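+
+text {* In practice, the latter problem can also be side-stepped while
+  debugging print rules by disabling @{attribute eta_contract}
+  temporarily (a sketch; note that this changes printed output
+  globally):
+\begin{ttbox}
+  declare [[eta_contract = false]]
+\end{ttbox}
+*}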
+
+
+subsection {* Syntax translation functions \label{sec:tr-funs} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "parse_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "parse_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "print_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "typed_print_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def "print_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{ML_antiquotation_def "class_syntax"} & : & @{text ML_antiquotation} \\
+    @{ML_antiquotation_def "type_syntax"} & : & @{text ML_antiquotation} \\
+    @{ML_antiquotation_def "const_syntax"} & : & @{text ML_antiquotation} \\
+    @{ML_antiquotation_def "syntax_const"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  Syntax translation functions written in ML admit almost arbitrary
+  manipulations of inner syntax, at the expense of some complexity and
+  obscurity in the implementation.
+
+  @{rail "
+  ( @@{command parse_ast_translation} | @@{command parse_translation} |
+    @@{command print_translation} | @@{command typed_print_translation} |
+    @@{command print_ast_translation}) ('(' @'advanced' ')')? @{syntax text}
+  ;
+  (@@{ML_antiquotation class_syntax} |
+   @@{ML_antiquotation type_syntax} |
+   @@{ML_antiquotation const_syntax} |
+   @@{ML_antiquotation syntax_const}) name
+  "}
+
+  \begin{description}
+
+  \item @{command parse_translation} etc.\ declare syntax translation
+  functions to the theory.  Each of these commands has a single
+  @{syntax text} argument that refers to an ML expression of
+  appropriate type; the default types are as follows:
+
+  \medskip
+  {\footnotesize
+  \begin{tabular}{ll}
+  @{command parse_ast_translation} & : @{ML_type "(string * (Ast.ast list -> Ast.ast)) list"} \\
+  @{command parse_translation} & : @{ML_type "(string * (term list -> term)) list"} \\
+  @{command print_translation} & : @{ML_type "(string * (term list -> term)) list"} \\
+  @{command typed_print_translation} & : @{ML_type "(string * (typ -> term list -> term)) list"} \\
+  @{command print_ast_translation} & : @{ML_type "(string * (Ast.ast list -> Ast.ast)) list"} \\
+  \end{tabular}}
+  \medskip
+
+  The argument list consists of @{text "(c, tr)"} pairs, where @{text
+  "c"} is the syntax name of the formal entity involved, and @{text
+  "tr"} a function that translates a syntax form @{text "c args"} into
+  @{text "tr args"}.  The ML naming convention for parse translations
+  is @{text "c_tr"} and for print translations @{text "c_tr'"}.
+
+  The @{command_ref print_syntax} command displays the sets of names
+  associated with the translation functions of a theory under @{text
+  "parse_ast_translation"} etc.
+
+  If the @{verbatim "("}@{keyword "advanced"}@{verbatim ")"} option is
+  given, the corresponding translation functions depend on the current
+  theory or proof context as additional argument.  This allows one to
+  implement advanced syntax mechanisms, as translation functions may
+  refer to specific theory declarations or auxiliary proof data.
+
+  \item @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
+  @{text "@{const_syntax c}"} inline the authentic syntax name of the
+  given formal entities into the ML source.  This is the
+  fully-qualified logical name prefixed by a special marker to
+  indicate its kind: thus different logical name spaces are properly
+  distinguished within parse trees.
+
+  \item @{text "@{const_syntax c}"} inlines the name @{text "c"} of
+  the given syntax constant, having checked that it has been declared
+  via some @{command syntax} commands within the theory context.  Note
+  that the usual naming convention makes syntax constants start with
+  underscore, to reduce the chance of accidental clashes with other
+  names occurring in parse trees (unqualified constants etc.).
+
+  \end{description}
+*}
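+
+text {* A minimal parse translation sketch: the constant @{verbatim
+  c}, the syntax constant @{verbatim "_c_example"} and the notation
+  @{verbatim EXAMPLE} are all invented for this illustration.
+\begin{ttbox}
+  consts c :: "'a => 'a"
+
+  syntax "_c_example" :: "logic => logic"  ("EXAMPLE _")
+
+  parse_translation \{*
+    [("_c_example", fn ts =>
+        Term.list_comb (Syntax.const @\{const_syntax c\}, ts))]
+  *\}
+\end{ttbox}
+  The function is keyed on the syntax constant @{verbatim
+  "_c_example"} and maps the list of parsed arguments onto an
+  application of the authentic constant name, following the default
+  ML types given in the table above. *}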
+
+
+subsubsection {* The translation strategy *}
+
+text {* The different kinds of translation functions are invoked during
+  the transformations between parse trees, ASTs and syntactic terms
+  (cf.\ \figref{fig:parse-print}).  Whenever a combination of the form
+  @{text "c x\<^sub>1 \<dots> x\<^sub>n"} is encountered, and a translation function
+  @{text "f"} of appropriate kind is declared for @{text "c"}, the
+  result is produced by evaluation of @{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} in ML.
+
+  For AST translations, the arguments @{text "x\<^sub>1, \<dots>, x\<^sub>n"} are ASTs.  A
+  combination has the form @{ML "Ast.Constant"}~@{text "c"} or @{ML
+  "Ast.Appl"}~@{text "["}@{ML Ast.Constant}~@{text "c, x\<^sub>1, \<dots>, x\<^sub>n]"}.
+  For term translations, the arguments are terms and a combination has
+  the form @{ML Const}~@{text "(c, \<tau>)"} or @{ML Const}~@{text "(c, \<tau>)
+  $ x\<^sub>1 $ \<dots> $ x\<^sub>n"}.  Terms allow more sophisticated transformations
+  than ASTs do, typically involving abstractions and bound
+  variables. \emph{Typed} print translations may even peek at the type
+  @{text "\<tau>"} of the constant they are invoked on, although that information
+  may be inaccurate.
+
+  Regardless of whether they act on ASTs or terms, translation
+  functions called during the parsing process differ from those for
+  printing in their overall behaviour:
+
+  \begin{description}
+
+  \item [Parse translations] are applied bottom-up.  The arguments are
+  already in translated form.  The translations must not fail;
+  exceptions trigger an error message.  There may be at most one
+  function associated with any syntactic name.
+
+  \item [Print translations] are applied top-down.  They are supplied
+  with arguments that are partly still in internal form.  The result
+  again undergoes translation; therefore a print translation should
+  not introduce as head the very constant that invoked it.  The
+  function may raise exception @{ML Match} to indicate failure; in
+  this event it has no effect.  Multiple functions associated with
+  some syntactic name are tried in the order of declaration in the
+  theory.
+
+  \end{description}
+
+  Only constant atoms --- constructor @{ML Ast.Constant} for ASTs and
+  @{ML Const} for terms --- can invoke translation functions.  This
+  means that parse translations can only be associated with parse tree
+  heads of concrete syntax, or syntactic constants introduced via
+  other translations.  For plain identifiers within the term language,
+  the status of constant versus variable is not yet known during
+  parsing.  This is in contrast to print translations, where constants
+  are explicitly known from the given term in its fully internal form.
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/ML_Tactic.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,180 @@
+theory ML_Tactic
+imports Base Main
+begin
+
+chapter {* ML tactic expressions *}
+
+text {*
+  Isar proof methods closely resemble traditional tactics, when used
+  in unstructured sequences of @{command "apply"} commands.
+  Isabelle/Isar provides emulations for all major ML tactics of
+  classic Isabelle --- mostly for the sake of easy porting of existing
+  developments, as actual Isar proof texts would demand much less
+  diversity of proof methods.
+
+  Unlike tactic expressions in ML, Isar proof methods provide proper
+  concrete syntax for additional arguments, options, modifiers etc.
+  Thus a typical method text is usually more concise than the
+  corresponding ML tactic.  Furthermore, the Isar versions of classic
+  Isabelle tactics often cover several variant forms by a single
+  method with separate options to tune the behavior.  For example,
+  method @{method simp} replaces all of @{ML simp_tac}~/ @{ML
+  asm_simp_tac}~/ @{ML full_simp_tac}~/ @{ML asm_full_simp_tac}; there
+  is also concrete syntax for augmenting the Simplifier context (the
+  current ``simpset'') in a convenient way.
+*}
+
+
+section {* Resolution tactics *}
+
+text {*
+  Classic Isabelle provides several variant forms of tactics for
+  single-step rule applications (based on higher-order resolution).
+  The space of resolution tactics has the following main dimensions.
+
+  \begin{enumerate}
+
+  \item The ``mode'' of resolution: intro, elim, destruct, or forward
+  (e.g.\ @{ML resolve_tac}, @{ML eresolve_tac}, @{ML dresolve_tac},
+  @{ML forward_tac}).
+
+  \item Optional explicit instantiation (e.g.\ @{ML resolve_tac} vs.\
+  @{ML res_inst_tac}).
+
+  \item Abbreviations for singleton arguments (e.g.\ @{ML resolve_tac}
+  vs.\ @{ML rtac}).
+
+  \end{enumerate}
+
+  Basically, the set of Isar tactic emulations @{method rule_tac},
+  @{method erule_tac}, @{method drule_tac}, @{method frule_tac} (see
+  \secref{sec:tactics}) would be sufficient to cover the four modes,
+  either with or without instantiation, and either with single or
+  multiple arguments.  In most cases, however, it is more convenient to
+  use the plain @{method_ref (Pure) rule} method, or any of its
+  ``improper'' variants @{method erule}, @{method drule}, @{method
+  frule}.  Note that explicit goal addressing is only supported by the
+  actual @{method rule_tac} version.
+
+  With this in mind, plain resolution tactics correspond to Isar
+  methods as follows.
+
+  \medskip
+  \begin{tabular}{lll}
+    @{ML rtac}~@{text "a 1"} & & @{text "rule a"} \\
+    @{ML resolve_tac}~@{text "[a\<^sub>1, \<dots>] 1"} & & @{text "rule a\<^sub>1 \<dots>"} \\
+    @{ML res_inst_tac}~@{text "ctxt [(x\<^sub>1, t\<^sub>1), \<dots>] a 1"} & &
+    @{text "rule_tac x\<^sub>1 = t\<^sub>1 \<AND> \<dots> \<IN> a"} \\[0.5ex]
+    @{ML rtac}~@{text "a i"} & & @{text "rule_tac [i] a"} \\
+    @{ML resolve_tac}~@{text "[a\<^sub>1, \<dots>] i"} & & @{text "rule_tac [i] a\<^sub>1 \<dots>"} \\
+    @{ML res_inst_tac}~@{text "ctxt [(x\<^sub>1, t\<^sub>1), \<dots>] a i"} & &
+    @{text "rule_tac [i] x\<^sub>1 = t\<^sub>1 \<AND> \<dots> \<IN> a"} \\
+  \end{tabular}
+  \medskip
+
+  Note that explicit goal addressing may usually be avoided by
+  changing the order of subgoals with @{command "defer"} or @{command
+  "prefer"} (see \secref{sec:tactic-commands}).
+*}
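+
+text {* For example, the following two unstructured proof steps have
+  the same effect (a sketch, using the familiar HOL rule @{text
+  conjI}):
+\begin{ttbox}
+  apply (rule conjI)
+  apply (tactic \{* rtac @\{thm conjI\} 1 *\})
+\end{ttbox}
+*}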
+
+
+section {* Simplifier tactics *}
+
+text {*
+  The main Simplifier tactics @{ML simp_tac} and variants (cf.\
+  \cite{isabelle-ref}) are all covered by the @{method simp} and
+  @{method simp_all} methods (see \secref{sec:simplifier}).  Note that
+  there is no individual goal addressing available: simplification
+  acts either on the first goal (@{method simp}) or all goals
+  (@{method simp_all}).
+
+  \medskip
+  \begin{tabular}{lll}
+    @{ML asm_full_simp_tac}~@{text "@{simpset} 1"} & & @{method simp} \\
+    @{ML ALLGOALS}~(@{ML asm_full_simp_tac}~@{text "@{simpset}"}) & & @{method simp_all} \\[0.5ex]
+    @{ML simp_tac}~@{text "@{simpset} 1"} & & @{method simp}~@{text "(no_asm)"} \\
+    @{ML asm_simp_tac}~@{text "@{simpset} 1"} & & @{method simp}~@{text "(no_asm_simp)"} \\
+    @{ML full_simp_tac}~@{text "@{simpset} 1"} & & @{method simp}~@{text "(no_asm_use)"} \\
+    @{ML asm_lr_simp_tac}~@{text "@{simpset} 1"} & & @{method simp}~@{text "(asm_lr)"} \\
+  \end{tabular}
+  \medskip
+*}
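+
+text {* Thus a typical porting step replaces, e.g., @{ML
+  simp_tac}~@{text "@{simpset} 1"} by the corresponding method call (a
+  sketch):
+\begin{ttbox}
+  apply (simp (no_asm))
+\end{ttbox}
+*}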
+
+
+section {* Classical Reasoner tactics *}
+
+text {* The Classical Reasoner provides a rather large number of
+  variations of automated tactics, such as @{ML blast_tac}, @{ML
+  fast_tac}, @{ML clarify_tac} etc.  The corresponding Isar methods
+  usually share the same base name, such as @{method blast}, @{method
+  fast}, @{method clarify} etc.\ (see \secref{sec:classical}).  *}
+
+
+section {* Miscellaneous tactics *}
+
+text {*
+  There are a few additional tactics defined in various theories of
+  Isabelle/HOL, some of these also in Isabelle/FOL or Isabelle/ZF.
+  The most common of these may be ported to Isar as follows.
+
+  \medskip
+  \begin{tabular}{lll}
+    @{ML stac}~@{text "a 1"} & & @{text "subst a"} \\
+    @{ML hyp_subst_tac}~@{text 1} & & @{text hypsubst} \\
+    @{ML strip_tac}~@{text 1} & @{text "\<approx>"} & @{text "intro strip"} \\
+    @{ML split_all_tac}~@{text 1} & & @{text "simp (no_asm_simp) only: split_tupled_all"} \\
+      & @{text "\<approx>"} & @{text "simp only: split_tupled_all"} \\
+      & @{text "\<lless>"} & @{text "clarify"} \\
+  \end{tabular}
+*}
+
+
+section {* Tacticals *}
+
+text {*
+  Classic Isabelle provides a huge amount of tacticals for combination
+  and modification of existing tactics.  This has been greatly reduced
+  in Isar, providing the bare minimum of combinators only: ``@{text
+  ","}'' (sequential composition), ``@{text "|"}'' (alternative
+  choices), ``@{text "?"}'' (try), ``@{text "+"}'' (repeat at least
+  once).  These are usually sufficient in practice; if all else fails,
+  arbitrary ML tactic code may be invoked via the @{method tactic}
+  method (see \secref{sec:tactics}).
+
+  \medskip Common ML tacticals may be expressed directly in Isar as
+  follows:
+
+  \medskip
+  \begin{tabular}{lll}
+    @{text "tac\<^sub>1"}~@{ML_text THEN}~@{text "tac\<^sub>2"} & & @{text "meth\<^sub>1, meth\<^sub>2"} \\
+    @{text "tac\<^sub>1"}~@{ML_text ORELSE}~@{text "tac\<^sub>2"} & & @{text "meth\<^sub>1 | meth\<^sub>2"} \\
+    @{ML TRY}~@{text tac} & & @{text "meth?"} \\
+    @{ML REPEAT1}~@{text tac} & & @{text "meth+"} \\
+    @{ML REPEAT}~@{text tac} & & @{text "(meth+)?"} \\
+    @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>]"} & & @{text "meth\<^sub>1, \<dots>"} \\
+    @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>]"} & & @{text "meth\<^sub>1 | \<dots>"} \\
+  \end{tabular}
+  \medskip
+
+  \medskip @{ML CHANGED} (see \cite{isabelle-implementation}) is
+  usually not required in Isar, since most basic proof methods already
+  fail unless there is an actual change in the goal state.
+  Nevertheless, ``@{text "?"}''  (try) may be used to accept
+  \emph{unchanged} results as well.
+
+  \medskip @{ML ALLGOALS}, @{ML SOMEGOAL} etc.\ (see
+  \cite{isabelle-implementation}) are not available in Isar, since
+  there is no direct goal addressing.  Nevertheless, some basic
+  methods address all goals internally, notably @{method simp_all}
+  (see \secref{sec:simplifier}).  Also note that @{ML ALLGOALS} can
+  often be replaced by ``@{text "+"}'' (repeat at least once), although
+  this usually has a different operational behavior: subgoals are
+  solved in a different order.
+
+  \medskip Iterated resolution, such as
+  @{ML_text "REPEAT (FIRSTGOAL (resolve_tac ...))"}, is usually better
+  expressed using the @{method intro} and @{method elim} methods of
+  Isar (see \secref{sec:classical}).
+*}
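+
+text {* For example, iterated introduction steps in the style of
+  @{ML_text "REPEAT (FIRSTGOAL (resolve_tac [impI, allI]))"}
+  correspond to a method call like this (a sketch, using standard HOL
+  rules):
+\begin{ttbox}
+  apply (intro impI allI)
+\end{ttbox}
+*}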
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Misc.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,151 @@
+theory Misc
+imports Base Main
+begin
+
+chapter {* Other commands *}
+
+section {* Inspecting the context *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "print_commands"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "print_theory"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_methods"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_attributes"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_theorems"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "find_theorems"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "find_consts"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "thm_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "unused_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_facts"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+    @{command_def "print_binds"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{command print_theory} | @@{command print_theorems}) ('!'?)
+    ;
+
+    @@{command find_theorems} ('(' @{syntax nat}? 'with_dups'? ')')? \\ (thmcriterion * )
+    ;
+    thmcriterion: ('-'?) ('name' ':' @{syntax nameref} | 'intro' | 'elim' | 'dest' |
+      'solves' | 'simp' ':' @{syntax term} | @{syntax term})
+    ;
+    @@{command find_consts} (constcriterion * )
+    ;
+    constcriterion: ('-'?)
+      ('name' ':' @{syntax nameref} | 'strict' ':' @{syntax type} | @{syntax type})
+    ;
+    @@{command thm_deps} @{syntax thmrefs}
+    ;
+    @@{command unused_thms} ((@{syntax name} +) '-' (@{syntax name} * ))?
+  "}
+
+  These commands print certain parts of the theory and proof context.
+  Note that there are some further ones available, such as for the set
+  of rules declared for simplification.
+
+  \begin{description}
+  
+  \item @{command "print_commands"} prints Isabelle's outer theory
+  syntax, including keywords and commands.
+  
+  \item @{command "print_theory"} prints the main logical content of
+  the theory context; the ``@{text "!"}'' option indicates extra
+  verbosity.
+
+  \item @{command "print_methods"} prints all proof methods
+  available in the current theory context.
+  
+  \item @{command "print_attributes"} prints all attributes
+  available in the current theory context.
+  
+  \item @{command "print_theorems"} prints theorems resulting from the
+  last command; the ``@{text "!"}'' option indicates extra verbosity.
+  
+  \item @{command "find_theorems"}~@{text criteria} retrieves facts
+  from the theory or proof context matching all of the given search
+  criteria.  The criterion @{text "name: p"} selects all theorems
+  whose fully qualified name matches pattern @{text p}, which may
+  contain ``@{text "*"}'' wildcards.  The criteria @{text intro},
+  @{text elim}, and @{text dest} select theorems that match the
+  current goal as introduction, elimination or destruction rules,
+  respectively.  The criterion @{text "solves"} returns all rules
+  that would directly solve the current goal.  The criterion
+  @{text "simp: t"} selects all rewrite rules whose left-hand side
+  matches the given term.  The criterion term @{text t} selects all
+  theorems that contain the pattern @{text t} --- as usual, patterns
+  may contain occurrences of the dummy ``@{text _}'', schematic
+  variables, and type constraints.
+  
+  Criteria can be preceded by ``@{text "-"}'' to select theorems that
+  do \emph{not} match. Note that giving the empty list of criteria
+  yields \emph{all} currently known facts.  An optional limit for the
+  number of printed facts may be given; the default is 40.  By
+  default, duplicates are removed from the search result. Use
+  @{text with_dups} to display duplicates.
+
+  \item @{command "find_consts"}~@{text criteria} prints all constants
+  whose type meets all of the given criteria. The criterion @{text
+  "strict: ty"} is met by any type that matches the type pattern
+  @{text ty}.  Patterns may contain both the dummy type ``@{text _}''
+  and sort constraints. The criterion @{text ty} is similar, but it
+  also matches against subtypes. The criterion @{text "name: p"} and
+  the prefix ``@{text "-"}'' function as described for @{command
+  "find_theorems"}.
+
+  \item @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
+  visualizes dependencies of facts, using Isabelle's graph browser
+  tool (see also \cite{isabelle-sys}).
+
+  \item @{command "unused_thms"}~@{text "A\<^isub>1 \<dots> A\<^isub>m - B\<^isub>1 \<dots> B\<^isub>n"}
+  displays all unused theorems in theories @{text "B\<^isub>1 \<dots> B\<^isub>n"}
+  or their parents, but not in @{text "A\<^isub>1 \<dots> A\<^isub>m"} or their parents.
+  If @{text n} is @{text 0}, the end of the range of theories
+  defaults to the current theory. If no range is specified,
+  only the unused theorems in the current theory are displayed.
+  
+  \item @{command "print_facts"} prints all local facts of the
+  current context, both named and unnamed ones.
+  
+  \item @{command "print_binds"} prints all term abbreviations
+  present in the context.
+
+  \end{description}
+*}
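+
+text {* For example, the following query combines a term pattern with
+  a name criterion (a sketch; it assumes a HOL context where @{text
+  "+"} is available):
+\begin{ttbox}
+  find_theorems "_ + _ = _ + _" name: comm
+\end{ttbox}
+  This retrieves facts that contain an equation between two sums and
+  whose name matches @{verbatim comm}. *}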
+
+
+section {* System commands *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "cd"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "pwd"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+    @{command_def "use_thy"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
+  \end{matharray}
+
+  @{rail "
+    (@@{command cd} | @@{command use_thy}) @{syntax name}
+  "}
+
+  \begin{description}
+
+  \item @{command "cd"}~@{text path} changes the current directory
+  of the Isabelle process.
+
+  \item @{command "pwd"} prints the current working directory.
+
+  \item @{command "use_thy"}~@{text A} preload theory @{text A}.
+  These system commands are scarcely used when working interactively,
+  since loading of theories is done automatically as required.
+
+  \end{description}
+
+  %FIXME proper place (!?)
+  Isabelle file specifications may contain path variables (e.g.\
+  @{verbatim "$ISABELLE_HOME"}) that are expanded accordingly.  Note
+  that @{verbatim "~"} abbreviates @{verbatim "$USER_HOME"}, and
+  @{verbatim "~~"} abbreviates @{verbatim "$ISABELLE_HOME"}.  The
+  general syntax for path specifications follows POSIX conventions.
+*}
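+
+text {* A brief interactive sketch, also illustrating the @{verbatim
+  "~~"} abbreviation mentioned above:
+\begin{ttbox}
+  cd "~~/src/HOL"
+  pwd
+  use_thy "List"
+\end{ttbox}
+*}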
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Outer_Syntax.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,427 @@
+theory Outer_Syntax
+imports Base Main
+begin
+
+chapter {* Outer syntax --- the theory language \label{ch:outer-syntax} *}
+
+text {*
+  The rather generic framework of Isabelle/Isar syntax emerges from
+  three main syntactic categories: \emph{commands} of the top-level
+  Isar engine (covering theory and proof elements), \emph{methods} for
+  general goal refinements (analogous to traditional ``tactics''), and
+  \emph{attributes} for operations on facts (within a certain
+  context).  Subsequently we give a reference of basic syntactic
+  entities underlying Isabelle/Isar syntax in a bottom-up manner.
+  Concrete theory and proof language elements will be introduced later
+  on.
+
+  \medskip In order to get started with writing well-formed
+  Isabelle/Isar documents, the most important aspect to be noted is
+  the difference of \emph{inner} versus \emph{outer} syntax.  Inner
+  syntax is that of Isabelle types and terms of the logic, while outer
+  syntax is that of Isabelle/Isar theory sources (specifications and
+  proofs).  As a general rule, inner syntax entities may occur only as
+  \emph{atomic entities} within outer syntax.  For example, the string
+  @{verbatim "\"x + y\""} and identifier @{verbatim z} are legal term
+  specifications within a theory, while @{verbatim "x + y"} without
+  quotes is not.
+
+  Printed theory documents usually omit quotes to gain readability
+  (this is a matter of {\LaTeX} macro setup, say via @{verbatim
+  "\\isabellestyle"}, see also \cite{isabelle-sys}).  Experienced
+  users of Isabelle/Isar may easily reconstruct the lost technical
+  information, while mere readers need not care about quotes at all.
+
+  \medskip Isabelle/Isar input may contain any number of input
+  termination characters ``@{verbatim ";"}'' (semicolon) to separate
+  commands explicitly.  This is particularly useful in interactive
+  shell sessions to make clear where the current command is intended
+  to end.  Otherwise, the interpreter loop will continue to issue a
+  secondary prompt ``@{verbatim "#"}'' until an end-of-command is
+  clearly recognized from the input syntax, e.g.\ upon encountering
+  the next command keyword.
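+
+  For example, a hypothetical interactive session might terminate each
+  command with an explicit semicolon like this:
+\begin{ttbox}
+  lemma "A --> A";
+  by simp;
+\end{ttbox}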
+
+  More advanced interfaces such as Proof~General \cite{proofgeneral}
+  do not require explicit semicolons; the amount of input text is
+  determined automatically by inspecting the present content of the
+  Emacs text buffer.  In the printed presentation of Isabelle/Isar
+  documents semicolons are omitted altogether for readability.
+
+  \begin{warn}
+    Proof~General requires certain syntax classification tables in
+    order to achieve properly synchronized interaction with the
+    Isabelle/Isar process.  These tables need to be consistent with
+    the Isabelle version and particular logic image to be used in a
+    running session (common object-logics may well change the outer
+    syntax).  The standard setup should work correctly with any of the
+    ``official'' logic images derived from Isabelle/HOL (including
+    HOLCF etc.).  Users of alternative logics may need to tell
+    Proof~General explicitly, e.g.\ by giving an option @{verbatim "-k ZF"}
+    (in conjunction with @{verbatim "-l ZF"}, to specify the default
+    logic image).  Note that option @{verbatim "-L"} achieves both
+    at the same time.
+  \end{warn}
+*}
+
+
+section {* Lexical matters \label{sec:outer-lex} *}
+
+text {* The outer lexical syntax consists of three main categories of
+  syntax tokens:
+
+  \begin{enumerate}
+
+  \item \emph{major keywords} --- the command names that are available
+  in the present logic session;
+
+  \item \emph{minor keywords} --- additional literal tokens required
+  by the syntax of commands;
+
+  \item \emph{named tokens} --- various categories of identifiers etc.
+
+  \end{enumerate}
+
+  Major keywords and minor keywords are guaranteed to be disjoint.
+  This helps user-interfaces to determine the overall structure of a
+  theory text, without knowing the full details of command syntax.
+  Internally, there is some additional information about the kind of
+  major keywords, which approximates the command type (theory command,
+  proof command etc.).
+
+  Keywords override named tokens.  For example, the presence of a
+  command called @{verbatim term} inhibits the identifier @{verbatim
+  term}, but the string @{verbatim "\"term\""} can be used instead.
+  By convention, the outer syntax always allows quoted strings in
+  addition to identifiers, wherever a named entity is expected.
+
+  When tokenizing a given input sequence, the lexer repeatedly takes
+  the longest prefix of the input that forms a valid token.  Spaces,
+  tabs, newlines and formfeeds between tokens serve as explicit
+  separators.
+
+  \medskip The categories for named tokens are defined once and for
+  all as follows.
+
+  \begin{center}
+  \begin{supertabular}{rcl}
+    @{syntax_def ident} & = & @{text "letter quasiletter\<^sup>*"} \\
+    @{syntax_def longident} & = & @{text "ident("}@{verbatim "."}@{text "ident)\<^sup>+"} \\
+    @{syntax_def symident} & = & @{text "sym\<^sup>+  |  "}@{verbatim "\\"}@{verbatim "<"}@{text ident}@{verbatim ">"} \\
+    @{syntax_def nat} & = & @{text "digit\<^sup>+"} \\
+    @{syntax_def float} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text "  |  "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\
+    @{syntax_def var} & = & @{verbatim "?"}@{text "ident  |  "}@{verbatim "?"}@{text ident}@{verbatim "."}@{text nat} \\
+    @{syntax_def typefree} & = & @{verbatim "'"}@{text ident} \\
+    @{syntax_def typevar} & = & @{verbatim "?"}@{text "typefree  |  "}@{verbatim "?"}@{text typefree}@{verbatim "."}@{text nat} \\
+    @{syntax_def string} & = & @{verbatim "\""} @{text "\<dots>"} @{verbatim "\""} \\
+    @{syntax_def altstring} & = & @{verbatim "`"} @{text "\<dots>"} @{verbatim "`"} \\
+    @{syntax_def verbatim} & = & @{verbatim "{*"} @{text "\<dots>"} @{verbatim "*"}@{verbatim "}"} \\[1ex]
+
+    @{text letter} & = & @{text "latin  |  "}@{verbatim "\\"}@{verbatim "<"}@{text latin}@{verbatim ">"}@{text "  |  "}@{verbatim "\\"}@{verbatim "<"}@{text "latin latin"}@{verbatim ">"}@{text "  |  greek  |"} \\
+          &   & @{verbatim "\<^isub>"}@{text "  |  "}@{verbatim "\<^isup>"} \\
+    @{text quasiletter} & = & @{text "letter  |  digit  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "'"} \\
+    @{text latin} & = & @{verbatim a}@{text "  | \<dots> |  "}@{verbatim z}@{text "  |  "}@{verbatim A}@{text "  |  \<dots> |  "}@{verbatim Z} \\
+    @{text digit} & = & @{verbatim "0"}@{text "  |  \<dots> |  "}@{verbatim "9"} \\
+    @{text sym} & = & @{verbatim "!"}@{text "  |  "}@{verbatim "#"}@{text "  |  "}@{verbatim "$"}@{text "  |  "}@{verbatim "%"}@{text "  |  "}@{verbatim "&"}@{text "  |  "}@{verbatim "*"}@{text "  |  "}@{verbatim "+"}@{text "  |  "}@{verbatim "-"}@{text "  |  "}@{verbatim "/"}@{text "  |"} \\
+    & & @{verbatim "<"}@{text "  |  "}@{verbatim "="}@{text "  |  "}@{verbatim ">"}@{text "  |  "}@{verbatim "?"}@{text "  |  "}@{verbatim "@"}@{text "  |  "}@{verbatim "^"}@{text "  |  "}@{verbatim "_"}@{text "  |  "}@{verbatim "|"}@{text "  |  "}@{verbatim "~"} \\
+    @{text greek} & = & @{verbatim "\<alpha>"}@{text "  |  "}@{verbatim "\<beta>"}@{text "  |  "}@{verbatim "\<gamma>"}@{text "  |  "}@{verbatim "\<delta>"}@{text "  |"} \\
+          &   & @{verbatim "\<epsilon>"}@{text "  |  "}@{verbatim "\<zeta>"}@{text "  |  "}@{verbatim "\<eta>"}@{text "  |  "}@{verbatim "\<theta>"}@{text "  |"} \\
+          &   & @{verbatim "\<iota>"}@{text "  |  "}@{verbatim "\<kappa>"}@{text "  |  "}@{verbatim "\<mu>"}@{text "  |  "}@{verbatim "\<nu>"}@{text "  |"} \\
+          &   & @{verbatim "\<xi>"}@{text "  |  "}@{verbatim "\<pi>"}@{text "  |  "}@{verbatim "\<rho>"}@{text "  |  "}@{verbatim "\<sigma>"}@{text "  |  "}@{verbatim "\<tau>"}@{text "  |"} \\
+          &   & @{verbatim "\<upsilon>"}@{text "  |  "}@{verbatim "\<phi>"}@{text "  |  "}@{verbatim "\<chi>"}@{text "  |  "}@{verbatim "\<psi>"}@{text "  |"} \\
+          &   & @{verbatim "\<omega>"}@{text "  |  "}@{verbatim "\<Gamma>"}@{text "  |  "}@{verbatim "\<Delta>"}@{text "  |  "}@{verbatim "\<Theta>"}@{text "  |"} \\
+          &   & @{verbatim "\<Lambda>"}@{text "  |  "}@{verbatim "\<Xi>"}@{text "  |  "}@{verbatim "\<Pi>"}@{text "  |  "}@{verbatim "\<Sigma>"}@{text "  |"} \\
+          &   & @{verbatim "\<Upsilon>"}@{text "  |  "}@{verbatim "\<Phi>"}@{text "  |  "}@{verbatim "\<Psi>"}@{text "  |  "}@{verbatim "\<Omega>"} \\
+  \end{supertabular}
+  \end{center}
+
+  A @{syntax_ref var} or @{syntax_ref typevar} describes an unknown,
+  which is internally a pair of base name and index (ML type @{ML_type
+  indexname}).  These components are either separated by a dot as in
+  @{text "?x.1"} or @{text "?x7.3"} or run together as in @{text
+  "?x1"}.  The latter form is possible if the base name does not end
+  with digits.  If the index is 0, it may be dropped altogether:
+  @{text "?x"} and @{text "?x0"} and @{text "?x.0"} all refer to the
+  same unknown, with basename @{text "x"} and index 0.
+
+  The syntax of @{syntax_ref string} admits any characters, including
+  newlines; ``@{verbatim "\""}'' (double-quote) and ``@{verbatim
+  "\\"}'' (backslash) need to be escaped by a backslash; arbitrary
+  character codes may be specified as ``@{verbatim "\\"}@{text ddd}'',
+  with three decimal digits.  Alternative strings according to
+  @{syntax_ref altstring} are analogous, using single back-quotes
+  instead.
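+
+  For illustration, the string @{verbatim "\"a \\\"quoted\\\" word\""}
+  contains an escaped pair of double-quotes, and a hypothetical
+  @{verbatim "\"line one\\010line two\""} uses character code 10
+  (newline) to span two lines.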
+
+  The body of @{syntax_ref verbatim} may consist of any text not
+  containing ``@{verbatim "*"}@{verbatim "}"}''; this allows
+  convenient inclusion of quotes without further escapes.  There is no
+  way to escape ``@{verbatim "*"}@{verbatim "}"}''.  If the quoted
+  text is {\LaTeX} source, one may usually add some blank or comment
+  to avoid the critical character sequence.
+
+  Source comments take the form @{verbatim "(*"}~@{text
+  "\<dots>"}~@{verbatim "*)"} and may be nested, although the user-interface
+  might prevent this.  Note that this form indicates source comments
+  only, which are stripped after lexical analysis of the input.  The
+  Isar syntax also provides proper \emph{document comments} that are
+  considered as part of the text (see \secref{sec:comments}).
+
+  Common mathematical symbols such as @{text \<forall>} are represented in
+  Isabelle as @{verbatim \<forall>}.  There are infinitely many Isabelle
+  symbols like this, although proper presentation is left to front-end
+  tools such as {\LaTeX}, Proof~General, or Isabelle/jEdit.  A list of
+  predefined Isabelle symbols that work well with these tools is given
+  in \appref{app:symbols}.  Note that @{verbatim "\<lambda>"} does not belong
+  to the @{text letter} category, since it is already used differently
+  in the Pure term language.  *}
+
+
+section {* Common syntax entities *}
+
+text {*
+  We now introduce several basic syntactic entities, such as names,
+  terms, and theorem specifications, which are factored out of the
+  actual Isar language elements to be described later.
+*}
+
+
+subsection {* Names *}
+
+text {* Entity @{syntax name} usually refers to any name of types,
+  constants, theorems etc.\ that are to be \emph{declared} or
+  \emph{defined} (so qualified identifiers are excluded here).  Quoted
+  strings provide an escape for non-identifier names or those ruled
+  out by outer syntax keywords (e.g.\ quoted @{verbatim "\"let\""}).
+  Already existing objects are usually referenced by @{syntax
+  nameref}.
+
+  @{rail "
+    @{syntax_def name}: @{syntax ident} | @{syntax symident} |
+      @{syntax string} | @{syntax nat}
+    ;
+    @{syntax_def parname}: '(' @{syntax name} ')'
+    ;
+    @{syntax_def nameref}: @{syntax name} | @{syntax longident}
+  "}
+*}
+
+
+subsection {* Numbers *}
+
+text {* The outer lexical syntax (\secref{sec:outer-lex}) admits
+  natural numbers and floating point numbers.  These are combined as
+  @{syntax int} and @{syntax real} as follows.
+
+  @{rail "
+    @{syntax_def int}: @{syntax nat} | '-' @{syntax nat}
+    ;
+    @{syntax_def real}: @{syntax float} | @{syntax int}
+  "}
+
+  Note that there is an overlap with the category @{syntax name},
+  which also includes @{syntax nat}.
+*}
+
+
+subsection {* Comments \label{sec:comments} *}
+
+text {* Large chunks of plain @{syntax text} are usually given
+  @{syntax verbatim}, i.e.\ enclosed in @{verbatim "{"}@{verbatim
+  "*"}~@{text "\<dots>"}~@{verbatim "*"}@{verbatim "}"}.  For convenience,
+  any of the smaller text units conforming to @{syntax nameref} are
+  admitted as well.  A marginal @{syntax comment} is of the form
+  @{verbatim "--"}~@{syntax text}.  Any number of these may occur
+  within Isabelle/Isar commands.
+
+  @{rail "
+    @{syntax_def text}: @{syntax verbatim} | @{syntax nameref}
+    ;
+    @{syntax_def comment}: '--' @{syntax text}
+  "}
+*}
+
+
+subsection {* Type classes, sorts and arities *}
+
+text {*
+  Classes are specified by plain names.  Sorts have a very simple
+  inner syntax, which is either a single class name @{text c} or a
+  list @{text "{c\<^sub>1, \<dots>, c\<^sub>n}"} referring to the
+  intersection of these classes.  The syntax of type arities is given
+  directly at the outer level.
+
+  @{rail "
+    @{syntax_def classdecl}: @{syntax name} (('<' | '\<subseteq>') (@{syntax nameref} + ','))?
+    ;
+    @{syntax_def sort}: @{syntax nameref}
+    ;
+    @{syntax_def arity}: ('(' (@{syntax sort} + ',') ')')? @{syntax sort}
+  "}
+*}
+
+
+subsection {* Types and terms \label{sec:types-terms} *}
+
+text {*
+  The actual inner Isabelle syntax, that of types and terms of the
+  logic, is far too sophisticated to be modelled explicitly
+  at the outer theory level.  Basically, any such entity has to be
+  quoted to turn it into a single token (the parsing and type-checking
+  is performed internally later).  For convenience, a slightly more
+  liberal convention is adopted: quotes may be omitted for any type or
+  term that is already atomic at the outer level.  For example, one
+  may just write @{verbatim x} instead of quoted @{verbatim "\"x\""}.
+  Note that symbolic identifiers (e.g.\ @{verbatim "++"} or @{text
+  "\<forall>"}) are available as well, provided these have not been superseded
+  by commands or other keywords already (such as @{verbatim "="} or
+  @{verbatim "+"}).
+
+  @{rail "
+    @{syntax_def type}: @{syntax nameref} | @{syntax typefree} |
+      @{syntax typevar}
+    ;
+    @{syntax_def term}: @{syntax nameref} | @{syntax var}
+    ;
+    @{syntax_def prop}: @{syntax term}
+  "}
+
+  Positional instantiations are indicated by giving a sequence of
+  terms, or the placeholder ``@{text _}'' (underscore), which means to
+  skip a position.
+
+  @{rail "
+    @{syntax_def inst}: '_' | @{syntax term}
+    ;
+    @{syntax_def insts}: (@{syntax inst} *)
+  "}
+
+  Type declarations and definitions usually refer to @{syntax
+  typespec} on the left-hand side.  This models basic type constructor
+  application at the outer syntax level.  Note that only plain postfix
+  notation is available here, but no infixes.
+
+  @{rail "
+    @{syntax_def typespec}:
+      (() | @{syntax typefree} | '(' ( @{syntax typefree} + ',' ) ')') @{syntax name}
+    ;
+    @{syntax_def typespec_sorts}:
+      (() | (@{syntax typefree} ('::' @{syntax sort})?) |
+        '(' ( (@{syntax typefree} ('::' @{syntax sort})?) + ',' ) ')') @{syntax name}
+  "}
+*}
+
+
+subsection {* Term patterns and declarations \label{sec:term-decls} *}
+
+text {* Wherever explicit propositions (or term fragments) occur in a
+  proof text, casual binding of schematic term variables may be
+  specified via patterns of the form ``@{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"}''.
+  This works both for @{syntax term} and @{syntax prop}.
+
+  @{rail "
+    @{syntax_def term_pat}: '(' (@'is' @{syntax term} +) ')'
+    ;
+    @{syntax_def prop_pat}: '(' (@'is' @{syntax prop} +) ')'
+  "}
+
+  \medskip Declarations of local variables @{text "x :: \<tau>"} and
+  logical propositions @{text "a : \<phi>"} represent different views on
+  the same principle of introducing a local scope.  In practice, one
+  may usually omit the typing of @{syntax vars} (due to
+  type-inference), and the naming of propositions (due to implicit
+  references to current facts).  In any case, Isar proof elements
+  usually admit the introduction of multiple such items simultaneously.
+
+  @{rail "
+    @{syntax_def vars}: (@{syntax name} +) ('::' @{syntax type})?
+    ;
+    @{syntax_def props}: @{syntax thmdecl}? (@{syntax prop} @{syntax prop_pat}? +)
+  "}
+
+  The treatment of multiple declarations corresponds to the
+  complementary focus of @{syntax vars} versus @{syntax props}.  In
+  ``@{text "x\<^sub>1 \<dots> x\<^sub>n :: \<tau>"}'' the typing refers to all variables, while
+  in @{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} the naming refers to all propositions
+  collectively.  Isar language elements that refer to @{syntax vars}
+  or @{syntax props} typically admit separate typings or namings via
+  another level of iteration, with explicit @{keyword_ref "and"}
+  separators; e.g.\ see @{command "fix"} and @{command "assume"} in
+  \secref{sec:proof-context}.
+*}
+
+
+subsection {* Attributes and theorems \label{sec:syn-att} *}
+
+text {* Attributes have their own ``semi-inner'' syntax, in the sense
+  that input conforming to @{syntax args} below is parsed by the
+  attribute a second time.  The attribute argument specifications may
+  be any sequence of atomic entities (identifiers, strings etc.), or
+  properly bracketed argument lists.  Below @{syntax atom} refers to
+  any atomic entity, including any @{syntax keyword} conforming to
+  @{syntax symident}.
+
+  @{rail "
+    @{syntax_def atom}: @{syntax nameref} | @{syntax typefree} |
+      @{syntax typevar} | @{syntax var} | @{syntax nat} | @{syntax float} |
+      @{syntax keyword}
+    ;
+    arg: @{syntax atom} | '(' @{syntax args} ')' | '[' @{syntax args} ']'
+    ;
+    @{syntax_def args}: arg *
+    ;
+    @{syntax_def attributes}: '[' (@{syntax nameref} @{syntax args} * ',') ']'
+  "}
+
+  Theorem specifications come in several flavors: @{syntax axmdecl}
+  and @{syntax thmdecl} usually refer to axioms, assumptions or
+  results of goal statements, while @{syntax thmdef} collects lists of
+  existing theorems.  Existing theorems are given by @{syntax thmref}
+  and @{syntax thmrefs}, the former requires an actual singleton
+  result.
+
+  There are three forms of theorem references:
+  \begin{enumerate}
+  
+  \item named facts @{text "a"},
+
+  \item selections from named facts @{text "a(i)"} or @{text "a(j - k)"} (see the examples below),
+
+  \item literal fact propositions using @{syntax_ref altstring} syntax
+  @{verbatim "`"}@{text "\<phi>"}@{verbatim "`"} (see also method
+  @{method_ref fact}).
+
+  \end{enumerate}
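+
+  For example, given a hypothetical fact list @{text a} with at least
+  three members, valid selections include @{text "a(1)"} (the first
+  member), @{text "a(2-3)"} (the second and third), and @{text
+  "a(2-)"} (everything from the second member onwards).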
+
+  Any kind of theorem specification may include lists of attributes
+  both on the left and right hand sides; attributes are applied to any
+  immediately preceding fact.  If names are omitted, the theorems are
+  not stored within the theorem database of the theory or proof
+  context, but any given attributes are applied nonetheless.
+
+  An extra pair of brackets around attributes (like ``@{text
+  "[[simproc a]]"}'') abbreviates a theorem reference involving an
+  internal dummy fact, which will be ignored later on.  So only the
+  effect of the attribute on the background context will persist.
+  This form of in-place declarations is particularly useful with
+  commands like @{command "declare"} and @{command "using"}.
+
+  @{rail "
+    @{syntax_def axmdecl}: @{syntax name} @{syntax attributes}? ':'
+    ;
+    @{syntax_def thmdecl}: thmbind ':'
+    ;
+    @{syntax_def thmdef}: thmbind '='
+    ;
+    @{syntax_def thmref}:
+      (@{syntax nameref} selection? | @{syntax altstring}) @{syntax attributes}? |
+      '[' @{syntax attributes} ']'
+    ;
+    @{syntax_def thmrefs}: @{syntax thmref} +
+    ;
+
+    thmbind: @{syntax name} @{syntax attributes} | @{syntax name} | @{syntax attributes}
+    ;
+    selection: '(' ((@{syntax nat} | @{syntax nat} '-' @{syntax nat}?) + ',') ')'
+  "}
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Preface.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,70 @@
+theory Preface
+imports Base Main
+begin
+
+chapter {* Preface *}
+
+text {*
+  The \emph{Isabelle} system essentially provides a generic
+  infrastructure for building deductive systems (programmed in
+  Standard ML), with a special focus on interactive theorem proving in
+  higher-order logics.  Many years ago, even end-users would refer to
+  certain ML functions (goal commands, tactics, tacticals etc.) to
+  pursue their everyday theorem proving tasks.
+  
+  In contrast \emph{Isar} provides an interpreted language environment
+  of its own, which has been specifically tailored for the needs of
+  theory and proof development.  Compared to raw ML, the Isabelle/Isar
+  top-level provides a more robust and comfortable development
+  platform, with proper support for theory development graphs, managed
+  transactions with unlimited undo etc.  The Isabelle/Isar version of
+  the \emph{Proof~General} user interface
+  \cite{proofgeneral,Aspinall:TACAS:2000} provides a decent front-end
+  for interactive theory and proof development in this advanced
+  theorem proving environment, even though it is somewhat biased
+  towards old-style proof scripts.
+
+  \medskip Apart from the technical advances over bare-bones ML
+  programming, the main purpose of the Isar language is to provide a
+  conceptually different view on machine-checked proofs
+  \cite{Wenzel:1999:TPHOL,Wenzel-PhD}.  \emph{Isar} stands for
+  \emph{Intelligible semi-automated reasoning}.  Drawing from both the
+  traditions of informal mathematical proof texts and high-level
+  programming languages, Isar offers a versatile environment for
+  structured formal proof documents.  Thus properly written Isar
+  proofs become accessible to a broader audience than unstructured
+  tactic scripts (which typically only provide operational information
+  for the machine).  Writing human-readable proof texts certainly
+  requires some additional efforts by the writer to achieve a good
+  presentation, both of formal and informal parts of the text.  On the
+  other hand, human-readable formal texts gain some value in their own
+  right, independently of the mechanical proof-checking process.
+
+  Despite its grand design of structured proof texts, Isar is able to
+  assimilate the old tactical style as an ``improper'' sub-language.
+  This provides an easy upgrade path for existing tactic scripts, as
+  well as some means for interactive experimentation and debugging of
+  structured proofs.  Isabelle/Isar supports a broad range of proof
+  styles, both readable and unreadable ones.
+
+  \medskip The generic Isabelle/Isar framework (see
+  \chref{ch:isar-framework}) works reasonably well for any Isabelle
+  object-logic that conforms to the natural deduction view of the
+  Isabelle/Pure framework.  Specific language elements introduced by
+  Isabelle/HOL are described in \chref{ch:hol}.  Although the main
+  language elements are already provided by the Isabelle/Pure
+  framework, examples given in the generic parts will usually refer to
+  Isabelle/HOL.
+
+  \medskip Isar commands may be either \emph{proper} document
+  constructors, or \emph{improper commands}.  Some proof methods and
+  attributes introduced later are classified as improper as well.
+  Improper Isar language elements are marked by ``@{text
+  "\<^sup>*"}'' in the subsequent chapters; they are often helpful
+  when developing proof documents, but their use is discouraged for
+  the final human-readable outcome.  Typical examples are diagnostic
+  commands that print terms or theorems according to the current
+  context; other commands emulate old-style tactical theorem proving.
+*}
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Proof.thy	Tue Aug 28 12:45:49 2012 +0200
@@ -0,0 +1,1522 @@
+theory Proof
+imports Base Main
+begin
+
+chapter {* Proofs \label{ch:proofs} *}
+
+text {*
+  Proof commands perform transitions of Isar/VM machine
+  configurations, which are block-structured, consisting of a stack of
+  nodes with three main components: logical proof context, current
+  facts, and open goals.  Isar/VM transitions are typed according to
+  the following three different modes of operation:
+
+  \begin{description}
+
+  \item @{text "proof(prove)"} means that a new goal has just been
+  stated that is now to be \emph{proven}; the next command may refine
+  it by some proof method, and enter a sub-proof to establish the
+  actual result.
+
+  \item @{text "proof(state)"} is like a nested theory mode: the
+  context may be augmented by \emph{stating} additional assumptions,
+  intermediate results etc.
+
+  \item @{text "proof(chain)"} is intermediate between @{text
+  "proof(state)"} and @{text "proof(prove)"}: existing facts (i.e.\
+  the contents of the special ``@{fact_ref this}'' register) have been
+  just picked up in order to be used when refining the goal claimed
+  next.
+
+  \end{description}
+
+  The proof mode indicator may be understood as an instruction to the
+  writer, telling what kind of operation may be performed next.  The
+  corresponding typings of proof commands restrict the shape of
+  well-formed proof texts to particular command sequences.  So dynamic
+  arrangements of commands eventually turn out as static texts of a
+  certain structure.
+
+  \Appref{ap:refcard} gives a simplified grammar of the (extensible)
+  language emerging that way from the different types of proof
+  commands.  The main ideas of the overall Isar framework are
+  explained in \chref{ch:isar-framework}.
+*}
+
+
+section {* Proof structure *}
+
+subsection {* Formal notepad *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "notepad"} & : & @{text "local_theory \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  @{rail "
+    @@{command notepad} @'begin'
+    ;
+    @@{command end}
+  "}
+
+  \begin{description}
+
+  \item @{command "notepad"}~@{keyword "begin"} opens a proof state
+  without any goal statement.  This allows one to experiment with
+  Isar, without producing any persistent result.
+
+  The notepad can be closed by @{command "end"} or discontinued by
+  @{command "oops"}.
+
+  \end{description}
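+
+  For example, a minimal (hypothetical) notepad might read:
+\begin{ttbox}
+  notepad
+  begin
+    have "True" ..
+  end
+\end{ttbox}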
+*}
+
+
+subsection {* Blocks *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "next"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "{"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "}"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  While Isar is inherently block-structured, opening and closing
+  blocks is mostly handled rather casually, with little explicit
+  user-intervention.  Any local goal statement automatically opens
+  \emph{two} internal blocks, which are closed again when concluding
+  the sub-proof (by @{command "qed"} etc.).  Sections of different
+  context within a sub-proof may be switched via @{command "next"},
+  which is just a single block-close followed by block-open again.
+  The effect of @{command "next"} is to reset the local proof context;
+  there is no goal focus involved here!
+
+  For slightly more advanced applications, there are explicit block
+  parentheses as well.  These typically achieve a stronger forward
+  style of reasoning.
+
+  \begin{description}
+
+  \item @{command "next"} switches to a fresh block within a
+  sub-proof, resetting the local context to the initial one.
+
+  \item @{command "{"} and @{command "}"} explicitly open and close
+  blocks.  Any current facts pass through ``@{command "{"}''
+  unchanged, while ``@{command "}"}'' causes any result to be
+  \emph{exported} into the enclosing context.  Thus fixed variables
+  are generalized, assumptions discharged, and local definitions
+  unfolded (cf.\ \secref{sec:proof-context}).  There is no difference
+  between @{command "assume"} and @{command "presume"} in this mode of
+  forward reasoning --- in contrast to plain backward reasoning with
+  the result exported at @{command "show"} time.
+
+  \end{description}
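+
+  For example, a hypothetical fragment ``@{command "{"}~@{command
+  "fix"}~@{text x}~@{command "have"}~@{text "\"x = x\""}~@{command
+  "by"}~@{text "(rule refl)"}~@{command "}"}'' exports the generalized
+  result @{text "\<And>x. x = x"} into the enclosing context.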
+*}
+
+
+subsection {* Omitting proofs *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "oops"} & : & @{text "proof \<rightarrow> local_theory | theory"} \\
+  \end{matharray}
+
+  The @{command "oops"} command discontinues the current proof
+  attempt, while considering the partial proof text as properly
+  processed.  This is conceptually quite different from ``faking''
+  actual proofs via @{command_ref "sorry"} (see
+  \secref{sec:proof-steps}): @{command "oops"} does not observe the
+  proof structure at all, but goes back right to the theory level.
+  Furthermore, @{command "oops"} does not produce any result theorem
+  --- there is no intended claim to be able to complete the proof
+  in any way.
+
+  A typical application of @{command "oops"} is to explain Isar proofs
+  \emph{within} the system itself, in conjunction with the document
+  preparation tools of Isabelle described in \chref{ch:document-prep}.
+  Thus partial or even wrong proof attempts can be discussed in a
+  logically sound manner.  Note that the Isabelle {\LaTeX} macros can
+  be easily adapted to print something like ``@{text "\<dots>"}'' instead of
+  the keyword ``@{command "oops"}''.
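+
+  For example, a hypothetical discussion of an unprovable claim might
+  read:
+\begin{ttbox}
+  lemma "x = y"    (*wrong in general!*)
+    oops
+\end{ttbox}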
+*}
+
+
+section {* Statements *}
+
+subsection {* Context elements \label{sec:proof-context} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "fix"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "assume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "presume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "def"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+  \end{matharray}
+
+  The logical proof context consists of fixed variables and
+  assumptions.  The former closely correspond to Skolem constants, or
+  meta-level universal quantification as provided by the Isabelle/Pure
+  logical framework.  Introducing some \emph{arbitrary, but fixed}
+  variable via ``@{command "fix"}~@{text x}'' results in a local value
+  that may be used in the subsequent proof like any other variable or
+  constant.  Furthermore, any result @{text "\<turnstile> \<phi>[x]"} exported from
+  the context will be universally closed wrt.\ @{text x} at the
+  outermost level: @{text "\<turnstile> \<And>x. \<phi>[x]"} (this is expressed in normal
+  form using Isabelle's meta-variables).
+
+  Similarly, introducing some assumption @{text \<chi>} has two effects.
+  On the one hand, a local theorem is created that may be used as a
+  fact in subsequent proof steps.  On the other hand, any result
+  @{text "\<chi> \<turnstile> \<phi>"} exported from the context becomes conditional wrt.\
+  the assumption: @{text "\<turnstile> \<chi> \<Longrightarrow> \<phi>"}.  Thus, solving an enclosing goal
+  using such a result would basically introduce a new subgoal stemming
+  from the assumption.  How this situation is handled depends on the
+  version of assumption command used: while @{command "assume"}
+  insists on solving the subgoal by unification with some premise of
+  the goal, @{command "presume"} leaves the subgoal unchanged in order
+  to be proved later by the user.
+
+  Local definitions, introduced by ``@{command "def"}~@{text "x \<equiv>
+  t"}'', are achieved by combining ``@{command "fix"}~@{text x}'' with
+  another version of assumption that causes any hypothetical equation
+  @{text "x \<equiv> t"} to be eliminated by the reflexivity rule.  Thus,
+  exporting some result @{text "x \<equiv> t \<turnstile> \<phi>[x]"} yields @{text "\<turnstile>
+  \<phi>[t]"}.
+
+  @{rail "
+    @@{command fix} (@{syntax vars} + @'and')
+    ;
+    (@@{command assume} | @@{command presume}) (@{syntax props} + @'and')
+    ;
+    @@{command def} (def + @'and')
+    ;
+    def: @{syntax thmdecl}? \\ @{syntax name} ('==' | '\<equiv>') @{syntax term} @{syntax term_pat}?
+  "}
+
+  \begin{description}
+  
+  \item @{command "fix"}~@{text x} introduces a local variable @{text
+  x} that is \emph{arbitrary, but fixed.}
+  
+  \item @{command "assume"}~@{text "a: \<phi>"} and @{command
+  "presume"}~@{text "a: \<phi>"} introduce a local fact @{text "\<phi> \<turnstile> \<phi>"} by
+  assumption.  Subsequent results applied to an enclosing goal (e.g.\
+  by @{command_ref "show"}) are handled as follows: @{command
+  "assume"} expects to be able to unify with existing premises in the
+  goal, while @{command "presume"} leaves @{text \<phi>} as new subgoals.
+  
+  Several lists of assumptions may be given (separated by
+  @{keyword_ref "and"}); the resulting list of current facts consists
+  of all of these concatenated.
+  
+  \item @{command "def"}~@{text "x \<equiv> t"} introduces a local
+  (non-polymorphic) definition.  In results exported from the context,
+  @{text x} is replaced by @{text t}.  Basically, ``@{command
+  "def"}~@{text "x \<equiv> t"}'' abbreviates ``@{command "fix"}~@{text
+  x}~@{command "assume"}~@{text "x \<equiv> t"}'', with the resulting
+  hypothetical equation solved by reflexivity.
+  
+  The default name for the definitional equation is @{text x_def}.
+  Several definitions may be given simultaneously.
+
+  \end{description}
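+
+  For example, a hypothetical proof context might combine these
+  elements as follows:
+\begin{ttbox}
+  fix x :: nat
+  assume a: "0 < x"
+  def y == "x + x"    (*yields fact y_def: "y == x + x"*)
+\end{ttbox}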
+
+  The special name @{fact_ref prems} refers to all assumptions of the
+  current context as a list of theorems.  This feature should be used
+  with great care!  It is better avoided in final proof texts.
+*}
+
+
+subsection {* Term abbreviations \label{sec:term-abbrev} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "let"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{keyword_def "is"} & : & syntax \\
+  \end{matharray}
+
+  Abbreviations may be either bound by explicit @{command
+  "let"}~@{text "p \<equiv> t"} statements, or by annotating assumptions or
+  goal statements with a list of patterns ``@{text "(\<IS> p\<^sub>1 \<dots>
+  p\<^sub>n)"}''.  In both cases, higher-order matching is invoked to
+  bind extra-logical term variables, which may be either named
+  schematic variables of the form @{text ?x}, or nameless dummies
+  ``@{variable _}'' (underscore). Note that in the @{command "let"}
+  form the patterns occur on the left-hand side, while the @{keyword
+  "is"} patterns are in postfix position.
+
+  Polymorphism of term bindings is handled in Hindley-Milner style,
+  similar to ML.  Type variables referring to local assumptions or
+  open goal statements are \emph{fixed}, while those of finished
+  results or bound by @{command "let"} may occur in \emph{arbitrary}
+  instances later.  Even though actual polymorphism should be rarely
+  used in practice, this mechanism is essential to achieve proper
+  incremental type-inference, as the user proceeds to build up the
+  Isar proof text from left to right.
+
+  \medskip Term abbreviations are quite different from local
+  definitions as introduced via @{command "def"} (see
+  \secref{sec:proof-context}).  The latter are visible within the
+  logic as actual equations, while abbreviations disappear during the
+  input process just after type checking.  Also note that @{command
+  "def"} does not support polymorphism.
+
+  @{rail "
+    @@{command let} ((@{syntax term} + @'and') '=' @{syntax term} + @'and')
+  "}
+
+  The syntax of @{keyword "is"} patterns follows @{syntax term_pat} or
+  @{syntax prop_pat} (see \secref{sec:term-decls}).
+
+  \begin{description}
+
+  \item @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
+  text variables in patterns @{text "p\<^sub>1, \<dots>, p\<^sub>n"} by simultaneous
+  higher-order matching against terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"}.
+
+  \item @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
+  matches @{text "p\<^sub>1, \<dots>, p\<^sub>n"} against the preceding statement.  Also
+  note that @{keyword "is"} is not a separate command, but part of
+  others (such as @{command "assume"}, @{command "have"} etc.).
+
+  \end{description}
+
+  Some \emph{implicit} term abbreviations\index{term abbreviations}
+  for goals and facts are available as well.  For any open goal,
+  @{variable_ref thesis} refers to its object-level statement,
+  abstracted over any meta-level parameters (if present).  Likewise,
+  @{variable_ref this} is bound for fact statements resulting from
+  assumptions or finished goals.  In case @{variable this} refers to
+  an object-logic statement that is an application @{text "f t"}, then
+  @{text t} is bound to the special text variable ``@{variable "\<dots>"}''
+  (three dots).  The canonical applications of this convenience are
+  calculational proofs (see \secref{sec:calculation}).
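+
+  For example, a hypothetical fragment might bind and re-use a term
+  abbreviation like this:
+\begin{ttbox}
+  let ?t = "x + y"    (*?t now abbreviates "x + y"*)
+  have "?t = ?t" ..
+\end{ttbox}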
+*}
+
+
+subsection {* Facts and forward chaining \label{sec:proof-facts} *}
+
+text {*
+  \begin{matharray}{rcl}
+    @{command_def "note"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
+    @{command_def "then"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "from"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "with"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
+    @{command_def "using"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
+    @{command_def "unfolding"} & : & @{text "proof(prove) \<rightarrow> proof(prove)"} \\
+  \end{matharray}
+
+  New facts are established either by assumption or proof of local
+  statements.  Any fact will usually be involved in further proofs,
+  either as explicit arguments of proof methods, or when forward
+  chaining towards the next goal via @{command "then"} (and variants);
+  @{command "from"} and @{command "with"} are composite forms
+  involving @{command "note"}.  The @{command "using"} elements
+  augments the collection of used facts \emph{after} a goal has been
+  stated.  Note that the special theorem name @{fact_ref this} refers
+  to the most recently established facts, but only \emph{before}
+  issuing a follow-up claim.
+
+  @{rail "