src/Doc/Implementation/Logic.thy
changeset 56420 b266e7a86485
parent 56243 2e10a36b8d46
child 56579 4c94f631c595
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Implementation/Logic.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1462 @@
+theory Logic
+imports Base
+begin
+
+chapter {* Primitive logic \label{ch:logic} *}
+
+text {*
+  The logical foundations of Isabelle/Isar are those of the Pure logic,
+  which has been introduced as a Natural Deduction framework in
+  \cite{paulson700}.  This is essentially the same logic as ``@{text
+  "\<lambda>HOL"}'' in the more abstract setting of Pure Type Systems (PTS)
+  \cite{Barendregt-Geuvers:2001}, although there are some key
+  differences in the specific treatment of simple types in
+  Isabelle/Pure.
+
+  Following type-theoretic parlance, the Pure logic consists of three
+  levels of @{text "\<lambda>"}-calculus with corresponding arrows, @{text
+  "\<Rightarrow>"} for syntactic function space (terms depending on terms), @{text
+  "\<And>"} for universal quantification (proofs depending on terms), and
+  @{text "\<Longrightarrow>"} for implication (proofs depending on proofs).
+
+  Derivations are relative to a logical theory, which declares type
+  constructors, constants, and axioms.  Theory declarations support
+  schematic polymorphism, which is strictly speaking outside the
+  logic.\footnote{This is the deeper logical reason why the theory
+  context @{text "\<Theta>"} is separate from the proof context @{text "\<Gamma>"}
+  of the core calculus: type constructors, term constants, and facts
+  (proof constants) may involve arbitrary type schemes, but the type
+  of a locally fixed term parameter is also fixed!}
+*}
+
+
+section {* Types \label{sec:types} *}
+
+text {*
+  The language of types is an uninterpreted order-sorted first-order
+  algebra; types are qualified by ordered type classes.
+
+  \medskip A \emph{type class} is an abstract syntactic entity
+  declared in the theory context.  The \emph{subclass relation} @{text
+  "c\<^sub>1 \<subseteq> c\<^sub>2"} is specified by stating an acyclic
+  generating relation; the transitive closure is maintained
+  internally.  The resulting relation is an ordering: reflexive,
+  transitive, and antisymmetric.
+
+  A \emph{sort} is a list of type classes written as @{text "s = {c\<^sub>1,
+  \<dots>, c\<^sub>m}"}, it represents symbolic intersection.  Notationally, the
+  curly braces are omitted for singleton intersections, i.e.\ any
+  class @{text "c"} may be read as a sort @{text "{c}"}.  The ordering
+  on type classes is extended to sorts according to the meaning of
+  intersections: @{text "{c\<^sub>1, \<dots>, c\<^sub>m} \<subseteq> {d\<^sub>1, \<dots>, d\<^sub>n}"} iff @{text
+  "\<forall>j. \<exists>i. c\<^sub>i \<subseteq> d\<^sub>j"}.  The empty intersection @{text "{}"} refers to
+  the universal sort, which is the largest element wrt.\ the sort
+  order.  Thus @{text "{}"} represents the ``full sort'', not the
+  empty one!  The intersection of all (finitely many) classes declared
+  in the current theory is the least element wrt.\ the sort ordering.
+
+  \medskip A \emph{fixed type variable} is a pair of a basic name
+  (starting with a @{text "'"} character) and a sort constraint, e.g.\
+  @{text "('a, s)"} which is usually printed as @{text "\<alpha>\<^sub>s"}.
+  A \emph{schematic type variable} is a pair of an indexname and a
+  sort constraint, e.g.\ @{text "(('a, 0), s)"} which is usually
+  printed as @{text "?\<alpha>\<^sub>s"}.
+
+  Note that \emph{all} syntactic components contribute to the identity
+  of type variables: basic name, index, and sort constraint.  The core
+  logic treats type variables with the same name but different sorts
+  as distinct, although the type-inference layer (which is outside
+  the core) rejects anything like that.
+
+  A \emph{type constructor} @{text "\<kappa>"} is a @{text "k"}-ary operator
+  on types declared in the theory.  Type constructor application is
+  written postfix as @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>k)\<kappa>"}.  For
+  @{text "k = 0"} the argument tuple is omitted, e.g.\ @{text "prop"}
+  instead of @{text "()prop"}.  For @{text "k = 1"} the parentheses
+  are omitted, e.g.\ @{text "\<alpha> list"} instead of @{text "(\<alpha>)list"}.
+  Further notation is provided for specific constructors, notably the
+  right-associative infix @{text "\<alpha> \<Rightarrow> \<beta>"} instead of @{text "(\<alpha>,
+  \<beta>)fun"}.
+  
+  The logical category \emph{type} is defined inductively over type
+  variables and type constructors as follows: @{text "\<tau> = \<alpha>\<^sub>s | ?\<alpha>\<^sub>s |
+  (\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"}.
+
+  A \emph{type abbreviation} is a syntactic definition @{text
+  "(\<^vec>\<alpha>)\<kappa> = \<tau>"} of an arbitrary type expression @{text "\<tau>"} over
+  variables @{text "\<^vec>\<alpha>"}.  Type abbreviations appear as type
+  constructors in the syntax, but are expanded before entering the
+  logical core.
+
+  A \emph{type arity} declares the image behavior of a type
+  constructor wrt.\ the algebra of sorts: @{text "\<kappa> :: (s\<^sub>1, \<dots>,
+  s\<^sub>k)s"} means that @{text "(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"} is
+  of sort @{text "s"} if every argument type @{text "\<tau>\<^sub>i"} is
+  of sort @{text "s\<^sub>i"}.  Arity declarations are implicitly
+  completed, i.e.\ @{text "\<kappa> :: (\<^vec>s)c"} entails @{text "\<kappa> ::
+  (\<^vec>s)c'"} for any @{text "c' \<supseteq> c"}.
+
+  \medskip The sort algebra is always maintained as \emph{coregular},
+  which means that type arities are consistent with the subclass
+  relation: for any type constructor @{text "\<kappa>"}, classes @{text
+  "c\<^sub>1 \<subseteq> c\<^sub>2"}, and arities @{text "\<kappa> ::
+  (\<^vec>s\<^sub>1)c\<^sub>1"} and @{text "\<kappa> ::
+  (\<^vec>s\<^sub>2)c\<^sub>2"}, the inclusion @{text "\<^vec>s\<^sub>1 \<subseteq>
+  \<^vec>s\<^sub>2"} holds component-wise.
+
+  The key property of a coregular order-sorted algebra is that sort
+  constraints can be solved in a most general fashion: for each type
+  constructor @{text "\<kappa>"} and sort @{text "s"} there is a most general
+  vector of argument sorts @{text "(s\<^sub>1, \<dots>, s\<^sub>k)"} such
+  that a type scheme @{text "(\<alpha>\<^bsub>s\<^sub>1\<^esub>, \<dots>,
+  \<alpha>\<^bsub>s\<^sub>k\<^esub>)\<kappa>"} is of sort @{text "s"}.
+  Consequently, type unification has most general solutions (modulo
+  equivalence of sorts), so type-inference produces principal types as
+  expected \cite{nipkow-prehofer}.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type class: string} \\
+  @{index_ML_type sort: "class list"} \\
+  @{index_ML_type arity: "string * sort list * sort"} \\
+  @{index_ML_type typ} \\
+  @{index_ML Term.map_atyps: "(typ -> typ) -> typ -> typ"} \\
+  @{index_ML Term.fold_atyps: "(typ -> 'a -> 'a) -> typ -> 'a -> 'a"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML Sign.subsort: "theory -> sort * sort -> bool"} \\
+  @{index_ML Sign.of_sort: "theory -> typ * sort -> bool"} \\
+  @{index_ML Sign.add_type: "Proof.context -> binding * int * mixfix -> theory -> theory"} \\
+  @{index_ML Sign.add_type_abbrev: "Proof.context ->
+  binding * string list * typ -> theory -> theory"} \\
+  @{index_ML Sign.primitive_class: "binding * class list -> theory -> theory"} \\
+  @{index_ML Sign.primitive_classrel: "class * class -> theory -> theory"} \\
+  @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type class} represents type classes.
+
+  \item Type @{ML_type sort} represents sorts, i.e.\ finite
+  intersections of classes.  The empty list @{ML "[]: sort"} refers to
+  the empty class intersection, i.e.\ the ``full sort''.
+
+  \item Type @{ML_type arity} represents type arities.  A triple
+  @{text "(\<kappa>, \<^vec>s, s) : arity"} represents @{text "\<kappa> ::
+  (\<^vec>s)s"} as described above.
+
+  \item Type @{ML_type typ} represents types; this is a datatype with
+  constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
+
+  \item @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
+  "f"} to all atomic types (@{ML TFree}, @{ML TVar}) occurring in
+  @{text "\<tau>"}.
+
+  \item @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
+  @{text "f"} over all occurrences of atomic types (@{ML TFree}, @{ML
+  TVar}) in @{text "\<tau>"}; the type structure is traversed from left to
+  right.
+
+  \item @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
+  tests the subsort relation @{text "s\<^sub>1 \<subseteq> s\<^sub>2"}.
+
+  \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
+  @{text "\<tau>"} is of sort @{text "s"}.
+
+  \item @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
+  new type constructor @{text "\<kappa>"} with @{text "k"} arguments and
+  optional mixfix syntax.
+
+  \item @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
+  defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"}.
+
+  \item @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
+  c\<^sub>n])"} declares a new class @{text "c"}, together with class
+  relations @{text "c \<subseteq> c\<^sub>i"}, for @{text "i = 1, \<dots>, n"}.
+
+  \item @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
+  c\<^sub>2)"} declares the class relation @{text "c\<^sub>1 \<subseteq>
+  c\<^sub>2"}.
+
+  \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
+  the arity @{text "\<kappa> :: (\<^vec>s)s"}.
+
+  \end{description}
+*}
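+
+text %mlex {* The following artificial example (a small sketch,
+  assuming a HOL-based context with type @{text "nat"} available)
+  exercises some of these operations: *}
+
+ML {*
+  (*the empty sort intersection is the "full sort":
+    every type belongs to it*)
+  @{assert} (Sign.of_sort @{theory} (@{typ nat}, []));
+
+  (*count the distinct atomic type variables of a type,
+    traversing from left to right*)
+  val n =
+    Term.fold_atyps
+      (fn TFree (a, _) => insert (op =) a | _ => I)
+      @{typ "'a list \<Rightarrow> 'b"} []
+    |> length;
+  @{assert} (n = 2);
+*}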
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "class"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "sort"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "type_name"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "type_abbrev"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "nonterminal"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "typ"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation class} nameref
+  ;
+  @@{ML_antiquotation sort} sort
+  ;
+  (@@{ML_antiquotation type_name} |
+   @@{ML_antiquotation type_abbrev} |
+   @@{ML_antiquotation nonterminal}) nameref
+  ;
+  @@{ML_antiquotation typ} type
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{class c}"} inlines the internalized class @{text
+  "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{sort s}"} inlines the internalized sort @{text "s"}
+  --- as @{ML_type "string list"} literal.
+
+  \item @{text "@{type_name c}"} inlines the internalized type
+  constructor @{text "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{type_abbrev c}"} inlines the internalized type
+  abbreviation @{text "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{nonterminal c}"} inlines the internalized syntactic
+  type / grammar nonterminal @{text "c"} --- as @{ML_type string}
+  literal.
+
+  \item @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
+  --- as constructor term for datatype @{ML_type typ}.
+
+  \end{description}
+*}
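+
+text %mlex {* For example (a sketch, assuming a HOL-based context
+  where @{text "bool"} and @{text "list"} are declared in theories
+  @{text "HOL"} and @{text "List"}, respectively), the antiquotations
+  are inlined as plain ML values: *}
+
+ML {*
+  (*internalized type constructor name --- a string literal*)
+  @{assert} (@{type_name list} = "List.list");
+
+  (*internalized type --- a constructor term of datatype typ*)
+  @{assert} (@{typ bool} = Type ("HOL.bool", []));
+*}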
+
+
+section {* Terms \label{sec:terms} *}
+
+text {*
+  The language of terms is that of simply-typed @{text "\<lambda>"}-calculus
+  with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
+  or \cite{paulson-ml2}), with the types being determined by the
+  corresponding binders.  In contrast, free variables and constants
+  have an explicit name and type in each occurrence.
+
+  \medskip A \emph{bound variable} is a natural number @{text "b"},
+  which accounts for the number of intermediate binders between the
+  variable occurrence in the body and its binding position.  For
+  example, the de-Bruijn term @{text "\<lambda>\<^bsub>bool\<^esub>. \<lambda>\<^bsub>bool\<^esub>. 1 \<and> 0"} would
+  correspond to @{text "\<lambda>x\<^bsub>bool\<^esub>. \<lambda>y\<^bsub>bool\<^esub>. x \<and> y"} in a named
+  representation.  Note that a bound variable may be represented by
+  different de-Bruijn indices at different occurrences, depending on
+  the nesting of abstractions.
+
+  A \emph{loose variable} is a bound variable that is outside the
+  scope of local binders.  The types (and names) for loose variables
+  can be managed as a separate context, which is maintained as a stack
+  of hypothetical binders.  The core logic operates on closed terms,
+  without any loose variables.
+
+  A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
+  @{text "(x, \<tau>)"} which is usually printed @{text "x\<^sub>\<tau>"} here.  A
+  \emph{schematic variable} is a pair of an indexname and a type,
+  e.g.\ @{text "((x, 0), \<tau>)"} which is likewise printed as @{text
+  "?x\<^sub>\<tau>"}.
+
+  \medskip A \emph{constant} is a pair of a basic name and a type,
+  e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text "c\<^sub>\<tau>"}
+  here.  Constants are declared in the context as polymorphic families
+  @{text "c :: \<sigma>"}, meaning that all substitution instances @{text
+  "c\<^sub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
+
+  The vector of \emph{type arguments} of constant @{text "c\<^sub>\<tau>"} wrt.\
+  the declaration @{text "c :: \<sigma>"} is defined as the codomain of the
+  matcher @{text "\<vartheta> = {?\<alpha>\<^sub>1 \<mapsto> \<tau>\<^sub>1, \<dots>, ?\<alpha>\<^sub>n \<mapsto> \<tau>\<^sub>n}"} presented in
+  canonical order @{text "(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n)"}, corresponding to the
+  left-to-right occurrences of the @{text "\<alpha>\<^sub>i"} in @{text "\<sigma>"}.
+  Within a given theory context, there is a one-to-one correspondence
+  between any constant @{text "c\<^sub>\<tau>"} and the application @{text "c(\<tau>\<^sub>1,
+  \<dots>, \<tau>\<^sub>n)"} of its type arguments.  For example, with @{text "plus :: \<alpha>
+  \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow> nat\<^esub>"} corresponds to
+  @{text "plus(nat)"}.
+
+  Constant declarations @{text "c :: \<sigma>"} may contain sort constraints
+  for type variables in @{text "\<sigma>"}.  These are observed by
+  type-inference as expected, but \emph{ignored} by the core logic.
+  This means the primitive logic is able to reason with instances of
+  polymorphic constants that the user-level type-checker would reject
+  due to violation of type class restrictions.
+
+  \medskip An \emph{atomic term} is either a variable or constant.
+  The logical category \emph{term} is defined inductively over atomic
+  terms, with abstraction and application as follows: @{text "t = b |
+  x\<^sub>\<tau> | ?x\<^sub>\<tau> | c\<^sub>\<tau> | \<lambda>\<^sub>\<tau>. t | t\<^sub>1 t\<^sub>2"}.  Parsing and printing take care of
+  converting between the internal de-Bruijn representation and an
+  external representation with named bound variables.  Subsequently,
+  we shall use the latter notation instead of the internal de-Bruijn
+  representation.
+
+  The inductive relation @{text "t :: \<tau>"} assigns a (unique) type to a
+  term according to the structure of atomic terms, abstractions, and
+  applications:
+  \[
+  \infer{@{text "a\<^sub>\<tau> :: \<tau>"}}{}
+  \qquad
+  \infer{@{text "(\<lambda>x\<^sub>\<tau>. t) :: \<tau> \<Rightarrow> \<sigma>"}}{@{text "t :: \<sigma>"}}
+  \qquad
+  \infer{@{text "t u :: \<sigma>"}}{@{text "t :: \<tau> \<Rightarrow> \<sigma>"} & @{text "u :: \<tau>"}}
+  \]
+  A \emph{well-typed term} is a term that can be typed according to these rules.
+
+  Typing information can be omitted: type-inference is able to
+  reconstruct the most general type of a raw term, while assigning
+  most general types to all of its variables and constants.
+  Type-inference depends on a context of type constraints for fixed
+  variables, and declarations for polymorphic constants.
+
+  The identity of atomic terms consists both of the name and the type
+  component.  This means that different variables @{text
+  "x\<^bsub>\<tau>\<^sub>1\<^esub>"} and @{text "x\<^bsub>\<tau>\<^sub>2\<^esub>"} may become the same after
+  type instantiation.  Type-inference rejects variables of the same
+  name, but different types.  In contrast, mixed instances of
+  polymorphic constants occur routinely.
+
+  \medskip The \emph{hidden polymorphism} of a term @{text "t :: \<sigma>"}
+  is the set of type variables occurring in @{text "t"}, but not in
+  its type @{text "\<sigma>"}.  This means that the term implicitly depends
+  on type arguments that are not accounted for in the result type, i.e.\
+  there are different type instances @{text "t\<vartheta> :: \<sigma>"} and
+  @{text "t\<vartheta>' :: \<sigma>"} with the same type.  This slightly
+  pathological situation notoriously demands additional care.
+
+  \medskip A \emph{term abbreviation} is a syntactic definition @{text
+  "c\<^sub>\<sigma> \<equiv> t"} of a closed term @{text "t"} of type @{text "\<sigma>"},
+  without any hidden polymorphism.  A term abbreviation looks like a
+  constant in the syntax, but is expanded before entering the logical
+  core.  Abbreviations are usually reverted when printing terms, using
+  @{text "t \<rightarrow> c\<^sub>\<sigma>"} as rules for higher-order rewriting.
+
+  \medskip Canonical operations on @{text "\<lambda>"}-terms include @{text
+  "\<alpha>\<beta>\<eta>"}-conversion: @{text "\<alpha>"}-conversion refers to capture-free
+  renaming of bound variables; @{text "\<beta>"}-conversion contracts an
+  abstraction applied to an argument term, substituting the argument
+  in the body: @{text "(\<lambda>x. b)a"} becomes @{text "b[a/x]"}; @{text
+  "\<eta>"}-conversion contracts vacuous application-abstraction: @{text
+  "\<lambda>x. f x"} becomes @{text "f"}, provided that the bound variable
+  does not occur in @{text "f"}.
+
+  Terms are normally treated modulo @{text "\<alpha>"}-conversion, which is
+  implicit in the de-Bruijn representation.  Names for bound variables
+  in abstractions are maintained separately as (meaningless) comments,
+  mostly for parsing and printing.  Full @{text "\<alpha>\<beta>\<eta>"}-conversion is
+  commonplace in various standard operations (\secref{sec:obj-rules})
+  that are based on higher-order unification and matching.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type term} \\
+  @{index_ML_op "aconv": "term * term -> bool"} \\
+  @{index_ML Term.map_types: "(typ -> typ) -> term -> term"} \\
+  @{index_ML Term.fold_types: "(typ -> 'a -> 'a) -> term -> 'a -> 'a"} \\
+  @{index_ML Term.map_aterms: "(term -> term) -> term -> term"} \\
+  @{index_ML Term.fold_aterms: "(term -> 'a -> 'a) -> term -> 'a -> 'a"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML fastype_of: "term -> typ"} \\
+  @{index_ML lambda: "term -> term -> term"} \\
+  @{index_ML betapply: "term * term -> term"} \\
+  @{index_ML incr_boundvars: "int -> term -> term"} \\
+  @{index_ML Sign.declare_const: "Proof.context ->
+  (binding * typ) * mixfix -> theory -> term * theory"} \\
+  @{index_ML Sign.add_abbrev: "string -> binding * term ->
+  theory -> (term * term) * theory"} \\
+  @{index_ML Sign.const_typargs: "theory -> string * typ -> typ list"} \\
+  @{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type term} represents de-Bruijn terms, with comments
+  in abstractions, and explicitly named free variables and constants;
+  this is a datatype with constructors @{index_ML Bound}, @{index_ML
+  Free}, @{index_ML Var}, @{index_ML Const}, @{index_ML Abs},
+  @{index_ML_op "$"}.
+
+  \item @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
+  "\<alpha>"}-equivalence of two terms.  This is the basic equality relation
+  on type @{ML_type term}; raw datatype equality should only be used
+  for operations related to parsing or printing!
+
+  \item @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
+  "f"} to all types occurring in @{text "t"}.
+
+  \item @{ML Term.fold_types}~@{text "f t"} iterates the operation
+  @{text "f"} over all occurrences of types in @{text "t"}; the term
+  structure is traversed from left to right.
+
+  \item @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
+  "f"} to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
+  Const}) occurring in @{text "t"}.
+
+  \item @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
+  @{text "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML
+  Free}, @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
+  traversed from left to right.
+
+  \item @{ML fastype_of}~@{text "t"} determines the type of a
+  well-typed term.  This operation is relatively slow, despite the
+  omission of any sanity checks.
+
+  \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
+  "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
+  body @{text "b"} are replaced by bound variables.
+
+  \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
+  "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
+  abstraction.
+
+  \item @{ML incr_boundvars}~@{text "j"} increments a term's dangling
+  bound variables by the offset @{text "j"}.  This is required when
+  moving a subterm into a context where it is enclosed by a different
+  number of abstractions.  Bound variables with a matching abstraction
+  are unaffected.
+
+  \item @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
+  a new constant @{text "c :: \<sigma>"} with optional mixfix syntax.
+
+  \item @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
+  introduces a new term abbreviation @{text "c \<equiv> t"}.
+
+  \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
+  Sign.const_instance}~@{text "thy (c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])"}
+  convert between two representations of polymorphic constants: full
+  type instance vs.\ compact type arguments form.
+
+  \end{description}
+*}
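+
+text %mlex {* The following small sketch (assuming a HOL context with
+  type @{text "nat"}) illustrates the basic term operations: @{ML
+  lambda} turns occurrences of a free variable into bound variables,
+  @{ML betapply} contracts the resulting redex again, and the result
+  is @{text "\<alpha>"}-equivalent to the original atomic term: *}
+
+ML {*
+  val x = Free ("x", @{typ nat});
+
+  (*abstraction: occurrences of x in the body become Bound 0*)
+  val t = lambda x x;
+  @{assert} (fastype_of t = @{typ "nat \<Rightarrow> nat"});
+
+  (*topmost \<beta>-conversion recovers the original term*)
+  @{assert} (betapply (t, x) aconv x);
+*}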
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "const_name"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "const_abbrev"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "const"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "term"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "prop"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  (@@{ML_antiquotation const_name} |
+   @@{ML_antiquotation const_abbrev}) nameref
+  ;
+  @@{ML_antiquotation const} ('(' (type + ',') ')')?
+  ;
+  @@{ML_antiquotation term} term
+  ;
+  @@{ML_antiquotation prop} prop
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{const_name c}"} inlines the internalized logical
+  constant name @{text "c"} --- as @{ML_type string} literal.
+
+  \item @{text "@{const_abbrev c}"} inlines the internalized
+  abbreviated constant name @{text "c"} --- as @{ML_type string}
+  literal.
+
+  \item @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
+  constant @{text "c"} with precise type instantiation in the sense of
+  @{ML Sign.const_instance} --- as @{ML Const} constructor term for
+  datatype @{ML_type term}.
+
+  \item @{text "@{term t}"} inlines the internalized term @{text "t"}
+  --- as constructor term for datatype @{ML_type term}.
+
+  \item @{text "@{prop \<phi>}"} inlines the internalized proposition
+  @{text "\<phi>"} --- as constructor term for datatype @{ML_type term}.
+
+  \end{description}
+*}
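+
+text %mlex {* For example (a sketch, assuming a HOL context), @{text
+  "@{prop \<dots>}"} yields the fully internalized proposition, including
+  the implicit @{text "Trueprop"} coercion from @{text "bool"} to
+  @{text "prop"}: *}
+
+ML {*
+  @{assert}
+    (@{prop "x = x"} =
+      Const (@{const_name Trueprop}, @{typ "bool \<Rightarrow> prop"}) $
+        @{term "x = x"});
+*}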
+
+
+section {* Theorems \label{sec:thms} *}
+
+text {*
+  A \emph{proposition} is a well-typed term of type @{text "prop"}, a
+  \emph{theorem} is a proven proposition (depending on a context of
+  hypotheses and the background theory).  Primitive inferences include
+  plain Natural Deduction rules for the primary connectives @{text
+  "\<And>"} and @{text "\<Longrightarrow>"} of the framework.  There is also a builtin
+  notion of equality/equivalence @{text "\<equiv>"}.
+*}
+
+
+subsection {* Primitive connectives and rules \label{sec:prim-rules} *}
+
+text {*
+  The theory @{text "Pure"} contains constant declarations for the
+  primitive connectives @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"} of
+  the logical framework, see \figref{fig:pure-connectives}.  The
+  derivability judgment @{text "A\<^sub>1, \<dots>, A\<^sub>n \<turnstile> B"} is
+  defined inductively by the primitive inferences given in
+  \figref{fig:prim-rules}, with the global restriction that the
+  hypotheses must \emph{not} contain any schematic variables.  The
+  builtin equality is conceptually axiomatized as shown in
+  \figref{fig:pure-equality}, although the implementation works
+  directly with derived inferences.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{ll}
+  @{text "all :: (\<alpha> \<Rightarrow> prop) \<Rightarrow> prop"} & universal quantification (binder @{text "\<And>"}) \\
+  @{text "\<Longrightarrow> :: prop \<Rightarrow> prop \<Rightarrow> prop"} & implication (right associative infix) \\
+  @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} & equality relation (infix) \\
+  \end{tabular}
+  \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
+  \end{center}
+  \end{figure}
+
+  \begin{figure}[htb]
+  \begin{center}
+  \[
+  \infer[@{text "(axiom)"}]{@{text "\<turnstile> A"}}{@{text "A \<in> \<Theta>"}}
+  \qquad
+  \infer[@{text "(assume)"}]{@{text "A \<turnstile> A"}}{}
+  \]
+  \[
+  \infer[@{text "(\<And>\<hyphen>intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. B[x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
+  \qquad
+  \infer[@{text "(\<And>\<hyphen>elim)"}]{@{text "\<Gamma> \<turnstile> B[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. B[x]"}}
+  \]
+  \[
+  \infer[@{text "(\<Longrightarrow>\<hyphen>intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+  \qquad
+  \infer[@{text "(\<Longrightarrow>\<hyphen>elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+  \]
+  \caption{Primitive inferences of Pure}\label{fig:prim-rules}
+  \end{center}
+  \end{figure}
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{ll}
+  @{text "\<turnstile> (\<lambda>x. b[x]) a \<equiv> b[a]"} & @{text "\<beta>"}-conversion \\
+  @{text "\<turnstile> x \<equiv> x"} & reflexivity \\
+  @{text "\<turnstile> x \<equiv> y \<Longrightarrow> P x \<Longrightarrow> P y"} & substitution \\
+  @{text "\<turnstile> (\<And>x. f x \<equiv> g x) \<Longrightarrow> f \<equiv> g"} & extensionality \\
+  @{text "\<turnstile> (A \<Longrightarrow> B) \<Longrightarrow> (B \<Longrightarrow> A) \<Longrightarrow> A \<equiv> B"} & logical equivalence \\
+  \end{tabular}
+  \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
+  \end{center}
+  \end{figure}
+
+  The introduction and elimination rules for @{text "\<And>"} and @{text
+  "\<Longrightarrow>"} are analogous to formation of dependently typed @{text
+  "\<lambda>"}-terms representing the underlying proof objects.  Proof terms
+  are irrelevant in the Pure logic, though; they cannot occur within
+  propositions.  The system provides a runtime option to record
+  explicit proof terms for primitive inferences, see also
+  \secref{sec:proof-terms}.  Thus all three levels of @{text
+  "\<lambda>"}-calculus become explicit: @{text "\<Rightarrow>"} for terms, and @{text
+  "\<And>/\<Longrightarrow>"} for proofs (cf.\ \cite{Berghofer-Nipkow:2000:TPHOL}).
+
+  Observe that locally fixed parameters (as in @{text
+  "\<And>\<hyphen>intro"}) need not be recorded in the hypotheses, because
+  the simple syntactic types of Pure are always inhabitable.
+  ``Assumptions'' @{text "x :: \<tau>"} for type-membership are only
+  present as long as some @{text "x\<^sub>\<tau>"} occurs in the statement
+  body.\footnote{This is the key difference to ``@{text "\<lambda>HOL"}'' in
+  the PTS framework \cite{Barendregt-Geuvers:2001}, where hypotheses
+  @{text "x : A"} are treated uniformly for propositions and types.}
+
+  \medskip The axiomatization of a theory is implicitly closed by
+  forming all instances of type and term variables: @{text "\<turnstile>
+  A\<vartheta>"} holds for any substitution instance of an axiom
+  @{text "\<turnstile> A"}.  By pushing substitutions through derivations
+  inductively, we also get admissible @{text "generalize"} and @{text
+  "instantiate"} rules as shown in \figref{fig:subst-rules}.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \[
+  \infer{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}{@{text "\<Gamma> \<turnstile> B[\<alpha>]"} & @{text "\<alpha> \<notin> \<Gamma>"}}
+  \quad
+  \infer[\quad@{text "(generalize)"}]{@{text "\<Gamma> \<turnstile> B[?x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
+  \]
+  \[
+  \infer{@{text "\<Gamma> \<turnstile> B[\<tau>]"}}{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}
+  \quad
+  \infer[\quad@{text "(instantiate)"}]{@{text "\<Gamma> \<turnstile> B[t]"}}{@{text "\<Gamma> \<turnstile> B[?x]"}}
+  \]
+  \caption{Admissible substitution rules}\label{fig:subst-rules}
+  \end{center}
+  \end{figure}
+
+  Note that @{text "instantiate"} does not require an explicit
+  side-condition, because @{text "\<Gamma>"} may never contain schematic
+  variables.
+
+  In principle, variables could be substituted in hypotheses as well,
+  but this would disrupt the monotonicity of reasoning: deriving
+  @{text "\<Gamma>\<vartheta> \<turnstile> B\<vartheta>"} from @{text "\<Gamma> \<turnstile> B"} is
+  correct, but @{text "\<Gamma>\<vartheta> \<supseteq> \<Gamma>"} does not necessarily hold:
+  the result belongs to a different proof context.
+
+  \medskip An \emph{oracle} is a function that produces axioms on the
+  fly.  Logically, this is an instance of the @{text "axiom"} rule
+  (\figref{fig:prim-rules}), but there is an operational difference.
+  The system always records oracle invocations within derivations of
+  theorems by a unique tag.
+
+  Axiomatizations should be limited to the bare minimum, typically as
+  part of the initial logical basis of an object-logic formalization.
+  Later on, theories are usually developed in a strictly definitional
+  fashion, by stating only certain equalities over new constants.
+
+  A \emph{simple definition} consists of a constant declaration @{text
+  "c :: \<sigma>"} together with an axiom @{text "\<turnstile> c \<equiv> t"}, where @{text "t
+  :: \<sigma>"} is a closed term without any hidden polymorphism.  The RHS
+  may depend on further defined constants, but not @{text "c"} itself.
+  Definitions of functions may be presented as @{text "c \<^vec>x \<equiv>
+  t"} instead of the puristic @{text "c \<equiv> \<lambda>\<^vec>x. t"}.
+
+  An \emph{overloaded definition} consists of a collection of axioms
+  for the same constant, with zero or one equation @{text
+  "c((\<^vec>\<alpha>)\<kappa>) \<equiv> t"} for each type constructor @{text "\<kappa>"} (for
+  distinct variables @{text "\<^vec>\<alpha>"}).  The RHS may mention
+  previously defined constants as above, or arbitrary constants @{text
+  "d(\<alpha>\<^sub>i)"} for some @{text "\<alpha>\<^sub>i"} projected from @{text
+  "\<^vec>\<alpha>"}.  Thus overloaded definitions essentially work by
+  primitive recursion over the syntactic structure of a single type
+  argument.  See also \cite[\S4.3]{Haftmann-Wenzel:2006:classes}.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Logic.all: "term -> term -> term"} \\
+  @{index_ML Logic.mk_implies: "term * term -> term"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type ctyp} \\
+  @{index_ML_type cterm} \\
+  @{index_ML Thm.ctyp_of: "theory -> typ -> ctyp"} \\
+  @{index_ML Thm.cterm_of: "theory -> term -> cterm"} \\
+  @{index_ML Thm.apply: "cterm -> cterm -> cterm"} \\
+  @{index_ML Thm.lambda: "cterm -> cterm -> cterm"} \\
+  @{index_ML Thm.all: "cterm -> cterm -> cterm"} \\
+  @{index_ML Drule.mk_implies: "cterm * cterm -> cterm"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML_type thm} \\
+  @{index_ML Thm.peek_status: "thm -> {oracle: bool, unfinished: bool, failed: bool}"} \\
+  @{index_ML Thm.transfer: "theory -> thm -> thm"} \\
+  @{index_ML Thm.assume: "cterm -> thm"} \\
+  @{index_ML Thm.forall_intr: "cterm -> thm -> thm"} \\
+  @{index_ML Thm.forall_elim: "cterm -> thm -> thm"} \\
+  @{index_ML Thm.implies_intr: "cterm -> thm -> thm"} \\
+  @{index_ML Thm.implies_elim: "thm -> thm -> thm"} \\
+  @{index_ML Thm.generalize: "string list * string list -> int -> thm -> thm"} \\
+  @{index_ML Thm.instantiate: "(ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm"} \\
+  @{index_ML Thm.add_axiom: "Proof.context ->
+  binding * term -> theory -> (string * thm) * theory"} \\
+  @{index_ML Thm.add_oracle: "binding * ('a -> cterm) -> theory ->
+  (string * ('a -> thm)) * theory"} \\
+  @{index_ML Thm.add_def: "Proof.context -> bool -> bool ->
+  binding * term -> theory -> (string * thm) * theory"} \\
+  \end{mldecls}
+  \begin{mldecls}
+  @{index_ML Theory.add_deps: "Proof.context -> string ->
+  string * typ -> (string * typ) list -> theory -> theory"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Thm.peek_status}~@{text "thm"} informs about the current
+  status of the derivation object behind the given theorem.  This is a
+  snapshot of a potentially ongoing (parallel) evaluation of proofs.
+  The three Boolean values indicate the following: @{verbatim oracle}
+  if the finished part contains some oracle invocation; @{verbatim
+  unfinished} if some future proofs are still pending; @{verbatim
+  failed} if some future proof has failed, rendering the theorem
+  invalid!
+
+  \item @{ML Logic.all}~@{text "a B"} produces a Pure quantification
+  @{text "\<And>a. B"}, where occurrences of the atomic term @{text "a"} in
+  the body proposition @{text "B"} are replaced by bound variables.
+  (See also @{ML lambda} on terms.)
+
+  \item @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
+  implication @{text "A \<Longrightarrow> B"}.
+
+  \item Types @{ML_type ctyp} and @{ML_type cterm} represent certified
+  types and terms, respectively.  These are abstract datatypes that
+  guarantee that their values have passed the full well-formedness (and
+  well-typedness) checks, relative to the declarations of type
+  constructors, constants etc.\ in the background theory.  The
+  abstract types @{ML_type ctyp} and @{ML_type cterm} are part of the
+  same inference kernel that is mainly responsible for @{ML_type thm}.
+  Thus syntactic operations on @{ML_type ctyp} and @{ML_type cterm}
+  are located in the @{ML_structure Thm} module, even though theorems are
+  not yet involved at that stage.
+
+  \item @{ML Thm.ctyp_of}~@{text "thy \<tau>"} and @{ML
+  Thm.cterm_of}~@{text "thy t"} explicitly checks types and terms,
+  respectively.  This also involves some basic normalizations, such
+  expansion of type and term abbreviations from the theory context.
+  Full re-certification is relatively slow and should be avoided in
+  tight reasoning loops.
+
+  \item @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
+  Drule.mk_implies} etc.\ compose certified terms (or propositions)
+  incrementally.  This is equivalent to @{ML Thm.cterm_of} after
+  unchecked @{ML_op "$"}, @{ML lambda}, @{ML Logic.all}, @{ML
+  Logic.mk_implies} etc., but there can be a big difference in
+  performance when large existing entities are composed by a few extra
+  constructions on top.  There are separate operations to decompose
+  certified terms and theorems to produce certified terms again.
+
+  \item Type @{ML_type thm} represents proven propositions.  This is
+  an abstract datatype that guarantees that its values have been
+  constructed by basic principles of the @{ML_structure Thm} module.
+  Every @{ML_type thm} value refers to its background theory,
+  cf.\ \secref{sec:context-theory}.
+
+  \item @{ML Thm.transfer}~@{text "thy thm"} transfers the given
+  theorem to a \emph{larger} theory, see also \secref{sec:context}.
+  This formal adjustment of the background context has no logical
+  significance, but is occasionally required for formal reasons, e.g.\
+  when theorems that are imported from more basic theories are used in
+  the current situation.
+
+  \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
+  Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
+  correspond to the primitive inferences of \figref{fig:prim-rules}.
+
+  \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
+  corresponds to the @{text "generalize"} rules of
+  \figref{fig:subst-rules}.  Here collections of type and term
+  variables are generalized simultaneously, specified by the given
+  basic names.
+
+  \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
+  \<^vec>x\<^sub>\<tau>)"} corresponds to the @{text "instantiate"} rules
+  of \figref{fig:subst-rules}.  Type variables are substituted before
+  term variables.  Note that the types in @{text "\<^vec>x\<^sub>\<tau>"}
+  refer to the instantiated versions.
+
+  \item @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
+  arbitrary proposition as axiom, and retrieves it as a theorem from
+  the resulting theory, cf.\ @{text "axiom"} in
+  \figref{fig:prim-rules}.  Note that the low-level representation in
+  the axiom table may differ slightly from the returned theorem.
+
+  \item @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
+  oracle rule, essentially generating arbitrary axioms on the fly,
+  cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
+
+  \item @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
+  \<^vec>x \<equiv> t)"} states a definitional axiom for an existing constant
+  @{text "c"}.  Dependencies are recorded via @{ML Theory.add_deps},
+  unless the @{text "unchecked"} option is set.  Note that the
+  low-level representation in the axiom table may differ slightly from
+  the returned theorem.
+
+  \item @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
+  declares dependencies of a named specification for constant @{text
+  "c\<^sub>\<tau>"}, relative to existing specifications for constants @{text
+  "\<^vec>d\<^sub>\<sigma>"}.
+
+  \end{description}
+*}
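+
+text %mlex {* The following minimal sketch derives the trivial
+  theorem @{text "A \<Longrightarrow> A"} for a fixed proposition @{text "A"}, using
+  only the primitive inferences @{ML Thm.assume} and @{ML
+  Thm.implies_intr} of \figref{fig:prim-rules}: *}
+
+ML {*
+  val A = @{cprop "PROP A"};
+
+  (*assume: A \<turnstile> A*)
+  val assm = Thm.assume A;
+
+  (*\<Longrightarrow>-intro discharges the hypothesis: \<turnstile> PROP A \<Longrightarrow> PROP A*)
+  val thm = Thm.implies_intr A assm;
+  @{assert} (Thm.prop_of thm aconv @{prop "PROP A \<Longrightarrow> PROP A"});
+*}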
+
+
+text %mlantiq {*
+  \begin{matharray}{rcl}
+  @{ML_antiquotation_def "ctyp"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "cterm"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "cprop"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "thm"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "thms"} & : & @{text ML_antiquotation} \\
+  @{ML_antiquotation_def "lemma"} & : & @{text ML_antiquotation} \\
+  \end{matharray}
+
+  @{rail \<open>
+  @@{ML_antiquotation ctyp} typ
+  ;
+  @@{ML_antiquotation cterm} term
+  ;
+  @@{ML_antiquotation cprop} prop
+  ;
+  @@{ML_antiquotation thm} thmref
+  ;
+  @@{ML_antiquotation thms} thmrefs
+  ;
+  @@{ML_antiquotation lemma} ('(' @'open' ')')? ((prop +) + @'and') \<newline>
+    @'by' method method?
+  \<close>}
+
+  \begin{description}
+
+  \item @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
+  current background theory --- as abstract value of type @{ML_type
+  ctyp}.
+
+  \item @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
+  certified term wrt.\ the current background theory --- as abstract
+  value of type @{ML_type cterm}.
+
+  \item @{text "@{thm a}"} produces a singleton fact --- as abstract
+  value of type @{ML_type thm}.
+
+  \item @{text "@{thms a}"} produces a general fact --- as abstract
+  value of type @{ML_type "thm list"}.
+
+  \item @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
+  the spot according to the minimal proof, which imitates a terminal
+  Isar proof.  The result is an abstract value of type @{ML_type thm}
+  or @{ML_type "thm list"}, depending on the number of propositions
+  given here.
+
+  The internal derivation object lacks a proper theorem name, but it
+  is formally closed, unless the @{text "(open)"} option is specified
+  (this may impact performance of applications with proof terms).
+
+  Since ML antiquotations are always evaluated at compile-time, there
+  is no run-time overhead even for non-trivial proofs.  Nonetheless,
+  the justification is syntactically limited to a single @{command
+  "by"} step.  More complex Isar proofs should be done in regular
+  theory source, before compiling the corresponding ML text that uses
+  the result.
+
+  \end{description}
+
+*}
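+
+text %mlex {* For example (assuming a HOL context), a fact may be
+  proven on the spot and bound to an ML value: *}
+
+ML {*
+  (*a singleton fact, checked at compile-time of this ML text*)
+  val my_refl: thm = @{lemma "x = x" by (rule refl)};
+*}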
+
+
+subsection {* Auxiliary connectives \label{sec:logic-aux} *}
+
+text {* Theory @{text "Pure"} provides a few auxiliary connectives
+  that are defined on top of the primitive ones, see
+  \figref{fig:pure-aux}.  These special constants are useful in
+  certain internal encodings, and are normally not directly exposed to
+  the user.
+
+  \begin{figure}[htb]
+  \begin{center}
+  \begin{tabular}{ll}
+  @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&&&"}) \\
+  @{text "\<turnstile> A &&& B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
+  @{text "prop :: prop \<Rightarrow> prop"} & (prefix @{text "#"}, suppressed) \\
+  @{text "#A \<equiv> A"} \\[1ex]
+  @{text "term :: \<alpha> \<Rightarrow> prop"} & (prefix @{text "TERM"}) \\
+  @{text "term x \<equiv> (\<And>A. A \<Longrightarrow> A)"} \\[1ex]
+  @{text "type :: \<alpha> itself"} & (prefix @{text "TYPE"}) \\
+  @{text "(unspecified)"} \\
+  \end{tabular}
+  \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
+  \end{center}
+  \end{figure}
+
+  The introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &&& B"}, and eliminations
+  (projections) @{text "A &&& B \<Longrightarrow> A"} and @{text "A &&& B \<Longrightarrow> B"} are
+  available as derived rules.  Conjunction allows simultaneous
+  assumptions and conclusions to be treated uniformly, e.g.\ consider
+  @{text "A \<Longrightarrow> B \<Longrightarrow> C &&& D"}.  In particular, the goal mechanism
+  represents multiple claims as explicit conjunction internally, but
+  this is refined (via backwards introduction) into separate sub-goals
+  before the user commences the proof; the final result is projected
+  into a list of theorems using eliminations (cf.\
+  \secref{sec:tactical-goals}).
+
+  The @{text "prop"} marker (@{text "#"}) makes arbitrarily complex
+  propositions appear as atomic, without changing the meaning: @{text
+  "\<Gamma> \<turnstile> A"} and @{text "\<Gamma> \<turnstile> #A"} are interchangeable.  See
+  \secref{sec:tactical-goals} for specific operations.
+
+  The @{text "term"} marker turns any well-typed term into a derivable
+  proposition: @{text "\<turnstile> TERM t"} holds unconditionally.  Although
+  this is logically vacuous, it allows terms and proofs to be treated
+  uniformly, similar to a type-theoretic framework.
+
+  The @{text "TYPE"} constructor is the canonical representative of
+  the unspecified type @{text "\<alpha> itself"}; it essentially injects the
+  language of types into that of terms.  There is specific notation
+  @{text "TYPE(\<tau>)"} for @{text "TYPE\<^bsub>\<tau> itself\<^esub>"}.
+  Although being devoid of any particular meaning, the term @{text
+  "TYPE(\<tau>)"} accounts for the type @{text "\<tau>"} within the term
+  language.  In particular, @{text "TYPE(\<alpha>)"} may be used as formal
+  argument in primitive definitions, in order to circumvent hidden
+  polymorphism (cf.\ \secref{sec:terms}).  For example, @{text "c
+  TYPE(\<alpha>) \<equiv> A[\<alpha>]"} defines @{text "c :: \<alpha> itself \<Rightarrow> prop"} in terms of
+  a proposition @{text "A"} that depends on an additional type
+  argument, which is essentially a predicate on types.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Conjunction.intr: "thm -> thm -> thm"} \\
+  @{index_ML Conjunction.elim: "thm -> thm * thm"} \\
+  @{index_ML Drule.mk_term: "cterm -> thm"} \\
+  @{index_ML Drule.dest_term: "thm -> cterm"} \\
+  @{index_ML Logic.mk_type: "typ -> term"} \\
+  @{index_ML Logic.dest_type: "term -> typ"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
+  "A"} and @{text "B"}.
+
+  \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
+  from @{text "A &&& B"}.
+
+  \item @{ML Drule.mk_term} derives @{text "TERM t"}.
+
+  \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
+  "TERM t"}.
+
+  \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
+  "TYPE(\<tau>)"}.
+
+  \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
+  @{text "\<tau>"}.
+
+  \end{description}
+*}
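+
+text %mlex {* A small round-trip sketch for the @{text "TYPE"}
+  representation of types within the term language (assuming a HOL
+  context with type @{text "nat"}): *}
+
+ML {*
+  (*inject type nat into the term language: TYPE(nat)*)
+  val t = Logic.mk_type @{typ nat};
+  @{assert} (fastype_of t = @{typ "nat itself"});
+
+  (*and project it out again*)
+  @{assert} (Logic.dest_type t = @{typ nat});
+*}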
+
+
+subsection {* Sort hypotheses *}
+
+text {* Type variables are decorated with sorts, as explained in
+  \secref{sec:types}.  This constrains type instantiation to certain
+  ranges of types: variable @{text "\<alpha>\<^sub>s"} may only be assigned to types
+  @{text "\<tau>"} that belong to sort @{text "s"}.  Within the logic, sort
+  constraints act like implicit preconditions on the result @{text
+  "\<lparr>\<alpha>\<^sub>1 : s\<^sub>1\<rparr>, \<dots>, \<lparr>\<alpha>\<^sub>n : s\<^sub>n\<rparr>, \<Gamma> \<turnstile> \<phi>"} where the type variables @{text
+  "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} cover the propositions @{text "\<Gamma>"}, @{text "\<phi>"}, as
+  well as the proof of @{text "\<Gamma> \<turnstile> \<phi>"}.
+
+  These \emph{sort hypotheses} of a theorem are passed monotonically
+  through further derivations.  They are redundant, as long as the
+  statement of a theorem still contains the type variables that are
+  accounted for here.  The logical significance of sort hypotheses is
+  limited to the boundary case where type variables disappear from the
+  proposition, e.g.\ @{text "\<lparr>\<alpha>\<^sub>s : s\<rparr> \<turnstile> \<phi>"}.  Since such dangling type
+  variables can be renamed arbitrarily without changing the
+  proposition @{text "\<phi>"}, the inference kernel maintains sort
+  hypotheses in anonymous form @{text "s \<turnstile> \<phi>"}.
+
+  In most practical situations, such extra sort hypotheses may be
+  stripped in a final bookkeeping step, e.g.\ at the end of a proof:
+  they are typically left over from intermediate reasoning with type
+  classes that can be satisfied by some concrete type @{text "\<tau>"} of
+  sort @{text "s"} to replace the hypothetical type variable @{text
+  "\<alpha>\<^sub>s"}.  *}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Thm.extra_shyps: "thm -> sort list"} \\
+  @{index_ML Thm.strip_shyps: "thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
+  sort hypotheses of the given theorem, i.e.\ the sorts that are not
+  present within type variables of the statement.
+
+  \item @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
+  sort hypotheses that can be witnessed from the type signature.
+
+  \end{description}
+*}
+
+text %mlex {* The following artificial example demonstrates the
+  derivation of @{prop False} with a pending sort hypothesis involving
+  a logically empty sort.  *}
+
+class empty =
+  assumes bad: "\<And>(x::'a) y. x \<noteq> y"
+
+theorem (in empty) false: False
+  using bad by blast
+
+ML {*
+  @{assert} (Thm.extra_shyps @{thm false} = [@{sort empty}])
+*}
+
+text {* Thanks to the inference kernel managing sort hypotheses
+  according to their logical significance, this example is merely an
+  instance of \emph{ex falso quodlibet consequitur} --- not a collapse
+  of the logical framework! *}
+
+
+section {* Object-level rules \label{sec:obj-rules} *}
+
+text {*
+  The primitive inferences covered so far mostly serve foundational
+  purposes.  User-level reasoning usually works via object-level rules
+  that are represented as theorems of Pure.  Composition of rules
+  involves \emph{backchaining}, \emph{higher-order unification} modulo
+  @{text "\<alpha>\<beta>\<eta>"}-conversion of @{text "\<lambda>"}-terms, and so-called
+  \emph{lifting} of rules into a context of @{text "\<And>"} and @{text
+  "\<Longrightarrow>"} connectives.  Thus the full power of higher-order Natural
+  Deduction in Isabelle/Pure becomes readily available.
+*}
+
+
+subsection {* Hereditary Harrop Formulae *}
+
+text {*
+  The idea of object-level rules is to model Natural Deduction
+  inferences in the style of Gentzen \cite{Gentzen:1935}, but we allow
+  arbitrary nesting similar to \cite{extensions91}.  The most basic
+  rule format is that of a \emph{Horn Clause}:
+  \[
+  \infer{@{text "A"}}{@{text "A\<^sub>1"} & @{text "\<dots>"} & @{text "A\<^sub>n"}}
+  \]
+  where @{text "A, A\<^sub>1, \<dots>, A\<^sub>n"} are atomic propositions
+  of the framework, usually of the form @{text "Trueprop B"}, where
+  @{text "B"} is a (compound) object-level statement.  This
+  object-level inference corresponds to an iterated implication in
+  Pure like this:
+  \[
+  @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"}
+  \]
+  As an example consider conjunction introduction: @{text "A \<Longrightarrow> B \<Longrightarrow> A \<and>
+  B"}.  Any parameters occurring in such rule statements are
+  conceptually treated as arbitrary:
+  \[
+  @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. A\<^sub>1 x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> \<dots> A\<^sub>n x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> A x\<^sub>1 \<dots> x\<^sub>m"}
+  \]
+
+  Nesting of rules means that the positions of @{text "A\<^sub>i"} may
+  again hold compound rules, not just atomic propositions.
+  Propositions of this format are called \emph{Hereditary Harrop
+  Formulae} in the literature \cite{Miller:1991}.  Here we give an
+  inductive characterization as follows:
+
+  \medskip
+  \begin{tabular}{ll}
+  @{text "\<^bold>x"} & set of variables \\
+  @{text "\<^bold>A"} & set of atomic propositions \\
+  @{text "\<^bold>H  =  \<And>\<^bold>x\<^sup>*. \<^bold>H\<^sup>* \<Longrightarrow> \<^bold>A"} & set of Hereditary Harrop Formulas \\
+  \end{tabular}
+  \medskip
+
+  Thus we essentially impose nesting levels on propositions formed
+  from @{text "\<And>"} and @{text "\<Longrightarrow>"}.  At each level there is a prefix
+  of parameters and compound premises, concluding an atomic
+  proposition.  Typical examples are @{text "\<longrightarrow>"}-introduction @{text
+  "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B"} or mathematical induction @{text "P 0 \<Longrightarrow> (\<And>n. P n
+  \<Longrightarrow> P (Suc n)) \<Longrightarrow> P n"}.  Even deeper nesting occurs in well-founded
+  induction @{text "(\<And>x. (\<And>y. y \<prec> x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P x"}, but this
+  already marks the limit of rule complexity that is usually seen in
+  practice.
+
+  \medskip Regular user-level inferences in Isabelle/Pure always
+  maintain the following canonical form of results:
+
+  \begin{itemize}
+
+  \item Normalization by @{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"},
+  which is a theorem of Pure, means that quantifiers are pushed in
+  front of implication at each level of nesting.  The normal form is a
+  Hereditary Harrop Formula.
+
+  \item The outermost prefix of parameters is represented via
+  schematic variables: instead of @{text "\<And>\<^vec>x. \<^vec>H \<^vec>x
+  \<Longrightarrow> A \<^vec>x"} we have @{text "\<^vec>H ?\<^vec>x \<Longrightarrow> A ?\<^vec>x"}.
+  Note that this representation loses information about the order of
+  parameters, and vacuous quantifiers vanish automatically.
+
+  \end{itemize}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
+  theorem according to the canonical form specified above.  This is
+  occasionally helpful to repair some low-level tools that do not
+  handle Hereditary Harrop Formulae properly.
+
+  \end{description}
+*}
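+
+text %mlex {* The following sketch illustrates the normalization on
+  an assumed proposition that is not yet in canonical form (the
+  concrete statement is an arbitrary assumption for illustration): *}
+
+ML {*
+  (*a quantifier occurs inside the conclusion of an implication*)
+  val raw = Thm.assume @{cprop "PROP A \<Longrightarrow> (\<And>x. PROP B x)"};
+
+  (*after normalization, the quantifier is in front of the implication*)
+  val norm = Simplifier.norm_hhf @{context} raw;
+  @{assert} (Thm.prop_of norm aconv @{prop "\<And>x. PROP A \<Longrightarrow> PROP B x"});
+*}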
+
+
+subsection {* Rule composition *}
+
+text {*
+  The rule calculus of Isabelle/Pure provides two main inferences:
+  @{inference resolution} (i.e.\ back-chaining of rules) and
+  @{inference assumption} (i.e.\ closing a branch), both modulo
+  higher-order unification.  There are also combined variants, notably
+  @{inference elim_resolution} and @{inference dest_resolution}.
+
+  To understand the all-important @{inference resolution} principle,
+  we first consider raw @{inference_def composition} (modulo
+  higher-order unification with substitution @{text "\<vartheta>"}):
+  \[
+  \infer[(@{inference_def composition})]{@{text "\<^vec>A\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {@{text "\<^vec>A \<Longrightarrow> B"} & @{text "B' \<Longrightarrow> C"} & @{text "B\<vartheta> = B'\<vartheta>"}}
+  \]
+  Here the conclusion of the first rule is unified with the premise of
+  the second; the resulting rule instance inherits the premises of the
+  first and the conclusion of the second.  Note that @{text "C"} can again
+  consist of iterated implications.  We can also permute the premises
+  of the second rule back-and-forth in order to compose with @{text
+  "B'"} in any position (subsequently we shall always refer to
+  position 1 w.l.o.g.).
+
+  In @{inference composition} the internal structure of the common
+  part @{text "B"} and @{text "B'"} is not taken into account.  For
+  proper @{inference resolution} we require @{text "B"} to be atomic,
+  and explicitly observe the structure @{text "\<And>\<^vec>x. \<^vec>H
+  \<^vec>x \<Longrightarrow> B' \<^vec>x"} of the premise of the second rule.  The
+  idea is to adapt the first rule by ``lifting'' it into this context,
+  by means of iterated application of the following inferences:
+  \[
+  \infer[(@{inference_def imp_lift})]{@{text "(\<^vec>H \<Longrightarrow> \<^vec>A) \<Longrightarrow> (\<^vec>H \<Longrightarrow> B)"}}{@{text "\<^vec>A \<Longrightarrow> B"}}
+  \]
+  \[
+  \infer[(@{inference_def all_lift})]{@{text "(\<And>\<^vec>x. \<^vec>A (?\<^vec>a \<^vec>x)) \<Longrightarrow> (\<And>\<^vec>x. B (?\<^vec>a \<^vec>x))"}}{@{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"}}
+  \]
+  By combining raw composition with lifting, we get full @{inference
+  resolution} as follows:
+  \[
+  \infer[(@{inference_def resolution})]
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (?\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+  {\begin{tabular}{l}
+    @{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"} \\
+    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+    @{text "(\<lambda>\<^vec>x. B (?\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+   \end{tabular}}
+  \]
+
+  Continued resolution of rules allows a problem to be back-chained
+  towards more and more sub-problems.  Branches are closed either by
+  resolving with a rule of 0 premises, or by producing a
+  ``short-circuit'' within a solved situation (again modulo
+  unification):
+  \[
+  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
+  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text i})}}
+  \]
+
+  %FIXME @{inference_def elim_resolution}, @{inference_def dest_resolution}
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_op "RSN": "thm * (int * thm) -> thm"} \\
+  @{index_ML_op "RS": "thm * thm -> thm"} \\
+
+  @{index_ML_op "RLN": "thm list * (int * thm list) -> thm list"} \\
+  @{index_ML_op "RL": "thm list * thm list -> thm list"} \\
+
+  @{index_ML_op "MRS": "thm list * thm -> thm"} \\
+  @{index_ML_op "OF": "thm * thm list -> thm"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
+  @{text "rule\<^sub>1"} with the @{text i}-th premise of @{text "rule\<^sub>2"},
+  according to the @{inference resolution} principle explained above.
+  Unless there is precisely one resolvent it raises exception @{ML
+  THM}.
+
+  This corresponds to the rule attribute @{attribute THEN} in the
+  Isar source language.
+
+  \item @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
+  rule\<^sub>2)"}.
+
+  \item @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
+  every @{text "rule\<^sub>1"} in @{text "rules\<^sub>1"} and @{text "rule\<^sub>2"} in
+  @{text "rules\<^sub>2"}, it resolves the conclusion of @{text "rule\<^sub>1"} with
+  the @{text "i"}-th premise of @{text "rule\<^sub>2"}, accumulating multiple
+  results in one big list.  Note that such strict enumerations of
+  higher-order unifications can be inefficient compared to the lazy
+  variant seen in elementary tactics like @{ML resolve_tac}.
+
+  \item @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
+  rules\<^sub>2)"}.
+
+  \item @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
+  against premise @{text "i"} of @{text "rule"}, for @{text "i = n, \<dots>,
+  1"}.  By working from right to left, newly emerging premises are
+  concatenated in the result, without interfering.
+
+  \item @{text "rule OF rules"} is an alternative notation for @{text
+  "rules MRS rule"}, which makes rule composition look more like
+  function application.  Note that the argument @{text "rules"} need
+  not be atomic.
+
+  This corresponds to the rule attribute @{attribute OF} in the Isar
+  source language (see also the example after this list).
+
+  \end{description}
+*}
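+
+text %mlex {* The following example illustrates the composition
+  operations at the ML level; it is merely a small sketch, assuming
+  the usual Isabelle/HOL rules @{thm [source] conjI} and @{thm
+  [source] refl}.  The resulting rules given in the comments are up
+  to renaming of schematic variables. *}
+
+ML_val {*
+  (*conclusion of conjI resolved with premise 1 of conjI:
+    "\<lbrakk>?P; ?Q; ?R\<rbrakk> \<Longrightarrow> (?P \<and> ?Q) \<and> ?R"*)
+  val rule1 = @{thm conjI} RS @{thm conjI};
+
+  (*the same composition against premise 2:
+    "\<lbrakk>?P; ?Q; ?R\<rbrakk> \<Longrightarrow> ?P \<and> (?Q \<and> ?R)"*)
+  val rule2 = @{thm conjI} RSN (2, @{thm conjI});
+
+  (*a list of rules resolved against all premises at once:
+    "?t = ?t \<and> ?u = ?u"*)
+  val rule3 = @{thm conjI} OF [@{thm refl}, @{thm refl}];
+*}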
+
+
+section {* Proof terms \label{sec:proof-terms} *}
+
+text {* The Isabelle/Pure inference kernel can record the proof of
+  each theorem as a proof term that contains all logical inferences in
+  detail.  Rule composition by resolution (\secref{sec:obj-rules}) and
+  type-class reasoning is broken down to primitive rules of the
+  logical framework.  The proof term can be inspected by a separate
+  proof-checker, for example.
+
+  According to the well-known \emph{Curry-Howard isomorphism}, a proof
+  can be viewed as a @{text "\<lambda>"}-term. Following this idea, proofs in
+  Isabelle are internally represented by a datatype similar to the one
+  for terms described in \secref{sec:terms}.  On top of these
+  syntactic terms, two more layers of @{text "\<lambda>"}-calculus are added,
+  which correspond to @{text "\<And>x :: \<alpha>. B x"} and @{text "A \<Longrightarrow> B"}
+  according to the propositions-as-types principle.  The resulting
+  3-level @{text "\<lambda>"}-calculus resembles ``@{text "\<lambda>HOL"}'' in the
+  more abstract setting of Pure Type Systems (PTS)
+  \cite{Barendregt-Geuvers:2001}, if some fine points like schematic
+  polymorphism and type classes are ignored.
+
+  \medskip\emph{Proof abstractions} of the form @{text "\<^bold>\<lambda>x :: \<alpha>. prf"}
+  or @{text "\<^bold>\<lambda>p : A. prf"} correspond to introduction of @{text
+  "\<And>"}/@{text "\<Longrightarrow>"}, and \emph{proof applications} of the form @{text
+  "p \<cdot> t"} or @{text "p \<bullet> q"} correspond to elimination of @{text
+  "\<And>"}/@{text "\<Longrightarrow>"}.  Actual types @{text "\<alpha>"}, propositions @{text
+  "A"}, and terms @{text "t"} might be suppressed and reconstructed
+  from the overall proof term.
+
+  \medskip Various atomic proofs indicate special situations within
+  the proof construction as follows.
+
+  A \emph{bound proof variable} is a natural number @{text "b"} that
+  acts as a de-Bruijn index for proof term abstractions.
+
+  A \emph{minimal proof} ``@{text "?"}'' is a dummy proof term.  This
+  indicates some unrecorded part of the proof.
+
+  @{text "Hyp A"} refers to some pending hypothesis by giving its
+  proposition.  This indicates an open context of implicit hypotheses,
+  similar to loose bound variables or free variables within a term
+  (\secref{sec:terms}).
+
+  An \emph{axiom} or \emph{oracle} @{text "a : A[\<^vec>\<tau>]"} refers
+  to some postulated @{text "proof constant"}, which is subject to
+  schematic polymorphism of theory content, and the particular type
+  instantiation may be given explicitly.  The vector of types @{text
+  "\<^vec>\<tau>"} refers to the schematic type variables in the generic
+  proposition @{text "A"} in canonical order.
+
+  A \emph{proof promise} @{text "a : A[\<^vec>\<tau>]"} is a placeholder
+  for some proof of polymorphic proposition @{text "A"}, with explicit
+  type instantiation as given by the vector @{text "\<^vec>\<tau>"}, as
+  above.  Unlike axioms or oracles, proof promises may be
+  \emph{fulfilled} eventually, by replacing @{text "a"} with some
+  particular proof @{text "q"} at the corresponding type instance.
+  This acts like Hindley-Milner @{text "let"}-polymorphism: a generic
+  local proof definition may get used at different type instances, and
+  is replaced by the concrete instance eventually.
+
+  A \emph{named theorem} wraps up some concrete proof as a closed
+  formal entity, in the manner of constant definitions for proof
+  terms.  The \emph{proof body} of such boxed theorems involves some
+  digest about oracles and promises occurring in the original proof.
+  This allows the inference kernel to manage this critical information
+  without the full overhead of explicit proof terms.
+*}
+
+
+subsection {* Reconstructing and checking proof terms *}
+
+text {* Fully explicit proof terms can be large, but most of this
+  information is redundant and can be reconstructed from the context.
+  Therefore, the Isabelle/Pure inference kernel records only
+  \emph{implicit} proof terms, by omitting all typing information in
+  terms, all term and type labels of proof abstractions, and some
+  argument terms of applications @{text "p \<cdot> t"} (if possible).
+
+  There are separate operations to reconstruct the full proof term
+  later on, using \emph{higher-order pattern unification}
+  \cite{nipkow-patterns,Berghofer-Nipkow:2000:TPHOL}.
+
+  The \emph{proof checker} expects a fully reconstructed proof term,
+  and can turn it into a theorem by replaying its primitive inferences
+  within the kernel.  *}
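+
+text %mlex {* For example, a fully explicit proof of the trivial
+  proposition @{text "A \<Longrightarrow> A"} is the abstraction @{text "\<^bold>\<lambda>H: A. H"}.
+  The following minimal sketch constructs this proof term directly,
+  using ML constructors of the @{ML_type proof} datatype (described
+  below), and replays it within the kernel: *}
+
+ML_val {*
+  val thy = @{theory};
+
+  (*proof abstraction over hypothesis A; the bound proof
+    variable 0 refers to the enclosing abstraction*)
+  val prf = AbsP ("H", SOME @{prop "A"}, PBound 0);
+
+  (*replay the primitive inferences within the kernel*)
+  val thm = Proof_Checker.thm_of_proof thy prf;
+*}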
+
+
+subsection {* Concrete syntax of proof terms *}
+
+text {* The concrete syntax of proof terms is a slight extension of
+  the regular inner syntax of Isabelle/Pure \cite{isabelle-isar-ref}.
+  Its main syntactic category @{syntax (inner) proof} is defined as
+  follows:
+
+  \begin{center}
+  \begin{supertabular}{rclr}
+
+  @{syntax_def (inner) proof} & = & @{verbatim Lam} @{text params} @{verbatim "."} @{text proof} \\
+    & @{text "|"} & @{text "\<^bold>\<lambda>"} @{text "params"} @{verbatim "."} @{text proof} \\
+    & @{text "|"} & @{text proof} @{verbatim "%"} @{text any} \\
+    & @{text "|"} & @{text proof} @{text "\<cdot>"} @{text any} \\
+    & @{text "|"} & @{text proof} @{verbatim "%%"} @{text proof} \\
+    & @{text "|"} & @{text proof} @{text "\<bullet>"} @{text proof} \\
+    & @{text "|"} & @{text "id  |  longid"} \\
+  \\
+
+  @{text param} & = & @{text idt} \\
+    & @{text "|"} & @{text idt} @{verbatim ":"} @{text prop} \\
+    & @{text "|"} & @{verbatim "("} @{text param} @{verbatim ")"} \\
+  \\
+
+  @{text params} & = & @{text param} \\
+    & @{text "|"} & @{text param} @{text params} \\
+
+  \end{supertabular}
+  \end{center}
+
+  Implicit term arguments in partial proofs are indicated by ``@{text
+  "_"}''.  Type arguments for theorems and axioms may be specified
+  using @{text "p \<cdot> TYPE(type)"} (they must appear before any other
+  term argument of a theorem or axiom, but may be omitted altogether).
+
+  \medskip There are separate read and print operations for proof
+  terms, in order to avoid conflicts with the regular term language.
+*}
+
+text %mlref {*
+  \begin{mldecls}
+  @{index_ML_type proof} \\
+  @{index_ML_type proof_body} \\
+  @{index_ML proofs: "int Unsynchronized.ref"} \\
+  @{index_ML Reconstruct.reconstruct_proof:
+  "theory -> term -> proof -> proof"} \\
+  @{index_ML Reconstruct.expand_proof: "theory ->
+  (string * term option) list -> proof -> proof"} \\
+  @{index_ML Proof_Checker.thm_of_proof: "theory -> proof -> thm"} \\
+  @{index_ML Proof_Syntax.read_proof: "theory -> bool -> bool -> string -> proof"} \\
+  @{index_ML Proof_Syntax.pretty_proof: "Proof.context -> proof -> Pretty.T"} \\
+  \end{mldecls}
+
+  \begin{description}
+
+  \item Type @{ML_type proof} represents proof terms; this is a
+  datatype with constructors @{index_ML Abst}, @{index_ML AbsP},
+  @{index_ML_op "%"}, @{index_ML_op "%%"}, @{index_ML PBound},
+  @{index_ML MinProof}, @{index_ML Hyp}, @{index_ML PAxm}, @{index_ML
+  Oracle}, @{index_ML Promise}, @{index_ML PThm} as explained above.
+  %FIXME OfClass (!?)
+
+  \item Type @{ML_type proof_body} represents the nested proof
+  information of a named theorem, consisting of a digest of oracles
+  and named theorems over some proof term.  The digest only covers the
+  directly visible part of the proof: in order to get the full
+  information, the implicit graph of nested theorems needs to be
+  traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
+
+  \item @{ML Thm.proof_of}~@{text "thm"} and @{ML
+  Thm.proof_body_of}~@{text "thm"} produce the proof term or proof
+  body (with digest of oracles and theorems) from a given theorem.
+  Note that this involves a full join of internal futures that fulfill
+  pending proof promises, and thus disrupts the natural bottom-up
+  construction of proofs by introducing dynamic ad-hoc dependencies.
+  Parallel performance may suffer when proof terms are inspected at
+  run-time.
+
+  \item @{ML proofs} specifies the detail of proof recording within
+  @{ML_type thm} values produced by the inference kernel: @{ML 0}
+  records only the names of oracles, @{ML 1} records oracle names and
+  propositions, @{ML 2} additionally records full proof terms.
+  Officially named theorems that contribute to a result are recorded
+  in any case (see also the example after this list).
+
+  \item @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
+  turns the implicit proof term @{text "prf"} into a full proof of the
+  given proposition.
+
+  Reconstruction may fail if @{text "prf"} is not a proof of @{text
+  "prop"}, or if it does not contain sufficient information for
+  reconstruction.  Failure can only happen for proofs that are
+  constructed manually, not for those produced automatically by the
+  inference kernel.
+
+  \item @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
+  prf"} expands and reconstructs the proofs of all specified theorems
+  within the given (full) proof.  Theorems that are not uniquely
+  identified by their name may be disambiguated by giving their
+  proposition.
+
+  \item @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
+  given (full) proof into a theorem, by replaying it using only
+  primitive rules of the inference kernel.
+
+  \item @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
+  proof term. The Boolean flags indicate the use of sort and type
+  information.  Usually, typing information is left implicit and is
+  inferred during proof reconstruction.  %FIXME eliminate flags!?
+
+  \item @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
+  pretty-prints the given proof term.
+
+  \end{description}
+*}
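+
+text %mlex {* Full proof terms are only available if @{ML proofs} has
+  been set accordingly before the relevant proofs are conducted.  The
+  following hypothetical sketch enables full recording temporarily
+  and restores the former value afterwards: *}
+
+ML_val {*
+  (*remember the former recording level*)
+  val former = ! proofs;
+
+  (*record full proof terms for subsequent theorems*)
+  proofs := 2;
+
+  (*... conduct proofs here ...*)
+
+  (*restore the former recording level*)
+  proofs := former;
+*}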
+
+text %mlex {* Detailed proof information of a theorem may be retrieved
+  as follows: *}
+
+lemma ex: "A \<and> B \<longrightarrow> B \<and> A"
+proof
+  assume "A \<and> B"
+  then obtain B and A ..
+  then show "B \<and> A" ..
+qed
+
+ML_val {*
+  (*proof body with digest*)
+  val body = Proofterm.strip_thm (Thm.proof_body_of @{thm ex});
+
+  (*proof term only*)
+  val prf = Proofterm.proof_of body;
+  Pretty.writeln (Proof_Syntax.pretty_proof @{context} prf);
+
+  (*all theorems used in the graph of nested proofs*)
+  val all_thms =
+    Proofterm.fold_body_thms
+      (fn (name, _, _) => insert (op =) name) [body] [];
+*}
+
+text {* The result refers to various basic facts of Isabelle/HOL:
+  @{thm [source] HOL.impI}, @{thm [source] HOL.conjE}, @{thm [source]
+  HOL.conjI} etc.  The combinator @{ML Proofterm.fold_body_thms}
+  recursively explores the graph of the proofs of all theorems being
+  used here.
+
+  \medskip Alternatively, we may produce a proof term manually, and
+  turn it into a theorem as follows: *}
+
+ML_val {*
+  val thy = @{theory};
+  val prf =
+    Proof_Syntax.read_proof thy true false
+      "impI \<cdot> _ \<cdot> _ \<bullet> \
+      \   (\<^bold>\<lambda>H: _. \
+      \     conjE \<cdot> _ \<cdot> _ \<cdot> _ \<bullet> H \<bullet> \
+      \       (\<^bold>\<lambda>(H: _) Ha: _. conjI \<cdot> _ \<cdot> _ \<bullet> Ha \<bullet> H))";
+  val thm =
+    prf
+    |> Reconstruct.reconstruct_proof thy @{prop "A \<and> B \<longrightarrow> B \<and> A"}
+    |> Proof_Checker.thm_of_proof thy
+    |> Drule.export_without_context;
+*}
+
+text {* \medskip See also @{file "~~/src/HOL/Proofs/ex/XML_Data.thy"}
+  for further examples, with export and import of proof terms via
+  XML/ML data representation.
+*}
+
+end