more symbols;
author wenzelm
Wed, 14 Oct 2015 15:10:32 +0200
changeset 61439 2bf52eec4e8a
parent 61438 151f894984d8
child 61440 8626c2fed037
more symbols;
src/Doc/Implementation/Eq.thy
src/Doc/Implementation/Integration.thy
src/Doc/Implementation/Isar.thy
src/Doc/Implementation/Local_Theory.thy
src/Doc/Implementation/Logic.thy
src/Doc/Implementation/ML.thy
src/Doc/Implementation/Prelim.thy
src/Doc/Implementation/Proof.thy
src/Doc/Implementation/Syntax.thy
src/Doc/Implementation/Tactic.thy
src/Doc/Isar_Ref/Document_Preparation.thy
src/Doc/Isar_Ref/Generic.thy
src/Doc/Isar_Ref/HOL_Specific.thy
src/Doc/Isar_Ref/Inner_Syntax.thy
src/Doc/Isar_Ref/Outer_Syntax.thy
src/Doc/Isar_Ref/Proof.thy
src/Doc/Isar_Ref/Proof_Script.thy
src/Doc/Isar_Ref/Spec.thy
src/Doc/JEdit/JEdit.thy
src/Doc/System/Basics.thy
src/Doc/System/Misc.thy
src/Doc/System/Sessions.thy
--- a/src/Doc/Implementation/Eq.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Eq.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -100,21 +100,21 @@
 
   \begin{description}
 
-  \item @{ML rewrite_rule}~@{text "ctxt rules thm"} rewrites the whole
+  \<^descr> @{ML rewrite_rule}~@{text "ctxt rules thm"} rewrites the whole
   theorem by the given rules.
 
-  \item @{ML rewrite_goals_rule}~@{text "ctxt rules thm"} rewrites the
+  \<^descr> @{ML rewrite_goals_rule}~@{text "ctxt rules thm"} rewrites the
   outer premises of the given theorem.  Interpreting the theorem as a
   goal state (\secref{sec:tactical-goals}), this amounts to rewriting all
   subgoals (in the same manner as @{ML rewrite_goals_tac}).
 
-  \item @{ML rewrite_goal_tac}~@{text "ctxt rules i"} rewrites subgoal
+  \<^descr> @{ML rewrite_goal_tac}~@{text "ctxt rules i"} rewrites subgoal
   @{text "i"} by the given rewrite rules.
 
-  \item @{ML rewrite_goals_tac}~@{text "ctxt rules"} rewrites all subgoals
+  \<^descr> @{ML rewrite_goals_tac}~@{text "ctxt rules"} rewrites all subgoals
   by the given rewrite rules.
 
-  \item @{ML fold_goals_tac}~@{text "ctxt rules"} essentially uses @{ML
+  \<^descr> @{ML fold_goals_tac}~@{text "ctxt rules"} essentially uses @{ML
   rewrite_goals_tac} with the symmetric form of each member of @{text
   "rules"}, re-ordered to fold longer expression first.  This supports
   to idea to fold primitive definitions that appear in expended form
--- a/src/Doc/Implementation/Integration.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Integration.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -46,22 +46,22 @@
 
   \begin{description}
 
-  \item Type @{ML_type Toplevel.state} represents Isar toplevel
+  \<^descr> Type @{ML_type Toplevel.state} represents Isar toplevel
   states, which are normally manipulated through the concept of
   toplevel transitions only (\secref{sec:toplevel-transition}).
 
-  \item @{ML Toplevel.UNDEF} is raised for undefined toplevel
+  \<^descr> @{ML Toplevel.UNDEF} is raised for undefined toplevel
   operations.  Many operations work only partially for certain cases,
   since @{ML_type Toplevel.state} is a sum type.
 
-  \item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
+  \<^descr> @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
   toplevel state.
 
-  \item @{ML Toplevel.theory_of}~@{text "state"} selects the
+  \<^descr> @{ML Toplevel.theory_of}~@{text "state"} selects the
   background theory of @{text "state"}; it raises @{ML Toplevel.UNDEF}
   for an empty toplevel state.
 
-  \item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
+  \<^descr> @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
   state if available, otherwise it raises an error.
 
   \end{description}
@@ -74,7 +74,7 @@
 
   \begin{description}
 
-  \item @{text "@{Isar.state}"} refers to Isar toplevel state at that
+  \<^descr> @{text "@{Isar.state}"} refers to Isar toplevel state at that
   point --- as abstract value.
 
   This only works for diagnostic ML commands, such as @{command
@@ -123,27 +123,27 @@
 
   \begin{description}
 
-  \item @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
+  \<^descr> @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
   function.
 
-  \item @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
+  \<^descr> @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
   transformer.
 
-  \item @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
+  \<^descr> @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
   goal function, which turns a theory into a proof state.  The theory
   may be changed before entering the proof; the generic Isar goal
   setup includes an @{verbatim after_qed} argument that specifies how to
   apply the proven result to the enclosing context, when the proof
   is finished.
 
-  \item @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
+  \<^descr> @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
   proof command, with a singleton result.
 
-  \item @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
+  \<^descr> @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
   command, with zero or more result states (represented as a lazy
   list).
 
-  \item @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
+  \<^descr> @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
   proof command that returns the resulting theory, after applying the
   resulting facts to the target context.
 
@@ -177,11 +177,11 @@
 
   \begin{description}
 
-  \item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
+  \<^descr> @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
   up-to-date wrt.\ the external file store; outdated ancestors are reloaded on
   demand.
 
-  \item @{ML use_thys} is similar to @{ML use_thy}, but handles several
+  \<^descr> @{ML use_thys} is similar to @{ML use_thy}, but handles several
   theories simultaneously. Thus it acts like processing the import header of a
   theory, without performing the merge of the result. By loading a whole
   sub-graph of theories, the intrinsic parallelism can be exploited by the
@@ -189,14 +189,14 @@
 
   This variant is used by default in @{tool build} @{cite "isabelle-system"}.
 
-  \item @{ML Thy_Info.get_theory}~@{text A} retrieves the theory value
+  \<^descr> @{ML Thy_Info.get_theory}~@{text A} retrieves the theory value
   presently associated with name @{text A}. Note that the result might be
   outdated wrt.\ the file-system content.
 
-  \item @{ML Thy_Info.remove_thy}~@{text A} deletes theory @{text A} and all
+  \<^descr> @{ML Thy_Info.remove_thy}~@{text A} deletes theory @{text A} and all
   descendants from the theory database.
 
-  \item @{ML Thy_Info.register_thy}~@{text "text thy"} registers an existing
+  \<^descr> @{ML Thy_Info.register_thy}~@{text "text thy"} registers an existing
   theory value with the theory loader database and updates source version
   information according to the file store.
 
--- a/src/Doc/Implementation/Isar.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Isar.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -81,7 +81,7 @@
 
   \begin{description}
 
-  \item Type @{ML_type Proof.state} represents Isar proof states.
+  \<^descr> Type @{ML_type Proof.state} represents Isar proof states.
   This is a block-structured configuration with proof context,
   linguistic mode, and optional goal.  The latter consists of goal
   context, goal facts (``@{text "using"}''), and tactical goal state
@@ -91,7 +91,7 @@
   refinement of some parts of the tactical goal --- how exactly is
   defined by the proof method that is applied in that situation.
 
-  \item @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
+  \<^descr> @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
   Proof.assert_backward} are partial identity functions that fail
   unless a certain linguistic mode is active, namely ``@{text
   "proof(state)"}'', ``@{text "proof(chain)"}'', ``@{text
@@ -101,24 +101,24 @@
   It is advisable to study the implementations of existing proof commands
   for suitable modes to be asserted.
 
-  \item @{ML Proof.simple_goal}~@{text "state"} returns the structured
+  \<^descr> @{ML Proof.simple_goal}~@{text "state"} returns the structured
   Isar goal (if available) in the form seen by ``simple'' methods
   (like @{method simp} or @{method blast}).  The Isar goal facts are
   already inserted as premises into the subgoals, which are presented
   individually as in @{ML Proof.goal}.
 
-  \item @{ML Proof.goal}~@{text "state"} returns the structured Isar
+  \<^descr> @{ML Proof.goal}~@{text "state"} returns the structured Isar
   goal (if available) in the form seen by regular methods (like
   @{method rule}).  The auxiliary internal encoding of Pure
   conjunctions is split into individual subgoals as usual.
 
-  \item @{ML Proof.raw_goal}~@{text "state"} returns the structured
+  \<^descr> @{ML Proof.raw_goal}~@{text "state"} returns the structured
   Isar goal (if available) in the raw internal form seen by ``raw''
   methods (like @{method induct}).  This form is rarely appropriate
   for diagnostic tools; @{ML Proof.simple_goal} or @{ML Proof.goal}
   should be used in most situations.
 
-  \item @{ML Proof.theorem}~@{text "before_qed after_qed statement ctxt"}
+  \<^descr> @{ML Proof.theorem}~@{text "before_qed after_qed statement ctxt"}
   initializes a toplevel Isar proof state within a given context.
 
   The optional @{text "before_qed"} method is applied at the end of
@@ -150,7 +150,7 @@
 
   \begin{description}
 
-  \item @{text "@{Isar.goal}"} refers to the regular goal state (if
+  \<^descr> @{text "@{Isar.goal}"} refers to the regular goal state (if
   available) of the current proof state managed by the Isar toplevel
   --- as abstract value.
 
@@ -320,33 +320,33 @@
 
   \begin{description}
 
-  \item Type @{ML_type Proof.method} represents proof methods as
+  \<^descr> Type @{ML_type Proof.method} represents proof methods as
   abstract type.
 
-  \item @{ML METHOD_CASES}~@{text "(fn facts => cases_tactic)"} wraps
+  \<^descr> @{ML METHOD_CASES}~@{text "(fn facts => cases_tactic)"} wraps
   @{text cases_tactic} depending on goal facts as proof method with
   cases; the goal context is passed via method syntax.
 
-  \item @{ML METHOD}~@{text "(fn facts => tactic)"} wraps @{text
+  \<^descr> @{ML METHOD}~@{text "(fn facts => tactic)"} wraps @{text
   tactic} depending on goal facts as regular proof method; the goal
   context is passed via method syntax.
 
-  \item @{ML SIMPLE_METHOD}~@{text "tactic"} wraps a tactic that
+  \<^descr> @{ML SIMPLE_METHOD}~@{text "tactic"} wraps a tactic that
   addresses all subgoals uniformly as simple proof method.  Goal facts
   are already inserted into all subgoals before @{text "tactic"} is
   applied.
 
-  \item @{ML SIMPLE_METHOD'}~@{text "tactic"} wraps a tactic that
+  \<^descr> @{ML SIMPLE_METHOD'}~@{text "tactic"} wraps a tactic that
   addresses a specific subgoal as simple proof method that operates on
   subgoal 1.  Goal facts are inserted into the subgoal, then the @{text
   "tactic"} is applied.
 
-  \item @{ML Method.insert_tac}~@{text "facts i"} inserts @{text
+  \<^descr> @{ML Method.insert_tac}~@{text "facts i"} inserts @{text
   "facts"} into subgoal @{text "i"}.  This is convenient to reproduce
   part of the @{ML SIMPLE_METHOD} or @{ML SIMPLE_METHOD'} wrapping
   within regular @{ML METHOD}, for example.
 
-  \item @{ML Method.setup}~@{text "name parser description"} provides
+  \<^descr> @{ML Method.setup}~@{text "name parser description"} provides
   the functionality of the Isar command @{command method_setup} as ML
   function.
 
@@ -548,17 +548,17 @@
 
   \begin{description}
 
-  \item Type @{ML_type attribute} represents attributes as concrete
+  \<^descr> Type @{ML_type attribute} represents attributes as concrete
   type alias.
 
-  \item @{ML Thm.rule_attribute}~@{text "(fn context => rule)"} wraps
+  \<^descr> @{ML Thm.rule_attribute}~@{text "(fn context => rule)"} wraps
   a context-dependent rule (mapping on @{ML_type thm}) as attribute.
 
-  \item @{ML Thm.declaration_attribute}~@{text "(fn thm => decl)"}
+  \<^descr> @{ML Thm.declaration_attribute}~@{text "(fn thm => decl)"}
   wraps a theorem-dependent declaration (mapping on @{ML_type
   Context.generic}) as attribute.
 
-  \item @{ML Attrib.setup}~@{text "name parser description"} provides
+  \<^descr> @{ML Attrib.setup}~@{text "name parser description"} provides
   the functionality of the Isar command @{command attribute_setup} as
   ML function.
 
@@ -576,7 +576,7 @@
 
   \begin{description}
 
-  \item @{text "@{attributes [\<dots>]}"} embeds attribute source
+  \<^descr> @{text "@{attributes [\<dots>]}"} embeds attribute source
   representation into the ML text, which is particularly useful with
   declarations like @{ML Local_Theory.note}.  Attribute names are
   internalized at compile time, but the source is unevaluated.  This
--- a/src/Doc/Implementation/Local_Theory.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Local_Theory.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -105,7 +105,7 @@
 
   \begin{description}
 
-  \item Type @{ML_type local_theory} represents local theories.
+  \<^descr> Type @{ML_type local_theory} represents local theories.
   Although this is merely an alias for @{ML_type Proof.context}, it is
   semantically a subtype of the same: a @{ML_type local_theory} holds
   target information as special context data.  Subtyping means that
@@ -113,7 +113,7 @@
   with operations expecting a regular @{text "ctxt:"}~@{ML_type
   Proof.context}.
 
-  \item @{ML Named_Target.init}~@{text "before_exit name thy"}
+  \<^descr> @{ML Named_Target.init}~@{text "before_exit name thy"}
   initializes a local theory derived from the given background theory.
   An empty name refers to a \emph{global theory} context, and a
   non-empty name refers to a @{command locale} or @{command class}
@@ -121,7 +121,7 @@
   useful for experimentation --- normally the Isar toplevel already
   takes care to initialize the local theory context.
 
-  \item @{ML Local_Theory.define}~@{text "((b, mx), (a, rhs))
+  \<^descr> @{ML Local_Theory.define}~@{text "((b, mx), (a, rhs))
   lthy"} defines a local entity according to the specification that is
   given relative to the current @{text "lthy"} context.  In
   particular the term of the RHS may refer to earlier local entities
@@ -141,7 +141,7 @@
   declarations such as @{attribute simp}, while non-trivial rules like
   @{attribute simplified} are better avoided.
 
-  \item @{ML Local_Theory.note}~@{text "(a, ths) lthy"} is
+  \<^descr> @{ML Local_Theory.note}~@{text "(a, ths) lthy"} is
   analogous to @{ML Local_Theory.define}, but defines facts instead of
   terms.  There is also a slightly more general variant @{ML
   Local_Theory.notes} that defines several facts (with attribute
--- a/src/Doc/Implementation/Logic.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Logic.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -140,50 +140,50 @@
 
   \begin{description}
 
-  \item Type @{ML_type class} represents type classes.
+  \<^descr> Type @{ML_type class} represents type classes.
 
-  \item Type @{ML_type sort} represents sorts, i.e.\ finite
+  \<^descr> Type @{ML_type sort} represents sorts, i.e.\ finite
   intersections of classes.  The empty list @{ML "[]: sort"} refers to
   the empty class intersection, i.e.\ the ``full sort''.
 
-  \item Type @{ML_type arity} represents type arities.  A triple
+  \<^descr> Type @{ML_type arity} represents type arities.  A triple
   @{text "(\<kappa>, \<^vec>s, s) : arity"} represents @{text "\<kappa> ::
   (\<^vec>s)s"} as described above.
 
-  \item Type @{ML_type typ} represents types; this is a datatype with
+  \<^descr> Type @{ML_type typ} represents types; this is a datatype with
   constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
 
-  \item @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
+  \<^descr> @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
   "f"} to all atomic types (@{ML TFree}, @{ML TVar}) occurring in
   @{text "\<tau>"}.
 
-  \item @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
+  \<^descr> @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
   @{text "f"} over all occurrences of atomic types (@{ML TFree}, @{ML
   TVar}) in @{text "\<tau>"}; the type structure is traversed from left to
   right.
 
-  \item @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
+  \<^descr> @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
   tests the subsort relation @{text "s\<^sub>1 \<subseteq> s\<^sub>2"}.
 
-  \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
+  \<^descr> @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
   @{text "\<tau>"} is of sort @{text "s"}.
 
-  \item @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
+  \<^descr> @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
   new type constructor @{text "\<kappa>"} with @{text "k"} arguments and
   optional mixfix syntax.
 
-  \item @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
+  \<^descr> @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
   defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"}.
 
-  \item @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
+  \<^descr> @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
   c\<^sub>n])"} declares a new class @{text "c"}, together with class
   relations @{text "c \<subseteq> c\<^sub>i"}, for @{text "i = 1, \<dots>, n"}.
 
-  \item @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
+  \<^descr> @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
   c\<^sub>2)"} declares the class relation @{text "c\<^sub>1 \<subseteq>
   c\<^sub>2"}.
 
-  \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
+  \<^descr> @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
   the arity @{text "\<kappa> :: (\<^vec>s)s"}.
 
   \end{description}
@@ -213,23 +213,23 @@
 
   \begin{description}
 
-  \item @{text "@{class c}"} inlines the internalized class @{text
+  \<^descr> @{text "@{class c}"} inlines the internalized class @{text
   "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{sort s}"} inlines the internalized sort @{text "s"}
+  \<^descr> @{text "@{sort s}"} inlines the internalized sort @{text "s"}
   --- as @{ML_type "string list"} literal.
 
-  \item @{text "@{type_name c}"} inlines the internalized type
+  \<^descr> @{text "@{type_name c}"} inlines the internalized type
   constructor @{text "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{type_abbrev c}"} inlines the internalized type
+  \<^descr> @{text "@{type_abbrev c}"} inlines the internalized type
   abbreviation @{text "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{nonterminal c}"} inlines the internalized syntactic
+  \<^descr> @{text "@{nonterminal c}"} inlines the internalized syntactic
   type~/ grammar nonterminal @{text "c"} --- as @{ML_type string}
   literal.
 
-  \item @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
+  \<^descr> @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
   --- as constructor term for datatype @{ML_type typ}.
 
   \end{description}
@@ -385,58 +385,58 @@
 
   \begin{description}
 
-  \item Type @{ML_type term} represents de-Bruijn terms, with comments
+  \<^descr> Type @{ML_type term} represents de-Bruijn terms, with comments
   in abstractions, and explicitly named free variables and constants;
   this is a datatype with constructors @{index_ML Bound}, @{index_ML
   Free}, @{index_ML Var}, @{index_ML Const}, @{index_ML Abs},
   @{index_ML_op "$"}.
 
-  \item @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
+  \<^descr> @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
   "\<alpha>"}-equivalence of two terms.  This is the basic equality relation
   on type @{ML_type term}; raw datatype equality should only be used
   for operations related to parsing or printing!
 
-  \item @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
+  \<^descr> @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
   "f"} to all types occurring in @{text "t"}.
 
-  \item @{ML Term.fold_types}~@{text "f t"} iterates the operation
+  \<^descr> @{ML Term.fold_types}~@{text "f t"} iterates the operation
   @{text "f"} over all occurrences of types in @{text "t"}; the term
   structure is traversed from left to right.
 
-  \item @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
+  \<^descr> @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
   "f"} to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
   Const}) occurring in @{text "t"}.
 
-  \item @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
+  \<^descr> @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
   @{text "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML
   Free}, @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
   traversed from left to right.
 
-  \item @{ML fastype_of}~@{text "t"} determines the type of a
+  \<^descr> @{ML fastype_of}~@{text "t"} determines the type of a
   well-typed term.  This operation is relatively slow, despite the
   omission of any sanity checks.
 
-  \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
+  \<^descr> @{ML lambda}~@{text "a b"} produces an abstraction @{text
   "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
   body @{text "b"} are replaced by bound variables.
 
-  \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
+  \<^descr> @{ML betapply}~@{text "(t, u)"} produces an application @{text
   "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
   abstraction.
 
-  \item @{ML incr_boundvars}~@{text "j"} increments a term's dangling
+  \<^descr> @{ML incr_boundvars}~@{text "j"} increments a term's dangling
   bound variables by the offset @{text "j"}.  This is required when
   moving a subterm into a context where it is enclosed by a different
   number of abstractions.  Bound variables with a matching abstraction
   are unaffected.
 
-  \item @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
+  \<^descr> @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
   a new constant @{text "c :: \<sigma>"} with optional mixfix syntax.
 
-  \item @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
+  \<^descr> @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
   introduces a new term abbreviation @{text "c \<equiv> t"}.
 
-  \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
+  \<^descr> @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
   Sign.const_instance}~@{text "thy (c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])"}
   convert between two representations of polymorphic constants: full
   type instance vs.\ compact type arguments form.
@@ -466,22 +466,22 @@
 
   \begin{description}
 
-  \item @{text "@{const_name c}"} inlines the internalized logical
+  \<^descr> @{text "@{const_name c}"} inlines the internalized logical
   constant name @{text "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{const_abbrev c}"} inlines the internalized
+  \<^descr> @{text "@{const_abbrev c}"} inlines the internalized
   abbreviated constant name @{text "c"} --- as @{ML_type string}
   literal.
 
-  \item @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
+  \<^descr> @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
   constant @{text "c"} with precise type instantiation in the sense of
   @{ML Sign.const_instance} --- as @{ML Const} constructor term for
   datatype @{ML_type term}.
 
-  \item @{text "@{term t}"} inlines the internalized term @{text "t"}
+  \<^descr> @{text "@{term t}"} inlines the internalized term @{text "t"}
   --- as constructor term for datatype @{ML_type term}.
 
-  \item @{text "@{prop \<phi>}"} inlines the internalized proposition
+  \<^descr> @{text "@{prop \<phi>}"} inlines the internalized proposition
   @{text "\<phi>"} --- as constructor term for datatype @{ML_type term}.
 
   \end{description}
@@ -683,7 +683,7 @@
 
   \begin{description}
 
-  \item @{ML Thm.peek_status}~@{text "thm"} informs about the current
+  \<^descr> @{ML Thm.peek_status}~@{text "thm"} informs about the current
   status of the derivation object behind the given theorem.  This is a
   snapshot of a potentially ongoing (parallel) evaluation of proofs.
   The three Boolean values indicate the following: @{verbatim oracle}
@@ -692,15 +692,15 @@
   failed} if some future proof has failed, rendering the theorem
   invalid!
 
-  \item @{ML Logic.all}~@{text "a B"} produces a Pure quantification
+  \<^descr> @{ML Logic.all}~@{text "a B"} produces a Pure quantification
   @{text "\<And>a. B"}, where occurrences of the atomic term @{text "a"} in
   the body proposition @{text "B"} are replaced by bound variables.
   (See also @{ML lambda} on terms.)
 
-  \item @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
+  \<^descr> @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
   implication @{text "A \<Longrightarrow> B"}.
 
-  \item Types @{ML_type ctyp} and @{ML_type cterm} represent certified
+  \<^descr> Types @{ML_type ctyp} and @{ML_type cterm} represent certified
   types and terms, respectively.  These are abstract datatypes that
   guarantee that their values have passed the full well-formedness (and
   well-typedness) checks, relative to the declarations of type
@@ -711,7 +711,7 @@
   are located in the @{ML_structure Thm} module, even though theorems are
   not yet involved at that stage.
 
-  \item @{ML Thm.ctyp_of}~@{text "ctxt \<tau>"} and @{ML
+  \<^descr> @{ML Thm.ctyp_of}~@{text "ctxt \<tau>"} and @{ML
   Thm.cterm_of}~@{text "ctxt t"} explicitly check types and terms,
   respectively.  This also involves some basic normalizations, such
   expansion of type and term abbreviations from the underlying
@@ -719,7 +719,7 @@
   Full re-certification is relatively slow and should be avoided in
   tight reasoning loops.
 
-  \item @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
+  \<^descr> @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
   Drule.mk_implies} etc.\ compose certified terms (or propositions)
   incrementally.  This is equivalent to @{ML Thm.cterm_of} after
   unchecked @{ML_op "$"}, @{ML lambda}, @{ML Logic.all}, @{ML
@@ -728,53 +728,53 @@
   constructions on top.  There are separate operations to decompose
   certified terms and theorems to produce certified terms again.
 
-  \item Type @{ML_type thm} represents proven propositions.  This is
+  \<^descr> Type @{ML_type thm} represents proven propositions.  This is
   an abstract datatype that guarantees that its values have been
   constructed by basic principles of the @{ML_structure Thm} module.
   Every @{ML_type thm} value refers to its background theory,
   cf.\ \secref{sec:context-theory}.
 
-  \item @{ML Thm.transfer}~@{text "thy thm"} transfers the given
+  \<^descr> @{ML Thm.transfer}~@{text "thy thm"} transfers the given
   theorem to a \emph{larger} theory, see also \secref{sec:context}.
   This formal adjustment of the background context has no logical
   significance, but is occasionally required for formal reasons, e.g.\
   when theorems that are imported from more basic theories are used in
   the current situation.
 
-  \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
+  \<^descr> @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
   Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
   correspond to the primitive inferences of \figref{fig:prim-rules}.
 
-  \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
+  \<^descr> @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
   corresponds to the @{text "generalize"} rules of
   \figref{fig:subst-rules}.  Here collections of type and term
   variables are generalized simultaneously, specified by the given
   basic names.
 
-  \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
+  \<^descr> @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
   \<^vec>x\<^sub>\<tau>)"} corresponds to the @{text "instantiate"} rules
   of \figref{fig:subst-rules}.  Type variables are substituted before
   term variables.  Note that the types in @{text "\<^vec>x\<^sub>\<tau>"}
   refer to the instantiated versions.
 
-  \item @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
+  \<^descr> @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
   arbitrary proposition as axiom, and retrieves it as a theorem from
   the resulting theory, cf.\ @{text "axiom"} in
   \figref{fig:prim-rules}.  Note that the low-level representation in
   the axiom table may differ slightly from the returned theorem.
 
-  \item @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
+  \<^descr> @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
   oracle rule, essentially generating arbitrary axioms on the fly,
   cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
 
-  \item @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
+  \<^descr> @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
   \<^vec>x \<equiv> t)"} states a definitional axiom for an existing constant
   @{text "c"}.  Dependencies are recorded via @{ML Theory.add_deps},
   unless the @{text "unchecked"} option is set.  Note that the
   low-level representation in the axiom table may differ slightly from
   the returned theorem.
 
-  \item @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
+  \<^descr> @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
   declares dependencies of a named specification for constant @{text
   "c\<^sub>\<tau>"}, relative to existing specifications for constants @{text
   "\<^vec>d\<^sub>\<sigma>"}.  This also works for type constructors.
@@ -810,21 +810,21 @@
 
   \begin{description}
 
-  \item @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
+  \<^descr> @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
   current background theory --- as abstract value of type @{ML_type
   ctyp}.
 
-  \item @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
+  \<^descr> @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
   certified term wrt.\ the current background theory --- as abstract
   value of type @{ML_type cterm}.
 
-  \item @{text "@{thm a}"} produces a singleton fact --- as abstract
+  \<^descr> @{text "@{thm a}"} produces a singleton fact --- as abstract
   value of type @{ML_type thm}.
 
-  \item @{text "@{thms a}"} produces a general fact --- as abstract
+  \<^descr> @{text "@{thms a}"} produces a general fact --- as abstract
   value of type @{ML_type "thm list"}.
 
-  \item @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
+  \<^descr> @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
   the spot according to the minimal proof, which imitates a terminal
   Isar proof.  The result is an abstract value of type @{ML_type thm}
   or @{ML_type "thm list"}, depending on the number of propositions
@@ -917,21 +917,21 @@
 
   \begin{description}
 
-  \item @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
+  \<^descr> @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
   "A"} and @{text "B"}.
 
-  \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
+  \<^descr> @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
   from @{text "A &&& B"}.
 
-  \item @{ML Drule.mk_term} derives @{text "TERM t"}.
+  \<^descr> @{ML Drule.mk_term} derives @{text "TERM t"}.
 
-  \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
+  \<^descr> @{ML Drule.dest_term} recovers term @{text "t"} from @{text
   "TERM t"}.
 
-  \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
+  \<^descr> @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
   "TYPE(\<tau>)"}.
 
-  \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
+  \<^descr> @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
   @{text "\<tau>"}.
 
   \end{description}
@@ -974,11 +974,11 @@
 
   \begin{description}
 
-  \item @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
+  \<^descr> @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
   sort hypotheses of the given theorem, i.e.\ the sorts that are not
   present within type variables of the statement.
 
-  \item @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
+  \<^descr> @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
   sort hypotheses that can be witnessed from the type signature.
 
   \end{description}
@@ -1092,7 +1092,7 @@
 
   \begin{description}
 
-  \item @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
+  \<^descr> @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
   theorem according to the canonical form specified above.  This is
   occasionally helpful to repair some low-level tools that do not
   handle Hereditary Harrop Formulae properly.
@@ -1176,7 +1176,7 @@
 
   \begin{description}
 
-  \item @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
+  \<^descr> @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
   @{text "rule\<^sub>1"} with the @{text i}-th premise of @{text "rule\<^sub>2"},
   according to the @{inference resolution} principle explained above.
   Unless there is precisely one resolvent it raises exception @{ML
@@ -1185,10 +1185,10 @@
   This corresponds to the rule attribute @{attribute THEN} in Isar
   source language.
 
-  \item @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
+  \<^descr> @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
   rule\<^sub>2)"}.
 
-  \item @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
+  \<^descr> @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
   every @{text "rule\<^sub>1"} in @{text "rules\<^sub>1"} and @{text "rule\<^sub>2"} in
   @{text "rules\<^sub>2"}, it resolves the conclusion of @{text "rule\<^sub>1"} with
   the @{text "i"}-th premise of @{text "rule\<^sub>2"}, accumulating multiple
@@ -1196,15 +1196,15 @@
   higher-order unifications can be inefficient compared to the lazy
   variant seen in elementary tactics like @{ML resolve_tac}.
 
-  \item @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
+  \<^descr> @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
   rules\<^sub>2)"}.
 
-  \item @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
+  \<^descr> @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
   against premise @{text "i"} of @{text "rule"}, for @{text "i = n, \<dots>,
   1"}.  By working from right to left, newly emerging premises are
   concatenated in the result, without interfering.
 
-  \item @{text "rule OF rules"} is an alternative notation for @{text
+  \<^descr> @{text "rule OF rules"} is an alternative notation for @{text
   "rules MRS rule"}, which makes rule composition look more like
   function application.  Note that the argument @{text "rules"} need
   not be atomic.
@@ -1361,21 +1361,21 @@
 
   \begin{description}
 
-  \item Type @{ML_type proof} represents proof terms; this is a
+  \<^descr> Type @{ML_type proof} represents proof terms; this is a
   datatype with constructors @{index_ML Abst}, @{index_ML AbsP},
   @{index_ML_op "%"}, @{index_ML_op "%%"}, @{index_ML PBound},
   @{index_ML MinProof}, @{index_ML Hyp}, @{index_ML PAxm}, @{index_ML
   Oracle}, @{index_ML Promise}, @{index_ML PThm} as explained above.
   %FIXME OfClass (!?)
 
-  \item Type @{ML_type proof_body} represents the nested proof
+  \<^descr> Type @{ML_type proof_body} represents the nested proof
   information of a named theorem, consisting of a digest of oracles
   and named theorems over some proof term.  The digest only covers the
   directly visible part of the proof: in order to get the full
   information, the implicit graph of nested theorems needs to be
   traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
 
-  \item @{ML Thm.proof_of}~@{text "thm"} and @{ML
+  \<^descr> @{ML Thm.proof_of}~@{text "thm"} and @{ML
   Thm.proof_body_of}~@{text "thm"} produce the proof term or proof
   body (with digest of oracles and theorems) from a given theorem.
   Note that this involves a full join of internal futures that fulfill
@@ -1384,14 +1384,14 @@
   Parallel performance may suffer by inspecting proof terms at
   run-time.
 
-  \item @{ML proofs} specifies the detail of proof recording within
+  \<^descr> @{ML proofs} specifies the detail of proof recording within
   @{ML_type thm} values produced by the inference kernel: @{ML 0}
   records only the names of oracles, @{ML 1} records oracle names and
   propositions, @{ML 2} additionally records full proof terms.
   Officially named theorems that contribute to a result are recorded
   in any case.
 
-  \item @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
+  \<^descr> @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
   turns the implicit proof term @{text "prf"} into a full proof of the
   given proposition.
 
@@ -1401,21 +1401,21 @@
   constructed manually, but not for those produced automatically by
   the inference kernel.
 
-  \item @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
+  \<^descr> @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
   prf"} expands and reconstructs the proofs of all specified theorems,
   with the given (full) proof.  Theorems that are not uniquely specified
   via their name may be disambiguated by giving their proposition.
 
-  \item @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
+  \<^descr> @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
   given (full) proof into a theorem, by replaying it using only
   primitive rules of the inference kernel.
 
-  \item @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
+  \<^descr> @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
   proof term. The Boolean flags indicate the use of sort and type
   information.  Usually, typing information is left implicit and is
   inferred during proof reconstruction.  %FIXME eliminate flags!?
 
-  \item @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
+  \<^descr> @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
   pretty-prints the given proof term.
 
   \end{description}
--- a/src/Doc/Implementation/ML.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/ML.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -260,7 +260,7 @@
   @{ML_text lhs} (not @{ML_text lhs_tm}).  Or a term that is known
   to be a variable can be called @{ML_text v} or @{ML_text x}.
 
-  \item Tactics (\secref{sec:tactics}) are sufficiently important to
+  \<^item> Tactics (\secref{sec:tactics}) are sufficiently important to
   have specific naming conventions.  The name of a basic tactic
   definition always has a @{ML_text "_tac"} suffix, the subgoal index
   (if applicable) is always called @{ML_text i}, and the goal state
@@ -648,20 +648,20 @@
 
   \begin{description}
 
-  \item @{ML "ML_Context.the_generic_context ()"} refers to the theory
+  \<^descr> @{ML "ML_Context.the_generic_context ()"} refers to the theory
   context of the ML toplevel --- at compile time.  ML code needs to
   take care to refer to @{ML "ML_Context.the_generic_context ()"}
   correctly.  Recall that evaluation of a function body is delayed
   until actual run-time.
 
-  \item @{ML "Context.>>"}~@{text f} applies context transformation
+  \<^descr> @{ML "Context.>>"}~@{text f} applies context transformation
   @{text f} to the implicit context of the ML toplevel.
 
-  \item @{ML ML_Thms.bind_thms}~@{text "(name, thms)"} stores a list of
+  \<^descr> @{ML ML_Thms.bind_thms}~@{text "(name, thms)"} stores a list of
   theorems produced in ML both in the (global) theory context and the
   ML toplevel, associating it with the provided name.
 
-  \item @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but
+  \<^descr> @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but
   refers to a singleton fact.
 
   \end{description}
@@ -727,12 +727,12 @@
 
   \begin{description}
 
-  \item @{text "@{make_string}"} inlines a function to print arbitrary values
+  \<^descr> @{text "@{make_string}"} inlines a function to print arbitrary values
   similar to the ML toplevel. The result is compiler dependent and may fall
   back on "?" in certain situations. The value of configuration option
   @{attribute_ref ML_print_depth} determines further details of output.
 
-  \item @{text "@{print f}"} uses the ML function @{text "f: string ->
+  \<^descr> @{text "@{print f}"} uses the ML function @{text "f: string ->
   unit"} to output the result of @{text "@{make_string}"} above,
   together with the source position of the antiquotation.  The default
   output function is @{ML writeln}.
@@ -909,13 +909,13 @@
 
   \begin{description}
 
-  \item @{ML fold}~@{text f} lifts the parametrized update function
+  \<^descr> @{ML fold}~@{text f} lifts the parametrized update function
   @{text "f"} to a list of parameters.
 
-  \item @{ML fold_rev}~@{text "f"} is similar to @{ML fold}~@{text
+  \<^descr> @{ML fold_rev}~@{text "f"} is similar to @{ML fold}~@{text
   "f"}, but works inside-out, as if the list would be reversed.
 
-  \item @{ML fold_map}~@{text "f"} lifts the parametrized update
+  \<^descr> @{ML fold_map}~@{text "f"} lifts the parametrized update
   function @{text "f"} (with side-result) to a list of parameters and
   cumulative side-results.
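
  A minimal sketch of these combinators, assuming evaluation inside an ML
  block of some theory (expected results in comments):

    ML \<open>
      (* fold threads the update function through the list of parameters *)
      val n = fold (fn x => fn acc => acc + x) [1, 2, 3] 0;   (* 6 *)
      (* fold_map additionally accumulates side-results *)
      val r = fold_map (fn x => fn acc => (2 * x, acc + x)) [1, 2, 3] 0;   (* ([2, 4, 6], 6) *)
    \<close>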
 
@@ -1035,11 +1035,11 @@
 
   \begin{description}
 
-  \item @{ML writeln}~@{text "text"} outputs @{text "text"} as regular
+  \<^descr> @{ML writeln}~@{text "text"} outputs @{text "text"} as regular
   message.  This is the primary message output operation of Isabelle
   and should be used by default.
 
-  \item @{ML tracing}~@{text "text"} outputs @{text "text"} as special
+  \<^descr> @{ML tracing}~@{text "text"} outputs @{text "text"} as special
   tracing message, indicating potential high-volume output to the
   front-end (hundreds or thousands of messages issued by a single
   command).  The idea is to allow the user-interface to downgrade the
@@ -1049,11 +1049,11 @@
   output, e.g.\ switch to a different output window.  So this channel
   should not be used for regular output.
 
-  \item @{ML warning}~@{text "text"} outputs @{text "text"} as
+  \<^descr> @{ML warning}~@{text "text"} outputs @{text "text"} as
   warning, which typically means some extra emphasis on the front-end
   side (color highlighting, icons, etc.).
 
-  \item @{ML error}~@{text "text"} raises exception @{ML ERROR}~@{text
+  \<^descr> @{ML error}~@{text "text"} raises exception @{ML ERROR}~@{text
   "text"} and thus lets the Isar toplevel print @{text "text"} on the
   error channel, which typically means some extra emphasis on the
   front-end side (color highlighting, icons, etc.).
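
  A minimal sketch of the message channels, assuming an ML block inside
  some theory:

    ML \<open>
      writeln "regular result message";
      tracing "potentially high-volume tracing output";
      warning "something worth extra emphasis";
      (* error "msg" would raise ERROR and abort the enclosing command *)
    \<close>
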
@@ -1217,30 +1217,30 @@
 
   \begin{description}
 
-  \item @{ML try}~@{text "f x"} makes the partiality of evaluating
+  \<^descr> @{ML try}~@{text "f x"} makes the partiality of evaluating
   @{text "f x"} explicit via the option datatype.  Interrupts are
   \emph{not} handled here, i.e.\ this form serves as safe replacement
   for the \emph{unsafe} version @{ML_text "(SOME"}~@{text "f
   x"}~@{ML_text "handle _ => NONE)"} that is occasionally seen in
   books about SML97, but not in Isabelle/ML.
 
-  \item @{ML can} is similar to @{ML try} with more abstract result.
-
-  \item @{ML ERROR}~@{text "msg"} represents user errors; this
+  \<^descr> @{ML can} is similar to @{ML try} with more abstract result.
+
+  \<^descr> @{ML ERROR}~@{text "msg"} represents user errors; this
   exception is normally raised indirectly via the @{ML error} function
   (see \secref{sec:message-channels}).
 
-  \item @{ML Fail}~@{text "msg"} represents general program failures.
-
-  \item @{ML Exn.is_interrupt} identifies interrupts robustly, without
+  \<^descr> @{ML Fail}~@{text "msg"} represents general program failures.
+
+  \<^descr> @{ML Exn.is_interrupt} identifies interrupts robustly, without
   mentioning concrete exception constructors in user code.  Handled
   interrupts need to be re-raised promptly!
 
-  \item @{ML reraise}~@{text "exn"} raises exception @{text "exn"}
+  \<^descr> @{ML reraise}~@{text "exn"} raises exception @{text "exn"}
   while preserving its implicit position information (if possible,
   depending on the ML platform).
 
-  \item @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~@{text
+  \<^descr> @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~@{text
   "e"}@{ML_text ")"} evaluates expression @{text "e"} while printing
   a full trace of its stack of nested exceptions (if possible,
   depending on the ML platform).
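
  A minimal sketch of explicit partiality via try and can, assuming an ML
  block inside some theory (expected results in comments):

    ML \<open>
      val r1 = try hd [1, 2, 3];        (* SOME 1 *)
      val r2 = try hd ([]: int list);   (* NONE: partiality made explicit *)
      val ok = can hd ([]: int list);   (* false *)
    \<close>
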
@@ -1258,7 +1258,7 @@
 
   \begin{description}
 
-  \item @{text "@{assert}"} inlines a function
+  \<^descr> @{text "@{assert}"} inlines a function
   @{ML_type "bool -> unit"} that raises @{ML Fail} if the argument is
   @{ML false}.  Due to inlining the source position of failed
   assertions is included in the error output.
@@ -1339,28 +1339,28 @@
 
   \begin{description}
 
-  \item Type @{ML_type "Symbol.symbol"} represents individual Isabelle
+  \<^descr> Type @{ML_type "Symbol.symbol"} represents individual Isabelle
   symbols.
 
-  \item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
+  \<^descr> @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
   from the packed form.  This function supersedes @{ML
   "String.explode"} for virtually all purposes of manipulating text in
   Isabelle!\footnote{The runtime overhead for exploded strings is
   mainly that of the list structure: individual symbols that happen to
   be a singleton string do not require extra memory in Poly/ML.}
 
-  \item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
+  \<^descr> @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
   "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
   symbols according to fixed syntactic conventions of Isabelle, cf.\
   @{cite "isabelle-isar-ref"}.
 
-  \item Type @{ML_type "Symbol.sym"} is a concrete datatype that
+  \<^descr> Type @{ML_type "Symbol.sym"} is a concrete datatype that
   represents the different kinds of symbols explicitly, with
   constructors @{ML "Symbol.Char"}, @{ML "Symbol.UTF8"},
   @{ML "Symbol.Sym"}, @{ML "Symbol.Ctrl"}, @{ML "Symbol.Raw"},
   @{ML "Symbol.Malformed"}.
 
-  \item @{ML "Symbol.decode"} converts the string representation of a
+  \<^descr> @{ML "Symbol.decode"} converts the string representation of a
   symbol into the datatype version.
 
   \end{description}
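
  A minimal sketch of symbol-oriented text handling, assuming an ML block
  inside some theory (expected results in comments):

    ML \<open>
      (* one list element per Isabelle symbol, not per character *)
      val syms = Symbol.explode "x\<alpha>\<^sub>1";   (* ["x", "\<alpha>", "\<^sub>", "1"] *)
      val ok = forall Symbol.is_letter (Symbol.explode "abc");   (* true *)
    \<close>
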
@@ -1401,7 +1401,7 @@
 
   \begin{description}
 
-  \item Type @{ML_type char} is \emph{not} used.  The smallest textual
+  \<^descr> Type @{ML_type char} is \emph{not} used.  The smallest textual
   unit in Isabelle is represented as a ``symbol'' (see
   \secref{sec:symbols}).
 
@@ -1418,7 +1418,7 @@
 
   \begin{description}
 
-  \item Type @{ML_type string} represents immutable vectors of 8-bit
+  \<^descr> Type @{ML_type string} represents immutable vectors of 8-bit
   characters.  There are operations in SML to convert back and forth
   to actual byte vectors, which are seldom used.
 
@@ -1473,7 +1473,7 @@
 
   \begin{description}
 
-  \item Type @{ML_type int} represents regular mathematical integers, which
+  \<^descr> Type @{ML_type int} represents regular mathematical integers, which
   are \emph{unbounded}. Overflow is treated properly, but should never happen
   in practice.\footnote{The size limit for integer bit patterns in memory is
   64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.} This works
@@ -1501,11 +1501,11 @@
 
   \begin{description}
 
-  \item Type @{ML_type Time.time} represents time abstractly according
+  \<^descr> Type @{ML_type Time.time} represents time abstractly according
   to the SML97 basis library definition.  This is adequate for
   internal ML operations, but awkward in concrete time specifications.
 
-  \item @{ML seconds}~@{text "s"} turns the concrete scalar @{text
+  \<^descr> @{ML seconds}~@{text "s"} turns the concrete scalar @{text
   "s"} (measured in seconds) into an abstract time value.  Floating
   point numbers are easy to use as configuration options in the
   context (see \secref{sec:config-options}) or system options that
@@ -1553,13 +1553,13 @@
 
   \begin{description}
 
-  \item @{ML cons}~@{text "x xs"} evaluates to @{text "x :: xs"}.
+  \<^descr> @{ML cons}~@{text "x xs"} evaluates to @{text "x :: xs"}.
 
   Tupled infix operators are a historical accident in Standard ML.
   The curried @{ML cons} amends this, but it should only be used when
   partial application is required.
 
-  \item @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat
+  \<^descr> @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat
   lists as a set-like container that maintains the order of elements.
   See @{file "~~/src/Pure/library.ML"} for the full specifications
   (written in ML).  There are some further derived operations like
@@ -1629,7 +1629,7 @@
 
   \begin{description}
 
-  \item @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update}
+  \<^descr> @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update}
   implement the main ``framework operations'' for mappings in
   Isabelle/ML, following standard conventions for their names and
   types.
@@ -1836,11 +1836,11 @@
 
   \begin{description}
 
-  \item @{ML File.tmp_path}~@{text "path"} relocates the base
+  \<^descr> @{ML File.tmp_path}~@{text "path"} relocates the base
   component of @{text "path"} into the unique temporary directory of
   the running Isabelle/ML process.
 
-  \item @{ML serial_string}~@{text "()"} creates a new serial number
+  \<^descr> @{ML serial_string}~@{text "()"} creates a new serial number
   that is unique over the runtime of the Isabelle/ML process.
 
   \end{description}
@@ -1883,14 +1883,14 @@
 
   \begin{description}
 
-  \item Type @{ML_type "'a Synchronized.var"} represents synchronized
+  \<^descr> Type @{ML_type "'a Synchronized.var"} represents synchronized
   variables with state of type @{ML_type 'a}.
 
-  \item @{ML Synchronized.var}~@{text "name x"} creates a synchronized
+  \<^descr> @{ML Synchronized.var}~@{text "name x"} creates a synchronized
   variable that is initialized with value @{text "x"}.  The @{text
   "name"} is used for tracing.
 
-  \item @{ML Synchronized.guarded_access}~@{text "var f"} lets the
+  \<^descr> @{ML Synchronized.guarded_access}~@{text "var f"} lets the
   function @{text "f"} operate within a critical section on the state
   @{text "x"} as follows: if @{text "f x"} produces @{ML NONE}, it
   continues to wait on the internal condition variable, expecting that
@@ -1996,32 +1996,32 @@
 
   \begin{description}
 
-  \item Type @{ML_type "'a Exn.result"} represents the disjoint sum of
+  \<^descr> Type @{ML_type "'a Exn.result"} represents the disjoint sum of
   ML results explicitly, with constructor @{ML Exn.Res} for regular
   values and @{ML "Exn.Exn"} for exceptions.
 
-  \item @{ML Exn.capture}~@{text "f x"} manages the evaluation of
+  \<^descr> @{ML Exn.capture}~@{text "f x"} manages the evaluation of
   @{text "f x"} such that exceptions are made explicit as @{ML
   "Exn.Exn"}.  Note that this includes physical interrupts (see also
   \secref{sec:exceptions}), so the same precautions apply to user
   code: interrupts must not be absorbed accidentally!
 
-  \item @{ML Exn.interruptible_capture} is similar to @{ML
+  \<^descr> @{ML Exn.interruptible_capture} is similar to @{ML
   Exn.capture}, but interrupts are immediately re-raised as required
   for user code.
 
-  \item @{ML Exn.release}~@{text "result"} releases the original
+  \<^descr> @{ML Exn.release}~@{text "result"} releases the original
   runtime result, exposing its regular value or raising the reified
   exception.
 
-  \item @{ML Par_Exn.release_all}~@{text "results"} combines results
+  \<^descr> @{ML Par_Exn.release_all}~@{text "results"} combines results
   that were produced independently (e.g.\ by parallel evaluation).  If
   all results are regular values, that list is returned.  Otherwise,
   the collection of all exceptions is raised, wrapped-up as collective
   parallel exception.  Note that the latter prevents access to
   individual exceptions by conventional @{verbatim "handle"} of ML.
 
-  \item @{ML Par_Exn.release_first} is similar to @{ML
+  \<^descr> @{ML Par_Exn.release_first} is similar to @{ML
   Par_Exn.release_all}, but only the first (meaningful) exception that has
   occurred in the original evaluation process is raised again; the others are
   ignored.  That single exception may get handled by conventional
@@ -2057,7 +2057,7 @@
 
   \begin{description}
 
-  \item @{ML Par_List.map}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} is like @{ML
+  \<^descr> @{ML Par_List.map}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} is like @{ML
   "map"}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"}, but the evaluation of @{text "f x\<^sub>i"}
   for @{text "i = 1, \<dots>, n"} is performed in parallel.
 
@@ -2067,7 +2067,7 @@
   program exception that happened to occur in the parallel evaluation
   is propagated, and all other failures are ignored.
 
-  \item @{ML Par_List.get_some}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} produces some
+  \<^descr> @{ML Par_List.get_some}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} produces some
   @{text "f x\<^sub>i"} that is of the form @{text "SOME y\<^sub>i"}, if that
   exists, otherwise @{text "NONE"}.  Thus it is similar to @{ML
   Library.get_first}, but subject to a non-deterministic parallel
@@ -2130,19 +2130,19 @@
 
   \begin{description}
 
-  \item Type @{ML_type "'a lazy"} represents lazy values over type @{verbatim
+  \<^descr> Type @{ML_type "'a lazy"} represents lazy values over type @{verbatim
   "'a"}.
 
-  \item @{ML Lazy.lazy}~@{text "(fn () => e)"} wraps the unevaluated
+  \<^descr> @{ML Lazy.lazy}~@{text "(fn () => e)"} wraps the unevaluated
   expression @{text e} as unfinished lazy value.
 
-  \item @{ML Lazy.value}~@{text a} wraps the value @{text a} as finished lazy
+  \<^descr> @{ML Lazy.value}~@{text a} wraps the value @{text a} as finished lazy
   value.  When forced, it returns @{text a} without any further evaluation.
 
   There is very low overhead for this proforma wrapping of strict values as
   lazy values.
 
-  \item @{ML Lazy.force}~@{text x} produces the result of the lazy value in a
+  \<^descr> @{ML Lazy.force}~@{text x} produces the result of the lazy value in a
   thread-safe manner as explained above. Thus it may cause the current thread
   to wait on a pending evaluation attempt by another thread.
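
  A minimal sketch of lazy values, assuming an ML block inside some theory:

    ML \<open>
      val x = Lazy.lazy (fn () => (writeln "evaluating"; 42));
      val a = Lazy.force x;   (* prints "evaluating" once, yields 42 *)
      val b = Lazy.force x;   (* already finished: 42 without re-evaluation *)
    \<close>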
 
@@ -2223,15 +2223,15 @@
 
   \begin{description}
 
-  \item Type @{ML_type "'a future"} represents future values over type
+  \<^descr> Type @{ML_type "'a future"} represents future values over type
   @{verbatim "'a"}.
 
-  \item @{ML Future.fork}~@{text "(fn () => e)"} registers the unevaluated
+  \<^descr> @{ML Future.fork}~@{text "(fn () => e)"} registers the unevaluated
   expression @{text e} as unfinished future value, to be evaluated eventually
   on the parallel worker-thread farm. This is a shorthand for @{ML
   Future.forks} below, with default parameters and a single expression.
 
-  \item @{ML Future.forks}~@{text "params exprs"} is the general interface to
+  \<^descr> @{ML Future.forks}~@{text "params exprs"} is the general interface to
   fork several futures simultaneously. The @{text params} consist of the
   following fields:
 
@@ -2273,7 +2273,7 @@
 
   \end{itemize}
 
-  \item @{ML Future.join}~@{text x} retrieves the value of an already finished
+  \<^descr> @{ML Future.join}~@{text x} retrieves the value of an already finished
   future, which may lead to an exception, according to the result of its
   previous evaluation.
 
@@ -2295,7 +2295,7 @@
   explicitly when forked (see @{text deps} above). Thus the evaluation can
   work from the bottom up, without join conflicts and wait states.
 
-  \item @{ML Future.joins}~@{text xs} joins the given list of futures
+  \<^descr> @{ML Future.joins}~@{text xs} joins the given list of futures
   simultaneously, which is more efficient than @{ML "map Future.join"}~@{text
   xs}.
 
@@ -2305,23 +2305,23 @@
   presently evaluated on other threads only happens as a last resort, when no
   other unfinished futures are left over.
 
-  \item @{ML Future.value}~@{text a} wraps the value @{text a} as finished
+  \<^descr> @{ML Future.value}~@{text a} wraps the value @{text a} as finished
   future value, bypassing the worker-thread farm. When joined, it returns
   @{text a} without any further evaluation.
 
   There is very low overhead for this proforma wrapping of strict values as
   futures.
 
-  \item @{ML Future.map}~@{text "f x"} is a fast-path implementation of @{ML
+  \<^descr> @{ML Future.map}~@{text "f x"} is a fast-path implementation of @{ML
   Future.fork}~@{text "(fn () => f ("}@{ML Future.join}~@{text "x))"}, which
   avoids the full overhead of the task queue and worker-thread farm as far as
   possible. The function @{text f} is supposed to be some trivial
   post-processing or projection of the future result.
 
-  \item @{ML Future.cancel}~@{text "x"} cancels the task group of the given
+  \<^descr> @{ML Future.cancel}~@{text "x"} cancels the task group of the given
   future, using @{ML Future.cancel_group} below.
 
-  \item @{ML Future.cancel_group}~@{text "group"} cancels all tasks of the
+  \<^descr> @{ML Future.cancel_group}~@{text "group"} cancels all tasks of the
   given task group for all time. Threads that are presently processing a task
   of the given group are interrupted: it may take some time until they are
   actually terminated. Tasks that are queued but not yet processed are
@@ -2329,11 +2329,11 @@
   invalidated, any further attempt to fork a future that belongs to it will
   yield a canceled result as well.
 
-  \item @{ML Future.promise}~@{text abort} registers a passive future with the
+  \<^descr> @{ML Future.promise}~@{text abort} registers a passive future with the
   given @{text abort} operation: it is invoked when the future task group is
   canceled.
 
-  \item @{ML Future.fulfill}~@{text "x a"} finishes the passive future @{text
+  \<^descr> @{ML Future.fulfill}~@{text "x a"} finishes the passive future @{text
   x} by the given value @{text a}. If the promise has already been canceled,
   the attempt to fulfill it causes an exception.
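
A usage sketch for the main combinators above (assuming an Isabelle/ML
context; the summation is just a placeholder workload):

  ML_val \<open>
    val x = Future.fork (fn () => fold (curry (op +)) (1 upto 1000) 0);
    val y = Future.map (fn n => 2 * n) x;   (*cheap post-processing*)
    val n = Future.join y;
    val ns = Future.joins [Future.value 1, Future.value 2, x];
  \<close>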
 
--- a/src/Doc/Implementation/Prelim.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Prelim.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -131,25 +131,25 @@
 
   \begin{description}
 
-  \item Type @{ML_type theory} represents theory contexts.
+  \<^descr> Type @{ML_type theory} represents theory contexts.
 
-  \item @{ML "Context.eq_thy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} check strict
+  \<^descr> @{ML "Context.eq_thy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} check strict
   identity of two theories.
 
-  \item @{ML "Context.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
+  \<^descr> @{ML "Context.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
   according to the intrinsic graph structure of the construction.
   This sub-theory relation is a nominal approximation of inclusion
   (@{text "\<subseteq>"}) of the corresponding content (according to the
   semantics of the ML modules that implement the data).
 
-  \item @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
+  \<^descr> @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
   a new theory based on the given parents.  This ML function is
   normally not invoked directly.
 
-  \item @{ML "Theory.parents_of"}~@{text "thy"} returns the direct
+  \<^descr> @{ML "Theory.parents_of"}~@{text "thy"} returns the direct
   ancestors of @{text thy}.
 
-  \item @{ML "Theory.ancestors_of"}~@{text "thy"} returns all
+  \<^descr> @{ML "Theory.ancestors_of"}~@{text "thy"} returns all
   ancestors of @{text thy} (not including @{text thy} itself).
 
   \end{description}
@@ -169,14 +169,14 @@
 
   \begin{description}
 
-  \item @{text "@{theory}"} refers to the background theory of the
+  \<^descr> @{text "@{theory}"} refers to the background theory of the
   current context --- as abstract value.
 
-  \item @{text "@{theory A}"} refers to an explicitly named ancestor
+  \<^descr> @{text "@{theory A}"} refers to an explicitly named ancestor
   theory @{text "A"} of the background theory of the current context
   --- as abstract value.
 
-  \item @{text "@{theory_context A}"} is similar to @{text "@{theory
+  \<^descr> @{text "@{theory_context A}"} is similar to @{text "@{theory
   A}"}, but presents the result as initial @{ML_type Proof.context}
   (see also @{ML Proof_Context.init_global}).
 
@@ -222,15 +222,15 @@
 
   \begin{description}
 
-  \item Type @{ML_type Proof.context} represents proof contexts.
+  \<^descr> Type @{ML_type Proof.context} represents proof contexts.
 
-  \item @{ML Proof_Context.init_global}~@{text "thy"} produces a proof
+  \<^descr> @{ML Proof_Context.init_global}~@{text "thy"} produces a proof
   context derived from @{text "thy"}, initializing all data.
 
-  \item @{ML Proof_Context.theory_of}~@{text "ctxt"} selects the
+  \<^descr> @{ML Proof_Context.theory_of}~@{text "ctxt"} selects the
   background theory from @{text "ctxt"}.
 
-  \item @{ML Proof_Context.transfer}~@{text "thy ctxt"} promotes the
+  \<^descr> @{ML Proof_Context.transfer}~@{text "thy ctxt"} promotes the
   background theory of @{text "ctxt"} to the super theory @{text
   "thy"}.
 
@@ -244,7 +244,7 @@
 
   \begin{description}
 
-  \item @{text "@{context}"} refers to \emph{the} context at
+  \<^descr> @{text "@{context}"} refers to \emph{the} context at
   compile-time --- as abstract value.  Independently of (local) theory
   or proof mode, this always produces a meaningful result.
 
@@ -281,15 +281,15 @@
 
   \begin{description}
 
-  \item Type @{ML_type Context.generic} is the direct sum of @{ML_type
+  \<^descr> Type @{ML_type Context.generic} is the direct sum of @{ML_type
   "theory"} and @{ML_type "Proof.context"}, with the datatype
   constructors @{ML "Context.Theory"} and @{ML "Context.Proof"}.
 
-  \item @{ML Context.theory_of}~@{text "context"} always produces a
+  \<^descr> @{ML Context.theory_of}~@{text "context"} always produces a
   theory from the generic @{text "context"}, using @{ML
   "Proof_Context.theory_of"} as required.
 
-  \item @{ML Context.proof_of}~@{text "context"} always produces a
+  \<^descr> @{ML Context.proof_of}~@{text "context"} always produces a
   proof context from the generic @{text "context"}, using @{ML
   "Proof_Context.init_global"} as required (note that this re-initializes the
   context data with each invocation).
@@ -385,15 +385,15 @@
 
   \begin{description}
 
-  \item @{ML_functor Theory_Data}@{text "(spec)"} declares data for
+  \<^descr> @{ML_functor Theory_Data}@{text "(spec)"} declares data for
   type @{ML_type theory} according to the specification provided as
   argument structure.  The resulting structure provides data init and
   access operations as described above.
 
-  \item @{ML_functor Proof_Data}@{text "(spec)"} is analogous to
+  \<^descr> @{ML_functor Proof_Data}@{text "(spec)"} is analogous to
   @{ML_functor Theory_Data} for type @{ML_type Proof.context}.
 
-  \item @{ML_functor Generic_Data}@{text "(spec)"} is analogous to
+  \<^descr> @{ML_functor Generic_Data}@{text "(spec)"} is analogous to
   @{ML_functor Theory_Data} for type @{ML_type Context.generic}.
 
   \end{description}
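
A typical instantiation sketch (the structure name My_Data and its content
are hypothetical; the argument structure follows the specification
described above):

  ML \<open>
    structure My_Data = Theory_Data
    (
      type T = string list;   (*some tool-specific content*)
      val empty = [];
      val extend = I;
      fun merge (xs, ys) = Library.merge (op =) (xs, ys);
    );

    val add_item = My_Data.map o insert (op =);   (*update the data*)
    val get_items = My_Data.get;                  (*access the data*)
  \<close>
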
@@ -546,13 +546,13 @@
 
   \begin{description}
 
-  \item @{ML Config.get}~@{text "ctxt config"} gets the value of
+  \<^descr> @{ML Config.get}~@{text "ctxt config"} gets the value of
   @{text "config"} in the given context.
 
-  \item @{ML Config.map}~@{text "config f ctxt"} updates the context
+  \<^descr> @{ML Config.map}~@{text "config f ctxt"} updates the context
   by updating the value of @{text "config"}.
 
-  \item @{text "config ="}~@{ML Attrib.setup_config_bool}~@{text "name
+  \<^descr> @{text "config ="}~@{ML Attrib.setup_config_bool}~@{text "name
   default"} creates a named configuration option of type @{ML_type
   bool}, with the given @{text "default"} depending on the application
   context.  The resulting @{text "config"} can be used to get/map its
@@ -560,7 +560,7 @@
   background theory that registers the option as attribute with some
   concrete syntax.
 
-  \item @{ML Attrib.config_int}, @{ML Attrib.config_real}, and @{ML
+  \<^descr> @{ML Attrib.config_int}, @{ML Attrib.config_real}, and @{ML
   Attrib.config_string} work like @{ML Attrib.config_bool}, but for
   types @{ML_type int} and @{ML_type string}, respectively.
 
@@ -692,25 +692,25 @@
 
   \begin{description}
 
-  \item @{ML Name.internal}~@{text "name"} produces an internal name
+  \<^descr> @{ML Name.internal}~@{text "name"} produces an internal name
   by adding one underscore.
 
-  \item @{ML Name.skolem}~@{text "name"} produces a Skolem name by
+  \<^descr> @{ML Name.skolem}~@{text "name"} produces a Skolem name by
   adding two underscores.
 
-  \item Type @{ML_type Name.context} represents the context of already
+  \<^descr> Type @{ML_type Name.context} represents the context of already
   used names; the initial value is @{ML "Name.context"}.
 
-  \item @{ML Name.declare}~@{text "name"} enters a used name into the
+  \<^descr> @{ML Name.declare}~@{text "name"} enters a used name into the
   context.
 
-  \item @{ML Name.invent}~@{text "context name n"} produces @{text
+  \<^descr> @{ML Name.invent}~@{text "context name n"} produces @{text
   "n"} fresh names derived from @{text "name"}.
 
-  \item @{ML Name.variant}~@{text "name context"} produces a fresh
+  \<^descr> @{ML Name.variant}~@{text "name context"} produces a fresh
   variant of @{text "name"}; the result is declared to the context.
 
-  \item @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
+  \<^descr> @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
   of declared type and term variable names.  Projecting a proof
   context down to a primitive name context is occasionally useful when
   invoking lower-level operations.  Regular management of ``fresh
@@ -800,7 +800,7 @@
 
   \begin{description}
 
-  \item Type @{ML_type indexname} represents indexed names.  This is
+  \<^descr> Type @{ML_type indexname} represents indexed names.  This is
   an abbreviation for @{ML_type "string * int"}.  The second component
   is usually non-negative, except for situations where @{text "(x,
   -1)"} is used to inject basic names into this type.  Other negative
@@ -845,16 +845,16 @@
 
   \begin{description}
 
-  \item @{ML Long_Name.base_name}~@{text "name"} returns the base name
+  \<^descr> @{ML Long_Name.base_name}~@{text "name"} returns the base name
   of a long name.
 
-  \item @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
+  \<^descr> @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
   of a long name.
 
-  \item @{ML Long_Name.append}~@{text "name\<^sub>1 name\<^sub>2"} appends two long
+  \<^descr> @{ML Long_Name.append}~@{text "name\<^sub>1 name\<^sub>2"} appends two long
   names.
 
-  \item @{ML Long_Name.implode}~@{text "names"} and @{ML
+  \<^descr> @{ML Long_Name.implode}~@{text "names"} and @{ML
   Long_Name.explode}~@{text "name"} convert between the packed string
   representation and the explicit list form of long names.
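
For example (assuming an Isabelle/ML context; the particular names are
arbitrary):

  ML_val \<open>
    val base = Long_Name.base_name "HOL.conj";   (*"conj"*)
    val qual = Long_Name.qualifier "HOL.conj";   (*"HOL"*)
    val parts = Long_Name.explode "A.b.c";       (*["A", "b", "c"]*)
    val name = Long_Name.implode parts;          (*"A.b.c"*)
    val app = Long_Name.append "A.b" "c";        (*"A.b.c"*)
  \<close>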
 
@@ -949,66 +949,66 @@
 
   \begin{description}
 
-  \item Type @{ML_type binding} represents the abstract concept of
+  \<^descr> Type @{ML_type binding} represents the abstract concept of
   name bindings.
 
-  \item @{ML Binding.empty} is the empty binding.
+  \<^descr> @{ML Binding.empty} is the empty binding.
 
-  \item @{ML Binding.name}~@{text "name"} produces a binding with base
+  \<^descr> @{ML Binding.name}~@{text "name"} produces a binding with base
   name @{text "name"}.  Note that this lacks proper source position
   information; see also the ML antiquotation @{ML_antiquotation
   binding}.
 
-  \item @{ML Binding.qualify}~@{text "mandatory name binding"}
+  \<^descr> @{ML Binding.qualify}~@{text "mandatory name binding"}
   prefixes qualifier @{text "name"} to @{text "binding"}.  The @{text
   "mandatory"} flag tells if this name component always needs to be
   given in name space accesses --- this is mostly @{text "false"} in
   practice.  Note that this part of qualification is typically used in
   derived specification mechanisms.
 
-  \item @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
+  \<^descr> @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
   affects the system prefix.  This part of extra qualification is
   typically used in the infrastructure for modular specifications,
   notably ``local theory targets'' (see also \chref{ch:local-theory}).
 
-  \item @{ML Binding.concealed}~@{text "binding"} indicates that the
+  \<^descr> @{ML Binding.concealed}~@{text "binding"} indicates that the
   binding shall refer to an entity that serves foundational purposes
   only.  This flag helps to mark implementation details of
   specification mechanisms etc.  Other tools should not depend on the
   particulars of concealed entities (cf.\ @{ML
   Name_Space.is_concealed}).
 
-  \item @{ML Binding.print}~@{text "binding"} produces a string
+  \<^descr> @{ML Binding.print}~@{text "binding"} produces a string
   representation for human-readable output, together with some formal
   markup that might get used in GUI front-ends, for example.
 
-  \item Type @{ML_type Name_Space.naming} represents the abstract
+  \<^descr> Type @{ML_type Name_Space.naming} represents the abstract
   concept of a naming policy.
 
-  \item @{ML Name_Space.global_naming} is the default naming policy: it is
+  \<^descr> @{ML Name_Space.global_naming} is the default naming policy: it is
   global and lacks any path prefix.  In a regular theory context this is
   augmented by a path prefix consisting of the theory name.
 
-  \item @{ML Name_Space.add_path}~@{text "path naming"} augments the
+  \<^descr> @{ML Name_Space.add_path}~@{text "path naming"} augments the
   naming policy by extending its path component.
 
-  \item @{ML Name_Space.full_name}~@{text "naming binding"} turns a
+  \<^descr> @{ML Name_Space.full_name}~@{text "naming binding"} turns a
   name binding (usually a basic name) into the fully qualified
   internal name, according to the given naming policy.
 
-  \item Type @{ML_type Name_Space.T} represents name spaces.
+  \<^descr> Type @{ML_type Name_Space.T} represents name spaces.
 
-  \item @{ML Name_Space.empty}~@{text "kind"} and @{ML Name_Space.merge}~@{text
+  \<^descr> @{ML Name_Space.empty}~@{text "kind"} and @{ML Name_Space.merge}~@{text
   "(space\<^sub>1, space\<^sub>2)"} are the canonical operations for
   maintaining name spaces according to theory data management
   (\secref{sec:context-data}); @{text "kind"} is a formal comment
   to characterize the purpose of a name space.
 
-  \item @{ML Name_Space.declare}~@{text "context strict binding
+  \<^descr> @{ML Name_Space.declare}~@{text "context strict binding
   space"} enters a name binding as fully qualified internal name into
   the name space, using the naming of the context.
 
-  \item @{ML Name_Space.intern}~@{text "space name"} internalizes a
+  \<^descr> @{ML Name_Space.intern}~@{text "space name"} internalizes a
   (partially qualified) external name.
 
   This operation is mostly for parsing!  Note that fully qualified
@@ -1017,13 +1017,13 @@
   (or their derivatives for @{ML_type theory} and
   @{ML_type Proof.context}).
 
-  \item @{ML Name_Space.extern}~@{text "ctxt space name"} externalizes a
+  \<^descr> @{ML Name_Space.extern}~@{text "ctxt space name"} externalizes a
   (fully qualified) internal name.
 
   This operation is mostly for printing!  User code should not rely on
   the precise result too much.
 
-  \item @{ML Name_Space.is_concealed}~@{text "space name"} indicates
+  \<^descr> @{ML Name_Space.is_concealed}~@{text "space name"} indicates
   whether @{text "name"} refers to a strictly private entity that
   other tools are supposed to ignore!
 
@@ -1041,7 +1041,7 @@
 
   \begin{description}
 
-  \item @{text "@{binding name}"} produces a binding with base name
+  \<^descr> @{text "@{binding name}"} produces a binding with base name
   @{text "name"} and the source position taken from the concrete
   syntax of this antiquotation.  In many situations this is more
   appropriate than the more basic @{ML Binding.name} function.
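
A brief sketch contrasting the antiquotation with the plain constructor
(the qualifier my_tool is hypothetical):

  ML_val \<open>
    val b = @{binding foo};   (*binding "foo" with proper source position*)
    val b' = Binding.qualify false "my_tool" b;   (*qualified as "my_tool.foo"*)
    val s = Binding.print b';
    val b0 = Binding.name "foo";   (*same base name, but without position*)
  \<close>
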
--- a/src/Doc/Implementation/Proof.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Proof.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -116,45 +116,45 @@
 
   \begin{description}
 
-  \item @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
+  \<^descr> @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
   variables @{text "xs"}, returning the resulting internal names.  By
   default, the internal representation coincides with the external
   one, which also means that the given variables must not be fixed
   already.  There is a different policy within a local proof body: the
   given names are just hints for newly invented Skolem variables.
 
-  \item @{ML Variable.variant_fixes} is similar to @{ML
+  \<^descr> @{ML Variable.variant_fixes} is similar to @{ML
   Variable.add_fixes}, but always produces fresh variants of the given
   names.
 
-  \item @{ML Variable.declare_term}~@{text "t ctxt"} declares term
+  \<^descr> @{ML Variable.declare_term}~@{text "t ctxt"} declares term
   @{text "t"} to belong to the context.  This automatically fixes new
   type variables, but not term variables.  Syntactic constraints for
   type and term variables are declared uniformly, though.
 
-  \item @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
+  \<^descr> @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
   syntactic constraints from term @{text "t"}, without making it part
   of the context yet.
 
-  \item @{ML Variable.export}~@{text "inner outer thms"} generalizes
+  \<^descr> @{ML Variable.export}~@{text "inner outer thms"} generalizes
   fixed type and term variables in @{text "thms"} according to the
   difference of the @{text "inner"} and @{text "outer"} context,
   following the principles sketched above.
 
-  \item @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
+  \<^descr> @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
   variables in @{text "ts"} as far as possible, even those occurring
   in fixed term variables.  The default policy of type-inference is to
   fix newly introduced type variables, which is essentially reversed
   with @{ML Variable.polymorphic}: here the given terms are detached
   from the context as far as possible.
 
-  \item @{ML Variable.import}~@{text "open thms ctxt"} invents fixed
+  \<^descr> @{ML Variable.import}~@{text "open thms ctxt"} invents fixed
   type and term variables for the schematic ones occurring in @{text
   "thms"}.  The @{text "open"} flag indicates whether the fixed names
   should be accessible to the user, otherwise newly introduced names
   are marked as ``internal'' (\secref{sec:names}).
 
-  \item @{ML Variable.focus}~@{text "bindings B"} decomposes the outermost @{text
+  \<^descr> @{ML Variable.focus}~@{text "bindings B"} decomposes the outermost @{text
   "\<And>"} prefix of proposition @{text "B"}, using the given name bindings.
 
   \end{description}
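
A small sketch of fixing and declaring variables (assuming an Isabelle/HOL
context, so that the term antiquotation below elaborates the example term):

  ML_val \<open>
    val ctxt0 = @{context};
    val ([x, y], ctxt1) = Variable.add_fixes ["x", "y"] ctxt0;
    val ctxt2 = Variable.declare_term @{term "x + y :: nat"} ctxt1;
      (*ctxt2 records the type constraints of x and y*)
  \<close>
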
@@ -293,27 +293,27 @@
 
   \begin{description}
 
-  \item Type @{ML_type Assumption.export} represents arbitrary export
+  \<^descr> Type @{ML_type Assumption.export} represents arbitrary export
   rules, which is any function of type @{ML_type "bool -> cterm list
   -> thm -> thm"}, where the @{ML_type "bool"} indicates goal mode,
   and the @{ML_type "cterm list"} the collection of assumptions to be
   discharged simultaneously.
 
-  \item @{ML Assumption.assume}~@{text "ctxt A"} turns proposition @{text
+  \<^descr> @{ML Assumption.assume}~@{text "ctxt A"} turns proposition @{text
   "A"} into a primitive assumption @{text "A \<turnstile> A'"}, where the
   conclusion @{text "A'"} is in HHF normal form.
 
-  \item @{ML Assumption.add_assms}~@{text "r As"} augments the context
+  \<^descr> @{ML Assumption.add_assms}~@{text "r As"} augments the context
   by assumptions @{text "As"} with export rule @{text "r"}.  The
   resulting facts are hypothetical theorems as produced by the raw
   @{ML Assumption.assume}.
 
-  \item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
+  \<^descr> @{ML Assumption.add_assumes}~@{text "As"} is a special case of
   @{ML Assumption.add_assms} where the export rule performs @{text
   "\<Longrightarrow>\<hyphen>intro"} or @{text "#\<Longrightarrow>\<hyphen>intro"}, depending on goal
   mode.
 
-  \item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
+  \<^descr> @{ML Assumption.export}~@{text "is_goal inner outer thm"}
   exports result @{text "thm"} from the @{text "inner"} context
   back into the @{text "outer"} one; @{text "is_goal = true"} means
   this is a goal context.  The result is in HHF normal form.  Note
@@ -421,31 +421,31 @@
 
   \begin{description}
 
-  \item @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
+  \<^descr> @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
   of the specified sub-goal, producing an extended context and a
   reduced goal, which needs to be solved by the given tactic.  All
   schematic parameters of the goal are imported into the context as
   fixed ones, which may not be instantiated in the sub-proof.
 
-  \item @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
+  \<^descr> @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
   Subgoal.FOCUS_PARAMS} are similar to @{ML SUBPROOF}, but are
   slightly more flexible: only the specified parts of the subgoal are
   imported into the context, and the body tactic may introduce new
   subgoals and schematic variables.
 
-  \item @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML
+  \<^descr> @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML
   Subgoal.focus_params} extract the focus information from a goal
   state in the same way as the corresponding tacticals above.  This is
   occasionally useful to experiment without writing actual tactics
   yet.
 
-  \item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
+  \<^descr> @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
   "C"} in the context augmented by fixed variables @{text "xs"} and
   assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
   it.  The latter may depend on the local assumptions being presented
   as facts.  The result is in HHF normal form.
 
-  \item @{ML Goal.prove_common}~@{text "ctxt fork_pri"} is the common form
+  \<^descr> @{ML Goal.prove_common}~@{text "ctxt fork_pri"} is the common form
   to state and prove a simultaneous goal statement, where @{ML Goal.prove}
   is a convenient shorthand that is most frequently used in applications.
 
@@ -462,7 +462,7 @@
   transaction. Thus the system is able to expose error messages ultimately
   to the end-user, even though the subsequent ML code misses them.
 
-  \item @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
+  \<^descr> @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
   given facts using a tactic, which results in additional fixed
   variables and assumptions in the context.  Final results need to be
   exported explicitly.
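
A minimal sketch of @{ML Goal.prove} (assuming an Isabelle/HOL context; the
statement and tactic are deliberately trivial):

  ML_val \<open>
    val ctxt = @{context};
    val thm =
      Goal.prove ctxt ["x"] [] @{prop "x = (x::nat)"}
        (fn {context = goal_ctxt, ...} =>
          resolve_tac goal_ctxt [@{thm refl}] 1);
      (*result: "?x = ?x", the fixed x being generalized on export*)
  \<close>
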
--- a/src/Doc/Implementation/Syntax.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Syntax.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -90,10 +90,10 @@
 
   \begin{description}
 
-  \item @{ML Syntax.read_typs}~@{text "ctxt strs"} parses and checks a
+  \<^descr> @{ML Syntax.read_typs}~@{text "ctxt strs"} parses and checks a
   simultaneous list of source strings as types of the logic.
 
-  \item @{ML Syntax.read_terms}~@{text "ctxt strs"} parses and checks a
+  \<^descr> @{ML Syntax.read_terms}~@{text "ctxt strs"} parses and checks a
   simultaneous list of source strings as terms of the logic.
   Type-reconstruction puts all parsed terms into the same scope: types of
   free variables ultimately need to coincide.
@@ -103,26 +103,26 @@
   is possible to use @{ML Type.constraint} on the intermediate pre-terms
   (\secref{sec:term-check}).
 
-  \item @{ML Syntax.read_props}~@{text "ctxt strs"} parses and checks a
+  \<^descr> @{ML Syntax.read_props}~@{text "ctxt strs"} parses and checks a
   simultaneous list of source strings as terms of the logic, with an implicit
   type-constraint for each argument to enforce type @{typ prop}; this also
   affects the inner syntax for parsing. The remaining type-reconstruction
   works as for @{ML Syntax.read_terms}.
 
-  \item @{ML Syntax.read_typ}, @{ML Syntax.read_term}, @{ML Syntax.read_prop}
+  \<^descr> @{ML Syntax.read_typ}, @{ML Syntax.read_term}, @{ML Syntax.read_prop}
   are like the simultaneous versions, but operate on a single argument only.
   This convenient shorthand is adequate in situations where a single item in
   its own scope is processed. Do not use @{ML "map o Syntax.read_term"} where
   @{ML Syntax.read_terms} is actually intended!
 
-  \item @{ML Syntax.pretty_typ}~@{text "ctxt T"} and @{ML
+  \<^descr> @{ML Syntax.pretty_typ}~@{text "ctxt T"} and @{ML
   Syntax.pretty_term}~@{text "ctxt t"} uncheck and pretty-print the given type
   or term, respectively. Although the uncheck phase acts on a simultaneous
   list as well, this is rarely used in practice, so only the singleton case is
   provided as combined pretty operation. There is no distinction of term vs.\
   proposition.
 
-  \item @{ML Syntax.string_of_typ} and @{ML Syntax.string_of_term} are
+  \<^descr> @{ML Syntax.string_of_typ} and @{ML Syntax.string_of_term} are
   convenient compositions of @{ML Syntax.pretty_typ} and @{ML
   Syntax.pretty_term} with @{ML Pretty.string_of} for output. The result may
   be concatenated with other strings, as long as there is no further
@@ -181,22 +181,22 @@
 
   \begin{description}
 
-  \item @{ML Syntax.parse_typ}~@{text "ctxt str"} parses a source string as
+  \<^descr> @{ML Syntax.parse_typ}~@{text "ctxt str"} parses a source string as
   pre-type that is ready to be used with subsequent check operations.
 
-  \item @{ML Syntax.parse_term}~@{text "ctxt str"} parses a source string as
+  \<^descr> @{ML Syntax.parse_term}~@{text "ctxt str"} parses a source string as
   pre-term that is ready to be used with subsequent check operations.
 
-  \item @{ML Syntax.parse_prop}~@{text "ctxt str"} parses a source string as
+  \<^descr> @{ML Syntax.parse_prop}~@{text "ctxt str"} parses a source string as
   pre-term that is ready to be used with subsequent check operations. The
   inner syntax category is @{typ prop} and a suitable type-constraint is
   included to ensure that this information is observed in subsequent type
   reconstruction.
 
-  \item @{ML Syntax.unparse_typ}~@{text "ctxt T"} unparses a type after
+  \<^descr> @{ML Syntax.unparse_typ}~@{text "ctxt T"} unparses a type after
   uncheck operations, to turn it into a pretty tree.
 
-  \item @{ML Syntax.unparse_term}~@{text "ctxt T"} unparses a term after
+  \<^descr> @{ML Syntax.unparse_term}~@{text "ctxt T"} unparses a term after
   uncheck operations, to turn it into a pretty tree. There is no distinction
   for propositions here.
 
@@ -249,11 +249,11 @@
 
   \begin{description}
 
-  \item @{ML Syntax.check_typs}~@{text "ctxt Ts"} checks a simultaneous list
+  \<^descr> @{ML Syntax.check_typs}~@{text "ctxt Ts"} checks a simultaneous list
   of pre-types as types of the logic.  Typically, this involves normalization
   of type synonyms.
 
-  \item @{ML Syntax.check_terms}~@{text "ctxt ts"} checks a simultaneous list
+  \<^descr> @{ML Syntax.check_terms}~@{text "ctxt ts"} checks a simultaneous list
   of pre-terms as terms of the logic. Typically, this involves type-inference
   and normalization of term abbreviations. The types within the given terms are
   treated in the same way as for @{ML Syntax.check_typs}.
@@ -264,15 +264,15 @@
   is checked; afterwards the type arguments are recovered with @{ML
   Logic.dest_type}.
 
-  \item @{ML Syntax.check_props}~@{text "ctxt ts"} checks a simultaneous list
+  \<^descr> @{ML Syntax.check_props}~@{text "ctxt ts"} checks a simultaneous list
   of pre-terms as terms of the logic, such that all terms are constrained by
   type @{typ prop}. The remaining check operation works as @{ML
   Syntax.check_terms} above.
 
-  \item @{ML Syntax.uncheck_typs}~@{text "ctxt Ts"} unchecks a simultaneous
+  \<^descr> @{ML Syntax.uncheck_typs}~@{text "ctxt Ts"} unchecks a simultaneous
   list of types of the logic, in preparation of pretty printing.
 
-  \item @{ML Syntax.uncheck_terms}~@{text "ctxt ts"} unchecks a simultaneous
+  \<^descr> @{ML Syntax.uncheck_terms}~@{text "ctxt ts"} unchecks a simultaneous
   list of terms of the logic, in preparation of pretty printing. There is no
   distinction for propositions here.
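
The two stages can also be exercised separately, as in this sketch
(assuming an Isabelle/HOL context):

  ML_val \<open>
    val ctxt = @{context};
    val t0 = Syntax.parse_term ctxt "x + y";   (*pre-term, not yet typed*)
    val t = Syntax.check_term ctxt t0;         (*type-inference and checking*)
    val s = Syntax.string_of_term ctxt t;      (*uncheck and pretty printing*)
  \<close>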
 
--- a/src/Doc/Implementation/Tactic.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Implementation/Tactic.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -72,19 +72,19 @@
 
   \begin{description}
 
-  \item @{ML "Goal.init"}~@{text C} initializes a tactical goal from
+  \<^descr> @{ML "Goal.init"}~@{text C} initializes a tactical goal from
   the well-formed proposition @{text C}.
 
-  \item @{ML "Goal.finish"}~@{text "ctxt thm"} checks whether theorem
+  \<^descr> @{ML "Goal.finish"}~@{text "ctxt thm"} checks whether theorem
   @{text "thm"} is a solved goal (no subgoals), and concludes the
   result by removing the goal protection.  The context is only
   required for printing error messages.
 
-  \item @{ML "Goal.protect"}~@{text "n thm"} protects the statement
+  \<^descr> @{ML "Goal.protect"}~@{text "n thm"} protects the statement
   of theorem @{text "thm"}.  The parameter @{text n} indicates the
   number of premises to be retained.
 
-  \item @{ML "Goal.conclude"}~@{text "thm"} removes the goal
+  \<^descr> @{ML "Goal.conclude"}~@{text "thm"} removes the goal
   protection, even if there are pending subgoals.
 
   \end{description}
@@ -189,48 +189,48 @@
 
   \begin{description}
 
-  \item Type @{ML_type tactic} represents tactics.  The
+  \<^descr> Type @{ML_type tactic} represents tactics.  The
   well-formedness conditions described above need to be observed.  See
   also @{file "~~/src/Pure/General/seq.ML"} for the underlying
   implementation of lazy sequences.
 
-  \item Type @{ML_type "int -> tactic"} represents tactics with
+  \<^descr> Type @{ML_type "int -> tactic"} represents tactics with
   explicit subgoal addressing, with well-formedness conditions as
   described above.
 
-  \item @{ML no_tac} is a tactic that always fails, returning the
+  \<^descr> @{ML no_tac} is a tactic that always fails, returning the
   empty sequence.
 
-  \item @{ML all_tac} is a tactic that always succeeds, returning a
+  \<^descr> @{ML all_tac} is a tactic that always succeeds, returning a
   singleton sequence with unchanged goal state.
 
-  \item @{ML print_tac}~@{text "ctxt message"} is like @{ML all_tac}, but
+  \<^descr> @{ML print_tac}~@{text "ctxt message"} is like @{ML all_tac}, but
   prints a message together with the goal state on the tracing
   channel.
 
-  \item @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
+  \<^descr> @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
   into a tactic with unique result.  Exception @{ML THM} is considered
   a regular tactic failure and produces an empty result; other
   exceptions are passed through.
 
-  \item @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
+  \<^descr> @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
   most basic form to produce a tactic with subgoal addressing.  The
   given abstraction over the subgoal term and subgoal number allows one to
   peek at the relevant information of the full goal state.  The
   subgoal range is checked as required above.
 
-  \item @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
+  \<^descr> @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
   subgoal as @{ML_type cterm} instead of raw @{ML_type term}.  This
   avoids expensive re-certification in situations where the subgoal is
   used directly for primitive inferences.
 
-  \item @{ML SELECT_GOAL}~@{text "tac i"} confines a tactic to the
+  \<^descr> @{ML SELECT_GOAL}~@{text "tac i"} confines a tactic to the
   specified subgoal @{text "i"}.  This rearranges subgoals and the
   main goal protection (\secref{sec:tactical-goals}), while retaining
   the syntactic context of the overall goal state (concerning
   schematic variables etc.).
 
-  \item @{ML PREFER_GOAL}~@{text "tac i"} rearranges subgoals to put
+  \<^descr> @{ML PREFER_GOAL}~@{text "tac i"} rearranges subgoals to put
   @{text "i"} in front.  This is similar to @{ML SELECT_GOAL}, but
   without changing the main goal protection.
 
@@ -297,29 +297,29 @@
 
   \begin{description}
 
-  \item @{ML resolve_tac}~@{text "ctxt thms i"} refines the goal state
+  \<^descr> @{ML resolve_tac}~@{text "ctxt thms i"} refines the goal state
   using the given theorems, which should normally be introduction
   rules.  The tactic resolves a rule's conclusion with subgoal @{text
   i}, replacing it by the corresponding versions of the rule's
   premises.
 
-  \item @{ML eresolve_tac}~@{text "ctxt thms i"} performs elim-resolution
+  \<^descr> @{ML eresolve_tac}~@{text "ctxt thms i"} performs elim-resolution
   with the given theorems, which should normally be elimination rules.
 
   Note that @{ML_text "eresolve_tac ctxt [asm_rl]"} is equivalent to @{ML_text
   "assume_tac ctxt"}, which facilitates mixing of assumption steps with
   genuine eliminations.
 
-  \item @{ML dresolve_tac}~@{text "ctxt thms i"} performs
+  \<^descr> @{ML dresolve_tac}~@{text "ctxt thms i"} performs
   destruct-resolution with the given theorems, which should normally
   be destruction rules.  This replaces an assumption by the result of
   applying one of the rules.
 
-  \item @{ML forward_tac} is like @{ML dresolve_tac} except that the
+  \<^descr> @{ML forward_tac} is like @{ML dresolve_tac} except that the
   selected assumption is not deleted.  It applies a rule to an
   assumption, adding the result as a new assumption.
 
-  \item @{ML biresolve_tac}~@{text "ctxt brls i"} refines the proof state
+  \<^descr> @{ML biresolve_tac}~@{text "ctxt brls i"} refines the proof state
   by resolution or elim-resolution on each rule, as indicated by its
   flag.  It affects subgoal @{text "i"} of the proof state.
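
A small sketch combining resolution and assumption steps (assuming an
Isabelle/HOL context; @{ML Goal.prove} merely sets up a goal state here):

  ML_val \<open>
    val ctxt = @{context};
    val thm =
      Goal.prove ctxt ["A", "B"] [] @{prop "A \<Longrightarrow> B \<Longrightarrow> A \<and> B"}
        (fn _ =>
          resolve_tac ctxt [@{thm conjI}] 1 THEN   (*split the conjunction*)
          assume_tac ctxt 1 THEN                   (*solve the first new subgoal*)
          assume_tac ctxt 1);                      (*solve the remaining subgoal*)
  \<close>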
 
@@ -329,16 +329,16 @@
   elimination rules, which is useful to organize the search process
   systematically in proof tools.
 
-  \item @{ML assume_tac}~@{text "ctxt i"} attempts to solve subgoal @{text i}
+  \<^descr> @{ML assume_tac}~@{text "ctxt i"} attempts to solve subgoal @{text i}
   by assumption (modulo higher-order unification).
 
-  \item @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
+  \<^descr> @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
   only for immediate @{text "\<alpha>"}-convertibility instead of using
   unification.  It succeeds (with a unique next state) if one of the
   assumptions is equal to the subgoal's conclusion.  Since it does not
   instantiate variables, it cannot make other subgoals unprovable.
 
-  \item @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
+  \<^descr> @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
   bimatch_tac} are similar to @{ML resolve_tac}, @{ML eresolve_tac},
   @{ML dresolve_tac}, and @{ML biresolve_tac}, respectively, but do
   not instantiate schematic variables in the goal state.%
@@ -421,31 +421,31 @@
 
   \begin{description}
 
-  \item @{ML Rule_Insts.res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
+  \<^descr> @{ML Rule_Insts.res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
   rule @{text thm} with the instantiations @{text insts}, as described
   above, and then performs resolution on subgoal @{text i}.
   
-  \item @{ML Rule_Insts.eres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
+  \<^descr> @{ML Rule_Insts.eres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
   but performs elim-resolution.
 
-  \item @{ML Rule_Insts.dres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
+  \<^descr> @{ML Rule_Insts.dres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
   but performs destruct-resolution.
 
-  \item @{ML Rule_Insts.forw_inst_tac} is like @{ML Rule_Insts.dres_inst_tac}
+  \<^descr> @{ML Rule_Insts.forw_inst_tac} is like @{ML Rule_Insts.dres_inst_tac}
   except that the selected assumption is not deleted.
 
-  \item @{ML Rule_Insts.subgoal_tac}~@{text "ctxt \<phi> i"} adds the proposition
+  \<^descr> @{ML Rule_Insts.subgoal_tac}~@{text "ctxt \<phi> i"} adds the proposition
   @{text "\<phi>"} as local premise to subgoal @{text "i"}, and poses the
   same as a new subgoal @{text "i + 1"} (in the original context).
 
-  \item @{ML Rule_Insts.thin_tac}~@{text "ctxt \<phi> i"} deletes the specified
+  \<^descr> @{ML Rule_Insts.thin_tac}~@{text "ctxt \<phi> i"} deletes the specified
   premise from subgoal @{text i}.  Note that @{text \<phi>} may contain
   schematic variables, to abbreviate the intended proposition; the
   first matching subgoal premise will be deleted.  Removing useless
   premises from a subgoal increases its readability and can make
   search tactics run faster.
 
-  \item @{ML rename_tac}~@{text "names i"} renames the innermost
+  \<^descr> @{ML rename_tac}~@{text "names i"} renames the innermost
   parameters of subgoal @{text i} according to the provided @{text
   names} (which need to be distinct identifiers).
 
@@ -475,14 +475,14 @@
 
   \begin{description}
 
-  \item @{ML rotate_tac}~@{text "n i"} rotates the premises of subgoal
+  \<^descr> @{ML rotate_tac}~@{text "n i"} rotates the premises of subgoal
   @{text i} by @{text n} positions: from right to left if @{text n} is
   positive, and from left to right if @{text n} is negative.
 
-  \item @{ML distinct_subgoals_tac} removes duplicate subgoals from a
+  \<^descr> @{ML distinct_subgoals_tac} removes duplicate subgoals from a
   proof state.  This is potentially inefficient.
 
-  \item @{ML flexflex_tac} removes all flex-flex pairs from the proof
+  \<^descr> @{ML flexflex_tac} removes all flex-flex pairs from the proof
   state by applying the trivial unifier.  This drastic step loses
   information.  It is already part of the Isar infrastructure for
   facts resulting from goals, and rarely needs to be invoked manually.
@@ -515,7 +515,7 @@
 
   \begin{description}
 
-  \item @{ML compose_tac}~@{text "ctxt (flag, rule, m) i"} refines subgoal
+  \<^descr> @{ML compose_tac}~@{text "ctxt (flag, rule, m) i"} refines subgoal
   @{text "i"} using @{text "rule"}, without lifting.  The @{text
   "rule"} is taken to have the form @{text "\<psi>\<^sub>1 \<Longrightarrow> \<dots> \<psi>\<^sub>m \<Longrightarrow> \<psi>"}, where
   @{text "\<psi>"} need not be atomic; thus @{text "m"} determines the
@@ -523,7 +523,7 @@
   performs elim-resolution --- it solves the first premise of @{text
   "rule"} by assumption and deletes that assumption.
 
-  \item @{ML Drule.compose}~@{text "(thm\<^sub>1, i, thm\<^sub>2)"} uses @{text "thm\<^sub>1"},
+  \<^descr> @{ML Drule.compose}~@{text "(thm\<^sub>1, i, thm\<^sub>2)"} uses @{text "thm\<^sub>1"},
   regarded as an atomic formula, to solve premise @{text "i"} of
   @{text "thm\<^sub>2"}.  Let @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} be @{text
   "\<psi>"} and @{text "\<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>"}.  The unique @{text "s"} that
@@ -531,7 +531,7 @@
   \<dots> \<phi>\<^sub>i\<^sub>-\<^sub>1 \<Longrightarrow> \<phi>\<^sub>i\<^sub>+\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>)s"}.  Multiple results are considered an
   error (exception @{ML THM}).
 
-  \item @{text "thm\<^sub>1 COMP thm\<^sub>2"} is the same as @{text "Drule.compose
+  \<^descr> @{text "thm\<^sub>1 COMP thm\<^sub>2"} is the same as @{text "Drule.compose
   (thm\<^sub>1, 1, thm\<^sub>2)"}.
 
   \end{description}
@@ -583,7 +583,7 @@
 
   \begin{description}
 
-  \item @{text "tac\<^sub>1"}~@{ML_op THEN}~@{text "tac\<^sub>2"} is the sequential
+  \<^descr> @{text "tac\<^sub>1"}~@{ML_op THEN}~@{text "tac\<^sub>2"} is the sequential
   composition of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a goal
   state, it returns all states reachable in two steps by applying
   @{text "tac\<^sub>1"} followed by @{text "tac\<^sub>2"}.  First, it applies @{text
@@ -592,30 +592,30 @@
   concatenates the results to produce again one flat sequence of
   states.
 
-  \item @{text "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "tac\<^sub>2"} makes a choice
+  \<^descr> @{text "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "tac\<^sub>2"} makes a choice
   between @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a state, it
   tries @{text "tac\<^sub>1"} and returns the result if non-empty; if @{text
   "tac\<^sub>1"} fails then it uses @{text "tac\<^sub>2"}.  This is a deterministic
   choice: if @{text "tac\<^sub>1"} succeeds then @{text "tac\<^sub>2"} is excluded
   from the result.
 
-  \item @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the
+  \<^descr> @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the
   possible results of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Unlike
   @{ML_op "ORELSE"} there is \emph{no commitment} to either tactic, so
   @{ML_op "APPEND"} helps to avoid incompleteness during search, at
   the cost of potential inefficiencies.
 
-  \item @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
+  \<^descr> @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
   "tac\<^sub>1"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac\<^sub>n"}.
   Note that @{ML "EVERY []"} is the same as @{ML all_tac}: it always
   succeeds.
 
-  \item @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
+  \<^descr> @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
   "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op "ORELSE"}~@{text
   "tac\<^sub>n"}.  Note that @{ML "FIRST []"} is the same as @{ML no_tac}: it
   always fails.
 
-  \item @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for
+  \<^descr> @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for
   tactics with explicit subgoal addressing.  So @{text
   "(tac\<^sub>1"}~@{ML_op THEN'}~@{text "tac\<^sub>2) i"} is the same as @{text
   "(tac\<^sub>1 i"}~@{ML_op THEN}~@{text "tac\<^sub>2 i)"}.
@@ -643,7 +643,7 @@
 
   \begin{description}
 
-  \item @{ML TRY}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML TRY}~@{text "tac"} applies @{text "tac"} to the goal
   state and returns the resulting sequence, if non-empty; otherwise it
   returns the original state.  Thus, it applies @{text "tac"} at most
   once.
@@ -652,7 +652,7 @@
   applied via functional composition: @{ML "TRY"}~@{ML_op o}~@{text
   "tac"}.  There is no need for @{verbatim TRY'}.
 
-  \item @{ML REPEAT}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML REPEAT}~@{text "tac"} applies @{text "tac"} to the goal
   state and, recursively, to each element of the resulting sequence.
   The resulting sequence consists of those states that make @{text
   "tac"} fail.  Thus, it applies @{text "tac"} as many times as
@@ -660,16 +660,16 @@
   invocation of @{text "tac"}.  @{ML REPEAT} is more general than @{ML
   REPEAT_DETERM}, but requires more space.
 
-  \item @{ML REPEAT1}~@{text "tac"} is like @{ML REPEAT}~@{text "tac"}
+  \<^descr> @{ML REPEAT1}~@{text "tac"} is like @{ML REPEAT}~@{text "tac"}
   but it always applies @{text "tac"} at least once, failing if this
   is impossible.
 
-  \item @{ML REPEAT_DETERM}~@{text "tac"} applies @{text "tac"} to the
+  \<^descr> @{ML REPEAT_DETERM}~@{text "tac"} applies @{text "tac"} to the
   goal state and, recursively, to the head of the resulting sequence.
   It returns the first state to make @{text "tac"} fail.  It is
   deterministic, discarding alternative outcomes.
 
-  \item @{ML REPEAT_DETERM_N}~@{text "n tac"} is like @{ML
+  \<^descr> @{ML REPEAT_DETERM_N}~@{text "n tac"} is like @{ML
   REPEAT_DETERM}~@{text "tac"} but the number of repetitions is bound
   by @{text "n"} (where @{ML "~1"} means @{text "\<infinity>"}).
 
@@ -749,28 +749,28 @@
 
   \begin{description}
 
-  \item @{ML ALLGOALS}~@{text "tac"} is equivalent to @{text "tac
+  \<^descr> @{ML ALLGOALS}~@{text "tac"} is equivalent to @{text "tac
   n"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac 1"}.  It
   applies @{text tac} to all the subgoals, counting downwards.
 
-  \item @{ML SOMEGOAL}~@{text "tac"} is equivalent to @{text "tac
+  \<^descr> @{ML SOMEGOAL}~@{text "tac"} is equivalent to @{text "tac
   n"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac 1"}.  It
   applies @{text "tac"} to one subgoal, counting downwards.
 
-  \item @{ML FIRSTGOAL}~@{text "tac"} is equivalent to @{text "tac
+  \<^descr> @{ML FIRSTGOAL}~@{text "tac"} is equivalent to @{text "tac
   1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac n"}.  It
   applies @{text "tac"} to one subgoal, counting upwards.
 
-  \item @{ML HEADGOAL}~@{text "tac"} is equivalent to @{text "tac 1"}.
+  \<^descr> @{ML HEADGOAL}~@{text "tac"} is equivalent to @{text "tac 1"}.
   It applies @{text "tac"} unconditionally to the first subgoal.
 
-  \item @{ML REPEAT_SOME}~@{text "tac"} applies @{text "tac"} once or
+  \<^descr> @{ML REPEAT_SOME}~@{text "tac"} applies @{text "tac"} once or
   more to a subgoal, counting downwards.
 
-  \item @{ML REPEAT_FIRST}~@{text "tac"} applies @{text "tac"} once or
+  \<^descr> @{ML REPEAT_FIRST}~@{text "tac"} applies @{text "tac"} once or
   more to a subgoal, counting upwards.
 
-  \item @{ML RANGE}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>k] i"} is equivalent to
+  \<^descr> @{ML RANGE}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>k] i"} is equivalent to
   @{text "tac\<^sub>k (i + k - 1)"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op
   THEN}~@{text "tac\<^sub>1 i"}.  It applies the given list of tactics to the
   corresponding range of subgoals, counting downwards.
@@ -802,11 +802,11 @@
 
   \begin{description}
 
-  \item @{ML FILTER}~@{text "sat tac"} applies @{text "tac"} to the
+  \<^descr> @{ML FILTER}~@{text "sat tac"} applies @{text "tac"} to the
   goal state and returns a sequence consisting of those result goal
   states that are satisfactory in the sense of @{text "sat"}.
 
-  \item @{ML CHANGED}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML CHANGED}~@{text "tac"} applies @{text "tac"} to the goal
   state and returns precisely those states that differ from the
   original state (according to @{ML Thm.eq_thm}).  Thus @{ML
   CHANGED}~@{text "tac"} always has some effect on the state.
@@ -826,17 +826,17 @@
 
   \begin{description}
 
-  \item @{ML DEPTH_FIRST}~@{text "sat tac"} returns the goal state if
+  \<^descr> @{ML DEPTH_FIRST}~@{text "sat tac"} returns the goal state if
   @{text "sat"} returns true.  Otherwise it applies @{text "tac"},
   then recursively searches from each element of the resulting
   sequence.  The code uses a stack for efficiency, in effect applying
   @{text "tac"}~@{ML_op THEN}~@{ML DEPTH_FIRST}~@{text "sat tac"} to
   the state.
 
-  \item @{ML DEPTH_SOLVE}@{text "tac"} uses @{ML DEPTH_FIRST} to
+  \<^descr> @{ML DEPTH_SOLVE}~@{text "tac"} uses @{ML DEPTH_FIRST} to
   search for states having no subgoals.
 
-  \item @{ML DEPTH_SOLVE_1}~@{text "tac"} uses @{ML DEPTH_FIRST} to
+  \<^descr> @{ML DEPTH_SOLVE_1}~@{text "tac"} uses @{ML DEPTH_FIRST} to
   search for states having fewer subgoals than the given state.  Thus,
   it insists upon solving at least one subgoal.
 
@@ -859,11 +859,11 @@
 
   \begin{description}
 
-  \item @{ML BREADTH_FIRST}~@{text "sat tac"} uses breadth-first
+  \<^descr> @{ML BREADTH_FIRST}~@{text "sat tac"} uses breadth-first
   search to find states for which @{text "sat"} is true.  For most
   applications, it is too slow.
 
-  \item @{ML BEST_FIRST}~@{text "(sat, dist) tac"} does a heuristic
+  \<^descr> @{ML BEST_FIRST}~@{text "(sat, dist) tac"} does a heuristic
   search, using @{text "dist"} to estimate the distance from a
   satisfactory state (in the sense of @{text "sat"}).  It maintains a
   list of states ordered by distance.  It applies @{text "tac"} to the
@@ -875,7 +875,7 @@
   the size of the state.  The smaller the state, the fewer and simpler
   subgoals it has.
 
-  \item @{ML THEN_BEST_FIRST}~@{text "tac\<^sub>0 (sat, dist) tac"} is like
+  \<^descr> @{ML THEN_BEST_FIRST}~@{text "tac\<^sub>0 (sat, dist) tac"} is like
   @{ML BEST_FIRST}, except that the priority queue initially contains
   the result of applying @{text "tac\<^sub>0"} to the goal state.  This
   tactical permits separate tactics for starting the search and
@@ -897,22 +897,22 @@
 
   \begin{description}
 
-  \item @{ML COND}~@{text "sat tac\<^sub>1 tac\<^sub>2"} applies @{text "tac\<^sub>1"} to
+  \<^descr> @{ML COND}~@{text "sat tac\<^sub>1 tac\<^sub>2"} applies @{text "tac\<^sub>1"} to
   the goal state if it satisfies predicate @{text "sat"}, and applies
   @{text "tac\<^sub>2"}.  It is a conditional tactical in that only one of
   @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} is applied to a goal state.
   However, both @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} are evaluated
   because ML uses eager evaluation.
 
-  \item @{ML IF_UNSOLVED}~@{text "tac"} applies @{text "tac"} to the
+  \<^descr> @{ML IF_UNSOLVED}~@{text "tac"} applies @{text "tac"} to the
   goal state if it has any subgoals, and simply returns the goal state
   otherwise.  Many common tactics, such as @{ML resolve_tac}, fail if
   applied to a goal state that has no subgoals.
 
-  \item @{ML SOLVE}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML SOLVE}~@{text "tac"} applies @{text "tac"} to the goal
   state and then fails iff there are subgoals left.
 
-  \item @{ML DETERM}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML DETERM}~@{text "tac"} applies @{text "tac"} to the goal
   state and returns the head of the resulting sequence.  @{ML DETERM}
   limits the search space by making its argument deterministic.
 
@@ -932,19 +932,19 @@
 
   \begin{description}
 
-  \item @{ML has_fewer_prems}~@{text "n thm"} reports whether @{text
+  \<^descr> @{ML has_fewer_prems}~@{text "n thm"} reports whether @{text
   "thm"} has fewer than @{text "n"} premises.
 
-  \item @{ML Thm.eq_thm}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether @{text
+  \<^descr> @{ML Thm.eq_thm}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether @{text
   "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.  Both theorems must have the
   same conclusions, the same set of hypotheses, and the same set of sort
   hypotheses.  Names of bound variables are ignored as usual.
 
-  \item @{ML Thm.eq_thm_prop}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether
+  \<^descr> @{ML Thm.eq_thm_prop}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether
   the propositions of @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.
   Names of bound variables are ignored.
 
-  \item @{ML size_of_thm}~@{text "thm"} computes the size of @{text
+  \<^descr> @{ML size_of_thm}~@{text "thm"} computes the size of @{text
   "thm"}, namely the number of variables, constants and abstractions
   in its conclusion.  It may serve as a distance function for
   @{ML BEST_FIRST}.
--- a/src/Doc/Isar_Ref/Document_Preparation.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Document_Preparation.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -51,19 +51,19 @@
 
   \begin{description}
 
-  \item @{command chapter}, @{command section}, @{command subsection}, and
+  \<^descr> @{command chapter}, @{command section}, @{command subsection}, and
   @{command subsubsection} mark chapter and section headings within the
   theory source; this works in any context, even before the initial
   @{command theory} command. The corresponding {\LaTeX} macros are
   @{verbatim \<open>\isamarkupchapter\<close>}, @{verbatim \<open>\isamarkupsection\<close>},
   @{verbatim \<open>\isamarkupsubsection\<close>}, @{verbatim \<open>\isamarkupsubsubsection\<close>}.
 
-  \item @{command text} and @{command txt} specify paragraphs of plain text.
+  \<^descr> @{command text} and @{command txt} specify paragraphs of plain text.
   This corresponds to a {\LaTeX} environment @{verbatim
   \<open>\begin{isamarkuptext}\<close>} @{text "\<dots>"} @{verbatim \<open>\end{isamarkuptext}\<close>}
   etc.
 
-  \item @{command text_raw} inserts {\LaTeX} source directly into the
+  \<^descr> @{command text_raw} inserts {\LaTeX} source directly into the
   output, without additional markup. Thus the full range of document
   manipulations becomes available, at the risk of messing up document
   output.
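
For example, a theory source may interleave these markup commands with the
formal text (the section title and prose are placeholders):

  section \<open>Example Section\<close>

  text \<open>Some ordinary document text.\<close>

  text_raw \<open>\begin{center}raw {\LaTeX} output\end{center}\<close>
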
@@ -189,55 +189,55 @@
 
   \begin{description}
 
-  \item @{command "print_antiquotations"} prints all document antiquotations
+  \<^descr> @{command "print_antiquotations"} prints all document antiquotations
   that are defined in the current context; the ``@{text "!"}'' option
   indicates extra verbosity.
 
-  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
+  \<^descr> @{text "@{theory A}"} prints the name @{text "A"}, which is
   guaranteed to refer to a valid ancestor theory in the current
   context.
 
-  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
+  \<^descr> @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
   Full fact expressions are allowed here, including attributes
   (\secref{sec:syn-att}).
 
-  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
+  \<^descr> @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
   "\<phi>"}.
 
-  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
+  \<^descr> @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
   @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
 
-  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
+  \<^descr> @{text "@{term t}"} prints a well-typed term @{text "t"}.
   
-  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
+  \<^descr> @{text "@{value t}"} evaluates a term @{text "t"} and prints
   its result, see also @{command_ref (HOL) value}.
 
-  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
+  \<^descr> @{text "@{term_type t}"} prints a well-typed term @{text "t"}
   annotated with its type.
 
-  \item @{text "@{typeof t}"} prints the type of a well-typed term
+  \<^descr> @{text "@{typeof t}"} prints the type of a well-typed term
   @{text "t"}.
 
-  \item @{text "@{const c}"} prints a logical or syntactic constant
+  \<^descr> @{text "@{const c}"} prints a logical or syntactic constant
   @{text "c"}.
   
-  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
+  \<^descr> @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
   @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
 
-  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
+  \<^descr> @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
 
-  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
+  \<^descr> @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
     constructor @{text "\<kappa>"}.
 
-  \item @{text "@{class c}"} prints a class @{text c}.
+  \<^descr> @{text "@{class c}"} prints a class @{text c}.
 
-  \item @{text "@{text s}"} prints uninterpreted source text @{text
+  \<^descr> @{text "@{text s}"} prints uninterpreted source text @{text
   s}.  This is particularly useful to print portions of text according
   to the Isabelle document style, without demanding well-formedness,
   e.g.\ small pieces of terms that should not be parsed or
   type-checked yet.
 
-  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
+  \<^descr> @{text "@{goals}"} prints the current \emph{dynamic} goal
   state.  This is mainly for support of tactic-emulation scripts
   within Isar.  Presentation of goal states does not conform to the
   idea of human-readable proof documents!
@@ -246,38 +246,38 @@
   the reasoning via proper Isar proof commands, instead of peeking at
   the internal machine configuration.
   
-  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
+  \<^descr> @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
   does not print the main goal.
   
-  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
+  \<^descr> @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
   corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
   requires proof terms to be switched on for the current logic
   session.
   
-  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
+  \<^descr> @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
   a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
   information omitted in the compact proof term, which is denoted by
   ``@{text _}'' placeholders there.
   
-  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
+  \<^descr> @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
   s}"}, @{text "@{ML_structure s}"}, and @{text "@{ML_functor s}"}
   check text @{text s} as ML value, infix operator, type, structure,
   and functor respectively.  The source is printed verbatim.
 
-  \item @{text "@{verbatim s}"} prints uninterpreted source text literally
+  \<^descr> @{text "@{verbatim s}"} prints uninterpreted source text literally
   as ASCII characters, using some type-writer font style.
 
-  \item @{text "@{file path}"} checks that @{text "path"} refers to a
+  \<^descr> @{text "@{file path}"} checks that @{text "path"} refers to a
   file (or directory) and prints it verbatim.
 
-  \item @{text "@{file_unchecked path}"} is like @{text "@{file
+  \<^descr> @{text "@{file_unchecked path}"} is like @{text "@{file
   path}"}, but does not check the existence of the @{text "path"}
   within the file-system.
 
-  \item @{text "@{url name}"} produces markup for the given URL, which
+  \<^descr> @{text "@{url name}"} produces markup for the given URL, which
   results in an active hyperlink within the text.
 
-  \item @{text "@{cite name}"} produces a citation @{verbatim
+  \<^descr> @{text "@{cite name}"} produces a citation @{verbatim
   \<open>\cite{name}\<close>} in {\LaTeX}, where the name refers to some Bib{\TeX}
   database entry.
 
@@ -305,17 +305,17 @@
 
   \begin{description}
   
-  \item @{text lhs} extracts the first argument of any application
+  \<^descr> @{text lhs} extracts the first argument of any application
   form with at least two arguments --- typically meta-level or
   object-level equality, or any other binary relation.
   
-  \item @{text rhs} is like @{text lhs}, but extracts the second
+  \<^descr> @{text rhs} is like @{text lhs}, but extracts the second
   argument.
   
-  \item @{text "concl"} extracts the conclusion @{text C} from a rule
+  \<^descr> @{text "concl"} extracts the conclusion @{text C} from a rule
   in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
   
-  \item @{text "prem"} @{text n} extract premise number
+  \<^descr> @{text "prem"} @{text n} extract premise number
   @{text "n"} from from a rule in Horn-clause
   normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
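
As an illustrative sketch (not part of this changeset), term styles are written in parentheses in front of the antiquotation argument; assuming the standard HOL facts add_mono and append_assoc:

  text \<open>
    Premise 1 of the rule is @{thm (prem 1) add_mono}, its conclusion is
    @{thm (concl) add_mono}; for the equation @{thm append_assoc}, the two
    sides are @{thm (lhs) append_assoc} and @{thm (rhs) append_assoc}.
  \<close>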
 
@@ -331,34 +331,34 @@
 
   \begin{description}
 
-  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
+  \<^descr> @{antiquotation_option_def show_types}~@{text "= bool"} and
   @{antiquotation_option_def show_sorts}~@{text "= bool"} control
   printing of explicit type and sort constraints.
 
-  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def show_structs}~@{text "= bool"}
   controls printing of implicit structures.
 
-  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
   controls folding of abbreviations.
 
-  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
+  \<^descr> @{antiquotation_option_def names_long}~@{text "= bool"} forces
   names of types and constants etc.\ to be printed in their fully
   qualified internal form.
 
-  \item @{antiquotation_option_def names_short}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def names_short}~@{text "= bool"}
   forces names of types and constants etc.\ to be printed unqualified.
   Note that internalizing the output again in the current context may
   well yield a different result.
 
-  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def names_unique}~@{text "= bool"}
   determines whether the printed version of qualified names should be
   made sufficiently long to avoid overlap with names declared further
   back.  Set to @{text false} for more concise output.
 
-  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def eta_contract}~@{text "= bool"}
   prints terms in @{text \<eta>}-contracted form.
 
-  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
+  \<^descr> @{antiquotation_option_def display}~@{text "= bool"} indicates
   if the text is to be output as multi-line ``display material'',
   rather than a small piece of text without line breaks (which is the
   default).
@@ -366,26 +366,26 @@
   In this mode the embedded entities are printed in the same style as
   the main theory text.
 
-  \item @{antiquotation_option_def break}~@{text "= bool"} controls
+  \<^descr> @{antiquotation_option_def break}~@{text "= bool"} controls
   line breaks in non-display material.
 
-  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
+  \<^descr> @{antiquotation_option_def quotes}~@{text "= bool"} indicates
   if the output should be enclosed in double quotes.
 
-  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
+  \<^descr> @{antiquotation_option_def mode}~@{text "= name"} adds @{text
   name} to the print mode to be used for presentation.  Note that the
   standard setup for {\LaTeX} output is already present by default,
   including the modes @{text latex} and @{text xsymbols}.
 
-  \item @{antiquotation_option_def margin}~@{text "= nat"} and
+  \<^descr> @{antiquotation_option_def margin}~@{text "= nat"} and
   @{antiquotation_option_def indent}~@{text "= nat"} change the margin
   or indentation for pretty printing of display material.
 
-  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
+  \<^descr> @{antiquotation_option_def goals_limit}~@{text "= nat"}
   determines the maximum number of subgoals to be printed (for goal-based
   antiquotation).
 
-  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
+  \<^descr> @{antiquotation_option_def source}~@{text "= bool"} prints the
   original source text of the antiquotation arguments, rather than its
   internal representation.  Note that formal checking of
   @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
@@ -592,7 +592,7 @@
 
   \begin{description}
 
-  \item @{command "display_drafts"}~@{text paths} performs simple output of a
+  \<^descr> @{command "display_drafts"}~@{text paths} performs simple output of a
   given list of raw source files. Only those symbols that do not require
   additional {\LaTeX} packages are displayed properly, everything else is left
   verbatim.
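
For illustration (editorial sketch only, assuming Isabelle/HOL's Main), antiquotation options are given in square brackets in front of the argument:

  text \<open>
    Inline with types: @{term [show_types] "map f xs"}.
    As display material: @{thm [display, names_short] conj_commute}
    Checked, but printed as source: @{term [source] "map f xs"}
  \<close>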
--- a/src/Doc/Isar_Ref/Generic.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Generic.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -40,11 +40,11 @@
 
   \begin{description}
   
-  \item @{command "print_options"} prints the available configuration
+  \<^descr> @{command "print_options"} prints the available configuration
   options, with names, types, and current values; the ``@{text "!"}'' option
   indicates extra verbosity.
   
-  \item @{text "name = value"} as an attribute expression modifies the
+  \<^descr> @{text "name = value"} as an attribute expression modifies the
   named option, with the syntax of the value depending on the option's
   type.  For @{ML_type bool} the default value is @{text true}.  Any
   attempt to change a global option in a local context is ignored.
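
For illustration (editorial sketch, not part of this changeset), options are typically changed like this:

  declare [[show_types = true]]

  lemma "x = x"
    using [[simp_trace]] by simp

The first form modifies the theory context, the second only the proof of a single lemma.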
@@ -85,16 +85,16 @@
 
   \begin{description}
   
-  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
+  \<^descr> @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
   "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
   all goals; any chained facts provided are inserted into the goal and
   subject to rewriting as well.
 
-  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
+  \<^descr> @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
   into all goals of the proof state.  Note that current facts
   indicated for forward chaining are ignored.
 
-  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
+  \<^descr> @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
   drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
   "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
   method (see \secref{sec:pure-meth-att}), but apply rules by
@@ -111,21 +111,21 @@
   the plain @{method rule} method, with forward chaining of current
   facts.
 
-  \item @{method intro} and @{method elim} repeatedly refine some goal
+  \<^descr> @{method intro} and @{method elim} repeatedly refine some goal
   by intro- or elim-resolution, after having inserted any chained
   facts.  Exactly the rules given as arguments are taken into account;
   this allows fine-tuned decomposition of a proof problem, in contrast
   to common automated tools.
 
-  \item @{method fail} yields an empty result sequence; it is the
+  \<^descr> @{method fail} yields an empty result sequence; it is the
   identity of the ``@{text "|"}'' method combinator (cf.\
   \secref{sec:proof-meth}).
 
-  \item @{method succeed} yields a single (unchanged) result; it is
+  \<^descr> @{method succeed} yields a single (unchanged) result; it is
   the identity of the ``@{text ","}'' method combinator (cf.\
   \secref{sec:proof-meth}).
 
-  \item @{method sleep}~@{text s} succeeds after a real-time delay of @{text
+  \<^descr> @{method sleep}~@{text s} succeeds after a real-time delay of @{text
   s} seconds. This is occasionally useful for demonstration and testing
   purposes.
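
For illustration (an editorial sketch, assuming Isabelle/HOL), a tiny tactic-style script using these basic methods:

  lemma "A \<and> B \<Longrightarrow> B \<and> A"
    apply (erule conjE)
    apply (rule conjI)
     apply assumption
    apply assumption
    done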
 
@@ -157,37 +157,37 @@
 
   \begin{description}
 
-  \item @{attribute tagged}~@{text "name value"} and @{attribute
+  \<^descr> @{attribute tagged}~@{text "name value"} and @{attribute
   untagged}~@{text name} add and remove \emph{tags} of some theorem.
   Tags may be any list of string pairs that serve as formal comment.
   The first string is considered the tag name, the second its value.
   Note that @{attribute untagged} removes any tags of the same name.
 
-  \item @{attribute THEN}~@{text a} composes rules by resolution; it
+  \<^descr> @{attribute THEN}~@{text a} composes rules by resolution; it
   resolves with the first premise of @{text a} (an alternative
   position may be also specified).  See also @{ML_op "RS"} in
   @{cite "isabelle-implementation"}.
   
-  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
+  \<^descr> @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
   folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
   definitions throughout a rule.
 
-  \item @{attribute abs_def} turns an equation of the form @{prop "f x
+  \<^descr> @{attribute abs_def} turns an equation of the form @{prop "f x
   y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
   simp} or @{method unfold} steps always expand it.  This also works
   for object-logic equality.
 
-  \item @{attribute rotated}~@{text n} rotate the premises of a
+  \<^descr> @{attribute rotated}~@{text n} rotates the premises of a
   theorem by @{text n} (default 1).
 
-  \item @{attribute (Pure) elim_format} turns a destruction rule into
+  \<^descr> @{attribute (Pure) elim_format} turns a destruction rule into
   elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
   (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
   
   Note that the Classical Reasoner (\secref{sec:classical}) provides
   its own version of this operation.
 
-  \item @{attribute no_vars} replaces schematic variables by free
+  \<^descr> @{attribute no_vars} replaces schematic variables by free
   ones; this is mainly for tuning output of pretty printed theorems.
 
   \end{description}
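
For illustration (editorial sketch, assuming Isabelle/HOL's Main and a hypothetical constant ident):

  thm mp [rotated]           (* "?P \<Longrightarrow> ?P \<longrightarrow> ?Q \<Longrightarrow> ?Q" *)

  definition ident :: "'a \<Rightarrow> 'a" where "ident x = x"
  thm ident_def [abs_def]    (* "ident \<equiv> \<lambda>x. x" *)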
@@ -218,14 +218,14 @@
 
   \begin{description}
 
-  \item @{method subst}~@{text eq} performs a single substitution step
+  \<^descr> @{method subst}~@{text eq} performs a single substitution step
   using rule @{text eq}, which may be either a meta or object
   equality.
 
-  \item @{method subst}~@{text "(asm) eq"} substitutes in an
+  \<^descr> @{method subst}~@{text "(asm) eq"} substitutes in an
   assumption.
 
-  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
+  \<^descr> @{method subst}~@{text "(i \<dots> j) eq"} performs several
   substitutions in the conclusion. The numbers @{text i} to @{text j}
   indicate the positions to substitute at.  Positions are ordered from
   the top of the term tree moving down from left to right. For
@@ -238,18 +238,18 @@
   assume all substitutions are performed simultaneously.  Otherwise
   the behaviour of @{text subst} is not specified.
 
-  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
+  \<^descr> @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
   substitutions in the assumptions. The positions refer to the
   assumptions in order from left to right.  For example, given in a
   goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
   commutativity of @{text "+"} is the subterm @{text "a + b"} and
   position 2 is the subterm @{text "c + d"}.
 
-  \item @{method hypsubst} performs substitution using some
+  \<^descr> @{method hypsubst} performs substitution using some
   assumption; this only works for equations of the form @{text "x =
   t"} where @{text x} is a free or bound variable.
 
-  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
+  \<^descr> @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
   splitting using the given rules.  Splitting is performed in the
   conclusion or some assumption of the subgoal, depending of the
   structure of the rule.
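
For illustration (editorial sketch, assuming HOL's add.assoc for natural numbers):

  lemma fixes a b c :: nat
    shows "a + (b + c) = (a + b) + c"
    by (subst add.assoc) (rule refl)

Here @{method subst} rewrites the first matching occurrence, i.e.\ the right-hand side of the goal.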
@@ -307,7 +307,7 @@
 
   \begin{description}
 
-  \item @{method simp} invokes the Simplifier on the first subgoal,
+  \<^descr> @{method simp} invokes the Simplifier on the first subgoal,
   after inserting chained facts as additional goal premises; further
   rule declarations may be included via @{text "(simp add: facts)"}.
   The proof method fails if the subgoal remains unchanged after
@@ -348,7 +348,7 @@
   congruence rules (see also \secref{sec:simp-rules}); the default is
   to add.
 
-  \item @{method simp_all} is similar to @{method simp}, but acts on
+  \<^descr> @{method simp_all} is similar to @{method simp}, but acts on
   all goals, working backwards from the last to the first one as usual
   in Isabelle.\footnote{The order is irrelevant for goals without
   schematic variables, so simplification might actually be performed
@@ -361,7 +361,7 @@
   The proof method fails if all subgoals remain unchanged after
   simplification.
 
-  \item @{attribute simp_depth_limit} limits the number of recursive
+  \<^descr> @{attribute simp_depth_limit} limits the number of recursive
   invocations of the Simplifier during conditional rewriting.
 
   \end{description}
@@ -515,7 +515,7 @@
 
   \begin{description}
 
-  \item @{attribute simp} declares rewrite rules, by adding or
+  \<^descr> @{attribute simp} declares rewrite rules, by adding or
   deleting them from the simpset within the theory or proof context.
   Rewrite rules are theorems expressing some form of equality, for
   example:
@@ -576,9 +576,9 @@
 
   \end{enumerate}
 
-  \item @{attribute split} declares case split rules.
+  \<^descr> @{attribute split} declares case split rules.
 
-  \item @{attribute cong} declares congruence rules to the Simplifier
+  \<^descr> @{attribute cong} declares congruence rules to the Simplifier
   context.
 
   Congruence rules are equalities of the form @{text [display]
@@ -619,7 +619,7 @@
   This can make simplification much faster, but may require an extra
   case split over the condition @{text "?q"} to prove the goal.
 
-  \item @{command "print_simpset"} prints the collection of rules declared
+  \<^descr> @{command "print_simpset"} prints the collection of rules declared
   to the Simplifier, which is also known as ``simpset'' internally; the
   ``@{text "!"}'' option indicates extra verbosity.
 
@@ -796,19 +796,19 @@
 
   \begin{description}
 
-  \item @{attribute simp_trace} makes the Simplifier output internal
+  \<^descr> @{attribute simp_trace} makes the Simplifier output internal
   operations.  This includes rewrite steps, but also bookkeeping like
   modifications of the simpset.
 
-  \item @{attribute simp_trace_depth_limit} limits the effect of
+  \<^descr> @{attribute simp_trace_depth_limit} limits the effect of
   @{attribute simp_trace} to the given depth of recursive Simplifier
   invocations (when solving conditions of rewrite rules).
 
-  \item @{attribute simp_debug} makes the Simplifier output some extra
+  \<^descr> @{attribute simp_debug} makes the Simplifier output some extra
   information about internal operations.  This includes any attempted
   invocation of simplification procedures.
 
-  \item @{attribute simp_trace_new} controls Simplifier tracing within
+  \<^descr> @{attribute simp_trace_new} controls Simplifier tracing within
   Isabelle/PIDE applications, notably Isabelle/jEdit @{cite "isabelle-jedit"}.
   This provides a hierarchical representation of the rewriting steps
   performed by the Simplifier.
@@ -820,7 +820,7 @@
   Interactive mode interrupts the normal flow of the Simplifier and defers
   the decision how to continue to the user via some GUI dialog.
 
-  \item @{attribute simp_break} declares term or theorem breakpoints for
+  \<^descr> @{attribute simp_break} declares term or theorem breakpoints for
   @{attribute simp_trace_new} as described above. Term breakpoints are
   patterns which are checked for matches on the redex of a rule application.
   Theorem breakpoints trigger when the corresponding theorem is applied in a
@@ -868,7 +868,7 @@
 
   \begin{description}
 
-  \item @{command "simproc_setup"} defines a named simplification
+  \<^descr> @{command "simproc_setup"} defines a named simplification
   procedure that is invoked by the Simplifier whenever any of the
   given term patterns match the current redex.  The implementation,
   which is provided as ML source text, needs to be of type @{ML_type
@@ -887,7 +887,7 @@
   Morphisms and identifiers are only relevant for simprocs that are
   defined within a local target context, e.g.\ in a locale.
 
-  \item @{text "simproc add: name"} and @{text "simproc del: name"}
+  \<^descr> @{text "simproc add: name"} and @{text "simproc del: name"}
   add or delete named simprocs to the current Simplifier context.  The
   default is to add a simproc.  Note that @{command "simproc_setup"}
   already adds the new simproc to the subsequent context.
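
For illustration, the canonical example from the Isabelle/HOL sources (reproduced here as a sketch) rewrites any term of type unit to ():

  simproc_setup unit ("x::unit") =
    \<open>fn _ => fn _ => fn ct =>
      if HOLogic.is_unit (Thm.term_of ct) then NONE
      else SOME (mk_meta_eq @{thm unit_eq})\<close>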
@@ -946,11 +946,11 @@
 
   \begin{description}
 
-  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
+  \<^descr> @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
   subgoaler of the context to @{text "tac"}.  The tactic will
   be applied to the context of the running Simplifier instance.
 
-  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
+  \<^descr> @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
   set of premises from the context.  This may be non-empty only if
   the Simplifier has been told to utilize local assumptions in the
   first place (cf.\ the options in \secref{sec:simp-meth}).
@@ -1016,21 +1016,21 @@
 
   \begin{description}
 
-  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
+  \<^descr> @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
   "tac"} into a solver; the @{text "name"} is only attached as a
   comment and has no further significance.
 
-  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
+  \<^descr> @{text "ctxt setSSolver solver"} installs @{text "solver"} as
   the safe solver of @{text "ctxt"}.
 
-  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
+  \<^descr> @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
   additional safe solver; it will be tried after the solvers which had
   already been present in @{text "ctxt"}.
 
-  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
+  \<^descr> @{text "ctxt setSolver solver"} installs @{text "solver"} as the
   unsafe solver of @{text "ctxt"}.
 
-  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
+  \<^descr> @{text "ctxt addSolver solver"} adds @{text "solver"} as an
   additional unsafe solver; it will be tried after the solvers which
   had already been present in @{text "ctxt"}.
 
@@ -1102,22 +1102,22 @@
 
   \begin{description}
 
-  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
+  \<^descr> @{text "ctxt setloop tac"} installs @{text "tac"} as the only
   looper tactic of @{text "ctxt"}.
 
-  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
+  \<^descr> @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
   additional looper tactic with name @{text "name"}, which is
   significant for managing the collection of loopers.  The tactic will
   be tried after the looper tactics that had already been present in
   @{text "ctxt"}.
 
-  \item @{text "ctxt delloop name"} deletes the looper tactic that was
+  \<^descr> @{text "ctxt delloop name"} deletes the looper tactic that was
   associated with @{text "name"} from @{text "ctxt"}.
 
-  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
+  \<^descr> @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
   for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
 
-  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
+  \<^descr> @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
   tactic corresponding to @{text thm} from the looper tactics of
   @{text "ctxt"}.
 
@@ -1176,7 +1176,7 @@
 
   \begin{description}
   
-  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
+  \<^descr> @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
   be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
   a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
   The result is fully simplified by default, including assumptions and
@@ -1438,11 +1438,11 @@
 
   \begin{description}
 
-  \item @{command "print_claset"} prints the collection of rules
+  \<^descr> @{command "print_claset"} prints the collection of rules
   declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
   within the context.
 
-  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
+  \<^descr> @{attribute intro}, @{attribute elim}, and @{attribute dest}
   declare introduction, elimination, and destruction rules,
   respectively.  By default, rules are considered as \emph{unsafe}
   (i.e.\ not applied blindly without backtracking), while ``@{text
@@ -1467,11 +1467,11 @@
   added with some other classification, but the rule is added anyway
   as requested.
 
-  \item @{attribute rule}~@{text del} deletes all occurrences of a
+  \<^descr> @{attribute rule}~@{text del} deletes all occurrences of a
   rule from the classical context, regardless of its classification as
   introduction~/ elimination~/ destruction and safe~/ unsafe.
 
-  \item @{attribute iff} declares logical equivalences to the
+  \<^descr> @{attribute iff} declares logical equivalences to the
   Simplifier and the Classical reasoner at the same time.
   Non-conditional rules result in a safe introduction and elimination
   pair; conditional ones are considered unsafe.  Rules with negative
@@ -1482,7 +1482,7 @@
   the Isabelle/Pure context only, and omits the Simplifier
   declaration.
 
-  \item @{attribute swapped} turns an introduction rule into an
+  \<^descr> @{attribute swapped} turns an introduction rule into an
   elimination, by resolving with the classical swap principle @{text
   "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
   illustrative purposes: the Classical Reasoner already swaps rules
@@ -1506,7 +1506,7 @@
 
   \begin{description}
 
-  \item @{method rule} as offered by the Classical Reasoner is a
+  \<^descr> @{method rule} as offered by the Classical Reasoner is a
   refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
   versions work the same, but the classical version observes the
   classical rule context in addition to that of Isabelle/Pure.
@@ -1516,7 +1516,7 @@
   ones), but only few declarations to the rule context of
   Isabelle/Pure (\secref{sec:pure-meth-att}).
 
-  \item @{method contradiction} solves some goal by contradiction,
+  \<^descr> @{method contradiction} solves some goal by contradiction,
   deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
   facts, which are guaranteed to participate, may appear in either
   order.
@@ -1566,7 +1566,7 @@
 
   \begin{description}
 
-  \item @{method blast} is a separate classical tableau prover that
+  \<^descr> @{method blast} is a separate classical tableau prover that
   uses the same classical rule declarations as explained before.
 
   Proof search is coded directly in ML using special data structures.
@@ -1604,7 +1604,7 @@
   be made much faster by supplying the successful search bound to this
   proof method instead.
 
-  \item @{method auto} combines classical reasoning with
+  \<^descr> @{method auto} combines classical reasoning with
   simplification.  It is intended for situations where there are a lot
   of mostly trivial subgoals; it proves all the easy ones, leaving the
   ones it cannot prove.  Occasionally, attempting to prove the hard
@@ -1616,12 +1616,12 @@
   for a slower but more general alternative that also takes wrappers
   into account.
 
-  \item @{method force} is intended to prove the first subgoal
+  \<^descr> @{method force} is intended to prove the first subgoal
   completely, using many fancy proof tools and performing a rather
   exhaustive search.  As a result, proof attempts may take rather long
   or diverge easily.
 
-  \item @{method fast}, @{method best}, @{method slow} attempt to
+  \<^descr> @{method fast}, @{method best}, @{method slow} attempt to
   prove the first subgoal using sequent-style reasoning as explained
   before.  Unlike @{method blast}, they construct proofs directly in
   Isabelle.
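
For illustration (editorial sketch), a typical goal in pure predicate logic that @{method blast} disposes of immediately:

  lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> (\<exists>x. P x) \<Longrightarrow> (\<exists>x. Q x)"
    by blast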
@@ -1635,13 +1635,13 @@
   search: it may, when backtracking from a failed proof attempt, undo
   even the step of proving a subgoal by assumption.
 
-  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
+  \<^descr> @{method fastforce}, @{method slowsimp}, @{method bestsimp}
   are like @{method fast}, @{method slow}, @{method best},
  respectively, but use the Simplifier as an additional wrapper. The name
  @{method fastforce} reflects the behaviour of this popular method
   better without requiring an understanding of its implementation.
 
-  \item @{method deepen} works by exhaustive search up to a certain
+  \<^descr> @{method deepen} works by exhaustive search up to a certain
   depth.  The start depth is 4 (unless specified explicitly), and the
   depth is increased iteratively up to 10.  Unsafe rules are modified
  to preserve the formula they act on, so that it can be used repeatedly.
@@ -1681,13 +1681,13 @@
 
   \begin{description}
 
-  \item @{method safe} repeatedly performs safe steps on all subgoals.
+  \<^descr> @{method safe} repeatedly performs safe steps on all subgoals.
   It is deterministic, with at most one outcome.
 
-  \item @{method clarify} performs a series of safe steps without
+  \<^descr> @{method clarify} performs a series of safe steps without
   splitting subgoals; see also @{method clarify_step}.
 
-  \item @{method clarsimp} acts like @{method clarify}, but also does
+  \<^descr> @{method clarsimp} acts like @{method clarify}, but also does
   simplification.  Note that if the Simplifier context includes a
   splitter for the premises, the subgoal may still be split.
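
For illustration (editorial sketch): @{method clarify} performs the safe, non-splitting steps and leaves the rest to the user or to a stronger method:

  lemma "A \<and> B \<longrightarrow> B \<and> A"
    apply clarify   (* the conjunction in the conclusion is left intact *)
    apply auto
    done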
 
@@ -1712,25 +1712,25 @@
 
   \begin{description}
 
-  \item @{method safe_step} performs a safe step on the first subgoal.
+  \<^descr> @{method safe_step} performs a safe step on the first subgoal.
   The safe wrapper tacticals are applied to a tactic that may include
   proof by assumption or Modus Ponens (taking care not to instantiate
   unknowns), or substitution.
 
-  \item @{method inst_step} is like @{method safe_step}, but allows
+  \<^descr> @{method inst_step} is like @{method safe_step}, but allows
   unknowns to be instantiated.
 
-  \item @{method step} is the basic step of the proof procedure, it
+  \<^descr> @{method step} is the basic step of the proof procedure, it
   operates on the first subgoal.  The unsafe wrapper tacticals are
   applied to a tactic that tries @{method safe}, @{method inst_step},
   or applies an unsafe rule from the context.
 
-  \item @{method slow_step} resembles @{method step}, but allows
+  \<^descr> @{method slow_step} resembles @{method step}, but allows
   backtracking between using safe rules with instantiation (@{method
   inst_step}) and using unsafe rules.  The resulting search space is
   larger.
 
-  \item @{method clarify_step} performs a safe step on the first
+  \<^descr> @{method clarify_step} performs a safe step on the first
   subgoal; no splitting step is applied.  For example, the subgoal
   @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
   Modus Ponens, etc., may be performed provided they do not
@@ -1792,40 +1792,40 @@
 
   \begin{description}
 
-  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
+  \<^descr> @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
   which should yield a safe tactic, to modify the existing safe step
   tactic.
 
-  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
+  \<^descr> @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
   safe wrapper, such that it is tried \emph{before} each safe step of
   the search.
 
-  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
+  \<^descr> @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
   safe wrapper, such that it is tried when a safe step of the search
   would fail.
 
-  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
+  \<^descr> @{text "ctxt delSWrapper name"} deletes the safe wrapper with
   the given name.
 
-  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
+  \<^descr> @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
   modify the existing (unsafe) step tactic.
 
-  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
+  \<^descr> @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
  unsafe wrapper, such that its result is concatenated
   \emph{before} the result of each unsafe step.
 
-  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
+  \<^descr> @{text "ctxt addafter (name, tac)"} adds the given tactic as an
  unsafe wrapper, such that its result is concatenated \emph{after}
   the result of each unsafe step.
 
-  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
+  \<^descr> @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
   the given name.
 
-  \item @{text "addSss"} adds the simpset of the context to its
+  \<^descr> @{text "addSss"} adds the simpset of the context to its
   classical set. The assumptions and goal will be simplified, in a
   rather safe way, after each safe step of the search.
 
-  \item @{text "addss"} adds the simpset of the context to its
+  \<^descr> @{text "addss"} adds the simpset of the context to its
  classical set. The assumptions and goal will be simplified before
  each unsafe step of the search.
 
@@ -1874,7 +1874,7 @@
 
   \begin{description}
   
-  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
+  \<^descr> @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
   @{text c} as the truth judgment of the current object-logic.  Its
   type @{text \<sigma>} should specify a coercion of the category of
   object-level propositions to @{text prop} of the Pure meta-logic;
@@ -1883,7 +1883,7 @@
   with that of @{text prop}.  Only one @{command "judgment"}
   declaration may be given in any theory development.
   
-  \item @{method atomize} (as a method) rewrites any non-atomic
+  \<^descr> @{method atomize} (as a method) rewrites any non-atomic
   premises of a sub-goal, using the meta-level equations declared via
   @{attribute atomize} (as an attribute) beforehand.  As a result,
   heavily nested goals become amenable to fundamental operations such
@@ -1898,7 +1898,7 @@
   Meta-level conjunction should be covered as well (this is
   particularly important for locales, see \secref{sec:locale}).
 
-  \item @{attribute rule_format} rewrites a theorem by the equalities
+  \<^descr> @{attribute rule_format} rewrites a theorem by the equalities
   declared as @{attribute rulify} rules in the current object-logic.
   By default, the result is fully normalized, including assumptions
   and conclusions at any depth.  The @{text "(no_asm)"} option
@@ -1930,19 +1930,19 @@
 
   \begin{description}
 
-  \item @{attribute unify_trace_simp} controls tracing of the
+  \<^descr> @{attribute unify_trace_simp} controls tracing of the
   simplification phase of higher-order unification.
 
-  \item @{attribute unify_trace_types} controls warnings of
+  \<^descr> @{attribute unify_trace_types} controls warnings of
   incompleteness, when unification is not considering all possible
   instantiations of schematic type variables.
 
-  \item @{attribute unify_trace_bound} determines the depth where
+  \<^descr> @{attribute unify_trace_bound} determines the depth where
   unification starts to print tracing information once it reaches
  that depth; 0 for full tracing.  At the default value, tracing
   information is almost never printed in practice.
 
-  \item @{attribute unify_search_bound} prevents unification from
+  \<^descr> @{attribute unify_search_bound} prevents unification from
   searching past the given depth.  Because of this bound, higher-order
   unification cannot return an infinite sequence, though it can return
   an exponentially long one.  The search rarely approaches the default
--- a/src/Doc/Isar_Ref/HOL_Specific.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/HOL_Specific.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -118,7 +118,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "inductive"} and @{command (HOL)
+  \<^descr> @{command (HOL) "inductive"} and @{command (HOL)
   "coinductive"} define (co)inductive predicates from the introduction
   rules.
 
@@ -139,15 +139,15 @@
   \emph{monotonicity theorems}, which are required for each operator
   applied to a recursive set in the introduction rules.
 
-  \item @{command (HOL) "inductive_set"} and @{command (HOL)
+  \<^descr> @{command (HOL) "inductive_set"} and @{command (HOL)
   "coinductive_set"} are wrappers for to the previous commands for
   native HOL predicates.  This allows to define (co)inductive sets,
   where multiple arguments are simulated via tuples.
 
-  \item @{command "print_inductives"} prints (co)inductive definitions and
+  \<^descr> @{command "print_inductives"} prints (co)inductive definitions and
   monotonicity rules; the ``@{text "!"}'' option indicates extra verbosity.
 
-  \item @{attribute (HOL) mono} declares monotonicity rules in the
+  \<^descr> @{attribute (HOL) mono} declares monotonicity rules in the
   context.  These rule are involved in the automated monotonicity
   proof of the above inductive and coinductive definitions.
 
@@ -162,17 +162,17 @@
 
   \begin{description}
 
-  \item @{text R.intros} is the list of introduction rules as proven
+  \<^descr> @{text R.intros} is the list of introduction rules as proven
   theorems, for the recursive predicates (or sets).  The rules are
   also available individually, using the names given them in the
   theory file;
 
-  \item @{text R.cases} is the case analysis (or elimination) rule;
-
-  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
+  \<^descr> @{text R.cases} is the case analysis (or elimination) rule;
+
+  \<^descr> @{text R.induct} or @{text R.coinduct} is the (co)induction
   rule;
 
-  \item @{text R.simps} is the equation unrolling the fixpoint of the
+  \<^descr> @{text R.simps} is the equation unrolling the fixpoint of the
   predicate one step.
 
   \end{description}
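
For illustration (an editorial sketch, assuming Isabelle/HOL), the usual inductive definition of the even numbers and the facts generated for it:

  inductive ev :: "nat \<Rightarrow> bool" where
    ev0:  "ev 0"
  | evSS: "ev n \<Longrightarrow> ev (Suc (Suc n))"

  thm ev.intros ev.cases ev.induct ev.simps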
@@ -292,7 +292,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "primrec"} defines primitive recursive functions
+  \<^descr> @{command (HOL) "primrec"} defines primitive recursive functions
   over datatypes (see also @{command_ref (HOL) datatype}). The given @{text
   equations} specify reduction rules that are produced by instantiating the
   generic combinator for primitive recursion that is available for each
@@ -315,7 +315,7 @@
   normalize expressions of @{text "f"} applied to datatype constructions, by
   simulating symbolic computation via rewriting.
 
-  \item @{command (HOL) "function"} defines functions by general wellfounded
+  \<^descr> @{command (HOL) "function"} defines functions by general wellfounded
   recursion. A detailed description with examples can be found in @{cite
   "isabelle-function"}. The function is specified by a set of (possibly
   conditional) recursive equations with arbitrary pattern matching. The
@@ -328,18 +328,18 @@
   "f_dom"}. The @{command (HOL) "termination"} command can then be used to
   establish that the function is total.
 
-  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command (HOL)
+  \<^descr> @{command (HOL) "fun"} is a shorthand notation for ``@{command (HOL)
   "function"}~@{text "(sequential)"}'', followed by automated proof attempts
   regarding pattern matching and termination. See @{cite
   "isabelle-function"} for further details.
 
-  \item @{command (HOL) "termination"}~@{text f} commences a termination
+  \<^descr> @{command (HOL) "termination"}~@{text f} commences a termination
   proof for the previously defined function @{text f}. If this is omitted,
   the command refers to the most recent function definition. After the proof
  is closed, the recursive equations and the induction principle are
   established.
 
-  \item @{command (HOL) "fun_cases"} generates specialized elimination rules
+  \<^descr> @{command (HOL) "fun_cases"} generates specialized elimination rules
   for function equations. It expects one or more function equations and
   produces rules that eliminate the given equalities, following the cases
   given in the function definition.
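
For illustration (editorial sketch; the second definition is modelled on the sum example of the Functions tutorial):

  primrec app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
    "app [] ys = ys"
  | "app (x # xs) ys = x # app xs ys"

  function sum_upto :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
    "sum_upto i N = (if i > N then 0 else i + sum_upto (Suc i) N)"
    by pat_completeness auto
  termination
    by (relation "measure (\<lambda>(i, N). N + 1 - i)") auto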
@@ -362,7 +362,7 @@
 
   \begin{description}
 
-  \item @{text sequential} enables a preprocessor which disambiguates
+  \<^descr> @{text sequential} enables a preprocessor which disambiguates
   overlapping patterns by making them mutually disjoint. Earlier equations
  take precedence over later ones. This allows giving the specification in
   a format very similar to functional programming. Note that the resulting
@@ -371,7 +371,7 @@
   equation given by the user may result in several theorems. Also note that
   this automatic transformation only works for ML-style datatype patterns.
 
-  \item @{text domintros} enables the automated generation of introduction
+  \<^descr> @{text domintros} enables the automated generation of introduction
   rules for the domain predicate. While mostly not needed, they can be
   helpful in some proofs about partial functions.
 
@@ -537,19 +537,19 @@
 
   \begin{description}
 
-  \item @{method (HOL) pat_completeness} is a specialized method to
+  \<^descr> @{method (HOL) pat_completeness} is a specialized method to
   solve goals regarding the completeness of pattern matching, as
   required by the @{command (HOL) "function"} package (cf.\
   @{cite "isabelle-function"}).
 
-  \item @{method (HOL) relation}~@{text R} introduces a termination
+  \<^descr> @{method (HOL) relation}~@{text R} introduces a termination
   proof using the relation @{text R}.  The resulting proof state will
   contain goals expressing that @{text R} is wellfounded, and that the
   arguments of recursive calls decrease with respect to @{text R}.
   Usually, this method is used as the initial proof step of manual
   termination proofs.
 
-  \item @{method (HOL) "lexicographic_order"} attempts a fully
+  \<^descr> @{method (HOL) "lexicographic_order"} attempts a fully
   automated termination proof by searching for a lexicographic
   combination of size measures on the arguments of the function. The
   method accepts the same arguments as the @{method auto} method,
@@ -559,7 +559,7 @@
   In case of failure, extensive information is printed, which can help
   to analyse the situation (cf.\ @{cite "isabelle-function"}).
 
-  \item @{method (HOL) "size_change"} also works on termination goals,
+  \<^descr> @{method (HOL) "size_change"} also works on termination goals,
   using a variation of the size-change principle, together with a
   graph decomposition technique (see @{cite krauss_phd} for details).
   Three kinds of orders are used internally: @{text max}, @{text min},
@@ -571,7 +571,7 @@
   For local descent proofs, the @{syntax clasimpmod} modifiers are
   accepted (as for @{method auto}).
 
-  \item @{method (HOL) induction_schema} derives user-specified
+  \<^descr> @{method (HOL) induction_schema} derives user-specified
   induction rules from well-founded induction and completeness of
   patterns. This factors out some operations that are done internally
   by the function package and makes them available separately. See
@@ -596,7 +596,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
+  \<^descr> @{command (HOL) "partial_function"}~@{text "(mode)"} defines
   recursive functions based on fixpoints in complete partial
   orders. No termination proof is required from the user or
   constructed internally. Instead, the possibility of non-termination
@@ -621,14 +621,14 @@
 
   \begin{description}
 
-  \item @{text option} defines functions that map into the @{type
+  \<^descr> @{text option} defines functions that map into the @{type
   option} type. Here, the value @{term None} is used to model a
   non-terminating computation. Monotonicity requires that if @{term
   None} is returned by a recursive call, then the overall result must
   also be @{term None}. This is best achieved through the use of the
   monadic operator @{const "Option.bind"}.
 
-  \item @{text tailrec} defines functions with an arbitrary result
+  \<^descr> @{text tailrec} defines functions with an arbitrary result
   type and uses the slightly degenerated partial order where @{term
   "undefined"} is the bottom element.  Now, monotonicity requires that
   if @{term undefined} is returned by a recursive call, then the
@@ -642,7 +642,7 @@
   Experienced users may define new modes by instantiating the locale
   @{const "partial_function_definitions"} appropriately.
 
-  \item @{attribute (HOL) partial_function_mono} declares rules for
+  \<^descr> @{attribute (HOL) partial_function_mono} declares rules for
   use in the internal monotonicity proofs of partial function
   definitions.
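
For illustration (editorial sketch, assuming Isabelle/HOL's Option theory), a partial function in option mode, threading recursive calls through @{const Option.bind}:

  partial_function (option) collatz :: "nat \<Rightarrow> nat list option" where
    "collatz n =
      (if n \<le> 1 then Some [n]
       else if even n
       then Option.bind (collatz (n div 2)) (\<lambda>ns. Some (n # ns))
       else Option.bind (collatz (3 * n + 1)) (\<lambda>ns. Some (n # ns)))"

No termination proof is required; the result is None exactly where the underlying recursion would not terminate.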
 
@@ -673,7 +673,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "recdef"} defines general well-founded
+  \<^descr> @{command (HOL) "recdef"} defines general well-founded
   recursive functions (using the TFL package), see also
   @{cite "isabelle-HOL"}.  The ``@{text "(permissive)"}'' option tells
   TFL to recover from failed proof attempts, returning unfinished
@@ -727,14 +727,14 @@
 
   \begin{description}
 
-  \item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
+  \<^descr> @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
   associates variants with an existing constant.
 
-  \item @{command "no_adhoc_overloading"} is similar to
+  \<^descr> @{command "no_adhoc_overloading"} is similar to
   @{command "adhoc_overloading"}, but removes the specified variants
   from the present context.
 
-  \item @{attribute "show_variants"} controls printing of variants
+  \<^descr> @{attribute "show_variants"} controls printing of variants
   of overloaded constants. If enabled, the internally used variants
   are printed instead of their respective overloaded constants. This
   is occasionally useful to check whether the system agrees with a
@@ -760,7 +760,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
+  \<^descr> @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
   goal stating the existence of terms with the properties specified to
   hold for the constants given in @{text decls}.  After finishing the
   proof, the theory will be augmented with definitions for the given
@@ -797,10 +797,10 @@
 
   \begin{description}
 
-  \item @{command (HOL) "old_datatype"} defines old-style inductive
+  \<^descr> @{command (HOL) "old_datatype"} defines old-style inductive
   datatypes in HOL.
 
-  \item @{command (HOL) "old_rep_datatype"} represents existing types as
+  \<^descr> @{command (HOL) "old_rep_datatype"} represents existing types as
   old-style datatypes.
 
   \end{description}
@@ -945,7 +945,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
+  \<^descr> @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
   \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
   derived from the optional parent record @{text "\<tau>"} by adding new
   field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
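
For illustration (editorial sketch), a small record with a derived update operation:

  record point =
    x :: int
    y :: int

  definition origin :: point where
    "origin = \<lparr>x = 0, y = 0\<rparr>"

  definition shift_x :: "point \<Rightarrow> int \<Rightarrow> point" where
    "shift_x p d = p\<lparr>x := x p + d\<rparr>"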
@@ -1179,7 +1179,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"} defines a
+  \<^descr> @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"} defines a
   new type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} from the set @{text A} over an existing
   type. The set @{text A} may contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"}
   as specified on the LHS, but no term variables. Non-emptiness of @{text A}
@@ -1274,7 +1274,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "functor"}~@{text "prefix: m"} allows to prove and
+  \<^descr> @{command (HOL) "functor"}~@{text "prefix: m"} allows to prove and
   register properties about the functorial structure of type constructors.
   These properties then can be used by other packages to deal with those
   type constructors in certain type constructions. Characteristic theorems
@@ -1332,7 +1332,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "quotient_type"} defines a new quotient type @{text
+  \<^descr> @{command (HOL) "quotient_type"} defines a new quotient type @{text
   \<tau>}. The injection from a quotient type to a raw type is called @{text
   rep_\<tau>}, its inverse @{text abs_\<tau>} unless explicit @{keyword (HOL)
   "morphisms"} specification provides alternative names. @{command (HOL)
@@ -1407,7 +1407,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package to work
+  \<^descr> @{command (HOL) "setup_lifting"} Sets up the Lifting package to work
   with a user-defined type. The command supports two modes.
 
   \begin{enumerate}
@@ -1446,7 +1446,7 @@
   by @{command (HOL) "lift_definition"}, the Lifting package proves and
   registers a code equation (if there is one) for the new constant.
 
-  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL)
+  \<^descr> @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL)
   "is"} @{text t} Defines a new function @{text f} with an abstract type
   @{text \<tau>} in terms of a corresponding operation @{text t} on a
   representation type. More formally, if @{text "t :: \<sigma>"}, then the command
@@ -1490,12 +1490,14 @@
 
   \begin{description}
 
-  \item @{text "\<tau>"} is a type variable \item @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"},
+  \<^descr> @{text "\<tau>"} is a type variable
+
+  \<^descr> @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"},
   where @{text "\<kappa>"} is an abstract type constructor and @{text "\<tau>\<^sub>1 \<dots> \<tau>\<^sub>n"}
   do not contain abstract types (i.e.\ @{typ "int dlist"} is allowed whereas
   @{typ "int dlist dlist"} not)
 
-  \item @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"}, @{text "\<kappa>"} is a type constructor that
+  \<^descr> @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"}, @{text "\<kappa>"} is a type constructor that
   was defined as a (co)datatype whose constructor argument types do not
   contain either non-free datatypes or the function type.
 
@@ -1505,7 +1507,7 @@
   @{command (HOL) "lift_definition"} uses @{text f.abs_eq} as a code
   equation.
 
-  \item @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
+  \<^descr> @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
   These two commands serve for storing and deleting the set-up of the
   Lifting package and corresponding transfer rules defined by this package.
  This is useful for hiding type construction details of an abstract type
@@ -1528,19 +1530,19 @@
   including a bundle (@{command "include"}, @{keyword "includes"} and
   @{command "including"}).
 
-  \item @{command (HOL) "print_quot_maps"} prints stored quotient map
+  \<^descr> @{command (HOL) "print_quot_maps"} prints stored quotient map
   theorems.
 
-  \item @{command (HOL) "print_quotients"} prints stored quotient theorems.
-
-  \item @{attribute (HOL) quot_map} registers a quotient map theorem, a
+  \<^descr> @{command (HOL) "print_quotients"} prints stored quotient theorems.
+
+  \<^descr> @{attribute (HOL) quot_map} registers a quotient map theorem, a
   theorem showing how to ``lift'' quotients over type constructors. E.g.\
   @{term "Quotient R Abs Rep T \<Longrightarrow> Quotient (rel_set R) (image Abs) (image
   Rep) (rel_set T)"}. For examples see @{file "~~/src/HOL/Lifting_Set.thy"}
   or @{file "~~/src/HOL/Lifting.thy"}. This property is proved automatically
   if the involved type is BNF without dead variables.
 
-  \item @{attribute (HOL) relator_eq_onp} registers a theorem that shows
+  \<^descr> @{attribute (HOL) relator_eq_onp} registers a theorem that shows
   that a relator applied to an equality restricted by a predicate @{term P}
   (i.e.\ @{term "eq_onp P"}) is equal to a predicator applied to the @{term
   P}. The combinator @{const eq_onp} is used for internal encoding of proper
@@ -1550,7 +1552,7 @@
   This property is proved automatically if the involved type is BNF without
   dead variables.
 
-  \item @{attribute (HOL) "relator_mono"} registers a property describing a
+  \<^descr> @{attribute (HOL) "relator_mono"} registers a property describing a
   monotonicity of a relator. E.g.\ @{prop "A \<le> B \<Longrightarrow> rel_set A \<le> rel_set B"}.
   This property is needed for proving a stronger transfer rule in
   @{command_def (HOL) "lift_definition"} when a parametricity theorem for
@@ -1559,7 +1561,7 @@
   "~~/src/HOL/Lifting.thy"}. This property is proved automatically if the
   involved type is BNF without dead variables.
 
-  \item @{attribute (HOL) "relator_distr"} registers a property describing a
+  \<^descr> @{attribute (HOL) "relator_distr"} registers a property describing a
   distributivity of the relation composition and a relator. E.g.\ @{text
   "rel_set R \<circ>\<circ> rel_set S = rel_set (R \<circ>\<circ> S)"}. This property is needed for
   proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
@@ -1573,14 +1575,14 @@
   property is proved automatically if the involved type is BNF without dead
   variables.
 
-  \item @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
+  \<^descr> @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
  from the Lifting infrastructure and thus de-registers the corresponding
  quotient. This effectively means that @{command (HOL) lift_definition}
  will not do any lifting for the corresponding type. This attribute is
  mainly intended for low-level manipulation of the set-up of the Lifting package
   because @{command (HOL) lifting_forget} is preferred for normal usage.
 
-  \item @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def
+  \<^descr> @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def
   pcr_cr_eq_thm"} registers the Quotient theorem @{text Quotient_thm} in the
   Lifting infrastructure and thus sets up lifting for an abstract type
   @{text \<tau>} (that is defined by @{text Quotient_thm}). Optional theorems
@@ -1593,7 +1595,7 @@
   together with the commands @{command (HOL) lifting_forget} and @{command
   (HOL) lifting_update} is preferred for normal usage.
 
-  \item Integration with the BNF package @{cite "isabelle-datatypes"}: As
+  \<^descr> Integration with the BNF package @{cite "isabelle-datatypes"}: As
   already mentioned, the theorems that are registered by the following
   attributes are proved and registered automatically if the involved type is
   BNF without dead variables: @{attribute (HOL) quot_map}, @{attribute (HOL)
@@ -1628,25 +1630,25 @@
 
   \begin{description}
 
-  \item @{method (HOL) "transfer"} method replaces the current subgoal with
+  \<^descr> @{method (HOL) "transfer"} method replaces the current subgoal with
   a logically equivalent one that uses different types and constants. The
   replacement of types and constants is guided by the database of transfer
   rules. Goals are generalized over all free variables by default; this is
   necessary for variables whose types change, but can be overridden for
   specific variables with e.g. @{text "transfer fixing: x y z"}.
 
-  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL) transfer}
+  \<^descr> @{method (HOL) "transfer'"} is a variant of @{method (HOL) transfer}
   that allows replacing a subgoal with one that is logically stronger
   (rather than equivalent). For example, a subgoal involving equality on a
   quotient type could be replaced with a subgoal involving equality (instead
   of the corresponding equivalence relation) on the underlying raw type.
 
-  \item @{method (HOL) "transfer_prover"} method assists with proving a
+  \<^descr> @{method (HOL) "transfer_prover"} method assists with proving a
   transfer rule for a new constant, provided the constant is defined in
   terms of other constants that already have transfer rules. It should be
   applied after unfolding the constant definitions.
 
-  \item @{method (HOL) "transfer_start"}, @{method (HOL) "transfer_step"},
+  \<^descr> @{method (HOL) "transfer_start"}, @{method (HOL) "transfer_step"},
   @{method (HOL) "transfer_end"}, @{method (HOL) "transfer_prover_start"}
   and @{method (HOL) "transfer_prover_end"} methods are meant to be used
   for debugging of @{method (HOL) "transfer"} and @{method (HOL) "transfer_prover"},
@@ -1657,17 +1659,17 @@
   @{method (HOL) "transfer_step"}+, @{method (HOL) "transfer_prover_end"}).
   For usage examples see @{file "~~/src/HOL/ex/Transfer_Debug.thy"} 
 
-  \item @{attribute (HOL) "untransferred"} proves the same equivalent
+  \<^descr> @{attribute (HOL) "untransferred"} proves the same equivalent
   theorem as @{method (HOL) "transfer"} internally does.
 
-  \item @{attribute (HOL) Transfer.transferred} works in the opposite
+  \<^descr> @{attribute (HOL) Transfer.transferred} works in the opposite
   direction to @{method (HOL) "transfer'"}. E.g.\ given the transfer
   relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding transfer rules and
   the theorem @{text "\<forall>x::int \<in> {0..}. x < x + 1"}, the attribute would
   prove @{text "\<forall>n::nat. n < n + 1"}. The attribute is still in an
   experimental phase of development.
 
-  \item @{attribute (HOL) "transfer_rule"} attribute maintains a collection
+  \<^descr> @{attribute (HOL) "transfer_rule"} attribute maintains a collection
   of transfer rules, which relate constants at two different types. Typical
   transfer rules may relate different type instances of the same polymorphic
   constant, or they may relate an operation on a raw type to a corresponding
@@ -1687,14 +1689,14 @@
   a relator is proved automatically if the involved type is a BNF @{cite
   "isabelle-datatypes"} without dead variables.
 
-  \item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a
+  \<^descr> @{attribute (HOL) "transfer_domain_rule"} attribute maintains a
   collection of rules, which specify a domain of a transfer relation by a
   predicate. E.g.\ given the transfer relation @{text "ZN x n \<equiv> (x = int
   n)"}, one can register the following transfer domain rule: @{text "Domainp
   ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce more readable
   transferred goals, e.g.\ when quantifiers are transferred.
 
-  \item @{attribute (HOL) relator_eq} attribute collects identity laws for
+  \<^descr> @{attribute (HOL) relator_eq} attribute collects identity laws for
   relators of various type constructors, e.g. @{term "rel_set (op =) = (op
   =)"}. The @{method (HOL) transfer} method uses these lemmas to infer
   transfer rules for non-polymorphic constants on the fly. For examples see
@@ -1702,7 +1704,7 @@
   This property is proved automatically if the involved type is a BNF without
   dead variables.
 
-  \item @{attribute_def (HOL) "relator_domain"} attribute collects rules
+  \<^descr> @{attribute_def (HOL) "relator_domain"} attribute collects rules
   describing domains of relators by predicators. E.g.\ @{term "Domainp
   (rel_set T) = (\<lambda>A. Ball A (Domainp T))"}. This allows the package to lift
   transfer domain rules through type constructors. For examples see @{file
@@ -1753,16 +1755,16 @@
 
   \begin{description}
 
-  \item @{command (HOL) "quotient_definition"} defines a constant on the
+  \<^descr> @{command (HOL) "quotient_definition"} defines a constant on the
   quotient type.
 
-  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map functions.
-
-  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
-
-  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
-
-  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
+  \<^descr> @{command (HOL) "print_quotmapsQ3"} prints quotient map functions.
+
+  \<^descr> @{command (HOL) "print_quotientsQ3"} prints quotients.
+
+  \<^descr> @{command (HOL) "print_quotconsts"} prints quotient constants.
+
+  \<^descr> @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
   methods match the current goal with the given raw theorem to be lifted,
   producing three new subgoals: regularization, injection and cleaning
   subgoals. @{method (HOL) "lifting"} tries to apply the heuristics for
@@ -1770,7 +1772,7 @@
   unsolved by the heuristics to the user as opposed to @{method (HOL)
   "lifting_setup"} which leaves the three subgoals unsolved.
 
-  \item @{method (HOL) "descending"} and @{method (HOL) "descending_setup"}
+  \<^descr> @{method (HOL) "descending"} and @{method (HOL) "descending_setup"}
   try to guess a raw statement that would lift to the current subgoal. Such
   a statement is assumed as a new subgoal and @{method (HOL) "descending"}
   continues in the same way as @{method (HOL) "lifting"} does. @{method
@@ -1778,34 +1780,34 @@
   and cleaning subgoals with the analogous method @{method (HOL)
   "descending_setup"} which leaves the four unsolved subgoals.
 
-  \item @{method (HOL) "partiality_descending"} finds the regularized
+  \<^descr> @{method (HOL) "partiality_descending"} finds the regularized
   theorem that would lift to the current subgoal, lifts it and leaves it as
   a subgoal. This method can be used with partial equivalence quotients
   where the non-regularized statements would not be true. @{method (HOL)
   "partiality_descending_setup"} leaves the injection and cleaning subgoals
   unchanged.
 
-  \item @{method (HOL) "regularize"} applies the regularization heuristics
+  \<^descr> @{method (HOL) "regularize"} applies the regularization heuristics
   to the current subgoal.
 
-  \item @{method (HOL) "injection"} applies the injection heuristics to the
+  \<^descr> @{method (HOL) "injection"} applies the injection heuristics to the
   current goal using the stored quotient respectfulness theorems.
 
-  \item @{method (HOL) "cleaning"} applies the injection cleaning heuristics
+  \<^descr> @{method (HOL) "cleaning"} applies the injection cleaning heuristics
   to the current subgoal using the stored quotient preservation theorems.
 
-  \item @{attribute (HOL) quot_lifted} attribute tries to automatically
+  \<^descr> @{attribute (HOL) quot_lifted} attribute tries to automatically
   transport the theorem to the quotient type. The attribute uses all the
   defined quotient types and quotient constants, often producing undesired
   results or theorems that cannot be lifted.
 
-  \item @{attribute (HOL) quot_respect} and @{attribute (HOL) quot_preserve}
+  \<^descr> @{attribute (HOL) quot_respect} and @{attribute (HOL) quot_preserve}
   attributes declare a theorem as a respectfulness and preservation theorem
   respectively. These are stored in the local theory store and used by the
   @{method (HOL) "injection"} and @{method (HOL) "cleaning"} methods
   respectively.
 
-  \item @{attribute (HOL) quot_thm} declares that a certain theorem is a
+  \<^descr> @{attribute (HOL) quot_thm} declares that a certain theorem is a
   quotient extension theorem. Quotient extension theorems allow for
   quotienting inside container types. Given a polymorphic type that serves
   as a container, a map function defined for this container using @{command
@@ -1855,28 +1857,28 @@
 
   \begin{description}
 
-  \item @{command (HOL) "solve_direct"} checks whether the current
+  \<^descr> @{command (HOL) "solve_direct"} checks whether the current
   subgoals can be solved directly by an existing theorem. Duplicate
   lemmas can be detected in this way.
 
-  \item @{command (HOL) "try0"} attempts to prove a subgoal
+  \<^descr> @{command (HOL) "try0"} attempts to prove a subgoal
   using a combination of standard proof methods (@{method auto},
   @{method simp}, @{method blast}, etc.).  Additional facts supplied
   via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
   "dest:"} are passed to the appropriate proof methods.
 
-  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
+  \<^descr> @{command (HOL) "try"} attempts to prove or disprove a subgoal
   using a combination of provers and disprovers (@{command (HOL)
   "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
   "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
   "nitpick"}).
 
-  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
+  \<^descr> @{command (HOL) "sledgehammer"} attempts to prove a subgoal
   using external automatic provers (resolution provers and SMT
   solvers). See the Sledgehammer manual @{cite "isabelle-sledgehammer"}
   for details.
 
-  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
+  \<^descr> @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
   "sledgehammer"} configuration options persistently.
 
   \end{description}
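
  For example, both tools can be pointed at a pending goal; the following is
  only a sketch of a typical interaction (the lemma itself is incidental),
  since these diagnostic commands merely report their findings without
  changing the proof state:

    lemma "rev (rev xs) = xs"
      try0
      sledgehammer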
@@ -1929,7 +1931,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
+  \<^descr> @{command (HOL) "value"}~@{text t} evaluates and prints a
   term; optionally @{text modes} can be specified, which are appended
   to the current print mode; see \secref{sec:print-modes}.
   Evaluation is tried first using ML, falling
@@ -1940,13 +1942,13 @@
   using the simplifier, @{text nbe} for \emph{normalization by
   evaluation} and \emph{code} for code generation in SML.
 
-  \item @{command (HOL) "values"}~@{text t} enumerates a set
+  \<^descr> @{command (HOL) "values"}~@{text t} enumerates a set
   comprehension by evaluation and prints its values up to the given
   number of solutions; optionally @{text modes} can be specified,
   which are appended to the current print mode; see
   \secref{sec:print-modes}.
 
-  \item @{command (HOL) "quickcheck"} tests the current goal for
+  \<^descr> @{command (HOL) "quickcheck"} tests the current goal for
   counterexamples using a series of assignments for its free
   variables; by default the first subgoal is tested, another can be
   selected explicitly using an optional goal index.  Assignments can
@@ -1958,7 +1960,7 @@
 
     \begin{description}
 
-    \item[@{text tester}] specifies which testing approach to apply.
+    \<^descr>[@{text tester}] specifies which testing approach to apply.
     There are three testers, @{text exhaustive}, @{text random}, and
     @{text narrowing}.  An unknown configuration option is treated as
     an argument to tester, making @{text "tester ="} optional.  When
@@ -1969,31 +1971,31 @@
     quickcheck_random_active}, @{attribute
     quickcheck_narrowing_active} are set to true.
 
-    \item[@{text size}] specifies the maximum size of the search space
+    \<^descr>[@{text size}] specifies the maximum size of the search space
     for assignment values.
 
-    \item[@{text genuine_only}] sets quickcheck only to return genuine
+    \<^descr>[@{text genuine_only}] sets quickcheck only to return genuine
     counterexample, but not potentially spurious counterexamples due
     to underspecified functions.
 
-    \item[@{text abort_potential}] sets quickcheck to abort once it
+    \<^descr>[@{text abort_potential}] sets quickcheck to abort once it
     found a potentially spurious counterexample and to not continue
     to search for a further genuine counterexample.
     For this option to be effective, the @{text genuine_only} option
     must be set to false.
 
-    \item[@{text eval}] takes a term or a list of terms and evaluates
+    \<^descr>[@{text eval}] takes a term or a list of terms and evaluates
     these terms under the variable assignment found by quickcheck.
     This option is currently only supported by the default
     (exhaustive) tester.
 
-    \item[@{text iterations}] sets how many sets of assignments are
+    \<^descr>[@{text iterations}] sets how many sets of assignments are
     generated for each particular size.
 
-    \item[@{text no_assms}] specifies whether assumptions in
+    \<^descr>[@{text no_assms}] specifies whether assumptions in
     structured proofs should be ignored.
 
-    \item[@{text locale}] specifies how to process conjectures in
+    \<^descr>[@{text locale}] specifies how to process conjectures in
     a locale context, i.e.\ they can be interpreted or expanded.
     The option is a whitespace-separated list of the two words
     @{text interpret} and @{text expand}. The list determines the
@@ -2002,25 +2004,25 @@
     The option is only provided as attribute declaration, but not
     as parameter to the command.
 
-    \item[@{text timeout}] sets the time limit in seconds.
-
-    \item[@{text default_type}] sets the type(s) generally used to
+    \<^descr>[@{text timeout}] sets the time limit in seconds.
+
+    \<^descr>[@{text default_type}] sets the type(s) generally used to
     instantiate type variables.
 
-    \item[@{text report}] if set quickcheck reports how many tests
+    \<^descr>[@{text report}] if set, quickcheck reports how many tests
     fulfilled the preconditions.
 
-    \item[@{text use_subtype}] if set quickcheck automatically lifts
+    \<^descr>[@{text use_subtype}] if set, quickcheck automatically lifts
     conjectures to registered subtypes if possible, and tests the
     lifted conjecture.
 
-    \item[@{text quiet}] if set quickcheck does not output anything
+    \<^descr>[@{text quiet}] if set, quickcheck does not output anything
     while testing.
 
-    \item[@{text verbose}] if set quickcheck informs about the current
+    \<^descr>[@{text verbose}] if set, quickcheck informs about the current
     size and cardinality while testing.
 
-    \item[@{text expect}] can be used to check if the user's
+    \<^descr>[@{text expect}] can be used to check if the user's
     expectation was met (@{text no_expectation}, @{text
     no_counterexample}, or @{text counterexample}).
 
@@ -2030,8 +2032,10 @@
 
   Using the following type classes, the testers generate values and convert
   them back into Isabelle terms for displaying counterexamples.
+
     \begin{description}
-    \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
+
+    \<^descr>[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
       and @{class full_exhaustive} implement the testing. They take a
       testing function as a parameter, which takes a value of type @{typ "'a"}
       and optionally produces a counterexample, and a size parameter for the test values.
@@ -2043,13 +2047,13 @@
       testing function on all values up to the given size and stops as soon
       as a counterexample is found.
 
-    \item[@{text random}] The operation @{const Quickcheck_Random.random}
+    \<^descr>[@{text random}] The operation @{const Quickcheck_Random.random}
       of the type class @{class random} generates a pseudo-random
       value of the given size and a lazy term reconstruction of the value
       in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
       is defined in theory @{theory Random}.
 
-    \item[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"}
+    \<^descr>[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"}
       using the type classes @{class narrowing} and @{class partial_term_of}.
       Variables in the current goal are initially represented as symbolic variables.
       If the execution of the goal tries to evaluate one of them, the test engine
@@ -2086,22 +2090,22 @@
       but it does not ensure consistency with @{const narrowing}.
     \end{description}
 
-  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
+  \<^descr> @{command (HOL) "quickcheck_params"} changes @{command (HOL)
   "quickcheck"} configuration options persistently.
 
-  \item @{command (HOL) "quickcheck_generator"} creates random and
+  \<^descr> @{command (HOL) "quickcheck_generator"} creates random and
   exhaustive value generators for a given type and operations.  It
   generates values by using the operations as if they were
   constructors of that type.
 
-  \item @{command (HOL) "nitpick"} tests the current goal for
+  \<^descr> @{command (HOL) "nitpick"} tests the current goal for
   counterexamples using a reduction to first-order relational
   logic. See the Nitpick manual @{cite "isabelle-nitpick"} for details.
 
-  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
+  \<^descr> @{command (HOL) "nitpick_params"} changes @{command (HOL)
   "nitpick"} configuration options persistently.
 
-  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
+  \<^descr> @{command (HOL) "find_unused_assms"} finds potentially superfluous
   assumptions in theorems using quickcheck.
   It takes the theory name to be checked for superfluous assumptions as
   an optional argument. If not provided, it checks the current theory.
@@ -2140,18 +2144,18 @@
 
   \begin{description}
 
-  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
+  \<^descr> @{attribute (HOL) "coercion"}~@{text "f"} registers a new
   coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
   @{text "\<sigma>\<^sub>2"} are type constructors without arguments.  Coercions are
   composed by the inference algorithm if needed.  Note that the type
   inference algorithm is complete only if the registered coercions
   form a lattice.
 
-  \item @{attribute (HOL) "coercion_delete"}~@{text "f"} deletes a
+  \<^descr> @{attribute (HOL) "coercion_delete"}~@{text "f"} deletes a
   preceding declaration (using @{attribute (HOL) "coercion"}) of the
   function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} as a coercion.
 
-  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
+  \<^descr> @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
   new map function to lift coercions through type constructors. The
   function @{text "map"} must conform to the following type pattern
 
@@ -2165,7 +2169,7 @@
   overwrites any existing map function for this particular type
   constructor.
 
-  \item @{attribute (HOL) "coercion_args"} can be used to disallow
+  \<^descr> @{attribute (HOL) "coercion_args"} can be used to disallow
   coercions to be inserted in certain positions in a term. For example,
   given the constant @{text "c :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2 \<Rightarrow> \<sigma>\<^sub>3 \<Rightarrow> \<sigma>\<^sub>4"} and the list
   of policies @{text "- + 0"} as arguments, coercions will not be
@@ -2180,7 +2184,7 @@
   insertion of coercions (see, for example, the setup for the case syntax
   in @{theory Ctr_Sugar}). 
 
-  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
+  \<^descr> @{attribute (HOL) "coercion_enabled"} enables the coercion
   inference algorithm.
 
   \end{description}
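
  A minimal sketch of these declarations in a hypothetical theory, using the
  standard embedding of natural numbers into integers as the coercion:

    declare [[coercion_enabled]]
    declare [[coercion "int :: nat \<Rightarrow> int"]]
    declare [[coercion_map map]]

    term "(n :: nat) + (i :: int)"

  The final @{command "term"} only type-checks because the coercion @{text
  int} is inserted around @{text n}.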
@@ -2198,14 +2202,14 @@
 
   \begin{description}
 
-  \item @{method (HOL) arith} decides linear arithmetic problems (on
+  \<^descr> @{method (HOL) arith} decides linear arithmetic problems (on
   types @{text nat}, @{text int}, @{text real}).  Any current facts
   are inserted into the goal before running the procedure.
 
-  \item @{attribute (HOL) arith} declares facts that are supplied to
+  \<^descr> @{attribute (HOL) arith} declares facts that are supplied to
   the arithmetic provers implicitly.
 
-  \item @{attribute (HOL) arith_split} attribute declares case split
+  \<^descr> @{attribute (HOL) arith_split} attribute declares case split
   rules to be expanded before @{method (HOL) arith} is invoked.
 
   \end{description}
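
  For instance, a simple linear fact over type @{text nat} is proved
  directly:

    lemma "(n::nat) < 2 * n + 1"
      by arith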
@@ -2228,7 +2232,7 @@
 
   \begin{description}
 
-  \item @{method (HOL) iprover} performs intuitionistic proof search,
+  \<^descr> @{method (HOL) iprover} performs intuitionistic proof search,
   depending on specifically declared rules from the context, or given
   as explicit arguments.  Chained facts are inserted into the goal
   before commencing proof search.
@@ -2264,11 +2268,11 @@
 
   \begin{description}
 
-  \item @{method (HOL) meson} implements Loveland's model elimination
+  \<^descr> @{method (HOL) meson} implements Loveland's model elimination
   procedure @{cite "loveland-78"}.  See @{file
   "~~/src/HOL/ex/Meson_Test.thy"} for examples.
 
-  \item @{method (HOL) metis} combines ordered resolution and ordered
+  \<^descr> @{method (HOL) metis} combines ordered resolution and ordered
   paramodulation to find first-order (or mildly higher-order) proofs.
   The first optional argument specifies a type encoding; see the
   Sledgehammer manual @{cite "isabelle-sledgehammer"} for details.  The
@@ -2297,7 +2301,7 @@
 
   \begin{description}
 
-  \item @{method (HOL) algebra} performs algebraic reasoning via
+  \<^descr> @{method (HOL) algebra} performs algebraic reasoning via
   Gr\"obner bases, see also @{cite "Chaieb-Wenzel:2007"} and
   @{cite \<open>\S3.2\<close> "Chaieb-thesis"}. The method deals with two main
   classes of problems:
@@ -2334,7 +2338,7 @@
   This acts like declarations for the Simplifier
   (\secref{sec:simplifier}) on a private simpset for this tool.
 
-  \item @{attribute algebra} (as attribute) manages the default
+  \<^descr> @{attribute algebra} (as attribute) manages the default
   collection of pre-simplification rules of the above proof method.
 
   \end{description}
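
  A typical goal for this method is a polynomial identity over a commutative
  ring; the following is only a sketch (the same fact could also be proved by
  other means):

    lemma
      fixes x y :: int
      shows "(x + y) * (x - y) = x * x - y * y"
      by algebra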
@@ -2378,7 +2382,7 @@
 
   \begin{description}
 
-  \item @{method (HOL) coherent} solves problems of \emph{Coherent
+  \<^descr> @{method (HOL) coherent} solves problems of \emph{Coherent
   Logic} @{cite "Bezem-Coquand:2005"}, which covers applications in
   confluence theory, lattice theory and projective geometry.  See
   @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
@@ -2415,7 +2419,7 @@
 
   \begin{description}
 
-  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit
+  \<^descr> @{method (HOL) case_tac} and @{method (HOL) induct_tac} allow
   reasoning about inductive types.  Rules are selected according to
   the declarations by the @{attribute cases} and @{attribute induct}
   attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
@@ -2429,7 +2433,7 @@
   statements, only the compact object-logic conclusion of the subgoal
   being addressed.
 
-  \item @{method (HOL) ind_cases} and @{command (HOL)
+  \<^descr> @{method (HOL) ind_cases} and @{command (HOL)
   "inductive_cases"} provide an interface to the internal @{ML_text
   mk_cases} operation.  Rules are simplified in an unrestricted
   forward manner.
@@ -2459,7 +2463,7 @@
 
   \begin{description}
 
-  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
+  \<^descr> @{attribute (HOL) split_format}\ @{text "(complete)"} causes
   arguments in function applications to be represented canonically
   according to their tuple type structure.
 
@@ -2599,7 +2603,7 @@
 
   \begin{description}
 
-  \item @{command (HOL) "export_code"} generates code for a given list of
+  \<^descr> @{command (HOL) "export_code"} generates code for a given list of
   constants in the specified target language(s). If no serialization
   instruction is given, only abstract code is generated internally.
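
  For example, code for an existing library constant might be exported as
  follows; the module name and target locations are made up for illustration:

    export_code rev in SML module_name Example file "examples/example.ML"
    export_code rev in Haskell module_name Example file "examples/"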
 
@@ -2627,7 +2631,7 @@
   "deriving (Read, Show)"}'' clause to each appropriate datatype
   declaration.
 
-  \item @{attribute (HOL) code} declare code equations for code generation.
+  \<^descr> @{attribute (HOL) code} declares code equations for code generation.
   Variant @{text "code equation"} declares a conventional equation as code
   equation. Variants @{text "code abstype"} and @{text "code abstract"}
   declare abstract datatype certificates or code equations on abstract
@@ -2645,49 +2649,49 @@
   Usually packages introducing code equations provide a reasonable default
   setup for selection.
 
-  \item @{command (HOL) "code_datatype"} specifies a constructor set for a
+  \<^descr> @{command (HOL) "code_datatype"} specifies a constructor set for a
   logical type.
 
-  \item @{command (HOL) "print_codesetup"} gives an overview on selected
+  \<^descr> @{command (HOL) "print_codesetup"} gives an overview on selected
   code equations and code generator datatypes.
 
-  \item @{attribute (HOL) code_unfold} declares (or with option ``@{text
+  \<^descr> @{attribute (HOL) code_unfold} declares (or with option ``@{text
   "del"}'' removes) theorems which during preprocessing are applied as
   rewrite rules to any code equation or evaluation input.
 
-  \item @{attribute (HOL) code_post} declares (or with option ``@{text
+  \<^descr> @{attribute (HOL) code_post} declares (or with option ``@{text
   "del"}'' removes) theorems which are applied as rewrite rules to any
   result of an evaluation.
 
-  \item @{attribute (HOL) code_abbrev} declares (or with option ``@{text
+  \<^descr> @{attribute (HOL) code_abbrev} declares (or with option ``@{text
   "del"}'' removes) equations which are applied as rewrite rules to any
   result of an evaluation and symmetrically during preprocessing to any code
   equation or evaluation input.
 
-  \item @{command (HOL) "print_codeproc"} prints the setup of the code
+  \<^descr> @{command (HOL) "print_codeproc"} prints the setup of the code
   generator preprocessor.
 
-  \item @{command (HOL) "code_thms"} prints a list of theorems representing
+  \<^descr> @{command (HOL) "code_thms"} prints a list of theorems representing
   the corresponding program containing all given constants after
   preprocessing.
 
-  \item @{command (HOL) "code_deps"} visualizes dependencies of theorems
+  \<^descr> @{command (HOL) "code_deps"} visualizes dependencies of theorems
   representing the corresponding program containing all given constants
   after preprocessing.
 
-  \item @{command (HOL) "code_reserved"} declares a list of names as
+  \<^descr> @{command (HOL) "code_reserved"} declares a list of names as
   reserved for a given target, preventing them from being shadowed by any generated
   code.
 
-  \item @{command (HOL) "code_printing"} associates a series of symbols
+  \<^descr> @{command (HOL) "code_printing"} associates a series of symbols
   (constants, type constructors, classes, class relations, instances, module
   names) with target-specific serializations; omitting a serialization
   deletes an existing serialization.
 
-  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism to
+  \<^descr> @{command (HOL) "code_monad"} provides an auxiliary mechanism to
   generate monadic code for Haskell.
 
-  \item @{command (HOL) "code_identifier"} associates a a series of symbols
+  \<^descr> @{command (HOL) "code_identifier"} associates a a series of symbols
   (constants, type constructors, classes, class relations, instances, module
   names) with target-specific hints as to how these symbols shall be named. These
   hints gain precedence over names for symbols with no hints at all.
@@ -2696,7 +2700,7 @@
   identifiers in compound statements like type classes or datatypes are
   still the same.
 
-  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
+  \<^descr> @{command (HOL) "code_reflect"} without a ``@{text "file"}''
   argument compiles code into the system runtime environment and modifies
   the code generator setup so that future invocations of system runtime code
   generation referring to one of the ``@{text "datatypes"}'' or ``@{text
@@ -2704,7 +2708,7 @@
   "file"}'' argument, the corresponding code is generated into that
   specified file without modifying the code generator setup.
 
-  \item @{command (HOL) "code_pred"} creates code equations for a predicate
+  \<^descr> @{command (HOL) "code_pred"} creates code equations for a predicate
   given a set of introduction rules. Optional mode annotations determine
   which arguments are supposed to be input or output. If alternative
   introduction rules are declared, one must prove a corresponding
--- a/src/Doc/Isar_Ref/Inner_Syntax.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Inner_Syntax.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -64,41 +64,41 @@
 
   \begin{description}
 
-  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
+  \<^descr> @{command "typ"}~@{text \<tau>} reads and prints a type expression
   according to the current context.
 
-  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
+  \<^descr> @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
   determine the most general way to make @{text "\<tau>"} conform to sort
   @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
   belongs to that sort.  Dummy type parameters ``@{text "_"}''
   (underscore) are assigned to fresh type variables with most general
   sorts, according to the principles of type-inference.
 
-  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
+  \<^descr> @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
   read, type-check and print terms or propositions according to the
   current theory or proof context; the inferred type of @{text t} is
   output as well.  Note that these commands are also useful in
   inspecting the current environment of term abbreviations.
 
-  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
+  \<^descr> @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
   theorems from the current theory or proof context.  Note that any
   attributes included in the theorem specifications are applied to a
   temporary context derived from the current theory or proof; the
   result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
   \<dots>, a\<^sub>n"} do not have any permanent effect.
 
-  \item @{command "prf"} displays the (compact) proof term of the
+  \<^descr> @{command "prf"} displays the (compact) proof term of the
   current proof state (if present), or of the given theorems. Note
   that this requires proof terms to be switched on for the current
   object logic (see the ``Proof terms'' section of the Isabelle
   reference manual for information on how to do this).
 
-  \item @{command "full_prf"} is like @{command "prf"}, but displays
+  \<^descr> @{command "full_prf"} is like @{command "prf"}, but displays
   the full proof term, i.e.\ also displays information omitted in the
   compact proof term, which is denoted by ``@{text _}'' placeholders
   there.
 
-  \item @{command "print_state"} prints the current proof state (if
+  \<^descr> @{command "print_state"} prints the current proof state (if
   present), including current facts and goals.
 
   \end{description}
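
  Typical uses of these diagnostic commands look as follows; the particular
  types, terms and theorems are incidental:

    typ "'a \<Rightarrow> 'b list"
    term "map f xs"
    prop "x = x"
    thm conjI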
@@ -146,14 +146,14 @@
 
   \begin{description}
 
-  \item @{attribute show_markup} controls direct inlining of markup
+  \<^descr> @{attribute show_markup} controls direct inlining of markup
   into the printed representation of formal entities --- notably type
   and sort constraints.  This enables Prover IDE users to retrieve
   that information via tooltips or popups while hovering with the
   mouse over the output window, for example.  Consequently, this
   option is enabled by default for Isabelle/jEdit.
 
-  \item @{attribute show_types} and @{attribute show_sorts} control
+  \<^descr> @{attribute show_types} and @{attribute show_sorts} control
   printing of type constraints for term variables, and sort
   constraints for type variables.  By default, neither of these are
   shown in output.  If @{attribute show_sorts} is enabled, types are
@@ -165,29 +165,29 @@
   inference rule fails to resolve with some goal, or why a rewrite
   rule does not apply as expected.
 
-  \item @{attribute show_consts} controls printing of types of
+  \<^descr> @{attribute show_consts} controls printing of types of
   constants when displaying a goal state.
 
   Note that the output can be enormous, because polymorphic constants
   often occur at several different type instances.
 
-  \item @{attribute show_abbrevs} controls folding of constant
+  \<^descr> @{attribute show_abbrevs} controls folding of constant
   abbreviations.
 
-  \item @{attribute show_brackets} controls bracketing in pretty
+  \<^descr> @{attribute show_brackets} controls bracketing in pretty
   printed output.  If enabled, all sub-expressions of the pretty
   printing tree will be parenthesized, even if this produces malformed
   term syntax!  This crude way of showing the internal structure of
   pretty printed entities may occasionally help to diagnose problems
   with operator priorities, for example.
 
-  \item @{attribute names_long}, @{attribute names_short}, and
+  \<^descr> @{attribute names_long}, @{attribute names_short}, and
   @{attribute names_unique} control the way of printing fully
   qualified internal names in external form.  See also
   \secref{sec:antiq} for the document antiquotation options of the
   same names.
 
-  \item @{attribute eta_contract} controls @{text "\<eta>"}-contracted
+  \<^descr> @{attribute eta_contract} controls @{text "\<eta>"}-contracted
   printing of terms.
 
   The @{text \<eta>}-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"},
@@ -207,15 +207,15 @@
   rewriting operate modulo @{text "\<alpha>\<beta>\<eta>"}-conversion, some other tools
   might look at terms more discretely.
 
-  \item @{attribute goals_limit} controls the maximum number of
+  \<^descr> @{attribute goals_limit} controls the maximum number of
   subgoals to be printed.
 
-  \item @{attribute show_main_goal} controls whether the main result
+  \<^descr> @{attribute show_main_goal} controls whether the main result
   to be proven should be displayed.  This information might be
   relevant for schematic goals, to inspect the current claim that has
   been synthesized so far.
 
-  \item @{attribute show_hyps} controls printing of implicit
+  \<^descr> @{attribute show_hyps} controls printing of implicit
   hypotheses of local facts.  Normally, only those hypotheses are
   displayed that are \emph{not} covered by the assumptions of the
   current context: this situation indicates a fault in some tool being
@@ -225,7 +225,7 @@
   can be enforced, which is occasionally useful for diagnostic
   purposes.
 
-  \item @{attribute show_tags} controls printing of extra annotations
+  \<^descr> @{attribute show_tags} controls printing of extra annotations
   within theorems, such as internal position information, or the case
   names being attached by the attribute @{attribute case_names}.
 
@@ -233,7 +233,7 @@
   attributes provide low-level access to the collection of tags
   associated with a theorem.
 
-  \item @{attribute show_question_marks} controls printing of question
+  \<^descr> @{attribute show_question_marks} controls printing of question
   marks for schematic variables, such as @{text ?x}.  Only the leading
   question mark is affected, the remaining text is unchanged
   (including proper markup for schematic variables that might be
@@ -259,12 +259,12 @@
 
   \begin{description}
 
-  \item @{ML "print_mode_value ()"} yields the list of currently
+  \<^descr> @{ML "print_mode_value ()"} yields the list of currently
   active print mode names.  This should be understood as symbolic
   representation of certain individual features for printing (with
   precedence from left to right).
 
-  \item @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
+  \<^descr> @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
   @{text "f x"} in an execution context where the print mode is
   prepended by the given @{text "modes"}.  This provides a thread-safe
   way to augment print modes.  It is also monotonic in the set of mode
@@ -379,7 +379,7 @@
 
   \begin{description}
 
-  \item @{text "d"} is a delimiter, namely a non-empty sequence of
+  \<^descr> @{text "d"} is a delimiter, namely a non-empty sequence of
   characters other than the following special characters:
 
   \<^medskip>
@@ -393,7 +393,7 @@
   \end{tabular}
   \<^medskip>
 
-  \item @{verbatim "'"} escapes the special meaning of these
+  \<^descr> @{verbatim "'"} escapes the special meaning of these
   meta-characters, producing a literal version of the following
   character, unless that is a blank.
 
@@ -401,26 +401,26 @@
   affecting printing, but input tokens may have additional white space
   here.
 
-  \item @{verbatim "_"} is an argument position, which stands for a
+  \<^descr> @{verbatim "_"} is an argument position, which stands for a
   certain syntactic category in the underlying grammar.
 
-  \item @{text "\<index>"} is an indexed argument position; this is the place
+  \<^descr> @{text "\<index>"} is an indexed argument position; this is the place
   where implicit structure arguments can be attached.
 
-  \item @{text "s"} is a non-empty sequence of spaces for printing.
+  \<^descr> @{text "s"} is a non-empty sequence of spaces for printing.
   This and the following specifications do not affect parsing at all.
 
-  \item @{verbatim "("}@{text n} opens a pretty printing block.  The
+  \<^descr> @{verbatim "("}@{text n} opens a pretty printing block.  The
   optional number specifies how much indentation to add when a line
   break occurs within the block.  If the parenthesis is not followed
   by digits, the indentation defaults to 0.  A block specified via
   @{verbatim "(00"} is unbreakable.
 
-  \item @{verbatim ")"} closes a pretty printing block.
+  \<^descr> @{verbatim ")"} closes a pretty printing block.
 
-  \item @{verbatim "//"} forces a line break.
+  \<^descr> @{verbatim "//"} forces a line break.
 
-  \item @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
+  \<^descr> @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
   stands for the string of spaces (zero or more) right after the
   slash.  These spaces are printed if the break is \emph{not} taken.
 
@@ -534,23 +534,23 @@
 
   \begin{description}
 
-  \item @{command "type_notation"}~@{text "c (mx)"} associates mixfix
+  \<^descr> @{command "type_notation"}~@{text "c (mx)"} associates mixfix
   syntax with an existing type constructor.  The arity of the
   constructor is retrieved from the context.
 
-  \item @{command "no_type_notation"} is similar to @{command
+  \<^descr> @{command "no_type_notation"} is similar to @{command
   "type_notation"}, but removes the specified syntax annotation from
   the present context.
 
-  \item @{command "notation"}~@{text "c (mx)"} associates mixfix
+  \<^descr> @{command "notation"}~@{text "c (mx)"} associates mixfix
   syntax with an existing constant or fixed variable.  The type
   declaration of the given entity is retrieved from the context.
 
-  \item @{command "no_notation"} is similar to @{command "notation"},
+  \<^descr> @{command "no_notation"} is similar to @{command "notation"},
   but removes the specified syntax annotation from the present
   context.
 
-  \item @{command "write"} is similar to @{command "notation"}, but
+  \<^descr> @{command "write"} is similar to @{command "notation"}, but
   works within an Isar proof body.
 
   \end{description}
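
  As a sketch, mixfix notation may be attached to and later removed from a
  constant as follows; the constant and its concrete syntax are made up for
  illustration:

    definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"
      where "xor a b \<longleftrightarrow> (a \<and> \<not> b) \<or> (\<not> a \<and> b)"

    notation xor  (infixl "[+]" 60)
    term "a [+] b [+] c"
    no_notation xor  (infixl "[+]" 60)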
@@ -773,15 +773,15 @@
 
   \begin{description}
 
-  \item @{syntax_ref (inner) any} denotes any term.
+  \<^descr> @{syntax_ref (inner) any} denotes any term.
 
-  \item @{syntax_ref (inner) prop} denotes meta-level propositions,
+  \<^descr> @{syntax_ref (inner) prop} denotes meta-level propositions,
   which are terms of type @{typ prop}.  The syntax of such formulae of
   the meta-logic is carefully distinguished from usual conventions for
   object-logics.  In particular, plain @{text "\<lambda>"}-term notation is
   \emph{not} recognized as @{syntax (inner) prop}.
 
-  \item @{syntax_ref (inner) aprop} denotes atomic propositions, which
+  \<^descr> @{syntax_ref (inner) aprop} denotes atomic propositions, which
   are embedded into regular @{syntax (inner) prop} by means of an
   explicit @{verbatim PROP} token.
 
@@ -791,7 +791,7 @@
   the printed version will appear like @{syntax (inner) logic} and
   cannot be parsed again as @{syntax (inner) prop}.
 
-  \item @{syntax_ref (inner) logic} denotes arbitrary terms of a
+  \<^descr> @{syntax_ref (inner) logic} denotes arbitrary terms of a
   logical type, excluding type @{typ prop}.  This is the main
   syntactic category of object-logic entities, covering plain @{text
   \<lambda>}-term notation (variables, abstraction, application), plus
@@ -801,28 +801,28 @@
   (excluding @{typ prop}) are \emph{collapsed} to this single category
   of @{syntax (inner) logic}.
 
-  \item @{syntax_ref (inner) index} denotes an optional index term for
+  \<^descr> @{syntax_ref (inner) index} denotes an optional index term for
   indexed syntax.  If omitted, it refers to the first @{keyword_ref
   "structure"} variable in the context.  The special dummy ``@{text
   "\<index>"}'' serves as pattern variable in mixfix annotations that
   introduce indexed notation.
 
-  \item @{syntax_ref (inner) idt} denotes identifiers, possibly
+  \<^descr> @{syntax_ref (inner) idt} denotes identifiers, possibly
   constrained by types.
 
-  \item @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
+  \<^descr> @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
   (inner) idt}.  This is the most basic category for variables in
   iterated binders, such as @{text "\<lambda>"} or @{text "\<And>"}.
 
-  \item @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
+  \<^descr> @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
   denote patterns for abstraction, cases bindings etc.  In Pure, these
   categories start as a mere copy of @{syntax (inner) idt} and
   @{syntax (inner) idts}, respectively.  Object-logics may add
   additional productions for binding forms.
 
-  \item @{syntax_ref (inner) type} denotes types of the meta-logic.
+  \<^descr> @{syntax_ref (inner) type} denotes types of the meta-logic.
 
-  \item @{syntax_ref (inner) sort} denotes meta-level sorts.
+  \<^descr> @{syntax_ref (inner) sort} denotes meta-level sorts.
 
   \end{description}
 
@@ -851,20 +851,20 @@
 
   \begin{description}
 
-  \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
+  \<^descr> A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
   anonymous inference parameter, which is filled in according to the
   most general type produced by the type-checking phase.
 
-  \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where
+  \<^descr> A bound ``@{text "_"}'' refers to a vacuous abstraction, where
   the body does not refer to the binding introduced here.  As in the
   term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
   "\<lambda>x y. x"}.
 
-  \item A free ``@{text "_"}'' refers to an implicit outer binding.
+  \<^descr> A free ``@{text "_"}'' refers to an implicit outer binding.
   Higher-level definitional packages usually allow forms like @{text "f x _
   = x"}.
 
-  \item A schematic ``@{text "_"}'' (within a term pattern, see
+  \<^descr> A schematic ``@{text "_"}'' (within a term pattern, see
   \secref{sec:term-decls}) refers to an anonymous variable that is
   implicitly abstracted over its context of locally bound variables.
   For example, this allows pattern matching of @{text "{x. f x = g
@@ -873,18 +873,18 @@
 
   \end{description}
 
-  \item The three literal dots ``@{verbatim "..."}'' may be also
+  \<^descr> The three literal dots ``@{verbatim "..."}'' may be also
   written as ellipsis symbol @{verbatim "\<dots>"}.  In both cases this
   refers to a special schematic variable, which is bound in the
   context.  This special term abbreviation works nicely with
   calculational reasoning (\secref{sec:calculation}).
 
-  \item @{verbatim CONST} ensures that the given identifier is treated
+  \<^descr> @{verbatim CONST} ensures that the given identifier is treated
   as a constant term, and passed through the parse tree in fully
   internalized form.  This is particularly relevant for translation
   rules (\secref{sec:syn-trans}), notably on the RHS.
 
-  \item @{verbatim XCONST} is similar to @{verbatim CONST}, but
+  \<^descr> @{verbatim XCONST} is similar to @{verbatim CONST}, but
   retains the constant name as given.  This is only relevant to
   translation rules (\secref{sec:syn-trans}), notably on the LHS.
 
@@ -901,16 +901,16 @@
 
   \begin{description}
 
-  \item @{command "print_syntax"} prints the inner syntax of the
+  \<^descr> @{command "print_syntax"} prints the inner syntax of the
   current context.  The output can be quite large; the most important
   sections are explained below.
 
   \begin{description}
 
-  \item @{text "lexicon"} lists the delimiters of the inner token
+  \<^descr> @{text "lexicon"} lists the delimiters of the inner token
   language; see \secref{sec:inner-lex}.
 
-  \item @{text "prods"} lists the productions of the underlying
+  \<^descr> @{text "prods"} lists the productions of the underlying
   priority grammar; see \secref{sec:priority-grammar}.
 
   The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
@@ -931,18 +931,18 @@
   Priority information attached to chain productions is ignored; only
   the dummy value @{text "-1"} is displayed.
 
-  \item @{text "print modes"} lists the alternative print modes
+  \<^descr> @{text "print modes"} lists the alternative print modes
   provided by this grammar; see \secref{sec:print-modes}.
 
-  \item @{text "parse_rules"} and @{text "print_rules"} relate to
+  \<^descr> @{text "parse_rules"} and @{text "print_rules"} relate to
   syntax translations (macros); see \secref{sec:syn-trans}.
 
-  \item @{text "parse_ast_translation"} and @{text
+  \<^descr> @{text "parse_ast_translation"} and @{text
   "print_ast_translation"} list sets of constants that invoke
   translation functions for abstract syntax trees, which are only
   required in very special situations; see \secref{sec:tr-funs}.
 
-  \item @{text "parse_translation"} and @{text "print_translation"}
+  \<^descr> @{text "parse_translation"} and @{text "print_translation"}
   list the sets of constants that invoke regular translation
   functions; see \secref{sec:tr-funs}.
 
@@ -976,10 +976,10 @@
 
   \begin{description}
 
-  \item @{attribute syntax_ambiguity_warning} controls output of
+  \<^descr> @{attribute syntax_ambiguity_warning} controls output of
   explicit warning messages about syntax ambiguity.
 
-  \item @{attribute syntax_ambiguity_limit} determines the number of
+  \<^descr> @{attribute syntax_ambiguity_limit} determines the number of
   resulting parse trees that are shown as part of the printed message
   in case of an ambiguity.
 
@@ -1197,11 +1197,11 @@
 
   \begin{description}
 
-  \item @{command "nonterminal"}~@{text c} declares a type
+  \<^descr> @{command "nonterminal"}~@{text c} declares a type
   constructor @{text c} (without arguments) to act as purely syntactic
   type: a nonterminal symbol of the inner syntax.
 
-  \item @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
+  \<^descr> @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
   priority grammar and the pretty printer table for the given print
   mode (default @{verbatim \<open>""\<close>}). An optional keyword @{keyword_ref
   "output"} means that only the pretty printer table is affected.
@@ -1251,11 +1251,11 @@
   resulting parse tree @{text "t"} is copied directly, without any
   further decoration.
 
-  \item @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
+  \<^descr> @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
   declarations (and translations) resulting from @{text decls}, which
   are interpreted in the same manner as for @{command "syntax"} above.
 
-  \item @{command "translations"}~@{text rules} specifies syntactic
+  \<^descr> @{command "translations"}~@{text rules} specifies syntactic
   translation rules (i.e.\ macros) as first-order rewrite rules on
   ASTs (\secref{sec:ast}).  The theory context maintains two
   independent lists of translation rules: parse rules (@{verbatim "=>"}
@@ -1302,11 +1302,11 @@
 
   \end{itemize}
 
-  \item @{command "no_translations"}~@{text rules} removes syntactic
+  \<^descr> @{command "no_translations"}~@{text rules} removes syntactic
   translation rules, which are interpreted in the same manner as for
   @{command "translations"} above.
 
-  \item @{attribute syntax_ast_trace} and @{attribute
+  \<^descr> @{attribute syntax_ast_trace} and @{attribute
   syntax_ast_stats} control diagnostic output in the AST normalization
   process, when translation rules are applied to concrete input or
   output.
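
  The bounded universal quantifier of HOL illustrates how @{command "syntax"}
  and @{command "translations"} cooperate; the declarations below are only a
  sketch of the setup that already exists in theory @{theory Set}:

    syntax
      "_Ball" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> bool \<Rightarrow> bool"  ("(3\<forall>_\<in>_./ _)" [0, 0, 10] 10)
    translations
      "\<forall>x\<in>A. P" \<rightleftharpoons> "CONST Ball A (\<lambda>x. P)"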
@@ -1441,7 +1441,7 @@
 
   \begin{description}
 
-  \item @{command parse_translation} etc. declare syntax translation
+  \<^descr> @{command parse_translation} etc. declare syntax translation
   functions to the theory.  Any of these commands have a single
   @{syntax text} argument that refers to an ML expression of
   appropriate type as follows:
@@ -1473,14 +1473,14 @@
   associated with the translation functions of a theory under @{text
   "parse_ast_translation"} etc.
 
-  \item @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
+  \<^descr> @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
   @{text "@{const_syntax c}"} inline the authentic syntax name of the
   given formal entities into the ML source.  This is the
   fully-qualified logical name prefixed by a special marker to
   indicate its kind: thus different logical name spaces are properly
   distinguished within parse trees.
 
-  \item @{text "@{const_syntax c}"} inlines the name @{text "c"} of
+  \<^descr> @{text "@{const_syntax c}"} inlines the name @{text "c"} of
   the given syntax constant, having checked that it has been declared
   via some @{command syntax} commands within the theory context.  Note
   that the usual naming convention makes syntax constants start with
@@ -1517,12 +1517,12 @@
 
   \begin{description}
 
-  \item [Parse translations] are applied bottom-up.  The arguments are
+  \<^descr>[Parse translations] are applied bottom-up.  The arguments are
   already in translated form.  The translations must not fail;
   exceptions trigger an error message.  There may be at most one
   function associated with any syntactic name.
 
-  \item [Print translations] are applied top-down.  They are supplied
+  \<^descr>[Print translations] are applied top-down.  They are supplied
   with arguments that are partly still in internal form.  The result
   again undergoes translation; therefore a print translation should
   not introduce as head the very constant that invoked it.  The
--- a/src/Doc/Isar_Ref/Outer_Syntax.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Outer_Syntax.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -49,10 +49,10 @@
 
   \begin{description}
 
-  \item @{command "print_commands"} prints all outer syntax keywords
+  \<^descr> @{command "print_commands"} prints all outer syntax keywords
   and commands.
 
-  \item @{command "help"}~@{text "pats"} retrieves outer syntax
+  \<^descr> @{command "help"}~@{text "pats"} retrieves outer syntax
   commands according to the specified name patterns.
 
   \end{description}
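
  For example, the following retrieves all outer syntax commands whose name
  contains the word ``print'':

    help "print"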
@@ -505,35 +505,35 @@
 
   \begin{description}
 
-  \item @{command "print_theory"} prints the main logical content of the
+  \<^descr> @{command "print_theory"} prints the main logical content of the
   background theory; the ``@{text "!"}'' option indicates extra verbosity.
 
-  \item @{command "print_definitions"} prints dependencies of definitional
+  \<^descr> @{command "print_definitions"} prints dependencies of definitional
   specifications within the background theory, which may be constants
   (\secref{sec:consts}) or types (\secref{sec:types-pure},
   \secref{sec:hol-typedef}); the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command "print_methods"} prints all proof methods available in the
+  \<^descr> @{command "print_methods"} prints all proof methods available in the
   current theory context; the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command "print_attributes"} prints all attributes available in the
+  \<^descr> @{command "print_attributes"} prints all attributes available in the
   current theory context; the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command "print_theorems"} prints theorems of the background theory
+  \<^descr> @{command "print_theorems"} prints theorems of the background theory
   resulting from the last command; the ``@{text "!"}'' option indicates
   extra verbosity.
 
-  \item @{command "print_facts"} prints all local facts of the current
+  \<^descr> @{command "print_facts"} prints all local facts of the current
   context, both named and unnamed ones; the ``@{text "!"}'' option indicates
   extra verbosity.
 
-  \item @{command "print_term_bindings"} prints all term bindings that
+  \<^descr> @{command "print_term_bindings"} prints all term bindings that
   are present in the context.
 
-  \item @{command "find_theorems"}~@{text criteria} retrieves facts
+  \<^descr> @{command "find_theorems"}~@{text criteria} retrieves facts
   from the theory or proof context matching all of the given search
   criteria.  The criterion @{text "name: p"} selects all theorems
   whose fully qualified name matches pattern @{text p}, which may
@@ -555,7 +555,7 @@
   default, duplicates are removed from the search result. Use
   @{text with_dups} to display duplicates.
 
-  \item @{command "find_consts"}~@{text criteria} prints all constants
+  \<^descr> @{command "find_consts"}~@{text criteria} prints all constants
   whose type meets all of the given criteria. The criterion @{text
   "strict: ty"} is met by any type that matches the type pattern
   @{text ty}.  Patterns may contain both the dummy type ``@{text _}''
@@ -564,11 +564,11 @@
   the prefix ``@{text "-"}'' function as described for @{command
   "find_theorems"}.
 
-  \item @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
+  \<^descr> @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
   visualizes dependencies of facts, using Isabelle's graph browser
   tool (see also @{cite "isabelle-system"}).
 
-  \item @{command "unused_thms"}~@{text "A\<^sub>1 \<dots> A\<^sub>m - B\<^sub>1 \<dots> B\<^sub>n"}
+  \<^descr> @{command "unused_thms"}~@{text "A\<^sub>1 \<dots> A\<^sub>m - B\<^sub>1 \<dots> B\<^sub>n"}
   displays all theorems that are proved in theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}
   or their parents but not in @{text "A\<^sub>1 \<dots> A\<^sub>m"} or their parents and
   that are never used.
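
  A typical query combines several of these criteria; the patterns below are
  only illustrative:

    find_theorems "_ @ _" name: "assoc"
    find_consts "_ list \<Rightarrow> _ list"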
--- a/src/Doc/Isar_Ref/Proof.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Proof.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -13,16 +13,16 @@
 
   \begin{description}
 
-  \item @{text "proof(prove)"} means that a new goal has just been
+  \<^descr> @{text "proof(prove)"} means that a new goal has just been
   stated that is now to be \emph{proven}; the next command may refine
   it by some proof method, and enter a sub-proof to establish the
   actual result.
 
-  \item @{text "proof(state)"} is like a nested theory mode: the
+  \<^descr> @{text "proof(state)"} is like a nested theory mode: the
   context may be augmented by \emph{stating} additional assumptions,
   intermediate results etc.
 
-  \item @{text "proof(chain)"} is intermediate between @{text
+  \<^descr> @{text "proof(chain)"} is intermediate between @{text
   "proof(state)"} and @{text "proof(prove)"}: existing facts (i.e.\ the
   contents of the special @{fact_ref this} register) have been just picked
   up in order to be used when refining the goal claimed next.
@@ -60,7 +60,7 @@
 
   \begin{description}
 
-  \item @{command "notepad"}~@{keyword "begin"} opens a proof state without
+  \<^descr> @{command "notepad"}~@{keyword "begin"} opens a proof state without
   any goal statement. This allows experimenting with Isar, without producing
   any persistent result. The notepad is closed by @{command "end"}.
 
@@ -93,10 +93,10 @@
 
   \begin{description}
 
-  \item @{command "next"} switches to a fresh block within a
+  \<^descr> @{command "next"} switches to a fresh block within a
   sub-proof, resetting the local context to the initial one.
 
-  \item @{command "{"} and @{command "}"} explicitly open and close
+  \<^descr> @{command "{"} and @{command "}"} explicitly open and close
   blocks.  Any current facts pass through ``@{command "{"}''
   unchanged, while ``@{command "}"}'' causes any result to be
   \emph{exported} into the enclosing context.  Thus fixed variables
@@ -192,10 +192,10 @@
 
   \begin{description}
 
-  \item @{command "fix"}~@{text x} introduces a local variable @{text
+  \<^descr> @{command "fix"}~@{text x} introduces a local variable @{text
   x} that is \emph{arbitrary, but fixed.}
 
-  \item @{command "assume"}~@{text "a: \<phi>"} and @{command
+  \<^descr> @{command "assume"}~@{text "a: \<phi>"} and @{command
   "presume"}~@{text "a: \<phi>"} introduce a local fact @{text "\<phi> \<turnstile> \<phi>"} by
   assumption.  Subsequent results applied to an enclosing goal (e.g.\
   by @{command_ref "show"}) are handled as follows: @{command
@@ -206,7 +206,7 @@
   @{keyword_ref "and"}; the resulting list of current facts consists
   of all of these concatenated.
 
-  \item @{command "def"}~@{text "x \<equiv> t"} introduces a local
+  \<^descr> @{command "def"}~@{text "x \<equiv> t"} introduces a local
   (non-polymorphic) definition.  In results exported from the context,
   @{text x} is replaced by @{text t}.  Basically, ``@{command
   "def"}~@{text "x \<equiv> t"}'' abbreviates ``@{command "fix"}~@{text
@@ -264,11 +264,11 @@
 
   \begin{description}
 
-  \item @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
+  \<^descr> @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
   text variables in patterns @{text "p\<^sub>1, \<dots>, p\<^sub>n"} by simultaneous
   higher-order matching against terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"}.
 
-  \item @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
+  \<^descr> @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
   matches @{text "p\<^sub>1, \<dots>, p\<^sub>n"} against the preceding statement.  Also
   note that @{keyword "is"} is not a separate command, but part of
   others (such as @{command "assume"}, @{command "have"} etc.).
@@ -320,12 +320,12 @@
 
   \begin{description}
 
-  \item @{command "note"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"} recalls existing facts
+  \<^descr> @{command "note"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"} recalls existing facts
   @{text "b\<^sub>1, \<dots>, b\<^sub>n"}, binding the result as @{text a}.  Note that
   attributes may be involved as well, both on the left and right hand
   sides.
 
-  \item @{command "then"} indicates forward chaining by the current
+  \<^descr> @{command "then"} indicates forward chaining by the current
   facts in order to establish the goal to be claimed next.  The
   initial proof method invoked to refine that will be offered the
   facts to do ``anything appropriate'' (see also
@@ -335,19 +335,19 @@
   facts into the goal state before operation.  This provides a simple
   scheme to control relevance of facts in automated proof search.
 
-  \item @{command "from"}~@{text b} abbreviates ``@{command
+  \<^descr> @{command "from"}~@{text b} abbreviates ``@{command
   "note"}~@{text b}~@{command "then"}''; thus @{command "then"} is
   equivalent to ``@{command "from"}~@{text this}''.
 
-  \item @{command "with"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} abbreviates ``@{command
+  \<^descr> @{command "with"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} abbreviates ``@{command
   "from"}~@{text "b\<^sub>1 \<dots> b\<^sub>n \<AND> this"}''; thus the forward chaining
   is from earlier facts together with the current ones.
 
-  \item @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} augments the facts being
+  \<^descr> @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} augments the facts being
   currently indicated for use by a subsequent refinement step (such as
   @{command_ref "apply"} or @{command_ref "proof"}).
 
-  \item @{command "unfolding"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is structurally
+  \<^descr> @{command "unfolding"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is structurally
   similar to @{command "using"}, but unfolds definitional equations
   @{text "b\<^sub>1, \<dots> b\<^sub>n"} throughout the goal state and facts.
 
@@ -445,7 +445,7 @@
 
   \begin{description}
 
-  \item @{command "lemma"}~@{text "a: \<phi>"} enters proof mode with
+  \<^descr> @{command "lemma"}~@{text "a: \<phi>"} enters proof mode with
   @{text \<phi>} as main goal, eventually resulting in some fact @{text "\<turnstile>
   \<phi>"} to be put back into the target context.  An additional @{syntax
   context} specification may build up an initial proof context for the
@@ -453,11 +453,11 @@
   well, see also @{syntax "includes"} in \secref{sec:bundle} and
   @{syntax context_elem} in \secref{sec:locale}.
 
-  \item @{command "theorem"}, @{command "corollary"}, and @{command
+  \<^descr> @{command "theorem"}, @{command "corollary"}, and @{command
   "proposition"} are the same as @{command "lemma"}. The different command
   names merely serve as a formal comment in the theory source.
 
-  \item @{command "schematic_goal"} is similar to @{command "theorem"},
+  \<^descr> @{command "schematic_goal"} is similar to @{command "theorem"},
   but allows the statement to contain unbound schematic variables.
 
   Under normal circumstances, an Isar proof text needs to specify
@@ -467,14 +467,14 @@
   proofs is lost, which also impacts performance, because proof
   checking is forced into sequential mode.
 
-  \item @{command "have"}~@{text "a: \<phi>"} claims a local goal,
+  \<^descr> @{command "have"}~@{text "a: \<phi>"} claims a local goal,
   eventually resulting in a fact within the current logical context.
   This operation is completely independent of any pending sub-goals of
   an enclosing goal statement, so @{command "have"} may be freely
   used for experimental exploration of potential results within a
   proof body.
 
-  \item @{command "show"}~@{text "a: \<phi>"} is like @{command
+  \<^descr> @{command "show"}~@{text "a: \<phi>"} is like @{command
   "have"}~@{text "a: \<phi>"} plus a second stage to refine some pending
   sub-goal for each one of the finished results, after having been
   exported into the corresponding context (at the head of the
@@ -487,16 +487,16 @@
   following message:
   @{verbatim [display] \<open>Local statement fails to refine any pending goal\<close>}
 
-  \item @{command "hence"} abbreviates ``@{command "then"}~@{command
+  \<^descr> @{command "hence"} abbreviates ``@{command "then"}~@{command
   "have"}'', i.e.\ claims a local goal to be proven by forward
   chaining the current facts.  Note that @{command "hence"} is also
   equivalent to ``@{command "from"}~@{text this}~@{command "have"}''.
 
-  \item @{command "thus"} abbreviates ``@{command "then"}~@{command
+  \<^descr> @{command "thus"} abbreviates ``@{command "then"}~@{command
   "show"}''.  Note that @{command "thus"} is also equivalent to
   ``@{command "from"}~@{text this}~@{command "show"}''.
 
-  \item @{command "print_statement"}~@{text a} prints facts from the
+  \<^descr> @{command "print_statement"}~@{text a} prints facts from the
   current theory or proof context in long statement form, according to
   the syntax for @{command "lemma"} given above.
 
@@ -576,7 +576,7 @@
 
   \begin{description}
 
-  \item @{command "also"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains the auxiliary
+  \<^descr> @{command "also"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains the auxiliary
   @{fact calculation} register as follows.  The first occurrence of
   @{command "also"} in some calculational thread initializes @{fact
   calculation} by @{fact this}. Any subsequent @{command "also"} on
@@ -586,7 +586,7 @@
   current context, unless alternative rules are given as explicit
   arguments.
 
-  \item @{command "finally"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintaining @{fact
+  \<^descr> @{command "finally"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintaining @{fact
   calculation} in the same way as @{command "also"}, and concludes the
   current calculational thread.  The final result is exhibited as fact
   for forward chaining towards the next goal. Basically, @{command
@@ -596,22 +596,22 @@
   "show"}~@{text ?thesis}~@{command "."}'' and ``@{command
   "finally"}~@{command "have"}~@{text \<phi>}~@{command "."}''.
 
-  \item @{command "moreover"} and @{command "ultimately"} are
+  \<^descr> @{command "moreover"} and @{command "ultimately"} are
   analogous to @{command "also"} and @{command "finally"}, but collect
   results only, without applying rules.
 
-  \item @{command "print_trans_rules"} prints the list of transitivity
+  \<^descr> @{command "print_trans_rules"} prints the list of transitivity
   rules (for calculational commands @{command "also"} and @{command
   "finally"}) and symmetry rules (for the @{attribute symmetric}
   operation and single step elimination patterns) of the current
   context.
 
-  \item @{attribute trans} declares theorems as transitivity rules.
+  \<^descr> @{attribute trans} declares theorems as transitivity rules.
 
-  \item @{attribute sym} declares symmetry rules, as well as
+  \<^descr> @{attribute sym} declares symmetry rules, as well as
   @{attribute "Pure.elim"}@{text "?"} rules.
 
-  \item @{attribute symmetric} resolves a theorem with some rule
+  \<^descr> @{attribute symmetric} resolves a theorem with some rule
   declared as @{attribute sym} in the current context.  For example,
   ``@{command "assume"}~@{text "[symmetric]: x = y"}'' produces a
   swapped fact derived from that assumption.
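
  A short calculational sketch (assuming an Isabelle/HOL context):

    notepad
    begin
      fix a b c :: nat
      assume "a = b" and "b = c"
      have "a = b" by fact
      also have "b = c" by fact
      finally have "a = c" .
    end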
@@ -751,11 +751,11 @@
 
   \begin{description}
 
-  \item @{command "proof"}~@{text "m\<^sub>1"} refines the goal by proof
+  \<^descr> @{command "proof"}~@{text "m\<^sub>1"} refines the goal by proof
   method @{text "m\<^sub>1"}; facts for forward chaining are passed if so
   indicated by @{text "proof(chain)"} mode.
 
-  \item @{command "qed"}~@{text "m\<^sub>2"} refines any remaining goals by
+  \<^descr> @{command "qed"}~@{text "m\<^sub>2"} refines any remaining goals by
   proof method @{text "m\<^sub>2"} and concludes the sub-proof by assumption.
   If the goal had been @{text "show"} (or @{text "thus"}), some
   pending sub-goal is solved as well by the rule resulting from the
@@ -768,7 +768,7 @@
   @{command "have"}, or weakening the local context by replacing
   occurrences of @{command "assume"} by @{command "presume"}.
 
-  \item @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal
+  \<^descr> @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal
   proof}\index{proof!terminal}; it abbreviates @{command
   "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"}, but with
   backtracking across both methods.  Debugging an unsuccessful
@@ -777,15 +777,15 @@
   @{text "apply"}~@{text "m\<^sub>1"}) is already sufficient to see the
   problem.
 
-  \item ``@{command ".."}'' is a \emph{standard
+  \<^descr> ``@{command ".."}'' is a \emph{standard
   proof}\index{proof!standard}; it abbreviates @{command "by"}~@{text
   "standard"}.
 
-  \item ``@{command "."}'' is a \emph{trivial
+  \<^descr> ``@{command "."}'' is a \emph{trivial
   proof}\index{proof!trivial}; it abbreviates @{command "by"}~@{text
   "this"}.
 
-  \item @{command "sorry"} is a \emph{fake proof}\index{proof!fake}
+  \<^descr> @{command "sorry"} is a \emph{fake proof}\index{proof!fake}
   pretending to solve the pending claim without further ado.  This
   only works in interactive development, or if the @{attribute
   quick_and_dirty} is enabled.  Facts emerging from fake
@@ -796,7 +796,7 @@
   The most important application of @{command "sorry"} is to support
   experimentation and top-down proof development.
 
-  \item @{method standard} refers to the default refinement step of some
+  \<^descr> @{method standard} refers to the default refinement step of some
   Isar language elements (notably @{command proof} and ``@{command ".."}'').
   It is \emph{dynamically scoped}, so the behaviour depends on the
   application environment.
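
  A minimal sketch contrasting a structured sub-proof with a standard
  proof (assuming an Isabelle/HOL context):

    lemma "A \<longrightarrow> A"
    proof
      assume A
      then show A .
    qed

    lemma "B \<longrightarrow> B" ..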
@@ -862,7 +862,7 @@
 
   \begin{description}
 
-  \item @{command "print_rules"} prints rules declared via attributes
+  \<^descr> @{command "print_rules"} prints rules declared via attributes
   @{attribute (Pure) intro}, @{attribute (Pure) elim}, @{attribute
   (Pure) dest} of Isabelle/Pure.
 
@@ -870,7 +870,7 @@
   rule declarations of the classical reasoner
   (\secref{sec:classical}).
 
-  \item ``@{method "-"}'' (minus) inserts the forward chaining facts as
+  \<^descr> ``@{method "-"}'' (minus) inserts the forward chaining facts as
   premises into the goal, and nothing else.
 
   Note that command @{command_ref "proof"} without any method actually
@@ -878,7 +878,7 @@
   method; thus a plain \emph{do-nothing} proof step would be ``@{command
   "proof"}~@{text "-"}'' rather than @{command "proof"} alone.
 
-  \item @{method "goal_cases"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} turns the current subgoals
+  \<^descr> @{method "goal_cases"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} turns the current subgoals
   into cases within the context (see also \secref{sec:cases-induct}). The
   specified case names are used if present; otherwise cases are numbered
   starting from 1.
@@ -888,7 +888,7 @@
   premises, and @{command let} variable @{variable_ref ?case} refer to the
   conclusion.
 
-  \item @{method "fact"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} composes some fact from
+  \<^descr> @{method "fact"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} composes some fact from
   @{text "a\<^sub>1, \<dots>, a\<^sub>n"} (or implicitly from the current proof context)
   modulo unification of schematic type and term variables.  The rule
   structure is not taken into account, i.e.\ meta-level implication is
@@ -899,7 +899,7 @@
   @{text "\<turnstile> \<phi>"} is an instance of some known @{text "\<turnstile> \<phi>"} in the
   proof context.
 
-  \item @{method assumption} solves some goal by a single assumption
+  \<^descr> @{method assumption} solves some goal by a single assumption
   step.  All given facts are guaranteed to participate in the
   refinement; this means there may be only 0 or 1 in the first place.
   Recall that @{command "qed"} (\secref{sec:proof-steps}) already
@@ -907,11 +907,11 @@
   proofs usually need not quote the @{method assumption} method at
   all.
 
-  \item @{method this} applies all of the current facts directly as
+  \<^descr> @{method this} applies all of the current facts directly as
   rules.  Recall that ``@{command "."}'' (dot) abbreviates ``@{command
   "by"}~@{text this}''.
 
-  \item @{method (Pure) rule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some rule given as
+  \<^descr> @{method (Pure) rule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some rule given as
   argument in a backward manner; facts are used to reduce the rule
   before applying it to the goal.  Thus @{method (Pure) rule} without facts
   is plain introduction, while with facts it becomes elimination.
@@ -923,7 +923,7 @@
   behaviour of @{command "proof"} and ``@{command ".."}'' (double-dot) steps
   (see \secref{sec:proof-steps}).
 
-  \item @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
+  \<^descr> @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
   @{attribute (Pure) dest} declare introduction, elimination, and
   destruct rules, to be used with method @{method (Pure) rule}, and similar
   tools.  Note that the latter will ignore rules declared with
@@ -934,10 +934,10 @@
   present versions of Isabelle/Pure, i.e.\ @{attribute (Pure)
   "Pure.intro"}.
 
-  \item @{attribute (Pure) rule}~@{text del} undeclares introduction,
+  \<^descr> @{attribute (Pure) rule}~@{text del} undeclares introduction,
   elimination, or destruct rules.
 
-  \item @{attribute OF}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some theorem to all
+  \<^descr> @{attribute OF}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some theorem to all
   of the given rules @{text "a\<^sub>1, \<dots>, a\<^sub>n"} in canonical right-to-left
   order, which means that premises stemming from the @{text "a\<^sub>i"}
   emerge in parallel in the result, without interfering with each
@@ -949,7 +949,7 @@
   (underscore), which refers to the propositional identity rule in the
   Pure theory.
 
-  \item @{attribute of}~@{text "t\<^sub>1 \<dots> t\<^sub>n"} performs positional
+  \<^descr> @{attribute of}~@{text "t\<^sub>1 \<dots> t\<^sub>n"} performs positional
   instantiation of term variables.  The terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"} are
   substituted for any schematic variables occurring in a theorem from
   left to right; ``@{text _}'' (underscore) indicates to skip a
@@ -960,7 +960,7 @@
   be specified: the instantiated theorem is exported, and these
   variables become schematic (usually with some shifting of indices).
 
-  \item @{attribute "where"}~@{text "x\<^sub>1 = t\<^sub>1 \<AND> \<dots> x\<^sub>n = t\<^sub>n"}
+  \<^descr> @{attribute "where"}~@{text "x\<^sub>1 = t\<^sub>1 \<AND> \<dots> x\<^sub>n = t\<^sub>n"}
   performs named instantiation of schematic type and term variables
   occurring in a theorem.  Schematic variables have to be specified on
   the left-hand side (e.g.\ @{text "?x1.3"}).  The question mark may
@@ -988,7 +988,7 @@
 
   \begin{description}
 
-  \item @{command "method_setup"}~@{text "name = text description"}
+  \<^descr> @{command "method_setup"}~@{text "name = text description"}
   defines a proof method in the current context.  The given @{text
   "text"} has to be an ML expression of type
   @{ML_type "(Proof.context -> Proof.method) context_parser"}, cf.\
@@ -1096,7 +1096,7 @@
 
   \begin{description}
 
-  \item @{command "case"}~@{text "a: (c x\<^sub>1 \<dots> x\<^sub>m)"} invokes a named local
+  \<^descr> @{command "case"}~@{text "a: (c x\<^sub>1 \<dots> x\<^sub>m)"} invokes a named local
   context @{text "c: x\<^sub>1, \<dots>, x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>m"}, as provided by an
   appropriate proof method (such as @{method_ref cases} and @{method_ref
   induct}). The command ``@{command "case"}~@{text "a: (c x\<^sub>1 \<dots> x\<^sub>m)"}''
@@ -1109,17 +1109,17 @@
   re-use @{text c}. So @{command "case"}~@{text "(c x\<^sub>1 \<dots> x\<^sub>m)"} is the same
   as @{command "case"}~@{text "c: (c x\<^sub>1 \<dots> x\<^sub>m)"}.
 
-  \item @{command "print_cases"} prints all local contexts of the
+  \<^descr> @{command "print_cases"} prints all local contexts of the
   current state, using Isar proof language notation.
 
-  \item @{attribute case_names}~@{text "c\<^sub>1 \<dots> c\<^sub>k"} declares names for
+  \<^descr> @{attribute case_names}~@{text "c\<^sub>1 \<dots> c\<^sub>k"} declares names for
   the local contexts of premises of a theorem; @{text "c\<^sub>1, \<dots>, c\<^sub>k"}
   refers to the \emph{prefix} of the list of premises. Each of the
   cases @{text "c\<^sub>i"} can be of the form @{text "c[h\<^sub>1 \<dots> h\<^sub>n]"} where
   the @{text "h\<^sub>1 \<dots> h\<^sub>n"} are the names of the hypotheses in case @{text "c\<^sub>i"}
   from left to right.
 
-  \item @{attribute case_conclusion}~@{text "c d\<^sub>1 \<dots> d\<^sub>k"} declares
+  \<^descr> @{attribute case_conclusion}~@{text "c d\<^sub>1 \<dots> d\<^sub>k"} declares
   names for the conclusions of a named premise @{text c}; here @{text
   "d\<^sub>1, \<dots>, d\<^sub>k"} refers to the prefix of arguments of a logical formula
   built by nesting a binary connective (e.g.\ @{text "\<or>"}).
@@ -1129,7 +1129,7 @@
   whole.  The need to name subformulas only arises with cases that
   split into several sub-cases, as in common co-induction rules.
 
-  \item @{attribute params}~@{text "p\<^sub>1 \<dots> p\<^sub>m \<AND> \<dots> q\<^sub>1 \<dots> q\<^sub>n"} renames
+  \<^descr> @{attribute params}~@{text "p\<^sub>1 \<dots> p\<^sub>m \<AND> \<dots> q\<^sub>1 \<dots> q\<^sub>n"} renames
   the innermost parameters of premises @{text "1, \<dots>, n"} of some
   theorem.  An empty list of names may be given to skip positions,
   leaving the present parameters unchanged.
@@ -1137,7 +1137,7 @@
   Note that the default usage of case rules does \emph{not} directly
   expose parameters to the proof context.
 
-  \item @{attribute consumes}~@{text n} declares the number of ``major
+  \<^descr> @{attribute consumes}~@{text n} declares the number of ``major
   premises'' of a rule, i.e.\ the number of facts to be consumed when
   it is applied by an appropriate proof method.  The default value of
   @{attribute consumes} is @{text "n = 1"}, which is appropriate for
@@ -1216,7 +1216,7 @@
 
   \begin{description}
 
-  \item @{method cases}~@{text "insts R"} applies method @{method
+  \<^descr> @{method cases}~@{text "insts R"} applies method @{method
   rule} with an appropriate case distinction theorem, instantiated to
   the subjects @{text insts}.  Symbolic case names are bound according
   to the rule's local contexts.
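
  For illustration, a small sketch of the @{method cases} method
  (assuming an Isabelle/HOL context):

    notepad
    begin
      fix n :: nat
      have "n = 0 \<or> 0 < n"
      proof (cases n)
        case 0
        then show ?thesis by simp
      next
        case (Suc m)
        then show ?thesis by simp
      qed
    end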
@@ -1243,7 +1243,7 @@
   "(no_simp)"} option can be used to disable pre-simplification of
   cases (see the description of @{method induct} below for details).
 
-  \item @{method induct}~@{text "insts R"} and
+  \<^descr> @{method induct}~@{text "insts R"} and
   @{method induction}~@{text "insts R"} are analogous to the
   @{method cases} method, but refer to induction rules, which are
   determined as follows:
@@ -1300,7 +1300,7 @@
   pending variables in the rule.  Such schematic induction rules
   rarely occur in practice, though.
 
-  \item @{method coinduct}~@{text "inst R"} is analogous to the
+  \<^descr> @{method coinduct}~@{text "inst R"} is analogous to the
   @{method induct} method, but refers to coinduction rules, which are
   determined as follows:
 
@@ -1406,10 +1406,10 @@
 
   \begin{description}
 
-  \item @{command "print_induct_rules"} prints cases and induct rules
+  \<^descr> @{command "print_induct_rules"} prints cases and induct rules
   for predicates (or sets) and types of the current context.
 
-  \item @{attribute cases}, @{attribute induct}, and @{attribute
+  \<^descr> @{attribute cases}, @{attribute induct}, and @{attribute
   coinduct} (as attributes) declare rules for reasoning about
   (co)inductive predicates (or sets) and types, using the
   corresponding methods of the same name.  Certain definitional
@@ -1472,7 +1472,7 @@
 
   \begin{description}
 
-  \item @{command consider}~@{text "(a) \<^vec>x \<WHERE> \<^vec>A \<^vec>x
+  \<^descr> @{command consider}~@{text "(a) \<^vec>x \<WHERE> \<^vec>A \<^vec>x
   | (b) \<^vec>y \<WHERE> \<^vec>B \<^vec>y | \<dots> "} states a rule for case
   splitting into separate subgoals, such that each case involves new
   parameters and premises. After the proof is finished, the resulting rule
@@ -1502,7 +1502,7 @@
   statements, as well as @{command print_statement} to print existing rules
   in a similar format.
 
-  \item @{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x"}
+  \<^descr> @{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x"}
   states a generalized elimination rule with exactly one case. After the
   proof is finished, it is activated for the subsequent proof text: the
   context is augmented via @{command fix}~@{text "\<^vec>x"} @{command
@@ -1529,7 +1529,7 @@
     \quad @{command "fix"}~@{text "\<^vec>x"}~@{command "assume"}@{text "\<^sup>* a: \<^vec>A \<^vec>x"} \\
   \end{matharray}
 
-  \item @{command guess} is similar to @{command obtain}, but it derives the
+  \<^descr> @{command guess} is similar to @{command obtain}, but it derives the
   obtained context elements from the course of tactical reasoning in the
   proof. Thus it can considerably obscure the proof: it is classified as
   \emph{improper}.
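
  A minimal sketch of @{command obtain} (assuming an Isabelle/HOL
  context):

    notepad
    begin
      fix P :: "nat \<Rightarrow> bool"
      assume "\<exists>x. P x"
      then obtain x where "P x" ..
    end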
--- a/src/Doc/Isar_Ref/Proof_Script.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Proof_Script.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -43,11 +43,11 @@
 
   \begin{description}
 
-  \item @{command "supply"} supports fact definitions during goal
+  \<^descr> @{command "supply"} supports fact definitions during goal
   refinement: it is similar to @{command "note"}, but it operates in
   backwards mode and does not have any impact on chained facts.
 
-  \item @{command "apply"}~@{text m} applies proof method @{text m} in
+  \<^descr> @{command "apply"}~@{text m} applies proof method @{text m} in
   initial position, but unlike @{command "proof"} it retains ``@{text
   "proof(prove)"}'' mode.  Thus consecutive method applications may be
   given just as in tactic scripts.
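
  A small proof-script sketch (assuming an Isabelle/HOL context):

    lemma "A \<and> B \<longrightarrow> B \<and> A"
      apply (rule impI)
      apply (erule conjE)
      apply (rule conjI)
       apply assumption
      apply assumption
      done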
@@ -57,7 +57,7 @@
   further @{command "apply"} command would always work in a purely
   backward manner.
 
-  \item @{command "apply_end"}~@{text "m"} applies proof method @{text
+  \<^descr> @{command "apply_end"}~@{text "m"} applies proof method @{text
   m} as if in terminal position.  Basically, this simulates a
   multi-step tactic script for @{command "qed"}, but may be given
   anywhere within the proof body.
@@ -67,18 +67,18 @@
   "qed"}).  Thus the proof method may not refer to any assumptions
   introduced in the current body, for example.
 
-  \item @{command "done"} completes a proof script, provided that the
+  \<^descr> @{command "done"} completes a proof script, provided that the
   current goal state is solved completely.  Note that actual
   structured proof commands (e.g.\ ``@{command "."}'' or @{command
   "sorry"}) may be used to conclude proof scripts as well.
 
-  \item @{command "defer"}~@{text n} and @{command "prefer"}~@{text n}
+  \<^descr> @{command "defer"}~@{text n} and @{command "prefer"}~@{text n}
   shuffle the list of pending goals: @{command "defer"} puts off
   sub-goal @{text n} to the end of the list (@{text "n = 1"} by
   default), while @{command "prefer"} brings sub-goal @{text n} to the
   front.
 
-  \item @{command "back"} does back-tracking over the result sequence
+  \<^descr> @{command "back"} does back-tracking over the result sequence
   of the latest proof command.  Any proof command may return multiple
   results, and this command explores the possibilities step-by-step.
   It is mainly useful for experimentation and interactive exploration,
@@ -105,7 +105,7 @@
 
   \begin{description}
 
-  \item @{command "subgoal"} allows to impose some structure on backward
+  \<^descr> @{command "subgoal"} allows to impose some structure on backward
   refinements, to avoid proof scripts degenerating into long of @{command
   apply} sequences.
 
@@ -247,7 +247,7 @@
 
 \begin{description}
 
-  \item @{method rule_tac} etc. do resolution of rules with explicit
+  \<^descr> @{method rule_tac} etc. do resolution of rules with explicit
   instantiation.  This works the same way as the ML tactics @{ML
   Rule_Insts.res_inst_tac} etc.\ (see @{cite "isabelle-implementation"}).
 
@@ -255,40 +255,40 @@
   @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
   @{cite "isabelle-implementation"}).
 
-  \item @{method cut_tac} inserts facts into the proof state as
+  \<^descr> @{method cut_tac} inserts facts into the proof state as
   assumption of a subgoal; instantiations may be given as well.  Note
   that the scope of schematic variables is spread over the main goal
   statement and rule premises are turned into new subgoals.  This is
   in contrast to the regular method @{method insert} which inserts
   closed rule statements.
 
-  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
+  \<^descr> @{method thin_tac}~@{text \<phi>} deletes the specified premise
   from a subgoal.  Note that @{text \<phi>} may contain schematic
   variables, to abbreviate the intended proposition; the first
   matching subgoal premise will be deleted.  Removing useless premises
   from a subgoal increases its readability and can make search tactics
   run faster.
 
-  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
+  \<^descr> @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
   @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
   as new subgoals (in the original context).
 
-  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
+  \<^descr> @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
   goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
   \emph{suffix} of variables.
 
-  \item @{method rotate_tac}~@{text n} rotates the premises of a
+  \<^descr> @{method rotate_tac}~@{text n} rotates the premises of a
   subgoal by @{text n} positions: from right to left if @{text n} is
   positive, and from left to right if @{text n} is negative; the
   default value is 1.
 
-  \item @{method tactic}~@{text "text"} produces a proof method from
+  \<^descr> @{method tactic}~@{text "text"} produces a proof method from
   any ML text of type @{ML_type tactic}.  Apart from the usual ML
   environment and the current proof context, the ML code may refer to
   the locally bound values @{ML_text facts}, which indicates any
   current facts used for forward-chaining.
 
-  \item @{method raw_tactic} is similar to @{method tactic}, but
+  \<^descr> @{method raw_tactic} is similar to @{method tactic}, but
   presents the goal state in its raw internal form, where simultaneous
   subgoals appear as a conjunction of the logical framework instead of
   the usual split into several subgoals.  While this feature is useful
--- a/src/Doc/Isar_Ref/Spec.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/Isar_Ref/Spec.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -74,7 +74,7 @@
 
   \begin{description}
 
-  \item @{command "theory"}~@{text "A \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"}
+  \<^descr> @{command "theory"}~@{text "A \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"}
   starts a new theory @{text A} based on the merge of existing
   theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}.  Due to the possibility to import more
   than one ancestor, the resulting theory structure of an Isabelle
@@ -104,13 +104,13 @@
   It is possible to specify an alternative completion via @{verbatim
   "=="}~@{text "text"}, while the default is the corresponding keyword name.
   
-  \item @{command (global) "end"} concludes the current theory
+  \<^descr> @{command (global) "end"} concludes the current theory
   definition.  Note that some other commands, e.g.\ local theory
   targets @{command locale} or @{command class} may involve a
   @{keyword "begin"} that needs to be matched by @{command (local)
   "end"}, according to the usual rules for nested blocks.
 
-  \item @{command thy_deps} visualizes the theory hierarchy as a directed
+  \<^descr> @{command thy_deps} visualizes the theory hierarchy as a directed
   acyclic graph. By default, all imported theories are shown, taking the
   base session as a starting point. Alternatively, it is possible to
   restrict the full theory graph by giving bounds, analogously to
@@ -155,13 +155,13 @@
 
   \begin{description}
   
-  \item @{command "context"}~@{text "c \<BEGIN>"} opens a named
+  \<^descr> @{command "context"}~@{text "c \<BEGIN>"} opens a named
   context, by recommencing an existing locale or class @{text c}.
   Note that locale and class definitions allow to include the
   @{keyword "begin"} keyword as well, in order to continue the local
   theory immediately after the initial specification.
 
-  \item @{command "context"}~@{text "bundles elements \<BEGIN>"} opens
+  \<^descr> @{command "context"}~@{text "bundles elements \<BEGIN>"} opens
   an unnamed context, by extending the enclosing global or local
   theory target by the given declaration bundles (\secref{sec:bundle})
   and context elements (@{text "\<FIXES>"}, @{text "\<ASSUMES>"}
@@ -169,12 +169,12 @@
   in the extended context will be exported into the enclosing target
   by lifting over extra parameters and premises.
   
-  \item @{command (local) "end"} concludes the current local theory,
+  \<^descr> @{command (local) "end"} concludes the current local theory,
   according to the nesting of contexts.  Note that a global @{command
   (global) "end"} has a different meaning: it concludes the theory
   itself (\secref{sec:begin-thy}).
   
-  \item @{keyword "private"} or @{keyword "qualified"} may be given as
+  \<^descr> @{keyword "private"} or @{keyword "qualified"} may be given as
   modifiers before any local theory command. This restricts name space
   accesses to the local scope, as determined by the enclosing @{command
   "context"}~@{keyword "begin"}~\dots~@{command "end"} block. Outside its
@@ -185,7 +185,7 @@
   a local scope by itself: an extra unnamed context is required to use
   @{keyword "private"} or @{keyword "qualified"} here.
 
-  \item @{text "("}@{keyword_def "in"}~@{text "c)"} given after any local
+  \<^descr> @{text "("}@{keyword_def "in"}~@{text "c)"} given after any local
   theory command specifies an immediate target, e.g.\ ``@{command
   "definition"}~@{text "(\<IN> c)"}'' or ``@{command "theorem"}~@{text
   "(\<IN> c)"}''. This works both in a local or global theory context; the
@@ -258,28 +258,28 @@
 
   \begin{description}
 
-  \item @{command bundle}~@{text "b = decls"} defines a bundle of
+  \<^descr> @{command bundle}~@{text "b = decls"} defines a bundle of
   declarations in the current context.  The RHS is similar to the one
   of the @{command declare} command.  Bundles defined in local theory
   targets are subject to transformations via morphisms, when moved
   into different application contexts; this works analogously to any
   other local theory specification.
 
-  \item @{command print_bundles} prints the named bundles that are available
+  \<^descr> @{command print_bundles} prints the named bundles that are available
   in the current context; the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command include}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} includes the declarations
+  \<^descr> @{command include}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} includes the declarations
   from the given bundles into the current proof body context.  This is
   analogous to @{command "note"} (\secref{sec:proof-facts}) with the
   expanded bundles.
 
-  \item @{command including} is similar to @{command include}, but
+  \<^descr> @{command including} is similar to @{command include}, but
   works in proof refinement (backward mode).  This is analogous to
   @{command "using"} (\secref{sec:proof-facts}) with the expanded
   bundles.
 
-  \item @{keyword "includes"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is similar to
+  \<^descr> @{keyword "includes"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is similar to
   @{command include}, but works in situations where a specification
   context is constructed, notably for @{command context} and long
   statements of @{command theorem} etc.
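
  A minimal bundle sketch (assuming an Isabelle/HOL context; the bundle
  name is arbitrary and merely activates a configuration option):

    bundle trace = [[simp_trace]]

    lemma "x = x"
      including trace by simp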
@@ -326,7 +326,7 @@
 
   \begin{description}
   
-  \item @{command "definition"}~@{text "c \<WHERE> eq"} produces an
+  \<^descr> @{command "definition"}~@{text "c \<WHERE> eq"} produces an
   internal definition @{text "c \<equiv> t"} according to the specification
   given as @{text eq}, which is then turned into a proven fact.  The
   given proposition may deviate from internal meta-level equality
@@ -340,10 +340,10 @@
   @{text "f \<equiv> \<lambda>x y. t"} and @{text "y \<noteq> 0 \<Longrightarrow> g x y = u"} instead of an
   unrestricted @{text "g \<equiv> \<lambda>x y. u"}.
 
-  \item @{command "print_defn_rules"} prints the definitional rewrite rules
+  \<^descr> @{command "print_defn_rules"} prints the definitional rewrite rules
   declared via @{attribute defn} in the current context.
 
-  \item @{command "abbreviation"}~@{text "c \<WHERE> eq"} introduces a
+  \<^descr> @{command "abbreviation"}~@{text "c \<WHERE> eq"} introduces a
   syntactic constant which is associated with a certain term according
   to the meta-level equality @{text eq}.
   
@@ -360,7 +360,7 @@
   declared for abbreviations, cf.\ @{command "syntax"} in
   \secref{sec:syn-trans}.
   
-  \item @{command "print_abbrevs"} prints all constant abbreviations of the
+  \<^descr> @{command "print_abbrevs"} prints all constant abbreviations of the
   current context; the ``@{text "!"}'' option indicates extra verbosity.
   
   \end{description}
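
  For illustration, a sketch of both forms (assuming an Isabelle/HOL
  context; the names are arbitrary):

    definition sq :: "nat \<Rightarrow> nat"
      where "sq n = n * n"

    abbreviation double :: "nat \<Rightarrow> nat"
      where "double n \<equiv> n + n"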
@@ -382,7 +382,7 @@
 
   \begin{description}
 
-  \item @{command "axiomatization"}~@{text "c\<^sub>1 \<dots> c\<^sub>m \<WHERE> \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
+  \<^descr> @{command "axiomatization"}~@{text "c\<^sub>1 \<dots> c\<^sub>m \<WHERE> \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
   introduces several constants simultaneously and states axiomatic
   properties for these. The constants are marked as being specified once and
   for all, which prevents additional specifications for the same constants
@@ -432,7 +432,7 @@
 
   \begin{description}
 
-  \item @{command "declaration"}~@{text d} adds the declaration
+  \<^descr> @{command "declaration"}~@{text d} adds the declaration
   function @{text d} of ML type @{ML_type declaration}, to the current
   local theory under construction.  In later application contexts, the
   function is transformed according to the morphisms being involved in
@@ -442,11 +442,11 @@
   declaration is applied to all possible contexts involved, including
   the global background theory.
 
-  \item @{command "syntax_declaration"} is similar to @{command
+  \<^descr> @{command "syntax_declaration"} is similar to @{command
   "declaration"}, but is meant to affect only ``syntactic'' tools by
   convention (such as notation and type-checking information).
 
-  \item @{command "declare"}~@{text thms} declares theorems to the
+  \<^descr> @{command "declare"}~@{text thms} declares theorems to the
   current local theory context.  No theorem binding is involved here,
   unlike @{command "lemmas"} (cf.\ \secref{sec:theorems}), so
   @{command "declare"} only has the effect of applying attributes as
@@ -565,7 +565,7 @@
 
   \begin{description}
   
-  \item @{command "locale"}~@{text "loc = import + body"} defines a
+  \<^descr> @{command "locale"}~@{text "loc = import + body"} defines a
   new locale @{text loc} as a context consisting of a certain view of
   existing locales (@{text import}) plus some additional elements
   (@{text body}).  Both @{text import} and @{text body} are optional;
@@ -590,29 +590,29 @@
 
   \begin{description}
 
-  \item @{element "fixes"}~@{text "x :: \<tau> (mx)"} declares a local
+  \<^descr> @{element "fixes"}~@{text "x :: \<tau> (mx)"} declares a local
   parameter of type @{text \<tau>} and mixfix annotation @{text mx} (both
   are optional).  The special syntax declaration ``@{text
   "("}@{keyword_ref "structure"}@{text ")"}'' means that @{text x} may
   be referenced implicitly in this context.
 
-  \item @{element "constrains"}~@{text "x :: \<tau>"} introduces a type
+  \<^descr> @{element "constrains"}~@{text "x :: \<tau>"} introduces a type
   constraint @{text \<tau>} on the local parameter @{text x}.  This
   element is deprecated.  The type constraint should be introduced in
   the @{keyword "for"} clause or the relevant @{element "fixes"} element.
 
-  \item @{element "assumes"}~@{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
+  \<^descr> @{element "assumes"}~@{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
   introduces local premises, similar to @{command "assume"} within a
   proof (cf.\ \secref{sec:proof-context}).
 
-  \item @{element "defines"}~@{text "a: x \<equiv> t"} defines a previously
+  \<^descr> @{element "defines"}~@{text "a: x \<equiv> t"} defines a previously
   declared parameter.  This is similar to @{command "def"} within a
   proof (cf.\ \secref{sec:proof-context}), but @{element "defines"}
   takes an equational proposition instead of variable-term pair.  The
   left-hand side of the equation may have additional arguments, e.g.\
   ``@{element "defines"}~@{text "f x\<^sub>1 \<dots> x\<^sub>n \<equiv> t"}''.
 
-  \item @{element "notes"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}
+  \<^descr> @{element "notes"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}
   reconsiders facts within a local context.  Most notably, this may
   include arbitrary declarations in any attribute specifications
   included here, e.g.\ a local @{attribute simp} rule.
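
  A minimal locale sketch combining some of these elements (assuming an
  Isabelle/HOL context; the names are arbitrary):

    locale semi =
      fixes prod :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infixl "\<cdot>" 70)
      assumes assoc: "(x \<cdot> y) \<cdot> z = x \<cdot> (y \<cdot> z)"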
@@ -649,25 +649,25 @@
   \secref{sec:object-logic}).  Separate introduction rules @{text
   loc_axioms.intro} and @{text loc.intro} are provided as well.
 
-  \item @{command experiment}~@{text exprs}~@{keyword "begin"} opens an
+  \<^descr> @{command experiment}~@{text exprs}~@{keyword "begin"} opens an
   anonymous locale context with private naming policy. Specifications in its
   body are inaccessible from outside. This is useful to perform experiments,
   without polluting the name space.
 
-  \item @{command "print_locale"}~@{text "locale"} prints the
+  \<^descr> @{command "print_locale"}~@{text "locale"} prints the
   contents of the named locale.  The command omits @{element "notes"}
   elements by default.  Use @{command "print_locale"}@{text "!"} to
   have them included.
 
-  \item @{command "print_locales"} prints the names of all locales of the
+  \<^descr> @{command "print_locales"} prints the names of all locales of the
   current theory; the ``@{text "!"}'' option indicates extra verbosity.
 
-  \item @{command "locale_deps"} visualizes all locales and their
+  \<^descr> @{command "locale_deps"} visualizes all locales and their
   relations as a Hasse diagram. This includes locales defined as type
   classes (\secref{sec:class}).  See also @{command
   "print_dependencies"} below.
 
-  \item @{method intro_locales} and @{method unfold_locales}
+  \<^descr> @{method intro_locales} and @{method unfold_locales}
   repeatedly expand all introduction rules of locale predicates of the
   theory.  While @{method intro_locales} only applies the @{text
   loc.intro} introduction rules and therefore does not descend to
@@ -718,7 +718,7 @@
 
   \begin{description}
 
-  \item @{command "interpretation"}~@{text "expr \<WHERE> eqns"}
+  \<^descr> @{command "interpretation"}~@{text "expr \<WHERE> eqns"}
   interprets @{text expr} in a global or local theory.  The command
   generates proof obligations for the instantiated specifications.
   Once these are discharged by the user, instantiated declarations (in
@@ -759,13 +759,13 @@
   concepts introduced through definitions.  The equations must be
   proved.
 
-  \item @{command "interpret"}~@{text "expr \<WHERE> eqns"} interprets
+  \<^descr> @{command "interpret"}~@{text "expr \<WHERE> eqns"} interprets
   @{text expr} in the proof context and is otherwise similar to
   interpretation in local theories.  Note that for @{command
   "interpret"} the @{text eqns} should be
   explicitly universally quantified.
 
-  \item @{command "sublocale"}~@{text "name \<subseteq> expr \<WHERE>
+  \<^descr> @{command "sublocale"}~@{text "name \<subseteq> expr \<WHERE>
   eqns"}
   interprets @{text expr} in the locale @{text name}.  A proof that
   the specification of @{text name} implies the specification of
@@ -798,7 +798,7 @@
   be used, but the locale argument must be omitted.  The command then
   refers to the locale (or class) target of the context block.
 
-  \item @{command "print_dependencies"}~@{text "expr"} is useful for
+  \<^descr> @{command "print_dependencies"}~@{text "expr"} is useful for
   understanding the effect of an interpretation of @{text "expr"} in
   the current context.  It lists all locale instances for which
   interpretations would be added to the current context.  Variant
@@ -808,7 +808,7 @@
   latter is useful for understanding the dependencies of a locale
   expression.
 
-  \item @{command "print_interps"}~@{text "locale"} lists all
+  \<^descr> @{command "print_interps"}~@{text "locale"} lists all
   interpretations of @{text "locale"} in the current theory or proof
   context, including those due to a combination of an @{command
   "interpretation"} or @{command "interpret"} and one or several
@@ -874,7 +874,7 @@
 
   \begin{description}
 
-  \item @{command "permanent_interpretation"}~@{text "expr \<DEFINING> defs \<WHERE> eqns"}
+  \<^descr> @{command "permanent_interpretation"}~@{text "expr \<DEFINING> defs \<WHERE> eqns"}
   interprets @{text expr} in the current local theory.  The command
   generates proof obligations for the instantiated specifications.
   Instantiated declarations (in particular, facts) are added to the
@@ -972,7 +972,7 @@
 
   \begin{description}
 
-  \item @{command "class"}~@{text "c = superclasses + body"} defines
+  \<^descr> @{command "class"}~@{text "c = superclasses + body"} defines
   a new class @{text c}, inheriting from @{text superclasses}.  This
   introduces a locale @{text c} with import of all locales @{text
   superclasses}.
@@ -990,7 +990,7 @@
   --- the @{method intro_classes} method takes care of the details of
   class membership proofs.
 
-  \item @{command "instantiation"}~@{text "t :: (s\<^sub>1, \<dots>, s\<^sub>n)s
+  \<^descr> @{command "instantiation"}~@{text "t :: (s\<^sub>1, \<dots>, s\<^sub>n)s
   \<BEGIN>"} opens a target (cf.\ \secref{sec:target}) which
   allows specifying class operations @{text "f\<^sub>1, \<dots>, f\<^sub>n"} corresponding
   to sort @{text s} at the particular type instance @{text "(\<alpha>\<^sub>1 :: s\<^sub>1,
@@ -1002,7 +1002,7 @@
   this corresponds nicely to mutually recursive type definitions, e.g.\
   in Isabelle/HOL.
 
-  \item @{command "instance"} in an instantiation target body sets
+  \<^descr> @{command "instance"} in an instantiation target body sets
   up a goal stating the type arities claimed at the opening @{command
   "instantiation"}.  The proof would usually proceed by @{method
   intro_classes}, and then establish the characteristic theorems of
@@ -1014,7 +1014,7 @@
   need to specify operations: one can continue with the
   instantiation proof immediately.
 
-  \item @{command "subclass"}~@{text c} in a class context for class
+  \<^descr> @{command "subclass"}~@{text c} in a class context for class
   @{text d} sets up a goal stating that class @{text c} is logically
   contained in class @{text d}.  After finishing the proof, class
   @{text d} is proven to be a subclass of @{text c} and the locale @{text
@@ -1027,10 +1027,10 @@
   the logical connection are not sufficient on the locale level but on
   the theory level.
 
-  \item @{command "print_classes"} prints all classes in the current
+  \<^descr> @{command "print_classes"} prints all classes in the current
   theory.
 
-  \item @{command "class_deps"} visualizes classes and their subclass
+  \<^descr> @{command "class_deps"} visualizes classes and their subclass
   relations as a directed acyclic graph. By default, all classes from the
   current theory context are shown. This may be restricted by optional bounds
   as follows: @{command "class_deps"}~@{text upper} or @{command
@@ -1038,7 +1038,7 @@
   subclass of some sort from @{text upper} and a superclass of some sort
   from @{text lower}.
 
-  \item @{method intro_classes} repeatedly expands all class
+  \<^descr> @{method intro_classes} repeatedly expands all class
   introduction rules of this theory.  Note that this method usually
   need not be named explicitly, as it is already included in the
   default proof step (e.g.\ of @{command "proof"}).  In particular,
@@ -1143,7 +1143,7 @@
 
   \begin{description}
 
-  \item @{command "overloading"}~@{text "x\<^sub>1 \<equiv> c\<^sub>1 :: \<tau>\<^sub>1 \<AND> \<dots> x\<^sub>n \<equiv> c\<^sub>n :: \<tau>\<^sub>n \<BEGIN>"}
+  \<^descr> @{command "overloading"}~@{text "x\<^sub>1 \<equiv> c\<^sub>1 :: \<tau>\<^sub>1 \<AND> \<dots> x\<^sub>n \<equiv> c\<^sub>n :: \<tau>\<^sub>n \<BEGIN>"}
   opens a theory target (cf.\ \secref{sec:target}) which allows one to
   specify constants with overloaded definitions.  These are identified
   by an explicitly given mapping from variable names @{text "x\<^sub>i"} to
@@ -1194,7 +1194,7 @@
 
   \begin{description}
 
-  \item @{command "SML_file"}~@{text "name"} reads and evaluates the
+  \<^descr> @{command "SML_file"}~@{text "name"} reads and evaluates the
   given Standard ML file.  Top-level SML bindings are stored within
   the theory context; the initial environment is restricted to the
   Standard ML implementation of Poly/ML, without the many add-ons of
@@ -1202,41 +1202,41 @@
   build larger Standard ML projects, independently of the regular
   Isabelle/ML environment.
 
-  \item @{command "ML_file"}~@{text "name"} reads and evaluates the
+  \<^descr> @{command "ML_file"}~@{text "name"} reads and evaluates the
   given ML file.  The current theory context is passed down to the ML
   toplevel and may be modified, using @{ML "Context.>>"} or derived ML
   commands.  Top-level ML bindings are stored within the (global or
   local) theory context.
   
-  \item @{command "ML"}~@{text "text"} is similar to @{command
+  \<^descr> @{command "ML"}~@{text "text"} is similar to @{command
   "ML_file"}, but evaluates directly the given @{text "text"}.
   Top-level ML bindings are stored within the (global or local) theory
   context.
 
-  \item @{command "ML_prf"} is analogous to @{command "ML"} but works
+  \<^descr> @{command "ML_prf"} is analogous to @{command "ML"} but works
   within a proof context.  Top-level ML bindings are stored within the
   proof context in a purely sequential fashion, disregarding the
   nested proof structure.  ML bindings introduced by @{command
   "ML_prf"} are discarded at the end of the proof.
 
-  \item @{command "ML_val"} and @{command "ML_command"} are diagnostic
+  \<^descr> @{command "ML_val"} and @{command "ML_command"} are diagnostic
   versions of @{command "ML"}, which means that the context may not be
   updated.  @{command "ML_val"} echos the bindings produced at the ML
   toplevel, but @{command "ML_command"} is silent.
   
-  \item @{command "setup"}~@{text "text"} changes the current theory
+  \<^descr> @{command "setup"}~@{text "text"} changes the current theory
   context by applying @{text "text"}, which refers to an ML expression
   of type @{ML_type "theory -> theory"}.  This enables to initialize
   any object-logic specific tools and packages written in ML, for
   example.
 
-  \item @{command "local_setup"} is similar to @{command "setup"} for
+  \<^descr> @{command "local_setup"} is similar to @{command "setup"} for
   a local theory context, and an ML expression of type @{ML_type
   "local_theory -> local_theory"}.  This allows to
   invoke local theory specification packages without going through
   concrete outer syntax, for example.
 
-  \item @{command "attribute_setup"}~@{text "name = text description"}
+  \<^descr> @{command "attribute_setup"}~@{text "name = text description"}
   defines an attribute in the current context.  The given @{text
   "text"} has to be an ML expression of type
   @{ML_type "attribute context_parser"}, cf.\ basic parsers defined in
@@ -1268,16 +1268,16 @@
 text \<open>
   \begin{description}
 
-  \item @{attribute ML_print_depth} controls the printing depth of the ML
+  \<^descr> @{attribute ML_print_depth} controls the printing depth of the ML
   toplevel pretty printer; the precise effect depends on the ML compiler and
   run-time system. Typically the limit should be less than 10. Bigger values
   such as 100--1000 are occasionally useful for debugging.
 
-  \item @{attribute ML_source_trace} indicates whether the source text that
+  \<^descr> @{attribute ML_source_trace} indicates whether the source text that
   is given to the ML compiler should be output: it shows the raw Standard ML
   after expansion of Isabelle/ML antiquotations.
 
-  \item @{attribute ML_exception_trace} indicates whether the ML run-time
+  \<^descr> @{attribute ML_exception_trace} indicates whether the ML run-time
   system should print a detailed stack trace on exceptions. The result is
   dependent on the particular ML compiler version. Note that after Poly/ML
   5.3 some optimizations in the run-time systems may hinder exception
@@ -1308,7 +1308,7 @@
 
   \begin{description}
 
-  \item @{command "default_sort"}~@{text s} makes sort @{text s} the
+  \<^descr> @{command "default_sort"}~@{text s} makes sort @{text s} the
   new default sort for any type variable that is given explicitly in
   the text, but lacks a sort constraint (wrt.\ the current context).
   Type variables generated by type inference are not affected.
@@ -1341,13 +1341,13 @@
 
   \begin{description}
 
-  \item @{command "type_synonym"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>"} introduces a
+  \<^descr> @{command "type_synonym"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>"} introduces a
   \emph{type synonym} @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} for the existing type @{text
   "\<tau>"}. Unlike the semantic type definitions in Isabelle/HOL, type synonyms
   are merely syntactic abbreviations without any logical significance.
   Internally, type synonyms are fully expanded.
   
-  \item @{command "typedecl"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} declares a new
+  \<^descr> @{command "typedecl"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} declares a new
   type constructor @{text t}.  If the object-logic defines a base sort
   @{text s}, then the constructor is declared to operate on that, via
   the axiomatic type-class instance @{text "t :: (s, \<dots>, s)s"}.
@@ -1424,12 +1424,12 @@
 
   \begin{description}
 
-  \item @{command "consts"}~@{text "c :: \<sigma>"} declares constant @{text
+  \<^descr> @{command "consts"}~@{text "c :: \<sigma>"} declares constant @{text
   c} to have any instance of type scheme @{text \<sigma>}.  The optional
   mixfix annotations may attach concrete syntax to the constants
   declared.
   
-  \item @{command "defs"}~@{text "name: eqn"} introduces @{text eqn}
+  \<^descr> @{command "defs"}~@{text "name: eqn"} introduces @{text eqn}
   as a definitional axiom for some existing constant.
   
   The @{text "(unchecked)"} option disables global dependency checks
@@ -1463,13 +1463,13 @@
 
   \begin{description}
   
-  \item @{command "lemmas"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}~@{keyword_def
+  \<^descr> @{command "lemmas"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}~@{keyword_def
   "for"}~@{text "x\<^sub>1 \<dots> x\<^sub>m"} evaluates given facts (with attributes) in
   the current context, which may be augmented by local variables.
   Results are standardized before being stored, i.e.\ schematic
   variables are renamed to enforce index @{text "0"} uniformly.
 
-  \item @{command "named_theorems"}~@{text "name description"} declares a
+  \<^descr> @{command "named_theorems"}~@{text "name description"} declares a
   dynamic fact within the context. The same @{text name} is used to define
   an attribute with the usual @{text add}/@{text del} syntax (e.g.\ see
   \secref{sec:simp-rules}) to maintain the content incrementally, in
@@ -1507,7 +1507,7 @@
 
   \begin{description}
 
-  \item @{command "oracle"}~@{text "name = text"} turns the given ML
+  \<^descr> @{command "oracle"}~@{text "name = text"} turns the given ML
   expression @{text "text"} of type @{ML_text "'a -> cterm"} into an
   ML function of type @{ML_text "'a -> thm"}, which is bound to the
   global identifier @{ML_text name}.  This acts like an infinitary
@@ -1545,7 +1545,7 @@
 
   \begin{description}
 
-  \item @{command "hide_class"}~@{text names} fully removes class
+  \<^descr> @{command "hide_class"}~@{text names} fully removes class
   declarations from a given name space; with the @{text "(open)"}
   option, only the unqualified base name is hidden.
 
@@ -1554,7 +1554,7 @@
   longer accessible to the user are printed with the special qualifier
   ``@{text "??"}'' prefixed to the full internal name.
 
-  \item @{command "hide_type"}, @{command "hide_const"}, and @{command
+  \<^descr> @{command "hide_type"}, @{command "hide_const"}, and @{command
   "hide_fact"} are similar to @{command "hide_class"}, but hide types,
   constants, and facts, respectively.
   
--- a/src/Doc/JEdit/JEdit.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/JEdit/JEdit.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -20,32 +20,32 @@
 
   \begin{description}
 
-  \item [PIDE] is a general framework for Prover IDEs based on Isabelle/Scala.
+  \<^descr>[PIDE] is a general framework for Prover IDEs based on Isabelle/Scala.
   It is built around a concept of parallel and asynchronous document
   processing, which is supported natively by the parallel proof engine that is
   implemented in Isabelle/ML. The traditional prover command loop is given up;
   instead there is direct support for editing of source text, with rich formal
   markup for GUI rendering.
 
-  \item [Isabelle/ML] is the implementation and extension language of
+  \<^descr>[Isabelle/ML] is the implementation and extension language of
   Isabelle, see also @{cite "isabelle-implementation"}. It is integrated
   into the logical context of Isabelle/Isar and allows one to manipulate
   logical entities directly. Arbitrary add-on tools may be implemented
   for object-logics such as Isabelle/HOL.
 
-  \item [Isabelle/Scala] is the system programming language of
+  \<^descr>[Isabelle/Scala] is the system programming language of
   Isabelle. It extends the pure logical environment of Isabelle/ML
   towards the ``real world'' of graphical user interfaces, text
   editors, IDE frameworks, web services etc.  Special infrastructure
   allows one to transfer algebraic datatypes and formatted text easily
   between ML and Scala, using asynchronous protocol commands.
 
-  \item [jEdit] is a sophisticated text editor implemented in
+  \<^descr>[jEdit] is a sophisticated text editor implemented in
   Java.\footnote{@{url "http://www.jedit.org"}} It is easily extensible
   by plugins written in languages that work on the JVM, e.g.\
   Scala\footnote{@{url "http://www.scala-lang.org"}}.
 
-  \item [Isabelle/jEdit] is the main example application of the PIDE
+  \<^descr>[Isabelle/jEdit] is the main example application of the PIDE
   framework and the default user-interface for Isabelle. It targets
   both beginners and experts. Technically, Isabelle/jEdit combines a
   slightly modified version of the jEdit code base with a special
@@ -288,7 +288,7 @@
 
   \begin{description}
 
-  \item[Linux:] The platform-independent \emph{Nimbus} is used by
+  \<^descr>[Linux:] The platform-independent \emph{Nimbus} is used by
   default.
 
   \emph{GTK+} also works under the side-condition that the overall GTK theme
@@ -300,10 +300,10 @@
   ``4K'' or ``UHD'' models), because the rendering by the external library is
   subject to global system settings for font scaling.}
 
-  \item[Windows:] Regular \emph{Windows} is used by default, but
+  \<^descr>[Windows:] Regular \emph{Windows} is used by default, but
   \emph{Windows Classic} also works.
 
-  \item[Mac OS X:] Regular \emph{Mac OS X} is used by default.
+  \<^descr>[Mac OS X:] Regular \emph{Mac OS X} is used by default.
 
   The bundled \emph{MacOSX} plugin provides various functions that are
   expected from applications on that particular platform: quit from menu or
@@ -1156,14 +1156,14 @@
 
   \begin{description}
 
-  \item[] @{verbatim "`"} (single ASCII back-quote) supports \emph{quotations}
+  \<^descr> @{verbatim "`"} (single ASCII back-quote) supports \emph{quotations}
   via text cartouches. There are three selections, which are always presented
   in the same order and do not depend on any context information. The default
   choice produces a template ``@{text "\<open>\<box>\<close>"}'', where the box indicates the
   cursor position after insertion; the other choices help to repair the block
   structure of unbalanced text cartouches.
 
-  \item[] @{verbatim "@{"} is completed to the template ``@{text "@{\<box>}"}'',
+  \<^descr> @{verbatim "@{"} is completed to the template ``@{text "@{\<box>}"}'',
   where the box indicates the cursor position after insertion. Here it is
   convenient to use the wildcard ``@{verbatim __}'' or a more specific name
   prefix to let semantic completion of name-space entries propose
@@ -1344,18 +1344,18 @@
 
   \begin{description}
 
-  \item[Explicit completion] works via action @{action_ref
+  \<^descr>[Explicit completion] works via action @{action_ref
   "isabelle.complete"} with keyboard shortcut @{verbatim "C+b"}. This
   overrides the shortcut for @{action_ref "complete-word"} in jEdit, but it is
   possible to restore the original jEdit keyboard mapping of @{action
   "complete-word"} via \emph{Global Options~/ Shortcuts} and invent a
   different one for @{action "isabelle.complete"}.
 
-  \item[Explicit spell-checker completion] works via @{action_ref
+  \<^descr>[Explicit spell-checker completion] works via @{action_ref
   "isabelle.complete-word"}, which is exposed in the jEdit context menu, if
   the mouse points to a word that the spell-checker can complete.
 
-  \item[Implicit completion] works via regular keyboard input of the editor.
+  \<^descr>[Implicit completion] works via regular keyboard input of the editor.
   It depends on further side-conditions:
 
   \begin{enumerate}
@@ -1436,15 +1436,15 @@
 
   \begin{description}
 
-  \item[No selection.] The original is removed and the replacement inserted,
+  \<^descr>[No selection.] The original is removed and the replacement inserted,
   depending on the caret position.
 
-  \item[Rectangular selection of zero width.] This special case is treated by
+  \<^descr>[Rectangular selection of zero width.] This special case is treated by
   jEdit as ``tall caret'' and insertion of completion imitates its normal
   behaviour: separate copies of the replacement are inserted for each line of
   the selection.
 
-  \item[Other rectangular selection or multiple selections.] Here the original
+  \<^descr>[Other rectangular selection or multiple selections.] Here the original
   is removed and the replacement is inserted for each line (or segment) of the
   selection.
 
--- a/src/Doc/System/Basics.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/System/Basics.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -150,7 +150,7 @@
 
   \begin{description}
 
-  \item[@{setting_def USER_HOME}@{text "\<^sup>*"}] Is the cross-platform
+  \<^descr>[@{setting_def USER_HOME}@{text "\<^sup>*"}] is the cross-platform
   user home directory.  On Unix systems this is usually the same as
   @{setting HOME}, but on Windows it is the regular home directory of
   the user, not the one within the Cygwin root
@@ -158,12 +158,12 @@
   its HOME should point to the @{file_unchecked "/home"} directory tree or the
   Windows user home.}
 
-  \item[@{setting_def ISABELLE_HOME}@{text "\<^sup>*"}] is the location of the
+  \<^descr>[@{setting_def ISABELLE_HOME}@{text "\<^sup>*"}] is the location of the
   top-level Isabelle distribution directory. This is automatically
   determined from the Isabelle executable that has been invoked.  Do
   not attempt to set @{setting ISABELLE_HOME} yourself from the shell!
   
-  \item[@{setting_def ISABELLE_HOME_USER}] is the user-specific
+  \<^descr>[@{setting_def ISABELLE_HOME_USER}] is the user-specific
   counterpart of @{setting ISABELLE_HOME}. The default value is
   relative to @{file_unchecked "$USER_HOME/.isabelle"}; under rare
   circumstances this may be changed in the global setting file.
@@ -172,21 +172,21 @@
   defaults may be overridden by a private @{verbatim
   "$ISABELLE_HOME_USER/etc/settings"}.
 
-  \item[@{setting_def ISABELLE_PLATFORM_FAMILY}@{text "\<^sup>*"}] is
+  \<^descr>[@{setting_def ISABELLE_PLATFORM_FAMILY}@{text "\<^sup>*"}] is
   automatically set to the general platform family: @{verbatim linux},
   @{verbatim macos}, @{verbatim windows}.  Note that
   platform-dependent tools usually need to refer to the more specific
   identification according to @{setting ISABELLE_PLATFORM}, @{setting
   ISABELLE_PLATFORM32}, @{setting ISABELLE_PLATFORM64}.
 
-  \item[@{setting_def ISABELLE_PLATFORM}@{text "\<^sup>*"}] is automatically
+  \<^descr>[@{setting_def ISABELLE_PLATFORM}@{text "\<^sup>*"}] is automatically
   set to a symbolic identifier for the underlying hardware and
   operating system.  The Isabelle platform identification always
   refers to the 32 bit variant, even if this is a 64 bit machine.  Note
   that the ML or Java runtime may have a different idea, depending on
   which binaries are actually run.
 
-  \item[@{setting_def ISABELLE_PLATFORM64}@{text "\<^sup>*"}] is similar to
+  \<^descr>[@{setting_def ISABELLE_PLATFORM64}@{text "\<^sup>*"}] is similar to
   @{setting ISABELLE_PLATFORM} but refers to the proper 64 bit variant
   on a platform that supports this; the value is empty for 32 bit.
   Note that the following bash expression (including the quotes)
@@ -194,18 +194,18 @@
 
   @{verbatim [display] \<open>"${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM}"\<close>}
 
-  \item[@{setting_def ISABELLE_PROCESS}@{text "\<^sup>*"}, @{setting
+  \<^descr>[@{setting_def ISABELLE_PROCESS}@{text "\<^sup>*"}, @{setting
   ISABELLE_TOOL}@{text "\<^sup>*"}] are automatically set to the full path
   names of the @{executable "isabelle_process"} and @{executable
   isabelle} executables, respectively.  Thus other tools and scripts
   need not assume that the @{file "$ISABELLE_HOME/bin"} directory is
   on the current search path of the shell.
   
-  \item[@{setting_def ISABELLE_IDENTIFIER}@{text "\<^sup>*"}] refers
+  \<^descr>[@{setting_def ISABELLE_IDENTIFIER}@{text "\<^sup>*"}] refers
   to the name of this Isabelle distribution, e.g.\ ``@{verbatim
   Isabelle2012}''.
 
-  \item[@{setting_def ML_SYSTEM}, @{setting_def ML_HOME},
+  \<^descr>[@{setting_def ML_SYSTEM}, @{setting_def ML_HOME},
   @{setting_def ML_OPTIONS}, @{setting_def ML_PLATFORM}, @{setting_def
   ML_IDENTIFIER}@{text "\<^sup>*"}] specify the underlying ML system
   to be used for Isabelle.  There is only a fixed set of admissible
@@ -220,60 +220,60 @@
   automatically obtained by composing the values of @{setting
   ML_SYSTEM}, @{setting ML_PLATFORM} and the Isabelle version values.
 
-  \item[@{setting_def ML_SYSTEM_POLYML}@{text "\<^sup>*"}] is @{verbatim true}
+  \<^descr>[@{setting_def ML_SYSTEM_POLYML}@{text "\<^sup>*"}] is @{verbatim true}
   for @{setting ML_SYSTEM} values derived from Poly/ML, as opposed to
   SML/NJ where it is empty.  This is particularly useful with the
   build option @{system_option condition}
   (\secref{sec:system-options}) to restrict big sessions to something
   that SML/NJ can still handle.
 
-  \item[@{setting_def ISABELLE_JDK_HOME}] needs to point to a full JDK
+  \<^descr>[@{setting_def ISABELLE_JDK_HOME}] needs to point to a full JDK
   (Java Development Kit) installation with @{verbatim javac} and
   @{verbatim jar} executables.  This is essential for Isabelle/Scala
   and other JVM-based tools to work properly.  Note that conventional
   @{verbatim JAVA_HOME} usually points to the JRE (Java Runtime
   Environment), not JDK.
   
-  \item[@{setting_def ISABELLE_PATH}] is a list of directories
+  \<^descr>[@{setting_def ISABELLE_PATH}] is a list of directories
   (separated by colons) where Isabelle logic images may reside.  When
   looking up heap files, the value of @{setting ML_IDENTIFIER} is
   appended to each component internally.
   
-  \item[@{setting_def ISABELLE_OUTPUT}@{text "\<^sup>*"}] is a
+  \<^descr>[@{setting_def ISABELLE_OUTPUT}@{text "\<^sup>*"}] is a
   directory where output heap files should be stored by default. The
   ML system and Isabelle version identifiers are appended here, too.
   
-  \item[@{setting_def ISABELLE_BROWSER_INFO}] is the directory where
+  \<^descr>[@{setting_def ISABELLE_BROWSER_INFO}] is the directory where
   theory browser information (HTML text, graph data, and printable
   documents) is stored (see also \secref{sec:info}).  The default
   value is @{file_unchecked "$ISABELLE_HOME_USER/browser_info"}.
   
-  \item[@{setting_def ISABELLE_LOGIC}] specifies the default logic to
+  \<^descr>[@{setting_def ISABELLE_LOGIC}] specifies the default logic to
   load if none is given explicitly by the user.  The default value is
   @{verbatim HOL}.
   
-  \item[@{setting_def ISABELLE_LINE_EDITOR}] specifies the
+  \<^descr>[@{setting_def ISABELLE_LINE_EDITOR}] specifies the
   line editor for the @{tool_ref console} interface.
 
-  \item[@{setting_def ISABELLE_LATEX}, @{setting_def
+  \<^descr>[@{setting_def ISABELLE_LATEX}, @{setting_def
   ISABELLE_PDFLATEX}, @{setting_def ISABELLE_BIBTEX}] refer to {\LaTeX}
   related tools for Isabelle document preparation (see also
   \secref{sec:tool-latex}).
   
-  \item[@{setting_def ISABELLE_TOOLS}] is a colon separated list of
+  \<^descr>[@{setting_def ISABELLE_TOOLS}] is a colon separated list of
   directories that are scanned by @{executable isabelle} for external
   utility programs (see also \secref{sec:isabelle-tool}).
   
-  \item[@{setting_def ISABELLE_DOCS}] is a colon separated list of
+  \<^descr>[@{setting_def ISABELLE_DOCS}] is a colon separated list of
   directories with documentation files.
 
-  \item[@{setting_def PDF_VIEWER}] specifies the program to be used
+  \<^descr>[@{setting_def PDF_VIEWER}] specifies the program to be used
   for displaying @{verbatim pdf} files.
 
-  \item[@{setting_def DVI_VIEWER}] specifies the program to be used
+  \<^descr>[@{setting_def DVI_VIEWER}] specifies the program to be used
   for displaying @{verbatim dvi} files.
   
-  \item[@{setting_def ISABELLE_TMP_PREFIX}@{text "\<^sup>*"}] is the
+  \<^descr>[@{setting_def ISABELLE_TMP_PREFIX}@{text "\<^sup>*"}] is the
   prefix from which any running @{executable "isabelle_process"}
   derives an individual directory for temporary files.
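  As a sketch only, several of the settings above may be overridden in a
  private @{file_unchecked "$ISABELLE_HOME_USER/etc/settings"} file, which is
  an ordinary bash script; the concrete values shown here are purely
  illustrative:

  @{verbatim [display] \<open>ISABELLE_LOGIC=HOL
ISABELLE_LINE_EDITOR=rlwrap
PDF_VIEWER=evince\<close>}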
   
--- a/src/Doc/System/Misc.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/System/Misc.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -126,17 +126,17 @@
 
   \begin{description}
 
-  \item[Open \dots] Open a new graph file.
+  \<^descr>[Open \dots] Open a new graph file.
 
-  \item[Export to PostScript] Outputs the current graph in Postscript
+  \<^descr>[Export to PostScript] Outputs the current graph in Postscript
   format, appropriately scaled to fit on a single sheet of A4 paper.
   The resulting file can be printed directly.
 
-  \item[Export to EPS] Outputs the current graph in Encapsulated
+  \<^descr>[Export to EPS] Outputs the current graph in Encapsulated
   Postscript format. The resulting file can be included in other
   documents.
 
-  \item[Quit] Quit the graph browser.
+  \<^descr>[Quit] Quit the graph browser.
 
   \end{description}
 \<close>
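  For example, the graph browser may be started from the command line on a
  previously generated graph file; the file name @{verbatim Pure.graph} is
  merely an assumption for illustration:

  @{verbatim [display] \<open>isabelle browser Pure.graph\<close>}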
@@ -158,21 +158,21 @@
 
   \begin{description}
 
-  \item[@{text vertex_name}] The name of the vertex.
+  \<^descr>[@{text vertex_name}] The name of the vertex.
 
-  \item[@{text vertex_ID}] The vertex identifier. Note that there may
+  \<^descr>[@{text vertex_ID}] The vertex identifier. Note that there may
   be several vertices with equal names, whereas identifiers must be
   unique.
 
-  \item[@{text dir_name}] The name of the ``directory'' the vertex
+  \<^descr>[@{text dir_name}] The name of the ``directory'' the vertex
   should be placed in.  A ``@{verbatim "+"}'' sign after @{text
   dir_name} indicates that the nodes in the directory are initially
   visible. Directories are initially invisible by default.
 
-  \item[@{text path}] The path of the corresponding theory file. This
+  \<^descr>[@{text path}] The path of the corresponding theory file. This
   is specified relative to the path of the graph definition file.
 
-  \item[List of successor/predecessor nodes] A ``@{verbatim "<"}''
+  \<^descr>[List of successor/predecessor nodes] A ``@{verbatim "<"}''
   sign before the list means that successor nodes are listed, while a
   ``@{verbatim ">"}'' sign means that predecessor nodes are listed. If
   neither ``@{verbatim "<"}'' nor ``@{verbatim ">"}'' is found, the
--- a/src/Doc/System/Sessions.thy	Wed Oct 14 15:06:42 2015 +0200
+++ b/src/Doc/System/Sessions.thy	Wed Oct 14 15:10:32 2015 +0200
@@ -79,7 +79,7 @@
 
   \begin{description}
 
-  \item \isakeyword{session}~@{text "A = B + body"} defines a new
+  \<^descr> \isakeyword{session}~@{text "A = B + body"} defines a new
   session @{text "A"} based on parent session @{text "B"}, with its
   content given in @{text body} (theories and auxiliary source files).
   Note that a parent (like @{text "HOL"}) is mandatory in practical
@@ -90,7 +90,7 @@
   @{text "A"} should be sufficiently long and descriptive to stand on
   its own in a potentially large library.
 
-  \item \isakeyword{session}~@{text "A (groups)"} indicates a
+  \<^descr> \isakeyword{session}~@{text "A (groups)"} indicates a
   collection of groups where the new session is a member.  Group names
   are uninterpreted and merely follow certain conventions.  For
   example, the Isabelle distribution tags some important sessions by
@@ -98,7 +98,7 @@
   their own conventions, but this requires some care to avoid clashes
   within this unchecked name space.
 
-  \item \isakeyword{session}~@{text "A"}~\isakeyword{in}~@{text "dir"}
+  \<^descr> \isakeyword{session}~@{text "A"}~\isakeyword{in}~@{text "dir"}
   specifies an explicit directory for this session; by default this is
   the current directory of the @{verbatim ROOT} file.
 
@@ -106,30 +106,30 @@
   the session directory.  The prover process is run with this same
   directory as its current working directory.
 
-  \item \isakeyword{description}~@{text "text"} is a free-form
+  \<^descr> \isakeyword{description}~@{text "text"} is a free-form
   annotation for this session.
 
-  \item \isakeyword{options}~@{text "[x = a, y = b, z]"} defines
+  \<^descr> \isakeyword{options}~@{text "[x = a, y = b, z]"} defines
   separate options (\secref{sec:system-options}) that are used when
   processing this session, but \emph{without} propagation to child
   sessions.  Note that @{text "z"} abbreviates @{text "z = true"} for
   Boolean options.
 
-  \item \isakeyword{theories}~@{text "options names"} specifies a
+  \<^descr> \isakeyword{theories}~@{text "options names"} specifies a
   block of theories that are processed within an environment that is
   augmented by the given options, in addition to the global session
   options given before.  Any number of blocks of \isakeyword{theories}
   may be given.  Options are only active for each
   \isakeyword{theories} block separately.
 
-  \item \isakeyword{files}~@{text "files"} lists additional source
+  \<^descr> \isakeyword{files}~@{text "files"} lists additional source
   files that are involved in the processing of this session.  This
   should cover anything outside the formal content of the theory
   sources.  In contrast, files that are loaded formally
   within a theory, e.g.\ via @{command "ML_file"}, need not be
   declared again.
 
-  \item \isakeyword{document_files}~@{text "("}\isakeyword{in}~@{text
+  \<^descr> \isakeyword{document_files}~@{text "("}\isakeyword{in}~@{text
   "base_dir) files"} lists source files for document preparation,
   typically @{verbatim ".tex"} and @{verbatim ".sty"} for {\LaTeX}.
   Only these explicitly given files are copied from the base directory
@@ -138,7 +138,7 @@
   structure of the @{text files} is preserved, which makes it possible to
   reconstruct the original directory hierarchy of @{text "base_dir"}.
 
-  \item \isakeyword{document_files}~@{text "files"} abbreviates
+  \<^descr> \isakeyword{document_files}~@{text "files"} abbreviates
   \isakeyword{document_files}~@{text "("}\isakeyword{in}~@{text
   "document) files"}, i.e.\ document sources are taken from the base
   directory @{verbatim document} within the session root directory.
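  Putting the above elements together, a minimal @{verbatim ROOT} entry might
  look as follows; the session and theory names are hypothetical:

  @{verbatim [display] \<open>session Example (main) = HOL +
  options [document = pdf]
  theories
    Example_Theory
  document_files
    "root.tex"\<close>}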