more symbols;
author wenzelm
date Mon, 12 Oct 2015 22:03:24 +0200
changeset 61421 e0825405d398
parent 61420 ee42cba50933
child 61422 0dfcd0fb4172
src/Doc/Isar_Ref/Document_Preparation.thy
src/Doc/Isar_Ref/First_Order_Logic.thy
src/Doc/Isar_Ref/Framework.thy
src/Doc/Isar_Ref/Generic.thy
src/Doc/Isar_Ref/HOL_Specific.thy
src/Doc/Isar_Ref/Inner_Syntax.thy
src/Doc/Isar_Ref/Outer_Syntax.thy
src/Doc/Isar_Ref/Preface.thy
src/Doc/Isar_Ref/Proof.thy
src/Doc/Isar_Ref/Quick_Reference.thy
src/Doc/Isar_Ref/Spec.thy
src/Doc/Isar_Ref/Synopsis.thy
--- a/src/Doc/Isar_Ref/Document_Preparation.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Document_Preparation.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -75,7 +75,8 @@
   antiquotations}, see also \secref{sec:antiq}. These are interpreted in the
   present theory or proof context.
 
-  \medskip The proof markup commands closely resemble those for theory
+  \<^medskip>
+  The proof markup commands closely resemble those for theory
   specifications, but have a different formal status and produce
   different {\LaTeX} macros.
 \<close>
@@ -426,14 +427,15 @@
   Some tags are pre-declared for certain classes of commands, serving
   as default markup if no tags are given in the text:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{ll}
     @{text "theory"} & theory begin/end \\
     @{text "proof"} & all proof commands \\
     @{text "ML"} & all commands involving ML code \\
   \end{tabular}
+  \<^medskip>
 
-  \medskip The Isabelle document preparation system
+  The Isabelle document preparation system
   @{cite "isabelle-system"} allows tagged command regions to be presented
   specifically, e.g.\ to fold proof texts, or drop parts of the text
   completely.
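
For example, a command region may be tagged directly in the theory source, so that the generated {\LaTeX} uses the corresponding markup environment. A minimal sketch, assuming the pre-declared @{text invisible} tag is interpreted by the document setup to fold the proof:

  lemma True
  proof %invisible
    show True ..
  qed
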
@@ -451,7 +453,8 @@
   sub-proof to be typeset as @{text visible} (unless some of its parts
   are tagged differently).
 
-  \medskip Command tags merely produce certain markup environments for
+  \<^medskip>
+  Command tags merely produce certain markup environments for
   type-setting.  The meaning of these is determined by {\LaTeX}
   macros, as defined in @{file "~~/lib/texinputs/isabelle.sty"} or
   by the document author.  The Isabelle document preparation tools
@@ -513,62 +516,62 @@
 
   \begin{itemize}
 
-  \item Empty @{verbatim "()"}
+  \<^item> Empty @{verbatim "()"}
 
   @{rail \<open>()\<close>}
 
-  \item Nonterminal @{verbatim "A"}
+  \<^item> Nonterminal @{verbatim "A"}
 
   @{rail \<open>A\<close>}
 
-  \item Nonterminal via Isabelle antiquotation
+  \<^item> Nonterminal via Isabelle antiquotation
   @{verbatim "@{syntax method}"}
 
   @{rail \<open>@{syntax method}\<close>}
 
-  \item Terminal @{verbatim "'xyz'"}
+  \<^item> Terminal @{verbatim "'xyz'"}
 
   @{rail \<open>'xyz'\<close>}
 
-  \item Terminal in keyword style @{verbatim "@'xyz'"}
+  \<^item> Terminal in keyword style @{verbatim "@'xyz'"}
 
   @{rail \<open>@'xyz'\<close>}
 
-  \item Terminal via Isabelle antiquotation
+  \<^item> Terminal via Isabelle antiquotation
   @{verbatim "@@{method rule}"}
 
   @{rail \<open>@@{method rule}\<close>}
 
-  \item Concatenation @{verbatim "A B C"}
+  \<^item> Concatenation @{verbatim "A B C"}
 
   @{rail \<open>A B C\<close>}
 
-  \item Newline inside concatenation
+  \<^item> Newline inside concatenation
   @{verbatim "A B C \<newline> D E F"}
 
   @{rail \<open>A B C \<newline> D E F\<close>}
 
-  \item Variants @{verbatim "A | B | C"}
+  \<^item> Variants @{verbatim "A | B | C"}
 
   @{rail \<open>A | B | C\<close>}
 
-  \item Option @{verbatim "A ?"}
+  \<^item> Option @{verbatim "A ?"}
 
   @{rail \<open>A ?\<close>}
 
-  \item Repetition @{verbatim "A *"}
+  \<^item> Repetition @{verbatim "A *"}
 
   @{rail \<open>A *\<close>}
 
-  \item Repetition with separator @{verbatim "A * sep"}
+  \<^item> Repetition with separator @{verbatim "A * sep"}
 
   @{rail \<open>A * sep\<close>}
 
-  \item Strict repetition @{verbatim "A +"}
+  \<^item> Strict repetition @{verbatim "A +"}
 
   @{rail \<open>A +\<close>}
 
-  \item Strict repetition with separator @{verbatim "A + sep"}
+  \<^item> Strict repetition with separator @{verbatim "A + sep"}
 
   @{rail \<open>A + sep\<close>}
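
These constructs may be combined freely. For instance, the syntax diagram of the @{method rule} method elsewhere in this manual results from an antiquotation along these lines (a sketch):

  @{rail \<open>@@{method rule} @{syntax thmrefs}?\<close>}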
 
--- a/src/Doc/Isar_Ref/First_Order_Logic.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/First_Order_Logic.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -231,7 +231,8 @@
   ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
   the original axiomatization of @{thm disjE}.
 
-  \medskip We continue propositional logic by introducing absurdity
+  \<^medskip>
+  We continue propositional logic by introducing absurdity
   with its characteristic elimination.  Plain truth may then be
   defined as a proposition that is trivially true.
 \<close>
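
In Isar texts, such an elimination is typically invoked by chaining the disjunction into a claim and splitting into cases. A schematic sketch, with @{text "\<langle>proof\<rangle>"} as placeholder and assuming the elimination rule is declared so that the initial @{command proof} step picks it up:

  assume "A \<or> B"
  then have C
  proof
    assume A
    show C \<langle>proof\<rangle>
  next
    assume B
    show C \<langle>proof\<rangle>
  qed
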
@@ -248,8 +249,8 @@
   unfolding true_def ..
 
 text \<open>
-  \medskip Now negation represents an implication towards
-  absurdity:
+  \<^medskip>
+  Now negation represents an implication towards absurdity:
 \<close>
 
 definition
@@ -374,7 +375,7 @@
   can now be summarized as follows, using the native Isar statement
   format of \secref{sec:framework-stmt}.
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{l}
   @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
   @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
@@ -398,7 +399,7 @@
   @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
   @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   This essentially provides a declarative reading of Pure
  rules as Isar reasoning patterns: the rule statements tell how a
@@ -511,7 +512,8 @@
 (*>*)
 
 text \<open>
-  \bigskip Of course, these proofs are merely examples.  As
+  \<^bigskip>
+  Of course, these proofs are merely examples.  As
   sketched in \secref{sec:framework-subproof}, there is a fair amount
   of flexibility in expressing Pure deductions in Isar.  Here the user
   is asked to express himself adequately, aiming at proof texts of
--- a/src/Doc/Isar_Ref/Framework.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Framework.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -52,7 +52,8 @@
   @{cite "isabelle-ZF"} is less extensively developed, although it would
   probably fit better for classical mathematics.
 
-  \medskip In order to illustrate natural deduction in Isar, we shall
+  \<^medskip>
+  In order to illustrate natural deduction in Isar, we shall
   refer to the background theory and library of Isabelle/HOL.  This
   includes common notions of predicate logic, naive set-theory etc.\
   using fairly standard mathematical notation.  From the perspective
@@ -69,7 +70,8 @@
   examples shall refer to set-theory, to minimize the danger of
   understanding connectives of predicate logic as something special.
 
-  \medskip The following deduction performs @{text "\<inter>"}-introduction,
+  \<^medskip>
+  The following deduction performs @{text "\<inter>"}-introduction,
   working forwards from assumptions towards the conclusion.  We give
   both the Isar text, and depict the primitive rule involved, as
   determined by unification of the problem against rules that are
@@ -97,7 +99,8 @@
 text_raw \<open>\end{minipage}\<close>
 
 text \<open>
-  \medskip Note that @{command assume} augments the proof
+  \<^medskip>
+  Note that @{command assume} augments the proof
   context, @{command then} indicates that the current fact shall be
   used in the next step, and @{command have} states an intermediate
   goal.  The two dots ``@{command ".."}'' refer to a complete proof of
@@ -152,7 +155,8 @@
 text_raw \<open>\end{minipage}\<close>
 
 text \<open>
-  \medskip This Isar reasoning pattern again refers to the
+  \<^medskip>
+  This Isar reasoning pattern again refers to the
   primitive rule depicted above.  The system determines it in the
   ``@{command proof}'' step, which could have been spelled out more
   explicitly as ``@{command proof}~@{text "(rule InterI)"}''.  Note
@@ -166,7 +170,8 @@
   refinement of the enclosing claim, using the rule derived from the
   proof body.
 
-  \medskip The next example involves @{term "\<Union>\<A>"}, which can be
+  \<^medskip>
+  The next example involves @{term "\<Union>\<A>"}, which can be
   characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
   \<in> A \<and> A \<in> \<A>"}.  The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
   not mention @{text "\<exists>"} and @{text "\<and>"} at all, but admits to obtain
@@ -201,7 +206,8 @@
 text_raw \<open>\end{minipage}\<close>
 
 text \<open>
-  \medskip Although the Isar proof follows the natural
+  \<^medskip>
+  Although the Isar proof follows the natural
  deduction rule closely, the text does not read as naturally as
   anticipated.  There is a double occurrence of an arbitrary
   conclusion @{prop "C"}, which represents the final result, but is
@@ -236,13 +242,13 @@
   parlance, there are three levels of @{text "\<lambda>"}-calculus with
   corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{ll}
   @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
   @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
   @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Here only the types of syntactic terms, and the
   propositions of proof terms have been shown.  The @{text
@@ -298,7 +304,8 @@
   prop"} with axioms for reflexivity, substitution, extensionality,
   and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
 
-  \medskip An object-logic introduces another layer on top of Pure,
+  \<^medskip>
+  An object-logic introduces another layer on top of Pure,
   e.g.\ with types @{text "i"} for individuals and @{text "o"} for
   propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
   (implicit) derivability judgment and connectives like @{text "\<and> :: o
@@ -348,7 +355,8 @@
   @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
   \]
 
-  \medskip Goals are also represented as rules: @{text "A\<^sub>1 \<Longrightarrow>
+  \<^medskip>
+  Goals are also represented as rules: @{text "A\<^sub>1 \<Longrightarrow>
   \<dots> A\<^sub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^sub>1, \<dots>,
   A\<^sub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
   goal is finished.  To allow @{text "C"} being a rule statement
@@ -384,7 +392,7 @@
    \end{tabular}}
   \]
 
-  \medskip
+  \<^medskip>
 
   \[
   \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
@@ -399,7 +407,7 @@
   Isabelle/Pure:
 
   {\footnotesize
-  \medskip
+  \<^medskip>
   \begin{tabular}{r@ {\quad}l}
   @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
   @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
@@ -409,7 +417,7 @@
   @{text "#\<dots>"} & @{text "(assumption)"} \\
   @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
   }
 
   Compositions of @{inference assumption} after @{inference
@@ -464,7 +472,7 @@
   \secref{sec:framework-stmt} for the separate category @{text
   "statement"}.
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{rcl}
     @{text "theory\<hyphen>stmt"} & = & @{command "theorem"}~@{text "statement proof  |"}~~@{command "definition"}~@{text "\<dots>  |  \<dots>"} \\[1ex]
 
@@ -484,15 +492,18 @@
     & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
   \end{tabular}
 
-  \medskip Simultaneous propositions or facts may be separated by the
+  \<^medskip>
+  Simultaneous propositions or facts may be separated by the
   @{keyword "and"} keyword.
 
-  \medskip The syntax for terms and propositions is inherited from
+  \<^medskip>
+  The syntax for terms and propositions is inherited from
   Pure (and the object-logic).  A @{text "pattern"} is a @{text
   "term"} with schematic variables, to be bound by higher-order
   matching.
 
-  \medskip Facts may be referenced by name or proposition.  For
+  \<^medskip>
+  Facts may be referenced by name or proposition.  For
   example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
   becomes available both as @{text "a"} and
   \isacharbackquoteopen@{text "A"}\isacharbackquoteclose.  Moreover,
@@ -509,12 +520,12 @@
   frequently together with @{command then} we provide some
   abbreviations:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{rcl}
     @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
     @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   The @{text "method"} category is essentially a parameter and may be
   populated later.  Methods use the facts indicated by @{command
@@ -541,7 +552,8 @@
   "by"}~@{method this}''.  The @{command unfolding} element operates
   directly on the current facts and goal by applying equalities.
 
-  \medskip Block structure can be indicated explicitly by ``@{command
+  \<^medskip>
+  Block structure can be indicated explicitly by ``@{command
   "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
   already involves implicit nesting.  In any case, @{command next}
   jumps into the next section of a block, i.e.\ it acts like closing
@@ -580,12 +592,12 @@
   @{inference discharge} as given below.  The additional variants
   @{command presume} and @{command def} are defined as follows:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{rcl}
     @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<hyphen>discharge\<guillemotright> A"} \\
     @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   \[
   \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
@@ -597,7 +609,8 @@
   \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
   \]
 
-  \medskip Note that @{inference discharge} and @{inference
+  \<^medskip>
+  Note that @{inference discharge} and @{inference
   "weak\<hyphen>discharge"} differ in the marker for @{prop A}, which is
   relevant when the result of a @{command fix}-@{command
   assume}-@{command show} outline is composed with a pending goal,
@@ -611,7 +624,7 @@
   with a proof of a case rule stating that this extension is
   conservative (i.e.\ may be removed from closed results later on):
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{l}
   @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x  \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
   \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis\<rangle>"} \\
@@ -622,7 +635,7 @@
   \quad @{command qed} \\
   \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   \[
   \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
@@ -644,7 +657,8 @@
   is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
   latter involves multiple sub-goals.
 
-  \medskip The subsequent Isar proof texts explain all context
+  \<^medskip>
+  The subsequent Isar proof texts explain all context
   elements introduced above using the formal proof language itself.
   After finishing a local proof within a block, we indicate the
   exported result via @{command note}.
@@ -684,17 +698,19 @@
 (*>*)
 
 text \<open>
-  \bigskip This illustrates the meaning of Isar context
+  \<^bigskip>
+  This illustrates the meaning of Isar context
   elements without goals getting in between.
 \<close>
 
+
 subsection \<open>Structured statements \label{sec:framework-stmt}\<close>
 
 text \<open>
   The category @{text "statement"} of top-level theorem specifications
   is defined as follows:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{rcl}
   @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
   & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
@@ -707,7 +723,8 @@
   & & \quad @{text "\<BBAR> \<dots>"} \\
   \end{tabular}
 
-  \medskip A simple @{text "statement"} consists of named
+  \<^medskip>
+  A simple @{text "statement"} consists of named
   propositions.  The full form admits local context elements followed
   by the actual conclusions, such as ``@{keyword "fixes"}~@{text
   x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
@@ -721,14 +738,14 @@
   parameters (@{text "vars"}) and several premises (@{text "props"}).
   This specifies multi-branch elimination rules.
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{l}
   @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x   \<BBAR>   \<dots>   \<equiv>"} \\[0.5ex]
   \quad @{text "\<FIXES> thesis"} \\
   \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis  \<AND>  \<dots>"} \\
   \quad @{text "\<SHOWS> thesis"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Presenting structured statements in such an ``open'' format usually
   simplifies the subsequent proof, because the outer structure of the
@@ -761,7 +778,8 @@
 text_raw \<open>\end{minipage}\<close>
 
 text \<open>
-  \medskip Here local facts \isacharbackquoteopen@{text "A
+  \<^medskip>
+  Here local facts \isacharbackquoteopen@{text "A
   x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
   y"}\isacharbackquoteclose\ are referenced immediately; there is no
   need to decompose the logical rule structure again.  In the second
@@ -789,7 +807,8 @@
   In Isar the algebraic values are facts or goals, and the operations
   are inferences.
 
-  \medskip The Isar/VM state maintains a stack of nodes, each node
+  \<^medskip>
+  The Isar/VM state maintains a stack of nodes, each node
   contains the local proof context, the linguistic mode, and a pending
   goal (optional).  The mode determines the type of transition that
  may be performed next; it essentially alternates between forward and
@@ -927,12 +946,14 @@
 text_raw \<open>\end{minipage}\<close>
 
 text \<open>
-  \medskip Such ``peephole optimizations'' of Isar texts are
+  \<^medskip>
+  Such ``peephole optimizations'' of Isar texts are
   practically important to improve readability, by rearranging
  context elements according to the natural flow of reasoning in the
   body, while still observing the overall scoping rules.
 
-  \medskip This illustrates the basic idea of structured proof
+  \<^medskip>
+  This illustrates the basic idea of structured proof
   processing in Isar.  The main mechanisms are based on natural
   deduction rule composition within the Pure framework.  In
   particular, there are no direct operations on goal states within the
@@ -1005,7 +1026,8 @@
  the calculational chain, but the exact correspondence depends
  on the transitivity rules involved.
 
-  \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
+  \<^medskip>
+  Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
   transitivities with only one premise.  Isar maintains a separate
   rule collection declared via the @{attribute sym} attribute, to be
   used in fact expressions ``@{text "a [symmetric]"}'', or single-step
--- a/src/Doc/Isar_Ref/Generic.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Generic.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -293,7 +293,7 @@
     @{method_def simp_all} & : & @{text method} \\
     @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   @{rail \<open>
     (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
@@ -321,7 +321,8 @@
   either case, which might be required precisely like that in some
   boundary situations to perform the intended simplification step!
 
-  \medskip The @{text only} modifier first removes all other rewrite
+  \<^medskip>
+  The @{text only} modifier first removes all other rewrite
   rules, looper tactics (including split rules), congruence rules, and
   then behaves like @{text add}.  Implicit solvers remain, which means
   that trivial rules like reflexivity or introduction of @{text
@@ -330,7 +331,8 @@
  lead to some surprise about the meaning of ``only'' in Isabelle/HOL
   compared to English!
 
-  \medskip The @{text split} modifiers add or delete rules for the
+  \<^medskip>
+  The @{text split} modifiers add or delete rules for the
   Splitter (see also \secref{sec:simp-strategies} on the looper).
   This works only if the Simplifier method has been properly setup to
  include the Splitter (all major object logics such as HOL, HOLCF, FOL,
@@ -341,7 +343,8 @@
   @{text "(split thms)"} can be imitated by ``@{text "(simp only:
   split: thms)"}''.
 
-  \medskip The @{text cong} modifiers add or delete Simplifier
+  \<^medskip>
+  The @{text cong} modifiers add or delete Simplifier
   congruence rules (see also \secref{sec:simp-rules}); the default is
   to add.
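
For instance, the @{text only} modifier restricts the Simplifier to exactly the given rules, while the implicit solvers still close the trivial remainder by reflexivity; a minimal sketch using a standard fact of Isabelle/HOL:

  lemma "(a + b) + c = a + (b + c :: nat)"
    by (simp only: add.assoc)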
 
@@ -421,7 +424,8 @@
 lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp
 
 text \<open>
-  \medskip In many cases, assumptions of a subgoal are also needed in
+  \<^medskip>
+  In many cases, assumptions of a subgoal are also needed in
   the simplification process.  For example:
 \<close>
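
A minimal sketch of a goal whose proof needs an assumption during simplification:

  lemma "x = 0 \<Longrightarrow> x + y = (y :: nat)"
    by simp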
 
@@ -467,7 +471,9 @@
   have "f 0 = f 0 + 0" by simp
 end
 
-text \<open>\medskip Because assumptions may simplify each other, there
+text \<open>
+  \<^medskip>
+  Because assumptions may simplify each other, there
   can be very subtle cases of nontermination. For example, the regular
   @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
   \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
@@ -518,11 +524,12 @@
   @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
   @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}
 
-  \smallskip
+  \<^medskip>
   Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
   also permitted; the conditions can be arbitrary formulas.
 
-  \medskip Internally, all rewrite rules are translated into Pure
+  \<^medskip>
+  Internally, all rewrite rules are translated into Pure
   equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
   simpset contains a function for extracting equalities from arbitrary
   theorems, which is usually installed when the object-logic is
@@ -536,7 +543,7 @@
 
   \begin{enumerate}
 
-  \item First-order patterns, considering the sublanguage of
+  \<^enum> First-order patterns, considering the sublanguage of
   application of constant operators to variable operands, without
   @{text "\<lambda>"}-abstractions or functional variables.
   For example:
@@ -544,7 +551,7 @@
   @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
   @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
 
-  \item Higher-order patterns in the sense of @{cite "nipkow-patterns"}.
+  \<^enum> Higher-order patterns in the sense of @{cite "nipkow-patterns"}.
   These are terms in @{text "\<beta>"}-normal form (this will always be the
   case unless you have done something strange) where each occurrence
   of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
@@ -554,7 +561,7 @@
   or its symmetric form, since the @{text "rhs"} is also a
   higher-order pattern.
 
-  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
+  \<^enum> Physical first-order patterns over raw @{text "\<lambda>"}-term
   structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
   variables are treated like quasi-constant term material.
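
For instance, the Isabelle/HOL fact @{text ball_conj_distrib} is a rewrite rule whose left-hand side is a higher-order pattern in the above sense; a sketch of its statement:

  lemma "(\<forall>x\<in>A. P x \<and> Q x) \<longleftrightarrow> (\<forall>x\<in>A. P x) \<and> (\<forall>x\<in>A. Q x)"
    by blast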
 
@@ -591,12 +598,14 @@
   %The local assumptions are also provided as theorems to the solver;
   %see \secref{sec:simp-solver} below.
 
-  \medskip The following congruence rule for bounded quantifiers also
+  \<^medskip>
+  The following congruence rule for bounded quantifiers also
   supplies contextual information --- about the bound variable:
   @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
     (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}
 
-  \medskip This congruence rule for conditional expressions can
+  \<^medskip>
+  This congruence rule for conditional expressions can
   supply contextual information for simplifying the arms:
   @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
     (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}
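
The latter rule is available in Isabelle/HOL as @{text if_cong}, but it is not active by default; a sketch of enabling it:

  declare if_cong [cong]
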
@@ -636,7 +645,8 @@
   @{command "definition"} is an exception in \emph{not} declaring
   anything.
 
-  \medskip It is up the user to manipulate the current simpset further
+  \<^medskip>
+  It is up to the user to manipulate the current simpset further
   by explicitly adding or deleting theorems as simplification rules,
   or installing other tools via simplification procedures
   (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
@@ -682,7 +692,8 @@
   could be also changed locally for special applications via
   @{index_ML Simplifier.set_termless} in Isabelle/ML.
 
-  \medskip Permutative rewrite rules are declared to the Simplifier
+  \<^medskip>
+  Permutative rewrite rules are declared to the Simplifier
   just like other rewrite rules.  Their special status is recognized
   automatically, and their application is guarded by the term ordering
   accordingly.\<close>
@@ -697,12 +708,12 @@
 
   \begin{itemize}
 
-  \item The associative law must always be oriented from left to
+  \<^item> The associative law must always be oriented from left to
   right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
   orientation, if used with commutativity, leads to looping in
   conjunction with the standard term order.
 
-  \item To complete your set of rewrite rules, you must add not just
+  \<^item> To complete your set of rewrite rules, you must add not just
   associativity (A) and commutativity (C) but also a derived rule
   \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.
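
Following these conventions for HOL addition, ordered rewriting normalizes terms up to associativity and commutativity; a sketch using the @{text ac_simps} rule collection of Isabelle/HOL:

  lemma "b + (a + c) = a + (b + c :: nat)"
    by (simp only: ac_simps)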
 
@@ -769,7 +780,7 @@
     @{attribute_def simp_trace_new} & : & @{text attribute} \\
     @{attribute_def simp_break} & : & @{text attribute} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   @{rail \<open>
     @@{attribute simp_trace_new} ('interactive')? \<newline>
@@ -983,7 +994,8 @@
   Solvers are packaged up in abstract type @{ML_type solver}, with
   @{ML Simplifier.mk_solver} as the only operation to create a solver.
 
-  \medskip Rewriting does not instantiate unknowns.  For example,
+  \<^medskip>
+  Rewriting does not instantiate unknowns.  For example,
   rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
   instantiating @{text "?A"}.  The solver, however, is an arbitrary
   tactic and may instantiate unknowns as it pleases.  This is the only
@@ -1024,7 +1036,8 @@
 
   \end{description}
 
-  \medskip The solver tactic is invoked with the context of the
+  \<^medskip>
+  The solver tactic is invoked with the context of the
   running Simplifier.  Further operations
   may be used to retrieve relevant information, such as the list of
   local Simplifier premises via @{ML Simplifier.prems_of} --- this
@@ -1035,7 +1048,8 @@
   assume_tac}), even if the Simplifier proper happens to ignore local
   premises at the moment.
 
-  \medskip As explained before, the subgoaler is also used to solve
+  \<^medskip>
+  As explained before, the subgoaler is also used to solve
   the premises of congruence rules.  These are usually of the form
   @{text "s = ?x"}, where @{text "s"} needs to be simplified and
   @{text "?x"} needs to be instantiated with the result.  Typically,
@@ -1051,8 +1065,7 @@
   try resolving with the theorem @{text "\<not> False"} of the
   object-logic.
 
-  \medskip
-
+  \<^medskip>
   \begin{warn}
   If a premise of a congruence rule cannot be proved, then the
   congruence is ignored.  This should only happen if the rule is
@@ -1563,21 +1576,21 @@
 
   \begin{itemize}
 
-  \item It does not use the classical wrapper tacticals, such as the
+  \<^item> It does not use the classical wrapper tacticals, such as the
   integration with the Simplifier of @{method fastforce}.
 
-  \item It does not perform higher-order unification, as needed by the
+  \<^item> It does not perform higher-order unification, as needed by the
   rule @{thm [source=false] rangeI} in HOL.  There are often
   alternatives to such rules, for example @{thm [source=false]
   range_eqI}.
 
-  \item Function variables may only be applied to parameters of the
+  \<^item> Function variables may only be applied to parameters of the
   subgoal.  (This restriction arises because the prover does not use
   higher-order unification.)  If other function variables are present
   then the prover will fail with the message
   @{verbatim [display] \<open>Function unknown's argument not a bound variable\<close>}
 
-  \item Its proof strategy is more general than @{method fast} but can
+  \<^item> Its proof strategy is more general than @{method fast} but can
   be slower.  If @{method blast} fails or seems to be running forever,
   try @{method fast} and the other proof tools described below.
 
@@ -1909,7 +1922,7 @@
     @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
     @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Higher-order unification works well in most practical situations,
   but sometimes needs extra care to identify problems.  These tracing
--- a/src/Doc/Isar_Ref/HOL_Specific.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/HOL_Specific.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -35,7 +35,8 @@
   mathematics, but for most applications this does not matter.  If you
   prefer ML to Lisp, you will probably prefer HOL to ZF.
 
-  \medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
+  \<^medskip>
+  The syntax of HOL follows @{text "\<lambda>"}-calculus and
   functional programming.  Function application is curried.  To apply
   the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
   arguments @{text a} and @{text b} in HOL, you simply write @{text "f
@@ -47,7 +48,8 @@
   be avoided by currying functions by default.  Explicit tuples are as
   infrequent in HOL formalizations as in good ML or Haskell programs.
 
-  \medskip Isabelle/HOL has a distinct feel, compared to other
+  \<^medskip>
+  Isabelle/HOL has a distinct feel, compared to other
   object-logics like Isabelle/ZF.  It identifies object-level types
   with meta-level types, taking advantage of the default
   type-inference mechanism of Isabelle/Pure.  HOL fully identifies
@@ -192,25 +194,25 @@
 
   \begin{itemize}
 
-  \item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
+  \<^item> Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
   monotonicity of inductive definitions whose introduction rules have
   premises involving terms such as @{text "\<M> R t"}.
 
-  \item Monotonicity theorems for logical operators, which are of the
+  \<^item> Monotonicity theorems for logical operators, which are of the
   general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}.  For example, in
   the case of the operator @{text "\<or>"}, the corresponding theorem is
   \[
   \infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
   \]
 
-  \item De Morgan style equations for reasoning about the ``polarity''
+  \<^item> De Morgan style equations for reasoning about the ``polarity''
   of expressions, e.g.
   \[
   @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
   @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
   \]
 
-  \item Equations for reducing complex operators to more primitive
+  \<^item> Equations for reducing complex operators to more primitive
   ones whose monotonicity can easily be proved, e.g.
   \[
   @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
@@ -395,8 +397,7 @@
   | And "'a bexp"  "'a bexp"
   | Neg "'a bexp"
 
-
-text \<open>\medskip Evaluation of arithmetic and boolean expressions\<close>
+text \<open>\<^medskip> Evaluation of arithmetic and boolean expressions\<close>
 
 primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
   and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
@@ -415,7 +416,8 @@
   additional parameter, an \emph{environment} that maps variables to their
   values.
 
-  \medskip Substitution on expressions can be defined similarly. The mapping
+  \<^medskip>
+  Substitution on expressions can be defined similarly. The mapping
   @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a parameter is lifted
   canonically on the types @{typ "'a aexp"} and @{typ "'a bexp"},
   respectively.
@@ -501,7 +503,8 @@
   be applied to an argument. In particular, @{term "map_tree f \<circ> ts"} is not
   allowed here.
 
-  \medskip Here is a simple composition lemma for @{term map_tree}:
+  \<^medskip>
+  Here is a simple composition lemma for @{term map_tree}:
 \<close>
 
 lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
@@ -683,7 +686,8 @@
 
   \end{description}
 
-  \medskip Hints for @{command (HOL) "recdef"} may be also declared
+  \<^medskip>
+  Hints for @{command (HOL) "recdef"} may also be declared
   globally, using the following attributes.
 
   \begin{matharray}{rcl}
@@ -708,8 +712,7 @@
   @{attribute_def "show_variants"} & : & @{text "attribute"} & default @{text false} \\
   \end{tabular}
 
-  \medskip
-
+  \<^medskip>
  Adhoc overloading makes it possible to overload a constant depending on
   its type. Typically this involves the introduction of an
   uninterpreted constant (used for input and output) and the addition
@@ -899,20 +902,22 @@
   element.  In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
   for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
 
-  \medskip Two key observations make extensible records in a simply
+  \<^medskip>
+  Two key observations make extensible records in a simply
   typed language like HOL work out:
 
   \begin{enumerate}
 
-  \item the more part is internalized, as a free term or type
+  \<^enum> the more part is internalized, as a free term or type
   variable,
 
-  \item field names are externalized, they cannot be accessed within
+  \<^enum> field names are externalized; they cannot be accessed within
   the logic as first-class values.
 
   \end{enumerate}
 
-  \medskip In Isabelle/HOL record types have to be defined explicitly,
+  \<^medskip>
+  In Isabelle/HOL record types have to be defined explicitly,
   fixing their field names and types, and their (optional) parent
  record.  Afterwards, records may be formed using the above syntax, while
   obeying the canonical order of fields as given by their declaration.
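
A minimal sketch, using a hypothetical record @{text point}:

  record point =
    xpos :: nat
    ypos :: nat

  definition pt :: point
    where "pt = \<lparr>xpos = 1, ypos = 2\<rparr>"
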
@@ -980,7 +985,8 @@
   assume for now that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is a root record with fields
   @{text "c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
 
-  \medskip \textbf{Selectors} and \textbf{updates} are available for any
+  \<^medskip>
+  \textbf{Selectors} and \textbf{updates} are available for any
   field (including ``@{text more}''):
 
   \begin{matharray}{lll}
@@ -998,14 +1004,16 @@
   equality is concerned. Thus commutativity of independent updates can be
   proven within the logic for any two fields, but not as a general theorem.
 
-  \medskip The \textbf{make} operation provides a cumulative record
+  \<^medskip>
+  The \textbf{make} operation provides a cumulative record
   constructor function:
 
   \begin{matharray}{lll}
     @{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   \end{matharray}
 
-  \medskip We now reconsider the case of non-root records, which are derived
+  \<^medskip>
+  We now reconsider the case of non-root records, which are derived
  from some parent. In general, the latter may depend on another parent as
   well, resulting in a list of \emph{ancestor records}. Appending the lists
   of fields of all ancestors results in a certain field prefix. The record
@@ -1014,7 +1022,7 @@
   ancestor fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"}, the above record
   operations will get the following types:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{lll}
     @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
     @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
@@ -1023,7 +1031,7 @@
     @{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
       \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Some further operations address the extension aspect of a
   derived record scheme specifically: @{text "t.fields"} produces a record
@@ -1032,14 +1040,14 @@
   record and adds a given more part; @{text "t.truncate"} restricts a record
   scheme to a fixed record.
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{lll}
     @{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
     @{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
       \<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
     @{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Note that @{text "t.make"} and @{text "t.fields"} coincide for
   root records.
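
For the hypothetical @{text point} record above, @{text "point.make"} thus has type @{text "nat \<Rightarrow> nat \<Rightarrow> point"}; a sketch, expanding it via the collective fact @{text "point.defs"} mentioned below:

  lemma "point.make 1 2 = \<lparr>xpos = 1, ypos = 2\<rparr>"
    by (simp add: point.defs)
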
@@ -1056,26 +1064,26 @@
 
   \begin{enumerate}
 
-  \item Standard conversions for selectors or updates applied to record
+  \<^enum> Standard conversions for selectors or updates applied to record
   constructor terms are made part of the default Simplifier context; thus
   proofs by reduction of basic operations merely require the @{method simp}
   method without further arguments. These rules are available as @{text
   "t.simps"}, too.
 
-  \item Selectors applied to updated records are automatically reduced by an
+  \<^enum> Selectors applied to updated records are automatically reduced by an
   internal simplification procedure, which is also part of the standard
   Simplifier setup.
 
-  \item Inject equations of a form analogous to @{prop "(x, y) = (x', y') \<equiv>
+  \<^enum> Inject equations of a form analogous to @{prop "(x, y) = (x', y') \<equiv>
   x = x' \<and> y = y'"} are declared to the Simplifier and Classical Reasoner as
   @{attribute iff} rules. These rules are available as @{text "t.iffs"}.
 
-  \item The introduction rule for record equality analogous to @{text "x r =
+  \<^enum> The introduction rule for record equality analogous to @{text "x r =
   x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier, and as the
   basic rule context as ``@{attribute intro}@{text "?"}''. The rule is
   called @{text "t.equality"}.
 
-  \item Representations of arbitrary record expressions as canonical
+  \<^enum> Representations of arbitrary record expressions as canonical
   constructor terms are provided both in @{method cases} and @{method
   induct} format (cf.\ the generic proof methods of the same name,
   \secref{sec:cases-induct}). Several variations are available, for fixed
@@ -1086,7 +1094,7 @@
   need to apply something like ``@{text "(cases r)"}'' to a certain proof
   problem.
 
-  \item The derived record operations @{text "t.make"}, @{text "t.fields"},
+  \<^enum> The derived record operations @{text "t.make"}, @{text "t.fields"},
   @{text "t.extend"}, @{text "t.truncate"} are \emph{not} treated
   automatically, but usually need to be expanded by hand, using the
   collective fact @{text "t.defs"}.
@@ -1135,7 +1143,8 @@
   virtues'' of this relatively simple family of logics. STT has only ground
   types, without polymorphism and without type definitions.
 
-  \medskip M. Gordon @{cite "Gordon:1985:HOL"} augmented Church's STT by
+  \<^medskip>
+  M. Gordon @{cite "Gordon:1985:HOL"} augmented Church's STT by
   adding schematic polymorphism (type variables and type constructors) and a
   facility to introduce new types as semantic subtypes from existing types.
   This genuine extension of the logic was explained semantically by A. Pitts
@@ -1145,7 +1154,8 @@
   interpretations is closed by forming subsets (via predicates taken from
   the logic).
 
-  \medskip Isabelle/HOL goes beyond Gordon-style HOL by admitting overloaded
+  \<^medskip>
+  Isabelle/HOL goes beyond Gordon-style HOL by admitting overloaded
   constant definitions @{cite "Wenzel:1997:TPHOL" and
   "Haftmann-Wenzel:2006:classes"}, which are actually a concept of
   Isabelle/Pure and do not depend on particular set-theoretic semantics of
@@ -1402,7 +1412,7 @@
 
   \begin{enumerate}
 
-  \item The first one is a low-level mode when the user must provide as a
+  \<^enum> The first one is a low-level mode when the user must provide as a
   first argument of @{command (HOL) "setup_lifting"} a quotient theorem
   @{term "Quotient R Abs Rep T"}. The package configures a transfer rule for
   equality, a domain transfer rules and sets up the @{command_def (HOL)
@@ -1416,7 +1426,7 @@
   Users generally will not prove the @{text Quotient} theorem manually for
   new types, as special commands exist to automate the process.
 
-  \item When a new subtype is defined by @{command (HOL) typedef}, @{command
+  \<^enum> When a new subtype is defined by @{command (HOL) typedef}, @{command
   (HOL) "lift_definition"} can be used in its second mode, where only the
   @{term type_definition} theorem @{term "type_definition Rep Abs A"} is
   used as an argument of the command. The command internally proves the
@@ -1430,7 +1440,8 @@
   the corresponding Quotient theorem is proved and registered by @{command
   (HOL) setup_lifting}.
 
-  \medskip The command @{command (HOL) "setup_lifting"} also sets up the
+  \<^medskip>
+  The command @{command (HOL) "setup_lifting"} also sets up the
   code generator for the new type. Later on, when a new constant is defined
   by @{command (HOL) "lift_definition"}, the Lifting package proves and
   registers a code equation (if there is one) for the new constant.
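
A minimal sketch of the second mode, for a hypothetical subtype @{text pos} of positive natural numbers:

  typedef pos = "{n :: nat. 0 < n}"
    by (rule exI [of _ 1]) simp  (* non-emptiness witness: 1 *)

  setup_lifting type_definition_pos

  lift_definition one_pos :: pos is "1 :: nat" by simp
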
@@ -1463,7 +1474,8 @@
   unconditional for total quotients. The equation defines @{text f} using
   the abstraction function.
 
-  \medskip Integration with [@{attribute code} abstract]: For subtypes
+  \<^medskip>
+  Integration with [@{attribute code} abstract]: For subtypes
   (e.g.\ corresponding to a datatype invariant, such as @{typ "'a dlist"}),
   @{command (HOL) "lift_definition"} uses a code certificate theorem @{text
   f.rep_eq} as a code equation. Because of the limitation of the code
@@ -2292,7 +2304,7 @@
 
   \begin{enumerate}
 
-  \item Universal problems over multivariate polynomials in a
+  \<^enum> Universal problems over multivariate polynomials in a
   (semi)-ring/field/idom; the capabilities of the method are augmented
   according to properties of these structures. For this problem class
   the method is only complete for algebraically closed fields, since
@@ -2302,7 +2314,7 @@
   The problems can contain equations @{text "p = 0"} or inequations
   @{text "q \<noteq> 0"} anywhere within a universal problem statement.
 
-  \item All-exists problems of the following restricted (but useful)
+  \<^enum> All-exists problems of the following restricted (but useful)
   form:
 
   @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
--- a/src/Doc/Isar_Ref/Inner_Syntax.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Inner_Syntax.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -138,7 +138,7 @@
     @{attribute_def show_tags} & : & @{text attribute} & default @{text false} \\
     @{attribute_def show_question_marks} & : & @{text attribute} & default @{text true} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   These configuration options control the detail of information that
   is displayed for types, terms, theorems, goals etc.  See also
@@ -273,7 +273,8 @@
 
   \end{description}
 
-  \medskip The pretty printer for inner syntax maintains alternative
+  \<^medskip>
+  The pretty printer for inner syntax maintains alternative
   mixfix productions for any print mode name invented by the user, say
   in commands like @{command notation} or @{command abbreviation}.
   Mode names can be arbitrary, but the following ones have a specific
@@ -281,24 +282,24 @@
 
   \begin{itemize}
 
-  \item @{verbatim \<open>""\<close>} (the empty string): default mode;
+  \<^item> @{verbatim \<open>""\<close>} (the empty string): default mode;
   implicitly active as last element in the list of modes.
 
-  \item @{verbatim input}: dummy print mode that is never active; may
+  \<^item> @{verbatim input}: dummy print mode that is never active; may
   be used to specify notation that is only available for input.
 
-  \item @{verbatim internal} dummy print mode that is never active;
+  \<^item> @{verbatim internal} dummy print mode that is never active;
   used internally in Isabelle/Pure.
 
-  \item @{verbatim xsymbols}: enable proper mathematical symbols
+  \<^item> @{verbatim xsymbols}: enable proper mathematical symbols
   instead of ASCII art.\footnote{This traditional mode name stems from
   the ``X-Symbol'' package for classic Proof~General with XEmacs.}
 
-  \item @{verbatim HTML}: additional mode that is active in HTML
+  \<^item> @{verbatim HTML}: additional mode that is active in HTML
  presentation of Isabelle theory sources; allows providing
   alternative output notation.
 
-  \item @{verbatim latex}: additional mode that is active in {\LaTeX}
+  \<^item> @{verbatim latex}: additional mode that is active in {\LaTeX}
  document preparation of Isabelle theory sources; allows providing
   alternative output notation.
 
@@ -370,7 +371,8 @@
   instead.  If a term has fewer arguments than specified in the mixfix
   template, the concrete syntax is ignored.
 
-  \medskip A mixfix template may also contain additional directives
+  \<^medskip>
+  A mixfix template may also contain additional directives
   for pretty printing, notably spaces, blocks, and breaks.  The
   general template format is a sequence over any of the following
   entities.
@@ -380,7 +382,7 @@
   \item @{text "d"} is a delimiter, namely a non-empty sequence of
   characters other than the following special characters:
 
-  \smallskip
+  \<^medskip>
   \begin{tabular}{ll}
     @{verbatim "'"} & single quote \\
     @{verbatim "_"} & underscore \\
@@ -389,7 +391,7 @@
     @{verbatim ")"} & close parenthesis \\
     @{verbatim "/"} & slash \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   \item @{verbatim "'"} escapes the special meaning of these
   meta-characters, producing a literal version of the following
@@ -565,11 +567,11 @@
 
   \begin{enumerate}
 
-  \item \emph{delimiters} --- the literal tokens occurring in
+  \<^enum> \emph{delimiters} --- the literal tokens occurring in
   productions of the given priority grammar (cf.\
   \secref{sec:priority-grammar});
 
-  \item \emph{named tokens} --- various categories of identifiers etc.
+  \<^enum> \emph{named tokens} --- various categories of identifiers etc.
 
   \end{enumerate}
 
@@ -578,7 +580,8 @@
   alternative ways to refer to the same entity, potentially via
   qualified names.
 
-  \medskip The categories for named tokens are defined once and for
+  \<^medskip>
+  The categories for named tokens are defined once and for
   all as follows, reusing some categories of the outer token syntax
   (\secref{sec:outer-lex}).
 
@@ -628,13 +631,15 @@
   priority grammar can be translated into a normal context-free
   grammar by introducing new nonterminals and productions.
 
-  \medskip Formally, a set of context free productions @{text G}
+  \<^medskip>
+  Formally, a set of context free productions @{text G}
   induces a derivation relation @{text "\<longrightarrow>\<^sub>G"} as follows.  Let @{text
   \<alpha>} and @{text \<beta>} denote strings of terminal or nonterminal symbols.
   Then @{text "\<alpha> A\<^sup>(\<^sup>p\<^sup>) \<beta> \<longrightarrow>\<^sub>G \<alpha> \<gamma> \<beta>"} holds if and only if @{text G}
   contains some production @{text "A\<^sup>(\<^sup>q\<^sup>) = \<gamma>"} for @{text "p \<le> q"}.
 
-  \medskip The following grammar for arithmetic expressions
+  \<^medskip>
+  The following grammar for arithmetic expressions
   demonstrates how binding power and associativity of operators can be
   enforced by priorities.
 
@@ -652,21 +657,22 @@
   "+"}.  Furthermore @{verbatim "+"} associates to the left and
   @{verbatim "*"} to the right.
 
-  \medskip For clarity, grammars obey these conventions:
+  \<^medskip>
+  For clarity, grammars obey these conventions:
   \begin{itemize}
 
-  \item All priorities must lie between 0 and 1000.
+  \<^item> All priorities must lie between 0 and 1000.
 
-  \item Priority 0 on the right-hand side and priority 1000 on the
+  \<^item> Priority 0 on the right-hand side and priority 1000 on the
   left-hand side may be omitted.
 
-  \item The production @{text "A\<^sup>(\<^sup>p\<^sup>) = \<alpha>"} is written as @{text "A = \<alpha>
+  \<^item> The production @{text "A\<^sup>(\<^sup>p\<^sup>) = \<alpha>"} is written as @{text "A = \<alpha>
   (p)"}, i.e.\ the priority of the left-hand side actually appears in
   a column on the far right.
 
-  \item Alternatives are separated by @{text "|"}.
+  \<^item> Alternatives are separated by @{text "|"}.
 
-  \item Repetition is indicated by dots @{text "(\<dots>)"} in an informal
+  \<^item> Repetition is indicated by dots @{text "(\<dots>)"} in an informal
   but obvious way.
 
   \end{itemize}
@@ -759,7 +765,8 @@
   \end{supertabular}
   \end{center}
 
-  \medskip Here literal terminals are printed @{verbatim "verbatim"};
+  \<^medskip>
+  Here literal terminals are printed @{verbatim "verbatim"};
   see also \secref{sec:inner-lex} for further token categories of the
   inner syntax.  The meaning of the nonterminals defined by the above
   grammar is as follows:
@@ -823,23 +830,23 @@
 
   \begin{itemize}
 
-  \item In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
+  \<^item> In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
   parsed as @{text "x :: (nat y)"}, treating @{text y} like a type
   constructor applied to @{text nat}.  To avoid this interpretation,
   write @{text "(x :: nat) y"} with explicit parentheses.
 
-  \item Similarly, @{text "x :: nat y :: nat"} is parsed as @{text "x ::
+  \<^item> Similarly, @{text "x :: nat y :: nat"} is parsed as @{text "x ::
   (nat y :: nat)"}.  The correct form is @{text "(x :: nat) (y ::
   nat)"}, or @{text "(x :: nat) y :: nat"} if @{text y} is last in the
   sequence of identifiers.
 
-  \item Type constraints for terms bind very weakly.  For example,
+  \<^item> Type constraints for terms bind very weakly.  For example,
   @{text "x < y :: nat"} is normally parsed as @{text "(x < y) ::
   nat"}, unless @{text "<"} has a very low priority, in which case the
   input is likely to be ambiguous.  The correct form is @{text "x < (y
   :: nat)"}.
 
-  \item Dummy variables (written as underscore) may occur in different
+  \<^item> Dummy variables (written as underscore) may occur in different
   roles.
 
   \begin{description}
@@ -1053,7 +1060,8 @@
   @{verbatim "()"} and @{verbatim "(x)"} are excluded as ASTs, because
   they have too few subtrees.
 
-  \medskip AST application is merely a pro-forma mechanism to indicate
+  \<^medskip>
+  AST application is merely a pro-forma mechanism to indicate
   certain syntactic structures.  Thus @{verbatim "(c a b)"} could mean
   either term application or type application, depending on the
   syntactic context.
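
For instance, an application @{verbatim "(f x y)"} might be constructed in Isabelle/ML as follows (a sketch; whether the head becomes a constant or a variable depends on the translation involved):

  ML \<open>
    (* AST for the pro-forma application (f x y) *)
    val ast = Ast.Appl [Ast.Constant "f", Ast.Variable "x", Ast.Variable "y"]
  \<close>
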
@@ -1090,7 +1098,8 @@
   variables (free or schematic) into @{ML Ast.Variable}.  This
   information is precise when printing fully formal @{text "\<lambda>"}-terms.
 
-  \medskip AST translation patterns (\secref{sec:syn-trans}) that
+  \<^medskip>
+  AST translation patterns (\secref{sec:syn-trans}) that
   represent terms cannot distinguish constants and variables
   syntactically.  Explicit indication of @{text "CONST c"} inside the
   term language is required, unless @{text "c"} is known as special
@@ -1126,16 +1135,16 @@
 
   \begin{itemize}
 
-  \item Input of term constants (or fixed variables) that are
+  \<^item> Input of term constants (or fixed variables) that are
   introduced by concrete syntax via @{command notation}: the
   correspondence of a particular grammar production to some known term
   entity is preserved.
 
-  \item Input of type constants (constructors) and type classes ---
+  \<^item> Input of type constants (constructors) and type classes ---
   thanks to explicit syntactic distinction independently on the
   context.
 
-  \item Output of term constants, type constants, type classes ---
+  \<^item> Output of term constants, type constants, type classes ---
   this information is already available from the internal term to be
   printed.
 
@@ -1159,8 +1168,7 @@
     @{attribute_def syntax_ast_trace} & : & @{text attribute} & default @{text false} \\
     @{attribute_def syntax_ast_stats} & : & @{text attribute} & default @{text false} \\
   \end{tabular}
-
-  \medskip
+  \<^medskip>
 
   Unlike mixfix notation for existing formal entities
   (\secref{sec:notation}), raw syntax declarations provide full access
@@ -1207,14 +1215,14 @@
   follows:
   \begin{itemize}
 
-  \item @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
+  \<^item> @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
 
-  \item @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
+  \<^item> @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
   constructor @{text "\<kappa> \<noteq> prop"}
 
-  \item @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
+  \<^item> @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
 
-  \item @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
+  \<^item> @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
   (syntactic type constructor)
 
   \end{itemize}
@@ -1225,7 +1233,8 @@
   The resulting nonterminal of the production is determined similarly
   from type @{text "\<tau>"}, with priority @{text "q"} and default 1000.
 
-  \medskip Parsing via this production produces parse trees @{text
+  \<^medskip>
+  Parsing via this production produces parse trees @{text
   "t\<^sub>1, \<dots>, t\<^sub>n"} for the argument slots.  The resulting parse tree is
   composed as @{text "c t\<^sub>1 \<dots> t\<^sub>n"}, by using the syntax constant @{text
   "c"} of the syntax declaration.
@@ -1236,7 +1245,8 @@
   "_foobar"}).  Names should be chosen with care, to avoid clashes
   with other syntax declarations.
 
-  \medskip The special case of copy production is specified by @{text
+  \<^medskip>
+  The special case of copy production is specified by @{text
   "c = "}@{verbatim \<open>""\<close>} (empty string).  It means that the
   resulting parse tree @{text "t"} is copied directly, without any
   further decoration.
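+
+  \<^medskip>
+  For example, a hypothetical declaration
+
+  @{verbatim \<open>
+  syntax "_ifte" :: "logic \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic"
+    ("IF _ THEN _ ELSE _" [0, 0, 10] 10)\<close>}
+
+  would produce parse trees of the form @{verbatim "(_ifte b t e)"},
+  with one subtree for each argument slot.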
@@ -1281,13 +1291,13 @@
 
   \begin{itemize}
 
-  \item Rules must be left linear: @{text "lhs"} must not contain
+  \<^item> Rules must be left-linear: @{text "lhs"} must not contain
   repeated variables.\footnote{The deeper reason for this is that AST
   equality is not well-defined: different occurrences of the ``same''
   AST could be decorated differently by accidental type-constraints or
   source position information, for example.}
 
-  \item Every variable in @{text "rhs"} must also occur in @{text
+  \<^item> Every variable in @{text "rhs"} must also occur in @{text
   "lhs"}.
 
   \end{itemize}
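+
+  \<^medskip>
+  For example, the hypothetical @{verbatim "_ifte"} syntax sketched
+  above could be mapped to a formal constant as follows:
+
+  @{verbatim \<open>
+  translations "IF b THEN t ELSE e" \<rightharpoonup> "CONST If b t e"\<close>}
+
+  This rule is left-linear (@{text "b"}, @{text "t"}, @{text "e"} are
+  distinct), and its right-hand side mentions no additional variables.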
@@ -1312,11 +1322,11 @@
 
   \begin{itemize}
 
-  \item Iterated replacement via recursive @{command translations}.
+  \<^item> Iterated replacement via recursive @{command translations}.
   For example, consider list enumeration @{term "[a, b, c, d]"} as
   defined in theory @{theory List} in Isabelle/HOL.
 
-  \item Change of binding status of variables: anything beyond the
+  \<^item> Change of binding status of variables: anything beyond the
   built-in @{keyword "binder"} mixfix annotation requires explicit
   syntax translations.  For example, consider list filter
   comprehension @{term "[x \<leftarrow> xs . P]"} as defined in theory @{theory
@@ -1348,21 +1358,21 @@
 
   \begin{itemize}
 
-  \item Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
+  \<^item> Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
   Ast.Constant}~@{text "x"} are matched by pattern @{ML
   Ast.Constant}~@{text "x"}.  Thus all atomic ASTs in the object are
   treated as (potential) constants, and a successful match makes them
   actual constants even before name space resolution (see also
   \secref{sec:ast}).
 
-  \item Object @{text "u"} is matched by pattern @{ML
+  \<^item> Object @{text "u"} is matched by pattern @{ML
   Ast.Variable}~@{text "x"}, binding @{text "x"} to @{text "u"}.
 
-  \item Object @{ML Ast.Appl}~@{text "us"} is matched by @{ML
+  \<^item> Object @{ML Ast.Appl}~@{text "us"} is matched by @{ML
   Ast.Appl}~@{text "ts"} if @{text "us"} and @{text "ts"} have the
   same length and each corresponding subtree matches.
 
-  \item In every other case, matching fails.
+  \<^item> In every other case, matching fails.
 
   \end{itemize}
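+
+  For example, the object @{verbatim "(_ifte u v w)"} is matched by the
+  pattern @{verbatim "(_ifte b t e)"}: the atom @{verbatim "_ifte"} acts
+  as a constant, while @{verbatim "b"}, @{verbatim "t"}, @{verbatim "e"}
+  are variables that get bound to the subtrees @{verbatim "u"},
+  @{verbatim "v"}, @{verbatim "w"}, respectively.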
 
@@ -1436,7 +1446,7 @@
   @{syntax text} argument that refers to an ML expression of
   appropriate type as follows:
 
-  \medskip
+  \<^medskip>
   {\footnotesize
   \begin{tabular}{l}
   @{command parse_ast_translation} : \\
@@ -1450,7 +1460,7 @@
   @{command print_ast_translation} : \\
   \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
   \end{tabular}}
-  \medskip
+  \<^medskip>
 
   The argument list consists of @{text "(c, tr)"} pairs, where @{text
   "c"} is the syntax name of the formal entity involved, and @{text
@@ -1627,7 +1637,8 @@
   finally pretty-printed.  The built-in print AST translations reverse
   the corresponding parse AST translations.
 
-  \medskip For the actual printing process, the priority grammar
+  \<^medskip>
+  For the actual printing process, the priority grammar
   (\secref{sec:priority-grammar}) plays a vital role: productions are
   used as templates for pretty printing, with argument slots stemming
   from nonterminals, and syntactic sugar stemming from literal tokens.
@@ -1655,7 +1666,8 @@
   in order of appearance within the grammar.  An occurrence of some
   AST variable @{text "x"} is printed as @{text "x"} outright.
 
-  \medskip White space is \emph{not} inserted automatically.  If
+  \<^medskip>
+  White space is \emph{not} inserted automatically.  If
   blanks (or breaks) are required to separate tokens, they need to be
   specified in the mixfix declaration (\secref{sec:mixfix}).
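+
+  \<^medskip>
+  For example, a hypothetical infix template @{verbatim \<open>"_ +/ _"\<close>}
+  prints a space before the @{verbatim "+"} sign and allows an optional
+  line break after it (printing a space if no break occurs), whereas
+  plain @{verbatim \<open>"_+_"\<close>} would print the application without any
+  white space.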
 \<close>
--- a/src/Doc/Isar_Ref/Outer_Syntax.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Outer_Syntax.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -15,7 +15,8 @@
   Concrete theory and proof language elements will be introduced later
   on.
 
-  \medskip In order to get started with writing well-formed
+  \<^medskip>
+  In order to get started with writing well-formed
   Isabelle/Isar documents, the most important aspect to be noted is
   the difference of \emph{inner} versus \emph{outer} syntax.  Inner
   syntax is that of Isabelle types and terms of the logic, while outer
@@ -74,13 +75,13 @@
 
   \begin{enumerate}
 
-  \item \emph{major keywords} --- the command names that are available
+  \<^enum> \emph{major keywords} --- the command names that are available
   in the present logic session;
 
-  \item \emph{minor keywords} --- additional literal tokens required
+  \<^enum> \emph{minor keywords} --- additional literal tokens required
   by the syntax of commands;
 
-  \item \emph{named tokens} --- various categories of identifiers etc.
+  \<^enum> \emph{named tokens} --- various categories of identifiers etc.
 
   \end{enumerate}
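+
+  For example, @{command "lemma"} is a major keyword, @{keyword "assumes"}
+  is a minor keyword, and identifiers like @{text "foo"} are named tokens.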
 
@@ -102,8 +103,8 @@
   tabs, newlines and formfeeds between tokens serve as explicit
   separators.
 
-  \medskip The categories for named tokens are defined once and for
-  all as follows.
+  \<^medskip>
+  The categories for named tokens are defined once and for all as follows.
 
   \begin{center}
   \begin{supertabular}{rcl}
@@ -341,7 +342,8 @@
     @{syntax_def prop_pat}: '(' (@'is' @{syntax prop} +) ')'
   \<close>}
 
-  \medskip Declarations of local variables @{text "x :: \<tau>"} and
+  \<^medskip>
+  Declarations of local variables @{text "x :: \<tau>"} and
   logical propositions @{text "a : \<phi>"} represent different views on
   the same principle of introducing a local scope.  In practice, one
   may usually omit the typing of @{syntax vars} (due to
@@ -412,11 +414,11 @@
   There are three forms of theorem references:
   \begin{enumerate}
 
-  \item named facts @{text "a"},
+  \<^enum> named facts @{text "a"},
 
-  \item selections from named facts @{text "a(i)"} or @{text "a(j - k)"},
+  \<^enum> selections from named facts @{text "a(i)"} or @{text "a(j - k)"},
 
-  \item literal fact propositions using token syntax @{syntax_ref altstring}
+  \<^enum> literal fact propositions using token syntax @{syntax_ref altstring}
   @{verbatim "`"}@{text "\<phi>"}@{verbatim "`"} or @{syntax_ref cartouche}
   @{text "\<open>\<phi>\<close>"} (see also method @{method_ref fact}).
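+
+  \<^medskip>
+  For example, if @{text "a"} stands for a list of three theorems, then
+  @{text "a(1)"} refers to its first element, @{text "a(2-3)"} to the
+  second and third, and @{text "a(2-)"} to everything but the first.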
 
--- a/src/Doc/Isar_Ref/Preface.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Preface.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -26,7 +26,8 @@
   Isabelle/jEdit Prover IDE @{cite "Wenzel:2012"} emphasizes the
   document-oriented approach of Isabelle/Isar again more explicitly.
 
-  \medskip Apart from the technical advances over bare-bones ML
+  \<^medskip>
+  Apart from the technical advances over bare-bones ML
   programming, the main purpose of the Isar language is to provide a
   conceptually different view on machine-checked proofs
   @{cite "Wenzel:1999:TPHOL" and "Wenzel-PhD"}.  \emph{Isar} stands for
@@ -49,7 +50,8 @@
   structured proofs.  Isabelle/Isar supports a broad range of proof
   styles, both readable and unreadable ones.
 
-  \medskip The generic Isabelle/Isar framework (see
+  \<^medskip>
+  The generic Isabelle/Isar framework (see
   \chref{ch:isar-framework}) works reasonably well for any Isabelle
   object-logic that conforms to the natural deduction view of the
   Isabelle/Pure framework.  Specific language elements introduced by
@@ -58,7 +60,8 @@
   framework, examples given in the generic parts will usually refer to
   Isabelle/HOL.
 
-  \medskip Isar commands may be either \emph{proper} document
+  \<^medskip>
+  Isar commands may be either \emph{proper} document
   constructors, or \emph{improper commands}.  Some proof methods and
   attributes introduced later are classified as improper as well.
   Improper Isar language elements, which are marked by ``@{text
--- a/src/Doc/Isar_Ref/Proof.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Proof.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -247,7 +247,8 @@
   incremental type-inference, as the user proceeds to build up the
   Isar proof text from left to right.
 
-  \medskip Term abbreviations are quite different from local
+  \<^medskip>
+  Term abbreviations are quite different from local
   definitions as introduced via @{command "def"} (see
   \secref{sec:proof-context}).  The latter are visible within the
   logic as actual equations, while abbreviations disappear during the
@@ -554,7 +555,8 @@
   calculations, there is no separate \emph{begin-calculation} command
   required.
 
-  \medskip The Isar calculation proof commands may be defined as
+  \<^medskip>
+  The Isar calculation proof commands may be defined as
   follows:\footnote{We suppress internal bookkeeping such as proper
   handling of block-structure.}
 
@@ -651,7 +653,8 @@
   the subsequent mechanisms make it possible to imitate the effect of
   subgoal addressing that is known from ML tactics.
 
-  \medskip Goal \emph{restriction} means the proof state is wrapped-up in a
+  \<^medskip>
+  Goal \emph{restriction} means the proof state is wrapped up in such a
   way that certain subgoals are exposed, and other subgoals are ``parked''
   elsewhere. Thus a proof method has no other chance than to operate on the
   subgoals that are presently exposed.
@@ -671,7 +674,8 @@
   "(rule r, simp_all)[]"}'' simplifies all new goals that emerge from
   applying rule @{text "r"} to the originally first one.
 
-  \medskip Improper methods, notably tactic emulations, offer low-level goal
+  \<^medskip>
+  Improper methods, notably tactic emulations, offer low-level goal
   addressing as an explicit argument to the individual tactic involved.
   Here ``@{text "[!]"}'' refers to all goals, and ``@{text "[n-]"}'' to all
   goals starting from @{text "n"}.
@@ -702,13 +706,13 @@
 
   \begin{enumerate}
 
-  \item An \emph{initial} refinement step @{command_ref
+  \<^enum> An \emph{initial} refinement step @{command_ref
   "proof"}~@{text "m\<^sub>1"} reduces a newly stated goal to a number
   of sub-goals that are to be solved later.  Facts are passed to
   @{text "m\<^sub>1"} for forward chaining, if so indicated by @{text
   "proof(chain)"} mode.
 
-  \item A \emph{terminal} conclusion step @{command_ref "qed"}~@{text
+  \<^enum> A \emph{terminal} conclusion step @{command_ref "qed"}~@{text
   "m\<^sub>2"} is intended to solve remaining goals.  No facts are
   passed to @{text "m\<^sub>2"}.
 
@@ -720,7 +724,8 @@
   problem of unstructured tactic scripts that consist of numerous
   consecutive goal transformations, with invisible effects.
 
-  \medskip As a general rule of thumb for good proof style, initial
+  \<^medskip>
+  As a general rule of thumb for good proof style, initial
   proof methods should either solve the goal completely, or constitute
   some well-understood reduction to new sub-goals.  Arbitrary
   automatic proof tools that are prone to leave a large number of badly
@@ -1053,7 +1058,8 @@
   y\<^sub>1 \<dots> y\<^sub>m)"}'' instead, the proof author is able to
   choose local names that fit nicely into the current context.
 
-  \medskip It is important to note that proper use of @{command
+  \<^medskip>
+  It is important to note that proper use of @{command
   "case"} does not provide means to peek at the current goal state,
   which is not directly observable in Isar!  Nonetheless, goal
   refinement commands do provide named cases @{text "goal\<^sub>i"}
@@ -1068,7 +1074,8 @@
   previous theory specifications in a canonical way (say from
   @{command "inductive"} definitions).
 
-  \medskip Proper cases are only available if both the proof method
+  \<^medskip>
+  Proper cases are only available if both the proof method
   and the rules involved support this.  By using appropriate
   attributes, case names, conclusions, and parameters may also be
   declared by hand.  Thus variant versions of rules that have been
@@ -1217,7 +1224,7 @@
   The rule is determined as follows, according to the facts and
   arguments passed to the @{method cases} method:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{llll}
     facts           &                 & arguments   & rule \\\hline
     @{text "\<turnstile> R"}   & @{method cases} &             & implicit rule @{text R} \\
@@ -1226,7 +1233,7 @@
     @{text "\<turnstile> A t"} & @{method cases} & @{text "\<dots>"} & inductive predicate/set elimination (of @{text A}) \\
     @{text "\<dots>"}     & @{method cases} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Several instantiations may be given, referring to the \emph{suffix}
   of premises of the case rule; within each premise, the \emph{prefix}
@@ -1241,14 +1248,14 @@
   @{method cases} method, but refer to induction rules, which are
   determined as follows:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{llll}
     facts           &                  & arguments            & rule \\\hline
                     & @{method induct} & @{text "P x"}        & datatype induction (type of @{text x}) \\
     @{text "\<turnstile> A x"} & @{method induct} & @{text "\<dots>"}          & predicate/set induction (of @{text A}) \\
     @{text "\<dots>"}     & @{method induct} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   Several instantiations may be given, each referring to some part of
   a mutual inductive definition or datatype --- only related partial
@@ -1297,13 +1304,14 @@
   @{method induct} method, but refers to coinduction rules, which are
   determined as follows:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{llll}
     goal          &                    & arguments & rule \\\hline
                   & @{method coinduct} & @{text x} & type coinduction (type of @{text x}) \\
     @{text "A x"} & @{method coinduct} & @{text "\<dots>"} & predicate/set coinduction (of @{text A}) \\
     @{text "\<dots>"}   & @{method coinduct} & @{text "\<dots> rule: R"} & explicit rule @{text R} \\
   \end{tabular}
+  \<^medskip>
 
   Coinduction is the dual of induction.  Induction essentially
   eliminates @{text "A x"} towards a generic result @{text "P x"},
@@ -1333,7 +1341,8 @@
   The @{command "print_cases"} command prints all named cases present
   in the current proof state.
 
-  \medskip Despite the additional infrastructure, both @{method cases}
+  \<^medskip>
+  Despite the additional infrastructure, both @{method cases}
   and @{method coinduct} merely apply a certain rule, after
   instantiation, while conforming to the usual way of monotonic
   natural deduction: the context of a structured statement @{text
@@ -1363,7 +1372,8 @@
   @{text c.IH}, @{text c.hyps} and @{text c.prems}, as above.
 
 
-  \medskip Facts presented to either method are consumed according to
+  \<^medskip>
+  Facts presented to either method are consumed according to
   the number of ``major premises'' of the rule involved, which is
   usually 0 for plain cases and induction rules of datatypes etc.\ and
   1 for rules of inductive predicates or sets and the like.  The
@@ -1438,13 +1448,13 @@
   soundness of this temporary context extension. As representative examples,
   one may think of standard rules from Isabelle/HOL like this:
 
-  \medskip
+  \<^medskip>
   \begin{tabular}{ll}
   @{text "\<exists>x. B x \<Longrightarrow> (\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
   @{text "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
   @{text "A \<or> B \<Longrightarrow> (A \<Longrightarrow> thesis) \<Longrightarrow> (B \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
   \end{tabular}
-  \medskip
+  \<^medskip>
 
   In general, these particular rules and connectives need not get involved at
   all: this concept works directly in Isabelle/Pure via Isar commands
@@ -1474,7 +1484,8 @@
   the rule this is a fact name; in the resulting rule it is used as an
   annotation with the @{attribute_ref case_names} attribute.
 
-  \medskip Formally, the command @{command consider} is defined as derived
+  \<^medskip>
+  Formally, the command @{command consider} is defined as a derived
   Isar language element as follows:
 
   \begin{matharray}{l}
@@ -1503,7 +1514,8 @@
   export will fail! This restriction conforms to the usual manner of
   existential reasoning in Natural Deduction.
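+
+  \<^medskip>
+  A minimal sketch (assuming Isabelle/HOL and a hypothetical predicate
+  @{text "B"}):
+
+  @{verbatim \<open>
+  assume "\<exists>x. B x"
+  then obtain x where "B x" ..\<close>}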
 
-  \medskip Formally, the command @{command obtain} is defined as derived
+  \<^medskip>
+  Formally, the command @{command obtain} is defined as a derived
   Isar language element as follows, using an instrumented variant of
   @{command assume}:
 
--- a/src/Doc/Isar_Ref/Quick_Reference.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Quick_Reference.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -25,7 +25,7 @@
     @{command "write"}~@{text "c  (mx)"} & declare local mixfix syntax \\
   \end{tabular}
 
-  \medskip
+  \<^medskip>
 
   \begin{tabular}{rcl}
     @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\
--- a/src/Doc/Isar_Ref/Spec.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Spec.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -45,7 +45,8 @@
   refer to local parameters and assumptions that are discharged later. See
   \secref{sec:target} for more details.
 
-  \medskip A theory is commenced by the @{command "theory"} command, which
+  \<^medskip>
+  A theory is commenced by the @{command "theory"} command, which
   indicates imports of previous theories, according to an acyclic
   foundational order. Before the initial @{command "theory"} command, there
   may be optional document header material (like @{command section} or
@@ -199,7 +200,8 @@
   "("}@{keyword "in"}~@{text "c)"}, but individual syntax diagrams omit that
   aspect for clarity.
 
-  \medskip The exact meaning of results produced within a local theory
+  \<^medskip>
+  The exact meaning of results produced within a local theory
   context depends on the underlying target infrastructure (locale, type
   class etc.). The general idea is as follows, considering a context named
   @{text c} with parameter @{text x} and assumption @{text "A[x]"}.
@@ -628,7 +630,8 @@
   \secref{sec:goals}, term bindings may be included as expected,
   though.
   
-  \medskip Locale specifications are ``closed up'' by
+  \<^medskip>
+  Locale specifications are ``closed up'' by
   turning the given text into a predicate definition @{text
   loc_axioms} and deriving the original assumptions as local lemmas
   (modulo local definitions).  The predicate statement covers only the
@@ -847,9 +850,13 @@
   of @{command "interpretation"} and @{command "sublocale"}.  It is
   available by importing theory @{file "~~/src/Tools/Permanent_Interpretation.thy"}
   and provides
+
   \begin{enumerate}
-    \item a unified view on arbitrary suitable local theories as interpretation target; 
-    \item rewrite morphisms by means of \emph{rewrite definitions}. 
+
+  \<^enum> a unified view on arbitrary suitable local theories as interpretation target;
+
+  \<^enum> rewrite morphisms by means of \emph{rewrite definitions}.
+
   \end{enumerate}
   
   \begin{matharray}{rcl}
@@ -860,7 +867,6 @@
     @@{command permanent_interpretation} @{syntax locale_expr} \<newline>
       definitions? equations?
     ;
-
     definitions: @'defining' (@{syntax thmdecl}? @{syntax name} \<newline>
       @{syntax mixfix}? @'=' @{syntax term} + @'and');
     equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
@@ -894,11 +900,11 @@
   
   \begin{itemize}
   
-    \item produces a corresponding definition in
-      the local theory's underlying target \emph{and}
-    
-    \item augments the rewrite morphism with the equation
-      stemming from the symmetric of the corresponding definition.
+  \<^item> produces a corresponding definition in
+  the local theory's underlying target \emph{and}
+  
+  \<^item> augments the rewrite morphism with the equation
+  stemming from the symmetric version of the corresponding definition.
   
   \end{itemize}
   
@@ -907,11 +913,11 @@
   
   \begin{itemize}
   
-    \item Definitions are parsed in the syntactic interpretation
-      context, just like equations.
-  
-    \item The proof needs not consider the equations stemming from
-      definitions -- they are proved implicitly by construction.
+  \<^item> Definitions are parsed in the syntactic interpretation
+  context, just like equations.
+
+  \<^item> The proof need not consider the equations stemming from
+  definitions -- they are proved implicitly by construction.
       
   \end{itemize}
   
@@ -1054,14 +1060,14 @@
 
   \begin{itemize}
 
-  \item Local constant declarations @{text "g[\<alpha>]"} referring to the
+  \<^item> Local constant declarations @{text "g[\<alpha>]"} referring to the
   local type parameter @{text \<alpha>} and local parameters @{text "f[\<alpha>]"}
   are accompanied by theory-level constants @{text "g[?\<alpha> :: c]"}
   referring to theory-level class operations @{text "f[?\<alpha> :: c]"}.
 
-  \item Local theorem bindings are lifted as are assumptions.
+  \<^item> Local theorem bindings are lifted as are assumptions.
 
-  \item Local syntax refers to local operations @{text "g[\<alpha>]"} and
+  \<^item> Local syntax refers to local operations @{text "g[\<alpha>]"} and
   global operations @{text "g[?\<alpha> :: c]"} uniformly.  Type inference
   resolves ambiguities.  In rare cases, manual type annotations are
   needed.
@@ -1076,7 +1082,8 @@
   type-constructor arities must obey the principle of
   \emph{co-regularity} as defined below.
 
-  \medskip For the subsequent formulation of co-regularity we assume
+  \<^medskip>
+  For the subsequent formulation of co-regularity we assume
   that the class relation is closed by transitivity and reflexivity.
   Moreover the collection of arities @{text "t :: (\<^vec>s)c"} is
   completed such that @{text "t :: (\<^vec>s)c"} and @{text "c \<subseteq> c'"}
@@ -1092,7 +1099,8 @@
   This relation on sorts is further extended to tuples of sorts (of
   the same length) in the component-wise way.
 
-  \smallskip Co-regularity of the class relation together with the
+  \<^medskip>
+  Co-regularity of the class relation together with the
   arities relation means:
   \[
     @{text "t :: (\<^vec>s\<^sub>1)c\<^sub>1 \<Longrightarrow> t :: (\<^vec>s\<^sub>2)c\<^sub>2 \<Longrightarrow> c\<^sub>1 \<subseteq> c\<^sub>2 \<Longrightarrow> \<^vec>s\<^sub>1 \<subseteq> \<^vec>s\<^sub>2"}
@@ -1101,7 +1109,8 @@
   classes of some type-constructor arities are related, then the
   argument sorts need to be related in the same way.
 
-  \medskip Co-regularity is a very fundamental property of the
+  \<^medskip>
+  Co-regularity is a very fundamental property of the
   order-sorted algebra of types.  For example, it entails principal
   types and most general unifiers, e.g.\ see @{cite "nipkow-prehofer"}.
 \<close>
@@ -1376,21 +1385,22 @@
   definitions may be weakened by adding arbitrary pre-conditions:
   @{text "A \<Longrightarrow> c x y \<equiv> t"}.
 
-  \medskip The built-in well-formedness conditions for definitional
+  \<^medskip>
+  The built-in well-formedness conditions for definitional
   specifications are:
 
   \begin{itemize}
 
-  \item Arguments (on the left-hand side) must be distinct variables.
+  \<^item> Arguments (on the left-hand side) must be distinct variables.
 
-  \item All variables on the right-hand side must also appear on the
+  \<^item> All variables on the right-hand side must also appear on the
   left-hand side.
 
-  \item All type variables on the right-hand side must also appear on
+  \<^item> All type variables on the right-hand side must also appear on
   the left-hand side; this prohibits @{text "0 :: nat \<equiv> length ([] ::
   \<alpha> list)"} for example.
 
-  \item The definition must not be recursive.  Most object-logics
+  \<^item> The definition must not be recursive.  Most object-logics
   provide definitional principles that can be used to express
   recursion safely.
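+  For example, @{text "f (Suc n) \<equiv> f n"} violates both the first and
+  the last condition; such recursion may be expressed via a suitable
+  definitional principle instead, e.g.\ @{command primrec} in Isabelle/HOL.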
 
--- a/src/Doc/Isar_Ref/Synopsis.thy	Mon Oct 12 21:42:14 2015 +0200
+++ b/src/Doc/Isar_Ref/Synopsis.thy	Mon Oct 12 22:03:24 2015 +0200
@@ -131,18 +131,18 @@
 text \<open>
   \begin{itemize}
 
-  \item Lower-case identifiers are usually preferred.
+  \<^item> Lower-case identifiers are usually preferred.
 
-  \item Facts can be named after the main term within the proposition.
+  \<^item> Facts can be named after the main term within the proposition.
 
-  \item Facts should \emph{not} be named after the command that
+  \<^item> Facts should \emph{not} be named after the command that
   introduced them (@{command "assume"}, @{command "have"}).  This is
   misleading and hard to maintain.
 
-  \item Natural numbers can be used as ``meaningless'' names (more
+  \<^item> Natural numbers can be used as ``meaningless'' names (more
   appropriate than @{text "a1"}, @{text "a2"} etc.).
 
-  \item Symbolic identifiers are supported (e.g. @{text "*"}, @{text
+  \<^item> Symbolic identifiers are supported (e.g. @{text "*"}, @{text
   "**"}, @{text "***"}).
 
   \end{itemize}
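+
+  \<^medskip>
+  For example, a local fact @{text "A \<longrightarrow> B"} might be named after its
+  main term (say @{text "AB"}), or given a symbolic name like @{text "*"}
+  if it is only used in the immediately following step.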
@@ -226,13 +226,13 @@
 text \<open>
   \begin{itemize}
 
-  \item term @{text "?thesis"} --- the main conclusion of the
+  \<^item> term @{text "?thesis"} --- the main conclusion of the
   innermost pending claim
 
-  \item term @{text "\<dots>"} --- the argument of the last explicitly
-    stated result (for infix application this is the right-hand side)
+  \<^item> term @{text "\<dots>"} --- the argument of the last explicitly
+  stated result (for infix application this is the right-hand side)
 
-  \item fact @{text "this"} --- the last result produced in the text
+  \<^item> fact @{text "this"} --- the last result produced in the text
 
   \end{itemize}
 \<close>
@@ -315,10 +315,10 @@
 text \<open>
   \begin{itemize}
 
-  \item The notion of @{text trans} rule is very general due to the
+  \<^item> The notion of @{text trans} rule is very general due to the
   flexibility of Isabelle/Pure rule composition.
 
-  \item User applications may declare their own rules, with some care
+  \<^item> User applications may declare their own rules, with some care
   about the operational details of higher-order unification.
 
   \end{itemize}
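+
+  \<^medskip>
+  For example, a user-declared transitivity rule might look like this
+  (a sketch, assuming Isabelle/HOL arithmetic):
+
+  @{verbatim \<open>
+  lemma my_trans [trans]: "(a::nat) < b \<Longrightarrow> b \<le> c \<Longrightarrow> a < c"
+    by arith\<close>}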
@@ -393,12 +393,12 @@
   The proof method @{method induct} provides:
   \begin{itemize}
 
-  \item implicit rule selection and robust instantiation
+  \<^item> implicit rule selection and robust instantiation
 
-  \item context elements via symbolic case names
+  \<^item> context elements via symbolic case names
 
-  \item support for rule-structured induction statements, with local
-    parameters, premises, etc.
+  \<^item> support for rule-structured induction statements, with local
+  parameters, premises, etc.
 
   \end{itemize}
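+
+  \<^medskip>
+  A minimal sketch (assuming a hypothetical predicate @{text "P"} over
+  type @{text "nat"}):
+
+  @{verbatim \<open>
+  lemma "P (n::nat)"
+  proof (induct n)
+    case 0
+    show ?case sorry
+  next
+    case (Suc n)
+    then show ?case sorry
+  qed\<close>}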
 \<close>
@@ -423,13 +423,13 @@
   The subsequent example combines the following proof patterns:
   \begin{itemize}
 
-  \item outermost induction (over the datatype structure of natural
+  \<^item> outermost induction (over the datatype structure of natural
   numbers), to decompose the proof problem in a top-down manner
 
-  \item calculational reasoning (\secref{sec:calculations-synopsis})
+  \<^item> calculational reasoning (\secref{sec:calculations-synopsis})
   to compose the result in each case
 
-  \item solving local claims within the calculation by simplification
+  \<^item> solving local claims within the calculation by simplification
 
   \end{itemize}
 \<close>
@@ -684,9 +684,9 @@
 
   \begin{itemize}
 
-    \item backward-chaining of rules by @{inference resolution}
+  \<^item> backward-chaining of rules by @{inference resolution}
 
-    \item closing of branches by @{inference assumption}
+  \<^item> closing of branches by @{inference assumption}
 
   \end{itemize}
 
@@ -974,10 +974,10 @@
 
   \begin{itemize}
 
-  \item prefix of assumptions (or ``major premises'')
+  \<^item> prefix of assumptions (or ``major premises'')
 
-  \item one or more cases that enable to establish the main conclusion
-    in an augmented context
+  \<^item> one or more cases that make it possible to establish the main
+  conclusion in an augmented context
 
   \end{itemize}
 \<close>
@@ -1017,11 +1017,11 @@
 
   \begin{itemize}
 
-  \item native language elements to state eliminations
+  \<^item> native language elements to state eliminations
 
-  \item symbolic case names
+  \<^item> symbolic case names
 
-  \item method @{method cases} to recover this structure in a
+  \<^item> method @{method cases} to recover this structure in a
   sub-proof
 
   \end{itemize}
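+
+  \<^medskip>
+  For example (a sketch with hypothetical propositions @{text "A"},
+  @{text "B"}, @{text "C"}):
+
+  @{verbatim \<open>
+  consider (a) A | (b) B sorry
+  then have C
+  proof cases
+    case a
+    then show ?thesis sorry
+  next
+    case b
+    then show ?thesis sorry
+  qed\<close>}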