author wenzelm
Tue, 07 Oct 2014 21:29:59 +0200
changeset 58618 782f0b662cae
parent 58617 4f169d2cf6f3
child 58619 4b41df5fef81
more cartouches;
src/Doc/Implementation/Eq.thy
src/Doc/Implementation/Integration.thy
src/Doc/Implementation/Isar.thy
src/Doc/Implementation/Local_Theory.thy
src/Doc/Implementation/Logic.thy
src/Doc/Implementation/ML.thy
src/Doc/Implementation/Prelim.thy
src/Doc/Implementation/Proof.thy
src/Doc/Implementation/Syntax.thy
src/Doc/Implementation/Tactic.thy
src/Doc/Isar_Ref/Document_Preparation.thy
src/Doc/Isar_Ref/First_Order_Logic.thy
src/Doc/Isar_Ref/Framework.thy
src/Doc/Isar_Ref/Generic.thy
src/Doc/Isar_Ref/HOL_Specific.thy
src/Doc/Isar_Ref/Inner_Syntax.thy
src/Doc/Isar_Ref/ML_Tactic.thy
src/Doc/Isar_Ref/Misc.thy
src/Doc/Isar_Ref/Outer_Syntax.thy
src/Doc/Isar_Ref/Preface.thy
src/Doc/Isar_Ref/Proof.thy
src/Doc/Isar_Ref/Quick_Reference.thy
src/Doc/Isar_Ref/Spec.thy
src/Doc/Isar_Ref/Symbols.thy
src/Doc/Isar_Ref/Synopsis.thy
src/Doc/JEdit/JEdit.thy
src/Doc/System/Basics.thy
src/Doc/System/Misc.thy
src/Doc/System/Presentation.thy
src/Doc/System/Scala.thy
src/Doc/System/Sessions.thy
--- a/src/Doc/Implementation/Eq.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Eq.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* Equational reasoning *}
+chapter \<open>Equational reasoning\<close>
 
-text {* Equality is one of the most fundamental concepts of
+text \<open>Equality is one of the most fundamental concepts of
   mathematics.  The Isabelle/Pure logic (\chref{ch:logic}) provides a
   builtin relation @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} that expresses equality
   of arbitrary terms (or propositions) at the framework level, as
@@ -26,12 +26,12 @@
   equivalence, and relate it with the Pure equality.  This makes it
   possible to re-use the Pure tools for equational reasoning for
   particular object-logic connectives as well.
-*}
+\<close>
 
 
-section {* Basic equality rules \label{sec:eq-rules} *}
+section \<open>Basic equality rules \label{sec:eq-rules}\<close>
 
-text {* Isabelle/Pure uses @{text "\<equiv>"} for equality of arbitrary
+text \<open>Isabelle/Pure uses @{text "\<equiv>"} for equality of arbitrary
   terms, which includes equivalence of propositions of the logical
   framework.  The conceptual axiomatization of the constant @{text "\<equiv>
   :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} is given in \figref{fig:pure-equality}.  The
@@ -50,9 +50,9 @@
   inference function, although the rule attribute @{attribute THEN} or
   ML operator @{ML "op RS"} involve the full machinery of higher-order
   unification (modulo @{text "\<beta>\<eta>"}-conversion) and lifting of @{text
-  "\<And>/\<Longrightarrow>"} contexts. *}
+  "\<And>/\<Longrightarrow>"} contexts.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Thm.reflexive: "cterm -> thm"} \\
   @{index_ML Thm.symmetric: "thm -> thm"} \\
@@ -67,29 +67,29 @@
   these inference rules, and a few more for primitive @{text "\<beta>"} and
   @{text "\<eta>"} conversions.  Note that @{text "\<alpha>"} conversion is
   implicit due to the representation of terms with de-Bruijn indices
-  (\secref{sec:terms}). *}
+  (\secref{sec:terms}).\<close>
 
 
-section {* Conversions \label{sec:conv} *}
+section \<open>Conversions \label{sec:conv}\<close>
 
-text {*
+text \<open>
   %FIXME
 
   The classic article that introduces the concept of conversion (for
   Cambridge LCF) is @{cite "paulson:1983"}.
-*}
+\<close>
 
 
-section {* Rewriting \label{sec:rewriting} *}
+section \<open>Rewriting \label{sec:rewriting}\<close>
 
-text {* Rewriting normalizes a given term (theorem or goal) by
+text \<open>Rewriting normalizes a given term (theorem or goal) by
   replacing instances of given equalities @{text "t \<equiv> u"} in subterms.
   Rewriting continues until no rewrites are applicable to any subterm.
   This may be used to unfold simple definitions of the form @{text "f
   x\<^sub>1 \<dots> x\<^sub>n \<equiv> u"}, but is slightly more general than that.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML rewrite_rule: "Proof.context -> thm list -> thm -> thm"} \\
   @{index_ML rewrite_goals_rule: "Proof.context -> thm list -> thm -> thm"} \\
@@ -121,6 +121,6 @@
   in the proof state.
 
   \end{description}
-*}
+\<close>
 
 end
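
text \<open>A minimal sketch of the rewrite operations above, assuming a
  hypothetical fact @{text my_def} of the form @{text "f x \<equiv> u"} is in
  scope; @{ML rewrite_rule} then unfolds it in a given theorem:\<close>

ML \<open>
  (*unfold the hypothetical definition my_def in a theorem*)
  fun unfold_my_def ctxt th = rewrite_rule ctxt @{thms my_def} th;
\<close>
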
--- a/src/Doc/Implementation/Integration.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Integration.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -4,11 +4,11 @@
 imports Base
 begin
 
-chapter {* System integration *}
+chapter \<open>System integration\<close>
 
-section {* Isar toplevel \label{sec:isar-toplevel} *}
+section \<open>Isar toplevel \label{sec:isar-toplevel}\<close>
 
-text {*
+text \<open>
   The Isar \emph{toplevel state} represents the outermost configuration that
   is transformed by a sequence of transitions (commands) within a theory body.
   This is a pure value with pure functions acting on it in a timeless and
@@ -17,12 +17,12 @@
   contemporary Isabelle/Isar, processing toplevel commands usually works in
   parallel in multi-threaded Isabelle/ML @{cite "Wenzel:2009" and
   "Wenzel:2013:ITP"}.
-*}
+\<close>
 
 
-subsection {* Toplevel state *}
+subsection \<open>Toplevel state\<close>
 
-text {*
+text \<open>
   The toplevel state is a disjoint sum of empty @{text toplevel}, or @{text
   theory}, or @{text proof}. The initial toplevel is empty; a theory is
   commenced by a @{command theory} header; within a theory we may use theory
@@ -33,9 +33,9 @@
   defining the resulting fact. Further theory declarations or theorem
   statements with proofs may follow, until we eventually conclude the theory
   development by issuing @{command end} to get back to the empty toplevel.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Toplevel.state} \\
   @{index_ML_exception Toplevel.UNDEF} \\
@@ -65,9 +65,9 @@
   state if available, otherwise it raises @{ML Toplevel.UNDEF}.
 
   \end{description}
-*}
+\<close>
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "Isar.state"} & : & @{text ML_antiquotation} \\
   \end{matharray}
@@ -81,12 +81,12 @@
   ML_val} or @{command ML_command}.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Toplevel transitions \label{sec:toplevel-transition} *}
+subsection \<open>Toplevel transitions \label{sec:toplevel-transition}\<close>
 
-text {*
+text \<open>
   An Isar toplevel transition consists of a partial function on the toplevel
   state, with additional information for diagnostics and error reporting:
   there are fields for command name, source position, and other meta-data.
@@ -103,9 +103,9 @@
   source position. It is then left to the individual command parser to turn
   the given concrete syntax into a suitable transition transformer that
   adjoins actual operations on a theory or proof state.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Toplevel.keep: "(Toplevel.state -> unit) ->
   Toplevel.transition -> Toplevel.transition"} \\
@@ -148,21 +148,21 @@
   resulting facts to the target context.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Theory loader database *}
+section \<open>Theory loader database\<close>
 
-text {*
+text \<open>
   In batch mode and within dumped logic images, the theory database maintains
   a collection of theories as a directed acyclic graph. A theory may refer to
   other theories as @{keyword "imports"}, or to auxiliary files via special
   \emph{load commands} (e.g.\ @{command ML_file}). For each theory, the base
   directory of its own theory file is called \emph{master directory}: this is
   used as the relative location to refer to other files from that theory.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML use_thy: "string -> unit"} \\
   @{index_ML use_thys: "string list -> unit"} \\[0.5ex]
@@ -197,6 +197,6 @@
   information according to the file store.
 
   \end{description}
-*}
+\<close>
 
 end
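
text \<open>A minimal sketch of the theory loader interface above; the theory
  names are hypothetical and need to exist relative to the current master
  directory:\<close>

ML \<open>
  (*load two hypothetical theories "A" and "B", including their imports*)
  use_thys ["A", "B"];
\<close>
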
--- a/src/Doc/Implementation/Isar.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Isar.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* Isar language elements *}
+chapter \<open>Isar language elements\<close>
 
-text {* The Isar proof language (see also
+text \<open>The Isar proof language (see also
   @{cite \<open>\S2\<close> "isabelle-isar-ref"}) consists of three main categories of
   language elements:
 
@@ -36,12 +36,12 @@
   and @{attribute symmetric} (which affects the theorem).
 
   \end{enumerate}
-*}
+\<close>
 
 
-section {* Proof commands *}
+section \<open>Proof commands\<close>
 
-text {* A \emph{proof command} is state transition of the Isar/VM
+text \<open>A \emph{proof command} is a state transition of the Isar/VM
   proof interpreter.
 
   In principle, Isar proof commands could be defined in user-space as
@@ -61,9 +61,9 @@
   poses a problem to the user as Isar proof state and processes the
   final result relative to the context.  Thus a proof can be
   incorporated into the context of some user-space tool, without
-  modifying the Isar proof language itself.  *}
+  modifying the Isar proof language itself.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Proof.state} \\
   @{index_ML Proof.assert_forward: "Proof.state -> Proof.state"} \\
@@ -140,10 +140,10 @@
   invoked.
 
   \end{description}
-*}
+\<close>
 
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "Isar.goal"} & : & @{text ML_antiquotation} \\
   \end{matharray}
@@ -158,26 +158,26 @@
   ML_val} or @{command ML_command}.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following example peeks at a certain goal configuration. *}
+text %mlex \<open>The following example peeks at a certain goal configuration.\<close>
 
 notepad
 begin
   have A and B and C
-    ML_val {*
+    ML_val \<open>
       val n = Thm.nprems_of (#goal @{Isar.goal});
       @{assert} (n = 3);
-    *}
+\<close>
     oops
 
-text {* Here we see 3 individual subgoals in the same way as regular
-  proof methods would do.  *}
+text \<open>Here we see 3 individual subgoals in the same way as regular
+  proof methods would do.\<close>
 
 
-section {* Proof methods *}
+section \<open>Proof methods\<close>
 
-text {* A @{text "method"} is a function @{text "context \<rightarrow> thm\<^sup>* \<rightarrow> goal
+text \<open>A @{text "method"} is a function @{text "context \<rightarrow> thm\<^sup>* \<rightarrow> goal
   \<rightarrow> (cases \<times> goal)\<^sup>*\<^sup>*"} that operates on the full Isar goal
   configuration with context, goal facts, and tactical goal state and
   enumerates possible follow-up goal states, with the potential
@@ -301,9 +301,9 @@
   Isabelle99 until Isabelle2009 the system did provide various odd
   combinations of method syntax wrappers that made applications more
   complicated than necessary.}
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Proof.method} \\
   @{index_ML METHOD_CASES: "(thm list -> cases_tactic) -> Proof.method"} \\
@@ -348,16 +348,16 @@
   function.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* See also @{command method_setup} in
+text %mlex \<open>See also @{command method_setup} in
   @{cite "isabelle-isar-ref"} which includes some abstract examples.
 
   \medskip The following toy examples illustrate how the goal facts
   and state are passed to proof methods.  The predefined proof method
   called ``@{method tactic}'' wraps ML source of type @{ML_type
   tactic} (abstracted over @{ML_text facts}).  This allows immediate
-  experimentation without parsing of concrete syntax. *}
+  experimentation without parsing of concrete syntax.\<close>
 
 notepad
 begin
@@ -378,8 +378,8 @@
     done
 end
 
-text {* \medskip The next example implements a method that simplifies
-  the first subgoal by rewrite rules that are given as arguments.  *}
+text \<open>\medskip The next example implements a method that simplifies
+  the first subgoal by rewrite rules that are given as arguments.\<close>
 
 method_setup my_simp =
   \<open>Attrib.thms >> (fn thms => fn ctxt =>
@@ -388,7 +388,7 @@
         (put_simpset HOL_basic_ss ctxt addsimps thms) i)))\<close>
   "rewrite subgoal by given rules"
 
-text {* The concrete syntax wrapping of @{command method_setup} always
+text \<open>The concrete syntax wrapping of @{command method_setup} always
   passes through the proof context at the end of parsing, but it is
   not used in this example.
 
@@ -406,7 +406,7 @@
 
   \medskip Method @{method my_simp} can be used in Isar proofs like
   this:
-*}
+\<close>
 
 notepad
 begin
@@ -416,8 +416,8 @@
   have "a = c" by (my_simp a b)
 end
 
-text {* Here is a similar method that operates on all subgoals,
-  instead of just the first one. *}
+text \<open>Here is a similar method that operates on all subgoals,
+  instead of just the first one.\<close>
 
 method_setup my_simp_all =
   \<open>Attrib.thms >> (fn thms => fn ctxt =>
@@ -435,15 +435,15 @@
   have "a = c" and "c = b" by (my_simp_all a b)
 end
 
-text {* \medskip Apart from explicit arguments, common proof methods
+text \<open>\medskip Apart from explicit arguments, common proof methods
   typically work with a default configuration provided by the context. As a
   shortcut to rule management we use a cheap solution via the @{command
-  named_theorems} command to declare a dynamic fact in the context. *}
+  named_theorems} command to declare a dynamic fact in the context.\<close>
 
 named_theorems my_simp
 
-text {* The proof method is now defined as before, but we append the
-  explicit arguments and the rules from the context. *}
+text \<open>The proof method is now defined as before, but we append the
+  explicit arguments and the rules from the context.\<close>
 
 method_setup my_simp' =
   \<open>Attrib.thms >> (fn thms => fn ctxt =>
@@ -457,10 +457,10 @@
     end)\<close>
   "rewrite subgoal by given rules and my_simp rules from the context"
 
-text {*
+text \<open>
   \medskip Method @{method my_simp'} can be used in Isar proofs
   like this:
-*}
+\<close>
 
 notepad
 begin
@@ -470,7 +470,7 @@
   have "a \<equiv> c" by my_simp'
 end
 
-text {* \medskip The @{method my_simp} variants defined above are
+text \<open>\medskip The @{method my_simp} variants defined above are
   ``simple'' methods, i.e.\ the goal facts are merely inserted as goal
   premises by the @{ML SIMPLE_METHOD'} or @{ML SIMPLE_METHOD} wrapper.
   For proof methods that are similar to the standard collection of
@@ -500,12 +500,12 @@
   left-hand sides of rewrite rules.  For variations on the Simplifier,
   re-use of the existing type @{ML_type simpset} is adequate, but
   scalability would require it be maintained statically within the
-  context data, not dynamically on each tool invocation.  *}
+  context data, not dynamically on each tool invocation.\<close>
 
 
-section {* Attributes \label{sec:attributes} *}
+section \<open>Attributes \label{sec:attributes}\<close>
 
-text {* An \emph{attribute} is a function @{text "context \<times> thm \<rightarrow>
+text \<open>An \emph{attribute} is a function @{text "context \<times> thm \<rightarrow>
   context \<times> thm"}, which means both a (generic) context and a theorem
   can be modified simultaneously.  In practice this mixed form is very
   rare, instead attributes are presented either as \emph{declaration
@@ -522,9 +522,9 @@
   exactly on the variant of the generic context that is provided by
   the system, which is either global theory context or local proof
   context.  In particular, the background theory of a local context
-  must not be modified in this situation! *}
+  must not be modified in this situation!\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type attribute} \\
   @{index_ML Thm.rule_attribute: "(Context.generic -> thm -> thm) -> attribute"} \\
@@ -551,9 +551,9 @@
   ML function.
 
   \end{description}
-*}
+\<close>
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def attributes} & : & @{text ML_antiquotation} \\
   \end{matharray}
@@ -572,9 +572,9 @@
   be subject to odd effects of dynamic scoping!
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* See also @{command attribute_setup} in
-  @{cite "isabelle-isar-ref"} which includes some abstract examples. *}
+text %mlex \<open>See also @{command attribute_setup} in
+  @{cite "isabelle-isar-ref"} which includes some abstract examples.\<close>
 
 end
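
text \<open>A minimal sketch of a rule attribute in the sense above, using
  @{command attribute_setup}: the (hypothetical) attribute resolves the
  given theorem with the HOL rule @{thm [source] sym} and ignores the
  context.\<close>

attribute_setup my_sym =
  \<open>Scan.succeed (Thm.rule_attribute (fn _ => fn th => th RS @{thm sym}))\<close>
  "resolution with the symmetry rule"

text \<open>It may then be used in Isar text as @{text "foo [my_sym]"}.\<close>
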
--- a/src/Doc/Implementation/Local_Theory.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Local_Theory.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* Local theory specifications \label{ch:local-theory} *}
+chapter \<open>Local theory specifications \label{ch:local-theory}\<close>
 
-text {*
+text \<open>
   A \emph{local theory} combines aspects of both theory and proof
   context (cf.\ \secref{sec:context}), such that definitional
   specifications may be given relative to parameters and
@@ -33,12 +33,12 @@
   Isabelle.  Although a few old packages only work for global
   theories, the standard way of implementing definitional packages in
   Isabelle is via the local theory interface.
-*}
+\<close>
 
 
-section {* Definitional elements *}
+section \<open>Definitional elements\<close>
 
-text {*
+text \<open>
   There are separate elements @{text "\<DEFINE> c \<equiv> t"} for terms, and
   @{text "\<NOTE> b = thm"} for theorems.  Types are treated
   implicitly, according to Hindley-Milner discipline (cf.\
@@ -91,9 +91,9 @@
   terms and theorems that stem from the hypothetical @{text
   "\<DEFINE>"} and @{text "\<NOTE>"} elements, transformed by the
   particular target policy (see @{cite \<open>\S4--5\<close> "Haftmann-Wenzel:2009"}
-  for details).  *}
+  for details).\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type local_theory: Proof.context} \\
   @{index_ML Named_Target.init: "string -> theory -> local_theory"} \\[1ex]
@@ -151,15 +151,15 @@
   command, or @{command declare} if an empty name binding is given.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Morphisms and declarations \label{sec:morphisms} *}
+section \<open>Morphisms and declarations \label{sec:morphisms}\<close>
 
-text {*
+text \<open>
   %FIXME
 
   See also @{cite "Chaieb-Wenzel:2007"}.
-*}
+\<close>
 
 end
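
text \<open>A rough, untested sketch of the @{text "\<DEFINE>"} element from the
  ML side: enter the global target of a theory, define a hypothetical
  constant @{text c}, and exit again; all names are made up for
  illustration.\<close>

ML \<open>
  (*sketch: define c \<equiv> \<lambda>x. x within the global target, yielding a theory*)
  fun define_c thy =
    thy
    |> Named_Target.init ""
    |> Local_Theory.define
        ((@{binding c}, NoSyn), ((@{binding c_def}, []), @{term "\<lambda>x::'a. x"}))
    |> snd
    |> Local_Theory.exit_global;
\<close>
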
--- a/src/Doc/Implementation/Logic.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Logic.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* Primitive logic \label{ch:logic} *}
+chapter \<open>Primitive logic \label{ch:logic}\<close>
 
-text {*
+text \<open>
   The logical foundations of Isabelle/Isar are that of the Pure logic,
   which has been introduced as a Natural Deduction framework in
   @{cite paulson700}.  This is essentially the same logic as ``@{text
@@ -27,12 +27,12 @@
   of the core calculus: type constructors, term constants, and facts
   (proof constants) may involve arbitrary type schemes, but the type
   of a locally fixed term parameter is also fixed!}
-*}
+\<close>
 
 
-section {* Types \label{sec:types} *}
+section \<open>Types \label{sec:types}\<close>
 
-text {*
+text \<open>
   The language of types is an uninterpreted order-sorted first-order
   algebra; types are qualified by ordered type classes.
 
@@ -113,9 +113,9 @@
   Consequently, type unification has most general solutions (modulo
   equivalence of sorts), so type-inference produces principal types as
   expected @{cite "nipkow-prehofer"}.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type class: string} \\
   @{index_ML_type sort: "class list"} \\
@@ -184,9 +184,9 @@
   the arity @{text "\<kappa> :: (\<^vec>s)s"}.
 
   \end{description}
-*}
+\<close>
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "class"} & : & @{text ML_antiquotation} \\
   @{ML_antiquotation_def "sort"} & : & @{text ML_antiquotation} \\
@@ -230,12 +230,12 @@
   --- as constructor term for datatype @{ML_type typ}.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Terms \label{sec:terms} *}
+section \<open>Terms \label{sec:terms}\<close>
 
-text {*
+text \<open>
   The language of terms is that of simply-typed @{text "\<lambda>"}-calculus
   with de-Bruijn indices for bound variables (cf.\ @{cite debruijn72}
   or @{cite "paulson-ml2"}), with the types being determined by the
@@ -350,9 +350,9 @@
   mostly for parsing and printing.  Full @{text "\<alpha>\<beta>\<eta>"}-conversion is
   commonplace in various standard operations (\secref{sec:obj-rules})
   that are based on higher-order unification and matching.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type term} \\
   @{index_ML_op "aconv": "term * term -> bool"} \\
@@ -433,9 +433,9 @@
   type instance vs.\ compact type arguments form.
 
   \end{description}
-*}
+\<close>
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "const_name"} & : & @{text ML_antiquotation} \\
   @{ML_antiquotation_def "const_abbrev"} & : & @{text ML_antiquotation} \\
@@ -476,24 +476,24 @@
   @{text "\<phi>"} --- as constructor term for datatype @{ML_type term}.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Theorems \label{sec:thms} *}
+section \<open>Theorems \label{sec:thms}\<close>
 
-text {*
+text \<open>
   A \emph{proposition} is a well-typed term of type @{text "prop"}, a
   \emph{theorem} is a proven proposition (depending on a context of
   hypotheses and the background theory).  Primitive inferences include
   plain Natural Deduction rules for the primary connectives @{text
   "\<And>"} and @{text "\<Longrightarrow>"} of the framework.  There is also a builtin
   notion of equality/equivalence @{text "\<equiv>"}.
-*}
+\<close>
 
 
-subsection {* Primitive connectives and rules \label{sec:prim-rules} *}
+subsection \<open>Primitive connectives and rules \label{sec:prim-rules}\<close>
 
-text {*
+text \<open>
   The theory @{text "Pure"} contains constant declarations for the
   primitive connectives @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"} of
   the logical framework, see \figref{fig:pure-connectives}.  The
@@ -629,9 +629,9 @@
   "\<^vec>\<alpha>"}.  Thus overloaded definitions essentially work by
   primitive recursion over the syntactic structure of a single type
   argument.  See also @{cite \<open>\S4.3\<close> "Haftmann-Wenzel:2006:classes"}.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Logic.all: "term -> term -> term"} \\
   @{index_ML Logic.mk_implies: "term * term -> term"} \\
@@ -767,10 +767,10 @@
   "\<^vec>d\<^sub>\<sigma>"}.
 
   \end{description}
-*}
+\<close>
 
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "ctyp"} & : & @{text ML_antiquotation} \\
   @{ML_antiquotation_def "cterm"} & : & @{text ML_antiquotation} \\
@@ -830,12 +830,12 @@
 
   \end{description}
 
-*}
+\<close>
 
 
-subsection {* Auxiliary connectives \label{sec:logic-aux} *}
+subsection \<open>Auxiliary connectives \label{sec:logic-aux}\<close>
 
-text {* Theory @{text "Pure"} provides a few auxiliary connectives
+text \<open>Theory @{text "Pure"} provides a few auxiliary connectives
   that are defined on top of the primitive ones, see
   \figref{fig:pure-aux}.  These special constants are useful in
   certain internal encodings, and are normally not directly exposed to
@@ -890,9 +890,9 @@
   TYPE(\<alpha>) \<equiv> A[\<alpha>]"} defines @{text "c :: \<alpha> itself \<Rightarrow> prop"} in terms of
   a proposition @{text "A"} that depends on an additional type
   argument, which is essentially a predicate on types.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Conjunction.intr: "thm -> thm -> thm"} \\
   @{index_ML Conjunction.elim: "thm -> thm * thm"} \\
@@ -922,12 +922,12 @@
   @{text "\<tau>"}.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Sort hypotheses *}
+subsection \<open>Sort hypotheses\<close>
 
-text {* Type variables are decorated with sorts, as explained in
+text \<open>Type variables are decorated with sorts, as explained in
   \secref{sec:types}.  This constrains type instantiation to certain
   ranges of types: variable @{text "\<alpha>\<^sub>s"} may only be assigned to types
   @{text "\<tau>"} that belong to sort @{text "s"}.  Within the logic, sort
@@ -951,9 +951,9 @@
   they are typically left over from intermediate reasoning with type
   classes that can be satisfied by some concrete type @{text "\<tau>"} of
   sort @{text "s"} to replace the hypothetical type variable @{text
-  "\<alpha>\<^sub>s"}.  *}
+  "\<alpha>\<^sub>s"}.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Thm.extra_shyps: "thm -> sort list"} \\
   @{index_ML Thm.strip_shyps: "thm -> thm"} \\
@@ -969,11 +969,11 @@
   sort hypotheses that can be witnessed from the type signature.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following artificial example demonstrates the
+text %mlex \<open>The following artificial example demonstrates the
   derivation of @{prop False} with a pending sort hypothesis involving
-  a logically empty sort.  *}
+  a logically empty sort.\<close>
 
 class empty =
   assumes bad: "\<And>(x::'a) y. x \<noteq> y"
@@ -981,19 +981,19 @@
 theorem (in empty) false: False
   using bad by blast
 
-ML {*
+ML \<open>
   @{assert} (Thm.extra_shyps @{thm false} = [@{sort empty}])
-*}
+\<close>
 
-text {* Thanks to the inference kernel managing sort hypothesis
+text \<open>Thanks to the inference kernel managing sort hypotheses
   according to their logical significance, this example is merely an
   instance of \emph{ex falso quodlibet consequitur} --- not a collapse
-  of the logical framework! *}
+  of the logical framework!\<close>
 
 
-section {* Object-level rules \label{sec:obj-rules} *}
+section \<open>Object-level rules \label{sec:obj-rules}\<close>
 
-text {*
+text \<open>
   The primitive inferences covered so far mostly serve foundational
   purposes.  User-level reasoning usually works via object-level rules
   that are represented as theorems of Pure.  Composition of rules
@@ -1002,12 +1002,12 @@
   \emph{lifting} of rules into a context of @{text "\<And>"} and @{text
   "\<Longrightarrow>"} connectives.  Thus the full power of higher-order Natural
   Deduction in Isabelle/Pure becomes readily available.
-*}
+\<close>
 
 
-subsection {* Hereditary Harrop Formulae *}
+subsection \<open>Hereditary Harrop Formulae\<close>
 
-text {*
+text \<open>
   The idea of object-level rules is to model Natural Deduction
   inferences in the style of Gentzen @{cite "Gentzen:1935"}, but we allow
   arbitrary nesting similar to @{cite extensions91}.  The most basic
@@ -1071,9 +1071,9 @@
   parameters, and vacuous quantifiers vanish automatically.
 
   \end{itemize}
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
   \end{mldecls}
@@ -1086,12 +1086,12 @@
   handle Hereditary Harrop Formulae properly.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Rule composition *}
+subsection \<open>Rule composition\<close>
 
-text {*
+text \<open>
   The rule calculus of Isabelle/Pure provides two main inferences:
   @{inference resolution} (i.e.\ back-chaining of rules) and
   @{inference assumption} (i.e.\ closing a branch), both modulo
@@ -1148,9 +1148,9 @@
   \]
 
   %FIXME @{inference_def elim_resolution}, @{inference_def dest_resolution}
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_op "RSN": "thm * (int * thm) -> thm"} \\
   @{index_ML_op "RS": "thm * thm -> thm"} \\
@@ -1201,12 +1201,12 @@
   source language.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Proof terms \label{sec:proof-terms} *}
+section \<open>Proof terms \label{sec:proof-terms}\<close>
 
-text {* The Isabelle/Pure inference kernel can record the proof of
+text \<open>The Isabelle/Pure inference kernel can record the proof of
   each theorem as a proof term that contains all logical inferences in
   detail.  Rule composition by resolution (\secref{sec:obj-rules}) and
   type-class reasoning is broken down to primitive rules of the
@@ -1270,12 +1270,12 @@
   digest about oracles and promises occurring in the original proof.
   This allows the inference kernel to manage this critical information
   without the full overhead of explicit proof terms.
-*}
+\<close>
 
 
-subsection {* Reconstructing and checking proof terms *}
+subsection \<open>Reconstructing and checking proof terms\<close>
 
-text {* Fully explicit proof terms can be large, but most of this
+text \<open>Fully explicit proof terms can be large, but most of this
   information is redundant and can be reconstructed from the context.
   Therefore, the Isabelle/Pure inference kernel records only
   \emph{implicit} proof terms, by omitting all typing information in
@@ -1288,12 +1288,12 @@
 
   The \emph{proof checker} expects a fully reconstructed proof term,
   and can turn it into a theorem by replaying its primitive inferences
-  within the kernel.  *}
+  within the kernel.\<close>
 
 
-subsection {* Concrete syntax of proof terms *}
+subsection \<open>Concrete syntax of proof terms\<close>
 
-text {* The concrete syntax of proof terms is a slight extension of
+text \<open>The concrete syntax of proof terms is a slight extension of
   the regular inner syntax of Isabelle/Pure @{cite "isabelle-isar-ref"}.
   Its main syntactic category @{syntax (inner) proof} is defined as
   follows:
@@ -1328,9 +1328,9 @@
 
   \medskip There are separate read and print operations for proof
   terms, in order to avoid conflicts with the regular term language.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type proof} \\
   @{index_ML_type proof_body} \\
@@ -1404,10 +1404,10 @@
   pretty-prints the given proof term.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* Detailed proof information of a theorem may be retrieved
-  as follows: *}
+text %mlex \<open>Detailed proof information of a theorem may be retrieved
+  as follows:\<close>
 
 lemma ex: "A \<and> B \<longrightarrow> B \<and> A"
 proof
@@ -1416,7 +1416,7 @@
   then show "B \<and> A" ..
 qed
 
-ML_val {*
+ML_val \<open>
   (*proof body with digest*)
   val body = Proofterm.strip_thm (Thm.proof_body_of @{thm ex});
 
@@ -1428,18 +1428,18 @@
   val all_thms =
     Proofterm.fold_body_thms
       (fn (name, _, _) => insert (op =) name) [body] [];
-*}
+\<close>
 
-text {* The result refers to various basic facts of Isabelle/HOL:
+text \<open>The result refers to various basic facts of Isabelle/HOL:
   @{thm [source] HOL.impI}, @{thm [source] HOL.conjE}, @{thm [source]
   HOL.conjI} etc.  The combinator @{ML Proofterm.fold_body_thms}
   recursively explores the graph of the proofs of all theorems being
   used here.
 
   \medskip Alternatively, we may produce a proof term manually, and
-  turn it into a theorem as follows: *}
+  turn it into a theorem as follows:\<close>
 
-ML_val {*
+ML_val \<open>
   val thy = @{theory};
   val prf =
     Proof_Syntax.read_proof thy true false
@@ -1452,11 +1452,11 @@
     |> Reconstruct.reconstruct_proof thy @{prop "A \<and> B \<longrightarrow> B \<and> A"}
     |> Proof_Checker.thm_of_proof thy
     |> Drule.export_without_context;
-*}
+\<close>
 
-text {* \medskip See also @{file "~~/src/HOL/Proofs/ex/XML_Data.thy"}
+text \<open>\medskip See also @{file "~~/src/HOL/Proofs/ex/XML_Data.thy"}
   for further examples, with export and import of proof terms via
   XML/ML data representation.
-*}
+\<close>
 
 end
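
text \<open>A minimal sketch of building a Pure proposition with the primitive
  term operations above:\<close>

ML \<open>
  (*construct the proposition "\<And>x. A x \<Longrightarrow> A x" by hand*)
  val x = Free ("x", @{typ "'a"});
  val A = Free ("A", @{typ "'a \<Rightarrow> prop"});
  val prp = Logic.all x (Logic.mk_implies (A $ x, A $ x));
\<close>
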
--- a/src/Doc/Implementation/ML.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/ML.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -4,9 +4,9 @@
 imports Base
 begin
 
-chapter {* Isabelle/ML *}
-
-text {* Isabelle/ML is best understood as a certain culture based on
+chapter \<open>Isabelle/ML\<close>
+
+text \<open>Isabelle/ML is best understood as a certain culture based on
   Standard ML.  Thus it is not a new programming language, but a
   certain way to use SML at an advanced level within the Isabelle
   environment.  This covers a variety of aspects that are geared
@@ -27,12 +27,12 @@
   @{url "http://isabelle.in.tum.de/repos/isabelle"} for the full
   Mercurial history.  There are symbolic tags to refer to official
   Isabelle releases, as opposed to arbitrary \emph{tip} versions that
-  merely reflect snapshots that are never really up-to-date.}  *}
-
-
-section {* Style and orthography *}
-
-text {* The sources of Isabelle/Isar are optimized for
+  merely reflect snapshots that are never really up-to-date.}\<close>
+
+
+section \<open>Style and orthography\<close>
+
+text \<open>The sources of Isabelle/Isar are optimized for
   \emph{readability} and \emph{maintainability}.  The main purpose is
   to tell an informed reader what is really going on and how things
   really work.  This is a non-trivial aim, but it is supported by a
@@ -57,12 +57,12 @@
   sources: it helps to read quickly over the text and see through the
   main points, without getting distracted by accidental presentation
   of free-style code.
-*}
-
-
-subsection {* Header and sectioning *}
-
-text {* Isabelle source files have a certain standardized header
+\<close>
+
+
+subsection \<open>Header and sectioning\<close>
+
+text \<open>Isabelle source files have a certain standardized header
   format (with precise spacing) that follows ancient traditions
   reaching back to the earliest versions of the system by Larry
   Paulson.  See @{file "~~/src/Pure/thm.ML"}, for example.
@@ -98,12 +98,12 @@
   \medskip The precise wording of the prose text given in these
   headings is chosen carefully to introduce the main theme of the
   subsequent formal ML text.
-*}
-
-
-subsection {* Naming conventions *}
-
-text {* Since ML is the primary medium to express the meaning of the
+\<close>
+
+
+subsection \<open>Naming conventions\<close>
+
+text \<open>Since ML is the primary medium to express the meaning of the
   source text, naming of ML entities requires special care.
 
   \paragraph{Notation.}  A name consists of 1--3 \emph{words} (rarely
@@ -282,12 +282,12 @@
   certificate.
 
   \end{itemize}
-*}
-
-
-subsection {* General source layout *}
-
-text {*
+\<close>
+
+
+subsection \<open>General source layout\<close>
+
+text \<open>
   The general Isabelle/ML source layout imitates regular type-setting
   conventions, augmented by the requirements for deeply nested expressions
   that are commonplace in functional programming.
@@ -519,12 +519,12 @@
   \medskip In general the source layout is meant to emphasize the
   structure of complex language expressions, not to pretend that SML
   had a completely different syntax (say that of Haskell, Scala, Java).
-*}
-
-
-section {* ML embedded into Isabelle/Isar *}
-
-text {* ML and Isar are intertwined via an open-ended bootstrap
+\<close>
+
+
+section \<open>ML embedded into Isabelle/Isar\<close>
+
+text \<open>ML and Isar are intertwined via an open-ended bootstrap
   process that provides more and more programming facilities and
   logical content in an alternating manner.  Bootstrapping starts from
   the raw environment of existing implementations of Standard ML
@@ -544,12 +544,12 @@
   add-on tools can be implemented in ML within the Isar context in the same
   manner: ML is part of the standard repertoire of Isabelle, and there is no
   distinction between ``users'' and ``developers'' in this respect.
-*}
-
-
-subsection {* Isar ML commands *}
-
-text {*
+\<close>
+
+
+subsection \<open>Isar ML commands\<close>
+
+text \<open>
   The primary Isar source language provides facilities to ``open a window'' to
   the underlying ML compiler. Especially see the Isar commands @{command_ref
   "ML_file"} and @{command_ref "ML"}: both work the same way, but the source
@@ -559,20 +559,20 @@
   @{command_ref declaration}. Even more fine-grained embedding of ML into Isar
   is encountered in the proof method @{method_ref tactic}, which refines the
   pending goal state via a given expression of type @{ML_type tactic}.
-*}
-
-text %mlex {* The following artificial example demonstrates some ML
+\<close>
+
+text %mlex \<open>The following artificial example demonstrates some ML
   toplevel declarations within the implicit Isar theory context.  This
   is regular functional programming without referring to logical
   entities yet.
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   fun factorial 0 = 1
     | factorial n = n * factorial (n - 1)
-*}
-
-text {* Here the ML environment is already managed by Isabelle, i.e.\
+\<close>
+
+text \<open>Here the ML environment is already managed by Isabelle, i.e.\
   the @{ML factorial} function is not yet accessible in the preceding
   paragraph, nor in a different theory that is independent from the
   current one in the import hierarchy.
@@ -589,18 +589,18 @@
   @{command_ref "ML_prf"} instead of Instead of @{command_ref "ML"}.
   As illustrated below, the effect on the ML environment is local to
   the whole proof body, irrespective of the block structure.
-*}
+\<close>
 
 notepad
 begin
-  ML_prf %"ML" {* val a = 1 *}
+  ML_prf %"ML" \<open>val a = 1\<close>
   {
-    ML_prf %"ML" {* val b = a + 1 *}
-  } -- {* Isar block structure ignored by ML environment *}
-  ML_prf %"ML" {* val c = b + 1 *}
+    ML_prf %"ML" \<open>val b = a + 1\<close>
+  } -- \<open>Isar block structure ignored by ML environment\<close>
+  ML_prf %"ML" \<open>val c = b + 1\<close>
 end
 
-text {* By side-stepping the normal scoping rules for Isar proof
+text \<open>By side-stepping the normal scoping rules for Isar proof
   blocks, embedded ML code can refer to the different contexts and
   manipulate corresponding entities, e.g.\ export a fact from a block
   context.
@@ -612,29 +612,29 @@
   invoking @{ML factorial}: @{command ML_val} takes care of printing the ML
   toplevel result, but @{command ML_command} is silent so we produce an
   explicit output message.
-*}
-
-ML_val {* factorial 100 *}
-ML_command {* writeln (string_of_int (factorial 100)) *}
+\<close>
+
+ML_val \<open>factorial 100\<close>
+ML_command \<open>writeln (string_of_int (factorial 100))\<close>
 
 notepad
 begin
-  ML_val {* factorial 100 *}
-  ML_command {* writeln (string_of_int (factorial 100)) *}
+  ML_val \<open>factorial 100\<close>
+  ML_command \<open>writeln (string_of_int (factorial 100))\<close>
 end
 
 
-subsection {* Compile-time context *}
-
-text {* Whenever the ML compiler is invoked within Isabelle/Isar, the
+subsection \<open>Compile-time context\<close>
+
+text \<open>Whenever the ML compiler is invoked within Isabelle/Isar, the
   formal context is passed as a thread-local reference variable.  Thus
   ML code may access the theory context during compilation, by reading
   or writing the (local) theory under construction.  Note that such
   direct access to the compile-time context is rare.  In practice it
   is typically done via some derived ML functions instead.
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML ML_Context.the_generic_context: "unit -> Context.generic"} \\
   @{index_ML "Context.>>": "(Context.generic -> Context.generic) -> unit"} \\
@@ -667,12 +667,12 @@
   invoked at run-time.  The majority of ML code either uses static
   antiquotations (\secref{sec:ML-antiq}) or refers to the theory or
   proof context at run-time, by explicit functional abstraction.
-*}
-
-
-subsection {* Antiquotations \label{sec:ML-antiq} *}
-
-text {* A very important consequence of embedding ML into Isar is the
+\<close>
+
+
+subsection \<open>Antiquotations \label{sec:ML-antiq}\<close>
+
+text \<open>A very important consequence of embedding ML into Isar is the
   concept of \emph{ML antiquotation}.  The standard token language of
   ML is augmented by special syntactic entities of the following form:
 
@@ -691,12 +691,12 @@
   scheme allows one to refer to formal entities in a robust manner, with
   proper static scoping and with some degree of logical checking of
   small portions of the code.
-*}
-
-
-subsection {* Printing ML values *}
-
-text {* The ML compiler knows about the structure of values according
+\<close>
+
+
+subsection \<open>Printing ML values\<close>
+
+text \<open>The ML compiler knows about the structure of values according
   to their static type, and can print them in the manner of its
   toplevel, although the details are non-portable.  The
   antiquotations @{ML_antiquotation_def "make_string"} and
@@ -706,9 +706,9 @@
 
   This is occasionally useful for diagnostic or demonstration
   purposes.  Note that production-quality tools require proper
-  user-level error messages, avoiding raw ML values in the output. *}
-
-text %mlantiq {*
+  user-level error messages, avoiding raw ML values in the output.\<close>
+
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "make_string"} & : & @{text ML_antiquotation} \\
   @{ML_antiquotation_def "print"} & : & @{text ML_antiquotation} \\
@@ -733,12 +733,12 @@
   output function is @{ML writeln}.
 
   \end{description}
-*}
-
-text %mlex {* The following artificial examples show how to produce
-  adhoc output of ML values for debugging purposes. *}
-
-ML {*
+\<close>
+
+text %mlex \<open>The following artificial examples show how to produce
+  adhoc output of ML values for debugging purposes.\<close>
+
+ML \<open>
   val x = 42;
   val y = true;
 
@@ -746,12 +746,12 @@
 
   @{print} {x = x, y = y};
   @{print tracing} {x = x, y = y};
-*}
-
-
-section {* Canonical argument order \label{sec:canonical-argument-order} *}
-
-text {* Standard ML is a language in the tradition of @{text
+\<close>
+
+
+section \<open>Canonical argument order \label{sec:canonical-argument-order}\<close>
+
+text \<open>Standard ML is a language in the tradition of @{text
   "\<lambda>"}-calculus and \emph{higher-order functional programming},
   similar to OCaml, Haskell, or Isabelle/Pure and HOL as logical
   languages.  Getting acquainted with the native style of representing
@@ -818,12 +818,12 @@
   inversion of the composition order is due to conventional
   mathematical notation, which can be easily amended as explained
   below.
-*}
-
-
-subsection {* Forward application and composition *}
-
-text {* Regular function application and infix notation works best for
+\<close>
+
+
+subsection \<open>Forward application and composition\<close>
+
+text \<open>Regular function application and infix notation works best for
   relatively deeply structured expressions, e.g.\ @{text "h (f x y + g
   z)"}.  The important special case of \emph{linear transformation}
   applies a cascade of functions @{text "f\<^sub>n (\<dots> (f\<^sub>1 x))"}.  This
@@ -853,21 +853,21 @@
   @{text "(f #-> g) x"} & @{text "\<equiv>"} & @{text "x |> f |-> g"} \\
   \end{tabular}
   \medskip
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_op "|> ": "'a * ('a -> 'b) -> 'b"} \\
   @{index_ML_op "|-> ": "('c * 'a) * ('c -> 'a -> 'b) -> 'b"} \\
   @{index_ML_op "#> ": "('a -> 'b) * ('b -> 'c) -> 'a -> 'c"} \\
   @{index_ML_op "#-> ": "('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd"} \\
   \end{mldecls}
-*}
-
-
-subsection {* Canonical iteration *}
-
-text {* As explained above, a function @{text "f: \<alpha> \<rightarrow> \<beta> \<rightarrow> \<beta>"} can be
+\<close>
+
+
+subsection \<open>Canonical iteration\<close>
+
+text \<open>As explained above, a function @{text "f: \<alpha> \<rightarrow> \<beta> \<rightarrow> \<beta>"} can be
   understood as update on a configuration of type @{text "\<beta>"},
   parameterized by an argument of type @{text "\<alpha>"}.  Given @{text "a: \<alpha>"}
   the partial application @{text "(f a): \<beta> \<rightarrow> \<beta>"} operates
@@ -891,9 +891,9 @@
   "fold"} and @{text "map"} simultaneously: each application of @{text
   "f"} produces an updated configuration together with a side-result;
   the iteration collects all such side-results as a separate list.
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML fold: "('a -> 'b -> 'b) -> 'a list -> 'b -> 'b"} \\
   @{index_ML fold_rev: "('a -> 'b -> 'b) -> 'a list -> 'b -> 'b"} \\
@@ -923,14 +923,14 @@
   versions should be ignored, and the canonical @{ML fold} (or @{ML fold_rev})
   used exclusively.
   \end{warn}
-*}
-
-text %mlex {* The following example shows how to fill a text buffer
+\<close>
+
+text %mlex \<open>The following example shows how to fill a text buffer
   incrementally by adding strings, either individually or from a given
   list.
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   val s =
     Buffer.empty
     |> Buffer.add "digits: "
@@ -938,9 +938,9 @@
     |> Buffer.content;
 
   @{assert} (s = "digits: 0123456789");
-*}
-
-text {* Note how @{ML "fold (Buffer.add o string_of_int)"} above saves
+\<close>
+
+text \<open>Note how @{ML "fold (Buffer.add o string_of_int)"} above saves
   an extra @{ML "map"} over the given list.  This kind of peephole
   optimization reduces both the code size and the tree structures in
   memory (``deforestation''), but it requires some practice to read
@@ -949,9 +949,9 @@
   \medskip The next example elaborates the idea of canonical
   iteration, demonstrating fast accumulation of tree content using a
   text buffer.
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   datatype tree = Text of string | Elem of string * tree list;
 
   fun slow_content (Text txt) = txt
@@ -968,9 +968,9 @@
 
   fun fast_content tree =
     Buffer.empty |> add_content tree |> Buffer.content;
-*}
-
-text {* The slowness of @{ML slow_content} is due to the @{ML implode} of
+\<close>
+
+text \<open>The slowness of @{ML slow_content} is due to the @{ML implode} of
   the recursive results, because it copies previously produced strings
   again and again.
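
text \<open>As a quick sanity check (a sketch), both versions agree on a
  concrete tree:\<close>

ML \<open>
  val tree = Elem ("a", [Text "x", Elem ("b", [Text "y"]), Text "z"]);
  @{assert} (slow_content tree = fast_content tree);
\<close>
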
 
@@ -994,12 +994,12 @@
   many practical situations, it is customary to provide the
   incremental @{ML add_content} only and leave the initialization and
   termination to the concrete application to the user.
-*}
-
-
-section {* Message output channels \label{sec:message-channels} *}
-
-text {* Isabelle provides output channels for different kinds of
+\<close>
+
+
+section \<open>Message output channels \label{sec:message-channels}\<close>
+
+text \<open>Isabelle provides output channels for different kinds of
   messages: regular output, high-volume tracing information, warnings,
   and errors.
 
@@ -1015,9 +1015,9 @@
   resulting messages together.  For example, after deleting a command
   from a given theory document version, the corresponding message
   output can be retracted from the display.
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML writeln: "string -> unit"} \\
   @{index_ML tracing: "string -> unit"} \\
@@ -1082,25 +1082,25 @@
   issued by a single invocation of @{ML writeln} etc.\ with the
   functional concatenation of all message constituents.
   \end{warn}
-*}
-
-text %mlex {* The following example demonstrates a multi-line
+\<close>
+
+text %mlex \<open>The following example demonstrates a multi-line
   warning.  Note that in some situations the user sees only the first
   line, so the most important point should be made first.
-*}
-
-ML_command {*
+\<close>
+
+ML_command \<open>
   warning (cat_lines
    ["Beware the Jabberwock, my son!",
     "The jaws that bite, the claws that catch!",
     "Beware the Jubjub Bird, and shun",
     "The frumious Bandersnatch!"]);
-*}
-
-
-section {* Exceptions \label{sec:exceptions} *}
-
-text {* The Standard ML semantics of strict functional evaluation
+\<close>
+
+
+section \<open>Exceptions \label{sec:exceptions}\<close>
+
+text \<open>The Standard ML semantics of strict functional evaluation
   together with exceptions is rather well defined, but some delicate
   points need to be observed to prevent ML programs from going wrong
   despite static type-checking.  Exceptions in Isabelle/ML are
@@ -1175,9 +1175,9 @@
   etc.\ needs to be followed immediately by re-raising of the original
   exception.
   \end{warn}
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML try: "('a -> 'b) -> 'a -> 'b option"} \\
   @{index_ML can: "('a -> 'b) -> 'a -> bool"} \\
@@ -1222,9 +1222,9 @@
   useful for debugging, but not suitable for production code.
 
   \end{description}
-*}
-
-text %mlantiq {*
+\<close>
+
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "assert"} & : & @{text ML_antiquotation} \\
   \end{matharray}
@@ -1237,12 +1237,12 @@
   assertions is included in the error output.
 
   \end{description}
-*}
-
-
-section {* Strings of symbols \label{sec:symbols} *}
-
-text {* A \emph{symbol} constitutes the smallest textual unit in
+\<close>
+
+
+section \<open>Strings of symbols \label{sec:symbols}\<close>
+
+text \<open>A \emph{symbol} constitutes the smallest textual unit in
   Isabelle/ML --- raw ML characters are normally not encountered at
   all.  Isabelle strings consist of a sequence of symbols, represented
   as a packed string or an exploded list of strings.  Each symbol is
@@ -1294,9 +1294,9 @@
   ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as @{text "\<^bold>\<alpha>"}. On-screen
   rendering usually works by mapping a finite subset of Isabelle symbols to
   suitable Unicode characters.
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type "Symbol.symbol": string} \\
   @{index_ML Symbol.explode: "string -> Symbol.symbol list"} \\
@@ -1348,12 +1348,12 @@
   ``symbols'' as explained above.  Thus Isabelle sources can refer to
   an infinite store of user-defined symbols, without having to worry
   about the multitude of Unicode encodings that have emerged over the
-  years.  *}
-
-
-section {* Basic data types *}
-
-text {* The basis library proposal of SML97 needs to be treated with
+  years.\<close>
+
+
+section \<open>Basic data types\<close>
+
+text \<open>The basis library proposal of SML97 needs to be treated with
   caution.  Many of its operations simply do not fit with important
   Isabelle/ML conventions (like ``canonical argument order'', see
   \secref{sec:canonical-argument-order}), others cause problems with
@@ -1362,12 +1362,12 @@
 
   Subsequently we give a brief overview of important operations on
   basic ML data types.
-*}
-
-
-subsection {* Characters *}
-
-text %mlref {*
+\<close>
+
+
+subsection \<open>Characters\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type char} \\
   \end{mldecls}
@@ -1379,12 +1379,12 @@
   \secref{sec:symbols}).
 
   \end{description}
-*}
-
-
-subsection {* Strings *}
-
-text %mlref {*
+\<close>
+
+
+subsection \<open>Strings\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type string} \\
   \end{mldecls}
@@ -1415,31 +1415,31 @@
   source text as strings of symbols, instead of raw characters.
 
   \end{description}
-*}
-
-text %mlex {* The subsequent example illustrates the difference of
+\<close>
+
+text %mlex \<open>The subsequent example illustrates the difference between
   physical addressing of bytes and logical addressing of symbols in
   Isabelle strings.
-*}
-
-ML_val {*
+\<close>
+
+ML_val \<open>
   val s = "\<A>";
 
   @{assert} (length (Symbol.explode s) = 1);
   @{assert} (size s = 4);
-*}
-
-text {* Note that in Unicode renderings of the symbol @{text "\<A>"},
+\<close>
+
+text \<open>Note that in Unicode renderings of the symbol @{text "\<A>"},
   variations of encodings like UTF-8 or UTF-16 pose delicate questions
   about the multi-byte representations of its codepoint, which is outside
   of the 16-bit address space of the original Unicode standard from
   the 1990s.  In Isabelle/ML it is just ``\verb,\,\verb,<A>,''
-  literally, using plain ASCII characters beyond any doubts. *}
-
-
-subsection {* Integers *}
-
-text %mlref {*
+  literally, using plain ASCII characters beyond any doubt.\<close>
+
+
+subsection \<open>Integers\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type int} \\
   \end{mldecls}
@@ -1461,12 +1461,12 @@
   operations.
 
   \end{description}
-*}
-
-
-subsection {* Time *}
-
-text %mlref {*
+\<close>
+
+
+subsection \<open>Time\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Time.time} \\
   @{index_ML seconds: "real -> Time.time"} \\
@@ -1485,12 +1485,12 @@
   are maintained externally.
 
   \end{description}
-*}
-
-
-subsection {* Options *}
-
-text %mlref {*
+\<close>
+
+
+subsection \<open>Options\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Option.map: "('a -> 'b) -> 'a option -> 'b option"} \\
   @{index_ML is_some: "'a option -> bool"} \\
@@ -1500,22 +1500,22 @@
   @{index_ML the_list: "'a option -> 'a list"} \\
   @{index_ML the_default: "'a -> 'a option -> 'a"} \\
   \end{mldecls}
-*}
-
-text {* Apart from @{ML Option.map} most other operations defined in
+\<close>
+
+text \<open>Apart from @{ML Option.map} most other operations defined in
   structure @{ML_structure Option} are alien to Isabelle/ML and never
   used.  The operations shown above are defined in @{file
-  "~~/src/Pure/General/basics.ML"}.  *}
-
-
-subsection {* Lists *}
-
-text {* Lists are ubiquitous in ML as simple and light-weight
+  "~~/src/Pure/General/basics.ML"}.\<close>
+
+
+subsection \<open>Lists\<close>
+
+text \<open>Lists are ubiquitous in ML as simple and light-weight
   ``collections'' for many everyday programming tasks.  Isabelle/ML
   provides important additions and improvements over operations that
-  are predefined in the SML97 library. *}
-
-text %mlref {*
+  are predefined in the SML97 library.\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML cons: "'a -> 'a list -> 'a list"} \\
   @{index_ML member: "('b * 'a -> bool) -> 'a list -> 'b -> bool"} \\
@@ -1546,17 +1546,17 @@
   source: later declarations take precedence over earlier ones.
 
   \end{description}
-*}
-
-text %mlex {* Using canonical @{ML fold} together with @{ML cons} (or
+\<close>
+
+text %mlex \<open>Using canonical @{ML fold} together with @{ML cons} (or
   similar standard operations) alternates the orientation of data.
   This is quite natural and should not be altered forcibly by inserting
   extra applications of @{ML rev}.  The alternative @{ML fold_rev} can
   be used in the few situations where alternation should be
   prevented.
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   val items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
 
   val list1 = fold cons items [];
@@ -1564,16 +1564,16 @@
 
   val list2 = fold_rev cons items [];
   @{assert} (list2 = items);
-*}
-
-text {* The subsequent example demonstrates how to \emph{merge} two
-  lists in a natural way. *}
-
-ML {*
+\<close>
+
+text \<open>The subsequent example demonstrates how to \emph{merge} two
+  lists in a natural way.\<close>
+
+ML \<open>
   fun merge_lists eq (xs, ys) = fold_rev (insert eq) ys xs;
-*}
-
-text {* Here the first list is treated conservatively: only the new
+\<close>
+
+text \<open>Here the first list is treated conservatively: only the new
   elements from the second list are inserted.  The inside-out order of
   insertion via @{ML fold_rev} attempts to preserve the order of
   elements in the result.
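
text \<open>A small sketch of this behaviour: elements already present in the
  first list keep their positions, while genuinely new elements of the
  second list are prepended:\<close>

ML \<open>
  @{assert} (merge_lists (op =) ([1, 2, 3], [3, 4, 2]) = [4, 1, 2, 3]);
\<close>
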
@@ -1581,19 +1581,19 @@
   This way of merging lists is typical for context data
   (\secref{sec:context-data}).  See also @{ML merge} as defined in
   @{file "~~/src/Pure/library.ML"}.
-*}
-
-
-subsection {* Association lists *}
-
-text {* The operations for association lists interpret a concrete list
+\<close>
+
+
+subsection \<open>Association lists\<close>
+
+text \<open>The operations for association lists interpret a concrete list
   of pairs as a finite function from keys to values.  Redundant
   representations with multiple occurrences of the same key are
   implicitly normalized: lookup and update only take the first
   occurrence into account.
-*}
-
-text {*
+\<close>
+
+text \<open>
   \begin{mldecls}
   @{index_ML AList.lookup: "('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option"} \\
   @{index_ML AList.defined: "('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool"} \\
@@ -1623,21 +1623,21 @@
   in many practical situations. A more advanced table structure is defined in
   @{file "~~/src/Pure/General/table.ML"}; that version scales easily to
   thousands or millions of elements.
-*}
-
-
-subsection {* Unsynchronized references *}
-
-text %mlref {*
+\<close>
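+
+text %mlex \<open>A small illustrative sketch, assuming the usual @{ML
+  AList.update} operation alongside the ones shown above:\<close>
+
+ML \<open>
+  val al = AList.update (op =) ("b", 2) [("a", 1)];
+  @{assert} (AList.lookup (op =) al "b" = SOME 2);
+  @{assert} (AList.defined (op =) al "a");
+\<close>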
+
+
+subsection \<open>Unsynchronized references\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type "'a Unsynchronized.ref"} \\
   @{index_ML Unsynchronized.ref: "'a -> 'a Unsynchronized.ref"} \\
   @{index_ML "!": "'a Unsynchronized.ref -> 'a"} \\
   @{index_ML_op ":=": "'a Unsynchronized.ref * 'a -> unit"} \\
   \end{mldecls}
-*}
-
-text {* Due to ubiquitous parallelism in Isabelle/ML (see also
+\<close>
+
+text \<open>Due to ubiquitous parallelism in Isabelle/ML (see also
   \secref{sec:multi-threading}), the mutable reference cells of
   Standard ML are notorious for causing problems.  In a highly
   parallel system, both correctness \emph{and} performance are easily
@@ -1654,12 +1654,12 @@
   Never @{ML_text "open Unsynchronized"}, not even in a local scope!
   Pretending that mutable state is no problem is a very bad idea.
   \end{warn}
-*}
-
-
-section {* Thread-safe programming \label{sec:multi-threading} *}
-
-text {* Multi-threaded execution has become an everyday reality in
+\<close>
+
+
+section \<open>Thread-safe programming \label{sec:multi-threading}\<close>
+
+text \<open>Multi-threaded execution has become an everyday reality in
   Isabelle since Poly/ML 5.2.1 and Isabelle2008.  Isabelle/ML provides
   implicit and explicit parallelism by default, and there is no way
   for user-space tools to ``opt out''.  ML programs that are purely
@@ -1670,12 +1670,12 @@
 
   More ambitious tools with more fine-grained interaction with the
   environment need to observe the principles explained below.
-*}
-
-
-subsection {* Multi-threading with shared memory *}
-
-text {* Multiple threads help to organize advanced operations of the
+\<close>
+
+
+subsection \<open>Multi-threading with shared memory\<close>
+
+text \<open>Multiple threads help to organize advanced operations of the
   system, such as real-time conditions on command transactions,
   sub-components with explicit communication, general asynchronous
   interaction etc.  Moreover, parallel evaluation is a prerequisite to
@@ -1725,12 +1725,12 @@
   Isabelle/ML infrastructure.  User programs must not fork their own
   ML threads to perform heavy computations.
   \end{warn}
-*}
-
-
-subsection {* Critical shared resources *}
-
-text {* Thread-safeness is mainly concerned about concurrent
+\<close>
+
+
+subsection \<open>Critical shared resources\<close>
+
+text \<open>Thread safety is mainly concerned with concurrent
   read/write access to shared resources, which are outside the purely
   functional world of ML.  This covers the following in particular.
 
@@ -1798,9 +1798,9 @@
   thread-safe.
 
   \end{itemize}
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML File.tmp_path: "Path.T -> Path.T"} \\
   @{index_ML serial_string: "unit -> string"} \\
@@ -1816,22 +1816,22 @@
   that is unique over the runtime of the Isabelle/ML process.
 
   \end{description}
-*}
-
-text %mlex {* The following example shows how to create unique
+\<close>
+
+text %mlex \<open>The following example shows how to create unique
   temporary file names.
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   val tmp1 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
   val tmp2 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
   @{assert} (tmp1 <> tmp2);
-*}
-
-
-subsection {* Explicit synchronization *}
-
-text {* Isabelle/ML also provides some explicit synchronization
+\<close>
+
+
+subsection \<open>Explicit synchronization\<close>
+
+text \<open>Isabelle/ML also provides some explicit synchronization
   mechanisms, for the rare situations where mutable shared resources
   are really required.  These are based on the synchronization
   primitives of Poly/ML, which have been adapted to the specific
@@ -1856,9 +1856,9 @@
   Here the synchronized access to the state variable is \emph{not}
   re-entrant: direct or indirect nesting within the same thread will
   cause a deadlock!
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML NAMED_CRITICAL: "string -> (unit -> 'a) -> 'a"} \\
   @{index_ML CRITICAL: "(unit -> 'a) -> 'a"} \\
@@ -1907,14 +1907,14 @@
   There are some further variants of the @{ML
   Synchronized.guarded_access} combinator; see @{file
   "~~/src/Pure/Concurrent/synchronized.ML"} for details.
-*}
-
-text %mlex {* The following example implements a counter that produces
+\<close>
+
+text %mlex \<open>The following example implements a counter that produces
   positive integers that are unique over the runtime of the Isabelle
   process:
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   local
     val counter = Synchronized.var "counter" 0;
   in
@@ -1924,22 +1924,22 @@
           let val j = i + 1
           in SOME (j, j) end);
   end;
-*}
-
-ML {*
+\<close>
+
+ML \<open>
   val a = next ();
   val b = next ();
   @{assert} (a <> b);
-*}
-
-text {* \medskip See @{file "~~/src/Pure/Concurrent/mailbox.ML"} how
+\<close>
+
+text \<open>\medskip See @{file "~~/src/Pure/Concurrent/mailbox.ML"} for how
   to implement a mailbox as a synchronized variable over a purely
-  functional list. *}
-
-
-section {* Managed evaluation *}
-
-text {* Execution of Standard ML follows the model of strict
+  functional list.\<close>
+
+
+section \<open>Managed evaluation\<close>
+
+text \<open>Execution of Standard ML follows the model of strict
   functional evaluation with optional exceptions.  Evaluation happens
   whenever some function is applied to (sufficiently many)
   arguments. The result is either an explicit value or an implicit
@@ -1983,9 +1983,9 @@
   their original occurrence (e.g.\ when printed at the toplevel).
   Interrupt counts as neutral element here: it is treated as minimal
   information about some canceled evaluation process, and is absorbed
-  by the presence of regular program exceptions.  *}
-
-text %mlref {*
+  by the presence of regular program exceptions.\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type "'a Exn.result"} \\
   @{index_ML Exn.capture: "('a -> 'b) -> 'a -> 'b Exn.result"} \\
@@ -2029,12 +2029,12 @@
   means in ML.
 
   \end{description}
-*}
-
-
-subsection {* Parallel skeletons \label{sec:parlist} *}
-
-text {*
+\<close>
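+
+text %mlex \<open>A minimal sketch of managed evaluation: @{ML Exn.capture}
+  turns an exception into an explicit result value, and @{ML
+  Exn.release} turns it back into an implicit one (assuming the plain
+  constructors of type @{ML_type "'a Exn.result"}):\<close>
+
+ML \<open>
+  val result = Exn.capture (fn n => 10 div n) 0;  (*Div is captured, not raised*)
+  @{assert} (case result of Exn.Exn _ => true | Exn.Res _ => false);
+  @{assert} (Exn.release (Exn.capture (fn n => 10 div n) 5) = 2);
+\<close>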
+
+
+subsection \<open>Parallel skeletons \label{sec:parlist}\<close>
+
+text \<open>
   Algorithmic skeletons are combinators that operate on lists in
   parallel, in the manner of well-known @{text map}, @{text exists},
   @{text forall} etc.  Management of futures (\secref{sec:futures})
@@ -2048,9 +2048,9 @@
   fine-grained, CPU cycles are wasted due to the overhead of
   organizing parallel processing.  In the worst case, parallel
   performance will be less than the sequential counterpart!
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Par_List.map: "('a -> 'b) -> 'a list -> 'b list"} \\
   @{index_ML Par_List.get_some: "('a -> 'b option) -> 'a list -> 'b option"} \\
@@ -2081,24 +2081,24 @@
   Par_List.forall}.
 
   \end{description}
-*}
-
-text %mlex {* Subsequently, the Ackermann function is evaluated in
-  parallel for some ranges of arguments. *}
-
-ML_val {*
+\<close>
+
+text %mlex \<open>Subsequently, the Ackermann function is evaluated in
+  parallel for some ranges of arguments.\<close>
+
+ML_val \<open>
   fun ackermann 0 n = n + 1
     | ackermann m 0 = ackermann (m - 1) 1
     | ackermann m n = ackermann (m - 1) (ackermann m (n - 1));
 
   Par_List.map (ackermann 2) (500 upto 1000);
   Par_List.map (ackermann 3) (5 upto 10);
-*}
-
-
-subsection {* Lazy evaluation *}
-
-text {*
+\<close>
+
+
+subsection \<open>Lazy evaluation\<close>
+
+text \<open>
   Classic lazy evaluation works via the @{text lazy}~/ @{text force} pair of
   operations: @{text lazy} to wrap an unevaluated expression, and @{text
   force} to evaluate it once and store its result persistently. Later
@@ -2119,9 +2119,9 @@
   and multiple receivers of intermediate interrupt events. Interrupts are
   \emph{not} made persistent: later evaluation attempts start again from the
   original expression.
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type "'a lazy"} \\
   @{index_ML Lazy.lazy: "(unit -> 'a) -> 'a lazy"} \\
@@ -2148,12 +2148,12 @@
   to wait on a pending evaluation attempt by another thread.
 
   \end{description}
-*}
-
-
-subsection {* Futures \label{sec:futures} *}
-
-text {*
+\<close>
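+
+text %mlex \<open>A minimal sketch, assuming the usual @{ML Lazy.force}
+  operation: the suspended expression is evaluated at most once, on
+  the first force.\<close>
+
+ML \<open>
+  val n = Lazy.lazy (fn () => (writeln "evaluating"; 42));
+  @{assert} (Lazy.force n = 42);  (*prints "evaluating" once*)
+  @{assert} (Lazy.force n = 42);  (*result is reused, no further output*)
+\<close>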
+
+
+subsection \<open>Futures \label{sec:futures}\<close>
+
+text \<open>
   Futures help to organize parallel execution in a value-oriented manner, with
   @{text fork}~/ @{text join} as the main pair of operations, and some further
   variants; see also @{cite "Wenzel:2009" and "Wenzel:2013:ITP"}. Unlike lazy
@@ -2203,9 +2203,9 @@
   used as minimal elements (or guards) within the future dependency graph:
   when these promises are fulfilled the evaluation of subsequent futures
   starts spontaneously, according to their own inter-dependencies.
-*}
-
-text %mlref {*
+\<close>
+
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type "'a future"} \\
   @{index_ML Future.fork: "(unit -> 'a) -> 'a future"} \\
@@ -2337,7 +2337,7 @@
   the attempt to fulfill it causes an exception.
 
   \end{description}
-*}
+\<close>
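+
+text %mlex \<open>A minimal sketch of fork~/ join, assuming the usual @{ML
+  Future.join} operation: both computations may run in parallel, and
+  joining waits for their results.\<close>
+
+ML \<open>
+  val f1 = Future.fork (fn () => 21 + 21);
+  val f2 = Future.fork (fn () => 2 * 21);
+  @{assert} (Future.join f1 = Future.join f2);
+\<close>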
 
 
 end
--- a/src/Doc/Implementation/Prelim.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Prelim.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,11 +2,11 @@
 imports Base
 begin
 
-chapter {* Preliminaries *}
+chapter \<open>Preliminaries\<close>
 
-section {* Contexts \label{sec:context} *}
+section \<open>Contexts \label{sec:context}\<close>
 
-text {*
+text \<open>
   A logical context represents the background that is required for
   formulating statements and composing proofs.  It acts as a medium to
   produce formal content, depending on earlier material (declarations,
@@ -68,12 +68,12 @@
   mechanisms (inductive predicates, recursive functions etc.).  All of
   this is ultimately based on the generic data management by theory
   and proof contexts introduced here.
-*}
+\<close>
 
 
-subsection {* Theory context \label{sec:context-theory} *}
+subsection \<open>Theory context \label{sec:context-theory}\<close>
 
-text {* A \emph{theory} is a data container with explicit name and
+text \<open>A \emph{theory} is a data container with explicit name and
   unique identifier.  Theories are related by a (nominal) sub-theory
   relation, which corresponds to the dependency graph of the original
   construction; each theory is derived from a certain sub-graph of
@@ -116,9 +116,9 @@
   \medskip Derived formal entities may retain a reference to the
   background theory in order to indicate the formal context from which
   they were produced.  This provides an immutable certificate of the
-  background theory.  *}
+  background theory.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type theory} \\
   @{index_ML Theory.eq_thy: "theory * theory -> bool"} \\
@@ -157,9 +157,9 @@
   ancestors of @{text thy} (not including @{text thy} itself).
 
   \end{description}
-*}
+\<close>
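+
+text %mlex \<open>For illustration, the static background theory of this
+  document can be compared with itself, using the @{text "theory"}
+  antiquotation explained below:\<close>
+
+ML \<open>
+  @{assert} (Theory.eq_thy (@{theory}, @{theory}));
+\<close>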
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "theory"} & : & @{text ML_antiquotation} \\
   @{ML_antiquotation_def "theory_context"} & : & @{text ML_antiquotation} \\
@@ -185,12 +185,12 @@
   (see also @{ML Proof_Context.init_global}).
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Proof context \label{sec:context-proof} *}
+subsection \<open>Proof context \label{sec:context-proof}\<close>
 
-text {* A proof context is a container for pure data that refers to
+text \<open>A proof context is a container for pure data that refers to
   the theory from which it is derived. The @{text "init"} operation
   creates a proof context from a given theory. There is an explicit
   @{text "transfer"} operation to force resynchronization with updates
@@ -214,9 +214,9 @@
   contexts internally.  For various technical reasons, the background
   theory of an Isar proof state must not be changed while the proof is
   still under construction!
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Proof.context} \\
   @{index_ML Proof_Context.init_global: "theory -> Proof.context"} \\
@@ -239,9 +239,9 @@
   "thy"}.
 
   \end{description}
-*}
+\<close>
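+
+text %mlex \<open>A minimal round trip between theory and proof context
+  (assuming the usual @{ML Proof_Context.theory_of} selector):\<close>
+
+ML \<open>
+  val ctxt = Proof_Context.init_global @{theory};
+  @{assert} (Theory.eq_thy (Proof_Context.theory_of ctxt, @{theory}));
+\<close>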
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "context"} & : & @{text ML_antiquotation} \\
   \end{matharray}
@@ -256,12 +256,12 @@
   experimentation with ML inside Isar.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Generic contexts \label{sec:generic-context} *}
+subsection \<open>Generic contexts \label{sec:generic-context}\<close>
 
-text {*
+text \<open>
   A generic context is the disjoint sum of either a theory or proof
   context.  Occasionally, this enables uniform treatment of generic
   context data, typically extra-logical information.  Operations on
@@ -274,9 +274,9 @@
   can always be selected from the sum, while a proof context might
   have to be constructed by an ad-hoc @{text "init"} operation, which
   incurs a small runtime overhead.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Context.generic} \\
   @{index_ML Context.theory_of: "Context.generic -> theory"} \\
@@ -299,12 +299,12 @@
   context data with each invocation).
 
   \end{description}
-*}
+\<close>
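+
+text %mlex \<open>For illustration, a proof context is injected into the
+  disjoint sum and selected from it again (assuming the plain @{ML
+  Context.Proof} constructor):\<close>
+
+ML \<open>
+  val generic = Context.Proof @{context};
+  val thy = Context.theory_of generic;
+  val ctxt = Context.proof_of generic;
+\<close>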
 
 
-subsection {* Context data \label{sec:context-data} *}
+subsection \<open>Context data \label{sec:context-data}\<close>
 
-text {* The main purpose of theory and proof contexts is to manage
+text \<open>The main purpose of theory and proof contexts is to manage
   arbitrary (pure) data.  New data types can be declared incrementally
   at compile time.  There are separate declaration mechanisms for any
   of the three kinds of contexts: theory, proof, generic.
@@ -377,9 +377,9 @@
   observes the ML discipline for types and scopes: there is no other
   way to access the corresponding data slot of a context.  By keeping
   these operations private, an Isabelle/ML module may maintain
-  abstract values authentically.  *}
+  abstract values authentically.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_functor Theory_Data} \\
   @{index_ML_functor Proof_Data} \\
@@ -400,27 +400,27 @@
   @{ML_functor Theory_Data} for type @{ML_type Context.generic}.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {*
+text %mlex \<open>
   The following artificial example demonstrates theory
   data: we maintain a set of terms that are supposed to be wellformed
   wrt.\ the enclosing theory.  The public interface is as follows:
-*}
+\<close>
 
-ML {*
+ML \<open>
   signature WELLFORMED_TERMS =
   sig
     val get: theory -> term list
     val add: term -> theory -> theory
   end;
-*}
+\<close>
 
-text {* The implementation uses private theory data internally, and
+text \<open>The implementation uses private theory data internally, and
   only exposes an operation that involves explicit argument checking
-  wrt.\ the given theory. *}
+  wrt.\ the given theory.\<close>
 
-ML {*
+ML \<open>
   structure Wellformed_Terms: WELLFORMED_TERMS =
   struct
 
@@ -443,9 +443,9 @@
     end;
 
   end;
-*}
+\<close>
 
-text {* Type @{ML_type "term Ord_List.T"} is used for reasonably
+text \<open>Type @{ML_type "term Ord_List.T"} is used for reasonably
   efficient representation of a set of terms: all operations are
   linear in the number of stored elements.  Here we assume that users
   of this module do not care about the declaration order, since that
@@ -482,12 +482,12 @@
   invariants too seriously.  The situation is different in the
   implementation of the inference kernel itself, which uses the very
   same data mechanisms for types, constants, axioms etc.
-*}
+\<close>
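+
+text %mlex \<open>A hypothetical use of the @{ML_structure Wellformed_Terms}
+  interface defined above:\<close>
+
+ML \<open>
+  val thy' = Wellformed_Terms.add @{term "\<lambda>x::nat. x"} @{theory};
+  @{assert} (not (null (Wellformed_Terms.get thy')));
+\<close>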
 
 
-subsection {* Configuration options \label{sec:config-options} *}
+subsection \<open>Configuration options \label{sec:config-options}\<close>
 
-text {* A \emph{configuration option} is a named optional value of
+text \<open>A \emph{configuration option} is a named optional value of
   some basic type (Boolean, integer, string) that is stored in the
   context.  It is a simple application of general context data
   (\secref{sec:context-data}) that is sufficiently common to justify
@@ -499,24 +499,24 @@
   show_types} controls output of explicit type constraints for
   variables in printed terms (cf.\ \secref{sec:read-print}).  Its
   value can be modified within Isar text like this:
-*}
+\<close>
 
 declare [[show_types = false]]
-  -- {* declaration within (local) theory context *}
+  -- \<open>declaration within (local) theory context\<close>
 
 notepad
 begin
   note [[show_types = true]]
-    -- {* declaration within proof (forward mode) *}
+    -- \<open>declaration within proof (forward mode)\<close>
   term x
 
   have "x = x"
     using [[show_types = false]]
-      -- {* declaration within proof (backward mode) *}
+      -- \<open>declaration within proof (backward mode)\<close>
     ..
 end
 
-text {* Configuration options that are not set explicitly hold a
+text \<open>Configuration options that are not set explicitly hold a
   default value that can depend on the application context.  This
   makes it possible to retrieve the value from another slot within the
   context, or to fall back on a global preference mechanism, for example.
@@ -525,9 +525,9 @@
   values are modeled as direct replacements for historic global
   references, only that the context is made explicit.  This allows
   easy configuration of tools, without relying on the execution order
-  as required for old-style mutable references.  *}
+  as required for old-style mutable references.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Config.get: "Proof.context -> 'a Config.T -> 'a"} \\
   @{index_ML Config.map: "'a Config.T -> ('a -> 'a) -> Proof.context -> Proof.context"} \\
@@ -562,51 +562,51 @@
   types @{ML_type int} and @{ML_type string}, respectively.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following example shows how to declare and use a
+text %mlex \<open>The following example shows how to declare and use a
   Boolean configuration option called @{text "my_flag"} with constant
-  default value @{ML false}.  *}
+  default value @{ML false}.\<close>
 
-ML {*
+ML \<open>
   val my_flag =
     Attrib.setup_config_bool @{binding my_flag} (K false)
-*}
+\<close>
 
-text {* Now the user can refer to @{attribute my_flag} in
+text \<open>Now the user can refer to @{attribute my_flag} in
   declarations, while ML tools can retrieve the current value from the
-  context via @{ML Config.get}.  *}
+  context via @{ML Config.get}.\<close>
 
-ML_val {* @{assert} (Config.get @{context} my_flag = false) *}
+ML_val \<open>@{assert} (Config.get @{context} my_flag = false)\<close>
 
 declare [[my_flag = true]]
 
-ML_val {* @{assert} (Config.get @{context} my_flag = true) *}
+ML_val \<open>@{assert} (Config.get @{context} my_flag = true)\<close>
 
 notepad
 begin
   {
     note [[my_flag = false]]
-    ML_val {* @{assert} (Config.get @{context} my_flag = false) *}
+    ML_val \<open>@{assert} (Config.get @{context} my_flag = false)\<close>
   }
-  ML_val {* @{assert} (Config.get @{context} my_flag = true) *}
+  ML_val \<open>@{assert} (Config.get @{context} my_flag = true)\<close>
 end
 
-text {* Here is another example involving ML type @{ML_type real}
-  (floating-point numbers). *}
+text \<open>Here is another example involving ML type @{ML_type real}
+  (floating-point numbers).\<close>
 
-ML {*
+ML \<open>
   val airspeed_velocity =
     Attrib.setup_config_real @{binding airspeed_velocity} (K 0.0)
-*}
+\<close>
 
 declare [[airspeed_velocity = 10]]
 declare [[airspeed_velocity = 9.9]]
 
 
-section {* Names \label{sec:names} *}
+section \<open>Names \label{sec:names}\<close>
 
-text {* In principle, a name is just a string, but there are various
+text \<open>In principle, a name is just a string, but there are various
   conventions for representing additional structure.  For example,
   ``@{text "Foo.bar.baz"}'' is considered as a long name consisting of
   qualifier @{text "Foo.bar"} and base name @{text "baz"}.  The
@@ -630,12 +630,12 @@
   (\secref{sec:name-space}).
 
   \end{itemize}
-*}
+\<close>
 
 
-subsection {* Basic names \label{sec:basic-name} *}
+subsection \<open>Basic names \label{sec:basic-name}\<close>
 
-text {*
+text \<open>
   A \emph{basic name} essentially consists of a single Isabelle
   identifier.  There are conventions to mark separate classes of basic
   names, by attaching a suffix of underscores: one underscore means
@@ -667,9 +667,9 @@
   "foo"} results in variants @{text "fooa"}, @{text "foob"}, @{text
   "fooc"}, \dots, @{text "fooaa"}, @{text "fooab"} etc.; each renaming
   step picks the next unused variant from this sequence.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Name.internal: "string -> string"} \\
   @{index_ML Name.skolem: "string -> string"} \\
@@ -715,27 +715,27 @@
   \secref{sec:variables}).
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following simple examples demonstrate how to produce
-  fresh names from the initial @{ML Name.context}. *}
+text %mlex \<open>The following simple examples demonstrate how to produce
+  fresh names from the initial @{ML Name.context}.\<close>
 
-ML {*
+ML \<open>
   val list1 = Name.invent Name.context "a" 5;
   @{assert} (list1 = ["a", "b", "c", "d", "e"]);
 
   val list2 =
     #1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] Name.context);
   @{assert} (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
-*}
+\<close>
 
-text {* \medskip The same works relatively to the formal context as
-  follows. *}
+text \<open>\medskip The same works relative to the formal context as
+  follows.\<close>
 
 locale ex = fixes a b c :: 'a
 begin
 
-ML {*
+ML \<open>
   val names = Variable.names_of @{context};
 
   val list1 = Name.invent names "a" 5;
@@ -744,14 +744,14 @@
   val list2 =
     #1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] names);
   @{assert} (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
-*}
+\<close>
 
 end
 
 
-subsection {* Indexed names \label{sec:indexname} *}
+subsection \<open>Indexed names \label{sec:indexname}\<close>
 
-text {*
+text \<open>
   An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic
   name and a natural number.  This representation allows efficient
   renaming by incrementing the second component only.  The canonical
@@ -784,9 +784,9 @@
   This works by producing variants of the corresponding basic name
   components.  For example, the collection @{text "?x1, ?x7, ?x42"}
   becomes @{text "?x, ?xa, ?xb"}.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type indexname: "string * int"} \\
   \end{mldecls}
@@ -800,12 +800,12 @@
   indexes should not be used.
 
   \end{description}
-*}
+\<close>
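+
+text %mlex \<open>For illustration, an indexed name is plainly a pair,
+  e.g.\ as part of a schematic variable:\<close>
+
+ML \<open>
+  val xi: indexname = ("x", 0);
+  val v = Var (xi, @{typ 'a});
+\<close>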
 
 
-subsection {* Long names \label{sec:long-name} *}
+subsection \<open>Long names \label{sec:long-name}\<close>
 
-text {* A \emph{long name} consists of a sequence of non-empty name
+text \<open>A \emph{long name} consists of a sequence of non-empty name
   components.  The packed representation uses a dot as separator, as
   in ``@{text "A.b.c"}''.  The last component is called \emph{base
   name}, the remaining prefix is called \emph{qualifier} (which may be
@@ -825,9 +825,9 @@
   entities, or entities that are not entered into the corresponding
   name space, whenever this makes any sense.  The basic operations on
   long names map empty names again to empty names.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Long_Name.base_name: "string -> string"} \\
   @{index_ML Long_Name.qualifier: "string -> string"} \\
@@ -852,12 +852,12 @@
   representation and the explicit list form of long names.
 
   \end{description}
-*}
+\<close>
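+
+text %mlex \<open>Some basic examples of these operations:\<close>
+
+ML \<open>
+  @{assert} (Long_Name.base_name "A.b.c" = "c");
+  @{assert} (Long_Name.qualifier "A.b.c" = "A.b");
+\<close>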
 
 
-subsection {* Name spaces \label{sec:name-space} *}
+subsection \<open>Name spaces \label{sec:name-space}\<close>
 
-text {* A @{text "name space"} manages a collection of long names,
+text \<open>A @{text "name space"} manages a collection of long names,
   together with a mapping between partially qualified external names
   and fully qualified internal names (in both directions).  Note that
   the corresponding @{text "intern"} and @{text "extern"} operations
@@ -908,9 +908,9 @@
   type @{text "c"} & @{text "c_type.intro"} \\
   class @{text "c"} & @{text "c_class.intro"} \\
   \end{tabular}
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type binding} \\
   @{index_ML Binding.empty: binding} \\
@@ -1018,9 +1018,9 @@
   other tools are supposed to ignore!
 
   \end{description}
-*}
+\<close>
 
-text %mlantiq {*
+text %mlantiq \<open>
   \begin{matharray}{rcl}
   @{ML_antiquotation_def "binding"} & : & @{text ML_antiquotation} \\
   \end{matharray}
@@ -1037,33 +1037,33 @@
   appropriate than the more basic @{ML Binding.name} function.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following example yields the source position of some
+text %mlex \<open>The following example yields the source position of some
   concrete binding inlined into the text:
-*}
+\<close>
 
-ML {* Binding.pos_of @{binding here} *}
+ML \<open>Binding.pos_of @{binding here}\<close>
 
-text {* \medskip That position can be also printed in a message as
-  follows: *}
+text \<open>\medskip That position can also be printed in a message as
+  follows:\<close>
 
-ML_command {*
+ML_command \<open>
   writeln
     ("Look here" ^ Position.here (Binding.pos_of @{binding here}))
-*}
+\<close>
 
-text {* This illustrates a key virtue of formalized bindings as
+text \<open>This illustrates a key virtue of formalized bindings as
   opposed to raw specifications of base names: the system can use this
   additional information for feedback given to the user (error
   messages etc.).
 
   \medskip The following example refers to its source position
   directly, which is occasionally useful for experimentation and
-  diagnostic purposes: *}
+  diagnostic purposes:\<close>
 
-ML_command {*
+ML_command \<open>
   warning ("Look here" ^ Position.here @{here})
-*}
+\<close>
 
 end
--- a/src/Doc/Implementation/Proof.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Proof.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,11 +2,11 @@
 imports Base
 begin
 
-chapter {* Structured proofs *}
+chapter \<open>Structured proofs\<close>
 
-section {* Variables \label{sec:variables} *}
+section \<open>Variables \label{sec:variables}\<close>
 
-text {*
+text \<open>
   Any variable that is not explicitly bound by @{text "\<lambda>"}-abstraction
   is considered as ``free''.  Logically, free variables act like
   outermost universal quantification at the sequent level: @{text
@@ -56,22 +56,22 @@
   \<turnstile> x\<^sub>\<alpha> \<equiv> x\<^sub>\<alpha>"} for fixed @{text "\<alpha>"}, and only in the second step
   @{text "\<turnstile> ?x\<^sub>?\<^sub>\<alpha> \<equiv> ?x\<^sub>?\<^sub>\<alpha>"} for schematic @{text "?x"} and @{text "?\<alpha>"}.
   The following Isar source text illustrates this scenario.
-*}
+\<close>
 
 notepad
 begin
   {
-    fix x  -- {* all potential occurrences of some @{text "x::\<tau>"} are fixed *}
+    fix x  -- \<open>all potential occurrences of some @{text "x::\<tau>"} are fixed\<close>
     {
-      have "x::'a \<equiv> x"  -- {* implicit type assignment by concrete occurrence *}
+      have "x::'a \<equiv> x"  -- \<open>implicit type assignment by concrete occurrence\<close>
         by (rule reflexive)
     }
-    thm this  -- {* result still with fixed type @{text "'a"} *}
+    thm this  -- \<open>result still with fixed type @{text "'a"}\<close>
   }
-  thm this  -- {* fully general result for arbitrary @{text "?x::?'a"} *}
+  thm this  -- \<open>fully general result for arbitrary @{text "?x::?'a"}\<close>
 end
 
-text {* The Isabelle/Isar proof context manages the details of term
+text \<open>The Isabelle/Isar proof context manages the details of term
   vs.\ type variables, with high-level principles for moving the
   frontier between fixed and schematic variables.
 
@@ -95,9 +95,9 @@
   "\<And>x\<^sub>1 \<dots> x\<^sub>n. B(x\<^sub>1, \<dots>, x\<^sub>n)"} is
   decomposed by inventing fixed variables @{text "x\<^sub>1, \<dots>,
   x\<^sub>n"} for the body.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Variable.add_fixes: "
   string list -> Proof.context -> string list * Proof.context"} \\
@@ -157,12 +157,12 @@
   "\<And>"} prefix of proposition @{text "B"}.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following example shows how to work with fixed term
-  and type parameters and with type-inference.  *}
+text %mlex \<open>The following example shows how to work with fixed term
+  and type parameters and with type-inference.\<close>
 
-ML {*
+ML \<open>
   (*static compile-time context -- for testing only*)
   val ctxt0 = @{context};
 
@@ -179,30 +179,30 @@
   (*official declaration of u -- propagates constraints etc.*)
   val ctxt2 = ctxt1 |> Variable.declare_term u;
   val t2 = Syntax.read_term ctxt2 "x";  (*x::nat is enforced*)
-*}
+\<close>
 
-text {* In the above example, the starting context is derived from the
+text \<open>In the above example, the starting context is derived from the
   toplevel theory, which means that fixed variables are internalized
   literally: @{text "x"} is mapped again to @{text "x"}, and
   attempting to fix it again in the subsequent context is an error.
   Alternatively, fixed parameters can be renamed explicitly as
-  follows: *}
+  follows:\<close>
 
-ML {*
+ML \<open>
   val ctxt0 = @{context};
   val ([x1, x2, x3], ctxt1) =
     ctxt0 |> Variable.variant_fixes ["x", "x", "x"];
-*}
+\<close>
 
-text {* The following ML code can now work with the invented names of
+text \<open>The following ML code can now work with the invented names of
   @{text x1}, @{text x2}, @{text x3}, without depending on
   the details of the system policy for introducing these variants.
   Recall that within a proof body the system always invents fresh
-  ``Skolem constants'', e.g.\ as follows: *}
+  ``Skolem constants'', e.g.\ as follows:\<close>
 
 notepad
 begin
-  ML_prf %"ML" {*
+  ML_prf %"ML" \<open>
     val ctxt0 = @{context};
 
     val ([x1], ctxt1) = ctxt0 |> Variable.add_fixes ["x"];
@@ -211,18 +211,18 @@
 
     val ([y1, y2], ctxt4) =
       ctxt3 |> Variable.variant_fixes ["y", "y"];
-  *}
+\<close>
 end
 
-text {* In this situation @{ML Variable.add_fixes} and @{ML
+text \<open>In this situation @{ML Variable.add_fixes} and @{ML
   Variable.variant_fixes} are very similar, but identical name
   proposals given in a row are only accepted by the second version.
-  *}
+\<close>
 
 
-section {* Assumptions \label{sec:assumptions} *}
+section \<open>Assumptions \label{sec:assumptions}\<close>
 
-text {*
+text \<open>
   An \emph{assumption} is a proposition that is postulated in the
   current context.  Local conclusions may use assumptions as
   additional facts, but this imposes implicit hypotheses that weaken
@@ -274,9 +274,9 @@
   This works, because the assumption @{text "x \<equiv> t"} was introduced in
   a context with @{text "x"} being fresh, so @{text "x"} does not
   occur in @{text "\<Gamma>"} here.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type Assumption.export} \\
   @{index_ML Assumption.assume: "Proof.context -> cterm -> thm"} \\
@@ -318,15 +318,15 @@
   and @{ML "Assumption.export"} in the canonical way.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following example demonstrates how rules can be
+text %mlex \<open>The following example demonstrates how rules can be
   derived by building up a context of assumptions first, and exporting
   some local fact afterwards.  We refer to @{theory Pure} equality
   here for testing purposes.
-*}
+\<close>
 
-ML {*
+ML \<open>
   (*static compile-time context -- for testing only*)
   val ctxt0 = @{context};
 
@@ -336,17 +336,17 @@
 
   (*back to original context -- discharges assumption*)
   val r = Assumption.export false ctxt1 ctxt0 eq';
-*}
+\<close>
 
-text {* Note that the variables of the resulting rule are not
+text \<open>Note that the variables of the resulting rule are not
   generalized.  This would have required fixing them properly in the
   context beforehand, and export wrt.\ variables afterwards (cf.\ @{ML
-  Variable.export} or the combined @{ML "Proof_Context.export"}).  *}
+  Variable.export} or the combined @{ML "Proof_Context.export"}).\<close>
 
 
-section {* Structured goals and results \label{sec:struct-goals} *}
+section \<open>Structured goals and results \label{sec:struct-goals}\<close>
 
-text {*
+text \<open>
   Local results are established by monotonic reasoning from facts
   within a context.  This allows common combinations of theorems,
   e.g.\ via @{text "\<And>/\<Longrightarrow>"} elimination, resolution rules, or equational
@@ -383,9 +383,9 @@
   @{cite "isabelle-isar-ref"} for the user-level @{command obtain} and
   @{command guess} elements.  Final results, which may not refer to
   the parameters in the conclusion, need to be exported explicitly into
-  the original context.  *}
+  the original context.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML SUBPROOF: "(Subgoal.focus -> tactic) ->
   Proof.context -> int -> tactic"} \\
@@ -448,10 +448,10 @@
   exported explicitly.
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The following minimal example illustrates how to access
-  the focus information of a structured goal state. *}
+text %mlex \<open>The following minimal example illustrates how to access
+  the focus information of a structured goal state.\<close>
 
 notepad
 begin
@@ -459,34 +459,34 @@
 
   have "\<And>x. A x \<Longrightarrow> B x \<Longrightarrow> C x"
     ML_val
-    {*
+    \<open>
       val {goal, context = goal_ctxt, ...} = @{Isar.goal};
       val (focus as {params, asms, concl, ...}, goal') =
         Subgoal.focus goal_ctxt 1 goal;
       val [A, B] = #prems focus;
       val [(_, x)] = #params focus;
-    *}
+\<close>
     oops
 
-text {* \medskip The next example demonstrates forward-elimination in
-  a local context, using @{ML Obtain.result}.  *}
+text \<open>\medskip The next example demonstrates forward-elimination in
+  a local context, using @{ML Obtain.result}.\<close>
 
 notepad
 begin
   assume ex: "\<exists>x. B x"
 
-  ML_prf %"ML" {*
+  ML_prf %"ML" \<open>
     val ctxt0 = @{context};
     val (([(_, x)], [B]), ctxt1) = ctxt0
       |> Obtain.result (fn _ => etac @{thm exE} 1) [@{thm ex}];
-  *}
-  ML_prf %"ML" {*
+\<close>
+  ML_prf %"ML" \<open>
     singleton (Proof_Context.export ctxt1 ctxt0) @{thm refl};
-  *}
-  ML_prf %"ML" {*
+\<close>
+  ML_prf %"ML" \<open>
     Proof_Context.export ctxt1 ctxt0 [Thm.reflexive x]
       handle ERROR msg => (warning msg; []);
-  *}
+\<close>
 end
 
 end
--- a/src/Doc/Implementation/Syntax.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Syntax.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -4,9 +4,9 @@
 imports Base
 begin
 
-chapter {* Concrete syntax and type-checking *}
+chapter \<open>Concrete syntax and type-checking\<close>
 
-text {* Pure @{text "\<lambda>"}-calculus as introduced in \chref{ch:logic} is
+text \<open>Pure @{text "\<lambda>"}-calculus as introduced in \chref{ch:logic} is
   an adequate foundation for logical languages --- in the tradition of
   \emph{higher-order abstract syntax} --- but end-users require
   additional means for reading and printing of terms and types.  This
@@ -57,12 +57,12 @@
 
   There are analogous operations to read and print types, with the same
   sub-division into phases.
-*}
+\<close>
 
 
-section {* Reading and pretty printing \label{sec:read-print} *}
+section \<open>Reading and pretty printing \label{sec:read-print}\<close>
 
-text {*
+text \<open>
   Read and print operations are roughly dual to each other, such that for the
   user @{text "s' = pretty (read s)"} looks similar to the original source
   text @{text "s"}, but the details depend on many side-conditions. There are
@@ -70,9 +70,9 @@
   output. The default configuration routinely loses information, so @{text
   "t' = read (pretty t)"} might fail, or produce a differently typed term, or
   a completely different term in the face of syntactic overloading.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Syntax.read_typs: "Proof.context -> string list -> typ list"} \\
   @{index_ML Syntax.read_terms: "Proof.context -> string list -> term list"} \\
@@ -143,12 +143,12 @@
   obtained from one of the latter may be directly passed to the corresponding
   read operation: this yields PIDE markup of the input and precise positions
   for warning and error messages.
-*}
+\<close>
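+
+text %mlex \<open>A minimal round trip through reading and pretty printing
+  (assuming the usual @{ML Syntax.string_of_term} operation and the
+  @{ML singleton} combinator):\<close>
+
+ML \<open>
+  val t = singleton (Syntax.read_terms @{context}) "\<lambda>x::nat. x";
+  writeln (Syntax.string_of_term @{context} t);
+\<close>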
 
 
-section {* Parsing and unparsing \label{sec:parse-unparse} *}
+section \<open>Parsing and unparsing \label{sec:parse-unparse}\<close>
 
-text {*
+text \<open>
   Parsing and unparsing converts between actual source text and a certain
   \emph{pre-term} format, where all bindings and scopes are already resolved
   faithfully. Thus the names of free variables or constants are determined in
@@ -165,9 +165,9 @@
   "isabelle-isar-ref"}. The final scope-resolution is performed by the system,
   according to name spaces for types, term variables and constants determined
   by the context.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Syntax.parse_typ: "Proof.context -> string -> typ"} \\
   @{index_ML Syntax.parse_term: "Proof.context -> string -> term"} \\
@@ -201,12 +201,12 @@
 
   These operations always operate on a single item; use the combinator @{ML
   map} to apply them to a list.
-*}
+\<close>
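+
+text %mlex \<open>A sketch of the sub-division into phases: parsing yields
+  a pre-term, which only becomes a fully-typed term via a separate
+  check phase (see \secref{sec:term-check} below):\<close>
+
+ML \<open>
+  val pre_term = Syntax.parse_term @{context} "f x";
+  val t = Syntax.check_term @{context} pre_term;
+\<close>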
 
 
-section {* Checking and unchecking \label{sec:term-check} *}
+section \<open>Checking and unchecking \label{sec:term-check}\<close>
 
-text {* These operations define the transition from pre-terms and
+text \<open>These operations define the transition from pre-terms and
   fully-annotated terms in the sense of the logical core
   (\chref{ch:logic}).
 
@@ -232,9 +232,9 @@
   logical foundation.  This involves ``type improvement''
   (specialization of slightly too general types) and replacement by
   certain locale parameters.  See also @{cite "Haftmann-Wenzel:2009"}.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Syntax.check_typs: "Proof.context -> typ list -> typ list"} \\
   @{index_ML Syntax.check_terms: "Proof.context -> term list -> term list"} \\
@@ -276,6 +276,6 @@
 
   These operations always operate simultaneously on a list; use the combinator
   @{ML singleton} to apply them to a single item.
-*}
+\<close>
 
 end
--- a/src/Doc/Implementation/Tactic.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Implementation/Tactic.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,20 +2,20 @@
 imports Base
 begin
 
-chapter {* Tactical reasoning *}
+chapter \<open>Tactical reasoning\<close>
 
-text {* Tactical reasoning works by refining an initial claim in a
+text \<open>Tactical reasoning works by refining an initial claim in a
   backwards fashion, until a solved form is reached.  A @{text "goal"}
   consists of several subgoals that need to be solved in order to
   achieve the main statement; zero subgoals means that the proof may
   be finished.  A @{text "tactic"} is a refinement operation that maps
   a goal to a lazy sequence of potential successors.  A @{text
-  "tactical"} is a combinator for composing tactics.  *}
+  "tactical"} is a combinator for composing tactics.\<close>
 
 
-section {* Goals \label{sec:tactical-goals} *}
+section \<open>Goals \label{sec:tactical-goals}\<close>
 
-text {*
+text \<open>
   Isabelle/Pure represents a goal as a theorem stating that the
   subgoals imply the main goal: @{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow>
   C"}.  The outermost goal structure is that of a Horn Clause: i.e.\
@@ -58,9 +58,9 @@
   \[
   \infer[@{text "(conclude)"}]{@{text "A \<Longrightarrow> \<dots> \<Longrightarrow> C"}}{@{text "A \<Longrightarrow> \<dots> \<Longrightarrow> #C"}}
   \]
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML Goal.init: "cterm -> thm"} \\
   @{index_ML Goal.finish: "Proof.context -> thm -> thm"} \\
@@ -86,12 +86,12 @@
   protection, even if there are pending subgoals.
 
   \end{description}
-*}
+\<close>
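+
+text %mlex \<open>A minimal sketch of the life cycle of a goal state,
+  assuming the standard @{ML resolve_tac} refinement explained in
+  \secref{sec:resolve-assume-tac}:\<close>
+
+ML \<open>
+  val goal = Goal.init @{cprop "x = (x::nat)"};
+  val st = Seq.hd (resolve_tac [@{thm refl}] 1 goal);
+  val res = Goal.finish @{context} st;
+\<close>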
 
 
-section {* Tactics\label{sec:tactics} *}
+section \<open>Tactics\label{sec:tactics}\<close>
 
-text {* A @{text "tactic"} is a function @{text "goal \<rightarrow> goal\<^sup>*\<^sup>*"} that
+text \<open>A @{text "tactic"} is a function @{text "goal \<rightarrow> goal\<^sup>*\<^sup>*"} that
   maps a given goal state (represented as a theorem, cf.\
   \secref{sec:tactical-goals}) to a lazy sequence of potential
   successor states.  The underlying sequence implementation is lazy
@@ -168,9 +168,9 @@
   experienced by the user (e.g.\ tactics that insist on being
   applicable only to singleton goals, or prevent composition via
   standard tacticals such as @{ML REPEAT}).
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_type tactic: "thm -> thm Seq.seq"} \\
   @{index_ML no_tac: tactic} \\
@@ -231,12 +231,12 @@
   without changing the main goal protection.
 
   \end{description}
-*}
+\<close>
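+
+text %mlex \<open>For illustration, @{ML all_tac} yields a singleton
+  sequence, while @{ML no_tac} yields an empty one (any theorem serves
+  as a dummy goal state here, assuming the usual @{ML Seq.pull} and
+  @{ML is_some}~/ @{ML is_none} operations):\<close>
+
+ML \<open>
+  @{assert} (is_some (Seq.pull (all_tac @{thm TrueI})));
+  @{assert} (is_none (Seq.pull (no_tac @{thm TrueI})));
+\<close>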
 
 
-subsection {* Resolution and assumption tactics \label{sec:resolve-assume-tac} *}
+subsection \<open>Resolution and assumption tactics \label{sec:resolve-assume-tac}\<close>
 
-text {* \emph{Resolution} is the most basic mechanism for refining a
+text \<open>\emph{Resolution} is the most basic mechanism for refining a
   subgoal using a theorem as object-level rule.
   \emph{Elim-resolution} is particularly suited for elimination rules:
   it resolves with a rule, proves its first premise by assumption, and
@@ -273,9 +273,9 @@
 
   Recall that higher-order unification may produce multiple results
   that are enumerated here.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML resolve_tac: "thm list -> int -> tactic"} \\
   @{index_ML eresolve_tac: "thm list -> int -> tactic"} \\
@@ -346,12 +346,12 @@
 
   Flexible subgoals are not updated at will, but are left alone.
   \end{description}
-*}
+\<close>
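+
+text %mlex \<open>A small sketch combining resolution with a subsequent
+  proof by assumption (assuming the usual @{ML assume_tac}
+  operation):\<close>
+
+ML \<open>
+  val goal = Goal.init @{cprop "A \<longrightarrow> A"};
+  val st = goal
+    |> resolve_tac [@{thm impI}] 1 |> Seq.hd
+    |> assume_tac 1 |> Seq.hd;
+  val res = Goal.finish @{context} st;
+\<close>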
 
 
-subsection {* Explicit instantiation within a subgoal context *}
+subsection \<open>Explicit instantiation within a subgoal context\<close>
 
-text {* The main resolution tactics (\secref{sec:resolve-assume-tac})
+text \<open>The main resolution tactics (\secref{sec:resolve-assume-tac})
   use higher-order unification, which works well in many practical
   situations despite its daunting theoretical properties.
   Nonetheless, there are important problem classes where unguided
@@ -390,9 +390,9 @@
   Types are instantiated before terms are.  Since term instantiation
   already performs simple type-inference, explicit type
   instantiations are seldom necessary.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML res_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
   @{index_ML eres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
@@ -440,17 +440,17 @@
   ML code.  The slightly more advanced @{ML Subgoal.FOCUS} combinator
   of \secref{sec:struct-goals} allows to refer to internal goal
   structure with explicit context management.
-*}
+\<close>
 
 
-subsection {* Rearranging goal states *}
+subsection \<open>Rearranging goal states\<close>
 
-text {* In rare situations there is a need to rearrange goal states:
+text \<open>In rare situations there is a need to rearrange goal states:
   either the overall collection of subgoals, or the local structure of
   a subgoal.  Various administrative tactics allow operating on the
-  concrete presentation these conceptual sets of formulae. *}
+  concrete presentation of these conceptual sets of formulae.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML rotate_tac: "int -> int -> tactic"} \\
   @{index_ML distinct_subgoals_tac: tactic} \\
@@ -477,20 +477,20 @@
   ignored; they often disappear as unknowns get instantiated.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Raw composition: resolution without lifting *}
+subsection \<open>Raw composition: resolution without lifting\<close>
 
-text {*
+text \<open>
   Raw composition of two rules means resolving them without prior
   lifting or renaming of unknowns.  This low-level operation, which
   underlies the resolution tactics, may occasionally be useful for
   special effects.  Schematic variables are not renamed by default, so
   beware of clashes!
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML compose_tac: "(bool * thm * int) -> int -> tactic"} \\
   @{index_ML Drule.compose: "thm * int * thm -> thm"} \\
@@ -526,31 +526,31 @@
   the consequences, they may produce results that cause problems with
   standard rules and tactics later on.
   \end{warn}
-*}
+\<close>
 
 
-section {* Tacticals \label{sec:tacticals} *}
+section \<open>Tacticals \label{sec:tacticals}\<close>
 
-text {* A \emph{tactical} is a functional combinator for building up
+text \<open>A \emph{tactical} is a functional combinator for building up
   complex tactics from simpler ones.  Common tacticals perform
   sequential composition, disjunctive choice, iteration, or goal
   addressing.  Various search strategies may be expressed via
   tacticals.
-*}
+\<close>
 
 
-subsection {* Combining tactics *}
+subsection \<open>Combining tactics\<close>
 
-text {* Sequential composition and alternative choices are the most
+text \<open>Sequential composition and alternative choices are the most
   basic ways to combine tactics, similarly to ``@{verbatim ","}'' and
   ``@{verbatim "|"}'' in Isar method notation.  This corresponds to
   @{ML_op "THEN"} and @{ML_op "ORELSE"} in ML, but there are further
   possibilities for fine-tuning alternation of tactics such as @{ML_op
   "APPEND"}.  Further details become visible in ML due to explicit
   subgoal addressing.
-*}
+\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML_op "THEN": "tactic * tactic -> tactic"} \\
   @{index_ML_op "ORELSE": "tactic * tactic -> tactic"} \\
@@ -607,16 +607,16 @@
   The other primed tacticals work analogously.
 
   \end{description}
-*}
+\<close>
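+
+text %mlex \<open>For example, a tactic that first tries reflexivity and
+  otherwise falls back on proof by assumption (assuming the usual
+  @{ML assume_tac} operation):\<close>
+
+ML \<open>
+  val try_refl_tac = resolve_tac [@{thm refl}] 1 ORELSE assume_tac 1;
+\<close>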
 
 
-subsection {* Repetition tacticals *}
+subsection \<open>Repetition tacticals\<close>
 
-text {* These tacticals provide further control over repetition of
+text \<open>These tacticals provide further control over repetition of
   tactics, beyond the stylized forms of ``@{verbatim "?"}''  and
-  ``@{verbatim "+"}'' in Isar method expressions. *}
+  ``@{verbatim "+"}'' in Isar method expressions.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML "TRY": "tactic -> tactic"} \\
   @{index_ML "REPEAT": "tactic -> tactic"} \\
@@ -658,9 +658,9 @@
   by @{text "n"} (where @{ML "~1"} means @{text "\<infinity>"}).
 
   \end{description}
-*}
+\<close>
 
-text %mlex {* The basic tactics and tacticals considered above follow
+text %mlex \<open>The basic tactics and tacticals considered above follow
   some algebraic laws:
 
   \begin{itemize}
@@ -678,14 +678,14 @@
   implementation tricks):
 
   \end{itemize}
-*}
+\<close>
 
-ML {*
+ML \<open>
   fun TRY tac = tac ORELSE all_tac;
   fun REPEAT tac st = ((tac THEN REPEAT tac) ORELSE all_tac) st;
-*}
+\<close>
 
-text {* If @{text "tac"} can return multiple outcomes then so can @{ML
+text \<open>If @{text "tac"} can return multiple outcomes then so can @{ML
   REPEAT}~@{text "tac"}.  @{ML REPEAT} uses @{ML_op "ORELSE"} and not
   @{ML_op "APPEND"}, it applies @{text "tac"} as many times as
   possible in each outcome.
@@ -697,17 +697,17 @@
   evaluation in Standard ML.  The following attempt would make @{ML
   REPEAT}~@{text "tac"} loop:
   \end{warn}
-*}
+\<close>
 
-ML {*
+ML \<open>
   (*BAD -- does not terminate!*)
   fun REPEAT tac = (tac THEN REPEAT tac) ORELSE all_tac;
-*}
+\<close>
 
 
-subsection {* Applying tactics to subgoal ranges *}
+subsection \<open>Applying tactics to subgoal ranges\<close>
 
-text {* Tactics with explicit subgoal addressing
+text \<open>Tactics with explicit subgoal addressing
   @{ML_type "int -> tactic"} can be used together with tacticals that
   act like ``subgoal quantifiers'': guided by the success of the body
   tactic, a certain range of subgoals is covered.  Thus the body tactic
@@ -718,9 +718,9 @@
   @{text "n"} towards @{text "1"}.  This has the fortunate effect that
   newly emerging subgoals are concatenated in the result, without
   interfering with each other.  Nonetheless, there might be situations
-  where a different order is desired. *}
+  where a different order is desired.\<close>
 
-text %mlref {*
+text %mlref \<open>
   \begin{mldecls}
   @{index_ML ALLGOALS: "(int -> tactic) -> tactic"} \\
   @{index_ML SOMEGOAL: "(int -> tactic) -> tactic"} \\
@@ -760,25 +760,25 @@
   corresponding range of subgoals, counting downwards.
 
   \end{description}
-*}
+\<close>
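+
+text %mlex \<open>For example, a tactic that solves all subgoals by
+  assumption, failing if some subgoal cannot be solved that way
+  (assuming the usual @{ML assume_tac} operation):\<close>
+
+ML \<open>
+  val solve_by_assumption = ALLGOALS assume_tac;
+\<close>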
 
 
-subsection {* Control and search tacticals *}
+subsection \<open>Control and search tacticals\<close>
 
-text {* A predicate on theorems @{ML_type "thm -> bool"} can test
+text \<open>A predicate on theorems @{ML_type "thm -> bool"} can test
   whether a goal state enjoys some desirable property --- such as
   having no subgoals.  Tactics that search for satisfactory goal
   states are easy to express.  The main search procedures,
   depth-first, breadth-first and best-first, are provided as
   tacticals.  They generate the search tree by repeatedly applying a
-  given tactic.  *}
+  given tactic.\<close>
 
 
 text %mlref ""
 
-subsubsection {* Filtering a tactic's results *}
+subsubsection \<open>Filtering a tactic's results\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML FILTER: "(thm -> bool) -> tactic -> tactic"} \\
   @{index_ML CHANGED: "tactic -> tactic"} \\
@@ -796,12 +796,12 @@
   CHANGED}~@{text "tac"} always has some effect on the state.
 
   \end{description}
-*}
+\<close>
 
 
-subsubsection {* Depth-first search *}
+subsubsection \<open>Depth-first search\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML DEPTH_FIRST: "(thm -> bool) -> tactic -> tactic"} \\
   @{index_ML DEPTH_SOLVE: "tactic -> tactic"} \\
@@ -825,12 +825,12 @@
   it insists upon solving at least one subgoal.
 
   \end{description}
-*}
+\<close>
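+
+text %mlex \<open>A classic pattern: search depth-first for a state with no
+  remaining subgoals, repeatedly applying a given tactic (using @{ML
+  has_fewer_prems}, which is explained below):\<close>
+
+ML \<open>
+  fun solved st = has_fewer_prems 1 st;
+  fun depth_solve_tac tac = DEPTH_FIRST solved tac;
+\<close>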
 
 
-subsubsection {* Other search strategies *}
+subsubsection \<open>Other search strategies\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML BREADTH_FIRST: "(thm -> bool) -> tactic -> tactic"} \\
   @{index_ML BEST_FIRST: "(thm -> bool) * (thm -> int) -> tactic -> tactic"} \\
@@ -866,12 +866,12 @@
   continuing the search.
 
   \end{description}
-*}
+\<close>
 
 
-subsubsection {* Auxiliary tacticals for searching *}
+subsubsection \<open>Auxiliary tacticals for searching\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML COND: "(thm -> bool) -> tactic -> tactic -> tactic"} \\
   @{index_ML IF_UNSOLVED: "tactic -> tactic"} \\
@@ -901,12 +901,12 @@
   limits the search space by making its argument deterministic.
 
   \end{description}
-*}
+\<close>
 
 
-subsubsection {* Predicates and functions useful for searching *}
+subsubsection \<open>Predicates and functions useful for searching\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML has_fewer_prems: "int -> thm -> bool"} \\
   @{index_ML Thm.eq_thm: "thm * thm -> bool"} \\
@@ -934,6 +934,6 @@
   @{ML BEST_FIRST}.
 
   \end{description}
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Document_Preparation.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Document_Preparation.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Document preparation \label{ch:document-prep} *}
+chapter \<open>Document preparation \label{ch:document-prep}\<close>
 
-text {* Isabelle/Isar provides a simple document preparation system
+text \<open>Isabelle/Isar provides a simple document preparation system
   based on {PDF-\LaTeX}, with support for hyperlinks and bookmarks
   within that format.  This allows producing papers, books, theses
   etc.\ from Isabelle theory sources.
@@ -15,12 +15,12 @@
   document preparation are @{tool_ref mkroot} and @{tool_ref build}.
 
   The classic Isabelle/HOL tutorial @{cite "isabelle-hol-book"} also
-  explains some aspects of theory presentation.  *}
+  explains some aspects of theory presentation.\<close>
 
 
-section {* Markup commands \label{sec:markup} *}
+section \<open>Markup commands \label{sec:markup}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "header"} & : & @{text "toplevel \<rightarrow> toplevel"} \\[0.5ex]
     @{command_def "chapter"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -96,12 +96,12 @@
   specifications, but have a different formal status and produce
   different {\LaTeX} macros.  The default definitions coincide for
   analogous commands such as @{command section} and @{command sect}.
-*}
+\<close>
 
 
-section {* Document Antiquotations \label{sec:antiq} *}
+section \<open>Document Antiquotations \label{sec:antiq}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{antiquotation_def "theory"} & : & @{text antiquotation} \\
     @{antiquotation_def "thm"} & : & @{text antiquotation} \\
@@ -296,12 +296,12 @@
   [cite_macro = nocite] foobar}"} produces @{verbatim "\\nocite{foobar}"}.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Styled antiquotations *}
+subsection \<open>Styled antiquotations\<close>
 
-text {* The antiquotations @{text thm}, @{text prop} and @{text
+text \<open>The antiquotations @{text thm}, @{text prop} and @{text
   term} admit an extra \emph{style} specification to modify the
   printed result.  A style is specified by a name with a possibly
   empty list of arguments; multiple styles can be sequenced with
@@ -324,12 +324,12 @@
   normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* General options *}
+subsection \<open>General options\<close>
 
-text {* The following options are available to tune the printed output
+text \<open>The following options are available to tune the printed output
   of antiquotations.  Note that many of these coincide with system and
   configuration options of the same names.
 
@@ -413,12 +413,12 @@
   For Boolean flags, ``@{text "name = true"}'' may be abbreviated as
   ``@{text name}''.  All of the above flags are disabled by default,
   unless changed specifically for a logic session in the corresponding
-  @{verbatim "ROOT"} file.  *}
+  @{verbatim "ROOT"} file.\<close>
 
 
-section {* Markup via command tags \label{sec:tags} *}
+section \<open>Markup via command tags \label{sec:tags}\<close>
 
-text {* Each Isabelle/Isar command may be decorated by additional
+text \<open>Each Isabelle/Isar command may be decorated by additional
   presentation tags, to indicate some modification in the way it is
   printed in the document.
 
@@ -465,12 +465,12 @@
   parts of the text.  Logic sessions may also specify ``document
   versions'', where given tags are interpreted in some particular way.
   Again see @{cite "isabelle-sys"} for further details.
-*}
+\<close>
 
 
-section {* Railroad diagrams *}
+section \<open>Railroad diagrams\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{antiquotation_def "rail"} & : & @{text antiquotation} \\
   \end{matharray}
@@ -578,12 +578,12 @@
   @{rail \<open>A + sep\<close>}
 
   \end{itemize}
-*}
+\<close>
 
 
-section {* Draft presentation *}
+section \<open>Draft presentation\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "display_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
   \end{matharray}
@@ -600,6 +600,6 @@
   verbatim.
 
   \end{description}
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/First_Order_Logic.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/First_Order_Logic.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -1,17 +1,17 @@
 
-header {* Example: First-Order Logic *}
+header \<open>Example: First-Order Logic\<close>
 
 theory %visible First_Order_Logic
 imports Base  (* FIXME Pure!? *)
 begin
 
-text {*
+text \<open>
   \noindent In order to commence a new object-logic within
   Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
   for individuals and @{text "o"} for object-propositions.  The latter
   is embedded into the language of Pure propositions by means of a
   separate judgment.
-*}
+\<close>
 
 typedecl i
 typedecl o
@@ -19,24 +19,24 @@
 judgment
   Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
 
-text {*
+text \<open>
   \noindent Note that the object-logic judgment is implicit in the
   syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
   From the Pure perspective this means ``@{prop A} is derivable in the
   object-logic''.
-*}
+\<close>
 
 
-subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
+subsection \<open>Equational reasoning \label{sec:framework-ex-equal}\<close>
 
-text {*
+text \<open>
   Equality is axiomatized as a binary predicate on individuals, with
   reflexivity as introduction, and substitution as elimination
   principle.  Note that the latter is particularly convenient in a
   framework like Isabelle, because syntactic congruences are
   implicitly produced by unification of @{term "B x"} against
   expressions containing occurrences of @{term x}.
-*}
+\<close>
 
 axiomatization
   equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infix "=" 50)
@@ -44,33 +44,33 @@
   refl [intro]: "x = x" and
   subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
 
-text {*
+text \<open>
   \noindent Substitution is very powerful, but also hard to control in
   full generality.  We derive some common symmetry~/ transitivity
   schemes of @{term equal} as particular consequences.
-*}
+\<close>
 
 theorem sym [sym]:
   assumes "x = y"
   shows "y = x"
 proof -
   have "x = x" ..
-  with `x = y` show "y = x" ..
+  with \<open>x = y\<close> show "y = x" ..
 qed
 
 theorem forw_subst [trans]:
   assumes "y = x" and "B x"
   shows "B y"
 proof -
-  from `y = x` have "x = y" ..
-  from this and `B x` show "B y" ..
+  from \<open>y = x\<close> have "x = y" ..
+  from this and \<open>B x\<close> show "B y" ..
 qed
 
 theorem back_subst [trans]:
   assumes "B x" and "x = y"
   shows "B y"
 proof -
-  from `x = y` and `B x`
+  from \<open>x = y\<close> and \<open>B x\<close>
   show "B y" ..
 qed
 
@@ -78,19 +78,19 @@
   assumes "x = y" and "y = z"
   shows "x = z"
 proof -
-  from `y = z` and `x = y`
+  from \<open>y = z\<close> and \<open>x = y\<close>
   show "x = z" ..
 qed
 
 
-subsection {* Basic group theory *}
+subsection \<open>Basic group theory\<close>
 
-text {*
+text \<open>
   As an example for equational reasoning we consider some bits of
   group theory.  The subsequent locale definition postulates group
   operations and axioms; we also derive some consequences of this
   specification.
-*}
+\<close>
 
 locale group =
   fixes prod :: "i \<Rightarrow> i \<Rightarrow> i"  (infix "\<circ>" 70)
@@ -123,7 +123,7 @@
   finally show "x \<circ> 1 = x" .
 qed
 
-text {*
+text \<open>
   \noindent Reasoning from basic axioms is often tedious.  Our proofs
   work by producing various instances of the given rules (potentially
   the symmetric form) using the pattern ``@{command have}~@{text
@@ -139,7 +139,7 @@
   not be over-emphasized.  The other extreme is to compose a chain by
  plain transitivity only, with replacements always occurring in
  topmost position.  For example:
-*}
+\<close>
 
 (*<*)
 theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
@@ -156,7 +156,7 @@
 qed
 (*>*)
 
-text {*
+text \<open>
   \noindent Here we have re-used the built-in mechanism for unfolding
   definitions in order to normalize each equational problem.  A more
   realistic object-logic would include proper setup for the Simplifier
@@ -164,18 +164,18 @@
   reasoning in Isabelle.  Then ``@{command unfolding}~@{thm
   left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
   "(simp only: left_inv)"}'' etc.
-*}
+\<close>
 
 end
 
 
-subsection {* Propositional logic \label{sec:framework-ex-prop} *}
+subsection \<open>Propositional logic \label{sec:framework-ex-prop}\<close>
 
-text {*
+text \<open>
   We axiomatize basic connectives of propositional logic: implication,
   disjunction, and conjunction.  The associated rules are modeled
   after Gentzen's system of Natural Deduction @{cite "Gentzen:1935"}.
-*}
+\<close>
 
 axiomatization
   imp :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<longrightarrow>" 25) where
@@ -194,26 +194,26 @@
   conjD\<^sub>1: "A \<and> B \<Longrightarrow> A" and
   conjD\<^sub>2: "A \<and> B \<Longrightarrow> B"
 
-text {*
+text \<open>
   \noindent The conjunctive destructions have the disadvantage that
  decomposing @{prop "A \<and> B"} involves an immediate decision about which
   component should be projected.  The more convenient simultaneous
   elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
   follows:
-*}
+\<close>
 
 theorem conjE [elim]:
   assumes "A \<and> B"
   obtains A and B
 proof
-  from `A \<and> B` show A by (rule conjD\<^sub>1)
-  from `A \<and> B` show B by (rule conjD\<^sub>2)
+  from \<open>A \<and> B\<close> show A by (rule conjD\<^sub>1)
+  from \<open>A \<and> B\<close> show B by (rule conjD\<^sub>2)
 qed
 
-text {*
+text \<open>
   \noindent Here is an example of swapping conjuncts with a single
   intermediate elimination step:
-*}
+\<close>
 
 (*<*)
 lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
@@ -226,7 +226,7 @@
 qed
 (*>*)
 
-text {*
+text \<open>
   \noindent Note that the analogous elimination rule for disjunction
   ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
   the original axiomatization of @{thm disjE}.
@@ -234,7 +234,7 @@
   \medskip We continue propositional logic by introducing absurdity
   with its characteristic elimination.  Plain truth may then be
   defined as a proposition that is trivially true.
-*}
+\<close>
 
 axiomatization
   false :: o  ("\<bottom>") where
@@ -247,10 +247,10 @@
 theorem trueI [intro]: \<top>
   unfolding true_def ..
 
-text {*
+text \<open>
   \medskip\noindent Now negation represents an implication towards
   absurdity:
-*}
+\<close>
 
 definition
   not :: "o \<Rightarrow> o"  ("\<not> _" [40] 40) where
@@ -262,27 +262,27 @@
 unfolding not_def
 proof
   assume A
-  then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
+  then show \<bottom> by (rule \<open>A \<Longrightarrow> \<bottom>\<close>)
 qed
 
 theorem notE [elim]:
   assumes "\<not> A" and A
   shows B
 proof -
-  from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
-  from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
+  from \<open>\<not> A\<close> have "A \<longrightarrow> \<bottom>" unfolding not_def .
+  from \<open>A \<longrightarrow> \<bottom>\<close> and \<open>A\<close> have \<bottom> ..
   then show B ..
 qed
 
 
-subsection {* Classical logic *}
+subsection \<open>Classical logic\<close>
 
-text {*
+text \<open>
   Subsequently we state the principle of classical contradiction as a
   local assumption.  Thus we refrain from forcing the object-logic
   into the classical perspective.  Within that context, we may derive
   well-known consequences of the classical principle.
-*}
+\<close>
 
 locale classical =
   assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
@@ -293,7 +293,7 @@
   shows C
 proof (rule classical)
   assume "\<not> C"
-  with `\<not> \<not> C` show C ..
+  with \<open>\<not> \<not> C\<close> show C ..
 qed
 
 theorem tertium_non_datur: "C \<or> \<not> C"
@@ -304,14 +304,14 @@
     have "\<not> C"
     proof
       assume C then have "C \<or> \<not> C" ..
-      with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+      with \<open>\<not> (C \<or> \<not> C)\<close> show \<bottom> ..
     qed
     then have "C \<or> \<not> C" ..
-    with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+    with \<open>\<not> (C \<or> \<not> C)\<close> show \<bottom> ..
   qed
 qed
 
-text {*
+text \<open>
   \noindent These examples illustrate both classical reasoning and
   non-trivial propositional proofs in general.  All three rules
   characterize classical logic independently, but the original rule is
@@ -323,21 +323,21 @@
   This also explains nicely how classical reasoning really works:
   whatever the main @{text thesis} might be, we may always assume its
   negation!
-*}
+\<close>
 
 end
 
 
-subsection {* Quantifiers \label{sec:framework-ex-quant} *}
+subsection \<open>Quantifiers \label{sec:framework-ex-quant}\<close>
 
-text {*
+text \<open>
   Representing quantifiers is easy, thanks to the higher-order nature
   of the underlying framework.  According to the well-known technique
   introduced by Church @{cite "church40"}, quantifiers are operators on
   predicates, which are syntactically represented as @{text "\<lambda>"}-terms
   of type @{typ "i \<Rightarrow> o"}.  Binder notation turns @{text "All (\<lambda>x. B
   x)"} into @{text "\<forall>x. B x"} etc.
-*}
+\<close>
 
 axiomatization
   All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) where
@@ -349,26 +349,26 @@
   exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
   exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
 
-text {*
+text \<open>
   \noindent The statement of @{thm exE} corresponds to ``@{text
   "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar.  In the
   subsequent example we illustrate quantifier reasoning involving all
   four rules:
-*}
+\<close>
 
 theorem
   assumes "\<exists>x. \<forall>y. R x y"
   shows "\<forall>y. \<exists>x. R x y"
-proof    -- {* @{text "\<forall>"} introduction *}
-  obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` ..    -- {* @{text "\<exists>"} elimination *}
-  fix y have "R x y" using `\<forall>y. R x y` ..    -- {* @{text "\<forall>"} destruction *}
-  then show "\<exists>x. R x y" ..    -- {* @{text "\<exists>"} introduction *}
+proof    -- \<open>@{text "\<forall>"} introduction\<close>
+  obtain x where "\<forall>y. R x y" using \<open>\<exists>x. \<forall>y. R x y\<close> ..    -- \<open>@{text "\<exists>"} elimination\<close>
+  fix y have "R x y" using \<open>\<forall>y. R x y\<close> ..    -- \<open>@{text "\<forall>"} destruction\<close>
+  then show "\<exists>x. R x y" ..    -- \<open>@{text "\<exists>"} introduction\<close>
 qed
 
 
-subsection {* Canonical reasoning patterns *}
+subsection \<open>Canonical reasoning patterns\<close>
 
-text {*
+text \<open>
   The main rules of first-order predicate logic from
   \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
   can now be summarized as follows, using the native Isar statement
@@ -407,14 +407,14 @@
   (Pure) elim}, @{attribute (Pure) dest} --- each according to its
   particular shape --- we can immediately write Isar proof texts as
   follows:
-*}
+\<close>
 
 (*<*)
 theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
 proof -
 (*>*)
 
-  txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "A \<longrightarrow> B"
   proof
@@ -422,12 +422,12 @@
     show B sorry %noproof
   qed
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "A \<longrightarrow> B" and A sorry %noproof
   then have B ..
 
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have A sorry %noproof
   then have "A \<or> B" ..
@@ -435,7 +435,7 @@
   have B sorry %noproof
   then have "A \<or> B" ..
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "A \<or> B" sorry %noproof
   then have C
@@ -447,26 +447,26 @@
     then show C sorry %noproof
   qed
 
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have A and B sorry %noproof
   then have "A \<and> B" ..
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "A \<and> B" sorry %noproof
   then obtain A and B ..
 
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<bottom>" sorry %noproof
   then have A ..
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<top>" ..
 
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<not> A"
   proof
@@ -474,12 +474,12 @@
     then show "\<bottom>" sorry %noproof
   qed
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<not> A" and A sorry %noproof
   then have B ..
 
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<forall>x. B x"
   proof
@@ -487,35 +487,35 @@
     show "B x" sorry %noproof
   qed
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<forall>x. B x" sorry %noproof
   then have "B a" ..
 
-  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<exists>x. B x"
   proof
     show "B a" sorry %noproof
   qed
 
-  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+  txt_raw \<open>\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}\<close>(*<*)next(*>*)
 
   have "\<exists>x. B x" sorry %noproof
   then obtain a where "B a" ..
 
-  txt_raw {*\end{minipage}*}
+  txt_raw \<open>\end{minipage}\<close>
 
 (*<*)
 qed
 (*>*)
 
-text {*
+text \<open>
   \bigskip\noindent Of course, these proofs are merely examples.  As
   sketched in \secref{sec:framework-subproof}, there is a fair amount
   of flexibility in expressing Pure deductions in Isar.  Here the user
   is asked to express himself adequately, aiming at proof texts of
   literary quality.
-*}
+\<close>
 
 end %visible
--- a/src/Doc/Isar_Ref/Framework.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Framework.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
+chapter \<open>The Isabelle/Isar Framework \label{ch:isar-framework}\<close>
 
-text {*
+text \<open>
   Isabelle/Isar @{cite "Wenzel:1999:TPHOL" and "Wenzel-PhD" and
   "Nipkow-TYPES02" and "Wenzel-Paulson:2006" and "Wenzel:2006:Festschrift"}
   is intended as a generic framework for developing formal
@@ -74,9 +74,9 @@
   both the Isar text, and depict the primitive rule involved, as
   determined by unification of the problem against rules that are
   declared in the library context.
-*}
+\<close>
 
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+text_raw \<open>\medskip\begin{minipage}{0.6\textwidth}\<close>
 
 (*<*)
 notepad
@@ -88,15 +88,15 @@
 end
 (*>*)
 
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+text_raw \<open>\end{minipage}\begin{minipage}{0.4\textwidth}\<close>
 
-text {*
+text \<open>
   \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
-*}
+\<close>
 
-text_raw {*\end{minipage}*}
+text_raw \<open>\end{minipage}\<close>
 
-text {*
+text \<open>
   \medskip\noindent Note that @{command assume} augments the proof
   context, @{command then} indicates that the current fact shall be
   used in the next step, and @{command have} states an intermediate
@@ -104,7 +104,7 @@
   this claim, using the indicated facts and a canonical rule from the
   context.  We could have been more explicit here by spelling out the
   final proof step via the @{command "by"} command:
-*}
+\<close>
 
 (*<*)
 notepad
@@ -116,7 +116,7 @@
 end
 (*>*)
 
-text {*
+text \<open>
   \noindent The format of the @{text "\<inter>"}-introduction rule represents
   the most basic inference, which proceeds from given premises to a
   conclusion, without any nested proof context involved.
@@ -125,9 +125,9 @@
   the intersection of all sets within a given set.  This requires a
   nested proof of set membership within a local context, where @{term
   A} is an arbitrary-but-fixed member of the collection:
-*}
+\<close>
 
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+text_raw \<open>\medskip\begin{minipage}{0.6\textwidth}\<close>
 
 (*<*)
 notepad
@@ -143,15 +143,15 @@
 end
 (*>*)
 
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+text_raw \<open>\end{minipage}\begin{minipage}{0.4\textwidth}\<close>
 
-text {*
+text \<open>
   \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
-*}
+\<close>
 
-text_raw {*\end{minipage}*}
+text_raw \<open>\end{minipage}\<close>
 
-text {*
+text \<open>
   \medskip\noindent This Isar reasoning pattern again refers to the
   primitive rule depicted above.  The system determines it in the
   ``@{command proof}'' step, which could have been spelled out more
@@ -173,9 +173,9 @@
   directly a local @{term "A"} such that @{prop "x \<in> A"} and @{prop "A
   \<in> \<A>"} hold.  This corresponds to the following Isar proof and
   inference rule, respectively:
-*}
+\<close>
 
-text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+text_raw \<open>\medskip\begin{minipage}{0.6\textwidth}\<close>
 
 (*<*)
 notepad
@@ -192,15 +192,15 @@
 end
 (*>*)
 
-text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+text_raw \<open>\end{minipage}\begin{minipage}{0.4\textwidth}\<close>
 
-text {*
+text \<open>
   \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
-*}
+\<close>
 
-text_raw {*\end{minipage}*}
+text_raw \<open>\end{minipage}\<close>
 
-text {*
+text \<open>
   \medskip\noindent Although the Isar proof follows the natural
  deduction rule closely, the text does not read as naturally as
   anticipated.  There is a double occurrence of an arbitrary
@@ -209,7 +209,7 @@
   involving local parameters.  Isar provides the derived language
   element @{command obtain}, which is able to perform the same
   elimination proof more conveniently:
-*}
+\<close>
 
 (*<*)
 notepad
@@ -221,16 +221,16 @@
 end
 (*>*)
 
-text {*
+text \<open>
  \noindent Here we avoid mentioning the final conclusion @{prop "C"}
   and return to plain forward reasoning.  The rule involved in the
   ``@{command ".."}'' proof is the same as before.
-*}
+\<close>
 
 
-section {* The Pure framework \label{sec:framework-pure} *}
+section \<open>The Pure framework \label{sec:framework-pure}\<close>
 
-text {*
+text \<open>
   The Pure logic @{cite "paulson-found" and "paulson700"} is an intuitionistic
   fragment of higher-order logic @{cite "church40"}.  In type-theoretic
   parlance, there are three levels of @{text "\<lambda>"}-calculus with
@@ -257,12 +257,12 @@
   internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
   Combining such rule statements may involve higher-order unification
   @{cite "paulson-natural"}.
-*}
+\<close>
 
 
-subsection {* Primitive inferences *}
+subsection \<open>Primitive inferences\<close>
 
-text {*
+text \<open>
   Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
   \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
   implicit thanks to type-inference; terms of type @{text "prop"} are
@@ -308,12 +308,12 @@
   Pure.  After the initial object-logic setup, further axiomatizations
   are usually avoided; plain definitions and derived principles are
   used exclusively.
-*}
+\<close>
 
 
-subsection {* Reasoning with rules \label{sec:framework-resolution} *}
+subsection \<open>Reasoning with rules \label{sec:framework-resolution}\<close>
 
-text {*
+text \<open>
   Primitive inferences mostly serve foundational purposes.  The main
   reasoning mechanisms of Pure operate on nested natural deduction
   rules expressed as formulae, using @{text "\<And>"} to bind local
@@ -444,12 +444,12 @@
   premises.  Consequently, @{command fix}-@{command assume}-@{command
  show} makes it possible to fit the result of a sub-proof quite robustly into a
   pending sub-goal, while maintaining a good measure of flexibility.
-*}
+\<close>
 
 
-section {* The Isar proof language \label{sec:framework-isar} *}
+section \<open>The Isar proof language \label{sec:framework-isar}\<close>
 
-text {*
+text \<open>
   Structured proofs are presented as high-level expressions for
   composing entities of Pure (propositions, facts, and goals).  The
  Isar proof language allows one to organize reasoning within the
@@ -554,12 +554,12 @@
   from a nested sub-proof (see \secref{sec:framework-subproof}).
   Further derived concepts will support calculational reasoning (see
   \secref{sec:framework-calc}).
-*}
+\<close>
 
 
-subsection {* Context elements \label{sec:framework-context} *}
+subsection \<open>Context elements \label{sec:framework-context}\<close>
 
-text {*
+text \<open>
   In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
   essentially acts like a proof context.  Isar elaborates this idea
   towards a higher-level notion, with additional information for
@@ -648,49 +648,49 @@
   elements introduced above using the formal proof language itself.
   After finishing a local proof within a block, we indicate the
   exported result via @{command note}.
-*}
+\<close>
 
 (*<*)
 theorem True
 proof
 (*>*)
-  txt_raw {* \begin{minipage}[t]{0.45\textwidth} *}
+  txt_raw \<open>\begin{minipage}[t]{0.45\textwidth}\<close>
   {
     fix x
     have "B x" sorry %noproof
   }
-  note `\<And>x. B x`
-  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  note \<open>\<And>x. B x\<close>
+  txt_raw \<open>\end{minipage}\quad\begin{minipage}[t]{0.45\textwidth}\<close>(*<*)next(*>*)
   {
     assume A
     have B sorry %noproof
   }
-  note `A \<Longrightarrow> B`
-  txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  note \<open>A \<Longrightarrow> B\<close>
+  txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth}\<close>(*<*)next(*>*)
   {
     def x \<equiv> a
     have "B x" sorry %noproof
   }
-  note `B a`
-  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
+  note \<open>B a\<close>
+  txt_raw \<open>\end{minipage}\quad\begin{minipage}[t]{0.45\textwidth}\<close>(*<*)next(*>*)
   {
     obtain x where "A x" sorry %noproof
     have B sorry %noproof
   }
-  note `B`
-  txt_raw {* \end{minipage} *}
+  note \<open>B\<close>
+  txt_raw \<open>\end{minipage}\<close>
 (*<*)
 qed
 (*>*)
 
-text {*
+text \<open>
   \bigskip\noindent This illustrates the meaning of Isar context
   elements without goals getting in between.
-*}
+\<close>
 
-subsection {* Structured statements \label{sec:framework-stmt} *}
+subsection \<open>Structured statements \label{sec:framework-stmt}\<close>
 
-text {*
+text \<open>
   The category @{text "statement"} of top-level theorem specifications
   is defined as follows:
 
@@ -735,20 +735,20 @@
   problem is already laid out directly.  E.g.\ consider the following
   canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
   respectively:
-*}
+\<close>
 
-text_raw {*\begin{minipage}{0.5\textwidth}*}
+text_raw \<open>\begin{minipage}{0.5\textwidth}\<close>
 
 theorem
   fixes x and y
   assumes "A x" and "B y"
   shows "C x y"
 proof -
-  from `A x` and `B y`
+  from \<open>A x\<close> and \<open>B y\<close>
   show "C x y" sorry %noproof
 qed
 
-text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+text_raw \<open>\end{minipage}\begin{minipage}{0.5\textwidth}\<close>
 
 theorem
   obtains x and y
@@ -758,9 +758,9 @@
   then show thesis ..
 qed
 
-text_raw {*\end{minipage}*}
+text_raw \<open>\end{minipage}\<close>
 
-text {*
+text \<open>
   \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
   x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
   y"}\isacharbackquoteclose\ are referenced immediately; there is no
@@ -769,12 +769,12 @@
   thesis}~@{command ".."}''  involves the local rule case @{text "\<And>x
   y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
   "a"} and @{text "b"} produced in the body.
-*}
+\<close>
 
 
-subsection {* Structured proof refinement \label{sec:framework-subproof} *}
+subsection \<open>Structured proof refinement \label{sec:framework-subproof}\<close>
 
-text {*
+text \<open>
   By breaking up the grammar for the Isar proof language, we may
   understand a proof text as a linear sequence of individual proof
   commands.  These are interpreted as transitions of the Isar virtual
@@ -814,19 +814,19 @@
   @{text "state"} mode one level upwards.  The subsequent Isar/VM
   trace indicates block structure, linguistic mode, goal state, and
   inferences:
-*}
+\<close>
 
-text_raw {* \begingroup\footnotesize *}
+text_raw \<open>\begingroup\footnotesize\<close>
 (*<*)notepad begin
 (*>*)
-  txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
+  txt_raw \<open>\begin{minipage}[t]{0.18\textwidth}\<close>
   have "A \<longrightarrow> B"
   proof
     assume A
     show B
       sorry %noproof
   qed
-  txt_raw {* \end{minipage}\quad
+  txt_raw \<open>\end{minipage}\quad
 \begin{minipage}[t]{0.06\textwidth}
 @{text "begin"} \\
 \\
@@ -856,13 +856,13 @@
 \\
 @{text "(refinement #A \<Longrightarrow> B)"} \\
 @{text "(finish)"} \\
-\end{minipage} *}
+\end{minipage}\<close>
 (*<*)
 end
 (*>*)
-text_raw {* \endgroup *}
+text_raw \<open>\endgroup\<close>
 
-text {*
+text \<open>
   \noindent Here the @{inference refinement} inference from
   \secref{sec:framework-resolution} mediates composition of Isar
   sub-proofs nicely.  Observe that this principle incorporates some
@@ -871,9 +871,9 @@
   according to Hereditary Harrop Form.  Moreover, context elements
   that are not used in a sub-proof may be omitted altogether.  For
   example:
-*}
+\<close>
 
-text_raw {*\begin{minipage}{0.5\textwidth}*}
+text_raw \<open>\begin{minipage}{0.5\textwidth}\<close>
 
 (*<*)
 notepad
@@ -886,7 +886,7 @@
     show "C x y" sorry %noproof
   qed
 
-txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+txt_raw \<open>\end{minipage}\begin{minipage}{0.5\textwidth}\<close>
 
 (*<*)
 next
@@ -898,7 +898,7 @@
     show "C x y" sorry %noproof
   qed
 
-txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
+txt_raw \<open>\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}\<close>
 
 (*<*)
 next
@@ -910,7 +910,7 @@
     show "C x y" sorry
   qed
 
-txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+txt_raw \<open>\end{minipage}\begin{minipage}{0.5\textwidth}\<close>
 (*<*)
 next
 (*>*)
@@ -924,9 +924,9 @@
 end
 (*>*)
 
-text_raw {*\end{minipage}*}
+text_raw \<open>\end{minipage}\<close>
 
-text {*
+text \<open>
   \medskip\noindent Such ``peephole optimizations'' of Isar texts are
   practically important to improve readability, by rearranging
  context elements according to the natural flow of reasoning in the
@@ -938,12 +938,12 @@
   particular, there are no direct operations on goal states within the
   proof body.  Moreover, there is no hidden automated reasoning
   involved, just plain unification.
-*}
+\<close>
 
 
-subsection {* Calculational reasoning \label{sec:framework-calc} *}
+subsection \<open>Calculational reasoning \label{sec:framework-calc}\<close>
 
-text {*
+text \<open>
   The existing Isar infrastructure is sufficiently flexible to support
  calculational reasoning (chains of transitivity steps) as a derived
   concept.  The generic proof elements introduced below depend on
@@ -983,7 +983,7 @@
 
   Here is a canonical proof pattern, using @{command have} to
   establish the intermediate results:
-*}
+\<close>
 
 (*<*)
 notepad
@@ -997,7 +997,7 @@
 end
 (*>*)
 
-text {*
+text \<open>
   \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
   provided by the Isabelle/Isar syntax layer: it statically refers to
   the right-hand side argument of the previous statement given in the
@@ -1011,6 +1011,6 @@
   used in fact expressions ``@{text "a [symmetric]"}'', or single-step
   proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
   have}~@{text "y = x"}~@{command ".."}''.
-*}
+\<close>
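 
 text \<open>A minimal calculational sketch, using HOL arithmetic merely as
   illustration material, combines these elements as follows:\<close>
 
 notepad
 begin
   have "(0::nat) + 1 = 1" by simp
   also have "\<dots> = Suc 0" by simp
   finally have "(0::nat) + 1 = Suc 0" .
 end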
 
 end
\ No newline at end of file
--- a/src/Doc/Isar_Ref/Generic.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Generic.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,11 +2,11 @@
 imports Base Main
 begin
 
-chapter {* Generic tools and packages \label{ch:gen-tools} *}
+chapter \<open>Generic tools and packages \label{ch:gen-tools}\<close>
 
-section {* Configuration options \label{sec:config} *}
+section \<open>Configuration options \label{sec:config}\<close>
 
-text {* Isabelle/Pure maintains a record of named configuration
+text \<open>Isabelle/Pure maintains a record of named configuration
   options within the theory or proof context, with values of type
   @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
   string}.  Tools may declare options in ML, and then refer to these
@@ -16,7 +16,7 @@
   name.  This form of context declaration works particularly well with
   commands such as @{command "declare"} or @{command "using"} like
   this:
-*}
+\<close>
 
 declare [[show_main_goal = false]]
 
@@ -25,7 +25,7 @@
   note [[show_main_goal = true]]
 end
 
-text {* For historical reasons, some tools cannot take the full proof
+text \<open>For historical reasons, some tools cannot take the full proof
   context into account and merely refer to the background theory.
   This is accommodated by configuration options being declared as
   ``global'', which may not be changed within a local context.
@@ -49,14 +49,14 @@
   attempt to change a global option in a local context is ignored.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Basic proof tools *}
+section \<open>Basic proof tools\<close>
 
-subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}
+subsection \<open>Miscellaneous methods and attributes \label{sec:misc-meth-att}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{method_def unfold} & : & @{text method} \\
     @{method_def fold} & : & @{text method} \\
@@ -183,12 +183,12 @@
   ones; this is mainly for tuning output of pretty printed theorems.
 
   \end{description}
-*}
+\<close>
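 
 text \<open>As a small illustration of @{method unfold}, consider a
   throwaway definition (the constant @{text double} is made up for
   this sketch):\<close>
 
 definition "double n = n + (n::nat)"
 
 lemma "double 2 = 4"
   by (unfold double_def) simp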
 
 
-subsection {* Low-level equational reasoning *}
+subsection \<open>Low-level equational reasoning\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{method_def subst} & : & @{text method} \\
     @{method_def hypsubst} & : & @{text method} \\
@@ -251,12 +251,12 @@
   @{attribute split}, for example.
 
   \end{description}
-*}
+\<close>
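 
 text \<open>For instance, @{method subst} replaces a single occurrence at a
   time, in contrast to exhaustive simplification (a minimal sketch):\<close>
 
 lemma
   fixes a b :: nat
   assumes eq: "a = b"
   shows "a + a = b + b"
   apply (subst eq)
   apply (subst eq)
   apply (rule refl)
   done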
 
 
-subsection {* Further tactic emulations \label{sec:tactics} *}
+subsection \<open>Further tactic emulations \label{sec:tactics}\<close>
 
-text {*
+text \<open>
   The following improper proof methods emulate traditional tactics.
   These admit direct access to the goal state, which is normally
   considered harmful!  In particular, this may involve both numbered
@@ -366,12 +366,12 @@
   appear in production theories.
 
   \end{description}
-*}
+\<close>
 
 
-section {* The Simplifier \label{sec:simplifier} *}
+section \<open>The Simplifier \label{sec:simplifier}\<close>
 
-text {* The Simplifier performs conditional and unconditional
+text \<open>The Simplifier performs conditional and unconditional
   rewriting and uses contextual information: rule declarations in the
   background theory or local proof context are taken into account, as
   well as chained facts and subgoal premises (``local assumptions'').
@@ -389,12 +389,12 @@
  engaging with the internal structures.  Thus it serves as a
  general-purpose proof tool with the main focus on equational
   reasoning, and a bit more than that.
-*}
+\<close>
 
 
-subsection {* Simplification methods \label{sec:simp-meth} *}
+subsection \<open>Simplification methods \label{sec:simp-meth}\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcll}
     @{method_def simp} & : & @{text method} \\
     @{method_def simp_all} & : & @{text method} \\
@@ -505,38 +505,38 @@
 
   \end{supertabular}
   \end{center}
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* We consider basic algebraic simplifications in Isabelle/HOL.
+text \<open>We consider basic algebraic simplifications in Isabelle/HOL.
   The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
   a good candidate to be solved by a single call of @{method simp}:
-*}
+\<close>
 
 lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops
 
-text {* The above attempt \emph{fails}, because @{term "0"} and @{term
+text \<open>The above attempt \emph{fails}, because @{term "0"} and @{term
   "op +"} in the HOL library are declared as generic type class
   operations, without stating any algebraic laws yet.  More specific
   types are required to get access to certain standard simplifications
-  of the theory context, e.g.\ like this: *}
+  of the theory context, e.g.\ like this:\<close>
 
 lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
 lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
 lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp
 
-text {*
+text \<open>
   \medskip In many cases, assumptions of a subgoal are also needed in
   the simplification process.  For example:
-*}
+\<close>
 
 lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
 lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
 lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp
 
-text {* As seen above, local assumptions that shall contribute to
+text \<open>As seen above, local assumptions that shall contribute to
   simplification need to be part of the subgoal already, or indicated
  explicitly for use by the subsequent method invocation.  Either too
  little or too much information can make simplification fail, for
@@ -550,19 +550,19 @@
   nontermination, but not this one.  The problem can be solved
   nonetheless, by ignoring assumptions via special options as
   explained before:
-*}
+\<close>
 
 lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
   by (simp (no_asm))
 
-text {* The latter form is typical for long unstructured proof
+text \<open>The latter form is typical for long unstructured proof
   scripts, where the control over the goal content is limited.  In
   structured proofs it is usually better to avoid pushing too many
   facts into the goal state in the first place.  Assumptions in the
  Isar proof context do not intrude on the reasoning unless used
   explicitly.  This is illustrated for a toplevel statement and a
   local proof body as follows:
-*}
+\<close>
 
 lemma
   assumes "\<And>x::nat. f x = g (f (g x))"
@@ -574,7 +574,7 @@
   have "f 0 = f 0 + 0" by simp
 end
 
-text {* \medskip Because assumptions may simplify each other, there
+text \<open>\medskip Because assumptions may simplify each other, there
   can be very subtle cases of nontermination. For example, the regular
   @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
   \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
@@ -585,21 +585,21 @@
   \]
   whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
   Q"} terminates (without solving the goal):
-*}
+\<close>
 
 lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
   apply simp
   oops
 
-text {* See also \secref{sec:simp-trace} for options to enable
+text \<open>See also \secref{sec:simp-trace} for options to enable
   Simplifier trace mode, which often helps to diagnose problems with
   rewrite systems.
-*}
+\<close>
 
 
-subsection {* Declaring rules \label{sec:simp-rules} *}
+subsection \<open>Declaring rules \label{sec:simp-rules}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{attribute_def simp} & : & @{text attribute} \\
     @{attribute_def split} & : & @{text attribute} \\
@@ -762,12 +762,12 @@
   will contain the unwanted rules, and thus have to be deleted again
   in the theory body.
   \end{warn}
-*}
+\<close>
 
 
-subsection {* Ordered rewriting with permutative rules *}
+subsection \<open>Ordered rewriting with permutative rules\<close>
 
-text {* A rewrite rule is \emph{permutative} if the left-hand side and
+text \<open>A rewrite rule is \emph{permutative} if the left-hand side and
  right-hand side are equal up to renaming of variables.  The most
   common permutative rule is commutativity: @{text "?x + ?y = ?y +
   ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
@@ -790,12 +790,12 @@
   \medskip Permutative rewrite rules are declared to the Simplifier
   just like other rewrite rules.  Their special status is recognized
   automatically, and their application is guarded by the term ordering
-  accordingly. *}
+  accordingly.\<close>
 
 
-subsubsection {* Rewriting with AC operators *}
+subsubsection \<open>Rewriting with AC operators\<close>
 
-text {* Ordered rewriting is particularly effective in the case of
+text \<open>Ordered rewriting is particularly effective in the case of
   associative-commutative operators.  (Associativity by itself is not
   permutative.)  When dealing with an AC-operator @{text "f"}, keep
   the following points in mind:
@@ -815,7 +815,7 @@
 
   Ordered rewriting with the combination of A, C, and LC sorts a term
   lexicographically --- the rewriting engine imitates bubble-sort.
-*}
+\<close>
 
 locale AC_example =
   fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
@@ -831,13 +831,13 @@
 
 lemmas AC_rules = assoc commute left_commute
 
-text {* Thus the Simplifier is able to establish equalities with
+text \<open>Thus the Simplifier is able to establish equalities with
   arbitrary permutations of subterms, by normalizing to a common
-  standard form.  For example: *}
+  standard form.  For example:\<close>
 
 lemma "(b \<bullet> c) \<bullet> a = xxx"
   apply (simp only: AC_rules)
-  txt {* @{subgoals} *}
+  txt \<open>@{subgoals}\<close>
   oops
 
 lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
@@ -846,27 +846,27 @@
 
 end
 
-text {* Martin and Nipkow @{cite "martin-nipkow"} discuss the theory and
+text \<open>Martin and Nipkow @{cite "martin-nipkow"} discuss the theory and
   give many examples; other algebraic structures are amenable to
   ordered rewriting, such as Boolean rings.  The Boyer-Moore theorem
   prover @{cite bm88book} also employs ordered rewriting.
-*}
+\<close>
 
 
-subsubsection {* Re-orienting equalities *}
+subsubsection \<open>Re-orienting equalities\<close>
 
-text {* Another application of ordered rewriting uses the derived rule
+text \<open>Another application of ordered rewriting uses the derived rule
   @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
   reverse equations.
 
   This is occasionally useful to re-orient local assumptions according
   to the term ordering, when other built-in mechanisms of
-  reorientation and mutual simplification fail to apply.  *}
+  reorientation and mutual simplification fail to apply.\<close>
 
 
-subsection {* Simplifier tracing and debugging \label{sec:simp-trace} *}
+subsection \<open>Simplifier tracing and debugging \label{sec:simp-trace}\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcll}
     @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
     @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
@@ -921,15 +921,15 @@
   rewrite step. For example:
 
   \end{description}
-*}
+\<close>
 
 declare conjI [simp_break]
 declare [[simp_break "?x \<and> ?y"]]
 
 
-subsection {* Simplification procedures \label{sec:simproc} *}
+subsection \<open>Simplification procedures \label{sec:simproc}\<close>
 
-text {* Simplification procedures are ML functions that produce proven
+text \<open>Simplification procedures are ML functions that produce proven
   rewrite rules on demand.  They are associated with higher-order
   patterns that approximate the left-hand sides of equations.  The
   Simplifier first matches the current redex against one of the LHS
@@ -985,43 +985,43 @@
   already adds the new simproc to the subsequent context.
 
   \end{description}
-*}
+\<close>
 
 
-subsubsection {* Example *}
+subsubsection \<open>Example\<close>
 
-text {* The following simplification procedure for @{thm
+text \<open>The following simplification procedure for @{thm
   [source=false, show_types] unit_eq} in HOL performs fine-grained
   control over rule application, beyond higher-order pattern matching.
   Declaring @{thm unit_eq} as @{attribute simp} directly would make
   the Simplifier loop!  Note that a version of this simplification
-  procedure is already active in Isabelle/HOL.  *}
+  procedure is already active in Isabelle/HOL.\<close>
 
-simproc_setup unit ("x::unit") = {*
+simproc_setup unit ("x::unit") = \<open>
   fn _ => fn _ => fn ct =>
     if HOLogic.is_unit (term_of ct) then NONE
     else SOME (mk_meta_eq @{thm unit_eq})
-*}
+\<close>
 
-text {* Since the Simplifier applies simplification procedures
+text \<open>Since the Simplifier applies simplification procedures
   frequently, it is important to make the failure check in ML
-  reasonably fast. *}
+  reasonably fast.\<close>
 
 
-subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
+subsection \<open>Configurable Simplifier strategies \label{sec:simp-strategies}\<close>
 
-text {* The core term-rewriting engine of the Simplifier is normally
+text \<open>The core term-rewriting engine of the Simplifier is normally
   used in combination with some add-on components that modify the
  strategy and allow other non-Simplifier proof tools to be integrated.
   These may be reconfigured in ML as explained below.  Even if the
   default strategies of object-logics like Isabelle/HOL are used
   unchanged, it helps to understand how the standard Simplifier
-  strategies work. *}
+  strategies work.\<close>
 
 
-subsubsection {* The subgoaler *}
+subsubsection \<open>The subgoaler\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
   Proof.context -> Proof.context"} \\
@@ -1049,23 +1049,23 @@
   \end{description}
 
   As an example, consider the following alternative subgoaler:
-*}
+\<close>
 
-ML {*
+ML \<open>
   fun subgoaler_tac ctxt =
     assume_tac ORELSE'
     resolve_tac (Simplifier.prems_of ctxt) ORELSE'
     asm_simp_tac ctxt
-*}
+\<close>
 
-text {* This tactic first tries to solve the subgoal by assumption or
+text \<open>This tactic first tries to solve the subgoal by assumption or
  by resolving with one of the premises, calling simplification
-  only if that fails. *}
+  only if that fails.\<close>
 
 
-subsubsection {* The solver *}
+subsubsection \<open>The solver\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML_type solver} \\
   @{index_ML Simplifier.mk_solver: "string ->
@@ -1162,12 +1162,12 @@
   @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
   or possibly the subgoaler or solver, is faulty.
   \end{warn}
-*}
+\<close>
 
 
-subsubsection {* The looper *}
+subsubsection \<open>The looper\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
   @{index_ML_op setloop: "Proof.context *
   (Proof.context -> int -> tactic) -> Proof.context"} \\
@@ -1246,12 +1246,12 @@
   in tactic expressions that silently assume 0 or 1 subgoals after
   simplification.
   \end{warn}
-*}
+\<close>
 
 
-subsection {* Forward simplification \label{sec:simp-forward} *}
+subsection \<open>Forward simplification \label{sec:simp-forward}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{attribute_def simplified} & : & @{text attribute} \\
   \end{matharray}
@@ -1279,14 +1279,14 @@
   under normal circumstances.
 
   \end{description}
-*}
+\<close>
 
 
-section {* The Classical Reasoner \label{sec:classical} *}
+section \<open>The Classical Reasoner \label{sec:classical}\<close>
 
-subsection {* Basic concepts *}
+subsection \<open>Basic concepts\<close>
 
-text {* Although Isabelle is generic, many users will be working in
+text \<open>Although Isabelle is generic, many users will be working in
   some extension of classical first-order logic.  Isabelle/ZF is built
   upon theory FOL, while Isabelle/HOL conceptually contains
   first-order logic as a fragment.  Theorem-proving in predicate logic
@@ -1300,7 +1300,7 @@
   with high-end automated theorem provers, but they can save
   considerable time and effort in practice.  They can prove theorems
   such as Pelletier's @{cite pelletier86} problems 40 and 41 in a few
-  milliseconds (including full proof reconstruction): *}
+  milliseconds (including full proof reconstruction):\<close>
 
 lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
   by blast
@@ -1308,16 +1308,16 @@
 lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
   by blast
 
-text {* The proof tools are generic.  They are not restricted to
+text \<open>The proof tools are generic.  They are not restricted to
   first-order logic, and have been heavily used in the development of
   the Isabelle/HOL library and applications.  The tactics can be
   traced, and their components can be called directly; in this manner,
-  any proof can be viewed interactively.  *}
+  any proof can be viewed interactively.\<close>
 
 
-subsubsection {* The sequent calculus *}
+subsubsection \<open>The sequent calculus\<close>
 
-text {* Isabelle supports natural deduction, which is easy to use for
+text \<open>Isabelle supports natural deduction, which is easy to use for
   interactive proof.  But natural deduction does not easily lend
   itself to automation, and has a bias towards intuitionism.  For
  certain proofs in classical logic, it cannot be called natural.
@@ -1371,12 +1371,12 @@
   manner.  This yields a surprisingly effective proof procedure.
  Quantifiers add only a few complications, since Isabelle handles
   parameters and schematic variables.  See @{cite \<open>Chapter 10\<close>
-  "paulson-ml2"} for further discussion.  *}
+  "paulson-ml2"} for further discussion.\<close>
 
 
-subsubsection {* Simulating sequents by natural deduction *}
+subsubsection \<open>Simulating sequents by natural deduction\<close>
 
-text {* Isabelle can represent sequents directly, as in the
+text \<open>Isabelle can represent sequents directly, as in the
   object-logic LK.  But natural deduction is easier to work with, and
   most object-logics employ it.  Fortunately, we can simulate the
   sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
@@ -1417,12 +1417,12 @@
   regard for readability of intermediate goal states, we could treat
   the right side more uniformly by representing sequents as @{text
   [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
-*}
+\<close>
 
 
-subsubsection {* Extra rules for the sequent calculus *}
+subsubsection \<open>Extra rules for the sequent calculus\<close>
 
-text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
+text \<open>As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
   @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
   In addition, we need rules to embody the classical equivalence
   between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
@@ -1472,12 +1472,12 @@
   Depth-first search may well go down a blind alley; best-first search
   is better behaved in an infinite search space.  However, quantifier
   replication is too expensive to prove any but the simplest theorems.
-*}
+\<close>
 
 
-subsection {* Rule declarations *}
+subsection \<open>Rule declarations\<close>
 
-text {* The proof tools of the Classical Reasoner depend on
+text \<open>The proof tools of the Classical Reasoner depend on
   collections of rules declared in the context, which are classified
   as introduction, elimination or destruction and as \emph{safe} or
   \emph{unsafe}.  In general, safe rules can be attempted blindly,
@@ -1578,12 +1578,12 @@
   internally as explained above.
 
   \end{description}
-*}
+\<close>
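 
 text \<open>For example, a (redundant) variant of @{text conjI} may be
   proved and declared as a safe introduction rule in one go (a
   minimal sketch; the fact name is made up):\<close>
 
 lemma my_conjI [intro!]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B"
   by (rule conjI)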
 
 
-subsection {* Structured methods *}
+subsection \<open>Structured methods\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{method_def rule} & : & @{text method} \\
     @{method_def contradiction} & : & @{text method} \\
@@ -1611,12 +1611,12 @@
   order.
 
   \end{description}
-*}
+\<close>
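 
 text \<open>A minimal use of @{method contradiction}, where the chained
   facts may appear in either order:\<close>
 
 lemma assumes "\<not> A" and A shows B
   using assms by contradiction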
 
 
-subsection {* Fully automated methods *}
+subsection \<open>Fully automated methods\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{method_def blast} & : & @{text method} \\
     @{method_def auto} & : & @{text method} \\
@@ -1746,12 +1746,12 @@
   semantics of these ad-hoc rule declarations is analogous to the
   attributes given before.  Facts provided by forward chaining are
   inserted into the goal before commencing proof search.
-*}
+\<close>
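 
 text \<open>A small illustration from set theory follows; modifiers such as
   @{text "intro:"} or @{text "dest:"} could supply further rules
   in-place, although none are needed here:\<close>
 
 lemma fixes A B :: "'a set"
   shows "A \<inter> B \<subseteq> A \<union> B"
   by blast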
 
 
-subsection {* Partially automated methods *}
+subsection \<open>Partially automated methods\<close>
 
-text {* These proof methods may help in situations when the
+text \<open>These proof methods may help in situations when the
   fully-automated tools fail.  The result is a simpler subgoal that
   can be tackled by other means, such as by manual instantiation of
   quantifiers.
@@ -1781,12 +1781,12 @@
   splitter for the premises, the subgoal may still be split.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Single-step tactics *}
+subsection \<open>Single-step tactics\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{method_def safe_step} & : & @{text method} \\
     @{method_def inst_step} & : & @{text method} \\
@@ -1827,12 +1827,12 @@
   be eliminated.  The safe wrapper tactical is applied.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Modifying the search step *}
+subsection \<open>Modifying the search step\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
     @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
     @{index_ML_op addSWrapper: "Proof.context *
@@ -1919,12 +1919,12 @@
  each unsafe step of the search.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Object-logic setup \label{sec:object-logic} *}
+section \<open>Object-logic setup \label{sec:object-logic}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
     @{method_def atomize} & : & @{text method} \\
@@ -1999,12 +1999,12 @@
   rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Tracing higher-order unification *}
+section \<open>Tracing higher-order unification\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcll}
     @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
     @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
@@ -2044,6 +2044,6 @@
   Options for unification cannot be modified in a local context.  Only
   the global theory content is taken into account.
   \end{warn}
-*}
+\<close>
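 
 text \<open>For example, tracing of type unification may be enabled once
   and for all at theory level (a sketch; the effect is global, as
   explained above):\<close>
 
 declare [[unify_trace_types = true]]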
 
 end
--- a/src/Doc/Isar_Ref/HOL_Specific.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/HOL_Specific.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -3,9 +3,9 @@
   "~~/src/Tools/Adhoc_Overloading"
 begin
 
-chapter {* Higher-Order Logic *}
-
-text {* Isabelle/HOL is based on Higher-Order Logic, a polymorphic
+chapter \<open>Higher-Order Logic\<close>
+
+text \<open>Isabelle/HOL is based on Higher-Order Logic, a polymorphic
  version of Church's Simple Theory of Types.  HOL can best be
   understood as a simply-typed version of classical set theory.  The
   logic was first implemented in Gordon's HOL system
@@ -54,14 +54,14 @@
   type-inference with type-classes, which are both used extensively in
   the standard libraries and applications.  Beginners can set
   @{attribute show_types} or even @{attribute show_sorts} to get more
-  explicit information about the result of type-inference.  *}
-
-
-chapter {* Derived specification elements *}
-
-section {* Inductive and coinductive definitions \label{sec:hol-inductive} *}
-
-text {*
+  explicit information about the result of type-inference.\<close>
+
+
+chapter \<open>Derived specification elements\<close>
+
+section \<open>Inductive and coinductive definitions \label{sec:hol-inductive}\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -145,12 +145,12 @@
   proof of the above inductive and coinductive definitions.
 
   \end{description}
-*}
-
-
-subsection {* Derived rules *}
-
-text {* A (co)inductive definition of @{text R} provides the following
+\<close>
+
+
+subsection \<open>Derived rules\<close>
+
+text \<open>A (co)inductive definition of @{text R} provides the following
   main theorems:
 
   \begin{description}
@@ -176,12 +176,12 @@
   called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
   of mutual induction rules is called @{text
   "R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
-*}
-
-
-subsection {* Monotonicity theorems *}
-
-text {* The context maintains a default set of theorems that are used
+\<close>
+
+
+subsection \<open>Monotonicity theorems\<close>
+
+text \<open>The context maintains a default set of theorems that are used
   in monotonicity proofs.  New rules can be declared via the
   @{attribute (HOL) mono} attribute.  See the main Isabelle/HOL
   sources for some examples.  The general format of such monotonicity
@@ -215,26 +215,26 @@
   \]
 
   \end{itemize}
-*}
-
-subsubsection {* Examples *}
-
-text {* The finite powerset operator can be defined inductively like this: *}
+\<close>
+
+subsubsection \<open>Examples\<close>
+
+text \<open>The finite powerset operator can be defined inductively like this:\<close>
 
 inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
 where
   empty: "{} \<in> Fin A"
 | insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
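 
 text \<open>The generated rule @{text Fin.induct} then supports proofs by
   rule induction, e.g.\ the following brief sketch:\<close>
 
 lemma assumes "B \<in> Fin A" shows "B \<subseteq> A"
   using assms by induct auto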
 
-text {* The accessible part of a relation is defined as follows: *}
+text \<open>The accessible part of a relation is defined as follows:\<close>
 
 inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
   for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infix "\<prec>" 50)
 where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
 
-text {* Common logical connectives can be easily characterized as
+text \<open>Common logical connectives can be easily characterized as
 non-recursive inductive definitions with parameters, but without
-arguments. *}
+arguments.\<close>
 
 inductive AND for A B :: bool
 where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
@@ -246,17 +246,17 @@
 inductive EXISTS for B :: "'a \<Rightarrow> bool"
 where "B a \<Longrightarrow> EXISTS B"
 
-text {* Here the @{text "cases"} or @{text "induct"} rules produced by
+text \<open>Here the @{text "cases"} or @{text "induct"} rules produced by
   the @{command inductive} package coincide with the expected
   elimination rules for Natural Deduction.  Already in the original
   article by Gerhard Gentzen @{cite "Gentzen:1935"} there is a hint that
   each connective can be characterized by its introductions, and the
-  elimination can be constructed systematically. *}
-
-
-section {* Recursive functions \label{sec:recursion} *}
-
-text {*
+  elimination can be constructed systematically.\<close>
+
+
+section \<open>Recursive functions \label{sec:recursion}\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -371,13 +371,13 @@
   needed, they can be helpful in some proofs about partial functions.
 
   \end{description}
-*}
-
-subsubsection {* Example: evaluation of expressions *}
-
-text {* Subsequently, we define mutual datatypes for arithmetic and
+\<close>
+
+subsubsection \<open>Example: evaluation of expressions\<close>
+
+text \<open>Subsequently, we define mutual datatypes for arithmetic and
   boolean expressions, and use @{command primrec} for evaluation
-  functions that follow the same recursive structure. *}
+  functions that follow the same recursive structure.\<close>
 
 datatype 'a aexp =
     IF "'a bexp"  "'a aexp"  "'a aexp"
@@ -391,7 +391,7 @@
   | Neg "'a bexp"
 
 
-text {* \medskip Evaluation of arithmetic and boolean expressions *}
+text \<open>\medskip Evaluation of arithmetic and boolean expressions\<close>
 
 primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
   and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
@@ -405,7 +405,7 @@
 | "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
 | "evalb env (Neg b) = (\<not> evalb env b)"
 
-text {* Since the value of an expression depends on the value of its
+text \<open>Since the value of an expression depends on the value of its
   variables, the functions @{const evala} and @{const evalb} take an
   additional parameter, an \emph{environment} that maps variables to
   their values.
@@ -414,7 +414,7 @@
   mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
   parameter is lifted canonically on the types @{typ "'a aexp"} and
   @{typ "'a bexp"}, respectively.
-*}
+\<close>
 
 primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
   and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
@@ -428,11 +428,11 @@
 | "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
 | "substb f (Neg b) = Neg (substb f b)"
 
-text {* In textbooks about semantics one often finds substitution
+text \<open>In textbooks about semantics one often finds substitution
   theorems, which express the relationship between substitution and
   evaluation.  For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
   such a theorem by mutual induction, followed by simplification.
-*}
+\<close>
 
 lemma subst_one:
   "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
@@ -445,16 +445,16 @@
   by (induct a and b) simp_all
 
 
-subsubsection {* Example: a substitution function for terms *}
-
-text {* Functions on datatypes with nested recursion are also defined
-  by mutual primitive recursion. *}
+subsubsection \<open>Example: a substitution function for terms\<close>
+
+text \<open>Functions on datatypes with nested recursion are also defined
+  by mutual primitive recursion.\<close>
 
 datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
 
-text {* A substitution function on type @{typ "('a, 'b) term"} can be
+text \<open>A substitution function on type @{typ "('a, 'b) term"} can be
   defined as follows, by working simultaneously on @{typ "('a, 'b)
-  term list"}: *}
+  term list"}:\<close>
 
 primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
   subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
@@ -464,21 +464,21 @@
 | "subst_term_list f [] = []"
 | "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
 
-text {* The recursion scheme follows the structure of the unfolded
+text \<open>The recursion scheme follows the structure of the unfolded
   definition of type @{typ "('a, 'b) term"}.  To prove properties of this
   substitution function, mutual induction is needed:
-*}
+\<close>
 
 lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
   "subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
   by (induct t and ts rule: subst_term.induct subst_term_list.induct) simp_all
 
 
-subsubsection {* Example: a map function for infinitely branching trees *}
-
-text {* Defining functions on infinitely branching datatypes by
+subsubsection \<open>Example: a map function for infinitely branching trees\<close>
+
+text \<open>Defining functions on infinitely branching datatypes by
   primitive recursion is just as easy.
-*}
+\<close>
 
 datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
 
@@ -487,19 +487,19 @@
   "map_tree f (Atom a) = Atom (f a)"
 | "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
 
-text {* Note that all occurrences of functions such as @{text ts}
+text \<open>Note that all occurrences of functions such as @{text ts}
   above must be applied to an argument.  In particular, @{term
-  "map_tree f \<circ> ts"} is not allowed here. *}
-
-text {* Here is a simple composition lemma for @{term map_tree}: *}
+  "map_tree f \<circ> ts"} is not allowed here.\<close>
+
+text \<open>Here is a simple composition lemma for @{term map_tree}:\<close>
 
 lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
   by (induct t) simp_all
 
 
-subsection {* Proof methods related to recursive definitions *}
-
-text {*
+subsection \<open>Proof methods related to recursive definitions\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) pat_completeness} & : & @{text method} \\
     @{method_def (HOL) relation} & : & @{text method} \\
@@ -563,12 +563,12 @@
   @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
 
   \end{description}
-*}
-
-
-subsection {* Functions with explicit partiality *}
-
-text {*
+\<close>
+
+
+subsection \<open>Functions with explicit partiality\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
@@ -634,12 +634,12 @@
 
   \end{description}
 
-*}
-
-
-subsection {* Old-style recursive function definitions (TFL) *}
-
-text {*
+\<close>
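+
+text \<open>A minimal sketch in the option monad (the function name
+  @{text collatz} is hypothetical; the combinator @{const Option.bind}
+  is assumed to be among the declared monotonicity rules):\<close>
+
+partial_function (option) collatz :: "nat \<Rightarrow> nat list option"
+  where "collatz n =
+    (if n \<le> 1 then Some [n]
+     else if even n then Option.bind (collatz (n div 2)) (\<lambda>ns. Some (n # ns))
+     else Option.bind (collatz (3 * n + 1)) (\<lambda>ns. Some (n # ns)))"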
+
+
+subsection \<open>Old-style recursive function definitions (TFL)\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory)"} \\
     @{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
@@ -699,12 +699,12 @@
     (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
       @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
   \<close>}
-*}
-
-
-section {* Old-style datatypes \label{sec:hol-datatype} *}
-
-text {*
+\<close>
+
+
+section \<open>Old-style datatypes \label{sec:hol-datatype}\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "old_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
     @{command_def (HOL) "old_rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
@@ -741,43 +741,43 @@
   induct_tac} available, see \secref{sec:hol-induct-tac}; these allow
   direct reference to the internal structure of subgoals (including
   internally bound parameters).
-*}
-
-
-subsubsection {* Examples *}
-
-text {* We define a type of finite sequences, with slightly different
+\<close>
+
+
+subsubsection \<open>Examples\<close>
+
+text \<open>We define a type of finite sequences, with slightly different
   names than the existing @{typ "'a list"} that is already in @{theory
-  Main}: *}
+  Main}:\<close>
 
 datatype 'a seq = Empty | Seq 'a "'a seq"
 
-text {* We can now prove some simple lemma by structural induction: *}
+text \<open>We can now prove a simple lemma by structural induction:\<close>
 
 lemma "Seq x xs \<noteq> xs"
 proof (induct xs arbitrary: x)
   case Empty
-  txt {* This case can be proved using the simplifier: the freeness
+  txt \<open>This case can be proved using the simplifier: the freeness
     properties of the datatype are already declared as @{attribute
-    simp} rules. *}
+    simp} rules.\<close>
   show "Seq x Empty \<noteq> Empty"
     by simp
 next
   case (Seq y ys)
-  txt {* The step case is proved similarly. *}
+  txt \<open>The step case is proved similarly.\<close>
   show "Seq x (Seq y ys) \<noteq> Seq y ys"
-    using `Seq y ys \<noteq> ys` by simp
+    using \<open>Seq y ys \<noteq> ys\<close> by simp
 qed
 
-text {* Here is a more succinct version of the same proof: *}
+text \<open>Here is a more succinct version of the same proof:\<close>
 
 lemma "Seq x xs \<noteq> xs"
   by (induct xs arbitrary: x) simp_all
 
 
-section {* Records \label{sec:hol-record} *}
-
-text {*
+section \<open>Records \label{sec:hol-record}\<close>
+
+text \<open>
   In principle, records merely generalize the concept of tuples, where
   components may be addressed by labels instead of just position.  The
   logical infrastructure of records in Isabelle/HOL is slightly more
@@ -786,12 +786,12 @@
   extension, yielding ``object-oriented'' effects like (single)
   inheritance.  See also @{cite "NaraschewskiW-TPHOLs98"} for more
   details on object-oriented verification and record subtyping in HOL.
-*}
-
-
-subsection {* Basic concepts *}
-
-text {*
+\<close>
+
+
+subsection \<open>Basic concepts\<close>
+
+text \<open>
   Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
   at the level of terms and types.  The notation is as follows:
 
@@ -849,12 +849,12 @@
   tools enable succinct reasoning patterns.  See also the Isabelle/HOL
   tutorial @{cite "isabelle-hol-book"} for further instructions on using
   records in practice.
-*}
-
-
-subsection {* Record specifications *}
-
-text {*
+\<close>
+
+
+subsection \<open>Record specifications\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
   \end{matharray}
@@ -896,12 +896,12 @@
   \<zeta>\<rparr>"}.
 
   \end{description}
-*}
-
-
-subsection {* Record operations *}
-
-text {*
+\<close>
+
+
+subsection \<open>Record operations\<close>
+
+text \<open>
   Any record definition of the form presented above produces certain
   standard operations.  Selectors and updates are provided for any
   field, including the improper one ``@{text more}''.  There are also
@@ -975,12 +975,12 @@
 
   \noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
   for root records.
-*}
-
-
-subsection {* Derived rules and proof tools *}
-
-text {*
+\<close>
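+
+text \<open>As a small illustrative sketch (the names @{text point2d} and
+  @{text move_x} are hypothetical), selectors, updates and the record
+  simp rules work together as follows:\<close>
+
+record point2d =
+  xc :: int
+  yc :: int
+
+definition move_x :: "point2d \<Rightarrow> int \<Rightarrow> point2d"
+  where "move_x p d = p\<lparr>xc := xc p + d\<rparr>"
+
+lemma "xc (move_x \<lparr>xc = 1, yc = 2\<rparr> 3) = 4"
+  by (simp add: move_x_def)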
+
+
+subsection \<open>Derived rules and proof tools\<close>
+
+text \<open>
   The record package proves several results internally, declaring
   these facts to appropriate proof tools.  This enables users to
   reason about record structures quite conveniently.  Assume that
@@ -1025,16 +1025,16 @@
   using the collective fact @{text "t.defs"}.
 
   \end{enumerate}
-*}
-
-
-subsubsection {* Examples *}
-
-text {* See @{file "~~/src/HOL/ex/Records.thy"}, for example. *}
-
-section {* Typedef axiomatization \label{sec:hol-typedef} *}
-
-text {*
+\<close>
+
+
+subsubsection \<open>Examples\<close>
+
+text \<open>See @{file "~~/src/HOL/ex/Records.thy"}, for example.\<close>
+
+section \<open>Typedef axiomatization \label{sec:hol-typedef}\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
   \end{matharray}
@@ -1118,11 +1118,11 @@
   respectively.
 
   \end{description}
-*}
-
-subsubsection {* Examples *}
-
-text {* Type definitions permit the introduction of abstract data
+\<close>
+
+subsubsection \<open>Examples\<close>
+
+text \<open>Type definitions permit the introduction of abstract data
   types in a safe way, namely by providing models based on already
   existing types.  Given some abstract axiomatic description @{text P}
   of a type, this involves two steps:
@@ -1142,7 +1142,7 @@
   terms of the abstract properties @{text P}.
 
   \medskip The following trivial example pulls a three-element type
-  into existence within the formal logical environment of HOL. *}
+  into existence within the formal logical environment of HOL.\<close>
 
 typedef three = "{(True, True), (True, False), (False, True)}"
   by blast
@@ -1158,19 +1158,19 @@
   fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
   by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject)
 
-text {* Note that such trivial constructions are better done with
-  derived specification mechanisms such as @{command datatype}: *}
+text \<open>Note that such trivial constructions are better done with
+  derived specification mechanisms such as @{command datatype}:\<close>
 
 datatype three' = One' | Two' | Three'
 
-text {* This avoids re-doing basic definitions and proofs from the
-  primitive @{command typedef} above. *}
-
-
-
-section {* Functorial structure of types *}
-
-text {*
+text \<open>This avoids re-doing basic definitions and proofs from the
+  primitive @{command typedef} above.\<close>
+
+
+
+section \<open>Functorial structure of types\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "functor"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
   \end{matharray}
@@ -1207,12 +1207,12 @@
   \<alpha>\<^sub>n"}.
 
   \end{description}
-*}
-
-
-section {* Quotient types *}
-
-text {*
+\<close>
+
+
+section \<open>Quotient types\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
     @{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
@@ -1361,12 +1361,12 @@
     theorems.
 
   \end{description}
-*}
-
-
-section {* Definition by specification \label{sec:hol-specification} *}
-
-text {*
+\<close>
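+
+text \<open>A minimal sketch (hypothetical names): the naturals modulo
+  @{text 2} as a quotient type, with the equivalence proof discharged
+  via the standard @{text equivp} introduction rules:\<close>
+
+definition parity_rel :: "nat \<Rightarrow> nat \<Rightarrow> bool"
+  where "parity_rel m n \<longleftrightarrow> m mod 2 = n mod 2"
+
+quotient_type parity = "nat" / "parity_rel"
+  by (auto intro!: equivpI reflpI sympI transpI simp: parity_rel_def)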
+
+
+section \<open>Definition by specification \label{sec:hol-specification}\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
   \end{matharray}
@@ -1393,12 +1393,12 @@
   the declaration.  Overloaded constants should be declared as such.
 
   \end{description}
-*}
-
-
-section {* Adhoc overloading of constants *}
-
-text {*
+\<close>
+
+
+section \<open>Adhoc overloading of constants\<close>
+
+text \<open>
   \begin{tabular}{rcll}
   @{command_def "adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   @{command_def "no_adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -1435,14 +1435,14 @@
   user's expectations about derived variants.
 
   \end{description}
-*}
-
-
-chapter {* Proof tools *}
-
-section {* Adhoc tuples *}
-
-text {*
+\<close>
+
+
+chapter \<open>Proof tools\<close>
+
+section \<open>Adhoc tuples\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
   \end{matharray}
@@ -1461,12 +1461,12 @@
   parameters introduced.
 
   \end{description}
-*}
-
-
-section {* Transfer package *}
-
-text {*
+\<close>
+
+
+section \<open>Transfer package\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) "transfer"} & : & @{text method} \\
     @{method_def (HOL) "transfer'"} & : & @{text method} \\
@@ -1558,12 +1558,12 @@
   \end{description}
 
   Theoretical background can be found in @{cite "Huffman-Kuncar:2013:lifting_transfer"}.
-*}
-
-
-section {* Lifting package *}
-
-text {*
+\<close>
+
+
+section \<open>Lifting package\<close>
+
+text \<open>
   The Lifting package allows users to lift terms of the raw type to the
   abstract type, which is a necessary step in building a library for an
   abstract type.  Lifting defines a new constant by combining coercion
   functions (Abs and Rep) with the raw term.  It also proves an appropriate
@@ -1781,12 +1781,12 @@
     simplification rules for a predicator are again proved automatically.
   
   \end{description}
-*}
-
-
-section {* Coercive subtyping *}
-
-text {*
+\<close>
+
+
+section \<open>Coercive subtyping\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{attribute_def (HOL) coercion} & : & @{text attribute} \\
     @{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
@@ -1831,12 +1831,12 @@
   inference algorithm.
 
   \end{description}
-*}
-
-
-section {* Arithmetic proof support *}
-
-text {*
+\<close>
+
+
+section \<open>Arithmetic proof support\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) arith} & : & @{text method} \\
     @{attribute_def (HOL) arith} & : & @{text attribute} \\
@@ -1859,12 +1859,12 @@
 
   Note that a simpler (but faster) arithmetic prover is already
   invoked by the Simplifier.
-*}
-
-
-section {* Intuitionistic proof search *}
-
-text {*
+\<close>
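+
+text \<open>For example, a typical linear arithmetic goal involving natural
+  number subtraction (a small illustration):\<close>
+
+lemma "(m::nat) < n \<Longrightarrow> m \<le> n - 1"
+  by arith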
+
+
+section \<open>Intuitionistic proof search\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) iprover} & : & @{text method} \\
   \end{matharray}
@@ -1890,12 +1890,12 @@
   number of rule premises will be taken into account here.
 
   \end{description}
-*}
-
-
-section {* Model Elimination and Resolution *}
-
-text {*
+\<close>
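+
+text \<open>For instance, a purely implicational tautology, with the relevant
+  rules passed explicitly (an illustrative sketch):\<close>
+
+lemma "(A \<longrightarrow> B) \<longrightarrow> (B \<longrightarrow> C) \<longrightarrow> A \<longrightarrow> C"
+  by (iprover intro: impI dest: mp)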
+
+
+section \<open>Model Elimination and Resolution\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) "meson"} & : & @{text method} \\
     @{method_def (HOL) "metis"} & : & @{text method} \\
@@ -1923,12 +1923,12 @@
   theories developed to a large extent using @{method (HOL) metis}.
 
   \end{description}
-*}
-
-
-section {* Algebraic reasoning via Gr\"obner bases *}
-
-text {*
+\<close>
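+
+text \<open>For example, a simple first-order consequence (a small sketch):\<close>
+
+lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> (\<exists>x. P x) \<Longrightarrow> (\<exists>x. Q x)"
+  by metis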
+
+
+section \<open>Algebraic reasoning via Gr\"obner bases\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) "algebra"} & : & @{text method} \\
     @{attribute_def (HOL) algebra} & : & @{text attribute} \\
@@ -1985,13 +1985,13 @@
   collection of pre-simplification rules of the above proof method.
 
   \end{description}
-*}
-
-
-subsubsection {* Example *}
-
-text {* The subsequent example is from geometry: collinearity is
-  invariant by rotation.  *}
+\<close>
+
+
+subsubsection \<open>Example\<close>
+
+text \<open>The subsequent example is from geometry: collinearity is
+  invariant by rotation.\<close>
 
 type_synonym point = "int \<times> int"
 
@@ -2005,14 +2005,14 @@
     (Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)"
   using assms by (algebra add: collinear.simps)
 
-text {*
+text \<open>
  See also @{file "~~/src/HOL/ex/Groebner_Examples.thy"}.
-*}
-
-
-section {* Coherent Logic *}
-
-text {*
+\<close>
+
+
+section \<open>Coherent Logic\<close>
+
+text \<open>
   \begin{matharray}{rcl}
     @{method_def (HOL) "coherent"} & : & @{text method} \\
   \end{matharray}
@@ -2029,12 +2029,12 @@
   @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
 
   \end{description}
-*}
-
-
-section {* Proving propositions *}
-
-text {*
+\<close>
+
+
+section \<open>Proving propositions\<close>
+
+text \<open>
   In addition to the standard proof methods, a number of diagnostic
   tools search for proofs and provide an Isar proof snippet on success.
   These tools are available via the following commands.
@@ -2092,12 +2092,12 @@
   "sledgehammer"} configuration options persistently.
 
   \end{description}
-*}
-
-
-section {* Checking and refuting propositions *}
-
-text {*
+\<close>
+
+
+section \<open>Checking and refuting propositions\<close>
+
+text \<open>
   Identifying incorrect propositions usually involves evaluation of
   particular assignments and systematic counterexample search.  This
   is supported by the following commands.
@@ -2321,12 +2321,12 @@
   common configuration declarations.
 
   \end{description}
-*}
-
-
-section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *}
-
-text {*
+\<close>
+
+
+section \<open>Unstructured case analysis and induction \label{sec:hol-induct-tac}\<close>
+
+text \<open>
   The following tools of Isabelle/HOL support case analysis and
   induction in unstructured tactic scripts; see also
   \secref{sec:cases-induct} for proper Isar versions of similar ideas.
@@ -2379,12 +2379,12 @@
   be generalized before applying the resulting rule.
 
   \end{description}
-*}
-
-
-chapter {* Executable code *}
-
-text {* For validation purposes, it is often useful to \emph{execute}
+\<close>
+
+
+chapter \<open>Executable code\<close>
+
+text \<open>For validation purposes, it is often useful to \emph{execute}
   specifications.  In principle, execution could be simulated by
   Isabelle's inference kernel, i.e. by a combination of resolution and
   simplification.  Unfortunately, this approach is rather inefficient.
@@ -2665,6 +2665,6 @@
     prove a corresponding elimination rule.
 
   \end{description}
-*}
+\<close>
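+
+text \<open>For example, ground terms may be evaluated directly with the
+  @{command (HOL) "value"} command (a small illustration):\<close>
+
+value "rev [1::nat, 2, 3]"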
 
 end
--- a/src/Doc/Isar_Ref/Inner_Syntax.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Inner_Syntax.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Inner syntax --- the term language \label{ch:inner-syntax} *}
+chapter \<open>Inner syntax --- the term language \label{ch:inner-syntax}\<close>
 
-text {* The inner syntax of Isabelle provides concrete notation for
+text \<open>The inner syntax of Isabelle provides concrete notation for
   the main entities of the logical framework, notably @{text
   "\<lambda>"}-terms with types and type classes.  Applications may either
   extend existing syntactic categories by additional notation, or
@@ -25,14 +25,14 @@
   distinction of lexical language versus context-free grammar (see
   \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax
   transformations} (see \secref{sec:syntax-transformations}).
-*}
+\<close>
 
 
-section {* Printing logical entities *}
+section \<open>Printing logical entities\<close>
 
-subsection {* Diagnostic commands \label{sec:print-diag} *}
+subsection \<open>Diagnostic commands \label{sec:print-diag}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "typ"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
     @{command_def "term"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@@ -115,12 +115,12 @@
   Note that antiquotations (cf.\ \secref{sec:antiq}) provide a more
   systematic way to include formal items into the printed text
   document.
-*}
+\<close>
 
 
-subsection {* Details of printed content *}
+subsection \<open>Details of printed content\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcll}
     @{attribute_def show_markup} & : & @{text attribute} \\
     @{attribute_def show_types} & : & @{text attribute} & default @{text false} \\
@@ -241,12 +241,12 @@
   relevant for user interfaces).
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Alternative print modes \label{sec:print-modes} *}
+subsection \<open>Alternative print modes \label{sec:print-modes}\<close>
 
-text {*
+text \<open>
   \begin{mldecls}
     @{index_ML print_mode_value: "unit -> string list"} \\
     @{index_ML Print_Mode.with_modes: "string list -> ('a -> 'b) -> 'a -> 'b"} \\
@@ -312,12 +312,12 @@
   alternative output notation.
 
   \end{itemize}
-*}
+\<close>
 
 
-section {* Mixfix annotations \label{sec:mixfix} *}
+section \<open>Mixfix annotations \label{sec:mixfix}\<close>
 
-text {* Mixfix annotations specify concrete \emph{inner syntax} of
+text \<open>Mixfix annotations specify concrete \emph{inner syntax} of
   Isabelle types and terms.  Locally fixed parameters in toplevel
   theorem statements, locale and class specifications also admit
   mixfix annotations in a fairly uniform manner.  A mixfix annotation
@@ -352,12 +352,12 @@
   Infix and binder declarations provide common abbreviations for
   particular mixfix declarations.  So in practice, mixfix templates
   mostly degenerate to literal text for concrete syntax, such as
-  ``@{verbatim "++"}'' for an infix symbol.  *}
+  ``@{verbatim "++"}'' for an infix symbol.\<close>
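+
+text \<open>For example (an illustrative sketch with a hypothetical constant
+  @{text xor}), an infix annotation may be attached directly to a
+  constant definition:\<close>
+
+definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"  (infixr "[+]" 60)
+  where "A [+] B \<longleftrightarrow> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"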
 
 
-subsection {* The general mixfix form *}
+subsection \<open>The general mixfix form\<close>
 
-text {* In full generality, mixfix declarations work as follows.
+text \<open>In full generality, mixfix declarations work as follows.
   Suppose a constant @{text "c :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<tau>\<^sub>n \<Rightarrow> \<tau>"} is annotated by
   @{text "(mixfix [p\<^sub>1, \<dots>, p\<^sub>n] p)"}, where @{text "mixfix"} is a string
   @{text "d\<^sub>0 _ d\<^sub>1 _ \<dots> _ d\<^sub>n"} consisting of delimiters that surround
@@ -436,12 +436,12 @@
 
   The general idea of pretty printing with blocks and breaks is also
   described in @{cite "paulson-ml2"}; it goes back to @{cite "Oppen:1980"}.
-*}
+\<close>
 
 
-subsection {* Infixes *}
+subsection \<open>Infixes\<close>
 
-text {* Infix operators are specified by convenient short forms that
+text \<open>Infix operators are specified by convenient short forms that
   abbreviate general mixfix annotations as follows:
 
   \begin{center}
@@ -468,12 +468,12 @@
   The alternative notation @{verbatim "op"}~@{text sy} is introduced
   in addition.  Thus any infix operator may be written in prefix form
   (as in ML), independently of the number of arguments in the term.
-*}
+\<close>
 
 
-subsection {* Binders *}
+subsection \<open>Binders\<close>
 
-text {* A \emph{binder} is a variable-binding construct such as a
+text \<open>A \emph{binder} is a variable-binding construct such as a
   quantifier.  The idea to formalize @{text "\<forall>x. b"} as @{text "All
   (\<lambda>x. b)"} for @{text "All :: ('a \<Rightarrow> bool) \<Rightarrow> bool"} already goes back
   to @{cite church40}.  Isabelle declarations of certain higher-order
@@ -509,12 +509,12 @@
 
   Furthermore, a syntax translation transforms @{text "c_binder x\<^sub>1
   \<dots> x\<^sub>n b"} into iterated application @{text "c (\<lambda>x\<^sub>1. \<dots> c (\<lambda>x\<^sub>n. b)\<dots>)"}.
-  This works in both directions, for parsing and printing.  *}
+  This works in both directions, for parsing and printing.\<close>
 
 
-section {* Explicit notation \label{sec:notation} *}
+section \<open>Explicit notation \label{sec:notation}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def "no_type_notation"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -562,14 +562,14 @@
   works within an Isar proof body.
 
   \end{description}
-*}
+\<close>
 
 
-section {* The Pure syntax \label{sec:pure-syntax} *}
+section \<open>The Pure syntax \label{sec:pure-syntax}\<close>
 
-subsection {* Lexical matters \label{sec:inner-lex} *}
+subsection \<open>Lexical matters \label{sec:inner-lex}\<close>
 
-text {* The inner lexical syntax vaguely resembles the outer one
+text \<open>The inner lexical syntax vaguely resembles the outer one
   (\secref{sec:outer-lex}), but some details are different.  There are
   two main categories of inner syntax tokens:
 
@@ -617,12 +617,12 @@
   The derived categories @{syntax_def (inner) num_const}, and @{syntax_def
   (inner) float_const}, provide robust access to the respective tokens: the
   syntax tree holds a syntactic constant instead of a free variable.
-*}
+\<close>
 
 
-subsection {* Priority grammars \label{sec:priority-grammar} *}
+subsection \<open>Priority grammars \label{sec:priority-grammar}\<close>
 
-text {* A context-free grammar consists of a set of \emph{terminal
+text \<open>A context-free grammar consists of a set of \emph{terminal
   symbols}, a set of \emph{nonterminal symbols} and a set of
   \emph{productions}.  Productions have the form @{text "A = \<gamma>"},
   where @{text A} is a nonterminal and @{text \<gamma>} is a string of
@@ -692,12 +692,12 @@
               & @{text "|"} & @{verbatim "-"} @{text "A\<^sup>(\<^sup>3\<^sup>)"} & @{text "(3)"} \\
   \end{tabular}
   \end{center}
-*}
+\<close>
 
 
-subsection {* The Pure grammar \label{sec:pure-grammar} *}
+subsection \<open>The Pure grammar \label{sec:pure-grammar}\<close>
 
-text {* The priority grammar of the @{text "Pure"} theory is defined
+text \<open>The priority grammar of the @{text "Pure"} theory is defined
   approximately like this:
 
   \begin{center}
@@ -896,12 +896,12 @@
   translation rules (\secref{sec:syn-trans}), notably on the LHS.
 
   \end{itemize}
-*}
+\<close>
 
 
-subsection {* Inspecting the syntax *}
+subsection \<open>Inspecting the syntax\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "print_syntax"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
   \end{matharray}
@@ -956,12 +956,12 @@
   \end{description}
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Ambiguity of parsed expressions *}
+subsection \<open>Ambiguity of parsed expressions\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcll}
     @{attribute_def syntax_ambiguity_warning} & : & @{text attribute} & default @{text true} \\
     @{attribute_def syntax_ambiguity_limit} & : & @{text attribute} & default @{text 10} \\
@@ -991,12 +991,12 @@
   in case of an ambiguity.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Syntax transformations \label{sec:syntax-transformations} *}
+section \<open>Syntax transformations \label{sec:syntax-transformations}\<close>
 
-text {* The inner syntax engine of Isabelle provides separate
+text \<open>The inner syntax engine of Isabelle provides separate
   mechanisms to transform parse trees either via rewrite systems on
   first-order ASTs (\secref{sec:syn-trans}), or ML functions on ASTs
   or syntactic @{text "\<lambda>"}-terms (\secref{sec:tr-funs}).  This works
@@ -1042,12 +1042,12 @@
   or constants needs to be implemented as syntax transformation (see
   below).  Anything else is better done via check/uncheck: a prominent
   example application is the @{command abbreviation} concept of
-  Isabelle/Pure. *}
+  Isabelle/Pure.\<close>
 
 
-subsection {* Abstract syntax trees \label{sec:ast} *}
+subsection \<open>Abstract syntax trees \label{sec:ast}\<close>
 
-text {* The ML datatype @{ML_type Ast.ast} explicitly represents the
+text \<open>The ML datatype @{ML_type Ast.ast} explicitly represents the
   intermediate AST format that is used for syntax rewriting
   (\secref{sec:syn-trans}).  It is defined in ML as follows:
   \begin{ttbox}
@@ -1081,12 +1081,12 @@
   where @{verbatim "(\"_abs\" x t)"} becomes an @{ML Abs} node and
   occurrences of @{verbatim x} in @{verbatim t} are replaced by bound
   variables (represented as de-Bruijn indices).
-*}
+\<close>
 
 
-subsubsection {* AST constants versus variables *}
+subsubsection \<open>AST constants versus variables\<close>
 
-text {* Depending on the situation --- input syntax, output syntax,
+text \<open>Depending on the situation --- input syntax, output syntax,
   translation patterns --- the distinction of atomic ASTs as @{ML
   Ast.Constant} versus @{ML Ast.Variable} serves slightly different
   purposes.
@@ -1121,12 +1121,12 @@
   corresponds to an AST application of some constant for @{text foo}
   and variable arguments for @{text "'a"} and @{text "'b"}.  Note that
   the postfix application is merely a feature of the concrete syntax,
-  while in the AST the constructor occurs in head position.  *}
+  while in the AST the constructor occurs in head position.\<close>
 
 
-subsubsection {* Authentic syntax names *}
+subsubsection \<open>Authentic syntax names\<close>
 
-text {* Naming constant entities within ASTs is another delicate
+text \<open>Naming constant entities within ASTs is another delicate
   issue.  Unqualified names are resolved in the name space tables in
   the last stage of parsing, after all translations have been applied.
   Since syntax transformations do not know about this later name
@@ -1160,12 +1160,12 @@
   In other words, syntax transformations that operate on input terms
   written as prefix applications are difficult to make robust.
   Luckily, this case rarely occurs in practice, because syntax forms
-  to be translated usually correspond to some concrete notation. *}
+  to be translated usually correspond to some concrete notation.\<close>
 
 
-subsection {* Raw syntax and translations \label{sec:syn-trans} *}
+subsection \<open>Raw syntax and translations \label{sec:syn-trans}\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcll}
     @{command_def "nonterminal"} & : & @{text "theory \<rightarrow> theory"} \\
     @{command_def "syntax"} & : & @{text "theory \<rightarrow> theory"} \\
@@ -1337,11 +1337,11 @@
   List} in Isabelle/HOL.
 
   \end{itemize}
-*}
+\<close>
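+
+text \<open>A small sketch of raw syntax with a translation rule (the constant
+  @{text twice} and the notation @{verbatim "TWICE"} are hypothetical):\<close>
+
+definition twice :: "nat \<Rightarrow> nat"
+  where "twice n = n + n"
+
+syntax
+  "_Twice" :: "'a \<Rightarrow> 'a"  ("TWICE _" [100] 100)
+translations
+  "TWICE n" == "CONST twice n"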
 
-subsubsection {* Applying translation rules *}
+subsubsection \<open>Applying translation rules\<close>
 
-text {* As a term is being parsed or printed, an AST is generated as
+text \<open>As a term is being parsed or printed, an AST is generated as
   an intermediate form according to \figref{fig:parse-print}.  The AST
   is normalized by applying translation rules in the manner of a
   first-order term rewriting system.  We first examine how a single
@@ -1410,12 +1410,12 @@
   functions (see also \secref{sec:tr-funs}), which is in fact the same
   mechanism used in built-in @{keyword "binder"} declarations.
   \end{warn}
-*}
+\<close>
 
 
-subsection {* Syntax translation functions \label{sec:tr-funs} *}
+subsection \<open>Syntax translation functions \label{sec:tr-funs}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "parse_ast_translation"} & : & @{text "theory \<rightarrow> theory"} \\
     @{command_def "parse_translation"} & : & @{text "theory \<rightarrow> theory"} \\
@@ -1492,12 +1492,12 @@
   names occurring in parse trees (unqualified constants etc.).
 
   \end{description}
-*}
+\<close>
 
 
-subsubsection {* The translation strategy *}
+subsubsection \<open>The translation strategy\<close>
 
-text {* The different kinds of translation functions are invoked during
+text \<open>The different kinds of translation functions are invoked during
   the transformations between parse trees, ASTs and syntactic terms
   (cf.\ \figref{fig:parse-print}).  Whenever a combination of the form
   @{text "c x\<^sub>1 \<dots> x\<^sub>n"} is encountered, and a translation function
@@ -1545,20 +1545,20 @@
   the status of constant versus variable is not yet known during
   parsing.  This is in contrast to print translations, where constants
   are explicitly known from the given term in its fully internal form.
-*}
+\<close>
 
 
-subsection {* Built-in syntax transformations *}
+subsection \<open>Built-in syntax transformations\<close>
 
-text {*
+text \<open>
   Here are some further details of the main syntax transformation
   phases of \figref{fig:parse-print}.
-*}
+\<close>
 
 
-subsubsection {* Transforming parse trees to ASTs *}
+subsubsection \<open>Transforming parse trees to ASTs\<close>
 
-text {* The parse tree is the raw output of the parser.  It is
+text \<open>The parse tree is the raw output of the parser.  It is
   transformed into an AST according to some basic scheme that may be
   augmented by AST translation functions as explained in
   \secref{sec:tr-funs}.
@@ -1598,12 +1598,12 @@
   translations need to be ready to cope with them.  The built-in
   syntax transformation from parse trees to ASTs inserts additional
   constraints that represent source positions.
-*}
+\<close>
 
 
-subsubsection {* Transforming ASTs to terms *}
+subsubsection \<open>Transforming ASTs to terms\<close>
 
-text {* After application of macros (\secref{sec:syn-trans}), the AST
+text \<open>After application of macros (\secref{sec:syn-trans}), the AST
   is transformed into a term.  This term still lacks proper type
   information, but it might contain some constraints consisting of
   applications with head @{verbatim "_constrain"}, where the second
@@ -1620,12 +1620,12 @@
   bound variables are introduced by parse translations associated with
   certain syntax constants.  Thus @{verbatim "(_abs x x)"} eventually
   becomes a de-Bruijn term @{verbatim "Abs (\"x\", _, Bound 0)"}.
-*}
+\<close>
 
 
-subsubsection {* Printing of terms *}
+subsubsection \<open>Printing of terms\<close>
 
-text {* The output phase is essentially the inverse of the input
+text \<open>The output phase is essentially the inverse of the input
   phase.  Terms are translated via abstract syntax trees into
   pretty-printed text.
 
@@ -1672,6 +1672,6 @@
   \medskip White space is \emph{not} inserted automatically.  If
   blanks (or breaks) are required to separate tokens, they need to be
   specified in the mixfix declaration (\secref{sec:mixfix}).
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/ML_Tactic.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/ML_Tactic.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* ML tactic expressions *}
+chapter \<open>ML tactic expressions\<close>
 
-text {*
+text \<open>
   Isar proof methods closely resemble traditional tactics, when used
   in unstructured sequences of @{command "apply"} commands.
   Isabelle/Isar provides emulations for all major ML tactics of
@@ -22,12 +22,12 @@
   asm_simp_tac}~/ @{ML full_simp_tac}~/ @{ML asm_full_simp_tac}, there
   is also concrete syntax for augmenting the Simplifier context (the
   current ``simpset'') in a convenient way.
-*}
+\<close>
 
 
-section {* Resolution tactics *}
+section \<open>Resolution tactics\<close>
 
-text {*
+text \<open>
   Classic Isabelle provides several variant forms of tactics for
   single-step rule applications (based on higher-order resolution).
   The space of resolution tactics has the following main dimensions.
@@ -75,12 +75,12 @@
   Note that explicit goal addressing may usually be avoided by
   changing the order of subgoals with @{command "defer"} or @{command
   "prefer"} (see \secref{sec:tactic-commands}).
-*}
+\<close>
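+
+text \<open>For example, a small apply-style script composed of such
+  single-step rule applications (an illustrative sketch):\<close>
+
+lemma "A \<and> B \<longrightarrow> B \<and> A"
+  apply (rule impI)
+  apply (erule conjE)
+  apply (rule conjI)
+  apply assumption
+  apply assumption
+  done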
 
 
-section {* Simplifier tactics *}
+section \<open>Simplifier tactics\<close>
 
-text {* The main Simplifier tactics @{ML simp_tac} and variants are
+text \<open>The main Simplifier tactics @{ML simp_tac} and variants are
   all covered by the @{method simp} and @{method simp_all} methods
   (see \secref{sec:simplifier}).  Note that there is no individual
   goal addressing available, simplification acts either on the first
@@ -96,21 +96,21 @@
     @{ML asm_lr_simp_tac}~@{text "@{context} 1"} & & @{method simp}~@{text "(asm_lr)"} \\
   \end{tabular}
   \medskip
-*}
+\<close>
 
 
-section {* Classical Reasoner tactics *}
+section \<open>Classical Reasoner tactics\<close>
 
-text {* The Classical Reasoner provides a rather large number of
+text \<open>The Classical Reasoner provides a rather large number of
   variations of automated tactics, such as @{ML blast_tac}, @{ML
   fast_tac}, @{ML clarify_tac} etc.  The corresponding Isar methods
   usually share the same base name, such as @{method blast}, @{method
-  fast}, @{method clarify} etc.\ (see \secref{sec:classical}).  *}
+  fast}, @{method clarify} etc.\ (see \secref{sec:classical}).\<close>
 
 
-section {* Miscellaneous tactics *}
+section \<open>Miscellaneous tactics\<close>
 
-text {*
+text \<open>
   There are a few additional tactics defined in various theories of
   Isabelle/HOL, some of these also in Isabelle/FOL or Isabelle/ZF.
   The most common of these may be ported to Isar as follows.
@@ -123,12 +123,12 @@
       & @{text "\<approx>"} & @{text "simp only: split_tupled_all"} \\
       & @{text "\<lless>"} & @{text "clarify"} \\
   \end{tabular}
-*}
+\<close>
 
 
-section {* Tacticals *}
+section \<open>Tacticals\<close>
 
-text {*
+text \<open>
   Classic Isabelle provides a large number of tacticals for combination
   and modification of existing tactics.  This has been greatly reduced
   in Isar, providing the bare minimum of combinators only: ``@{text
@@ -172,6 +172,6 @@
   @{ML_text "REPEAT (FIRSTGOAL (resolve_tac ...))"}, is usually better
   expressed using the @{method intro} and @{method elim} methods of
   Isar (see \secref{sec:classical}).
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Misc.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Misc.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,11 +2,11 @@
 imports Base Main
 begin
 
-chapter {* Other commands *}
+chapter \<open>Other commands\<close>
 
-section {* Inspecting the context *}
+section \<open>Inspecting the context\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "print_theory"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
     @{command_def "print_methods"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@@ -110,6 +110,6 @@
   only the unused theorems in the current theory are displayed.
   
   \end{description}
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Outer_Syntax.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Outer_Syntax.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Outer syntax --- the theory language \label{ch:outer-syntax} *}
+chapter \<open>Outer syntax --- the theory language \label{ch:outer-syntax}\<close>
 
-text {*
+text \<open>
   The rather generic framework of Isabelle/Isar syntax emerges from
   three main syntactic categories: \emph{commands} of the top-level
   Isar engine (covering theory and proof elements), \emph{methods} for
@@ -61,12 +61,12 @@
     logic image).  Note that option @{verbatim "-L"} does both
     of this at the same time.
   \end{warn}
-*}
+\<close>
 
 
-section {* Commands *}
+section \<open>Commands\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "print_commands"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
     @{command_def "help"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
@@ -85,21 +85,21 @@
   commands according to the specified name patterns.
 
   \end{description}
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* Some common diagnostic commands are retrieved like this
-  (according to usual naming conventions): *}
+text \<open>Some common diagnostic commands are retrieved like this
+  (according to usual naming conventions):\<close>
 
 help "print"
 help "find"
 
 
-section {* Lexical matters \label{sec:outer-lex} *}
+section \<open>Lexical matters \label{sec:outer-lex}\<close>
 
-text {* The outer lexical syntax consists of three main categories of
+text \<open>The outer lexical syntax consists of three main categories of
   syntax tokens:
 
   \begin{enumerate}
@@ -211,21 +211,21 @@
   predefined Isabelle symbols that work well with these tools is given
   in \appref{app:symbols}.  Note that @{verbatim "\<lambda>"} does not belong
   to the @{text letter} category, since it is already used differently
-  in the Pure term language.  *}
+  in the Pure term language.\<close>
 
 
-section {* Common syntax entities *}
+section \<open>Common syntax entities\<close>
 
-text {*
+text \<open>
   We now introduce several basic syntactic entities, such as names,
   terms, and theorem specifications, which are factored out of the
   actual Isar language elements to be described later.
-*}
+\<close>
 
 
-subsection {* Names *}
+subsection \<open>Names\<close>
 
-text {* Entity @{syntax name} usually refers to any name of types,
+text \<open>Entity @{syntax name} usually refers to any name of types,
   constants, theorems etc.\ that are to be \emph{declared} or
   \emph{defined} (so qualified identifiers are excluded here).  Quoted
   strings provide an escape for non-identifier names or those ruled
@@ -241,12 +241,12 @@
     ;
     @{syntax_def nameref}: @{syntax name} | @{syntax longident}
   \<close>}
-*}
+\<close>
 
 
-subsection {* Numbers *}
+subsection \<open>Numbers\<close>
 
-text {* The outer lexical syntax (\secref{sec:outer-lex}) admits
+text \<open>The outer lexical syntax (\secref{sec:outer-lex}) admits
   natural numbers and floating point numbers.  These are combined as
   @{syntax int} and @{syntax real} as follows.
 
@@ -258,12 +258,12 @@
 
   Note that there is an overlap with the category @{syntax name},
   which also includes @{syntax nat}.
-*}
+\<close>
 
 
-subsection {* Comments \label{sec:comments} *}
+subsection \<open>Comments \label{sec:comments}\<close>
 
-text {* Large chunks of plain @{syntax text} are usually given
+text \<open>Large chunks of plain @{syntax text} are usually given
   @{syntax verbatim}, i.e.\ enclosed in @{verbatim "{"}@{verbatim
   "*"}~@{text "\<dots>"}~@{verbatim "*"}@{verbatim "}"}, or as @{syntax
   cartouche} @{text "\<open>\<dots>\<close>"}.  For convenience, any of the smaller text
@@ -276,12 +276,12 @@
     ;
     @{syntax_def comment}: '--' @{syntax text}
   \<close>}
-*}
+\<close>
 
 
-subsection {* Type classes, sorts and arities *}
+subsection \<open>Type classes, sorts and arities\<close>
 
-text {*
+text \<open>
   Classes are specified by plain names.  Sorts have a very simple
   inner syntax, which is either a single class name @{text c} or a
   list @{text "{c\<^sub>1, \<dots>, c\<^sub>n}"} referring to the
@@ -295,12 +295,12 @@
     ;
     @{syntax_def arity}: ('(' (@{syntax sort} + ',') ')')? @{syntax sort}
   \<close>}
-*}
+\<close>
 
 
-subsection {* Types and terms \label{sec:types-terms} *}
+subsection \<open>Types and terms \label{sec:types-terms}\<close>
 
-text {*
+text \<open>
   The actual inner Isabelle syntax, that of types and terms of the
   logic, is far too sophisticated to be modelled explicitly
   at the outer theory level.  Basically, any such entity has to be
@@ -346,12 +346,12 @@
       (() | (@{syntax typefree} ('::' @{syntax sort})?) |
         '(' ( (@{syntax typefree} ('::' @{syntax sort})?) + ',' ) ')') @{syntax name}
   \<close>}
-*}
+\<close>
 
 
-subsection {* Term patterns and declarations \label{sec:term-decls} *}
+subsection \<open>Term patterns and declarations \label{sec:term-decls}\<close>
 
-text {* Wherever explicit propositions (or term fragments) occur in a
+text \<open>Wherever explicit propositions (or term fragments) occur in a
   proof text, casual binding of schematic term variables may be
   specified via patterns of the form ``@{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"}''.
   This works both for @{syntax term} and @{syntax prop}.
@@ -385,12 +385,12 @@
   another level of iteration, with explicit @{keyword_ref "and"}
   separators; e.g.\ see @{command "fix"} and @{command "assume"} in
   \secref{sec:proof-context}.
-*}
+\<close>
 
 
-subsection {* Attributes and theorems \label{sec:syn-att} *}
+subsection \<open>Attributes and theorems \label{sec:syn-att}\<close>
 
-text {* Attributes have their own ``semi-inner'' syntax, in the sense
+text \<open>Attributes have their own ``semi-inner'' syntax, in the sense
   that input conforming to @{syntax args} below is parsed by the
   attribute a second time.  The attribute argument specifications may
   be any sequence of atomic entities (identifiers, strings etc.), or
@@ -462,6 +462,6 @@
     ;
     selection: '(' ((@{syntax nat} | @{syntax nat} '-' @{syntax nat}?) + ',') ')'
   \<close>}
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Preface.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Preface.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Preface *}
+chapter \<open>Preface\<close>
 
-text {*
+text \<open>
   The \emph{Isabelle} system essentially provides a generic
   infrastructure for building deductive systems (programmed in
   Standard ML), with a special focus on interactive theorem proving in
@@ -69,6 +69,6 @@
   the final human-readable outcome.  Typical examples are diagnostic
   commands that print terms or theorems according to the current
   context; other commands emulate old-style tactical theorem proving.
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Proof.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Proof.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Proofs \label{ch:proofs} *}
+chapter \<open>Proofs \label{ch:proofs}\<close>
 
-text {*
+text \<open>
   Proof commands perform transitions of Isar/VM machine
   configurations, which are block-structured, consisting of a stack of
   nodes with three main components: logical proof context, current
@@ -41,14 +41,14 @@
   language emerging that way from the different types of proof
   commands.  The main ideas of the overall Isar framework are
   explained in \chref{ch:isar-framework}.
-*}
+\<close>
 
 
-section {* Proof structure *}
+section \<open>Proof structure\<close>
 
-subsection {* Formal notepad *}
+subsection \<open>Formal notepad\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "notepad"} & : & @{text "local_theory \<rightarrow> proof(state)"} \\
   \end{matharray}
@@ -69,12 +69,12 @@
   @{command "oops"}.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Blocks *}
+subsection \<open>Blocks\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "next"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
     @{command_def "{"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
@@ -111,12 +111,12 @@
   the result exported at @{command "show"} time.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Omitting proofs *}
+subsection \<open>Omitting proofs\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "oops"} & : & @{text "proof \<rightarrow> local_theory | theory"} \\
   \end{matharray}
@@ -138,14 +138,14 @@
   logically sound manner.  Note that the Isabelle {\LaTeX} macros can
   be easily adapted to print something like ``@{text "\<dots>"}'' instead of
   the keyword ``@{command "oops"}''.
-*}
+\<close>
 
 
-section {* Statements *}
+section \<open>Statements\<close>
 
-subsection {* Context elements \label{sec:proof-context} *}
+subsection \<open>Context elements \label{sec:proof-context}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "fix"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
     @{command_def "assume"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
@@ -225,12 +225,12 @@
   The special name @{fact_ref prems} refers to all assumptions of the
   current context as a list of theorems.  This feature should be used
   with great care!  It is better avoided in final proof texts.
-*}
+\<close>
 
 
-subsection {* Term abbreviations \label{sec:term-abbrev} *}
+subsection \<open>Term abbreviations \label{sec:term-abbrev}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "let"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
     @{keyword_def "is"} & : & syntax \\
@@ -292,12 +292,12 @@
   @{text t} is bound to the special text variable ``@{variable "\<dots>"}''
   (three dots).  The canonical application of this convenience are
   calculational proofs (see \secref{sec:calculation}).
-*}
+\<close>
 
 
-subsection {* Facts and forward chaining \label{sec:proof-facts} *}
+subsection \<open>Facts and forward chaining \label{sec:proof-facts}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "note"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
     @{command_def "then"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
@@ -377,12 +377,12 @@
   insert any given facts before their usual operation.  Depending on
   the kind of procedure involved, the order of facts is less
   significant here.
-*}
+\<close>
 
 
-subsection {* Goals \label{sec:goals} *}
+subsection \<open>Goals \label{sec:goals}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "lemma"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
     @{command_def "theorem"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
@@ -524,14 +524,14 @@
   introductions, (2) in the resulting rule they become annotations for
   symbolic case splits, e.g.\ for the @{method_ref cases} method
   (\secref{sec:cases-induct}).
-*}
+\<close>
 
 
-section {* Refinement steps *}
+section \<open>Refinement steps\<close>
 
-subsection {* Proof method expressions \label{sec:proof-meth} *}
+subsection \<open>Proof method expressions \label{sec:proof-meth}\<close>
 
-text {* Proof methods are either basic ones, or expressions composed
+text \<open>Proof methods are either basic ones, or expressions composed
   of methods via ``@{verbatim ","}'' (sequential composition),
   ``@{verbatim "|"}'' (alternative choices), ``@{verbatim "?"}''
   (try), ``@{verbatim "+"}'' (repeat at least once), ``@{verbatim
@@ -568,12 +568,12 @@
     @{syntax_def goal_spec}:
       '[' (@{syntax nat} '-' @{syntax nat} | @{syntax nat} '-' | @{syntax nat} | '!' ) ']'
   \<close>}
-*}
+\<close>
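+
+text \<open>For example, sequential composition ``@{verbatim ","}'' and
+  repetition ``@{verbatim "+"}'' may be combined in a single method
+  expression (a small illustration):\<close>
+
+lemma "A \<Longrightarrow> A \<and> A"
+  by (rule conjI, assumption+)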
 
 
-subsection {* Initial and terminal proof steps \label{sec:proof-steps} *}
+subsection \<open>Initial and terminal proof steps \label{sec:proof-steps}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "proof"} & : & @{text "proof(prove) \<rightarrow> proof(state)"} \\
     @{command_def "qed"} & : & @{text "proof(state) \<rightarrow> proof(state) | local_theory | theory"} \\
@@ -679,12 +679,12 @@
   experimentation and top-down proof development.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Fundamental methods and attributes \label{sec:pure-meth-att} *}
+subsection \<open>Fundamental methods and attributes \label{sec:pure-meth-att}\<close>
 
-text {*
+text \<open>
   The following proof methods and attributes refer to basic logical
   operations of Isar.  Further methods and attributes are provided by
   several generic and object-logic specific tools and packages (see
@@ -831,12 +831,12 @@
   be specified as for @{attribute "of"} above.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Emulating tactic scripts \label{sec:tactic-commands} *}
+subsection \<open>Emulating tactic scripts \label{sec:tactic-commands}\<close>
 
-text {*
+text \<open>
   Isar provides separate commands to accommodate tactic-style
   proof scripts within the same system.  While being outside the
   orthodox Isar proof language, these might come in handy for
@@ -908,12 +908,12 @@
   such as @{command "apply"}.  A few additional emulations of actual
   tactics are provided as well; these would never be used in actual
   structured proofs, of course.
-*}
+\<close>
 
 
-subsection {* Defining proof methods *}
+subsection \<open>Defining proof methods\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "method_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   \end{matharray}
@@ -938,26 +938,26 @@
   Here are some example method definitions:
 
   \end{description}
-*}
+\<close>
 
-  method_setup my_method1 = {*
+  method_setup my_method1 = \<open>
     Scan.succeed (K (SIMPLE_METHOD' (fn i: int => no_tac)))
-  *}  "my first method (without any arguments)"
+\<close>  "my first method (without any arguments)"
 
-  method_setup my_method2 = {*
+  method_setup my_method2 = \<open>
     Scan.succeed (fn ctxt: Proof.context =>
       SIMPLE_METHOD' (fn i: int => no_tac))
-  *}  "my second method (with context)"
+\<close>  "my second method (with context)"
 
-  method_setup my_method3 = {*
+  method_setup my_method3 = \<open>
     Attrib.thms >> (fn thms: thm list => fn ctxt: Proof.context =>
       SIMPLE_METHOD' (fn i: int => no_tac))
-  *}  "my third method (with theorem arguments and context)"
+\<close>  "my third method (with theorem arguments and context)"
 
 
-section {* Generalized elimination \label{sec:obtain} *}
+section \<open>Generalized elimination \label{sec:obtain}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "obtain"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
     @{command_def "guess"}@{text "\<^sup>*"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
@@ -1032,12 +1032,12 @@
   It is important to note that the facts introduced by @{command
   "obtain"} and @{command "guess"} may not be polymorphic: any
   type-variables occurring here are fixed in the present context!
-*}
+\<close>
 
 
-section {* Calculational reasoning \label{sec:calculation} *}
+section \<open>Calculational reasoning \label{sec:calculation}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "also"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
     @{command_def "finally"} & : & @{text "proof(state) \<rightarrow> proof(chain)"} \\
@@ -1143,14 +1143,14 @@
   "y = x"}~@{command ".."}''.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Proof by cases and induction \label{sec:cases-induct} *}
+section \<open>Proof by cases and induction \label{sec:cases-induct}\<close>
 
-subsection {* Rule contexts *}
+subsection \<open>Rule contexts\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "case"} & : & @{text "proof(state) \<rightarrow> proof(state)"} \\
     @{command_def "print_cases"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@@ -1281,12 +1281,12 @@
   @{attribute coinduct} declarations.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Proof methods *}
+subsection \<open>Proof methods\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{method_def cases} & : & @{text method} \\
     @{method_def induct} & : & @{text method} \\
@@ -1497,12 +1497,12 @@
   remaining facts are inserted into the goal verbatim before the
   actual @{text cases}, @{text induct}, or @{text coinduct} rule is
   applied.
-*}
+\<close>
 
 
-subsection {* Declaring rules *}
+subsection \<open>Declaring rules\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "print_induct_rules"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
     @{attribute_def cases} & : & @{text attribute} \\
@@ -1547,6 +1547,6 @@
   consumes}~@{text 1} for ``predicate'' / ``set'' rules.
 
   \end{description}
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Quick_Reference.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Quick_Reference.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,13 +2,13 @@
 imports Base Main
 begin
 
-chapter {* Isabelle/Isar quick reference \label{ap:refcard} *}
+chapter \<open>Isabelle/Isar quick reference \label{ap:refcard}\<close>
 
-section {* Proof commands *}
+section \<open>Proof commands\<close>
 
-subsection {* Primitives and basic syntax *}
+subsection \<open>Primitives and basic syntax\<close>
 
-text {*
+text \<open>
   \begin{tabular}{ll}
     @{command "fix"}~@{text x} & augment context by @{text "\<And>x. \<box>"} \\
     @{command "assume"}~@{text "a: \<phi>"} & augment context by @{text "\<phi> \<Longrightarrow> \<box>"} \\
@@ -44,12 +44,12 @@
     @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
     & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
   \end{tabular}
-*}
+\<close>
 
 
-subsection {* Abbreviations and synonyms *}
+subsection \<open>Abbreviations and synonyms\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcl}
     @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} & @{text "\<equiv>"} &
       @{command "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"} \\
@@ -63,12 +63,12 @@
     @{command "from"}~@{text this}~@{command "have"} & @{text "\<equiv>"} & @{command "hence"} \\
     @{command "from"}~@{text this}~@{command "show"} & @{text "\<equiv>"} & @{command "thus"} \\
   \end{tabular}
-*}
+\<close>
 
 
-subsection {* Derived elements *}
+subsection \<open>Derived elements\<close>
 
-text {*
+text \<open>
   \begin{tabular}{rcl}
     @{command "also"}@{text "\<^sub>0"} & @{text "\<approx>"} &
       @{command "note"}~@{text "calculation = this"} \\
@@ -91,12 +91,12 @@
     @{command "sorry"} & @{text "\<approx>"} &
       @{command "by"}~@{text cheating} \\
   \end{tabular}
-*}
+\<close>
 
 
-subsection {* Diagnostic commands *}
+subsection \<open>Diagnostic commands\<close>
 
-text {*
+text \<open>
   \begin{tabular}{ll}
     @{command "print_state"} & print proof state \\
     @{command "print_statement"} & print fact in long statement form \\
@@ -105,12 +105,12 @@
     @{command "term"}~@{text t} & print term \\
     @{command "typ"}~@{text \<tau>} & print type \\
   \end{tabular}
-*}
+\<close>
 
 
-section {* Proof methods *}
+section \<open>Proof methods\<close>
 
-text {*
+text \<open>
   \begin{tabular}{ll}
     \multicolumn{2}{l}{\textbf{Single steps (forward-chaining facts)}} \\[0.5ex]
     @{method assumption} & apply some assumption \\
@@ -135,12 +135,12 @@
     @{method auto}, @{method force} & Simplifier + Classical Reasoner \\
     @{method arith} & Arithmetic procedures \\
   \end{tabular}
-*}
+\<close>
 
 
-section {* Attributes *}
+section \<open>Attributes\<close>
 
-text {*
+text \<open>
   \begin{tabular}{ll}
     \multicolumn{2}{l}{\textbf{Rules}} \\[0.5ex]
     @{attribute OF}~@{text a} & rule resolved with facts (skipping ``@{text _}'') \\
@@ -159,12 +159,12 @@
     @{attribute trans} & transitivity rule \\
     @{attribute sym} & symmetry rule \\
   \end{tabular}
-*}
+\<close>
 
 
-section {* Rule declarations and methods *}
+section \<open>Rule declarations and methods\<close>
 
-text {*
+text \<open>
   \begin{tabular}{l|lllll}
       & @{method rule} & @{method iprover} & @{method blast} & @{method simp} & @{method auto} \\
       &                &                   & @{method fast} & @{method simp_all} & @{method force} \\
@@ -190,14 +190,14 @@
     @{attribute split}
       &                &                    &                      & @{text "\<times>"}         & @{text "\<times>"} \\
   \end{tabular}
-*}
+\<close>
 
 
-section {* Emulating tactic scripts *}
+section \<open>Emulating tactic scripts\<close>
 
-subsection {* Commands *}
+subsection \<open>Commands\<close>
 
-text {*
+text \<open>
   \begin{tabular}{ll}
     @{command "apply"}~@{text m} & apply proof method at initial position \\
     @{command "apply_end"}~@{text m} & apply proof method near terminal position \\
@@ -206,12 +206,12 @@
     @{command "prefer"}~@{text n} & move subgoal to beginning \\
     @{command "back"} & backtrack last command \\
   \end{tabular}
-*}
+\<close>
 
 
-subsection {* Methods *}
+subsection \<open>Methods\<close>
 
-text {*
+text \<open>
   \begin{tabular}{ll}
     @{method rule_tac}~@{text insts} & resolution (with instantiation) \\
     @{method erule_tac}~@{text insts} & elim-resolution (with instantiation) \\
@@ -227,6 +227,6 @@
     @{method induct_tac}~@{text x} & induction (datatypes) \\
     @{method ind_cases}~@{text t} & exhaustion + simplification (inductive predicates) \\
   \end{tabular}
-*}
+\<close>
 
 end
--- a/src/Doc/Isar_Ref/Spec.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Spec.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Specifications *}
+chapter \<open>Specifications\<close>
 
-text {*
+text \<open>
   The Isabelle/Isar theory format integrates specifications and
   proofs, supporting interactive development with unlimited undo
   operations.  There is an integrated document preparation system (see
@@ -19,12 +19,12 @@
   qed}).  Some theory specification mechanisms also require a proof,
   such as @{command typedef} in HOL, which demands non-emptiness of
   the representing sets.
-*}
+\<close>
 
 
-section {* Defining theories \label{sec:begin-thy} *}
+section \<open>Defining theories \label{sec:begin-thy}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "theory"} & : & @{text "toplevel \<rightarrow> theory"} \\
     @{command_def (global) "end"} & : & @{text "theory \<rightarrow> toplevel"} \\
@@ -95,12 +95,12 @@
   "end"}, according to the usual rules for nested blocks.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Local theory targets \label{sec:target} *}
+section \<open>Local theory targets \label{sec:target}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "context"} & : & @{text "theory \<rightarrow> local_theory"} \\
     @{command_def (local) "end"} & : & @{text "local_theory \<rightarrow> theory"} \\
@@ -182,12 +182,12 @@
   \medskip The Isabelle/HOL library contains numerous applications of
   locales and classes, e.g.\ see @{file "~~/src/HOL/Algebra"}.  An
   example of an unnamed auxiliary context is given in @{file
-  "~~/src/HOL/Isar_Examples/Group_Context.thy"}.  *}
+  "~~/src/HOL/Isar_Examples/Group_Context.thy"}.\<close>
 
 
-section {* Bundled declarations \label{sec:bundle} *}
+section \<open>Bundled declarations \label{sec:bundle}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "bundle"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def "print_bundles"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow> "} \\
@@ -249,7 +249,7 @@
   \end{description}
 
   Here is an artificial example of bundling various configuration
-  options: *}
+  options:\<close>
 
 bundle trace = [[simp_trace, linarith_trace, metis_trace, smt_trace]]
 
@@ -257,9 +257,9 @@
   including trace by metis
 
 
-section {* Term definitions \label{sec:term-definitions} *}
+section \<open>Term definitions \label{sec:term-definitions}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "definition"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{attribute_def "defn"} & : & @{text attribute} \\
@@ -324,12 +324,12 @@
   of the current context.
   
   \end{description}
-*}
+\<close>
 
 
-section {* Axiomatizations \label{sec:axiomatizations} *}
+section \<open>Axiomatizations \label{sec:axiomatizations}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "axiomatization"} & : & @{text "theory \<rightarrow> theory"} & (axiomatic!) \\
   \end{matharray}
@@ -366,12 +366,12 @@
   logic and its libraries.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Generic declarations *}
+section \<open>Generic declarations\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "declaration"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def "syntax_declaration"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -417,12 +417,12 @@
   of applying attributes as included in the theorem specification.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Locales \label{sec:locale} *}
+section \<open>Locales \label{sec:locale}\<close>
 
-text {*
+text \<open>
   A locale is a functor that maps parameters (including implicit type
   parameters) and a specification to a list of declarations.  The
   syntax of locales is modeled after the Isar proof context commands
@@ -443,12 +443,12 @@
   redundant if it is subsumed by an instance encountered earlier.  A
   more detailed description of this process is available elsewhere
   @{cite Ballarin2014}.
-*}
+\<close>
 
 
-subsection {* Locale expressions \label{sec:locale-expr} *}
+subsection \<open>Locale expressions \label{sec:locale-expr}\<close>
 
-text {*
+text \<open>
   A \emph{locale expression} denotes a context composed of instances
   of existing locales.  The context consists of the declaration
   elements from the locale instances.  Redundant locale instances are
@@ -489,12 +489,12 @@
   the default is ``mandatory'', for @{command "locale"} and @{command
   "sublocale"} the default is ``optional''.  Qualifiers play no role
   in determining whether one locale instance subsumes another.
-*}
+\<close>
 
 
-subsection {* Locale declarations *}
+subsection \<open>Locale declarations\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "locale"} & : & @{text "theory \<rightarrow> local_theory"} \\
     @{command_def "print_locale"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@@ -631,12 +631,12 @@
   by the current context are discharged automatically.
 
   \end{description}
-*}
+\<close>
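+
+text \<open>A hypothetical example of a locale declaration, with all names
+  invented for illustration:\<close>
+
+locale partial_order =
+  fixes le :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infixl "\<sqsubseteq>" 50)
+  assumes refl: "x \<sqsubseteq> x"
+    and anti_sym: "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> x \<Longrightarrow> x = y"
+    and trans: "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> x \<sqsubseteq> z"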
 
 
-subsection {* Locale interpretation *}
+subsection \<open>Locale interpretation\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "interpretation"} & : & @{text "theory | local_theory \<rightarrow> proof(prove)"} \\
     @{command_def "interpret"} & : & @{text "proof(state) | proof(chain) \<rightarrow> proof(prove)"} \\
@@ -793,12 +793,12 @@
     first.  The locale package does not attempt to remove subsumed
     interpretations.
   \end{warn}
-*}
+\<close>
 
 
-section {* Classes \label{sec:class} *}
+section \<open>Classes \label{sec:class}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "class"} & : & @{text "theory \<rightarrow> local_theory"} \\
     @{command_def "instantiation"} & : & @{text "theory \<rightarrow> local_theory"} \\
@@ -909,12 +909,12 @@
   single ``@{command ".."}'' proof step.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* The class target *}
+subsection \<open>The class target\<close>
 
-text {*
+text \<open>
   %FIXME check
 
   A named context may refer to a locale (cf.\ \secref{sec:target}).
@@ -936,12 +936,12 @@
   needed.
   
   \end{itemize}
-*}
+\<close>
 
 
-subsection {* Co-regularity of type classes and arities *}
+subsection \<open>Co-regularity of type classes and arities\<close>
 
-text {* The class relation together with the collection of
+text \<open>The class relation together with the collection of
   type-constructor arities must obey the principle of
   \emph{co-regularity} as defined below.
 
@@ -973,12 +973,12 @@
   \medskip Co-regularity is a very fundamental property of the
   order-sorted algebra of types.  For example, it entails principal
   types and most general unifiers, e.g.\ see @{cite "nipkow-prehofer"}.
-*}
+\<close>
 
 
-section {* Unrestricted overloading *}
+section \<open>Unrestricted overloading\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "overloading"} & : & @{text "theory \<rightarrow> local_theory"} \\
   \end{matharray}
@@ -1020,12 +1020,12 @@
   malformed theory specifications!
 
   \end{description}
-*}
+\<close>
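+
+text \<open>A schematic example, with all names invented for illustration:\<close>
+
+consts my_default :: 'a
+
+overloading
+  my_default_nat \<equiv> "my_default :: nat"
+begin
+  definition "my_default_nat = (0::nat)"
+end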
 
 
-section {* Incorporating ML code \label{sec:ML} *}
+section \<open>Incorporating ML code \label{sec:ML}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "SML_file"} & : & @{text "theory \<rightarrow> theory"} \\
     @{command_def "ML_file"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -1108,23 +1108,23 @@
   other serves as a parameter. Here are examples of these two cases:
 
   \end{description}
-*}
+\<close>
 
-  attribute_setup my_rule = {*
+  attribute_setup my_rule = \<open>
     Attrib.thms >> (fn ths =>
       Thm.rule_attribute
         (fn context: Context.generic => fn th: thm =>
           let val th' = th OF ths
-          in th' end)) *}
+          in th' end))\<close>
 
-  attribute_setup my_declaration = {*
+  attribute_setup my_declaration = \<open>
     Attrib.thms >> (fn ths =>
       Thm.declaration_attribute
         (fn th: thm => fn context: Context.generic =>
           let val context' = context
-          in context' end)) *}
+          in context' end))\<close>
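+
+text \<open>For illustration only: given hypothetical facts @{text foo} and
+  @{text bar} in the current context, the first attribute could be used
+  as @{text "foo [my_rule bar]"}, behaving like a restricted variant of
+  the builtin @{attribute OF}.\<close>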
 
-text {*
+text \<open>
   \begin{description}
 
   \item @{attribute ML_print_depth} controls the printing depth of the ML
@@ -1149,14 +1149,14 @@
   happens.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Primitive specification elements *}
+section \<open>Primitive specification elements\<close>
 
-subsection {* Sorts *}
+subsection \<open>Sorts\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "default_sort"} & : & @{text "local_theory \<rightarrow> local_theory"}
   \end{matharray}
@@ -1181,12 +1181,12 @@
   are joined.
 
   \end{description}
-*}
+\<close>
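+
+text \<open>For example, the main HOL theory declares @{text "default_sort
+  type"} early in its development, so that otherwise unconstrained type
+  variables range over the sort of all HOL types.\<close>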
 
 
-subsection {* Types \label{sec:types-pure} *}
+subsection \<open>Types \label{sec:types-pure}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "type_synonym"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def "typedecl"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -1224,12 +1224,12 @@
   (\secref{sec:hol-typedef}), or any other extension that people might have
   introduced elsewhere.
   \end{warn}
-*}
+\<close>
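+
+text \<open>A contrived example, with a made-up name:\<close>
+
+type_synonym 'a endo = "'a \<Rightarrow> 'a"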
 
 
-subsection {* Constants and definitions \label{sec:consts} *}
+subsection \<open>Constants and definitions \label{sec:consts}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "consts"} & : & @{text "theory \<rightarrow> theory"} \\
     @{command_def "defs"} & : & @{text "theory \<rightarrow> theory"} \\
@@ -1301,12 +1301,12 @@
   special type than that of the corresponding constant declaration.
   
   \end{description}
-*}
+\<close>
 
 
-section {* Naming existing theorems \label{sec:theorems} *}
+section \<open>Naming existing theorems \label{sec:theorems}\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "lemmas"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
     @{command_def "theorems"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@@ -1340,12 +1340,12 @@
   canonical declaration order of the text structure.
 
   \end{description}
-*}
+\<close>
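+
+text \<open>For example, collecting some standard equational facts of the
+  library under a new (made-up) name:\<close>
+
+lemmas my_equational_rules = refl sym trans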
 
 
-section {* Oracles *}
+section \<open>Oracles\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcll}
     @{command_def "oracle"} & : & @{text "theory \<rightarrow> theory"} & (axiomatic!) \\
   \end{matharray}
@@ -1383,12 +1383,12 @@
   See @{file "~~/src/HOL/ex/Iff_Oracle.thy"} for a worked example of
   defining a new primitive rule as oracle, and turning it into a proof
   method.
-*}
+\<close>
 
 
-section {* Name spaces *}
+section \<open>Name spaces\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def "hide_class"} & : & @{text "theory \<rightarrow> theory"} \\
     @{command_def "hide_type"} & : & @{text "theory \<rightarrow> theory"} \\
@@ -1423,6 +1423,6 @@
   constants, and facts, respectively.
   
   \end{description}
-*}
+\<close>
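+
+text \<open>For example, @{text "hide_const (open) id"} would hide the base
+  name of the library constant @{const id}, while the qualified name
+  @{text Fun.id} remains accessible.\<close>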
 
 end
--- a/src/Doc/Isar_Ref/Symbols.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Symbols.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base Main
 begin
 
-chapter {* Predefined Isabelle symbols \label{app:symbols} *}
+chapter \<open>Predefined Isabelle symbols \label{app:symbols}\<close>
 
-text {*
+text \<open>
   Isabelle supports an infinite number of non-ASCII symbols, which are
   represented in source text as @{verbatim "\\"}@{verbatim "<"}@{text
   name}@{verbatim ">"} (where @{text name} may be any identifier).  It
@@ -36,6 +36,6 @@
   \input{syms}  
   \end{isabellebody}
   \end{center}
-*}
+\<close>
 
 end
\ No newline at end of file
--- a/src/Doc/Isar_Ref/Synopsis.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/Isar_Ref/Synopsis.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,120 +2,120 @@
 imports Base Main
 begin
 
-chapter {* Synopsis *}
+chapter \<open>Synopsis\<close>
 
-section {* Notepad *}
+section \<open>Notepad\<close>
 
-text {*
+text \<open>
   An Isar proof body serves as a mathematical notepad for composing
   logical content, consisting of types, terms, and facts.
-*}
+\<close>
 
 
-subsection {* Types and terms *}
+subsection \<open>Types and terms\<close>
 
 notepad
 begin
-  txt {* Locally fixed entities: *}
-  fix x   -- {* local constant, without any type information yet *}
-  fix x :: 'a  -- {* variant with explicit type-constraint for subsequent use*}
+  txt \<open>Locally fixed entities:\<close>
+  fix x   -- \<open>local constant, without any type information yet\<close>
+  fix x :: 'a  -- \<open>variant with explicit type-constraint for subsequent use\<close>
 
   fix a b
-  assume "a = b"  -- {* type assignment at first occurrence in concrete term *}
+  assume "a = b"  -- \<open>type assignment at first occurrence in concrete term\<close>
 
-  txt {* Definitions (non-polymorphic): *}
+  txt \<open>Definitions (non-polymorphic):\<close>
   def x \<equiv> "t::'a"
 
-  txt {* Abbreviations (polymorphic): *}
+  txt \<open>Abbreviations (polymorphic):\<close>
   let ?f = "\<lambda>x. x"
   term "?f ?f"
 
-  txt {* Notation: *}
+  txt \<open>Notation:\<close>
   write x  ("***")
 end
 
 
-subsection {* Facts *}
+subsection \<open>Facts\<close>
 
-text {*
+text \<open>
   A fact is a simultaneous list of theorems.
-*}
+\<close>
 
 
-subsubsection {* Producing facts *}
+subsubsection \<open>Producing facts\<close>
 
 notepad
 begin
 
-  txt {* Via assumption (``lambda''): *}
+  txt \<open>Via assumption (``lambda''):\<close>
   assume a: A
 
-  txt {* Via proof (``let''): *}
+  txt \<open>Via proof (``let''):\<close>
   have b: B sorry
 
-  txt {* Via abbreviation (``let''): *}
+  txt \<open>Via abbreviation (``let''):\<close>
   note c = a b
 
 end
 
 
-subsubsection {* Referencing facts *}
+subsubsection \<open>Referencing facts\<close>
 
 notepad
 begin
-  txt {* Via explicit name: *}
+  txt \<open>Via explicit name:\<close>
   assume a: A
   note a
 
-  txt {* Via implicit name: *}
+  txt \<open>Via implicit name:\<close>
   assume A
   note this
 
-  txt {* Via literal proposition (unification with results from the proof text): *}
+  txt \<open>Via literal proposition (unification with results from the proof text):\<close>
   assume A
-  note `A`
+  note \<open>A\<close>
 
   assume "\<And>x. B x"
-  note `B a`
-  note `B b`
+  note \<open>B a\<close>
+  note \<open>B b\<close>
 end
 
 
-subsubsection {* Manipulating facts *}
+subsubsection \<open>Manipulating facts\<close>
 
 notepad
 begin
-  txt {* Instantiation: *}
+  txt \<open>Instantiation:\<close>
   assume a: "\<And>x. B x"
   note a
   note a [of b]
   note a [where x = b]
 
-  txt {* Backchaining: *}
+  txt \<open>Backchaining:\<close>
   assume 1: A
   assume 2: "A \<Longrightarrow> C"
   note 2 [OF 1]
   note 1 [THEN 2]
 
-  txt {* Symmetric results: *}
+  txt \<open>Symmetric results:\<close>
   assume "x = y"
   note this [symmetric]
 
   assume "x \<noteq> y"
   note this [symmetric]
 
-  txt {* Adhoc-simplification (take care!): *}
+  txt \<open>Adhoc-simplification (take care!):\<close>
   assume "P ([] @ xs)"
   note this [simplified]
 end
 
 
-subsubsection {* Projections *}
+subsubsection \<open>Projections\<close>
 
-text {*
+text \<open>
   Isar facts may consist of multiple theorems.  There is notation to project
   interval ranges.
-*}
+\<close>
 
 notepad
 begin
@@ -126,9 +126,9 @@
 end
 
 
-subsubsection {* Naming conventions *}
+subsubsection \<open>Naming conventions\<close>
 
-text {*
+text \<open>
   \begin{itemize}
 
   \item Lower-case identifiers are usually preferred.
@@ -146,15 +146,15 @@
   "**"}, @{text "***"}).
 
   \end{itemize}
-*}
+\<close>
 
 
-subsection {* Block structure *}
+subsection \<open>Block structure\<close>
 
-text {*
+text \<open>
   The formal notepad is block structured.  The fact produced by the last
   entry of a block is exported into the outer context.
-*}
+\<close>
 
 notepad
 begin
@@ -164,14 +164,14 @@
     note a b
   }
   note this
-  note `A`
-  note `B`
+  note \<open>A\<close>
+  note \<open>B\<close>
 end
 
-text {* Explicit blocks as well as implicit blocks of nested goal
+text \<open>Explicit blocks as well as implicit blocks of nested goal
   statements (e.g.\ @{command have}) automatically introduce one extra
   pair of parentheses in reserve.  The @{command next} command allows one
-  to ``jump'' between these sub-blocks. *}
+  to ``jump'' between these sub-blocks.\<close>
 
 notepad
 begin
@@ -189,7 +189,7 @@
     qed
   }
 
-  txt {* Alternative version with explicit parentheses everywhere: *}
+  txt \<open>Alternative version with explicit parentheses everywhere:\<close>
 
   {
     {
@@ -214,16 +214,16 @@
 end
 
 
-section {* Calculational reasoning \label{sec:calculations-synopsis} *}
+section \<open>Calculational reasoning \label{sec:calculations-synopsis}\<close>
 
-text {*
+text \<open>
   For example, see @{file "~~/src/HOL/Isar_Examples/Group.thy"}.
-*}
+\<close>
 
 
-subsection {* Special names in Isar proofs *}
+subsection \<open>Special names in Isar proofs\<close>
 
-text {*
+text \<open>
   \begin{itemize}
 
   \item term @{text "?thesis"} --- the main conclusion of the
@@ -235,7 +235,7 @@
   \item fact @{text "this"} --- the last result produced in the text
 
   \end{itemize}
-*}
+\<close>
 
 notepad
 begin
@@ -243,23 +243,23 @@
   proof -
     term ?thesis
     show ?thesis sorry
-    term ?thesis  -- {* static! *}
+    term ?thesis  -- \<open>static!\<close>
   qed
   term "\<dots>"
   thm this
 end
 
-text {* Calculational reasoning maintains the special fact called
+text \<open>Calculational reasoning maintains the special fact called
   ``@{text calculation}'' in the background.  Certain language
   elements combine primary @{text this} with secondary @{text
-  calculation}. *}
+  calculation}.\<close>
 
 
-subsection {* Transitive chains *}
+subsection \<open>Transitive chains\<close>
 
-text {* The Idea is to combine @{text this} and @{text calculation}
+text \<open>The idea is to combine @{text this} and @{text calculation}
   via typical @{text trans} rules (see also @{command
-  print_trans_rules}): *}
+  print_trans_rules}):\<close>
 
 thm trans
 thm less_trans
@@ -267,7 +267,7 @@
 
 notepad
 begin
-  txt {* Plain bottom-up calculation: *}
+  txt \<open>Plain bottom-up calculation:\<close>
   have "a = b" sorry
   also
   have "b = c" sorry
@@ -276,7 +276,7 @@
   finally
   have "a = d" .
 
-  txt {* Variant using the @{text "\<dots>"} abbreviation: *}
+  txt \<open>Variant using the @{text "\<dots>"} abbreviation:\<close>
   have "a = b" sorry
   also
   have "\<dots> = c" sorry
@@ -285,7 +285,7 @@
   finally
   have "a = d" .
 
-  txt {* Top-down version with explicit claim at the head: *}
+  txt \<open>Top-down version with explicit claim at the head:\<close>
   have "a = d"
   proof -
     have "a = b" sorry
@@ -297,7 +297,7 @@
     show ?thesis .
   qed
 next
-  txt {* Mixed inequalities (require suitable base type): *}
+  txt \<open>Mixed inequalities (require suitable base type):\<close>
   fix a b c d :: nat
 
   have "a < b" sorry
@@ -310,9 +310,9 @@
 end
 
 
-subsubsection {* Notes *}
+subsubsection \<open>Notes\<close>
 
-text {*
+text \<open>
   \begin{itemize}
 
   \item The notion of @{text trans} rule is very general due to the
@@ -322,17 +322,17 @@
   about the operational details of higher-order unification.
 
   \end{itemize}
-*}
+\<close>
 
 
-subsection {* Degenerate calculations and bigstep reasoning *}
+subsection \<open>Degenerate calculations and bigstep reasoning\<close>
 
-text {* The Idea is to append @{text this} to @{text calculation},
-  without rule composition.  *}
+text \<open>The idea is to append @{text this} to @{text calculation},
+  without rule composition.\<close>
 
 notepad
 begin
-  txt {* A vacuous proof: *}
+  txt \<open>A vacuous proof:\<close>
   have A sorry
   moreover
   have B sorry
@@ -341,7 +341,7 @@
   ultimately
   have A and B and C .
 next
-  txt {* Slightly more content (trivial bigstep reasoning): *}
+  txt \<open>Slightly more content (trivial bigstep reasoning):\<close>
   have A sorry
   moreover
   have B sorry
@@ -350,7 +350,7 @@
   ultimately
   have "A \<and> B \<and> C" by blast
 next
-  txt {* More ambitious bigstep reasoning involving structured results: *}
+  txt \<open>More ambitious bigstep reasoning involving structured results:\<close>
   have "A \<or> B \<or> C" sorry
   moreover
   { assume A have R sorry }
@@ -359,17 +359,17 @@
   moreover
   { assume C have R sorry }
   ultimately
-  have R by blast  -- {* ``big-bang integration'' of proof blocks (occasionally fragile) *}
+  have R by blast  -- \<open>``big-bang integration'' of proof blocks (occasionally fragile)\<close>
 end
 
 
-section {* Induction *}
+section \<open>Induction\<close>
 
-subsection {* Induction as Natural Deduction *}
+subsection \<open>Induction as Natural Deduction\<close>
 
-text {* In principle, induction is just a special case of Natural
+text \<open>In principle, induction is just a special case of Natural
   Deduction (see also \secref{sec:natural-deduction-synopsis}).  For
-  example: *}
+  example:\<close>
 
 thm nat.induct
 print_statement nat.induct
@@ -378,7 +378,7 @@
 begin
   fix n :: nat
   have "P n"
-  proof (rule nat.induct)  -- {* fragile rule application! *}
+  proof (rule nat.induct)  -- \<open>fragile rule application!\<close>
     show "P 0" sorry
   next
     fix n :: nat
@@ -387,7 +387,7 @@
   qed
 end
 
-text {*
+text \<open>
   In practice, much more proof infrastructure is required.
 
   The proof method @{method induct} provides:
@@ -401,7 +401,7 @@
     parameters, premises, etc.
 
   \end{itemize}
-*}
+\<close>
 
 notepad
 begin
@@ -417,9 +417,9 @@
 end
 
 
-subsubsection {* Example *}
+subsubsection \<open>Example\<close>
 
-text {*
+text \<open>
   The subsequent example combines the following proof patterns:
   \begin{itemize}
 
@@ -432,7 +432,7 @@
   \item solving local claims within the calculation by simplification
 
   \end{itemize}
-*}
+\<close>
 
 lemma
   fixes n :: nat
@@ -451,19 +451,19 @@
   finally show ?case .
 qed
 
-text {* This demonstrates how induction proofs can be done without
-  having to consider the raw Natural Deduction structure. *}
+text \<open>This demonstrates how induction proofs can be done without
+  having to consider the raw Natural Deduction structure.\<close>
 
 
-subsection {* Induction with local parameters and premises *}
+subsection \<open>Induction with local parameters and premises\<close>
 
-text {* Idea: Pure rule statements are passed through the induction
+text \<open>Idea: Pure rule statements are passed through the induction
   rule.  This achieves convenient proof patterns, thanks to some
   internal trickery in the @{method induct} method.
 
   Important: Using compact HOL formulae with @{text "\<forall>/\<longrightarrow>"} is a
   well-known anti-pattern! It would produce useless formal noise.
-*}
+\<close>
 
 notepad
 begin
@@ -477,17 +477,17 @@
     show "P 0" sorry
   next
     case (Suc n)
-    from `P n` show "P (Suc n)" sorry
+    from \<open>P n\<close> show "P (Suc n)" sorry
   qed
 
   have "A n \<Longrightarrow> P n"
   proof (induct n)
     case 0
-    from `A 0` show "P 0" sorry
+    from \<open>A 0\<close> show "P 0" sorry
   next
     case (Suc n)
-    from `A n \<Longrightarrow> P n`
-      and `A (Suc n)` show "P (Suc n)" sorry
+    from \<open>A n \<Longrightarrow> P n\<close>
+      and \<open>A (Suc n)\<close> show "P (Suc n)" sorry
   qed
 
   have "\<And>x. Q x n"
@@ -496,19 +496,19 @@
     show "Q x 0" sorry
   next
     case (Suc n)
-    from `\<And>x. Q x n` show "Q x (Suc n)" sorry
-    txt {* Local quantification admits arbitrary instances: *}
-    note `Q a n` and `Q b n`
+    from \<open>\<And>x. Q x n\<close> show "Q x (Suc n)" sorry
+    txt \<open>Local quantification admits arbitrary instances:\<close>
+    note \<open>Q a n\<close> and \<open>Q b n\<close>
   qed
 end
 
 
-subsection {* Implicit induction context *}
+subsection \<open>Implicit induction context\<close>
 
-text {* The @{method induct} method can isolate local parameters and
+text \<open>The @{method induct} method can isolate local parameters and
   premises directly from the given statement.  This is convenient in
   practical applications, but requires some understanding of what is
-  going on internally (as explained above).  *}
+  going on internally (as explained above).\<close>
 
 notepad
 begin
@@ -520,24 +520,24 @@
   then have "Q x n"
   proof (induct n arbitrary: x)
     case 0
-    from `A x 0` show "Q x 0" sorry
+    from \<open>A x 0\<close> show "Q x 0" sorry
   next
     case (Suc n)
-    from `\<And>x. A x n \<Longrightarrow> Q x n`  -- {* arbitrary instances can be produced here *}
-      and `A x (Suc n)` show "Q x (Suc n)" sorry
+    from \<open>\<And>x. A x n \<Longrightarrow> Q x n\<close>  -- \<open>arbitrary instances can be produced here\<close>
+      and \<open>A x (Suc n)\<close> show "Q x (Suc n)" sorry
   qed
 end
 
 
-subsection {* Advanced induction with term definitions *}
+subsection \<open>Advanced induction with term definitions\<close>
 
-text {* Induction over subexpressions of a certain shape are delicate
+text \<open>Induction over subexpressions of a certain shape is delicate
   to formalize.  The Isar @{method induct} method provides
   infrastructure for this.
 
   Idea: sub-expressions of the problem are turned into a defined
   induction variable, often accompanied by fixing of auxiliary
-  parameters in the original expression.  *}
+  parameters in the original expression.\<close>
 
 notepad
 begin
@@ -548,35 +548,35 @@
   then have "P (a x)"
   proof (induct "a x" arbitrary: x)
     case 0
-    note prem = `A (a x)`
-      and defn = `0 = a x`
+    note prem = \<open>A (a x)\<close>
+      and defn = \<open>0 = a x\<close>
     show "P (a x)" sorry
   next
     case (Suc n)
-    note hyp = `\<And>x. n = a x \<Longrightarrow> A (a x) \<Longrightarrow> P (a x)`
-      and prem = `A (a x)`
-      and defn = `Suc n = a x`
+    note hyp = \<open>\<And>x. n = a x \<Longrightarrow> A (a x) \<Longrightarrow> P (a x)\<close>
+      and prem = \<open>A (a x)\<close>
+      and defn = \<open>Suc n = a x\<close>
     show "P (a x)" sorry
   qed
 end
 
 
-section {* Natural Deduction \label{sec:natural-deduction-synopsis} *}
+section \<open>Natural Deduction \label{sec:natural-deduction-synopsis}\<close>
 
-subsection {* Rule statements *}
+subsection \<open>Rule statements\<close>
 
-text {*
+text \<open>
   Isabelle/Pure ``theorems'' are always natural deduction rules,
   which sometimes happen to consist of a conclusion only.
 
   The framework connectives @{text "\<And>"} and @{text "\<Longrightarrow>"} indicate the
-  rule structure declaratively.  For example: *}
+  rule structure declaratively.  For example:\<close>
 
 thm conjI
 thm impI
 thm nat.induct
 
-text {*
+text \<open>
   The object-logic is embedded into the Pure framework via an implicit
   derivability judgment @{term "Trueprop :: bool \<Rightarrow> prop"}.
 
@@ -584,7 +584,7 @@
   the rule structure outlines the corresponding proof pattern.
 
   This can be made explicit as follows:
-*}
+\<close>
 
 notepad
 begin
@@ -595,22 +595,22 @@
   thm nat.induct
 end
 
-text {*
+text \<open>
   Isar provides first-class notation for rule statements as follows.
-*}
+\<close>
 
 print_statement conjI
 print_statement impI
 print_statement nat.induct
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {*
+text \<open>
   Introductions and eliminations of some standard connectives of
   the object-logic can be written as rule statements as follows.  (The
   proof ``@{command "by"}~@{method blast}'' serves as sanity check.)
-*}
+\<close>
 
 lemma "(P \<Longrightarrow> False) \<Longrightarrow> \<not> P" by blast
 lemma "\<not> P \<Longrightarrow> P \<Longrightarrow> Q" by blast
@@ -636,11 +636,11 @@
 lemma "x \<in> A \<union> B \<Longrightarrow> (x \<in> A \<Longrightarrow> R) \<Longrightarrow> (x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
 
 
-subsection {* Isar context elements *}
+subsection \<open>Isar context elements\<close>
 
-text {* We derive some results out of the blue, using Isar context
+text \<open>We derive some results out of the blue, using Isar context
   elements and some explicit blocks.  This illustrates their meaning
-  wrt.\ Pure connectives, without goal states getting in the way.  *}
+  wrt.\ Pure connectives, without goal states getting in the way.\<close>
 
 notepad
 begin
@@ -677,9 +677,9 @@
 end
 
 
-subsection {* Pure rule composition *}
+subsection \<open>Pure rule composition\<close>
 
-text {*
+text \<open>
   The Pure framework provides means for:
 
   \begin{itemize}
@@ -691,7 +691,7 @@
   \end{itemize}
 
   Both principles involve higher-order unification of @{text \<lambda>}-terms
-  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller).  *}
+  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller).\<close>
 
 notepad
 begin
@@ -700,20 +700,20 @@
   thm conjI [of A B]  -- "instantiation"
   thm conjI [of A B, OF a b]  -- "instantiation and composition"
   thm conjI [OF a b]  -- "composition via unification (trivial)"
-  thm conjI [OF `A` `B`]
+  thm conjI [OF \<open>A\<close> \<open>B\<close>]
 
   thm conjI [OF disjI1]
 end
 
-text {* Note: Low-level rule composition is tedious and leads to
-  unreadable~/ unmaintainable expressions in the text.  *}
+text \<open>Note: Low-level rule composition is tedious and leads to
+  unreadable~/ unmaintainable expressions in the text.\<close>
 
 
-subsection {* Structured backward reasoning *}
+subsection \<open>Structured backward reasoning\<close>
 
-text {* Idea: Canonical proof decomposition via @{command fix}~/
+text \<open>Idea: Canonical proof decomposition via @{command fix}~/
   @{command assume}~/ @{command show}, where the body produces a
-  natural deduction rule to refine some goal.  *}
+  natural deduction rule to refine some goal.\<close>
 
 notepad
 begin
@@ -733,22 +733,22 @@
       assume "A x"
       show "B x" sorry
     } -- "implicit block structure made explicit"
-    note `\<And>x. A x \<Longrightarrow> B x`
+    note \<open>\<And>x. A x \<Longrightarrow> B x\<close>
       -- "side exit for the resulting rule"
   qed
 end
 
 
-subsection {* Structured rule application *}
+subsection \<open>Structured rule application\<close>
 
-text {*
+text \<open>
   Idea: Previous facts and new claims are composed with a rule from
   the context (or background library).
-*}
+\<close>
 
 notepad
 begin
-  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  -- {* simple rule (Horn clause) *}
+  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  -- \<open>simple rule (Horn clause)\<close>
 
   have A sorry  -- "prefix of facts via outer sub-proof"
   then have C
@@ -772,7 +772,7 @@
 
 next
 
-  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  -- {* nested rule *}
+  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  -- \<open>nested rule\<close>
 
   have A sorry
   then have C
@@ -782,18 +782,18 @@
     show "B2 x" sorry
   qed
 
-  txt {* The compound rule premise @{prop "\<And>x. B1 x \<Longrightarrow> B2 x"} is better
+  txt \<open>The compound rule premise @{prop "\<And>x. B1 x \<Longrightarrow> B2 x"} is better
     addressed via @{command fix}~/ @{command assume}~/ @{command show}
-    in the nested proof body.  *}
+    in the nested proof body.\<close>
 end
 
 
-subsection {* Example: predicate logic *}
+subsection \<open>Example: predicate logic\<close>
 
-text {*
+text \<open>
   Using the above principles, standard introduction and elimination proofs
   of predicate logic connectives of HOL work as follows.
-*}
+\<close>
 
 notepad
 begin
@@ -861,56 +861,56 @@
     show C sorry
   qed
 
-  txt {* Less awkward version using @{command obtain}: *}
+  txt \<open>Less awkward version using @{command obtain}:\<close>
   have "\<exists>x. P x" sorry
   then obtain a where "P a" ..
 end
 
-text {* Further variations to illustrate Isar sub-proofs involving
-  @{command show}: *}
+text \<open>Further variations to illustrate Isar sub-proofs involving
+  @{command show}:\<close>
 
 notepad
 begin
   have "A \<and> B"
-  proof  -- {* two strictly isolated subproofs *}
+  proof  -- \<open>two strictly isolated subproofs\<close>
     show A sorry
   next
     show B sorry
   qed
 
   have "A \<and> B"
-  proof  -- {* one simultaneous sub-proof *}
+  proof  -- \<open>one simultaneous sub-proof\<close>
     show A and B sorry
   qed
 
   have "A \<and> B"
-  proof  -- {* two subproofs in the same context *}
+  proof  -- \<open>two subproofs in the same context\<close>
     show A sorry
     show B sorry
   qed
 
   have "A \<and> B"
-  proof  -- {* swapped order *}
+  proof  -- \<open>swapped order\<close>
     show B sorry
     show A sorry
   qed
 
   have "A \<and> B"
-  proof  -- {* sequential subproofs *}
+  proof  -- \<open>sequential subproofs\<close>
     show A sorry
-    show B using `A` sorry
+    show B using \<open>A\<close> sorry
   qed
 end
 
 
-subsubsection {* Example: set-theoretic operators *}
+subsubsection \<open>Example: set-theoretic operators\<close>
 
-text {* There is nothing special about logical connectives (@{text
+text \<open>There is nothing special about logical connectives (@{text
   "\<and>"}, @{text "\<or>"}, @{text "\<forall>"}, @{text "\<exists>"} etc.).  Operators from
   set theory or lattice theory work analogously.  It is only a matter
   of rule declarations in the library; rules can also be specified
   explicitly.
-*}
+\<close>
 
 notepad
 begin
@@ -955,20 +955,20 @@
 end
 
 
-section {* Generalized elimination and cases *}
+section \<open>Generalized elimination and cases\<close>
 
-subsection {* General elimination rules *}
+subsection \<open>General elimination rules\<close>
 
-text {*
+text \<open>
   The general format of elimination rules is illustrated by the
   following typical representatives:
-*}
+\<close>
 
-thm exE     -- {* local parameter *}
-thm conjE   -- {* local premises *}
-thm disjE   -- {* split into cases *}
+thm exE     -- \<open>local parameter\<close>
+thm conjE   -- \<open>local premises\<close>
+thm disjE   -- \<open>split into cases\<close>
 
-text {*
+text \<open>
   Combining these characteristics leads to the following general scheme
   for elimination rules with cases:
 
@@ -980,7 +980,7 @@
     in an augmented context
 
   \end{itemize}
-*}
+\<close>
 
 notepad
 begin
@@ -1003,13 +1003,13 @@
   qed
 end
 
-text {* Here @{text "?thesis"} is used to refer to the unchanged goal
-  statement.  *}
+text \<open>Here @{text "?thesis"} is used to refer to the unchanged goal
+  statement.\<close>
 
 
-subsection {* Rules with cases *}
+subsection \<open>Rules with cases\<close>
 
-text {*
+text \<open>
   Applying an elimination rule to some goal leaves it unchanged, but
   allows the context to be augmented in the sub-proof of each case.
 
@@ -1025,21 +1025,21 @@
   sub-proof
 
   \end{itemize}
-*}
+\<close>
 
 print_statement exE
 print_statement conjE
 print_statement disjE
 
 lemma
-  assumes A1 and A2  -- {* assumptions *}
+  assumes A1 and A2  -- \<open>assumptions\<close>
   obtains
     (case1)  x y where "B1 x y" and "C1 x y"
   | (case2)  x y where "B2 x y" and "C2 x y"
   sorry
 
 
-subsubsection {* Example *}
+subsubsection \<open>Example\<close>
 
 lemma tertium_non_datur:
   obtains
@@ -1053,20 +1053,20 @@
   have C
   proof (cases "x = y" rule: tertium_non_datur)
     case T
-    from `x = y` show ?thesis sorry
+    from \<open>x = y\<close> show ?thesis sorry
   next
     case F
-    from `x \<noteq> y` show ?thesis sorry
+    from \<open>x \<noteq> y\<close> show ?thesis sorry
   qed
 end
 
 
-subsubsection {* Example *}
+subsubsection \<open>Example\<close>
 
-text {*
+text \<open>
   Isabelle/HOL specification mechanisms (datatype, inductive, etc.)
   provide suitable derived cases rules.
-*}
+\<close>
 
 datatype foo = Foo | Bar foo
 
@@ -1076,33 +1076,33 @@
   have C
   proof (cases x)
     case Foo
-    from `x = Foo` show ?thesis sorry
+    from \<open>x = Foo\<close> show ?thesis sorry
   next
     case (Bar a)
-    from `x = Bar a` show ?thesis sorry
+    from \<open>x = Bar a\<close> show ?thesis sorry
   qed
 end
 
 
-subsection {* Obtaining local contexts *}
+subsection \<open>Obtaining local contexts\<close>
 
-text {* A single ``case'' branch may be inlined into Isar proof text
+text \<open>A single ``case'' branch may be inlined into Isar proof text
   via @{command obtain}.  This proves @{prop "(\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow>
-  thesis"} on the spot, and augments the context afterwards.  *}
+  thesis"} on the spot, and augments the context afterwards.\<close>
 
 notepad
 begin
   fix B :: "'a \<Rightarrow> bool"
 
   obtain x where "B x" sorry
-  note `B x`
+  note \<open>B x\<close>
 
-  txt {* Conclusions from this context may not mention @{term x} again! *}
+  txt \<open>Conclusions from this context may not mention @{term x} again!\<close>
   {
     obtain x where "B x" sorry
-    from `B x` have C sorry
+    from \<open>B x\<close> have C sorry
   }
-  note `C`
+  note \<open>C\<close>
 end
 
 end
--- a/src/Doc/JEdit/JEdit.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/JEdit/JEdit.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -4,11 +4,11 @@
 imports Base
 begin
 
-chapter {* Introduction *}
+chapter \<open>Introduction\<close>
 
-section {* Concepts and terminology *}
+section \<open>Concepts and terminology\<close>
 
-text {*
+text \<open>
   Isabelle/jEdit is a Prover IDE that integrates \emph{parallel proof
   checking} @{cite "Wenzel:2009" and "Wenzel:2013:ITP"} with
   \emph{asynchronous user interaction} @{cite "Wenzel:2010" and
@@ -58,12 +58,12 @@
   Isabelle/Scala versus Scala, Isabelle/jEdit versus jEdit need to be
   taken into account when discussing any of these PIDE building blocks
   in public forums, mailing lists, or even scientific publications.
-  *}
+\<close>
 
 
-section {* The Isabelle/jEdit Prover IDE *}
+section \<open>The Isabelle/jEdit Prover IDE\<close>
 
-text {*
+text \<open>
   \begin{figure}[htb]
   \begin{center}
   \includegraphics[scale=0.333]{isabelle-jedit}
@@ -104,12 +104,12 @@
   Thus the Prover IDE gives an impression of direct access to formal content
   of the prover within the editor, but in reality only certain aspects are
   exposed, according to the possibilities of the prover and its many tools.
-*}
+\<close>
 
 
-subsection {* Documentation *}
+subsection \<open>Documentation\<close>
 
-text {*
+text \<open>
   The \emph{Documentation} panel of Isabelle/jEdit provides access to the
   standard Isabelle documentation: PDF files are opened by regular desktop
   operations of the underlying platform. The section ``Original jEdit
@@ -124,12 +124,12 @@
   the official jEdit documentation does not know about the Isabelle plugin
   with its support for continuous checking of formal source text: jEdit is a
   plain text editor, but Isabelle/jEdit is a Prover IDE.
-*}
+\<close>
 
 
-subsection {* Plugins *}
+subsection \<open>Plugins\<close>
 
-text {*
+text \<open>
   The \emph{Plugin Manager} of jEdit allows augmenting editor functionality by
   JVM modules (jars) that are provided by the central plugin repository, which
   is accessible via various mirror sites.
@@ -153,12 +153,12 @@
   (e.g.\ \emph{ErrorList}, \emph{Code2HTML}) are included to saturate the
   dependencies of bundled plugins, but have no particular use in
   Isabelle/jEdit.
-*}
+\<close>
 
 
-subsection {* Options \label{sec:options} *}
+subsection \<open>Options \label{sec:options}\<close>
 
-text {* Both jEdit and Isabelle have distinctive management of
+text \<open>Both jEdit and Isabelle have distinctive management of
   persistent options.
 
   Regular jEdit options are accessible via the dialogs \emph{Utilities~/
@@ -192,12 +192,12 @@
   Isabelle/jEdit. Editing the machine-generated @{file_unchecked
   "$ISABELLE_HOME_USER/jedit/properties"} or @{file_unchecked
   "$ISABELLE_HOME_USER/etc/preferences"} manually while the application is
-  running is likely to cause surprise due to lost update! *}
+  running is likely to cause surprise due to a lost update!\<close>
 
 
-subsection {* Keymaps *}
+subsection \<open>Keymaps\<close>
 
-text {* Keyboard shortcuts used to be managed as jEdit properties in
+text \<open>Keyboard shortcuts used to be managed as jEdit properties in
   the past, but recent versions (2013) have a separate concept of
   \emph{keymap} that is configurable via \emph{Global Options~/
   Shortcuts}.  The @{verbatim imported} keymap is derived from the
@@ -210,12 +210,12 @@
   keyboard shortcuts manually (see also @{file_unchecked
   "$ISABELLE_HOME_USER/jedit/keymaps"} versus @{verbatim shortcut} properties
   in @{file "$ISABELLE_HOME/src/Tools/jEdit/src/jEdit.props"}).
-*}
+\<close>
 
 
-section {* Command-line invocation \label{sec:command-line} *}
+section \<open>Command-line invocation \label{sec:command-line}\<close>
 
-text {*
+text \<open>
   Isabelle/jEdit is normally invoked as a standalone application, with
   platform-specific executable wrappers for Linux, Windows, Mac OS X.
   Nonetheless it is occasionally useful to invoke the Prover IDE on the
@@ -265,14 +265,14 @@
   sources, which also requires an auxiliary @{verbatim jedit_build} component
   from @{url "http://isabelle.in.tum.de/components"}. The official
   Isabelle release already includes a pre-built version of Isabelle/jEdit.
-*}
+\<close>
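+
+text \<open>For example, the command line
+  @{verbatim "isabelle jedit -l HOL Scratch.thy"} selects the HOL
+  session image and opens the given theory file; the file name
+  @{verbatim "Scratch.thy"} is arbitrary here.\<close>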
 
 
-chapter {* Augmented jEdit functionality *}
+chapter \<open>Augmented jEdit functionality\<close>
 
-section {* Look-and-feel *}
+section \<open>Look-and-feel\<close>
 
-text {* jEdit is a Java/AWT/Swing application with some ambition to
+text \<open>jEdit is a Java/AWT/Swing application with some ambition to
   support ``native'' look-and-feel on all platforms, within the limits
   of what Oracle as Java provider and major operating system
   distributors allow (see also \secref{sec:problems}).
@@ -312,12 +312,12 @@
 
   After changing the look-and-feel in \emph{Global Options~/
   Appearance}, it is advisable to restart Isabelle/jEdit for the change to
-  take full effect.  *}
+  take full effect.\<close>
 
 
-section {* Dockable windows \label{sec:dockables} *}
+section \<open>Dockable windows \label{sec:dockables}\<close>
 
-text {*
+text \<open>
   In jEdit terminology, a \emph{view} is an editor window with one or more
   \emph{text areas} that show the content of one or more \emph{buffers}. A
   regular view may be surrounded by \emph{dockable windows} that show
@@ -365,12 +365,12 @@
   similar \emph{Detach} operation as an icon.
 
   \end{itemize}
-*}
+\<close>
 
 
-section {* Isabelle symbols \label{sec:symbols} *}
+section \<open>Isabelle symbols \label{sec:symbols}\<close>
 
-text {*
+text \<open>
   Isabelle sources consist of \emph{symbols} that extend plain ASCII to allow
   infinitely many mathematical symbols within the formal sources. This works
   without depending on particular encodings and varying Unicode
@@ -520,12 +520,12 @@
 
   To produce a single control symbol, it is also possible to complete
   on @{verbatim "\\"}@{verbatim sup}, @{verbatim "\\"}@{verbatim sub},
-  @{verbatim "\\"}@{verbatim bold} as for regular symbols.  *}
+  @{verbatim "\\"}@{verbatim bold} as for regular symbols.\<close>
 
 
-section {* SideKick parsers \label{sec:sidekick} *}
+section \<open>SideKick parsers \label{sec:sidekick}\<close>
 
-text {*
+text \<open>
   The \emph{SideKick} plugin provides some general services to display buffer
   structure in a tree view.
 
@@ -548,12 +548,12 @@
   for informative purposes, but the amount of displayed information
   might cause problems for large buffers, both for the human and the
   machine.
-*}
+\<close>
 
 
-section {* Scala console \label{sec:scala-console} *}
+section \<open>Scala console \label{sec:scala-console}\<close>
 
-text {*
+text \<open>
   The \emph{Console} plugin manages various shells (command interpreters),
   e.g.\ \emph{BeanShell}, which is the official jEdit scripting language, and
   the cross-platform \emph{System} shell. Thus the console provides similar
@@ -575,12 +575,12 @@
   This helps to explore Isabelle/Scala functionality interactively. Some care
   is required to avoid interference with the internals of the running
   application, especially in production use.
-*}
+\<close>
 
 
-section {* File-system access *}
+section \<open>File-system access\<close>
 
-text {*
+text \<open>
   File specifications in jEdit follow various formats and conventions
   according to \emph{Virtual File Systems}, which may also be provided by
   additional plugins. This allows remote files to be accessed via the @{verbatim
@@ -628,14 +628,14 @@
   Formally checked paths in prover input are subject to completion
   (\secref{sec:completion}): partial specifications are resolved via
   directory content and possible completions are offered in a popup.
-*}
+\<close>
 
 
-chapter {* Prover IDE functionality \label{sec:document-model} *}
+chapter \<open>Prover IDE functionality \label{sec:document-model}\<close>
 
-section {* Document model \label{sec:document-model} *}
+section \<open>Document model \label{sec:document-model}\<close>
 
-text {*
+text \<open>
   The document model is central to the PIDE architecture: the editor and the
   prover have a common notion of structured source text with markup, which is
   produced by formal processing. The editor is responsible for edits of
@@ -645,12 +645,12 @@
   Isabelle/jEdit handles classic editor events of jEdit, in order to connect
   the physical world of the GUI (with its singleton state) to the mathematical
   world of multiple document versions (with timeless and stateless updates).
-*}
+\<close>
 
 
-subsection {* Editor buffers and document nodes \label{sec:buffer-node} *}
+subsection \<open>Editor buffers and document nodes \label{sec:buffer-node}\<close>
 
-text {*
+text \<open>
   As a regular text editor, jEdit maintains a collection of \emph{buffers} to
   store text files; each buffer may be associated with any number of visible
   \emph{text areas}. Buffers are subject to an \emph{edit mode} that is
@@ -686,12 +686,12 @@
   physical file-system only plays a subordinate role. The relevant version of
   source text is passed directly from the editor to the prover, via internal
   communication channels.
-*}
+\<close>
 
 
-subsection {* Theories \label{sec:theories} *}
+subsection \<open>Theories \label{sec:theories}\<close>
 
-text {*
+text \<open>
   The \emph{Theories} panel (see also \figref{fig:theories}) provides an
   overview of the status of continuous checking of theory nodes within the
   document model. Unlike batch sessions of @{tool build} @{cite
@@ -744,11 +744,11 @@
   (e.g.\ information messages with \emph{sendback} markup by automated provers
   or disprovers in the background).
 
-*}
+\<close>
 
-subsection {* Auxiliary files \label{sec:aux-files} *}
+subsection \<open>Auxiliary files \label{sec:aux-files}\<close>
 
-text {*
+text \<open>
   Special load commands like @{command_ref ML_file} and @{command_ref
   SML_file} @{cite "isabelle-isar-ref"} refer to auxiliary files within some
   theory. Conceptually, the file argument of the command extends the theory
@@ -795,12 +795,12 @@
   fully-featured IDE for Standard ML, independently of theory or proof
   development: the required theory merely serves as some kind of project
   file for a collection of SML source modules.
-*}
+\<close>
 
 
-section {* Output \label{sec:output} *}
+section \<open>Output \label{sec:output}\<close>
 
-text {*
+text \<open>
   Prover output consists of \emph{markup} and \emph{messages}. Both are
   directly attached to the corresponding positions in the original source
   text, and visualized in the text area, e.g.\ as text colours for free and
@@ -869,12 +869,12 @@
   be explored recursively via tooltips or hyperlinks (see
   \secref{sec:tooltips-hyperlinks}), or clicked directly to initiate
   certain actions (see \secref{sec:auto-tools} and
-  \secref{sec:sledgehammer}).  *}
+  \secref{sec:sledgehammer}).\<close>
 
 
-section {* Query \label{sec:query} *}
+section \<open>Query \label{sec:query}\<close>
 
-text {*
+text \<open>
   The \emph{Query} panel provides various GUI forms to request extra
   information from the prover. In old times the user would have issued some
   diagnostic command like @{command find_theorems} and inspected its output,
@@ -921,12 +921,12 @@
   window, using a menu operation of the dockable window manager. The printed
   result usually provides sufficient clues about the original query, with some
   hyperlink to its context (via markup of its head line).
-*}
+\<close>
 
 
-subsection {* Find theorems *}
+subsection \<open>Find theorems\<close>
 
-text {*
+text \<open>
   The \emph{Query} panel in \emph{Find Theorems} mode retrieves facts from the
   theory or proof context matching all of the given criteria in the \emph{Find}
   text field. A single criterion has the following syntax:
@@ -938,12 +938,12 @@
 
   See also the Isar command @{command_ref find_theorems} in
   @{cite "isabelle-isar-ref"}.
-*}
+\<close>
 
 
-subsection {* Find constants *}
+subsection \<open>Find constants\<close>
 
-text {*
+text \<open>
   The \emph{Query} panel in \emph{Find Constants} mode prints all constants
   whose type meets all of the given criteria in the \emph{Find} text field.
   A single criterion has the following syntax:
@@ -955,23 +955,23 @@
 
   See also the Isar command @{command_ref find_consts} in @{cite
   "isabelle-isar-ref"}.
-*}
+\<close>
 
 
-subsection {* Print context *}
+subsection \<open>Print context\<close>
 
-text {*
+text \<open>
   The \emph{Query} panel in \emph{Print Context} mode prints information from
   the theory or proof context, or proof state. See also the Isar commands
   @{command_ref print_context}, @{command_ref print_cases}, @{command_ref
   print_term_bindings}, @{command_ref print_theorems}, @{command_ref
   print_state} described in @{cite "isabelle-isar-ref"}.
-*}
+\<close>
 
 
-section {* Tooltips and hyperlinks \label{sec:tooltips-hyperlinks} *}
+section \<open>Tooltips and hyperlinks \label{sec:tooltips-hyperlinks}\<close>
 
-text {*
+text \<open>
   Formally processed text (prover input or output) contains rich markup
   information that can be explored further by using the @{verbatim CONTROL}
   modifier key on Linux and Windows, or @{verbatim COMMAND} on Mac OS X.
@@ -1017,12 +1017,12 @@
   subject to formal document processing of the editor session and thus
   prevents further exploration: the chain of hyperlinks may end in
   some source file of the underlying logic image, or within the
-  Isabelle/ML bootstrap sources of Isabelle/Pure. *}
+  Isabelle/ML bootstrap sources of Isabelle/Pure.\<close>
 
 
-section {* Completion \label{sec:completion} *}
+section \<open>Completion \label{sec:completion}\<close>
 
-text {*
+text \<open>
   Smart completion of partial input is the IDE functionality \emph{par
   excellence}. Isabelle/jEdit combines several sources of information to
   achieve that. Despite its complexity, it should be possible to get some idea
@@ -1051,14 +1051,14 @@
   immediately or producing a popup. Although there is an inherent danger of
   non-deterministic behaviour due to such real-time parameters, the general
   completion policy aims at deterministic results as far as possible.
-*}
+\<close>
 
 
-subsection {* Varieties of completion \label{sec:completion-varieties} *}
+subsection \<open>Varieties of completion \label{sec:completion-varieties}\<close>
 
-subsubsection {* Built-in templates *}
+subsubsection \<open>Built-in templates\<close>
 
-text {*
+text \<open>
   Isabelle is ultimately a framework of nested sub-languages of different
   kinds and purposes. The completion mechanism supports this by the following
   built-in templates:
@@ -1084,12 +1084,12 @@
   embedded languages should work fluently. Note that national keyboard layouts
   might cause problems with back-quote as dead key: if possible, dead keys
   should be disabled.
-*}
+\<close>
 
 
-subsubsection {* Syntax keywords *}
+subsubsection \<open>Syntax keywords\<close>
 
-text {*
+text \<open>
   Syntax completion tables are determined statically from the keywords of the
   ``outer syntax'' of the underlying edit mode: for theory files this is the
   syntax of Isar commands.
@@ -1104,12 +1104,12 @@
   keyword itself. An empty string means to suppress the keyword altogether,
   which is occasionally useful to avoid confusion, e.g.\ the rare keyword
   @{command simproc_setup} vs.\ the frequent name-space entry @{text simp}.
-*}
+\<close>
 
 
-subsubsection {* Isabelle symbols *}
+subsubsection \<open>Isabelle symbols\<close>
 
-text {*
+text \<open>
   The completion tables for Isabelle symbols (\secref{sec:symbols}) are
   determined statically from @{file "$ISABELLE_HOME/etc/symbols"} and
   @{file_unchecked "$ISABELLE_HOME_USER/etc/symbols"} for each symbol
@@ -1137,12 +1137,12 @@
   particular sub-language of Isabelle. For example, symbol completion is
   suppressed within document source to avoid confusion with {\LaTeX} macros
   that use similar notation.
-*}
+\<close>
 
 
-subsubsection {* Name-space entries *}
+subsubsection \<open>Name-space entries\<close>
 
-text {*
+text \<open>
   This is genuine semantic completion, using information from the prover, so
   it requires some delay. A \emph{failed name-space lookup} produces an error
   message that is annotated with a list of alternative names that are legal.
@@ -1162,12 +1162,12 @@
   mechanism (truncated according to @{system_option completion_limit}). This
   is occasionally useful to explore an unknown name-space, e.g.\ in some
   template.
-*}
+\<close>
 
 
-subsubsection {* File-system paths *}
+subsubsection \<open>File-system paths\<close>
 
-text {*
+text \<open>
   Depending on prover markup about file-system path specifications in the
   source text, e.g.\ for the argument of a load command
   (\secref{sec:aux-files}), the completion mechanism explores the directory
@@ -1178,12 +1178,12 @@
 
   A suffix of slashes may be used to continue the exploration of an already
   recognized directory name.
-*}
+\<close>
 
 
-subsubsection {* Spell-checking *}
+subsubsection \<open>Spell-checking\<close>
 
-text {*
+text \<open>
   The spell-checker combines semantic markup from the prover (regions of plain
   words) with static dictionaries (word lists) that are known to the editor.
 
@@ -1210,12 +1210,12 @@
   upper-case, and capitalized words. This follows common usage in
   English, where this aspect is not decisive for proper spelling, in contrast
   to German, for example.
-*}
+\<close>
 
 
-subsection {* Semantic completion context \label{sec:completion-context} *}
+subsection \<open>Semantic completion context \label{sec:completion-context}\<close>
 
-text {*
+text \<open>
   Completion depends on a semantic context that is provided by the prover,
   although with some delay, because at least a full PIDE protocol round-trip
   is required. Until that information becomes available in the PIDE
@@ -1238,12 +1238,12 @@
   \secref{sec:completion-varieties}. This makes it possible to complete within
   broken input that escapes its normal semantic context, e.g.\ antiquotations or
   string literals in ML, which do not allow arbitrary backslash sequences.
-*}
+\<close>
 
 
-subsection {* Input events \label{sec:completion-input} *}
+subsection \<open>Input events \label{sec:completion-input}\<close>
 
-text {*
+text \<open>
   Completion is triggered by certain events produced by the user, with
   optional delay after keyboard input according to @{system_option
   jedit_completion_delay}.
@@ -1290,12 +1290,12 @@
   \end{enumerate}
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Completion popup \label{sec:completion-popup} *}
+subsection \<open>Completion popup \label{sec:completion-popup}\<close>
 
-text {*
+text \<open>
   A \emph{completion popup} is a minimally invasive GUI component over the
   text area that offers a selection of completion items to be inserted into
   the text, e.g.\ by mouse clicks. Items are sorted dynamically, according to
@@ -1325,12 +1325,12 @@
   Movement within the popup is only active for multiple items.
   Otherwise the corresponding key event retains its standard meaning
   within the underlying text area.
-*}
+\<close>
 
 
-subsection {* Insertion \label{sec:completion-insert} *}
+subsection \<open>Insertion \label{sec:completion-insert}\<close>
 
-text {*
+text \<open>
   Completion may first propose replacements to be selected (via a popup), or
   replace text immediately in certain situations and depending on certain
   options like @{system_option jedit_completion_immediate}. In any case,
@@ -1368,12 +1368,12 @@
   of jEdit. According to normal jEdit policies, the recovered text after
   @{action undo} is selected: @{verbatim ESCAPE} is required to reset the
   selection and to continue typing more text.
-*}
+\<close>
 
 
-subsection {* Options \label{sec:completion-options} *}
+subsection \<open>Options \label{sec:completion-options}\<close>
 
-text {*
+text \<open>
   This is a summary of Isabelle/Scala system options that are relevant for
   completion. They may be configured in \emph{Plugin Options~/ Isabelle~/
   General} as usual.
@@ -1427,12 +1427,12 @@
   that is subject to spell-checking, including various forms of comments.
 
   \end{itemize}
-*}
+\<close>
 
 
-section {* Automatically tried tools \label{sec:auto-tools} *}
+section \<open>Automatically tried tools \label{sec:auto-tools}\<close>
 
-text {*
+text \<open>
   Continuous document processing works asynchronously in the background.
   Visible document source that has been evaluated may get augmented by
   additional results of \emph{asynchronous print functions}. The canonical
@@ -1533,12 +1533,12 @@
   Users should experiment with how the available CPU resources (number of
   cores) are best invested to get additional feedback from the prover in
   the background, by using a selection of weaker or stronger tools.
-*}
+\<close>
 
 
-section {* Sledgehammer \label{sec:sledgehammer} *}
+section \<open>Sledgehammer \label{sec:sledgehammer}\<close>
 
-text {* The \emph{Sledgehammer} panel (\figref{fig:sledgehammer})
+text \<open>The \emph{Sledgehammer} panel (\figref{fig:sledgehammer})
   provides a view on some independent execution of the Isar command
   @{command_ref sledgehammer}, with process indicator (spinning wheel) and
   GUI elements for important Sledgehammer arguments and options.  Any
@@ -1564,14 +1564,14 @@
   Proposed proof snippets are marked up as \emph{sendback}, which
   means a single mouse click inserts the text into a suitable place of
   the original source.  Some manual editing may be required
-  nonetheless, say to remove earlier proof attempts. *}
+  nonetheless, say to remove earlier proof attempts.\<close>
 
 
-chapter {* Miscellaneous tools *}
+chapter \<open>Miscellaneous tools\<close>
 
-section {* Timing *}
+section \<open>Timing\<close>
 
-text {* Managed evaluation of commands within PIDE documents includes
+text \<open>Managed evaluation of commands within PIDE documents includes
   timing information, which consists of elapsed (wall-clock) time, CPU
   time, and GC (garbage collection) time.  Note that in a
   multithreaded system it is difficult to measure execution time
@@ -1603,12 +1603,12 @@
   runtime itself --- on the Java Virtual Machine that runs Isabelle/Scala, not
   Isabelle/ML. Internally, the Isabelle/Scala module @{verbatim
   isabelle.ML_Statistics} provides further access to statistics of
-  Isabelle/ML. *}
+  Isabelle/ML.\<close>
 
 
-section {* Low-level output *}
+section \<open>Low-level output\<close>
 
-text {* Prover output is normally shown directly in the main text area
+text \<open>Prover output is normally shown directly in the main text area
   or secondary \emph{Output} panels, as explained in
   \secref{sec:output}.
 
@@ -1655,12 +1655,12 @@
   ignored.
 
   \end{itemize}
-*}
+\<close>
 
 
-chapter {* Known problems and workarounds \label{sec:problems} *}
+chapter \<open>Known problems and workarounds \label{sec:problems}\<close>
 
-text {*
+text \<open>
   \begin{itemize}
 
   \item \textbf{Problem:} Odd behavior of some diagnostic commands with
@@ -1725,6 +1725,6 @@
   manager (notably on Mac OS X).
 
   \end{itemize}
-*}
+\<close>
 
 end
\ No newline at end of file
--- a/src/Doc/System/Basics.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/System/Basics.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* The Isabelle system environment *}
+chapter \<open>The Isabelle system environment\<close>
 
-text {* This manual describes Isabelle together with related tools and
+text \<open>This manual describes Isabelle together with related tools and
   user interfaces as seen from a system-oriented view.  See also the
   \emph{Isabelle/Isar Reference Manual} @{cite "isabelle-isar-ref"} for
   the actual Isabelle input language and related concepts, and
@@ -33,12 +33,12 @@
   from the settings mechanism.
 
   \end{enumerate}
-*}
+\<close>
 
 
-section {* Isabelle settings \label{sec:settings} *}
+section \<open>Isabelle settings \label{sec:settings}\<close>
 
-text {*
+text \<open>
   The Isabelle system heavily depends on the \emph{settings
   mechanism}\indexbold{settings}.  Essentially, this is a statically
   scoped collection of environment variables, such as @{setting
@@ -59,12 +59,12 @@
   Occasionally, users would still want to put the @{file
   "$ISABELLE_HOME/bin"} directory into their shell's search path, but
   this is not required.
-*}
+\<close>
 
 
-subsection {* Bootstrapping the environment \label{sec:boot} *}
+subsection \<open>Bootstrapping the environment \label{sec:boot}\<close>
 
-text {* Isabelle executables need to be run within a proper settings
+text \<open>Isabelle executables need to be run within a proper settings
   environment.  This is bootstrapped as described below, on the first
   invocation of one of the outer wrapper scripts (such as
   @{executable_ref isabelle}).  This happens only once for each
@@ -134,12 +134,12 @@
 
   \medskip Note that the settings environment may be inspected with
   the @{tool getenv} tool.  This might help to figure out the effect
-  of complex settings scripts.  *}
+  of complex settings scripts.\<close>
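 
 text \<open>For example, the complete settings environment may be inspected
   like this (an illustrative invocation):
 \begin{ttbox}
   isabelle getenv -a
 \end{ttbox}
 \<close>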
 
 
-subsection {* Common variables *}
+subsection \<open>Common variables\<close>
 
-text {*
+text \<open>
   This is a reference of common Isabelle settings variables. Note that
   the list is somewhat open-ended. Third-party utilities or interfaces
   may add their own selection. Variables that are special in some
@@ -276,12 +276,12 @@
   somewhere in @{file_unchecked "/tmp"}.
   
   \end{description}
-*}
+\<close>
 
 
-subsection {* Additional components \label{sec:components} *}
+subsection \<open>Additional components \label{sec:components}\<close>
 
-text {* Any directory may be registered as an explicit \emph{Isabelle
+text \<open>Any directory may be registered as an explicit \emph{Isabelle
   component}.  The general layout conventions are that of the main
   Isabelle distribution itself, and the following two files (both
   optional) have a special meaning:
@@ -347,12 +347,12 @@
   See also \secref{sec:tool-components} for some tool-support for
   resolving components that are formally initialized but not installed
   yet.
-*}
+\<close>
 
 
-section {* The raw Isabelle process \label{sec:isabelle-process} *}
+section \<open>The raw Isabelle process \label{sec:isabelle-process}\<close>
 
-text {*
+text \<open>
   The @{executable_def "isabelle_process"} executable runs bare-bones
   Isabelle logic sessions --- either interactively or in batch mode.
   It provides an abstraction over the underlying ML system, and over
@@ -390,12 +390,12 @@
   actual file locations may also be given by including at least one
   slash (@{verbatim "/"}) in the name (hint: use @{verbatim "./"} to
   refer to the current directory).
-*}
+\<close>
 
 
-subsubsection {* Options *}
+subsubsection \<open>Options\<close>
 
-text {*
+text \<open>
   If the input heap file does not have write permission bits set, or
   the @{verbatim "-r"} option is given explicitly, then the session
   started will be read-only.  That is, the ML world cannot be
@@ -450,12 +450,12 @@
   \medskip The @{verbatim "-S"} option makes the Isabelle process more
   secure by disabling some critical operations, notably runtime
   compilation and evaluation of ML source code.
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {*
+text \<open>
   Run an interactive session of the default object-logic (as specified
   by the @{setting ISABELLE_LOGIC} setting) like this:
 \begin{ttbox}
@@ -495,12 +495,12 @@
   messages by the ML runtime environment.  The @{verbatim "-W"} option
   makes it possible to communicate with the Isabelle process via an external
   program in a more robust fashion.
-*}
+\<close>
 
 
-section {* The Isabelle tool wrapper \label{sec:isabelle-tool} *}
+section \<open>The Isabelle tool wrapper \label{sec:isabelle-tool}\<close>
 
-text {*
+text \<open>
   All Isabelle-related tools and interfaces are called via a common
   wrapper --- @{executable isabelle}:
 
@@ -520,12 +520,12 @@
   ISABELLE_TOOLS} setting.  Do not try to call the scripts directly
   from the shell.  Neither should you add the tool directories to your
   shell's search path!
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* Show the list of available documentation of the Isabelle
+text \<open>Show the list of available documentation of the Isabelle
   distribution:
 
 \begin{ttbox}
@@ -541,6 +541,6 @@
 \begin{ttbox}
   isabelle getenv ISABELLE_HOME_USER
 \end{ttbox}
-*}
+\<close>
 
 end
\ No newline at end of file
--- a/src/Doc/System/Misc.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/System/Misc.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,27 +2,27 @@
 imports Base
 begin
 
-chapter {* Miscellaneous tools \label{ch:tools} *}
+chapter \<open>Miscellaneous tools \label{ch:tools}\<close>
 
-text {*
+text \<open>
   Subsequently we describe various Isabelle-related utilities, given
   in alphabetical order.
-*}
+\<close>
 
 
-section {* Theory graph browser \label{sec:browse} *}
+section \<open>Theory graph browser \label{sec:browse}\<close>
 
-text {* The Isabelle graph browser is a general tool for visualizing
+text \<open>The Isabelle graph browser is a general tool for visualizing
   dependency graphs.  Certain nodes of the graph (i.e.\ theories) can
   be grouped together in ``directories'', whose contents may be
   hidden, thus enabling the user to collapse irrelevant portions of
   information.  The browser is written in Java; it can be used both as
-  a stand-alone application and as an applet.  *}
+  a stand-alone application and as an applet.\<close>
 
 
-subsection {* Invoking the graph browser *}
+subsection \<open>Invoking the graph browser\<close>
 
-text {* The stand-alone version of the graph browser is wrapped up as
+text \<open>The stand-alone version of the graph browser is wrapped up as
   @{tool_def browser}:
 \begin{ttbox}
 Usage: isabelle browser [OPTIONS] [GRAPHFILE]
@@ -49,12 +49,12 @@
   \medskip The applet version of the browser is part of the standard
   WWW theory presentation; see the link ``theory dependencies'' within
   each session index.
-*}
+\<close>
 
 
-subsection {* Using the graph browser *}
+subsection \<open>Using the graph browser\<close>
 
-text {*
+text \<open>
   The browser's main window, which is shown in
   \figref{fig:browserwindow}, consists of two sub-windows.  In the
   left sub-window, the directory tree is displayed. The graph itself
@@ -64,12 +64,12 @@
   \includegraphics[width=\textwidth]{browser_screenshot}
   \caption{\label{fig:browserwindow} Browser main window}
   \end{figure}
-*}
+\<close>
 
 
-subsubsection {* The directory tree window *}
+subsubsection \<open>The directory tree window\<close>
 
-text {*
+text \<open>
   We describe the usage of the directory browser and the meaning of
   the different items in the browser window.
 
@@ -95,12 +95,12 @@
   displayed.
 
   \end{itemize}
-*}
+\<close>
 
 
-subsubsection {* The graph display window *}
+subsubsection \<open>The graph display window\<close>
 
-text {*
+text \<open>
   When pointing at an ordinary node, an upward and a downward arrow are
   shown.  Initially, both of these arrows are green. Clicking on the
   upward or downward arrow collapses all predecessor or successor
@@ -112,12 +112,12 @@
   "[....]"}''. Similar to the directory browser, the contents of
   theory files can be displayed by double clicking on the
   corresponding node.
-*}
+\<close>
 
 
-subsubsection {* The ``File'' menu *}
+subsubsection \<open>The ``File'' menu\<close>
 
-text {*
+text \<open>
   Due to Java Applet security restrictions this menu is only available
   in the full application version. The meaning of the menu items is as
   follows:
@@ -137,12 +137,12 @@
   \item[Quit] Quit the graph browser.
 
   \end{description}
-*}
+\<close>
 
 
-subsection {* Syntax of graph definition files *}
+subsection \<open>Syntax of graph definition files\<close>
 
-text {*
+text \<open>
   A graph definition file has the following syntax:
 
   \begin{center}\small
@@ -177,12 +177,12 @@
   browser assumes that successor nodes are listed.
 
   \end{description}
-*}
+\<close>
 
 
-section {* Resolving Isabelle components \label{sec:tool-components} *}
+section \<open>Resolving Isabelle components \label{sec:tool-components}\<close>
 
-text {*
+text \<open>
   The @{tool_def components} tool resolves Isabelle components:
 \begin{ttbox}
 Usage: isabelle components [OPTIONS] [COMPONENTS ...]
@@ -227,12 +227,12 @@
   repository clone --- this does not make any sense for regular
   Isabelle releases.  If the file already exists, it needs to be
   edited manually according to the printed explanation.
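 
   For example, the following (illustrative) invocation resolves all
   missing components that are formally initialized:
 \begin{ttbox}
   isabelle components -a
 \end{ttbox}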
-*}
+\<close>
 
 
-section {* Raw ML console *}
+section \<open>Raw ML console\<close>
 
-text {*
+text \<open>
   The @{tool_def console} tool runs the Isabelle process with raw ML console:
 \begin{ttbox}
 Usage: isabelle console [OPTIONS]
@@ -268,12 +268,12 @@
   Isabelle/Isar nor Isabelle/ML within the usual formal context. Some useful
   ML commands at this stage are @{ML cd}, @{ML pwd}, @{ML use}, @{ML use_thy},
   @{ML use_thys}.
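 
   For example, a theory from the distribution may be loaded like this
   (an illustrative interaction):
 \begin{ttbox}
   use_thy "~~/src/HOL/ex/Sqrt";
 \end{ttbox}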
-*}
+\<close>
 
 
-section {* Displaying documents \label{sec:tool-display} *}
+section \<open>Displaying documents \label{sec:tool-display}\<close>
 
-text {* The @{tool_def display} tool displays documents in DVI or PDF
+text \<open>The @{tool_def display} tool displays documents in DVI or PDF
   format:
 \begin{ttbox}
 Usage: isabelle display DOCUMENT
@@ -286,12 +286,12 @@
   file formats.  Normally this opens the document via the desktop
   environment, potentially in an asynchronous manner with re-use of
   previous views.
-*}
+\<close>
 
 
-section {* Viewing documentation \label{sec:tool-doc} *}
+section \<open>Viewing documentation \label{sec:tool-doc}\<close>
 
-text {*
+text \<open>
   The @{tool_def doc} tool displays Isabelle documentation:
 \begin{ttbox}
 Usage: isabelle doc [DOC ...]
@@ -306,12 +306,12 @@
 
   \medskip The @{setting ISABELLE_DOCS} setting specifies the list of
   directories (separated by colons) to be scanned for documentation.
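 
   For example, this manual should be accessible under its canonical
   name like this:
 \begin{ttbox}
   isabelle doc system
 \end{ttbox}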
-*}
+\<close>
 
 
-section {* Shell commands within the settings environment \label{sec:tool-env} *}
+section \<open>Shell commands within the settings environment \label{sec:tool-env}\<close>
 
-text {* The @{tool_def env} tool is a direct wrapper for the standard
+text \<open>The @{tool_def env} tool is a direct wrapper for the standard
   @{verbatim "/usr/bin/env"} command on POSIX systems, running within
   the Isabelle settings environment (\secref{sec:settings}).
 
@@ -321,12 +321,12 @@
 \begin{alltt}
   isabelle env bash
 \end{alltt}
-*}
+\<close>
 
 
-section {* Inspecting the settings environment \label{sec:tool-getenv} *}
+section \<open>Inspecting the settings environment \label{sec:tool-getenv}\<close>
 
-text {* The Isabelle settings environment --- as provided by the
+text \<open>The Isabelle settings environment --- as provided by the
   site-default and user-specific settings files --- can be inspected
   with the @{tool_def getenv} tool:
 \begin{ttbox}
@@ -351,12 +351,12 @@
   Option @{verbatim "-d"} produces a dump of the complete environment
   to the specified file.  Entries are terminated by the ASCII null
   character, i.e.\ the C string terminator.
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* Get the location of @{setting ISABELLE_HOME_USER} where
+text \<open>Get the location of @{setting ISABELLE_HOME_USER} where
   user-specific information is stored:
 \begin{ttbox}
 isabelle getenv ISABELLE_HOME_USER
@@ -367,12 +367,12 @@
 \begin{ttbox}
 isabelle getenv -b ISABELLE_OUTPUT
 \end{ttbox}
-*}
+\<close>
 
 
-section {* Installing standalone Isabelle executables \label{sec:tool-install} *}
+section \<open>Installing standalone Isabelle executables \label{sec:tool-install}\<close>
 
-text {* By default, the main Isabelle binaries (@{executable
+text \<open>By default, the main Isabelle binaries (@{executable
   "isabelle"} etc.)  are just run from their location within the
   distribution directory, probably indirectly by the shell through its
   @{setting PATH}.  Other schemes of installation are supported by the
@@ -398,12 +398,12 @@
 
   \medskip It is also possible to make symbolic links of the main
   Isabelle executables manually, but making separate copies outside
-  the Isabelle distribution directory will not work!  *}
+  the Isabelle distribution directory will not work!\<close>
 
 
-section {* Creating instances of the Isabelle logo *}
+section \<open>Creating instances of the Isabelle logo\<close>
 
-text {* The @{tool_def logo} tool creates instances of the generic
+text \<open>The @{tool_def logo} tool creates instances of the generic
   Isabelle logo as EPS and PDF, for inclusion in {\LaTeX} documents.
 \begin{ttbox}
 Usage: isabelle logo [OPTIONS] XYZ
@@ -423,12 +423,12 @@
 
   \medskip Implementors of Isabelle tools and applications are
   encouraged to make derived Isabelle logos for their own projects
-  using this template.  *}
+  using this template.\<close>
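 
 text \<open>For example, the following (illustrative) invocation should
   produce instances @{verbatim "isabelle_hol.eps"} and @{verbatim
   "isabelle_hol.pdf"}:
 \begin{ttbox}
   isabelle logo HOL
 \end{ttbox}
 \<close>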
 
 
-section {* Output the version identifier of the Isabelle distribution *}
+section \<open>Output the version identifier of the Isabelle distribution\<close>
 
-text {*
+text \<open>
   The @{tool_def version} tool displays Isabelle version information:
 \begin{ttbox}
 Usage: isabelle version [OPTIONS]
@@ -444,12 +444,12 @@
 
   The @{verbatim "-i"} option produces a short identification derived
   from the Mercurial id of the @{setting ISABELLE_HOME} directory.
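 
   For example (with illustrative output):
 \begin{ttbox}
   isabelle version
   Isabelle2014: August 2014
 \end{ttbox}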
-*}
+\<close>
 
 
-section {* Convert XML to YXML *}
+section \<open>Convert XML to YXML\<close>
 
-text {*
+text \<open>
   The @{tool_def yxml} tool converts a standard XML document (stdin)
   to the much simpler and more efficient YXML format of Isabelle
   (stdout).  The YXML format is defined as follows.
@@ -489,6 +489,6 @@
 
   YXML documents may be detected quickly by checking that the first
   two characters are @{text "\<^bold>X\<^bold>Y"}.
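 
   For example, a standard XML file may be converted like this (with
   hypothetical file names):
 \begin{ttbox}
   isabelle yxml < input.xml > output.yxml
 \end{ttbox}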
-*}
+\<close>
 
 end
\ No newline at end of file
--- a/src/Doc/System/Presentation.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/System/Presentation.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* Presenting theories \label{ch:present} *}
+chapter \<open>Presenting theories \label{ch:present}\<close>
 
-text {* Isabelle provides several ways to present the outcome of
+text \<open>Isabelle provides several ways to present the outcome of
   formal developments, including WWW-based browsable libraries or
   actual printable documents.  Presentation is centered around the
   concept of \emph{sessions} (\chref{ch:session}).  The global session
@@ -45,12 +45,12 @@
   \caption{The tool chain of Isabelle session presentation} \label{fig:session-tools}
   \end{center}
   \end{figure}
-*}
+\<close>
 
 
-section {* Generating theory browser information \label{sec:info} *}
+section \<open>Generating theory browser information \label{sec:info}\<close>
 
-text {*
+text \<open>
   \index{theory browsing information|bold}
 
   As a side-effect of building sessions, Isabelle is able to generate
@@ -103,11 +103,11 @@
   ISABELLE_BROWSER_INFO} setting plus a prefix corresponding to the
   session chapter and identifier.  In order to present Isabelle
   applications on the web, the corresponding subdirectory from
-  @{setting ISABELLE_BROWSER_INFO} can be put on a WWW server.  *}
+  @{setting ISABELLE_BROWSER_INFO} can be put on a WWW server.\<close>
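 
 text \<open>For example, browser information for session @{verbatim HOL}
   may be produced like this (an illustrative invocation):
 \begin{ttbox}
   isabelle build -o browser_info -v -b HOL
 \end{ttbox}
 \<close>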
 
-section {* Preparing session root directories \label{sec:tool-mkroot} *}
+section \<open>Preparing session root directories \label{sec:tool-mkroot}\<close>
 
-text {* The @{tool_def mkroot} tool configures a given directory as
+text \<open>The @{tool_def mkroot} tool configures a given directory as
   session root, with some @{verbatim ROOT} file and optional document
   source directory.  Its usage is:
 \begin{ttbox}
@@ -137,12 +137,12 @@
   \medskip The implicit Isabelle settings variable @{setting
   ISABELLE_LOGIC} specifies the parent session, and @{setting
   ISABELLE_DOCUMENT_FORMAT} the document format to be filled
-  into the generated @{verbatim ROOT} file.  *}
+  into the generated @{verbatim ROOT} file.\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* Produce session @{verbatim Test} (with document preparation)
+text \<open>Produce session @{verbatim Test} (with document preparation)
   within a separate directory of the same name:
 \begin{ttbox}
 isabelle mkroot -d Test && isabelle build -D Test
@@ -153,13 +153,13 @@
 \begin{ttbox}
 isabelle mkroot -d && isabelle build -D .
 \end{ttbox}
-*}
+\<close>
 
 
-section {* Preparing Isabelle session documents
-  \label{sec:tool-document} *}
+section \<open>Preparing Isabelle session documents
+  \label{sec:tool-document}\<close>
 
-text {* The @{tool_def document} tool prepares logic session
+text \<open>The @{tool_def document} tool prepares logic session
   documents, processing the sources as provided by the user and
   generated by Isabelle.  Its usage is:
 \begin{ttbox}
@@ -263,13 +263,13 @@
   Some care is needed if the document output location is configured
   differently, say within a directory whose content is still required
   afterwards!
-*}
+\<close>
 
 
-section {* Running {\LaTeX} within the Isabelle environment
-  \label{sec:tool-latex} *}
+section \<open>Running {\LaTeX} within the Isabelle environment
+  \label{sec:tool-latex}\<close>
 
-text {* The @{tool_def latex} tool provides the basic interface for
+text \<open>The @{tool_def latex} tool provides the basic interface for
   Isabelle document preparation.  Its usage is:
 \begin{ttbox}
 Usage: isabelle latex [OPTIONS] [FILE]
@@ -297,12 +297,12 @@
   The @{verbatim syms} output is for internal use; it generates lists
   of symbols that are available without loading additional {\LaTeX}
   packages.
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* Invoking @{tool latex} by hand may be occasionally useful when
+text \<open>Invoking @{tool latex} by hand may occasionally be useful when
   debugging failed attempts of the automatic document preparation
   stage of batch-mode Isabelle.  The abortive process leaves the
   sources at a certain place within @{setting ISABELLE_BROWSER_INFO},
@@ -313,6 +313,6 @@
   cd "$(isabelle getenv -b ISABELLE_BROWSER_INFO)/Unsorted/Test/document"
   isabelle latex -o pdf
 \end{ttbox}
-*}
+\<close>
 
 end
\ No newline at end of file
--- a/src/Doc/System/Scala.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/System/Scala.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,19 +2,19 @@
 imports Base
 begin
 
-chapter {* Isabelle/Scala development tools *}
+chapter \<open>Isabelle/Scala development tools\<close>
 
-text {* Isabelle/ML and Isabelle/Scala are the two main language
+text \<open>Isabelle/ML and Isabelle/Scala are the two main language
 environments for Isabelle tool implementations.  There are some basic
 command-line tools to work with the underlying Java Virtual Machine,
 the Scala toplevel and compiler.  Note that Isabelle/jEdit
 @{cite "isabelle-jedit"} provides a Scala Console for interactive
-experimentation within the running application. *}
+experimentation within the running application.\<close>
 
 
-section {* Java Runtime Environment within Isabelle \label{sec:tool-java} *}
+section \<open>Java Runtime Environment within Isabelle \label{sec:tool-java}\<close>
 
-text {* The @{tool_def java} tool is a direct wrapper for the Java
+text \<open>The @{tool_def java} tool is a direct wrapper for the Java
   Runtime Environment, within the regular Isabelle settings
   environment (\secref{sec:settings}).  The command line arguments are
   those of the underlying Java version.  It is run in @{verbatim
@@ -33,12 +33,12 @@
 \begin{alltt}
   isabelle java isabelle.GUI_Setup
 \end{alltt}
-*}
+\<close>
 
 
-section {* Scala toplevel \label{sec:tool-scala} *}
+section \<open>Scala toplevel \label{sec:tool-scala}\<close>
 
-text {* The @{tool_def scala} tool is a direct wrapper for the Scala
+text \<open>The @{tool_def scala} tool is a direct wrapper for the Scala
   toplevel; see also @{tool java} above.  The command line arguments
   are those of the underlying Scala version.
 
@@ -50,12 +50,12 @@
   scala> options.bool("browser_info")
   scala> options.string("document")
 \end{alltt}
-*}
+\<close>
 
 
-section {* Scala compiler \label{sec:tool-scalac} *}
+section \<open>Scala compiler \label{sec:tool-scalac}\<close>
 
-text {* The @{tool_def scalac} tool is a direct wrapper for the Scala
+text \<open>The @{tool_def scalac} tool is a direct wrapper for the Scala
   compiler; see also @{tool scala} above.  The command line arguments
   are those of the underlying Scala version.
 
@@ -68,12 +68,12 @@
 
   Note that jEdit @{cite "isabelle-jedit"} has its own mechanisms for
   adding plugin components; this needs special attention since
-  it overrides the standard Java class loader.  *}
+  it overrides the standard Java class loader.\<close>
 
 
-section {* Scala script wrapper *}
+section \<open>Scala script wrapper\<close>
 
-text {* The executable @{executable
+text \<open>The executable @{executable
   "$ISABELLE_HOME/bin/isabelle_scala_script"} allows to run
   Isabelle/Scala source files stand-alone programs, by using a
   suitable ``hash-bang'' line and executable file permissions.
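 
   A minimal sketch of such a script (assuming the executable is
   reachable via the shell's search path):
 \begin{ttbox}
   #!/usr/bin/env isabelle_scala_script
   Console.println("Hello, Isabelle/Scala!")
 \end{ttbox}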
@@ -92,6 +92,6 @@
 
   Alternatively the full @{file
   "$ISABELLE_HOME/bin/isabelle_scala_script"} may be specified in
-  expanded form.  *}
+  expanded form.\<close>
 
 end
--- a/src/Doc/System/Sessions.thy	Tue Oct 07 21:28:24 2014 +0200
+++ b/src/Doc/System/Sessions.thy	Tue Oct 07 21:29:59 2014 +0200
@@ -2,9 +2,9 @@
 imports Base
 begin
 
-chapter {* Isabelle sessions and build management \label{ch:session} *}
+chapter \<open>Isabelle sessions and build management \label{ch:session}\<close>
 
-text {* An Isabelle \emph{session} consists of a collection of related
+text \<open>An Isabelle \emph{session} consists of a collection of related
   theories that may be associated with formal documents
   (\chref{ch:present}).  There is also a notion of \emph{persistent
   heap} image to capture the state of a session, similar to
@@ -24,12 +24,12 @@
   management helps to organize this efficiently.  This includes
   support for parallel build jobs, in addition to the multithreaded
   theory and proof checking that is already provided by the prover
-  process itself.  *}
+  process itself.\<close>
 
 
-section {* Session ROOT specifications \label{sec:session-root} *}
+section \<open>Session ROOT specifications \label{sec:session-root}\<close>
 
-text {* Session specifications reside in files called @{verbatim ROOT}
+text \<open>Session specifications reside in files called @{verbatim ROOT}
   within certain directories, such as the home locations of registered
   Isabelle components or additional project directories given by the
   user.
@@ -144,23 +144,23 @@
   directory @{verbatim document} within the session root directory.
 
   \end{description}
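 
   A minimal sketch of such an entry (illustrative only):
 \begin{ttbox}
   session Test = HOL +
     options [document = false]
     theories A B
 \end{ttbox}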
-*}
+\<close>
 
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {* See @{file "~~/src/HOL/ROOT"} for a diversity of practically
+text \<open>See @{file "~~/src/HOL/ROOT"} for a diversity of practically
   relevant situations, although it uses relatively complex
   quasi-hierarchic naming conventions like @{text "HOL\<dash>SPARK"},
   @{text "HOL\<dash>SPARK\<dash>Examples"}.  An alternative is to use
   unqualified names that are relatively long and descriptive, as in
   the Archive of Formal Proofs (@{url "http://afp.sourceforge.net"}), for
-  example. *}
+  example.\<close>
 
 
-section {* System build options \label{sec:system-options} *}
+section \<open>System build options \label{sec:system-options}\<close>
 
-text {* See @{file "~~/etc/options"} for the main defaults provided by
+text \<open>See @{file "~~/etc/options"} for the main defaults provided by
   the Isabelle distribution.  Isabelle/jEdit @{cite "isabelle-jedit"}
   includes a simple editing mode @{verbatim "isabelle-options"} for
   this file-format.
@@ -269,12 +269,12 @@
 
   Option @{verbatim "-x"} specifies a file to export the result in
   YXML format, instead of printing it in human-readable form.
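 
   For example (an illustrative invocation; further options as
   described above may be combined with it):
 \begin{ttbox}
   isabelle options -x options.yxml
 \end{ttbox}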
-*}
+\<close>
 
 
-section {* Invoking the build process \label{sec:tool-build} *}
+section \<open>Invoking the build process \label{sec:tool-build}\<close>
 
-text {* The @{tool_def build} tool invokes the build process for
+text \<open>The @{tool_def build} tool invokes the build process for
   Isabelle sessions.  It manages dependencies between sessions,
   related sources of theories and auxiliary files, and target heap
   images.  Accordingly, it runs instances of the prover process with
@@ -378,11 +378,11 @@
   \medskip Option @{verbatim "-v"} increases the general level of
   verbosity.  Option @{verbatim "-l"} lists the source files that
   contribute to a session.
-*}
+\<close>
 
-subsubsection {* Examples *}
+subsubsection \<open>Examples\<close>
 
-text {*
+text \<open>
   Build a specific logic image:
 \begin{ttbox}
 isabelle build -b HOLCF
@@ -434,6 +434,6 @@
 \begin{ttbox}
 isabelle build -D '$AFP' -R -v -n
 \end{ttbox}
-*}
+\<close>
 
 end