author wenzelm
Wed, 26 Dec 2018 16:25:20 +0100
changeset 69505 cc2d676d5395
parent 69504 bda7527ccf05
child 69506 7d59af98af29
isabelle update_cartouches -t;
src/Doc/Classes/Classes.thy
src/Doc/Codegen/Adaptation.thy
src/Doc/Codegen/Computations.thy
src/Doc/Codegen/Evaluation.thy
src/Doc/Codegen/Foundations.thy
src/Doc/Codegen/Further.thy
src/Doc/Codegen/Inductive_Predicate.thy
src/Doc/Codegen/Introduction.thy
src/Doc/Codegen/Refinement.thy
src/Doc/Corec/Corec.thy
src/Doc/Datatypes/Datatypes.thy
src/Doc/Functions/Functions.thy
src/Doc/How_to_Prove_it/How_to_Prove_it.thy
src/Doc/Locales/Examples.thy
src/Doc/Locales/Examples1.thy
src/Doc/Locales/Examples2.thy
src/Doc/Locales/Examples3.thy
src/Doc/Logics_ZF/FOL_examples.thy
src/Doc/Logics_ZF/IFOL_examples.thy
src/Doc/Logics_ZF/If.thy
src/Doc/Logics_ZF/ZF_Isar.thy
src/Doc/Logics_ZF/ZF_examples.thy
src/Doc/Prog_Prove/Basics.thy
src/Doc/Prog_Prove/Bool_nat_list.thy
src/Doc/Prog_Prove/Isar.thy
src/Doc/Prog_Prove/Logic.thy
src/Doc/Prog_Prove/Types_and_funs.thy
src/Doc/Sugar/Sugar.thy
src/Doc/Tutorial/Advanced/Partial.thy
src/Doc/Tutorial/Advanced/WFrec.thy
src/Doc/Tutorial/Advanced/simp2.thy
src/Doc/Tutorial/CTL/Base.thy
src/Doc/Tutorial/CTL/CTL.thy
src/Doc/Tutorial/CTL/CTLind.thy
src/Doc/Tutorial/CTL/PDL.thy
src/Doc/Tutorial/CodeGen/CodeGen.thy
src/Doc/Tutorial/Datatype/ABexpr.thy
src/Doc/Tutorial/Datatype/Fundata.thy
src/Doc/Tutorial/Datatype/Nested.thy
src/Doc/Tutorial/Documents/Documents.thy
src/Doc/Tutorial/Fun/fun0.thy
src/Doc/Tutorial/Ifexpr/Ifexpr.thy
src/Doc/Tutorial/Inductive/AB.thy
src/Doc/Tutorial/Inductive/Advanced.thy
src/Doc/Tutorial/Inductive/Even.thy
src/Doc/Tutorial/Inductive/Mutual.thy
src/Doc/Tutorial/Inductive/Star.thy
src/Doc/Tutorial/Misc/AdvancedInd.thy
src/Doc/Tutorial/Misc/Itrev.thy
src/Doc/Tutorial/Misc/Option2.thy
src/Doc/Tutorial/Misc/Tree2.thy
src/Doc/Tutorial/Misc/case_exprs.thy
src/Doc/Tutorial/Misc/natsum.thy
src/Doc/Tutorial/Misc/pairs2.thy
src/Doc/Tutorial/Misc/prime_def.thy
src/Doc/Tutorial/Misc/simp.thy
src/Doc/Tutorial/Misc/types.thy
src/Doc/Tutorial/Protocol/Event.thy
src/Doc/Tutorial/Protocol/Message.thy
src/Doc/Tutorial/Protocol/NS_Public.thy
src/Doc/Tutorial/Protocol/Public.thy
src/Doc/Tutorial/Recdef/Induction.thy
src/Doc/Tutorial/Recdef/Nested1.thy
src/Doc/Tutorial/Recdef/Nested2.thy
src/Doc/Tutorial/Recdef/examples.thy
src/Doc/Tutorial/Recdef/simplification.thy
src/Doc/Tutorial/Recdef/termination.thy
src/Doc/Tutorial/Rules/find2.thy
src/Doc/Tutorial/ToyList/ToyList.thy
src/Doc/Tutorial/Trie/Trie.thy
src/Doc/Tutorial/Types/Axioms.thy
src/Doc/Tutorial/Types/Overloading.thy
src/Doc/Tutorial/Types/Pairs.thy
src/Doc/Tutorial/Types/Records.thy
src/Doc/Tutorial/Types/Typedefs.thy
src/Doc/Typeclass_Hierarchy/Typeclass_Hierarchy.thy
src/HOL/Data_Structures/AA_Set.thy
src/HOL/Data_Structures/AList_Upd_Del.thy
src/HOL/Data_Structures/Sorted_Less.thy
src/HOL/IMP/AExp.thy
src/HOL/IMP/Abs_Int0.thy
src/HOL/IMP/Abs_Int1.thy
src/HOL/IMP/Abs_Int1_parity.thy
src/HOL/IMP/Abs_Int2_ivl.thy
src/HOL/IMP/Abs_Int3.thy
src/HOL/IMP/Big_Step.thy
src/HOL/IMP/Compiler.thy
src/HOL/IMP/Compiler2.thy
src/HOL/IMP/Hoare_Examples.thy
src/HOL/IMP/Hoare_Total.thy
src/HOL/IMP/Hoare_Total_EX.thy
src/HOL/IMP/Hoare_Total_EX2.thy
src/HOL/IMP/Live_True.thy
src/HOL/IMP/Sec_Type_Expr.thy
src/HOL/IMP/Small_Step.thy
src/HOL/IMP/Vars.thy
--- a/src/Doc/Classes/Classes.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Classes/Classes.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -10,30 +10,30 @@
   of overloading\footnote{throughout this tutorial, we are referring
   to classical Haskell 1.0 type classes, not considering later
   additions in expressiveness}.  As a canonical example, a polymorphic
-  equality function @{text "eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} which is overloaded on
-  different types for @{text "\<alpha>"}, which is achieved by splitting
-  introduction of the @{text eq} function from its overloaded
-  definitions by means of @{text class} and @{text instance}
+  equality function \<open>eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool\<close> is overloaded on
+  different types for \<open>\<alpha>\<close>; this is achieved by splitting the
+  introduction of the \<open>eq\<close> function from its overloaded
+  definitions by means of \<open>class\<close> and \<open>instance\<close>
   declarations: \footnote{syntax here is a kind of isabellized
   Haskell}
 
   \begin{quote}
 
-  \<^noindent> @{text "class eq where"} \\
-  \hspace*{2ex}@{text "eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"}
+  \<^noindent> \<open>class eq where\<close> \\
+  \hspace*{2ex}\<open>eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool\<close>
 
-  \<^medskip>\<^noindent> @{text "instance nat :: eq where"} \\
-  \hspace*{2ex}@{text "eq 0 0 = True"} \\
-  \hspace*{2ex}@{text "eq 0 _ = False"} \\
-  \hspace*{2ex}@{text "eq _ 0 = False"} \\
-  \hspace*{2ex}@{text "eq (Suc n) (Suc m) = eq n m"}
+  \<^medskip>\<^noindent> \<open>instance nat :: eq where\<close> \\
+  \hspace*{2ex}\<open>eq 0 0 = True\<close> \\
+  \hspace*{2ex}\<open>eq 0 _ = False\<close> \\
+  \hspace*{2ex}\<open>eq _ 0 = False\<close> \\
+  \hspace*{2ex}\<open>eq (Suc n) (Suc m) = eq n m\<close>
 
-  \<^medskip>\<^noindent> @{text "instance (\<alpha>::eq, \<beta>::eq) pair :: eq where"} \\
-  \hspace*{2ex}@{text "eq (x1, y1) (x2, y2) = eq x1 x2 \<and> eq y1 y2"}
+  \<^medskip>\<^noindent> \<open>instance (\<alpha>::eq, \<beta>::eq) pair :: eq where\<close> \\
+  \hspace*{2ex}\<open>eq (x1, y1) (x2, y2) = eq x1 x2 \<and> eq y1 y2\<close>
 
-  \<^medskip>\<^noindent> @{text "class ord extends eq where"} \\
-  \hspace*{2ex}@{text "less_eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} \\
-  \hspace*{2ex}@{text "less :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"}
+  \<^medskip>\<^noindent> \<open>class ord extends eq where\<close> \\
+  \hspace*{2ex}\<open>less_eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool\<close> \\
+  \hspace*{2ex}\<open>less :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool\<close>
 
   \end{quote}
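
The Isabelle/HOL counterpart of this pseudo-Haskell class declaration is, as a
minimal sketch (writing 'a where the tutorial renders \<alpha>):

  class eq =
    fixes eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
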
 
@@ -49,19 +49,19 @@
   correspond to interfaces in object-oriented languages like Java; so,
   it is naturally desirable that type classes not only provide
   functions (class parameters) but also state specifications
-  implementations must obey.  For example, the @{text "class eq"}
+  implementations must obey.  For example, the \<open>class eq\<close>
   above could be given the following specification, demanding that
-  @{text "class eq"} is an equivalence relation obeying reflexivity,
+  \<open>class eq\<close> is an equivalence relation obeying reflexivity,
   symmetry and transitivity:
 
   \begin{quote}
 
-  \<^noindent> @{text "class eq where"} \\
-  \hspace*{2ex}@{text "eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} \\
-  @{text "satisfying"} \\
-  \hspace*{2ex}@{text "refl: eq x x"} \\
-  \hspace*{2ex}@{text "sym: eq x y \<longleftrightarrow> eq x y"} \\
-  \hspace*{2ex}@{text "trans: eq x y \<and> eq y z \<longrightarrow> eq x z"}
+  \<^noindent> \<open>class eq where\<close> \\
+  \hspace*{2ex}\<open>eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool\<close> \\
+  \<open>satisfying\<close> \\
+  \hspace*{2ex}\<open>refl: eq x x\<close> \\
+  \hspace*{2ex}\<open>sym: eq x y \<longleftrightarrow> eq y x\<close> \\
+  \hspace*{2ex}\<open>trans: eq x y \<and> eq y z \<longrightarrow> eq x z\<close>
 
   \end{quote}
 
@@ -96,8 +96,7 @@
 subsection \<open>Class definition\<close>
 
 text \<open>
-  Depending on an arbitrary type @{text "\<alpha>"}, class @{text
-  "semigroup"} introduces a binary operator @{text "(\<otimes>)"} that is
+  Depending on an arbitrary type \<open>\<alpha>\<close>, class \<open>semigroup\<close> introduces a binary operator \<open>(\<otimes>)\<close> that is
   assumed to be associative:
 \<close>
 
@@ -121,8 +120,7 @@
 
 text \<open>
   The concrete type @{typ int} is made a @{class semigroup} instance
-  by providing a suitable definition for the class parameter @{text
-  "(\<otimes>)"} and a proof for the specification of @{fact assoc}.  This is
+  by providing a suitable definition for the class parameter \<open>(\<otimes>)\<close> and a proof for the specification of @{fact assoc}.  This is
   accomplished by the @{command instantiation} target:
 \<close>
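
A sketch of such an instantiation, assuming the infix \<otimes> for the class
parameter mult and the library fact add.assoc:

  instantiation int :: semigroup
  begin

  definition mult_int_def: "i \<otimes> j = i + (j::int)"

  instance proof
    fix i j k :: int
    show "(i \<otimes> j) \<otimes> k = i \<otimes> (j \<otimes> k)"
      unfolding mult_int_def by (simp add: add.assoc)
  qed

  end
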
 
@@ -175,10 +173,10 @@
 end %quote
 
 text \<open>
-  \<^noindent> Note the occurrence of the name @{text mult_nat} in the
+  \<^noindent> Note the occurrence of the name \<open>mult_nat\<close> in the
   primrec declaration; by default, the local name of a class operation
-  @{text f} to be instantiated on type constructor @{text \<kappa>} is
-  mangled as @{text f_\<kappa>}.  In case of uncertainty, these names may be
+  \<open>f\<close> to be instantiated on type constructor \<open>\<kappa>\<close> is
+  mangled as \<open>f_\<kappa>\<close>.  In case of uncertainty, these names may be
   inspected using the @{command "print_context"} command.
 \<close>
 
@@ -206,7 +204,7 @@
 
 text \<open>
   \<^noindent> Associativity of product semigroups is established using
-  the definition of @{text "(\<otimes>)"} on products and the hypothetical
+  the definition of \<open>(\<otimes>)\<close> on products and the hypothetical
   associativity of the type components; these hypotheses are
   legitimate due to the @{class semigroup} constraints imposed on the
   type components by the @{command instance} proposition.  Indeed,
@@ -217,9 +215,9 @@
 subsection \<open>Subclassing\<close>
 
 text \<open>
-  We define a subclass @{text monoidl} (a semigroup with a left-hand
+  We define a subclass \<open>monoidl\<close> (a semigroup with a left-hand
   neutral) by extending @{class semigroup} with one additional
-  parameter @{text neutral} together with its characteristic property:
+  parameter \<open>neutral\<close> together with its characteristic property:
 \<close>
 
 class %quote monoidl = semigroup +
@@ -303,8 +301,7 @@
 end %quote
 
 text \<open>
-  \<^noindent> To finish our small algebra example, we add a @{text
-  group} class with a corresponding instance:
+  \<^noindent> To finish our small algebra example, we add a \<open>group\<close> class with a corresponding instance:
 \<close>
 
 class %quote group = monoidl +
@@ -356,7 +353,7 @@
 
 text \<open>
   \<^noindent> The connection to the type system is done by means of a
-  primitive type class @{text "idem"}, together with a corresponding
+  primitive type class \<open>idem\<close>, together with a corresponding
   interpretation:
 \<close>
 
@@ -366,8 +363,8 @@
 
 text \<open>
   \<^noindent> This gives you the full power of the Isabelle module system;
-  conclusions in locale @{text idem} are implicitly propagated
-  to class @{text idem}.
+  conclusions in locale \<open>idem\<close> are implicitly propagated
+  to class \<open>idem\<close>.
 \<close> (*<*)setup %invisible \<open>Sign.parent_path\<close>
 (*>*)
 subsection \<open>Abstract reasoning\<close>
@@ -375,8 +372,8 @@
 text \<open>
   Isabelle locales enable reasoning at a general level, while results
   are implicitly transferred to all instances.  For example, we can
-  now establish the @{text "left_cancel"} lemma for groups, which
-  states that the function @{text "(x \<otimes>)"} is injective:
+  now establish the \<open>left_cancel\<close> lemma for groups, which
+  states that the function \<open>(x \<otimes>)\<close> is injective:
 \<close>
 
 lemma %quote (in group) left_cancel: "x \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"
@@ -395,8 +392,8 @@
   specification indicates that the result is recorded within that
   context for later use.  This local theorem is also lifted to the
   global one @{fact "group.left_cancel:"} @{prop [source] "\<And>x y z ::
-  \<alpha>::group. x \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"}.  Since type @{text "int"} has been
-  made an instance of @{text "group"} before, we may refer to that
+  \<alpha>::group. x \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"}.  Since type \<open>int\<close> has been
+  made an instance of \<open>group\<close> before, we may refer to that
   fact as well: @{prop [source] "\<And>x y z :: int. x \<otimes> y = x \<otimes> z \<longleftrightarrow> y =
   z"}.
 \<close>
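
For instance, the int instance of the fact can be used directly (a sketch,
assuming the group instantiation of int mentioned above):

  lemma "(x::int) \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"
    by (fact group.left_cancel)
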
@@ -413,7 +410,7 @@
   | "pow_nat (Suc n) x = x \<otimes> pow_nat n x"
 
 text \<open>
-  \<^noindent> If the locale @{text group} is also a class, this local
+  \<^noindent> If the locale \<open>group\<close> is also a class, this local
   definition is propagated onto a global definition of @{term [source]
   "pow_nat :: nat \<Rightarrow> \<alpha>::monoid \<Rightarrow> \<alpha>::monoid"} with corresponding theorems
 
@@ -435,7 +432,7 @@
   further by stating that type classes essentially correspond to
   functors that have a canonical interpretation as type classes.
   There is also the possibility of other interpretations.  For
-  example, @{text list}s also form a monoid with @{text append} and
+  example, \<open>list\<close>s also form a monoid with \<open>append\<close> and
   @{term "[]"} as operations, but it seems inappropriate to apply to
   lists the same operations as for genuinely algebraic types.  In such
   a case, we can simply make a particular interpretation of monoids
@@ -472,18 +469,17 @@
 text \<open>
   \<^noindent> This pattern is also helpful to reuse abstract
   specifications on the \emph{same} type.  For example, think of a
-  class @{text preorder}; for type @{typ nat}, there are at least two
+  class \<open>preorder\<close>; for type @{typ nat}, there are at least two
   possible instances: the natural order or the order induced by the
   divides relation.  But only one of these instances can be used for
-  @{command instantiation}; using the locale behind the class @{text
-  preorder}, it is still possible to utilise the same abstract
+  @{command instantiation}; using the locale behind the class \<open>preorder\<close>, it is still possible to utilise the same abstract
   specification again using @{command interpretation}.
 \<close>
 
 subsection \<open>Additional subclass relations\<close>
 
 text \<open>
-  Any @{text "group"} is also a @{text "monoid"}; this can be made
+  Any \<open>group\<close> is also a \<open>monoid\<close>; this can be made
   explicit by claiming an additional subclass relation, together with
   a proof of the logical difference:
 \<close>
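
The claim itself takes the following shape -- a sketch whose proof obligation
the tutorial discharges from neutl and invl (elided here):

  subclass (in group) monoid
  proof
    fix x
    show "x \<otimes> \<one> = x"
      sorry  (* discharged from neutl and invl in the tutorial *)
  qed
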
@@ -498,8 +494,8 @@
 
 text \<open>
   The logical proof is carried out on the locale level.  Afterwards it
-  is propagated to the type system, making @{text group} an instance
-  of @{text monoid} by adding an additional edge to the graph of
+  is propagated to the type system, making \<open>group\<close> an instance
+  of \<open>monoid\<close> by adding an additional edge to the graph of
   subclass relations (\figref{fig:subclass}).
 
   \begin{figure}[htbp]
@@ -507,33 +503,32 @@
      \small
      \unitlength 0.6mm
      \begin{picture}(40,60)(0,0)
-       \put(20,60){\makebox(0,0){@{text semigroup}}}
-       \put(20,40){\makebox(0,0){@{text monoidl}}}
-       \put(00,20){\makebox(0,0){@{text monoid}}}
-       \put(40,00){\makebox(0,0){@{text group}}}
+       \put(20,60){\makebox(0,0){\<open>semigroup\<close>}}
+       \put(20,40){\makebox(0,0){\<open>monoidl\<close>}}
+       \put(00,20){\makebox(0,0){\<open>monoid\<close>}}
+       \put(40,00){\makebox(0,0){\<open>group\<close>}}
        \put(20,55){\vector(0,-1){10}}
        \put(15,35){\vector(-1,-1){10}}
        \put(25,35){\vector(1,-3){10}}
      \end{picture}
      \hspace{8em}
      \begin{picture}(40,60)(0,0)
-       \put(20,60){\makebox(0,0){@{text semigroup}}}
-       \put(20,40){\makebox(0,0){@{text monoidl}}}
-       \put(00,20){\makebox(0,0){@{text monoid}}}
-       \put(40,00){\makebox(0,0){@{text group}}}
+       \put(20,60){\makebox(0,0){\<open>semigroup\<close>}}
+       \put(20,40){\makebox(0,0){\<open>monoidl\<close>}}
+       \put(00,20){\makebox(0,0){\<open>monoid\<close>}}
+       \put(40,00){\makebox(0,0){\<open>group\<close>}}
        \put(20,55){\vector(0,-1){10}}
        \put(15,35){\vector(-1,-1){10}}
        \put(05,15){\vector(3,-1){30}}
      \end{picture}
      \caption{Subclass relationship of monoids and groups:
         before and after establishing the relationship
-        @{text "group \<subseteq> monoid"};  transitive edges are left out.}
+        \<open>group \<subseteq> monoid\<close>;  transitive edges are left out.}
      \label{fig:subclass}
    \end{center}
   \end{figure}
 
-  For illustration, a derived definition in @{text group} using @{text
-  pow_nat}
+  For illustration, a derived definition in \<open>group\<close> using \<open>pow_nat\<close>
 \<close>
 
 definition %quote (in group) pow_int :: "int \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>" where
@@ -567,10 +562,10 @@
 
 text \<open>
   \<^noindent> Here in example 1, the term refers to the local class
-  operation @{text "mult [\<alpha>]"}, whereas in example 2 the type
-  constraint enforces the global class operation @{text "mult [nat]"}.
+  operation \<open>mult [\<alpha>]\<close>, whereas in example 2 the type
+  constraint enforces the global class operation \<open>mult [nat]\<close>.
   In the global context in example 3, the reference is to the
-  polymorphic global class operation @{text "mult [?\<alpha> :: semigroup]"}.
+  polymorphic global class operation \<open>mult [?\<alpha> :: semigroup]\<close>.
 \<close>
 
 section \<open>Further issues\<close>
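
The three examples referred to in the last hunk above can be reconstructed as
a sketch:

  context semigroup
  begin

  term "x \<otimes> y"         (* example 1: local operation mult [\<alpha>] *)
  term "(x::nat) \<otimes> y"  (* example 2: global operation mult [nat] *)

  end

  term "x \<otimes> y"  (* example 3: polymorphic global operation mult [?\<alpha> :: semigroup] *)
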
--- a/src/Doc/Codegen/Adaptation.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Adaptation.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -34,7 +34,7 @@
     \item improving readability and aesthetics of generated code
     \item gaining efficiency
     \item interface with language parts which have no direct counterpart
-      in @{text "HOL"} (say, imperative data structures)
+      in \<open>HOL\<close> (say, imperative data structures)
   \end{itemize}
 
   \noindent Generally, you should avoid using those features yourself
@@ -55,7 +55,7 @@
   \end{itemize}
 
   \noindent However, even if you ought to refrain from setting up
-  adaptation yourself, already @{text "HOL"} comes with some
+  adaptation yourself, already \<open>HOL\<close> comes with some
   reasonable default adaptations (say, using target language list
   syntax).  There are also some common adaptation cases which you can
   set up by importing particular library theories.  In order to
@@ -112,34 +112,30 @@
   \end{figure}
 
   \noindent In the tame view, code generation acts as broker between
-  @{text logic}, @{text "intermediate language"} and @{text "target
-  language"} by means of @{text translation} and @{text
-  serialisation}; for the latter, the serialiser has to observe the
-  structure of the @{text language} itself plus some @{text reserved}
+  \<open>logic\<close>, \<open>intermediate language\<close> and \<open>target
+  language\<close> by means of \<open>translation\<close> and \<open>serialisation\<close>; for the latter, the serialiser has to observe the
+  structure of the \<open>language\<close> itself plus some \<open>reserved\<close>
   keywords which have to be avoided for generated code.  However, if
-  you consider @{text adaptation} mechanisms, the code generated by
+  you consider \<open>adaptation\<close> mechanisms, the code generated by
   the serializer is just the tip of the iceberg:
 
   \begin{itemize}
 
-    \item @{text serialisation} can be \emph{parametrised} such that
+    \item \<open>serialisation\<close> can be \emph{parametrised} such that
       logical entities are mapped to target-specific ones
       (e.g. target-specific list syntax, see also
       \secref{sec:adaptation_mechanisms})
 
     \item Such parametrisations can involve references to a
-      target-specific standard @{text library} (e.g. using the @{text
-      Haskell} @{verbatim Maybe} type instead of the @{text HOL}
+      target-specific standard \<open>library\<close> (e.g. using the \<open>Haskell\<close> @{verbatim Maybe} type instead of the \<open>HOL\<close>
       @{type "option"} type); if such are used, the corresponding
       identifiers (in our example, @{verbatim Maybe}, @{verbatim
-      Nothing} and @{verbatim Just}) also have to be considered @{text
-      reserved}.
+      Nothing} and @{verbatim Just}) also have to be considered \<open>reserved\<close>.
 
     \item Even more, the user can enrich the library of the
-      target-language by providing code snippets (\qt{@{text
-      "includes"}}) which are prepended to any generated code (see
+      target-language by providing code snippets (\qt{\<open>includes\<close>}) which are prepended to any generated code (see
       \secref{sec:include}); this typically also involves further
-      @{text reserved} identifiers.
+      \<open>reserved\<close> identifiers.
 
   \end{itemize}
 
@@ -166,17 +162,17 @@
        is mapped to target-language built-in integers; @{typ natural}
        is implemented as abstract type over @{typ integer}.
        Useful for code setups which involve e.g.~indexing
-       of target-language arrays.  Part of @{text "HOL-Main"}.
+       of target-language arrays.  Part of \<open>HOL-Main\<close>.
 
     \item[@{theory "HOL.String"}] provides an additional datatype @{typ
        String.literal} which is isomorphic to lists of 7-bit (ASCII) characters;
        @{typ String.literal}s are mapped to target-language strings.
 
        Literal values of type @{typ String.literal} can be written
-       as @{text "STR ''\<dots>''"} for sequences of printable characters and
-       @{text "STR 0x\<dots>"} for one single ASCII code point given
+       as \<open>STR ''\<dots>''\<close> for sequences of printable characters and
+       \<open>STR 0x\<dots>\<close> for one single ASCII code point given
        as hexadecimal numeral; @{typ String.literal} supports concatenation
-       @{text "\<dots> + \<dots>"} for all standard target languages.
+       \<open>\<dots> + \<dots>\<close> for all standard target languages.
 
        Note that the particular notion of \qt{string} is target-language
        specific (sequence of 8-bit units, sequence of unicode code points, \ldots);
@@ -195,25 +191,25 @@
        String.explode} and @{term_type String.implode}
        are implemented.
        
-       Part of @{text "HOL-Main"}.
+       Part of \<open>HOL-Main\<close>.
 
-    \item[@{text "Code_Target_Int"}] implements type @{typ int}
+    \item[\<open>Code_Target_Int\<close>] implements type @{typ int}
        by @{typ integer} and thus by target-language built-in integers.
 
-    \item[@{text "Code_Binary_Nat"}] implements type
+    \item[\<open>Code_Binary_Nat\<close>] implements type
        @{typ nat} using a binary rather than a linear representation,
        which yields a considerable speedup for computations.
        Pattern matching with @{term "0::nat"} / @{const "Suc"} is eliminated
        by a preprocessor.\label{abstract_nat}
 
-    \item[@{text "Code_Target_Nat"}] implements type @{typ nat}
+    \item[\<open>Code_Target_Nat\<close>] implements type @{typ nat}
        by @{typ integer} and thus by target-language built-in integers.
        Pattern matching with @{term "0::nat"} / @{const "Suc"} is eliminated
        by a preprocessor.
 
-    \item[@{text "Code_Target_Numeral"}] is a convenience theory
-       containing both @{text "Code_Target_Nat"} and
-       @{text "Code_Target_Int"}.
+    \item[\<open>Code_Target_Numeral\<close>] is a convenience theory
+       containing both \<open>Code_Target_Nat\<close> and
+       \<open>Code_Target_Int\<close>.
 
     \item[@{theory "HOL-Library.IArray"}] provides a type @{typ "'a iarray"}
        isomorphic to lists but implemented by (effectively immutable)
@@ -337,11 +333,11 @@
 \<close>
 
 
-subsection \<open>@{text Haskell} serialisation\<close>
+subsection \<open>\<open>Haskell\<close> serialisation\<close>
 
 text \<open>
-  For convenience, the default @{text HOL} setup for @{text Haskell}
-  maps the @{class equal} class to its counterpart in @{text Haskell},
+  For convenience, the default \<open>HOL\<close> setup for \<open>Haskell\<close>
+  maps the @{class equal} class to its counterpart in \<open>Haskell\<close>,
   giving custom serialisations for the class @{class equal}
   and its operation @{const [source] HOL.equal}.
 \<close>
@@ -352,9 +348,8 @@
 
 text \<open>
   \noindent A problem now occurs whenever a type which is an instance
-  of @{class equal} in @{text HOL} is mapped on a @{text
-  Haskell}-built-in type which is also an instance of @{text Haskell}
-  @{text Eq}:
+  of @{class equal} in \<open>HOL\<close> is mapped to a \<open>Haskell\<close>-built-in type which is also an instance of \<open>Haskell\<close>
+  \<open>Eq\<close>:
 \<close>
 
 typedecl %quote bar
@@ -373,7 +368,7 @@
 
 text \<open>
   \noindent The code generator would produce an additional instance,
-  which of course is rejected by the @{text Haskell} compiler.  To
+  which of course is rejected by the \<open>Haskell\<close> compiler.  To
   suppress this additional instance:
 \<close>
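
The suppression itself is a one-line adaptation (a sketch of the
code_printing form the tutorial uses, for the typedecl bar above):

  code_printing
    class_instance bar :: "HOL.equal" \<rightharpoonup> (Haskell) -
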
 
--- a/src/Doc/Codegen/Computations.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Computations.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -5,7 +5,7 @@
 
 section \<open>Computations \label{sec:computations}\<close>
 
-subsection \<open>Prelude -- The @{text code} antiquotation \label{sec:code_antiq}\<close>
+subsection \<open>Prelude -- The \<open>code\<close> antiquotation \label{sec:code_antiq}\<close>
 
 text \<open>
   The @{ML_antiquotation_def code} antiquotation allows including constants
@@ -42,46 +42,46 @@
 
 text \<open>
   Computations embody the simple idea that for each
-  monomorphic Isabelle/HOL term of type @{text \<tau>} by virtue of
-  code generation there exists an corresponding ML type @{text T} and
-  a morphism @{text "\<Phi> :: \<tau> \<rightarrow> T"} satisfying
-  @{text "\<Phi> (t\<^sub>1 \<cdot> t\<^sub>2) = \<Phi> t\<^sub>1 \<cdot> \<Phi> t\<^sub>2"}, with @{text \<cdot>} denoting
+  monomorphic Isabelle/HOL term of type \<open>\<tau>\<close> by virtue of
+  code generation there exists a corresponding ML type \<open>T\<close> and
+  a morphism \<open>\<Phi> :: \<tau> \<rightarrow> T\<close> satisfying
+  \<open>\<Phi> (t\<^sub>1 \<cdot> t\<^sub>2) = \<Phi> t\<^sub>1 \<cdot> \<Phi> t\<^sub>2\<close>, with \<open>\<cdot>\<close> denoting
   term application.
 
-  For a given Isabelle/HOL type @{text \<tau>}, parts of @{text \<Phi>} can be
-  implemented by a corresponding ML function @{text "\<phi>\<^sub>\<tau> :: term \<rightarrow> T"}.
+  For a given Isabelle/HOL type \<open>\<tau>\<close>, parts of \<open>\<Phi>\<close> can be
+  implemented by a corresponding ML function \<open>\<phi>\<^sub>\<tau> :: term \<rightarrow> T\<close>.
   How?
 
     \<^descr> \<open>Let input be a constant C :: \<tau>.\<close> \\
-      Then @{text "\<phi>\<^sub>\<tau> C = f\<^sub>C"} with @{text "f\<^sub>C"} being
-      the image of @{text C} under code generation.
+      Then \<open>\<phi>\<^sub>\<tau> C = f\<^sub>C\<close> with \<open>f\<^sub>C\<close> being
+      the image of \<open>C\<close> under code generation.
 
     \<^descr> \<open>Let input be an application (t\<^sub>1 \<cdot> t\<^sub>2) :: \<tau>.\<close> \\
-      Then @{text "\<phi>\<^sub>\<tau> (t\<^sub>1 \<cdot> t\<^sub>2) = \<phi>\<^sub>\<tau> t\<^sub>1 (\<phi>\<^sub>\<tau> t\<^sub>2)"}.
+      Then \<open>\<phi>\<^sub>\<tau> (t\<^sub>1 \<cdot> t\<^sub>2) = \<phi>\<^sub>\<tau> t\<^sub>1 (\<phi>\<^sub>\<tau> t\<^sub>2)\<close>.
 
   \noindent Using these trivial properties, each monomorphic constant
-  @{text "C : \<^vec>\<tau>\<^sub>n \<rightarrow> \<tau>"} yields the following
+  \<open>C : \<^vec>\<tau>\<^sub>n \<rightarrow> \<tau>\<close> yields the following
   equations:
 \<close>
 
 text %quote \<open>
-  @{text "\<phi>\<^bsub>(\<tau>\<^sub>1 \<rightarrow> \<tau>\<^sub>2 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>)\<^esub> C = f\<^sub>C"} \\
-  @{text "\<phi>\<^bsub>(\<tau>\<^sub>2 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>)\<^esub> (C \<cdot> t\<^sub>1) = f\<^sub>C (\<phi>\<^bsub>\<tau>\<^sub>1\<^esub> t\<^sub>1)"} \\
-  @{text "\<dots>"} \\
-  @{text "\<phi>\<^bsub>\<tau>\<^esub> (C \<cdot> t\<^sub>1 \<cdot> \<dots> \<cdot> t\<^sub>n) = f\<^sub>C (\<phi>\<^bsub>\<tau>\<^sub>1\<^esub> t\<^sub>1) \<dots> (\<phi>\<^bsub>\<tau>\<^sub>n\<^esub> t\<^sub>n)"}
+  \<open>\<phi>\<^bsub>(\<tau>\<^sub>1 \<rightarrow> \<tau>\<^sub>2 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>)\<^esub> C = f\<^sub>C\<close> \\
+  \<open>\<phi>\<^bsub>(\<tau>\<^sub>2 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>)\<^esub> (C \<cdot> t\<^sub>1) = f\<^sub>C (\<phi>\<^bsub>\<tau>\<^sub>1\<^esub> t\<^sub>1)\<close> \\
+  \<open>\<dots>\<close> \\
+  \<open>\<phi>\<^bsub>\<tau>\<^esub> (C \<cdot> t\<^sub>1 \<cdot> \<dots> \<cdot> t\<^sub>n) = f\<^sub>C (\<phi>\<^bsub>\<tau>\<^sub>1\<^esub> t\<^sub>1) \<dots> (\<phi>\<^bsub>\<tau>\<^sub>n\<^esub> t\<^sub>n)\<close>
 \<close>
   
 text \<open>
   \noindent Hence a computation is characterized as follows:
 
-    \<^item> Let @{text "input constants"} denote a set of monomorphic constants.
+    \<^item> Let \<open>input constants\<close> denote a set of monomorphic constants.
 
-    \<^item> Let @{text \<tau>} denote a monomorphic type and @{text "'ml"} be a schematic
+    \<^item> Let \<open>\<tau>\<close> denote a monomorphic type and \<open>'ml\<close> be a schematic
       placeholder for its corresponding type in ML under code generation.
 
     \<^item> Then the corresponding computation is an ML function of type
       @{ML_type "Proof.context -> term -> 'ml"}
-      partially implementing the morphism @{text "\<Phi> :: \<tau> \<rightarrow> T"} for all
+      partially implementing the morphism \<open>\<Phi> :: \<tau> \<rightarrow> T\<close> for all
       \<^emph>\<open>input terms\<close> consisting only of input constants and applications.
 
   \noindent The charming idea is that all required code is automatically generated
@@ -91,7 +91,7 @@
 \<close>
 
 
-subsection \<open>The @{text computation} antiquotation\<close>
+subsection \<open>The \<open>computation\<close> antiquotation\<close>
 
 text \<open>
   The following example illustrates its basic usage:
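
The elided example following this sentence is, in reconstructed sketch form,
an ML block along these lines; comp_nat and int_of_nat are names chosen here,
and HOLogic.mk_nat rebuilds a nat term from an ML integer:

  ML \<open>
    local

    fun int_of_nat @{code "0 :: nat"} = 0
      | int_of_nat (@{code Suc} n) = int_of_nat n + 1;

    in

    val comp_nat = @{computation nat terms:
      "plus :: nat \<Rightarrow> nat \<Rightarrow> nat" "times :: nat \<Rightarrow> nat \<Rightarrow> nat"
      datatypes: nat}
      (fn post => post o HOLogic.mk_nat o int_of_nat);

    end
  \<close>
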
@@ -124,23 +124,23 @@
 
     \<^item> Input constants are specified the following ways:
 
-        \<^item> Each term following @{text "terms:"} specifies all constants
+        \<^item> Each term following \<open>terms:\<close> specifies all constants
           it contains as input constants.
 
-        \<^item> Each type following @{text "datatypes:"} specifies all constructors
+        \<^item> Each type following \<open>datatypes:\<close> specifies all constructors
           of the corresponding code datatype as input constants.  Note that
           this does not increase expressiveness but succinctness for datatypes
           with many constructors.  Abstract type constructors are skipped
           silently.
 
-    \<^item> The code generated by a @{text computation} antiquotation takes a functional argument
+    \<^item> The code generated by a \<open>computation\<close> antiquotation takes a functional argument
       which describes how to conclude the computation.  What's the rationale
       behind this?
 
         \<^item> There is no automated way to generate a reconstruction function
          from the resulting ML type to an Isabelle term -- this is the
           responsibility of the implementor.  One possible approach
-          for robust term reconstruction is the @{text code} antiquotation.
+          for robust term reconstruction is the \<open>code\<close> antiquotation.
 
         \<^item> Both statically specified input constants and dynamically provided input
           terms are subject to preprocessing.  Likewise the result
@@ -205,24 +205,24 @@
 text \<open>
     \<^descr> \<open>Complete type coverage.\<close> Specified input constants must
       be \<^emph>\<open>complete\<close> in the sense that for each
-      required type @{text \<tau>} there is at least one corresponding
+      required type \<open>\<tau>\<close> there is at least one corresponding
       input constant which can actually \<^emph>\<open>construct\<close> a concrete
-      value of type @{text \<tau>}, potentially requiring more types recursively;
+      value of type \<open>\<tau>\<close>, potentially requiring more types recursively;
       otherwise the system of equations cannot be generated properly.
      Hence such incomplete sets of input constants are rejected immediately.
 
    \<^descr> \<open>Unsuitable right hand sides.\<close> The generated code for a computation
       must compile in the strict ML runtime environment.  This imposes
       the technical restriction that each compiled input constant
-      @{text f\<^sub>C} on the right hand side of a generated equations
+      \<open>f\<^sub>C\<close> on the right hand side of a generated equation
       must compile without throwing an exception.  That rules
       out pathological examples like @{term [source] "undefined :: nat"}
       as input constants, as well as abstract constructors (cf. \secref{sec:invariant}).
 
     \<^descr> \<open>Preprocessing.\<close> For consistency, input constants are subject
       to preprocessing;  however, the overall approach requires
-      to operate on constants @{text C} and their respective compiled images
-      @{text f\<^sub>C}.\footnote{Technical restrictions of the implementation
+      to operate on constants \<open>C\<close> and their respective compiled images
+      \<open>f\<^sub>C\<close>.\footnote{Technical restrictions of the implementation
       enforce this, although those could be lifted in the future.}
       This is a problem whenever preprocessing maps an input constant
       to a non-constant.
@@ -258,7 +258,7 @@
 \<close>
 
   
-subsection \<open>Computations using the @{text computation_conv} antiquotation\<close>
+subsection \<open>Computations using the \<open>computation_conv\<close> antiquotation\<close>
 
 text \<open>
   Computations are a device to implement fast proof procedures.
@@ -301,7 +301,7 @@
       will only yield \qt{valid} results in the context of that particular
       computation, the implementor must make sure that it does not leave
       the local ML scope;  in this example, this is achieved using
-      an explicit @{text local} ML block.  The very presence of the oracle
+      an explicit \<open>local\<close> ML block.  The very presence of the oracle
       in the code acknowledges that each computation requires explicit thinking
       before it can be considered trustworthy!
 
@@ -383,10 +383,10 @@
 \<close>
 
 
-subsection \<open>Computations using the @{text computation_check} antiquotation\<close>
+subsection \<open>Computations using the \<open>computation_check\<close> antiquotation\<close>
 
 text \<open>
-  The @{text computation_check} antiquotation is convenient if
+  The \<open>computation_check\<close> antiquotation is convenient if
   only positive checking of propositions is desired, because then
   the result type is fixed (@{typ prop}) and all the technical
   matter concerning postprocessing and oracles is done in the framework
@@ -537,8 +537,8 @@
   to @{const interp} does not contain any free variables and can thus be evaluated
   using evaluation.
 
-  A less meager example can be found in the AFP, session @{text "Regular-Sets"},
-  theory @{text Regexp_Method}.
+  A less meager example can be found in the AFP, session \<open>Regular-Sets\<close>,
+  theory \<open>Regexp_Method\<close>.
 \<close>
 
 end
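
For reference, the kind of pattern matching via the \<open>code\<close> antiquotation that
the prelude of this theory describes looks as follows -- a reconstructed
sketch over a hypothetical datatype form:

  datatype form = T | F | And form form | Or form form

  ML \<open>
    fun eval_form @{code T} = true
      | eval_form @{code F} = false
      | eval_form (@{code And} (p, q)) = eval_form p andalso eval_form q
      | eval_form (@{code Or} (p, q)) = eval_form p orelse eval_form q;
  \<close>
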
--- a/src/Doc/Codegen/Evaluation.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Evaluation.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -42,7 +42,7 @@
 \<close>
 
 
-subsubsection \<open>The simplifier (@{text simp})\<close>
+subsubsection \<open>The simplifier (\<open>simp\<close>)\<close>
 
 text \<open>
   The simplest way for evaluation is just using the simplifier with
@@ -54,7 +54,7 @@
 \<close>
 
 
-subsubsection \<open>Normalization by evaluation (@{text nbe})\<close>
+subsubsection \<open>Normalization by evaluation (\<open>nbe\<close>)\<close>
 
 text \<open>
   Normalization by evaluation @{cite "Aehlig-Haftmann-Nipkow:2008:nbe"}
@@ -64,7 +64,7 @@
 \<close>
 
 
-subsubsection \<open>Evaluation in ML (@{text code})\<close>
+subsubsection \<open>Evaluation in ML (\<open>code\<close>)\<close>
 
 text \<open>
   Considerable performance can be achieved using evaluation in ML, at the cost
@@ -103,7 +103,7 @@
 
 text \<open>
   To employ dynamic evaluation in documents, there is also
-  a @{text value} antiquotation with the same evaluation techniques 
+  a \<open>value\<close> antiquotation with the same evaluation techniques 
   as @{command value}.
 \<close>
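
In concrete syntax, the dynamic evaluation techniques just discussed read as
follows (a sketch; the list argument is arbitrary):

  value "rev [1::nat, 2, 3]"           (* evaluation in ML, the default *)
  value [simp] "rev [1::nat, 2, 3]"    (* via the simplifier *)
  value [nbe] "rev [1::nat, 2, 3]"     (* normalization by evaluation *)
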
 
@@ -156,8 +156,8 @@
   \newcommand{\ttsize}{\fontsize{5.8pt}{8pt}\selectfont}
   \fontsize{9pt}{12pt}\selectfont
   \begin{tabular}{l||c|c|c}
-    & @{text simp} & @{text nbe} & @{text code} \tabularnewline \hline \hline
-    interactive evaluation & @{command value} @{text "[simp]"} & @{command value} @{text "[nbe]"} & @{command value} @{text "[code]"} \tabularnewline
+    & \<open>simp\<close> & \<open>nbe\<close> & \<open>code\<close> \tabularnewline \hline \hline
+    interactive evaluation & @{command value} \<open>[simp]\<close> & @{command value} \<open>[nbe]\<close> & @{command value} \<open>[code]\<close> \tabularnewline
     plain evaluation & & & \ttsize@{ML "Code_Evaluation.dynamic_value"} \tabularnewline \hline
     evaluation method & @{method code_simp} & @{method normalization} & @{method eval} \tabularnewline
     property conversion & & & \ttsize@{ML "Code_Runtime.dynamic_holds_conv"} \tabularnewline \hline
@@ -177,10 +177,10 @@
 \<close>
 
 
-subsubsection \<open>Static evaluation using @{text simp} and @{text nbe}\<close>
+subsubsection \<open>Static evaluation using \<open>simp\<close> and \<open>nbe\<close>\<close>
   
 text \<open>
-  For @{text simp} and @{text nbe} static evaluation can be achieved using 
+  For \<open>simp\<close> and \<open>nbe\<close> static evaluation can be achieved using 
   @{ML Code_Simp.static_conv} and @{ML Nbe.static_conv}.
   Note that @{ML Nbe.static_conv} by its very nature
   requires an invocation of the ML compiler for every call,
@@ -191,7 +191,7 @@
 subsubsection \<open>Intimate connection between logic and system runtime\<close>
 
 text \<open>
-  Static evaluation for @{text eval} operates differently --
+  Static evaluation for \<open>eval\<close> operates differently --
   the required generated code is inserted directly into an ML
   block using antiquotations.  The idea is that generated
   code performing static evaluation (called a \<^emph>\<open>computation\<close>)
--- a/src/Doc/Codegen/Foundations.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Foundations.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -56,9 +56,9 @@
 
   \noindent Central to code generation is the notion of \emph{code
   equations}.  A code equation, as a first approximation, is a theorem
-  of the form @{text "f t\<^sub>1 t\<^sub>2 \<dots> t\<^sub>n \<equiv> t"} (an equation headed by a
-  constant @{text f} with arguments @{text "t\<^sub>1 t\<^sub>2 \<dots> t\<^sub>n"} and right
-  hand side @{text t}).
+  of the form \<open>f t\<^sub>1 t\<^sub>2 \<dots> t\<^sub>n \<equiv> t\<close> (an equation headed by a
+  constant \<open>f\<close> with arguments \<open>t\<^sub>1 t\<^sub>2 \<dots> t\<^sub>n\<close> and right
+  hand side \<open>t\<close>).
 
   \begin{itemize}
 
@@ -75,9 +75,9 @@
 
     \item These code equations are \qn{translated} to a program in an
       abstract intermediate language.  Think of it as a kind of
-      \qt{Mini-Haskell} with four \qn{statements}: @{text data} (for
-      datatypes), @{text fun} (stemming from code equations), also
-      @{text class} and @{text inst} (for type classes).
+      \qt{Mini-Haskell} with four \qn{statements}: \<open>data\<close> (for
+      datatypes), \<open>fun\<close> (stemming from code equations), also
+      \<open>class\<close> and \<open>inst\<close> (for type classes).
 
     \item Finally, the abstract program is \qn{serialised} into
       concrete source code of a target language.  This step only
@@ -145,7 +145,7 @@
   interface, transforming a list of function theorems to another list
   of function theorems, provided that neither the heading constant nor
   its type changes.  The @{term "0::nat"} / @{const Suc} pattern
-  used in theory @{text Code_Abstract_Nat} (see \secref{abstract_nat})
+  used in theory \<open>Code_Abstract_Nat\<close> (see \secref{abstract_nat})
   uses this interface.
 
   \noindent The current setup of the pre- and postprocessor may be inspected
@@ -184,9 +184,9 @@
   by (cases xs, simp_all) (cases "rev xs", simp_all)
 
 text \<open>
-  \noindent The annotation @{text "[code]"} is an @{text attribute}
+  \noindent The annotation \<open>[code]\<close> is an \<open>attribute\<close>
   which states that the given theorems should be considered as code
-  equations for a @{text fun} statement -- the corresponding constant
+  equations for a \<open>fun\<close> statement -- the corresponding constant
   is determined syntactically.  The resulting code:
 \<close>
 
@@ -243,7 +243,7 @@
 text \<open>
   \noindent During preprocessing, the membership test is rewritten,
   resulting in @{const List.member}, which itself performs an explicit
-  equality check, as can be seen in the corresponding @{text SML} code:
+  equality check, as can be seen in the corresponding \<open>SML\<close> code:
 \<close>
 
 text %quotetypewriter \<open>
@@ -258,7 +258,7 @@
   framework does the rest by propagating the @{class equal} constraints
   through all dependent code equations.  For datatypes, instances of
   @{class equal} are implicitly derived when possible.  For other types,
-  you may instantiate @{text equal} manually like any other type class.
+  you may instantiate \<open>equal\<close> manually like any other type class.
 \<close>
 
 
@@ -317,7 +317,7 @@
   since there is never a successful pattern match on the left hand
   side.  In order to assign a constant to that category
   explicitly, use the @{attribute code} attribute with
-  @{text abort}:
+  \<open>abort\<close>:
 \<close>
 
 declare %quote [[code abort: empty_queue]]
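
As a sketch of the \<open>[code]\<close> annotation discussed earlier in this theory, an
alternative code equation can be installed for a hypothetical constant double
(mult_2 is the library fact 2 * a = a + a):

  definition double :: "nat \<Rightarrow> nat" where
    "double n = n + n"

  lemma [code]: "double n = 2 * n"
    by (simp add: double_def mult_2)
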
--- a/src/Doc/Codegen/Further.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Further.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -4,7 +4,7 @@
 
 section \<open>Further issues \label{sec:further}\<close>
 
-subsection \<open>Incorporating generated code directly into the system runtime -- @{text code_reflect}\<close>
+subsection \<open>Incorporating generated code directly into the system runtime -- \<open>code_reflect\<close>\<close>
 
 subsubsection \<open>Static embedding of generated code into the system runtime\<close>
 
@@ -55,36 +55,36 @@
   file which can be included into the system runtime later on.
 \<close>
 
-subsection \<open>Specialities of the @{text Scala} target language \label{sec:scala}\<close>
+subsection \<open>Specialities of the \<open>Scala\<close> target language \label{sec:scala}\<close>
 
 text \<open>
-  @{text Scala} deviates from languages of the ML family in a couple
+  \<open>Scala\<close> deviates from languages of the ML family in a couple
   of aspects; those which affect code generation mainly have to do with
-  @{text Scala}'s type system:
+  \<open>Scala\<close>'s type system:
 
   \begin{itemize}
 
-    \item @{text Scala} prefers tupled syntax over curried syntax.
+    \item \<open>Scala\<close> prefers tupled syntax over curried syntax.
 
-    \item @{text Scala} sacrifices Hindely-Milner type inference for a
+    \item \<open>Scala\<close> sacrifices Hindley-Milner type inference for a
      much richer type system with subtyping etc.  For this reason
       type arguments sometimes have to be given explicitly in square
       brackets (mimicking System F syntax).
 
-    \item In contrast to @{text Haskell} where most specialities of
+    \item In contrast to \<open>Haskell\<close> where most specialities of
       the type system are implemented using \emph{type classes},
-      @{text Scala} provides a sophisticated system of \emph{implicit
+      \<open>Scala\<close> provides a sophisticated system of \emph{implicit
       arguments}.
 
   \end{itemize}
 
-  \noindent Concerning currying, the @{text Scala} serializer counts
+  \noindent Concerning currying, the \<open>Scala\<close> serializer counts
   arguments in code equations to determine how many arguments
   shall be tupled; remaining arguments and abstractions in terms
   rather than function definitions are always curried.
 
   The second aspect affects user-defined adaptations with @{command
-  code_printing}.  For regular terms, the @{text Scala} serializer prints
+  code_printing}.  For regular terms, the \<open>Scala\<close> serializer prints
   all type arguments explicitly.  For user-defined term adaptations
   this is only possible for adaptations which take no arguments: here
   the type arguments are just appended.  Otherwise they are ignored;
@@ -107,9 +107,8 @@
 
   Then sometimes the awkward situation occurs that dependencies
   between definitions introduce cyclic dependencies between modules,
-  which in the @{text Haskell} world leaves you to the mercy of the
-  @{text Haskell} implementation you are using, while for @{text
-  SML}/@{text OCaml} code generation is not possible.
+  which in the \<open>Haskell\<close> world leaves you at the mercy of the
+  \<open>Haskell\<close> implementation you are using, while for \<open>SML\<close>/\<open>OCaml\<close> code generation is not possible.
 
   A solution is to declare module names explicitly.  Let us assume
   the three cyclically dependent modules are named \emph{A}, \emph{B}
@@ -144,7 +143,7 @@
 begin
 
 text \<open>
-  \noindent Inside that locale we can lift @{text power} to exponent
+  \noindent Inside that locale we can lift \<open>power\<close> to exponent
   lists by means of a specification relative to that locale:
 \<close>
 
@@ -170,11 +169,10 @@
 
 text \<open>
   After an interpretation of this locale (say, @{command_def
-  global_interpretation} @{text "fun_power:"} @{term [source] "power (\<lambda>n (f
-  :: 'a \<Rightarrow> 'a). f ^^ n)"}), one could naively expect to have a constant @{text
-  "fun_power.powers :: nat list \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a"} for which code
+  global_interpretation} \<open>fun_power:\<close> @{term [source] "power (\<lambda>n (f
+  :: 'a \<Rightarrow> 'a). f ^^ n)"}), one could naively expect to have a constant \<open>fun_power.powers :: nat list \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a\<close> for which code
   can be generated.  But this is not the case: internally, the term
-  @{text "fun_power.powers"} is an abbreviation for the foundational
+  \<open>fun_power.powers\<close> is an abbreviation for the foundational
   term @{term [source] "power.powers (\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n)"}
   (see @{cite "isabelle-locale"} for the details behind this).
 
@@ -203,7 +201,7 @@
 subsection \<open>Parallel computation\<close>
 
 text \<open>
-  Theory @{text Parallel} in \<^dir>\<open>~~/src/HOL/Library\<close> contains
+  Theory \<open>Parallel\<close> in \<^dir>\<open>~~/src/HOL/Library\<close> contains
   operations to exploit parallelism inside the Isabelle/ML
   runtime engine.
 \<close>
@@ -215,7 +213,7 @@
   specific application, you should consider \emph{Imperative
   Functional Programming with Isabelle/HOL}
   @{cite "bulwahn-et-al:2008:imperative"}; the framework described there
-  is available in session @{text Imperative_HOL}, together with a
+  is available in session \<open>Imperative_HOL\<close>, together with a
   short primer document.
 \<close>
 
--- a/src/Doc/Codegen/Inductive_Predicate.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Inductive_Predicate.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -19,7 +19,7 @@
 section \<open>Inductive Predicates \label{sec:inductive}\<close>
 
 text \<open>
-  The @{text "predicate compiler"} is an extension of the code generator
+  The \<open>predicate compiler\<close> is an extension of the code generator
   which turns inductive specifications into equational ones, from
   which in turn executable code can be generated.  The mechanisms of
   this compiler are described in detail in
@@ -46,25 +46,25 @@
   correctness proof.  The compiler infers possible modes for the
   predicate and produces the derived code equations.  Modes annotate
   which (parts of the) arguments are to be taken as input, and which as
-  output. Modes are similar to types, but use the notation @{text "i"}
-  for input and @{text "o"} for output.
+  output. Modes are similar to types, but use the notation \<open>i\<close>
+  for input and \<open>o\<close> for output.
  
   For @{term "append"}, the compiler can infer the following modes:
   \begin{itemize}
-    \item @{text "i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool"}
-    \item @{text "i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool"}
-    \item @{text "o \<Rightarrow> o \<Rightarrow> i \<Rightarrow> bool"}
+    \item \<open>i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool\<close>
+    \item \<open>i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool\<close>
+    \item \<open>o \<Rightarrow> o \<Rightarrow> i \<Rightarrow> bool\<close>
   \end{itemize}
   You can compute sets of predicates using @{command_def "values"}:
 \<close>
 
 values %quote "{zs. append [(1::nat),2,3] [4,5] zs}"
 
-text \<open>\noindent outputs @{text "{[1, 2, 3, 4, 5]}"}, and\<close>
+text \<open>\noindent outputs \<open>{[1, 2, 3, 4, 5]}\<close>, and\<close>
 
 values %quote "{(xs, ys). append xs ys [(2::nat),3]}"
 
-text \<open>\noindent outputs @{text "{([], [2, 3]), ([2], [3]), ([2, 3], [])}"}.\<close>
+text \<open>\noindent outputs \<open>{([], [2, 3]), ([2], [3]), ([2, 3], [])}\<close>.\<close>
 
 text \<open>
   \noindent If you are only interested in the first elements of the
@@ -81,7 +81,7 @@
   comprehensions for which a mode has been inferred.
 
   The code equations for a predicate are made available as theorems with
-  the suffix @{text "equation"}, and can be inspected with:
+  the suffix \<open>equation\<close>, and can be inspected with:
 \<close>
 
 thm %quote append.equation
@@ -94,8 +94,7 @@
 
 text \<open>
   By default, the functions generated from a predicate are named after
-  the predicate with the mode mangled into the name (e.g., @{text
-  "append_i_i_o"}).  You can specify your own names as follows:
+  the predicate with the mode mangled into the name (e.g., \<open>append_i_i_o\<close>).  You can specify your own names as follows:
 \<close>
 
 code_pred %quote (modes: i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool as concat,
@@ -122,7 +121,7 @@
 
 text \<open>
   \noindent These rules are not well suited for executing the transitive
-  closure with the mode @{text "(i \<Rightarrow> o \<Rightarrow> bool) \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool"}, as
+  closure with the mode \<open>(i \<Rightarrow> o \<Rightarrow> bool) \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool\<close>, as
   the second rule will cause an infinite loop in the recursive call.
   This can be avoided using the following alternative rules which are
   declared to the predicate compiler by the attribute @{attribute
@@ -215,12 +214,11 @@
 code_pred %quote succ .
 
 text \<open>
-  \noindent For this, the predicate compiler can infer modes @{text "o
-  \<Rightarrow> o \<Rightarrow> bool"}, @{text "i \<Rightarrow> o \<Rightarrow> bool"}, @{text "o \<Rightarrow> i \<Rightarrow> bool"} and
-  @{text "i \<Rightarrow> i \<Rightarrow> bool"}.  The invocation of @{command "values"}
-  @{text "{n. tranclp succ 10 n}"} loops, as multiple modes for the
-  predicate @{text "succ"} are possible and here the first mode @{text
-  "o \<Rightarrow> o \<Rightarrow> bool"} is chosen. To choose another mode for the argument,
+  \noindent For this, the predicate compiler can infer modes \<open>o
+  \<Rightarrow> o \<Rightarrow> bool\<close>, \<open>i \<Rightarrow> o \<Rightarrow> bool\<close>, \<open>o \<Rightarrow> i \<Rightarrow> bool\<close> and
+  \<open>i \<Rightarrow> i \<Rightarrow> bool\<close>.  The invocation of @{command "values"}
+  \<open>{n. tranclp succ 10 n}\<close> loops, as multiple modes for the
+  predicate \<open>succ\<close> are possible and here the first mode \<open>o \<Rightarrow> o \<Rightarrow> bool\<close> is chosen. To choose another mode for the argument,
   you can declare the mode for the argument between the @{command
   "values"} and the number of elements.
 \<close>
@@ -264,7 +262,7 @@
   Further examples for compiling inductive predicates can be found in
   \<^file>\<open>~~/src/HOL/Predicate_Compile_Examples/Examples.thy\<close>.  There are
   also some examples in the Archive of Formal Proofs, notably in the
-  @{text "POPLmark-deBruijn"} and the @{text "FeatherweightJava"}
+  \<open>POPLmark-deBruijn\<close> and the \<open>FeatherweightJava\<close>
   sessions.
 \<close>
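
Such a mode declaration looks as follows (a sketch continuing the succ
example above; the bound 20 is arbitrary):

  values [mode: i \<Rightarrow> o \<Rightarrow> bool] 20 "{n. tranclp succ 10 n}"
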
 
--- a/src/Doc/Codegen/Introduction.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Introduction.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -9,11 +9,10 @@
 section \<open>Introduction\<close>
 
 text \<open>
-  This tutorial introduces the code generator facilities of @{text
-  "Isabelle/HOL"}.  It allows to turn (a certain class of) HOL
+  This tutorial introduces the code generator facilities of \<open>Isabelle/HOL\<close>.  They allow turning (a certain class of) HOL
   specifications into corresponding executable code in the programming
-  languages @{text SML} @{cite SML}, @{text OCaml} @{cite OCaml},
-  @{text Haskell} @{cite "haskell-revised-report"} and @{text Scala}
+  languages \<open>SML\<close> @{cite SML}, \<open>OCaml\<close> @{cite OCaml},
+  \<open>Haskell\<close> @{cite "haskell-revised-report"} and \<open>Scala\<close>
   @{cite "scala-overview-tech-report"}.
 
   To profit from this tutorial, some familiarity and experience with
@@ -67,7 +66,7 @@
   "xs \<noteq> [] \<Longrightarrow> dequeue (AQueue xs []) = (case rev xs of y # ys \<Rightarrow> (Some y, AQueue [] ys))"
   by (cases xs) (simp_all split: list.splits) (*>*)
 
-text \<open>\noindent Then we can generate code e.g.~for @{text SML} as follows:\<close>
+text \<open>\noindent Then we can generate code e.g.~for \<open>SML\<close> as follows:\<close>
 
 export_code %quote empty dequeue enqueue in SML
   module_name Example file "$ISABELLE_TMP/example.ML"
@@ -85,9 +84,9 @@
   target language identifier and a freely chosen module name.  A file
   name denotes the destination to store the generated code.  Note that
   the semantics of the destination depends on the target language: for
-  @{text SML}, @{text OCaml} and @{text Scala} it denotes a \emph{file},
-  for @{text Haskell} it denotes a \emph{directory} where a file named as the
-  module name (with extension @{text ".hs"}) is written:
+  \<open>SML\<close>, \<open>OCaml\<close> and \<open>Scala\<close> it denotes a \emph{file},
+  for \<open>Haskell\<close> it denotes a \emph{directory} where a file named as the
+  module name (with extension \<open>.hs\<close>) is written:
 \<close>
 
 export_code %quote empty dequeue enqueue in Haskell
@@ -179,7 +178,7 @@
 text \<open>
   \noindent This is a convenient place to show how explicit dictionary
   construction manifests in generated code -- the same example in
-  @{text SML}:
+  \<open>SML\<close>:
 \<close>
 
 text %quotetypewriter \<open>
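
For comparison, an analogous invocation for \<open>Scala\<close> -- a sketch reusing the
queue operations above; the destination file name is arbitrary:

  export_code empty dequeue enqueue in Scala
    module_name Example file "$ISABELLE_TMP/example.scala"
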
--- a/src/Doc/Codegen/Refinement.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Codegen/Refinement.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -111,14 +111,13 @@
 
 text \<open>
   \noindent Here we define a \qt{constructor} @{const "AQueue"} which
-  is defined in terms of @{text "Queue"} and interprets its arguments
+  is expressed in terms of \<open>Queue\<close> and interprets its arguments
   according to what the \emph{content} of an amortised queue is supposed
   to be.
 
   The prerequisite for datatype constructors is only syntactical: a
-  constructor must be of type @{text "\<tau> = \<dots> \<Rightarrow> \<kappa> \<alpha>\<^sub>1 \<dots> \<alpha>\<^sub>n"} where @{text
-  "{\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n}"} is exactly the set of \emph{all} type variables in
-  @{text "\<tau>"}; then @{text "\<kappa>"} is its corresponding datatype.  The
+  constructor must be of type \<open>\<tau> = \<dots> \<Rightarrow> \<kappa> \<alpha>\<^sub>1 \<dots> \<alpha>\<^sub>n\<close> where \<open>{\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n}\<close> is exactly the set of \emph{all} type variables in
+  \<open>\<tau>\<close>; then \<open>\<kappa>\<close> is its corresponding datatype.  The
   HOL datatype package by default registers any new datatype with its
   constructors, but this may be changed using @{command_def
   code_datatype}; the currently chosen constructors can be inspected
@@ -227,7 +226,7 @@
   
   The primitive operations on @{typ "'a dlist"} are specified
   indirectly using the projection @{const list_of_dlist}.  For
-  the empty @{text "dlist"}, @{const Dlist.empty}, we finally want
+  the empty \<open>dlist\<close>, @{const Dlist.empty}, we finally want
   the code equation
 \<close>
 
--- a/src/Doc/Corec/Corec.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Corec/Corec.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -52,7 +52,7 @@
 \emph{friendly}. Intuitively, a function is friendly if it needs to destruct
 at most one constructor of input to produce one constructor of output.
 We can register functions as friendly using the @{command friend_of_corec}
-command, or by passing the @{text friend} option to @{command corec}. The
+command, or by passing the \<open>friend\<close> option to @{command corec}. The
 friendliness check relies on an internal syntactic check in combination with
 a parametricity subgoal, which must be discharged manually (typically using
 @{method transfer_prover} or @{method transfer_prover_eq}).
@@ -74,7 +74,7 @@
 characteristic theorems associated with the specified corecursive functions are
 derived rather than introduced axiomatically.
 (Exceptionally, most of the internal proof obligations are omitted if the
-@{text quick_and_dirty} option is enabled.)
+\<open>quick_and_dirty\<close> option is enabled.)
 The package is described in a pair of scientific papers
 @{cite "blanchette-et-al-2015-fouco" and "blanchette-et-al-201x-amico"}. Some
 of the text and examples below originate from there.
@@ -99,7 +99,7 @@
 \item Section \ref{sec:attribute}, ``Attribute,'' briefly describes the
 @{attribute friend_of_corec_simps} attribute, which can be used to strengthen
 the tactics underlying the @{command friend_of_corec} and @{command corec}
-@{text "(friend)"} commands.
+\<open>(friend)\<close> commands.
 
 \item Section \ref{sec:known-bugs-and-limitations}, ``Known Bugs and
 Limitations,'' concludes with known open issues.
@@ -165,7 +165,7 @@
 specified and is trivial to discharge. The second subgoal is a parametricity
 property that captures the requirement that the function may destruct at
 most one constructor of input to produce one constructor of output. This subgoal
-can usually be discharged using the @{text transfer_prover} or
+can usually be discharged using the \<open>transfer_prover\<close> or
 @{method transfer_prover_eq} proof method (Section~\ref{ssec:transfer-prover-eq}).
 The latter replaces equality relations by their relator terms according to the
 @{thm [source] relator_eq} theorem collection before it invokes
@@ -184,7 +184,7 @@
       "fibB = ssum (SCons 0 (SCons 1 fibB)) (SCons 0 fibB)"
 
 text \<open>
-Using the @{text "friend"} option, we can simultaneously define a function and
+Using the \<open>friend\<close> option, we can simultaneously define a function and
 register it as a friend:
 \<close>
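
text \<open>
  For example, a shuffle product in the style of the @{const sprod} function
  mentioned below can be defined and registered in one step (a sketch; the
  exact sort constraints in the theory may differ):
\<close>

    corec (friend) sprod :: "('a :: {plus,times}) stream \<Rightarrow> 'a stream \<Rightarrow> 'a stream" where
      "sprod xs ys =
         SCons (shd xs * shd ys) (ssum (sprod xs (stl ys)) (sprod (stl xs) ys))"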
 
@@ -201,7 +201,7 @@
 
 text \<open>
 \noindent
-The parametricity subgoal is given to @{text transfer_prover_eq}
+The parametricity subgoal is given to \<open>transfer_prover_eq\<close>
 (Section~\ref{ssec:transfer-prover-eq}).
 
 The @{const sprod} and @{const sexp} functions provide shuffle product and
@@ -263,7 +263,7 @@
 \noindent
 Here, @{const map} is the standard map function on lists, and @{const zip}
 converts two parallel lists into a list of pairs. The @{const tsum} function is
-primitively corecursive. Instead of @{command corec} @{text "(friend)"}, we could
+primitively corecursive. Instead of @{command corec} \<open>(friend)\<close>, we could
 also have used \keyw{primcorec} and @{command friend_of_corec}, as we did for
 @{const ssum}.
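Such a registration follows roughly this pattern (a sketch; the proof script
assumes the code equation \<open>ssum.code\<close> discharges the first subgoal):

    friend_of_corec ssum :: "('a :: plus) stream \<Rightarrow> 'a stream \<Rightarrow> 'a stream" where
      "ssum xs ys = SCons (shd xs + shd ys) (ssum (stl xs) (stl ys))"
      apply (rule ssum.code)
      by transfer_prover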
 
@@ -279,8 +279,8 @@
 All the syntactic convenience provided by \keyw{primcorec} is also supported by
 @{command corec}, @{command corecursive}, and @{command friend_of_corec}. In
 particular, nesting through the function type can be expressed using
-@{text \<lambda>}-abstractions and function applications rather than through composition
-(@{term "(\<circ>)"}, the map function for @{text \<Rightarrow>}). For example:
+\<open>\<lambda>\<close>-abstractions and function applications rather than through composition
+(@{term "(\<circ>)"}, the map function for \<open>\<Rightarrow>\<close>). For example:
 \<close>
 
     codatatype 'a language =
@@ -343,18 +343,18 @@
 The @{command corecursive} command is a variant of @{command corec} that allows
 us to specify a termination argument for any unguarded self-call.
 
-When called with @{text "m = 1"} and @{text "n = 2"}, the @{const primes}
+When called with \<open>m = 1\<close> and \<open>n = 2\<close>, the @{const primes}
 function computes the stream of prime numbers. The unguarded call in the
-@{text else} branch increments @{term n} until it is coprime to the first
+\<open>else\<close> branch increments @{term n} until it is coprime to the first
 argument @{term m} (i.e., the greatest common divisor of @{term m} and
-@{term n} is @{text 1}).
+@{term n} is \<open>1\<close>).
 
 For any positive integers @{term m} and @{term n}, the numbers @{term m} and
-@{text "m * n + 1"} are coprime, yielding an upper bound on the number of times
-@{term n} is increased. Hence, the function will take the @{text else} branch at
+\<open>m * n + 1\<close> are coprime, yielding an upper bound on the number of times
+@{term n} is increased. Hence, the function will take the \<open>else\<close> branch at
 most finitely often before taking the \<open>then\<close> branch and producing one constructor.
-There is a slight complication when @{text "m = 0 \<and> n > 1"}: Without the first
-disjunct in the @{text "if"} condition, the function could stall. (This corner
+There is a slight complication when \<open>m = 0 \<and> n > 1\<close>: Without the first
+disjunct in the \<open>if\<close> condition, the function could stall. (This corner
 case was overlooked in the original example
 @{cite "di-gianantonio-miculan-2003"}.)
 
@@ -405,7 +405,7 @@
 
 text \<open>
 Once a corecursive specification has been accepted, we normally want to reason
-about it. The @{text codatatype} command generates a structural coinduction
+about it. The \<open>codatatype\<close> command generates a structural coinduction
 principle that matches primitively corecursive functions. For nonprimitive
 specifications, our package provides the more advanced proof principle of
 \emph{coinduction up to congruence}---or simply \emph{coinduction up-to}.
@@ -417,8 +417,8 @@
 @{thm stream.coinduct[no_vars]}
 \end{indentblock}
 %
-Coinduction allows us to prove an equality @{text "l = r"} on streams by
-providing a relation @{text R} that relates @{text l} and @{text r} (first
+Coinduction allows us to prove an equality \<open>l = r\<close> on streams by
+providing a relation \<open>R\<close> that relates \<open>l\<close> and \<open>r\<close> (first
 premise) and that constitutes a bisimulation (second premise). Streams that are
 related by a bisimulation cannot be distinguished by taking observations (via
 the selectors @{const shd} and @{const stl}); hence they must be equal.
@@ -475,7 +475,7 @@
 because @{const sexp} has a more restrictive result type than @{const sskew}
 (@{typ "nat stream"} vs. @{typ "('a :: {plus,times}) stream"}.
 
-The version numbers, here @{text v5}, distinguish the different congruence
+The version numbers, here \<open>v5\<close>, distinguish the different congruence
 closures generated for a given codatatype as more friends are registered. As
 much as possible, it is recommended to avoid referring to them in proof
 documents.
@@ -568,7 +568,7 @@
 corecursive definition is the unique solution to a fixpoint equation.
 
 The @{command corec}, @{command corecursive}, and @{command friend_of_corec}
-commands generate a property @{text f.unique} about the function of interest
+commands generate a property \<open>f.unique\<close> about the function of interest
 @{term f} that can be used to prove that any function that satisfies
 @{term f}'s corecursive specification must be equal to~@{term f}. For example:
 \[@{thm ssum.unique[no_vars]}\]
@@ -621,8 +621,8 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "corec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  @{command_def "corecursive"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  @{command_def "corec"} & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
+  @{command_def "corecursive"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -649,17 +649,17 @@
 \setlength{\itemsep}{0pt}
 
 \item
-The @{text plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+The \<open>plugins\<close> option indicates which plugins should be enabled
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 \item
-The @{text friend} option indicates that the defined function should be
+The \<open>friend\<close> option indicates that the defined function should be
 registered as a friend. This gives rise to additional proof obligations.
 
 \item
-The @{text transfer} option indicates that an unconditional transfer rule
-should be generated and proved @{text "by transfer_prover"}. The
-@{text "[transfer_rule]"} attribute is set on the generated theorem.
+The \<open>transfer\<close> option indicates that an unconditional transfer rule
+should be generated and proved \<open>by transfer_prover\<close>. The
+\<open>[transfer_rule]\<close> attribute is set on the generated theorem.
 \end{itemize}
 
 The @{command corec} command is an abbreviation for @{command corecursive}
@@ -674,7 +674,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "friend_of_corec"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  @{command_def "friend_of_corec"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -701,13 +701,13 @@
 \setlength{\itemsep}{0pt}
 
 \item
-The @{text plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+The \<open>plugins\<close> option indicates which plugins should be enabled
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 \item
-The @{text transfer} option indicates that an unconditional transfer rule
-should be generated and proved @{text "by transfer_prover"}. The
-@{text "[transfer_rule]"} attribute is set on the generated theorem.
+The \<open>transfer\<close> option indicates that an unconditional transfer rule
+should be generated and proved \<open>by transfer_prover\<close>. The
+\<open>[transfer_rule]\<close> attribute is set on the generated theorem.
 \end{itemize}
 \<close>
 
@@ -717,7 +717,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "coinduction_upto"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "coinduction_upto"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -758,14 +758,14 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{coinduct_upto}]
+\item[\<open>t.\<close>\hthm{coinduct_upto}]
 
-\item[@{text "t."}\hthm{cong_intros}]
+\item[\<open>t.\<close>\hthm{cong_intros}]
 
 \end{description}
 \end{indentblock}
 %
-for the corresponding codatatype @{text t} so that they always contain the most
+for the corresponding codatatype \<open>t\<close> so that they always contain the most
 powerful coinduction up-to principles derived so far.
 \<close>
 
@@ -774,30 +774,30 @@
   \label{ssec:corec-and-corecursive-theorems}\<close>
 
 text \<open>
-For a function @{term f} over codatatype @{text t}, the @{command corec} and
+For a function @{term f} over codatatype \<open>t\<close>, the @{command corec} and
 @{command corecursive} commands generate the following properties (listed for
 @{const sexp}, cf. Section~\ref{ssec:simple-corecursion}):
 
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "f."}\hthm{code} @{text "[code]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{code} \<open>[code]\<close>\rm:] ~ \\
 @{thm sexp.code[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 @{cite "isabelle-datatypes"}.
 
-\item[@{text "f."}\hthm{coinduct} @{text "[consumes 1, case_names t, case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{coinduct} \<open>[consumes 1, case_names t, case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n]\<close>\rm:] ~ \\
 @{thm sexp.coinduct[no_vars]}
 
-\item[@{text "f."}\hthm{cong_intros}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{cong_intros}\rm:] ~ \\
 @{thm sexp.cong_intros[no_vars]}
 
-\item[@{text "f."}\hthm{unique}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{unique}\rm:] ~ \\
 @{thm sexp.unique[no_vars]} \\
 This property is not generated for mixed recursive--corecursive definitions.
 
-\item[@{text "f."}\hthm{inner_induct}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{inner_induct}\rm:] ~ \\
 This property is only generated for mixed recursive--corecursive definitions.
 For @{const primes} (Section~\ref{ssec:mixed-recursion-corecursion}), it reads as
 follows: \\[\jot]
@@ -807,26 +807,26 @@
 \end{indentblock}
 
 \noindent
-The individual rules making up @{text "f.cong_intros"} are available as
+The individual rules making up \<open>f.cong_intros\<close> are available as
 
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "f."}\hthm{cong_base}]
+\item[\<open>f.\<close>\hthm{cong_base}]
 
-\item[@{text "f."}\hthm{cong_refl}]
+\item[\<open>f.\<close>\hthm{cong_refl}]
 
-\item[@{text "f."}\hthm{cong_sym}]
+\item[\<open>f.\<close>\hthm{cong_sym}]
 
-\item[@{text "f."}\hthm{cong_trans}]
+\item[\<open>f.\<close>\hthm{cong_trans}]
 
-\item[@{text "f."}\hthm{cong_C}@{text "\<^sub>1"}, \ldots, @{text "f."}\hthm{cong_C}@{text "\<^sub>n"}] ~ \\
-where @{text "C\<^sub>1"}, @{text "\<dots>"}, @{text "C\<^sub>n"} are @{text t}'s
+\item[\<open>f.\<close>\hthm{cong_C}\<open>\<^sub>1\<close>, \ldots, \<open>f.\<close>\hthm{cong_C}\<open>\<^sub>n\<close>] ~ \\
+where \<open>C\<^sub>1\<close>, \<open>\<dots>\<close>, \<open>C\<^sub>n\<close> are \<open>t\<close>'s
 constructors
 
-\item[@{text "f."}\hthm{cong_f}@{text "\<^sub>1"}, \ldots, @{text "f."}\hthm{cong_f}@{text "\<^sub>m"}] ~ \\
-where @{text "f\<^sub>1"}, @{text "\<dots>"}, @{text "f\<^sub>m"} are the available
-friends for @{text t}
+\item[\<open>f.\<close>\hthm{cong_f}\<open>\<^sub>1\<close>, \ldots, \<open>f.\<close>\hthm{cong_f}\<open>\<^sub>m\<close>] ~ \\
+where \<open>f\<^sub>1\<close>, \<open>\<dots>\<close>, \<open>f\<^sub>m\<close> are the available
+friends for \<open>t\<close>
 
 \end{description}
 \end{indentblock}
@@ -839,8 +839,8 @@
 text \<open>
 The @{command friend_of_corec} command generates the same theorems as
 @{command corec} and @{command corecursive}, except that it adds an optional
-@{text "friend."} component to the names to prevent potential clashes (e.g.,
-@{text "f.friend.code"}).
+\<open>friend.\<close> component to the names to prevent potential clashes (e.g.,
+\<open>f.friend.code\<close>).
 \<close>
 
 
@@ -849,27 +849,27 @@
 
 text \<open>
 The @{command coinduction_upto} command generates the following properties
-(listed for @{text nat_int_tllist}):
+(listed for \<open>nat_int_tllist\<close>):
 
 \begin{indentblock}
 \begin{description}
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t."}\hthm{coinduct_upto} @{text "[consumes 1, case_names t,"} \\
-  \phantom{@{text "t."}\hthm{coinduct_upto} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n]"}\rm:
+  \<open>t.\<close>\hthm{coinduct_upto} \<open>[consumes 1, case_names t,\<close> \\
+  \phantom{\<open>t.\<close>\hthm{coinduct_upto} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n]\<close>\rm:
 \end{tabular}] ~ \\
 @{thm nat_int_tllist.coinduct_upto[no_vars]}
 
-\item[@{text "t."}\hthm{cong_intros}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{cong_intros}\rm:] ~ \\
 @{thm nat_int_tllist.cong_intros[no_vars]}
 
 \end{description}
 \end{indentblock}
 
 \noindent
-The individual rules making up @{text "t.cong_intros"} are available
-separately as @{text "t.cong_base"}, @{text "t.cong_refl"}, etc.\
+The individual rules making up \<open>t.cong_intros\<close> are available
+separately as \<open>t.cong_base\<close>, \<open>t.cong_refl\<close>, etc.\
 (Section~\ref{ssec:corec-and-corecursive-theorems}).
 \<close>
 
@@ -910,7 +910,7 @@
 
 text \<open>
 The @{attribute friend_of_corec_simps} attribute declares naturality theorems
-to be used by @{command friend_of_corec} and @{command corec} @{text "(friend)"} in
+to be used by @{command friend_of_corec} and @{command corec} \<open>(friend)\<close> in
 deriving the user specification from reduction to primitive corecursion.
 Internally, these commands derive naturality theorems from the parametricity proof
 obligations discharged by the user or the @{method transfer_prover_eq} method, but
@@ -945,7 +945,7 @@
 (Section~\ref{ssec:friend-of-corec-simps}).
 
 \item
-\emph{The @{text transfer} option is not implemented yet.}
+\emph{The \<open>transfer\<close> option is not implemented yet.}
 
 \item
 \emph{The constructor and destructor views offered by {\upshape\keyw{primcorec}}
@@ -958,7 +958,7 @@
 \emph{The package does not interact well with locales.}
 
 \item
-\emph{The undocumented @{text corecUU_transfer} theorem is not as polymorphic as
+\emph{The undocumented \<open>corecUU_transfer\<close> theorem is not as polymorphic as
 it could be.}
 
 \item
--- a/src/Doc/Datatypes/Datatypes.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Datatypes/Datatypes.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -85,7 +85,7 @@
 (co)datatypes are derived rather than introduced axiomatically.%
 \footnote{However, some of the
 internal constructions and most of the internal proof obligations are omitted
-if the @{text quick_and_dirty} option is enabled.}
+if the \<open>quick_and_dirty\<close> option is enabled.}
 The package is described in a number of scientific papers
 @{cite "traytel-et-al-2012" and "blanchette-et-al-2014-impl" and
 "panny-et-al-2014" and "blanchette-et-al-2015-wit"}.
@@ -194,8 +194,8 @@
 
 text \<open>
 \noindent
-The constructors are @{text "None :: 'a option"} and
-@{text "Some :: 'a \<Rightarrow> 'a option"}.
+The constructors are \<open>None :: 'a option\<close> and
+\<open>Some :: 'a \<Rightarrow> 'a option\<close>.
 
 The next example has three type parameters:
 \<close>
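
text \<open>
  A definition of this shape (a sketch consistent with the constructor type
  quoted next) is:
\<close>

    datatype ('a, 'b, 'c) triple = Triple 'a 'b 'c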
@@ -205,7 +205,7 @@
 text \<open>
 \noindent
 The constructor is
-@{text "Triple :: 'a \<Rightarrow> 'b \<Rightarrow> 'c \<Rightarrow> ('a, 'b, 'c) triple"}.
+\<open>Triple :: 'a \<Rightarrow> 'b \<Rightarrow> 'c \<Rightarrow> ('a, 'b, 'c) triple\<close>.
 Unlike in Standard ML, curried constructors are supported. The uncurried variant
 is also possible:
 \<close>
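
text \<open>
  For instance (a sketch; the name \<open>triple\<^sub>u\<close> is invented):
\<close>

    datatype ('a, 'b, 'c) triple\<^sub>u = Triple\<^sub>u "'a \<times> 'b \<times> 'c"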
@@ -286,9 +286,9 @@
 
 text \<open>
 \noindent
-The issue is that the function arrow @{text "\<Rightarrow>"} allows recursion
+The issue is that the function arrow \<open>\<Rightarrow>\<close> allows recursion
 only through its right-hand side. This issue is inherited by polymorphic
-datatypes defined in terms of~@{text "\<Rightarrow>"}:
+datatypes defined in terms of~\<open>\<Rightarrow>\<close>:
 \<close>
 
     datatype ('a, 'b) fun_copy = Fun "'a \<Rightarrow> 'b"
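
text \<open>
  Recursion to the right of \<open>\<Rightarrow>\<close> remains legal, as in this sketch of a
  function-branching tree (cf.\ the \<open>ftree\<close> type used later in this manual):
\<close>

    datatype 'a ftree = FTLeaf 'a | FTNode "'a \<Rightarrow> 'a ftree"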
@@ -311,9 +311,9 @@
 
 text \<open>
 \noindent
-In general, type constructors @{text "('a\<^sub>1, \<dots>, 'a\<^sub>m) t"}
-allow recursion on a subset of their type arguments @{text 'a\<^sub>1}, \ldots,
-@{text 'a\<^sub>m}. These type arguments are called \emph{live}; the remaining
+In general, type constructors \<open>('a\<^sub>1, \<dots>, 'a\<^sub>m) t\<close>
+allow recursion on a subset of their type arguments \<open>'a\<^sub>1\<close>, \ldots,
+\<open>'a\<^sub>m\<close>. These type arguments are called \emph{live}; the remaining
 type arguments are called \emph{dead}. In @{typ "'a \<Rightarrow> 'b"} and
 @{typ "('a, 'b) fun_copy"}, the type variable @{typ 'a} is dead and
 @{typ 'b} is live.
@@ -345,10 +345,10 @@
 The @{command datatype} command introduces various constants in addition to
 the constructors. With each datatype are associated set functions, a map
 function, a predicator, a relator, discriminators, and selectors, all of which can be given
-custom names. In the example below, the familiar names @{text null}, @{text hd},
-@{text tl}, @{text set}, @{text map}, and @{text list_all2} override the
-default names @{text is_Nil}, @{text un_Cons1}, @{text un_Cons2},
-@{text set_list}, @{text map_list}, and @{text rel_list}:
+custom names. In the example below, the familiar names \<open>null\<close>, \<open>hd\<close>,
+\<open>tl\<close>, \<open>set\<close>, \<open>map\<close>, and \<open>list_all2\<close> override the
+default names \<open>is_Nil\<close>, \<open>un_Cons1\<close>, \<open>un_Cons2\<close>,
+\<open>set_list\<close>, \<open>map_list\<close>, and \<open>rel_list\<close>:
 \<close>
 
 (*<*)
@@ -384,21 +384,21 @@
 
 \begin{tabular}{@ {}ll@ {}}
 Constructors: &
-  @{text "Nil :: 'a list"} \\
+  \<open>Nil :: 'a list\<close> \\
 &
-  @{text "Cons :: 'a \<Rightarrow> 'a list \<Rightarrow> 'a list"} \\
+  \<open>Cons :: 'a \<Rightarrow> 'a list \<Rightarrow> 'a list\<close> \\
 Discriminator: &
-  @{text "null :: 'a list \<Rightarrow> bool"} \\
+  \<open>null :: 'a list \<Rightarrow> bool\<close> \\
 Selectors: &
-  @{text "hd :: 'a list \<Rightarrow> 'a"} \\
+  \<open>hd :: 'a list \<Rightarrow> 'a\<close> \\
 &
-  @{text "tl :: 'a list \<Rightarrow> 'a list"} \\
+  \<open>tl :: 'a list \<Rightarrow> 'a list\<close> \\
 Set function: &
-  @{text "set :: 'a list \<Rightarrow> 'a set"} \\
+  \<open>set :: 'a list \<Rightarrow> 'a set\<close> \\
 Map function: &
-  @{text "map :: ('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \\
+  \<open>map :: ('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list\<close> \\
 Relator: &
-  @{text "list_all2 :: ('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'b list \<Rightarrow> bool"}
+  \<open>list_all2 :: ('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'b list \<Rightarrow> bool\<close>
 \end{tabular}
 
 \medskip
@@ -466,7 +466,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "datatype"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "datatype"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -489,7 +489,7 @@
 datatypes specified by their constructors.
 
 The syntactic entity \synt{target} can be used to specify a local
-context (e.g., @{text "(in linorder)"} @{cite "isabelle-isar-ref"}),
+context (e.g., \<open>(in linorder)\<close> @{cite "isabelle-isar-ref"}),
 and \synt{prop} denotes a HOL proposition.
 
 The optional target may be followed by a combination of the following
@@ -499,19 +499,19 @@
 \setlength{\itemsep}{0pt}
 
 \item
-The @{text plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+The \<open>plugins\<close> option indicates which plugins should be enabled
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 \item
-The @{text "discs_sels"} option indicates that discriminators and selectors
+The \<open>discs_sels\<close> option indicates that discriminators and selectors
 should be generated. The option is implicitly enabled if names are specified for
 discriminators or selectors.
 \end{itemize}
 
 The optional \keyw{where} clause specifies default values for selectors.
 Each proposition must be an equation of the form
-@{text "un_D (C \<dots>) = \<dots>"}, where @{text C} is a constructor and
-@{text un_D} is a selector.
+\<open>un_D (C \<dots>) = \<dots>\<close>, where \<open>C\<close> is a constructor and
+\<open>un_D\<close> is a selector. (A combined sketch of the discriminator, selector,
+and default-value syntax follows at the end of this subsection.)
 
 The left-hand sides of the datatype equations specify the name of the type to
 define, its type parameters, and additional information:
@@ -530,9 +530,9 @@
 variable (@{typ 'a}, @{typ 'b}, \ldots) @{cite "isabelle-isar-ref"}.
 
 The optional names preceding the type variables allow one to override the default
-names of the set functions (@{text set\<^sub>1_t}, \ldots, @{text set\<^sub>m_t}). Type
-arguments can be marked as dead by entering @{text dead} in front of the
-type variable (e.g., @{text "(dead 'a)"}); otherwise, they are live or dead
+names of the set functions (\<open>set\<^sub>1_t\<close>, \ldots, \<open>set\<^sub>m_t\<close>). Type
+arguments can be marked as dead by entering \<open>dead\<close> in front of the
+type variable (e.g., \<open>(dead 'a)\<close>); otherwise, they are live or dead
 (and a set function is generated or not) depending on where they occur in the
 right-hand sides of the definition. Declaring a type argument as dead can speed
 up the type definition but will prevent any later (co)recursion through that
@@ -551,9 +551,9 @@
 The main constituents of a constructor specification are the name of the
 constructor and the list of its argument types. An optional discriminator name
 can be supplied at the front. If discriminators are enabled (cf.\ the
-@{text "discs_sels"} option) but no name is supplied, the default is
-@{text "\<lambda>x. x = C\<^sub>j"} for nullary constructors and
-@{text t.is_C\<^sub>j} otherwise.
+\<open>discs_sels\<close> option) but no name is supplied, the default is
+\<open>\<lambda>x. x = C\<^sub>j\<close> for nullary constructors and
+\<open>t.is_C\<^sub>j\<close> otherwise.
 
 @{rail \<open>
   @{syntax_def dt_ctor_arg}: type | '(' name ':' type ')'
@@ -567,8 +567,8 @@
 In addition to the type of a constructor argument, it is possible to specify a
 name for the corresponding selector. The same selector name can be reused for
 arguments to several constructors as long as the arguments share the same type.
-If selectors are enabled (cf.\ the @{text "discs_sels"} option) but no name is
-supplied, the default name is @{text un_C\<^sub>ji}.
+If selectors are enabled (cf.\ the \<open>discs_sels\<close> option) but no name is
+supplied, the default name is \<open>un_C\<^sub>ji\<close>.
 \<close>
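
text \<open>
  Putting the naming options together (a sketch with invented names, showing
  a discriminator, selectors, and a \keyw{where} default):
\<close>

    datatype 'a mylist =
      mynull: MyNil
    | MyCons (myhd: 'a) (mytl: "'a mylist")
    where
      "mytl MyNil = MyNil"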
 
 
@@ -577,7 +577,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "datatype_compat"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "datatype_compat"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -609,12 +609,12 @@
 \setlength{\itemsep}{0pt}
 
 \item The old-style, nested-as-mutual induction rule and recursor theorems are
-generated under their usual names but with ``@{text "compat_"}'' prefixed
-(e.g., @{text compat_tree.induct}, @{text compat_tree.inducts}, and
-@{text compat_tree.rec}). These theorems should be identical to the ones
+generated under their usual names but with ``\<open>compat_\<close>'' prefixed
+(e.g., \<open>compat_tree.induct\<close>, \<open>compat_tree.inducts\<close>, and
+\<open>compat_tree.rec\<close>). These theorems should be identical to the ones
 generated by the old datatype package, \emph{up to the order of the
-premises}---meaning that the subgoals generated by the @{text induct} or
-@{text induction} method may be in a different order than before.
+premises}---meaning that the subgoals generated by the \<open>induct\<close> or
+\<open>induction\<close> method may be in a different order than before.
 
 \item All types through which recursion takes place must be new-style datatypes
 or the function type.
@@ -626,42 +626,42 @@
   \label{ssec:datatype-generated-constants}\<close>
 
 text \<open>
-Given a datatype @{text "('a\<^sub>1, \<dots>, 'a\<^sub>m) t"} with $m$ live type variables
-and $n$ constructors @{text "t.C\<^sub>1"}, \ldots, @{text "t.C\<^sub>n"}, the following
+Given a datatype \<open>('a\<^sub>1, \<dots>, 'a\<^sub>m) t\<close> with $m$ live type variables
+and $n$ constructors \<open>t.C\<^sub>1\<close>, \ldots, \<open>t.C\<^sub>n\<close>, the following
 auxiliary constants are introduced:
 
 \medskip
 
 \begin{tabular}{@ {}ll@ {}}
 Case combinator: &
-  @{text t.case_t} (rendered using the familiar @{text case}--@{text of} syntax) \\
+  \<open>t.case_t\<close> (rendered using the familiar \<open>case\<close>--\<open>of\<close> syntax) \\
 Discriminators: &
-  @{text t.is_C\<^sub>1}$, \ldots, $@{text t.is_C\<^sub>n} \\
+  \<open>t.is_C\<^sub>1\<close>$, \ldots, $\<open>t.is_C\<^sub>n\<close> \\
 Selectors: &
-  @{text t.un_C\<^sub>11}$, \ldots, $@{text t.un_C\<^sub>1k\<^sub>1} \\
+  \<open>t.un_C\<^sub>11\<close>$, \ldots, $\<open>t.un_C\<^sub>1k\<^sub>1\<close> \\
 & \quad\vdots \\
-& @{text t.un_C\<^sub>n1}$, \ldots, $@{text t.un_C\<^sub>nk\<^sub>n} \\
+& \<open>t.un_C\<^sub>n1\<close>$, \ldots, $\<open>t.un_C\<^sub>nk\<^sub>n\<close> \\
 Set functions: &
-  @{text t.set\<^sub>1_t}, \ldots, @{text t.set\<^sub>m_t} \\
+  \<open>t.set\<^sub>1_t\<close>, \ldots, \<open>t.set\<^sub>m_t\<close> \\
 Map function: &
-  @{text t.map_t} \\
+  \<open>t.map_t\<close> \\
 Relator: &
-  @{text t.rel_t} \\
+  \<open>t.rel_t\<close> \\
 Recursor: &
-  @{text t.rec_t}
+  \<open>t.rec_t\<close>
 \end{tabular}
 
 \medskip
 
 \noindent
-The discriminators and selectors are generated only if the @{text "discs_sels"}
+The discriminators and selectors are generated only if the \<open>discs_sels\<close>
 option is enabled or if names are specified for discriminators or selectors.
 The set functions, map function, predicator, and relator are generated only if $m > 0$.
 
 In addition, some of the plugins introduce their own constants
 (Section~\ref{sec:selecting-plugins}). The case combinator, discriminators,
 and selectors are collectively called \emph{destructors}. The prefix
-``@{text "t."}'' is an optional component of the names and is normally hidden.
+``\<open>t.\<close>'' is an optional component of the names and is normally hidden.
 \<close>
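
text \<open>
  For a datatype called \<open>tree\<close> with one live type variable, for example,
  this scheme yields the names \<open>case_tree\<close>, \<open>set_tree\<close>, \<open>map_tree\<close>,
  \<open>rel_tree\<close>, and \<open>rec_tree\<close> once the \<open>tree.\<close> prefix is hidden.
\<close>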
 
 
@@ -717,17 +717,17 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{inject} @{text "[iff, induct_simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{inject} \<open>[iff, induct_simp]\<close>\rm:] ~ \\
 @{thm list.inject[no_vars]}
 
-\item[@{text "t."}\hthm{distinct} @{text "[simp, induct_simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{distinct} \<open>[simp, induct_simp]\<close>\rm:] ~ \\
 @{thm list.distinct(1)[no_vars]} \\
 @{thm list.distinct(2)[no_vars]}
 
-\item[@{text "t."}\hthm{exhaust} @{text "[cases t, case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{exhaust} \<open>[cases t, case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
 @{thm list.exhaust[no_vars]}
 
-\item[@{text "t."}\hthm{nchotomy}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{nchotomy}\rm:] ~ \\
 @{thm list.nchotomy[no_vars]}
 
 \end{description}
@@ -739,7 +739,7 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{distinct {\upshape[}THEN notE}@{text ", elim!"}\hthm{\upshape]}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{distinct {\upshape[}THEN notE}\<open>, elim!\<close>\hthm{\upshape]}\rm:] ~ \\
 @{thm list.distinct(1)[THEN notE, elim!, no_vars]} \\
 @{thm list.distinct(2)[THEN notE, elim!, no_vars]}
 
@@ -752,28 +752,28 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{case} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{case} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm list.case(1)[no_vars]} \\
 @{thm list.case(2)[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "t."}\hthm{case_cong} @{text "[fundef_cong]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{case_cong} \<open>[fundef_cong]\<close>\rm:] ~ \\
 @{thm list.case_cong[no_vars]}
 
-\item[@{text "t."}\hthm{case_cong_weak} @{text "[cong]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{case_cong_weak} \<open>[cong]\<close>\rm:] ~ \\
 @{thm list.case_cong_weak[no_vars]}
 
-\item[@{text "t."}\hthm{case_distrib}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{case_distrib}\rm:] ~ \\
 @{thm list.case_distrib[no_vars]}
 
-\item[@{text "t."}\hthm{split}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{split}\rm:] ~ \\
 @{thm list.split[no_vars]}
 
-\item[@{text "t."}\hthm{split_asm}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{split_asm}\rm:] ~ \\
 @{thm list.split_asm[no_vars]}
 
-\item[@{text "t."}\hthm{splits} = @{text "split split_asm"}]
+\item[\<open>t.\<close>\hthm{splits} = \<open>split split_asm\<close>]
 
 \end{description}
 \end{indentblock}
@@ -784,55 +784,55 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{disc} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{disc} \<open>[simp]\<close>\rm:] ~ \\
 @{thm list.disc(1)[no_vars]} \\
 @{thm list.disc(2)[no_vars]}
 
-\item[@{text "t."}\hthm{discI}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{discI}\rm:] ~ \\
 @{thm list.discI(1)[no_vars]} \\
 @{thm list.discI(2)[no_vars]}
 
-\item[@{text "t."}\hthm{sel} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{sel} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm list.sel(1)[no_vars]} \\
 @{thm list.sel(2)[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "t."}\hthm{collapse} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{collapse} \<open>[simp]\<close>\rm:] ~ \\
 @{thm list.collapse(1)[no_vars]} \\
 @{thm list.collapse(2)[no_vars]} \\
-The @{text "[simp]"} attribute is exceptionally omitted for datatypes equipped
+The \<open>[simp]\<close> attribute is exceptionally omitted for datatypes equipped
 with a single nullary constructor, because a property of the form
 @{prop "x = C"} is not suitable as a simplification rule.
 
-\item[@{text "t."}\hthm{distinct_disc} @{text "[dest]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{distinct_disc} \<open>[dest]\<close>\rm:] ~ \\
 These properties are missing for @{typ "'a list"} because there is only one
 proper discriminator. If the datatype had been introduced with a second
 discriminator called @{const nonnull}, they would have read as follows: \\[\jot]
 @{prop "null list \<Longrightarrow> \<not> nonnull list"} \\
 @{prop "nonnull list \<Longrightarrow> \<not> null list"}
 
-\item[@{text "t."}\hthm{exhaust_disc} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{exhaust_disc} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
 @{thm list.exhaust_disc[no_vars]}
 
-\item[@{text "t."}\hthm{exhaust_sel} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{exhaust_sel} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
 @{thm list.exhaust_sel[no_vars]}
 
-\item[@{text "t."}\hthm{expand}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{expand}\rm:] ~ \\
 @{thm list.expand[no_vars]}
 
-\item[@{text "t."}\hthm{split_sel}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{split_sel}\rm:] ~ \\
 @{thm list.split_sel[no_vars]}
 
-\item[@{text "t."}\hthm{split_sel_asm}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{split_sel_asm}\rm:] ~ \\
 @{thm list.split_sel_asm[no_vars]}
 
-\item[@{text "t."}\hthm{split_sels} = @{text "split_sel split_sel_asm"}]
-
-\item[@{text "t."}\hthm{case_eq_if}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{split_sels} = \<open>split_sel split_sel_asm\<close>]
+
+\item[\<open>t.\<close>\hthm{case_eq_if}\rm:] ~ \\
 @{thm list.case_eq_if[no_vars]}
 
-\item[@{text "t."}\hthm{disc_eq_case}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{disc_eq_case}\rm:] ~ \\
 @{thm list.disc_eq_case(1)[no_vars]} \\
 @{thm list.disc_eq_case(2)[no_vars]}
 
@@ -840,9 +840,9 @@
 \end{indentblock}
 
 \noindent
-In addition, equational versions of @{text t.disc} are registered with the
-@{text "[code]"} attribute. The @{text "[code]"} attribute is set by the
-@{text code} plugin (Section~\ref{ssec:code-generator}).
+In addition, equational versions of \<open>t.disc\<close> are registered with the
+\<open>[code]\<close> attribute. The \<open>[code]\<close> attribute is set by the
+\<open>code\<close> plugin (Section~\ref{ssec:code-generator}).
 \<close>
 
 
@@ -859,88 +859,87 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{case_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{case_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.case_transfer[no_vars]} \\
-This property is generated by the @{text transfer} plugin
+This property is generated by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}).
-%The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+%The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 %(Section~\ref{ssec:transfer}).
 
-\item[@{text "t."}\hthm{sel_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{sel_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 This property is missing for @{typ "'a list"} because there is no common
 selector to all constructors. \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}).
 
-\item[@{text "t."}\hthm{ctr_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{ctr_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.ctr_transfer(1)[no_vars]} \\
 @{thm list.ctr_transfer(2)[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}).
 
-\item[@{text "t."}\hthm{disc_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{disc_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.disc_transfer(1)[no_vars]} \\
 @{thm list.disc_transfer(2)[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}).
 
-\item[@{text "t."}\hthm{set} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{set} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm list.set(1)[no_vars]} \\
 @{thm list.set(2)[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "t."}\hthm{set_cases} @{text "[consumes 1, cases set: set\<^sub>i_t]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{set_cases} \<open>[consumes 1, cases set: set\<^sub>i_t]\<close>\rm:] ~ \\
 @{thm list.set_cases[no_vars]}
 
-\item[@{text "t."}\hthm{set_intros}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{set_intros}\rm:] ~ \\
 @{thm list.set_intros(1)[no_vars]} \\
 @{thm list.set_intros(2)[no_vars]}
 
-\item[@{text "t."}\hthm{set_sel}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{set_sel}\rm:] ~ \\
 @{thm list.set_sel[no_vars]}
 
-\item[@{text "t."}\hthm{map} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm list.map(1)[no_vars]} \\
 @{thm list.map(2)[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "t."}\hthm{map_disc_iff} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_disc_iff} \<open>[simp]\<close>\rm:] ~ \\
 @{thm list.map_disc_iff[no_vars]}
 
-\item[@{text "t."}\hthm{map_sel}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_sel}\rm:] ~ \\
 @{thm list.map_sel[no_vars]}
 
-\item[@{text "t."}\hthm{pred_inject} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_inject} \<open>[simp]\<close>\rm:] ~ \\
 @{thm list.pred_inject(1)[no_vars]} \\
 @{thm list.pred_inject(2)[no_vars]}
 
-\item[@{text "t."}\hthm{rel_inject} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_inject} \<open>[simp]\<close>\rm:] ~ \\
 @{thm list.rel_inject(1)[no_vars]} \\
 @{thm list.rel_inject(2)[no_vars]}
 
-\item[@{text "t."}\hthm{rel_distinct} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_distinct} \<open>[simp]\<close>\rm:] ~ \\
 @{thm list.rel_distinct(1)[no_vars]} \\
 @{thm list.rel_distinct(2)[no_vars]}
 
-\item[@{text "t."}\hthm{rel_intros}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_intros}\rm:] ~ \\
 @{thm list.rel_intros(1)[no_vars]} \\
 @{thm list.rel_intros(2)[no_vars]}
 
-\item[@{text "t."}\hthm{rel_cases} @{text "[consumes 1, case_names t\<^sub>1 \<dots> t\<^sub>m, cases pred]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_cases} \<open>[consumes 1, case_names t\<^sub>1 \<dots> t\<^sub>m, cases pred]\<close>\rm:] ~ \\
 @{thm list.rel_cases[no_vars]}
 
-\item[@{text "t."}\hthm{rel_sel}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_sel}\rm:] ~ \\
 @{thm list.rel_sel[no_vars]}
 
 \end{description}
 \end{indentblock}
 
 \noindent
-In addition, equational versions of @{text t.rel_inject} and @{text
-rel_distinct} are registered with the @{text "[code]"} attribute. The
-@{text "[code]"} attribute is set by the @{text code} plugin
+In addition, equational versions of \<open>t.rel_inject\<close> and \<open>t.rel_distinct\<close> are registered with the \<open>[code]\<close> attribute. The
+\<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
 The second subgroup consists of more abstract properties of the set functions,
@@ -949,128 +948,128 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{inj_map}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{inj_map}\rm:] ~ \\
 @{thm list.inj_map[no_vars]}
 
-\item[@{text "t."}\hthm{inj_map_strong}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{inj_map_strong}\rm:] ~ \\
 @{thm list.inj_map_strong[no_vars]}
 
-\item[@{text "t."}\hthm{map_comp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_comp}\rm:] ~ \\
 @{thm list.map_comp[no_vars]}
 
-\item[@{text "t."}\hthm{map_cong0}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_cong0}\rm:] ~ \\
 @{thm list.map_cong0[no_vars]}
 
-\item[@{text "t."}\hthm{map_cong} @{text "[fundef_cong]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_cong} \<open>[fundef_cong]\<close>\rm:] ~ \\
 @{thm list.map_cong[no_vars]}
 
-\item[@{text "t."}\hthm{map_cong_pred}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_cong_pred}\rm:] ~ \\
 @{thm list.map_cong_pred[no_vars]}
 
-\item[@{text "t."}\hthm{map_cong_simp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_cong_simp}\rm:] ~ \\
 @{thm list.map_cong_simp[no_vars]}
 
-\item[@{text "t."}\hthm{map_id0}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_id0}\rm:] ~ \\
 @{thm list.map_id0[no_vars]}
 
-\item[@{text "t."}\hthm{map_id}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_id}\rm:] ~ \\
 @{thm list.map_id[no_vars]}
 
-\item[@{text "t."}\hthm{map_ident}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_ident}\rm:] ~ \\
 @{thm list.map_ident[no_vars]}
 
-\item[@{text "t."}\hthm{map_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.map_transfer[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}) for type constructors with no dead type arguments.
 
-\item[@{text "t."}\hthm{pred_cong} @{text "[fundef_cong]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_cong} \<open>[fundef_cong]\<close>\rm:] ~ \\
 @{thm list.pred_cong[no_vars]}
 
-\item[@{text "t."}\hthm{pred_cong_simp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_cong_simp}\rm:] ~ \\
 @{thm list.pred_cong_simp[no_vars]}
 
-\item[@{text "t."}\hthm{pred_map}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_map}\rm:] ~ \\
 @{thm list.pred_map[no_vars]}
 
-\item[@{text "t."}\hthm{pred_mono_strong}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_mono_strong}\rm:] ~ \\
 @{thm list.pred_mono_strong[no_vars]}
 
-\item[@{text "t."}\hthm{pred_rel}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_rel}\rm:] ~ \\
 @{thm list.pred_rel[no_vars]}
 
-\item[@{text "t."}\hthm{pred_set}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_set}\rm:] ~ \\
 @{thm list.pred_set[no_vars]}
 
-\item[@{text "t."}\hthm{pred_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.pred_transfer[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}) for type constructors with no dead type arguments.
 
-\item[@{text "t."}\hthm{pred_True}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{pred_True}\rm:] ~ \\
 @{thm list.pred_True[no_vars]}
 
-\item[@{text "t."}\hthm{set_map}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{set_map}\rm:] ~ \\
 @{thm list.set_map[no_vars]}
 
-\item[@{text "t."}\hthm{set_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{set_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.set_transfer[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}) for type constructors with no dead type arguments.
 
-\item[@{text "t."}\hthm{rel_compp} @{text "[relator_distr]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_compp} \<open>[relator_distr]\<close>\rm:] ~ \\
 @{thm list.rel_compp[no_vars]} \\
-The @{text "[relator_distr]"} attribute is set by the @{text lifting} plugin
+The \<open>[relator_distr]\<close> attribute is set by the \<open>lifting\<close> plugin
 (Section~\ref{ssec:lifting}).
 
-\item[@{text "t."}\hthm{rel_conversep}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_conversep}\rm:] ~ \\
 @{thm list.rel_conversep[no_vars]}
 
-\item[@{text "t."}\hthm{rel_eq}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_eq}\rm:] ~ \\
 @{thm list.rel_eq[no_vars]}
 
-\item[@{text "t."}\hthm{rel_eq_onp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_eq_onp}\rm:] ~ \\
 @{thm list.rel_eq_onp[no_vars]}
 
-\item[@{text "t."}\hthm{rel_flip}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_flip}\rm:] ~ \\
 @{thm list.rel_flip[no_vars]}
 
-\item[@{text "t."}\hthm{rel_map}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_map}\rm:] ~ \\
 @{thm list.rel_map(1)[no_vars]} \\
 @{thm list.rel_map(2)[no_vars]}
 
-\item[@{text "t."}\hthm{rel_mono} @{text "[mono, relator_mono]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_mono} \<open>[mono, relator_mono]\<close>\rm:] ~ \\
 @{thm list.rel_mono[no_vars]} \\
-The @{text "[relator_mono]"} attribute is set by the @{text lifting} plugin
+The \<open>[relator_mono]\<close> attribute is set by the \<open>lifting\<close> plugin
 (Section~\ref{ssec:lifting}).
 
-\item[@{text "t."}\hthm{rel_mono_strong}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_mono_strong}\rm:] ~ \\
 @{thm list.rel_mono_strong[no_vars]}
 
-\item[@{text "t."}\hthm{rel_cong} @{text "[fundef_cong]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_cong} \<open>[fundef_cong]\<close>\rm:] ~ \\
 @{thm list.rel_cong[no_vars]}
 
-\item[@{text "t."}\hthm{rel_cong_simp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_cong_simp}\rm:] ~ \\
 @{thm list.rel_cong_simp[no_vars]}
 
-\item[@{text "t."}\hthm{rel_refl}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_refl}\rm:] ~ \\
 @{thm list.rel_refl[no_vars]}
 
-\item[@{text "t."}\hthm{rel_refl_strong}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_refl_strong}\rm:] ~ \\
 @{thm list.rel_refl_strong[no_vars]}
 
-\item[@{text "t."}\hthm{rel_reflp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_reflp}\rm:] ~ \\
 @{thm list.rel_reflp[no_vars]}
 
-\item[@{text "t."}\hthm{rel_symp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_symp}\rm:] ~ \\
 @{thm list.rel_symp[no_vars]}
 
-\item[@{text "t."}\hthm{rel_transp}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_transp}\rm:] ~ \\
 @{thm list.rel_transp[no_vars]}
 
-\item[@{text "t."}\hthm{rel_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.rel_transfer[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}) for type constructors with no dead type arguments.
 
 \end{description}
@@ -1087,31 +1086,31 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n, induct t]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n, induct t]\<close>\rm:] ~ \\
 @{thm list.induct[no_vars]}
 
-\item[@{text "t."}\hthm{rel_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n, induct pred]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rel_induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n, induct pred]\<close>\rm:] ~ \\
 @{thm list.rel_induct[no_vars]}
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm: \\
-  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{rel_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm: \\
+  \<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm: \\
+  \<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{rel_induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm: \\
 \end{tabular}] ~ \\
 Given $m > 1$ mutually recursive datatypes, this induction rule can be used to
 prove $m$ properties simultaneously, as in the sketch following this list.
 
-\item[@{text "t."}\hthm{rec} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rec} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm list.rec(1)[no_vars]} \\
 @{thm list.rec(2)[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "t."}\hthm{rec_o_map}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rec_o_map}\rm:] ~ \\
 @{thm list.rec_o_map[no_vars]}
 
-\item[@{text "t."}\hthm{rec_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{rec_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.rec_transfer[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}) for type constructors with no dead type arguments.
 
 \end{description}
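As a concrete sketch (with invented names), two mutually recursive datatypes,
a pair of mutually recursive functions over them, and a simultaneous proof by
the joint induction rule:

    datatype enat = EZero | ESucc onat
    and onat = OSucc enat

    primrec flat_e :: "enat \<Rightarrow> nat" and flat_o :: "onat \<Rightarrow> nat" where
      "flat_e EZero = 0"
    | "flat_e (ESucc n) = Suc (flat_o n)"
    | "flat_o (OSucc n) = Suc (flat_e n)"

    lemma "even (flat_e n)" and "odd (flat_o m)"
      by (induction n and m rule: enat_onat.induct) auto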
@@ -1123,8 +1122,8 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{simps}] = @{text t.inject} @{text t.distinct} @{text t.case} @{text t.rec} @{text t.map} @{text t.rel_inject} \\
-@{text t.rel_distinct} @{text t.set}
+\item[\<open>t.\<close>\hthm{simps}] = \<open>t.inject\<close> \<open>t.distinct\<close> \<open>t.case\<close> \<open>t.rec\<close> \<open>t.map\<close> \<open>t.rel_inject\<close> \\
+\<open>t.rel_distinct\<close> \<open>t.set\<close>
 
 \end{description}
 \end{indentblock}
@@ -1161,10 +1160,10 @@
 
 \item \emph{The Standard ML interfaces are different.} Tools and extensions
 written to call the old ML interfaces will need to be adapted to the new
-interfaces. The @{text BNF_LFP_Compat} structure provides convenience functions
+interfaces. The \<open>BNF_LFP_Compat\<close> structure provides convenience functions
 that simulate the old interfaces in terms of the new ones.
 
-\item \emph{The recursor @{text rec_t} has a different signature for nested
+\item \emph{The recursor \<open>rec_t\<close> has a different signature for nested
 recursive datatypes.} In the old package, nested recursion through non-functions
 was internally reduced to mutual recursion. This reduction was visible in the
 type of the recursor, used by \keyw{primrec}. Recursion through functions was
@@ -1180,17 +1179,17 @@
 using @{command primrec} if the recursion is via new-style datatypes, as
 explained in Section~\ref{sssec:primrec-nested-as-mutual-recursion}, or using
 @{command datatype_compat}. For recursion through functions, the old-style
-induction rule can be obtained by applying the @{text "[unfolded
-all_mem_range]"} attribute on @{text t.induct}.
+induction rule can be obtained by applying the \<open>[unfolded
+all_mem_range]\<close> attribute on \<open>t.induct\<close>.
 
 \item \emph{The @{const size} function has a slightly different definition.}
-The new function returns @{text 1} instead of @{text 0} for some nonrecursive
+The new function returns \<open>1\<close> instead of \<open>0\<close> for some nonrecursive
 constructors. This departure from the old behavior made it possible to implement
-@{const size} in terms of the generic function @{text "t.size_t"}. Moreover,
+@{const size} in terms of the generic function \<open>t.size_t\<close>. Moreover,
 the new function considers nested occurrences of a value, in the nested
-recursive case. The old behavior can be obtained by disabling the @{text size}
+recursive case. The old behavior can be obtained by disabling the \<open>size\<close>
 plugin (Section~\ref{sec:selecting-plugins}) and instantiating the
-@{text size} type class manually.
+\<open>size\<close> type class manually.
 
 \item \emph{The internal constructions are completely different.} Proof texts
 that unfold the definition of constants introduced by the old command will
@@ -1198,23 +1197,23 @@
 
 \item \emph{Some constants and theorems have different names.}
 For non-mutually recursive datatypes,
-the alias @{text t.inducts} for @{text t.induct} is no longer generated.
+the alias \<open>t.inducts\<close> for \<open>t.induct\<close> is no longer generated.
 For $m > 1$ mutually recursive datatypes,
-@{text "rec_t\<^sub>1_\<dots>_t\<^sub>m_i"} has been renamed
-@{text "rec_t\<^sub>i"} for each @{text "i \<in> {1, \<dots>, m}"},
-@{text "t\<^sub>1_\<dots>_t\<^sub>m.inducts(i)"} has been renamed
-@{text "t\<^sub>i.induct"} for each @{text "i \<in> {1, \<dots>, m}"}, and the
-collection @{text "t\<^sub>1_\<dots>_t\<^sub>m.size"} (generated by the
-@{text size} plugin, Section~\ref{ssec:size}) has been divided into
-@{text "t\<^sub>1.size"}, \ldots, @{text "t\<^sub>m.size"}.
-
-\item \emph{The @{text t.simps} collection has been extended.}
+\<open>rec_t\<^sub>1_\<dots>_t\<^sub>m_i\<close> has been renamed
+\<open>rec_t\<^sub>i\<close> for each \<open>i \<in> {1, \<dots>, m}\<close>,
+\<open>t\<^sub>1_\<dots>_t\<^sub>m.inducts(i)\<close> has been renamed
+\<open>t\<^sub>i.induct\<close> for each \<open>i \<in> {1, \<dots>, m}\<close>, and the
+collection \<open>t\<^sub>1_\<dots>_t\<^sub>m.size\<close> (generated by the
+\<open>size\<close> plugin, Section~\ref{ssec:size}) has been divided into
+\<open>t\<^sub>1.size\<close>, \ldots, \<open>t\<^sub>m.size\<close>.
+
+\item \emph{The \<open>t.simps\<close> collection has been extended.}
 Previously available theorems keep the same index as before.
 
 \item \emph{Variables in generated properties have different names.} This is
 rarely an issue, except in proof texts that refer to variable names in the
-@{text "[where \<dots>]"} attribute. The solution is to use the more robust
-@{text "[of \<dots>]"} syntax.
+\<open>[where \<dots>]\<close> attribute. The solution is to use the more robust
+\<open>[of \<dots>]\<close> syntax.
 \end{itemize}
 \<close>
 
@@ -1396,8 +1395,8 @@
 
 text \<open>
 \noindent
-The next example features recursion through the @{text option} type. Although
-@{text option} is not a new-style datatype, it is registered as a BNF with the
+The next example features recursion through the \<open>option\<close> type. Although
+\<open>option\<close> is not a new-style datatype, it is registered as a BNF with the
 map function @{const map_option}:
 \<close>
 
@@ -1410,7 +1409,7 @@
 \noindent
 The same principle applies for arbitrary type constructors through which
 recursion is possible. Notably, the map function for the function type
-(@{text \<Rightarrow>}) is simply composition (@{text "(\<circ>)"}):
+(\<open>\<Rightarrow>\<close>) is simply composition (\<open>(\<circ>)\<close>):
 \<close>
 
     primrec (*<*)(in early) (*>*)relabel_ft :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a ftree \<Rightarrow> 'a ftree" where
@@ -1559,7 +1558,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "primrec"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -1589,18 +1588,18 @@
 \setlength{\itemsep}{0pt}
 
 \item
-The @{text plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+The \<open>plugins\<close> option indicates which plugins should be enabled
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 \item
-The @{text nonexhaustive} option indicates that the functions are not
+The \<open>nonexhaustive\<close> option indicates that the functions are not
 necessarily specified for all constructors. It can be used to suppress the
 warning that is normally emitted when some constructors are missing (see the
 sketch after this list).
 
 \item
-The @{text transfer} option indicates that an unconditional transfer rule
-should be generated and proved @{text "by transfer_prover"}. The
-@{text "[transfer_rule]"} attribute is set on the generated theorem.
+The \<open>transfer\<close> option indicates that an unconditional transfer rule
+should be generated and proved \<open>by transfer_prover\<close>. The
+\<open>[transfer_rule]\<close> attribute is set on the generated theorem.
 \end{itemize}
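
 \noindent
 As an illustration of the \<open>nonexhaustive\<close> option, here is a sketch of
 a head function (the hypothetical \<open>hd'\<close>) that is left unspecified for
 the empty list:
 \<close>

     primrec (nonexhaustive) hd' :: "'a list \<Rightarrow> 'a" where
       "hd' (x # xs) = x"

 text \<open>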
 
 %%% TODO: elaborate on format of the equations
@@ -1623,23 +1622,23 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "f."}\hthm{simps} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{simps} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm tfold.simps(1)[no_vars]} \\
 @{thm tfold.simps(2)[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "f."}\hthm{transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm tfold.transfer[no_vars]} \\
-This theorem is generated by the @{text transfer} plugin
-(Section~\ref{ssec:transfer}) for functions declared with the @{text transfer}
+This theorem is generated by the \<open>transfer\<close> plugin
+(Section~\ref{ssec:transfer}) for functions declared with the \<open>transfer\<close>
 option enabled.
 
-\item[@{text "f."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
 This induction rule is generated for nested-as-mutual recursive functions
 (Section~\ref{sssec:primrec-nested-as-mutual-recursion}).
 
-\item[@{text "f\<^sub>1_\<dots>_f\<^sub>m."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\rm:] ~ \\
+\item[\<open>f\<^sub>1_\<dots>_f\<^sub>m.\<close>\hthm{induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
 This induction rule is generated for nested-as-mutual recursive functions
 (Section~\ref{sssec:primrec-nested-as-mutual-recursion}). Given $m > 1$ mutually
 recursive functions, this rule can be used to prove $m$ properties
@@ -1658,7 +1657,7 @@
   \label{ssec:primrec-recursive-default-values-for-selectors}\<close>
 
 text \<open>
-A datatype selector @{text un_D} can have a default value for each constructor
+A datatype selector \<open>un_D\<close> can have a default value for each constructor
 on which it is not otherwise specified. Occasionally, it is useful to define
 the default value recursively. This leads to a chicken-and-egg
 situation, because the datatype has not yet been introduced at the moment when the
@@ -1673,20 +1672,20 @@
 \setlength{\itemsep}{0pt}
 
 \item
-Introduce a fully unspecified constant @{text "un_D\<^sub>0 :: 'a"} using
+Introduce a fully unspecified constant \<open>un_D\<^sub>0 :: 'a\<close> using
 @{command consts}.
 
 \item
-Define the datatype, specifying @{text "un_D\<^sub>0"} as the selector's default
+Define the datatype, specifying \<open>un_D\<^sub>0\<close> as the selector's default
 value.
 
 \item
-Define the behavior of @{text "un_D\<^sub>0"} on values of the newly introduced
+Define the behavior of \<open>un_D\<^sub>0\<close> on values of the newly introduced
 datatype using the \keyw{overloading} command.
 
 \item
-Derive the desired equation on @{text un_D} from the characteristic equations
-for @{text "un_D\<^sub>0"}.
+Derive the desired equation on \<open>un_D\<close> from the characteristic equations
+for \<open>un_D\<^sub>0\<close>.
 \end{enumerate}
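
 \noindent
 A sketch of this recipe, for a hypothetical type \<open>tlist\<close> of lists
 carrying a terminator of a different type, might read as follows:
 \<close>

     consts termi\<^sub>0 :: 'a

     datatype ('a, 'b) tlist =
       TNil (termi: 'b)
     | TCons (thd: 'a) (ttl: "('a, 'b) tlist")
     where
       "ttl (TNil y) = TNil y"
     | "termi (TCons _ xs) = termi\<^sub>0 xs"

     overloading
       termi\<^sub>0 \<equiv> "termi\<^sub>0 :: ('a, 'b) tlist \<Rightarrow> 'b"
     begin
     primrec termi\<^sub>0 :: "('a, 'b) tlist \<Rightarrow> 'b" where
       "termi\<^sub>0 (TNil y) = y"
     | "termi\<^sub>0 (TCons x xs) = termi\<^sub>0 xs"
     end

     lemma termi_TCons[simp]: "termi (TCons x xs) = termi xs"
       by (cases xs) auto

 text \<open>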
 
 \noindent
@@ -1737,8 +1736,8 @@
 
 \item \emph{Some theorems have different names.}
 For $m > 1$ mutually recursive functions,
-@{text "f\<^sub>1_\<dots>_f\<^sub>m.simps"} has been broken down into separate
-subcollections @{text "f\<^sub>i.simps"}.
+\<open>f\<^sub>1_\<dots>_f\<^sub>m.simps\<close> has been broken down into separate
+subcollections \<open>f\<^sub>i.simps\<close>.
 \end{itemize}
 \<close>
 
@@ -1781,8 +1780,8 @@
 
 text \<open>
 \noindent
-Lazy lists can be infinite, such as @{text "LCons 0 (LCons 0 (\<dots>))"} and
-@{text "LCons 0 (LCons 1 (LCons 2 (\<dots>)))"}. Here is a related type, that of
+Lazy lists can be infinite, such as \<open>LCons 0 (LCons 0 (\<dots>))\<close> and
+\<open>LCons 0 (LCons 1 (LCons 2 (\<dots>)))\<close>. Here is a related type, that of
 infinite streams:
 \<close>
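
 text \<open>
 \noindent
 The declarations themselves fall between hunks; the stream type is
 presumably declared along these lines:
 \<close>

     codatatype 'a stream = SCons (shd: 'a) (stl: "'a stream")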
 
@@ -1802,9 +1801,9 @@
 
 text \<open>
 \noindent
-This type has exactly one infinite element, @{text "ESucc (ESucc (ESucc (\<dots>)))"},
+This type has exactly one infinite element, \<open>ESucc (ESucc (ESucc (\<dots>)))\<close>,
 that represents $\infty$. In addition, it has finite values of the form
-@{text "ESucc (\<dots> (ESucc EZero)\<dots>)"}.
+\<open>ESucc (\<dots> (ESucc EZero)\<dots>)\<close>.
 
 Here is an example with many constructors:
 \<close>
@@ -1861,7 +1860,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "codatatype"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "codatatype"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -1872,7 +1871,7 @@
 
 \noindent
 Definitions of codatatypes have almost exactly the same syntax as for datatypes
-(Section~\ref{ssec:datatype-command-syntax}). The @{text "discs_sels"} option
+(Section~\ref{ssec:datatype-command-syntax}). The \<open>discs_sels\<close> option
 is superfluous because discriminators and selectors are always generated for
 codatatypes.
 \<close>
@@ -1882,9 +1881,9 @@
   \label{ssec:codatatype-generated-constants}\<close>
 
 text \<open>
-Given a codatatype @{text "('a\<^sub>1, \<dots>, 'a\<^sub>m) t"}
-with $m > 0$ live type variables and $n$ constructors @{text "t.C\<^sub>1"},
-\ldots, @{text "t.C\<^sub>n"}, the same auxiliary constants are generated as for
+Given a codatatype \<open>('a\<^sub>1, \<dots>, 'a\<^sub>m) t\<close>
+with $m > 0$ live type variables and $n$ constructors \<open>t.C\<^sub>1\<close>,
+\ldots, \<open>t.C\<^sub>n\<close>, the same auxiliary constants are generated as for
 datatypes (Section~\ref{ssec:datatype-generated-constants}), except that the
 recursor is replaced by a dual concept:
 
@@ -1892,7 +1891,7 @@
 
 \begin{tabular}{@ {}ll@ {}}
 Corecursor: &
-  @{text t.corec_t}
+  \<open>t.corec_t\<close>
 \end{tabular}
 \<close>
 
@@ -1934,69 +1933,69 @@
 \begin{description}
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t."}\hthm{coinduct} @{text "[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "t."}\hthm{coinduct} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n, coinduct t]"}\rm:
+  \<open>t.\<close>\hthm{coinduct} \<open>[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>t.\<close>\hthm{coinduct} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n, coinduct t]\<close>\rm:
 \end{tabular}] ~ \\
 @{thm llist.coinduct[no_vars]}
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t."}\hthm{coinduct_strong} @{text "[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "t."}\hthm{coinduct_strong} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots> D\<^sub>n]"}\rm:
+  \<open>t.\<close>\hthm{coinduct_strong} \<open>[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>t.\<close>\hthm{coinduct_strong} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots> D\<^sub>n]\<close>\rm:
 \end{tabular}] ~ \\
 @{thm llist.coinduct_strong[no_vars]}
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t."}\hthm{rel_coinduct} @{text "[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "t."}\hthm{rel_coinduct} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n, coinduct pred]"}\rm:
+  \<open>t.\<close>\hthm{rel_coinduct} \<open>[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>t.\<close>\hthm{rel_coinduct} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n, coinduct pred]\<close>\rm:
 \end{tabular}] ~ \\
 @{thm llist.rel_coinduct[no_vars]}
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{coinduct} @{text "[case_names t\<^sub>1 \<dots> t\<^sub>m, case_conclusion D\<^sub>1 \<dots> D\<^sub>n]"} \\
-  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{coinduct_strong} @{text "[case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{coinduct_strong} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots> D\<^sub>n]"}\rm: \\
-  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{rel_coinduct} @{text "[case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{rel_coinduct} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots> D\<^sub>n]"}\rm: \\
+  \<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{coinduct} \<open>[case_names t\<^sub>1 \<dots> t\<^sub>m, case_conclusion D\<^sub>1 \<dots> D\<^sub>n]\<close> \\
+  \<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{coinduct_strong} \<open>[case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{coinduct_strong} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots> D\<^sub>n]\<close>\rm: \\
+  \<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{rel_coinduct} \<open>[case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{rel_coinduct} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots> D\<^sub>n]\<close>\rm: \\
 \end{tabular}] ~ \\
 Given $m > 1$ mutually corecursive codatatypes, these coinduction rules can be
 used to prove $m$ properties simultaneously.
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{set_induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n,"} \\
-  \phantom{@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{set_induct} @{text "["}}@{text "induct set: set\<^sub>j_t\<^sub>1, \<dots>, induct set: set\<^sub>j_t\<^sub>m]"}\rm: \\
+  \<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{set_induct} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n,\<close> \\
+  \phantom{\<open>t\<^sub>1_\<dots>_t\<^sub>m.\<close>\hthm{set_induct} \<open>[\<close>}\<open>induct set: set\<^sub>j_t\<^sub>1, \<dots>, induct set: set\<^sub>j_t\<^sub>m]\<close>\rm: \\
 \end{tabular}] ~ \\
 @{thm llist.set_induct[no_vars]} \\
-If $m = 1$, the attribute @{text "[consumes 1]"} is generated as well.
-
-\item[@{text "t."}\hthm{corec}\rm:] ~ \\
+If $m = 1$, the attribute \<open>[consumes 1]\<close> is generated as well.
+
+\item[\<open>t.\<close>\hthm{corec}\rm:] ~ \\
 @{thm llist.corec(1)[no_vars]} \\
 @{thm llist.corec(2)[no_vars]}
 
-\item[@{text "t."}\hthm{corec_code} @{text "[code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{corec_code} \<open>[code]\<close>\rm:] ~ \\
 @{thm llist.corec_code[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "t."}\hthm{corec_disc}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{corec_disc}\rm:] ~ \\
 @{thm llist.corec_disc(1)[no_vars]} \\
 @{thm llist.corec_disc(2)[no_vars]}
 
-\item[@{text "t."}\hthm{corec_disc_iff} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{corec_disc_iff} \<open>[simp]\<close>\rm:] ~ \\
 @{thm llist.corec_disc_iff(1)[no_vars]} \\
 @{thm llist.corec_disc_iff(2)[no_vars]}
 
-\item[@{text "t."}\hthm{corec_sel} @{text "[simp]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{corec_sel} \<open>[simp]\<close>\rm:] ~ \\
 @{thm llist.corec_sel(1)[no_vars]} \\
 @{thm llist.corec_sel(2)[no_vars]}
 
-\item[@{text "t."}\hthm{map_o_corec}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{map_o_corec}\rm:] ~ \\
 @{thm llist.map_o_corec[no_vars]}
 
-\item[@{text "t."}\hthm{corec_transfer} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{corec_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm llist.corec_transfer[no_vars]} \\
-The @{text "[transfer_rule]"} attribute is set by the @{text transfer} plugin
+The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
 (Section~\ref{ssec:transfer}) for type constructors with no dead type arguments.
 
 \end{description}
@@ -2008,8 +2007,8 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{simps}] = @{text t.inject} @{text t.distinct} @{text t.case} @{text t.corec_disc_iff} @{text t.corec_sel} \\
-@{text t.map} @{text t.rel_inject} @{text t.rel_distinct} @{text t.set}
+\item[\<open>t.\<close>\hthm{simps}] = \<open>t.inject\<close> \<open>t.distinct\<close> \<open>t.case\<close> \<open>t.corec_disc_iff\<close> \<open>t.corec_sel\<close> \\
+\<open>t.map\<close> \<open>t.rel_inject\<close> \<open>t.rel_distinct\<close> \<open>t.set\<close>
 
 \end{description}
 \end{indentblock}
@@ -2042,17 +2041,17 @@
 \belowdisplayskip=.5\belowdisplayskip
 
 \item The \emph{destructor view} specifies $f$ by implications of the form
-\[@{text "\<dots> \<Longrightarrow> is_C\<^sub>j (f x\<^sub>1 \<dots> x\<^sub>n)"}\] and
+\[\<open>\<dots> \<Longrightarrow> is_C\<^sub>j (f x\<^sub>1 \<dots> x\<^sub>n)\<close>\] and
 equations of the form
-\[@{text "un_C\<^sub>ji (f x\<^sub>1 \<dots> x\<^sub>n) = \<dots>"}\]
+\[\<open>un_C\<^sub>ji (f x\<^sub>1 \<dots> x\<^sub>n) = \<dots>\<close>\]
 This style is popular in the coalgebraic literature.
 
 \item The \emph{constructor view} specifies $f$ by equations of the form
-\[@{text "\<dots> \<Longrightarrow> f x\<^sub>1 \<dots> x\<^sub>n = C\<^sub>j \<dots>"}\]
+\[\<open>\<dots> \<Longrightarrow> f x\<^sub>1 \<dots> x\<^sub>n = C\<^sub>j \<dots>\<close>\]
 This style is often more concise than the previous one.
 
 \item The \emph{code view} specifies $f$ by a single equation of the form
-\[@{text "f x\<^sub>1 \<dots> x\<^sub>n = \<dots>"}\]
+\[\<open>f x\<^sub>1 \<dots> x\<^sub>n = \<dots>\<close>\]
 with restrictions on the format of the right-hand side. Lazy functional
 programming languages such as Haskell support a generalized version of this
 style.
@@ -2100,9 +2099,9 @@
 \noindent
 The constructor ensures that progress is made---i.e., the function is
 \emph{productive}. The above functions compute the infinite lazy list or stream
-@{text "[x, g x, g (g x), \<dots>]"}. Productivity guarantees that prefixes
-@{text "[x, g x, g (g x), \<dots>, (g ^^ k) x]"} of arbitrary finite length
-@{text k} can be computed by unfolding the code equation a finite number of
+\<open>[x, g x, g (g x), \<dots>]\<close>. Productivity guarantees that prefixes
+\<open>[x, g x, g (g x), \<dots>, (g ^^ k) x]\<close> of arbitrary finite length
+\<open>k\<close> can be computed by unfolding the code equation a finite number of
 times.
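
 For the stream case, such a definition might read as follows (assuming
 the \<open>stream\<close> type sketched earlier):
 \<close>

     primcorec siterate :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a stream" where
       "siterate g x = SCons x (siterate g (g x))"

 text \<open>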
 
 Corecursive functions construct codatatype values, but nothing prevents them
@@ -2115,8 +2114,7 @@
 
 text \<open>
 \noindent
-Constructs such as @{text "let"}--@{text "in"}, @{text
-"if"}--@{text "then"}--@{text "else"}, and @{text "case"}--@{text "of"} may
+Constructs such as \<open>let\<close>--\<open>in\<close>, \<open>if\<close>--\<open>then\<close>--\<open>else\<close>, and \<open>case\<close>--\<open>of\<close> may
 appear around constructors that guard corecursive calls:
 \<close>
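
 text \<open>
 \noindent
 A sketch of such a guarded \<open>case\<close>--\<open>of\<close>, assuming a lazy list
 codatatype with constructors \<open>LNil\<close> and \<open>LCons\<close>:
 \<close>

     codatatype 'a llist = LNil | LCons (lhd: 'a) (ltl: "'a llist")

     primcorec lapp :: "'a llist \<Rightarrow> 'a llist \<Rightarrow> 'a llist" where
       "lapp xs ys = (case xs of
            LNil \<Rightarrow> ys
          | LCons x xs' \<Rightarrow> LCons x (lapp xs' ys))"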
 
@@ -2128,7 +2126,7 @@
 
 text \<open>
 \noindent
-For technical reasons, @{text "case"}--@{text "of"} is only supported for
+For technical reasons, \<open>case\<close>--\<open>of\<close> is only supported for
 case distinctions on (co)datatypes that provide discriminators and selectors.
 
 Pattern matching is not supported by @{command primcorec}. Fortunately, it is
@@ -2155,8 +2153,8 @@
 text \<open>
 \noindent
 The example below constructs a pseudorandom process value. It takes a stream of
-actions (@{text s}), a pseudorandom function generator (@{text f}), and a
-pseudorandom seed (@{text n}):
+actions (\<open>s\<close>), a pseudorandom function generator (\<open>f\<close>), and a
+pseudorandom seed (\<open>n\<close>):
 \<close>
 
 (*<*)
@@ -2210,8 +2208,7 @@
 text \<open>
 The next pair of examples generalize the @{const literate} and @{const siterate}
 functions (Section~\ref{sssec:primcorec-nested-corecursion}) to possibly
-infinite trees in which subnodes are organized either as a lazy list (@{text
-tree\<^sub>i\<^sub>i}) or as a finite set (@{text tree\<^sub>i\<^sub>s}). They rely on the map functions of
+infinite trees in which subnodes are organized either as a lazy list (\<open>tree\<^sub>i\<^sub>i\<close>) or as a finite set (\<open>tree\<^sub>i\<^sub>s\<close>). They rely on the map functions of
 the nesting type constructors to lift the corecursive calls:
 \<close>
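
 text \<open>
 \noindent
 A sketch of the lazy list variant, assuming an \<open>llist\<close> codatatype whose
 map function is named \<open>lmap\<close>:
 \<close>

     codatatype 'a tree\<^sub>i\<^sub>i = Node\<^sub>i\<^sub>i (lbl\<^sub>i\<^sub>i: 'a) (sub\<^sub>i\<^sub>i: "'a tree\<^sub>i\<^sub>i llist")

     primcorec iterate\<^sub>i\<^sub>i :: "('a \<Rightarrow> 'a llist) \<Rightarrow> 'a \<Rightarrow> 'a tree\<^sub>i\<^sub>i" where
       "iterate\<^sub>i\<^sub>i g x = Node\<^sub>i\<^sub>i x (lmap (iterate\<^sub>i\<^sub>i g) (g x))"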
 
@@ -2252,9 +2249,9 @@
 text \<open>
 The next example illustrates corecursion through functions, which is a bit
 special. Deterministic finite automata (DFAs) are traditionally defined as
-5-tuples @{text "(Q, \<Sigma>, \<delta>, q\<^sub>0, F)"}, where @{text Q} is a finite set of states,
-@{text \<Sigma>} is a finite alphabet, @{text \<delta>} is a transition function, @{text q\<^sub>0}
-is an initial state, and @{text F} is a set of final states. The following
+5-tuples \<open>(Q, \<Sigma>, \<delta>, q\<^sub>0, F)\<close>, where \<open>Q\<close> is a finite set of states,
+\<open>\<Sigma>\<close> is a finite alphabet, \<open>\<delta>\<close> is a transition function, \<open>q\<^sub>0\<close>
+is an initial state, and \<open>F\<close> is a set of final states. The following
 function translates a DFA into a state machine:
 \<close>
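
 text \<open>
 \noindent
 A sketch of such a translation, with a hypothetical state machine type
 \<open>sm\<close>:
 \<close>

     codatatype 'a sm = SM (accept: bool) (trans: "'a \<Rightarrow> 'a sm")

     primcorec sm_of_dfa :: "('q \<Rightarrow> 'a \<Rightarrow> 'q) \<Rightarrow> 'q set \<Rightarrow> 'q \<Rightarrow> 'a sm" where
       "sm_of_dfa \<delta> F q = SM (q \<in> F) (sm_of_dfa \<delta> F \<circ> \<delta> q)"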
 
@@ -2263,8 +2260,8 @@
 
 text \<open>
 \noindent
-The map function for the function type (@{text \<Rightarrow>}) is composition
-(@{text "(\<circ>)"}). For convenience, corecursion through functions can
+The map function for the function type (\<open>\<Rightarrow>\<close>) is composition
+(\<open>(\<circ>)\<close>). For convenience, corecursion through functions can
 also be expressed using $\lambda$-abstractions and function application rather
 than through composition. For example:
 \<close>
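
 text \<open>
 \noindent
 A sketch of the $\lambda$-based style, for the same hypothetical machine
 type (primed name to avoid a clash with the previous sketch):
 \<close>

     primcorec sm_of_dfa' :: "('q \<Rightarrow> 'a \<Rightarrow> 'q) \<Rightarrow> 'q set \<Rightarrow> 'q \<Rightarrow> 'a sm" where
       "sm_of_dfa' \<delta> F q = SM (q \<in> F) (\<lambda>a. sm_of_dfa' \<delta> F (\<delta> q a))"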
@@ -2343,7 +2340,7 @@
 @{thm [source] iterate\<^sub>i\<^sub>i.coinduct},
 @{thm [source] iterates\<^sub>i\<^sub>i.coinduct}, and
 @{thm [source] iterate\<^sub>i\<^sub>i_iterates\<^sub>i\<^sub>i.coinduct}
-and analogously for @{text coinduct_strong}. These rules and the
+and analogously for \<open>coinduct_strong\<close>. These rules and the
 underlying corecursors are generated dynamically and are kept in a cache
 to speed up subsequent definitions.
 \<close>
@@ -2418,7 +2415,7 @@
 obligations, one for each pair of conditions
 @{term "(n mod (4::int) = i, n mod (4::int) = j)"}
 with @{term "i < j"}. If we prefer not to discharge any obligations, we can
-enable the @{text "sequential"} option. This pushes the problem to the users of
+enable the \<open>sequential\<close> option. This pushes the problem to the users of
 the generated properties.
 %Here are more examples to conclude:
 \<close>
@@ -2435,7 +2432,7 @@
 text \<open>
 The destructor view is in many respects dual to the constructor view. Conditions
 determine which constructor to choose, and these conditions are interpreted
-sequentially or not depending on the @{text "sequential"} option.
+sequentially or not depending on the \<open>sequential\<close> option.
 Consider the following examples:
 \<close>
 
@@ -2517,7 +2514,7 @@
 
 text \<open>
 \noindent
-Using the @{text "of"} keyword, different equations are specified for @{const
+Using the \<open>of\<close> keyword, different equations are specified for @{const
 cont} depending on which constructor is selected.
 
 Here are more examples to conclude:
@@ -2549,8 +2546,8 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "primcorec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
-  @{command_def "primcorecursive"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  @{command_def "primcorec"} & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
+  @{command_def "primcorecursive"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -2580,27 +2577,27 @@
 \setlength{\itemsep}{0pt}
 
 \item
-The @{text plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+The \<open>plugins\<close> option indicates which plugins should be enabled
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 \item
-The @{text sequential} option indicates that the conditions in specifications
+The \<open>sequential\<close> option indicates that the conditions in specifications
 expressed using the constructor or destructor view are to be interpreted
 sequentially.
 
 \item
-The @{text exhaustive} option indicates that the conditions in specifications
+The \<open>exhaustive\<close> option indicates that the conditions in specifications
 expressed using the constructor or destructor view cover all possible cases.
 This generally gives rise to an additional proof obligation.
 
 \item
-The @{text transfer} option indicates that an unconditional transfer rule
-should be generated and proved @{text "by transfer_prover"}. The
-@{text "[transfer_rule]"} attribute is set on the generated theorem.
+The \<open>transfer\<close> option indicates that an unconditional transfer rule
+should be generated and proved \<open>by transfer_prover\<close>. The
+\<open>[transfer_rule]\<close> attribute is set on the generated theorem.
 \end{itemize}
 
 The @{command primcorec} command is an abbreviation for @{command
-primcorecursive} with @{text "by auto?"} to discharge any emerging proof
+primcorecursive} with \<open>by auto?\<close> to discharge any emerging proof
 obligations.
 
 %%% TODO: elaborate on format of the propositions
@@ -2618,60 +2615,60 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "f."}\hthm{code} @{text "[code]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{code} \<open>[code]\<close>\rm:] ~ \\
 @{thm literate.code[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "f."}\hthm{ctr}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{ctr}\rm:] ~ \\
 @{thm literate.ctr[no_vars]}
 
-\item[@{text "f."}\hthm{disc} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{disc} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm literate.disc[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
-(Section~\ref{ssec:code-generator}). The @{text "[simp]"} attribute is set only
-for functions for which @{text f.disc_iff} is not available.
-
-\item[@{text "f."}\hthm{disc_iff} @{text "[simp]"}\rm:] ~ \\
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
+(Section~\ref{ssec:code-generator}). The \<open>[simp]\<close> attribute is set only
+for functions for which \<open>f.disc_iff\<close> is not available.
+
+\item[\<open>f.\<close>\hthm{disc_iff} \<open>[simp]\<close>\rm:] ~ \\
 @{thm literate.disc_iff[no_vars]} \\
 This property is generated only for functions declared with the
-@{text exhaustive} option or whose conditions are trivially exhaustive.
-
-\item[@{text "f."}\hthm{sel} @{text "[simp, code]"}\rm:] ~ \\
+\<open>exhaustive\<close> option or whose conditions are trivially exhaustive.
+
+\item[\<open>f.\<close>\hthm{sel} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm literate.sel[no_vars]} \\
-The @{text "[code]"} attribute is set by the @{text code} plugin
+The \<open>[code]\<close> attribute is set by the \<open>code\<close> plugin
 (Section~\ref{ssec:code-generator}).
 
-\item[@{text "f."}\hthm{exclude}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{exclude}\rm:] ~ \\
 These properties are missing for @{const literate} because no exclusiveness
 proof obligations arose. In general, the properties correspond to the
 discharged proof obligations.
 
-\item[@{text "f."}\hthm{exhaust}\rm:] ~ \\
+\item[\<open>f.\<close>\hthm{exhaust}\rm:] ~ \\
 This property is missing for @{const literate} because no exhaustiveness
 proof obligation arose. In general, the property corresponds to the discharged
 proof obligation.
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "f."}\hthm{coinduct} @{text "[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "f."}\hthm{coinduct} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n]"}\rm:
+  \<open>f.\<close>\hthm{coinduct} \<open>[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>f.\<close>\hthm{coinduct} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n]\<close>\rm:
 \end{tabular}] ~ \\
 This coinduction rule is generated for nested-as-mutual corecursive functions
 (Section~\ref{sssec:primcorec-nested-as-mutual-corecursion}).
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "f."}\hthm{coinduct_strong} @{text "[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "f."}\hthm{coinduct_strong} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n]"}\rm:
+  \<open>f.\<close>\hthm{coinduct_strong} \<open>[consumes m, case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>f.\<close>\hthm{coinduct_strong} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n]\<close>\rm:
 \end{tabular}] ~ \\
 This coinduction rule is generated for nested-as-mutual corecursive functions
 (Section~\ref{sssec:primcorec-nested-as-mutual-corecursion}).
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "f\<^sub>1_\<dots>_f\<^sub>m."}\hthm{coinduct} @{text "[case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "f."}\hthm{coinduct} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n]"}\rm:
+  \<open>f\<^sub>1_\<dots>_f\<^sub>m.\<close>\hthm{coinduct} \<open>[case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>f.\<close>\hthm{coinduct} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n]\<close>\rm:
 \end{tabular}] ~ \\
 This coinduction rule is generated for nested-as-mutual corecursive functions
 (Section~\ref{sssec:primcorec-nested-as-mutual-corecursion}). Given $m > 1$
@@ -2679,9 +2676,9 @@
 simultaneously.
 
 \item[\begin{tabular}{@ {}l@ {}}
-  @{text "f\<^sub>1_\<dots>_f\<^sub>m."}\hthm{coinduct_strong} @{text "[case_names t\<^sub>1 \<dots> t\<^sub>m,"} \\
-  \phantom{@{text "f."}\hthm{coinduct_strong} @{text "["}}@{text "case_conclusion D\<^sub>1 \<dots>
-  D\<^sub>n]"}\rm:
+  \<open>f\<^sub>1_\<dots>_f\<^sub>m.\<close>\hthm{coinduct_strong} \<open>[case_names t\<^sub>1 \<dots> t\<^sub>m,\<close> \\
+  \phantom{\<open>f.\<close>\hthm{coinduct_strong} \<open>[\<close>}\<open>case_conclusion D\<^sub>1 \<dots>
+  D\<^sub>n]\<close>\rm:
 \end{tabular}] ~ \\
 This coinduction rule is generated for nested-as-mutual corecursive functions
 (Section~\ref{sssec:primcorec-nested-as-mutual-corecursion}). Given $m > 1$
@@ -2698,7 +2695,7 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "f."}\hthm{simps}] = @{text f.disc_iff} (or @{text f.disc}) @{text t.sel}
+\item[\<open>f.\<close>\hthm{simps}] = \<open>f.disc_iff\<close> (or \<open>f.disc\<close>) \<open>t.sel\<close>
 
 \end{description}
 \end{indentblock}
@@ -2738,14 +2735,14 @@
 (functorial action), $n$ set functions (natural transformations),
 and an infinite cardinal bound that satisfy certain properties.
 For example, @{typ "'a llist"} is a unary BNF.
-Its predicator @{text "llist_all ::
+Its predicator \<open>llist_all ::
   ('a \<Rightarrow> bool) \<Rightarrow>
-  'a llist \<Rightarrow> bool"}
+  'a llist \<Rightarrow> bool\<close>
 extends unary predicates over elements to unary predicates over
 lazy lists.
-Similarly, its relator @{text "llist_all2 ::
+Similarly, its relator \<open>llist_all2 ::
   ('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow>
-  'a llist \<Rightarrow> 'b llist \<Rightarrow> bool"}
+  'a llist \<Rightarrow> 'b llist \<Rightarrow> bool\<close>
 extends binary predicates over elements to binary predicates over parallel
 lazy lists. The cardinal bound limits the number of elements returned by the
 set function; it may not depend on the cardinality of @{typ 'a}.
@@ -2947,11 +2944,11 @@
 text \<open>
 The next example declares a BNF axiomatically. This can be convenient for
 reasoning abstractly about an arbitrary BNF. The @{command bnf_axiomatization}
-command below introduces a type @{text "('a, 'b, 'c) F"}, three set constants,
+command below introduces a type \<open>('a, 'b, 'c) F\<close>, three set constants,
 a map function, a predicator, a relator, and a nonemptiness witness that depends only on
-@{typ 'a}. The type @{text "'a \<Rightarrow> ('a, 'b, 'c) F"} of the witness can be read
+@{typ 'a}. The type \<open>'a \<Rightarrow> ('a, 'b, 'c) F\<close> of the witness can be read
 as an implication: Given a witness for @{typ 'a}, we can construct a witness for
-@{text "('a, 'b, 'c) F"}. The BNF properties are postulated as axioms.
+\<open>('a, 'b, 'c) F\<close>. The BNF properties are postulated as axioms.
 \<close>
 
     bnf_axiomatization (setA: 'a, setB: 'b, setC: 'c) F
@@ -2971,7 +2968,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "bnf"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  @{command_def "bnf"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -2994,7 +2991,7 @@
 @{cite "isabelle-isar-ref"}.
 
 The @{syntax plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 %%% TODO: elaborate on proof obligations
 \<close>
@@ -3004,7 +3001,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "lift_bnf"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  @{command_def "lift_bnf"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3024,7 +3021,7 @@
 type}) using the @{command typedef} command. To achieve this, it lifts the BNF
 structure on the raw type to the abstract type following a @{term
 type_definition} theorem. The theorem is usually inferred from the type, but can
-also be explicitly supplied by means of the optional @{text via} clause. In
+also be explicitly supplied by means of the optional \<open>via\<close> clause. In
 addition, custom names for the set functions, the map function, the predicator, and the relator,
 as well as nonemptiness witnesses can be specified.
 
@@ -3032,7 +3029,7 @@
 incomplete. They must be given as terms (on the raw type) and proved to be
 witnesses. The command warns about witness types that are present in the raw
 type's BNF but not supplied by the user. The warning can be disabled by
-specifying the @{text no_warn_wits} option.
+specifying the \<open>no_warn_wits\<close> option.
 \<close>
 
 subsubsection \<open>\keyw{copy_bnf}
@@ -3040,7 +3037,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "copy_bnf"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "copy_bnf"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3061,7 +3058,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "bnf_axiomatization"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "bnf_axiomatization"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3086,10 +3083,10 @@
 @{cite "isabelle-isar-ref"}.
 
 The @{syntax plugins} option indicates which plugins should be enabled
-(@{text only}) or disabled (@{text del}). By default, all plugins are enabled.
+(\<open>only\<close>) or disabled (\<open>del\<close>). By default, all plugins are enabled.
 
 Type arguments are live by default; they can be marked as dead by entering
-@{text dead} in front of the type variable (e.g., @{text "(dead 'a)"})
+\<open>dead\<close> in front of the type variable (e.g., \<open>(dead 'a)\<close>)
 instead of an identifier for the corresponding set function. Witnesses can be
 specified by their types. Otherwise, the syntax of @{command bnf_axiomatization}
 is identical to the left-hand side of a @{command datatype} or
@@ -3107,7 +3104,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "print_bnfs"} & : & @{text "local_theory \<rightarrow>"}
+  @{command_def "print_bnfs"} & : & \<open>local_theory \<rightarrow>\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3128,7 +3125,7 @@
 %    a type not introduced by ...
 %
 %  * @{command free_constructors}
-%    * @{text plugins}, @{text discs_sels}
+%    * \<open>plugins\<close>, \<open>discs_sels\<close>
 %    * hack to have both co and nonco view via locale (cf. ext nats)
 \<close>
 
@@ -3147,7 +3144,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "free_constructors"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
+  @{command_def "free_constructors"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3177,8 +3174,8 @@
 constructor itself (as a term), and a list of optional names for the selectors.
 
 Section~\ref{ssec:datatype-generated-theorems} lists the generated theorems.
-For bootstrapping reasons, the generally useful @{text "[fundef_cong]"}
-attribute is not set on the generated @{text case_cong} theorem. It can be
+For bootstrapping reasons, the generally useful \<open>[fundef_cong]\<close>
+attribute is not set on the generated \<open>case_cong\<close> theorem. It can be
 added manually using \keyw{declare}.
 \<close>
 
@@ -3188,7 +3185,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "simps_of_case"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "simps_of_case"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3227,7 +3224,7 @@
 
 text \<open>
 \begin{matharray}{rcl}
-  @{command_def "case_of_simps"} & : & @{text "local_theory \<rightarrow> local_theory"}
+  @{command_def "case_of_simps"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
 \end{matharray}
 
 @{rail \<open>
@@ -3308,10 +3305,10 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{eq.refl} @{text "[code nbe]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{eq.refl} \<open>[code nbe]\<close>\rm:] ~ \\
 @{thm list.eq.refl[no_vars]}
 
-\item[@{text "t."}\hthm{eq.simps} @{text "[code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{eq.simps} \<open>[code]\<close>\rm:] ~ \\
 @{thm list.eq.simps(1)[no_vars]} \\
 @{thm list.eq.simps(2)[no_vars]} \\
 @{thm list.eq.simps(3)[no_vars]} \\
@@ -3322,7 +3319,7 @@
 \end{description}
 \end{indentblock}
 
-In addition, the plugin sets the @{text "[code]"} attribute on a number of
+In addition, the plugin sets the \<open>[code]\<close> attribute on a number of
 properties of freely generated types and of (co)recursive functions, as
 documented in Sections \ref{ssec:datatype-generated-theorems},
 \ref{ssec:primrec-generated-theorems}, \ref{ssec:codatatype-generated-theorems},
@@ -3334,9 +3331,9 @@
   \label{ssec:size}\<close>
 
 text \<open>
-For each datatype @{text t}, the \hthm{size} plugin generates a generic size
-function @{text "t.size_t"} as well as a specific instance
-@{text "size :: t \<Rightarrow> nat"} belonging to the @{text size} type class. The
+For each datatype \<open>t\<close>, the \hthm{size} plugin generates a generic size
+function \<open>t.size_t\<close> as well as a specific instance
+\<open>size :: t \<Rightarrow> nat\<close> belonging to the \<open>size\<close> type class. The
 \keyw{fun} command relies on @{const size} to prove termination of recursive
 functions on datatypes.
 
@@ -3345,20 +3342,20 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{size} @{text "[simp, code]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{size} \<open>[simp, code]\<close>\rm:] ~ \\
 @{thm list.size(1)[no_vars]} \\
 @{thm list.size(2)[no_vars]} \\
 @{thm list.size(3)[no_vars]} \\
 @{thm list.size(4)[no_vars]}
 
-\item[@{text "t."}\hthm{size_gen}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{size_gen}\rm:] ~ \\
 @{thm list.size_gen(1)[no_vars]} \\
 @{thm list.size_gen(2)[no_vars]}
 
-\item[@{text "t."}\hthm{size_gen_o_map}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{size_gen_o_map}\rm:] ~ \\
 @{thm list.size_gen_o_map[no_vars]}
 
-\item[@{text "t."}\hthm{size_neq}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{size_neq}\rm:] ~ \\
 This property is missing for @{typ "'a list"}. If the @{term size} function
 always evaluates to a non-zero value, this theorem has the form
 @{prop "\<not> size x = 0"}.
@@ -3366,14 +3363,14 @@
 \end{description}
 \end{indentblock}
 
-The @{text "t.size"} and @{text "t.size_t"} functions generated for datatypes
-defined by nested recursion through a datatype @{text u} depend on
-@{text "u.size_u"}.
-
-If the recursion is through a non-datatype @{text u} with type arguments
-@{text "'a\<^sub>1, \<dots>, 'a\<^sub>m"}, by default @{text u} values are given a size of 0. This
+The \<open>t.size\<close> and \<open>t.size_t\<close> functions generated for datatypes
+defined by nested recursion through a datatype \<open>u\<close> depend on
+\<open>u.size_u\<close>.
+
+If the recursion is through a non-datatype \<open>u\<close> with type arguments
+\<open>'a\<^sub>1, \<dots>, 'a\<^sub>m\<close>, by default \<open>u\<close> values are given a size of 0. This
 can be improved upon by registering a custom size function of type
-@{text "('a\<^sub>1 \<Rightarrow> nat) \<Rightarrow> \<dots> \<Rightarrow> ('a\<^sub>m \<Rightarrow> nat) \<Rightarrow> u \<Rightarrow> nat"} using
+\<open>('a\<^sub>1 \<Rightarrow> nat) \<Rightarrow> \<dots> \<Rightarrow> ('a\<^sub>m \<Rightarrow> nat) \<Rightarrow> u \<Rightarrow> nat\<close> using
 the ML function @{ML BNF_LFP_Size.register_size} or
 @{ML BNF_LFP_Size.register_size_global}. See theory
 \<^file>\<open>~~/src/HOL/Library/Multiset.thy\<close> for an example.
@@ -3385,7 +3382,7 @@
 
 text \<open>
 For each (co)datatype with live type arguments and each manually registered BNF,
-the \hthm{transfer} plugin generates a predicator @{text "t.pred_t"} and
+the \hthm{transfer} plugin generates a predicator \<open>t.pred_t\<close> and
 properties that guide the Transfer tool.
 
 For types with at least one live type argument and \emph{no dead type
@@ -3394,48 +3391,48 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{Domainp_rel} @{text "[relator_domain]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{Domainp_rel} \<open>[relator_domain]\<close>\rm:] ~ \\
 @{thm list.Domainp_rel[no_vars]}
 
-\item[@{text "t."}\hthm{left_total_rel} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{left_total_rel} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.left_total_rel[no_vars]}
 
-\item[@{text "t."}\hthm{left_unique_rel} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{left_unique_rel} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.left_unique_rel[no_vars]}
 
-\item[@{text "t."}\hthm{right_total_rel} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{right_total_rel} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.right_total_rel[no_vars]}
 
-\item[@{text "t."}\hthm{right_unique_rel} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{right_unique_rel} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.right_unique_rel[no_vars]}
 
-\item[@{text "t."}\hthm{bi_total_rel} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{bi_total_rel} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.bi_total_rel[no_vars]}
 
-\item[@{text "t."}\hthm{bi_unique_rel} @{text "[transfer_rule]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{bi_unique_rel} \<open>[transfer_rule]\<close>\rm:] ~ \\
 @{thm list.bi_unique_rel[no_vars]}
 
 \end{description}
 \end{indentblock}
 
 For (co)datatypes with at least one live type argument, the plugin sets the
-@{text "[transfer_rule]"} attribute on the following (co)datatypes properties:
-@{text "t.case_"}\allowbreak @{text "transfer"},
-@{text "t.sel_"}\allowbreak @{text "transfer"},
-@{text "t.ctr_"}\allowbreak @{text "transfer"},
-@{text "t.disc_"}\allowbreak @{text "transfer"},
-@{text "t.rec_"}\allowbreak @{text "transfer"}, and
-@{text "t.corec_"}\allowbreak @{text "transfer"}.
+\<open>[transfer_rule]\<close> attribute on the following (co)datatype properties:
+\<open>t.case_\<close>\allowbreak \<open>transfer\<close>,
+\<open>t.sel_\<close>\allowbreak \<open>transfer\<close>,
+\<open>t.ctr_\<close>\allowbreak \<open>transfer\<close>,
+\<open>t.disc_\<close>\allowbreak \<open>transfer\<close>,
+\<open>t.rec_\<close>\allowbreak \<open>transfer\<close>, and
+\<open>t.corec_\<close>\allowbreak \<open>transfer\<close>.
 For (co)datatypes that further have \emph{no dead type arguments}, the plugin
-sets @{text "[transfer_rule]"} on
-@{text "t.set_"}\allowbreak @{text "transfer"},
-@{text "t.map_"}\allowbreak @{text "transfer"}, and
-@{text "t.rel_"}\allowbreak @{text "transfer"}.
+sets \<open>[transfer_rule]\<close> on
+\<open>t.set_\<close>\allowbreak \<open>transfer\<close>,
+\<open>t.map_\<close>\allowbreak \<open>transfer\<close>, and
+\<open>t.rel_\<close>\allowbreak \<open>transfer\<close>.
 
 For @{command primrec}, @{command primcorec}, and @{command primcorecursive},
-the plugin implements the generation of the @{text "f.transfer"} property,
-conditioned by the @{text transfer} option, and sets the
-@{text "[transfer_rule]"} attribute on these.
+the plugin generates the \<open>f.transfer\<close> property when the
+\<open>transfer\<close> option is enabled, and sets the
+\<open>[transfer_rule]\<close> attribute on it.
 \<close>
 
 
@@ -3452,16 +3449,16 @@
 \begin{indentblock}
 \begin{description}
 
-\item[@{text "t."}\hthm{Quotient} @{text "[quot_map]"}\rm:] ~ \\
+\item[\<open>t.\<close>\hthm{Quotient} \<open>[quot_map]\<close>\rm:] ~ \\
 @{thm list.Quotient[no_vars]}
 
 \end{description}
 \end{indentblock}
 
-In addition, the plugin sets the @{text "[relator_eq]"} attribute on a
-variant of the @{text t.rel_eq_onp} property, the @{text "[relator_mono]"}
-attribute on @{text t.rel_mono}, and the @{text "[relator_distr]"} attribute
-on @{text t.rel_compp}.
+In addition, the plugin sets the \<open>[relator_eq]\<close> attribute on a
+variant of the \<open>t.rel_eq_onp\<close> property, the \<open>[relator_mono]\<close>
+attribute on \<open>t.rel_mono\<close>, and the \<open>[relator_distr]\<close> attribute
+on \<open>t.rel_compp\<close>.
 \<close>
 
 
@@ -3522,12 +3519,12 @@
 
 \item
 \emph{The \emph{\keyw{primcorec}} command does not allow corecursion under
-@{text "case"}--@{text "of"} for datatypes that are defined without
+\<open>case\<close>--\<open>of\<close> for datatypes that are defined without
 discriminators and selectors.}
 
 \item
 \emph{There is no way to use an overloaded constant from a syntactic type
-class, such as @{text 0}, as a constructor.}
+class, such as \<open>0\<close>, as a constructor.}
 
 \item
 \emph{There is no way to register the same type as both a datatype and a
@@ -3553,7 +3550,7 @@
 (co)datatype package. Andreas Lochbihler provided lots of comments on earlier
 versions of the package, especially on the coinductive part. Brian Huffman
 suggested major simplifications to the internal constructions. Ond\v{r}ej
-Kun\v{c}ar implemented the @{text transfer} and @{text lifting} plugins.
+Kun\v{c}ar implemented the \<open>transfer\<close> and \<open>lifting\<close> plugins.
 Christian Sternagel and Ren\'e Thiemann ported the \keyw{derive} command
 from the \emph{Archive of Formal Proofs} to the new datatypes. Gerwin Klein and
 Lars Noschinski implemented the @{command simps_of_case} and @{command
--- a/src/Doc/Functions/Functions.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Functions/Functions.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -26,8 +26,8 @@
   and a set of defining recursive equations.
   If we leave out the type, the most general type will be
   inferred, which can sometimes lead to surprises: Since both @{term
-  "1::nat"} and @{text "+"} are overloaded, we would end up
-  with @{text "fib :: nat \<Rightarrow> 'a::{one,plus}"}.
+  "1::nat"} and \<open>+\<close> are overloaded, we would end up
+  with \<open>fib :: nat \<Rightarrow> 'a::{one,plus}\<close>.
 \<close>
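
 text \<open>
 \noindent
 The definition under discussion precedes this hunk; it is presumably of
 this shape:
 \<close>

 fun fib :: "nat \<Rightarrow> nat" where
   "fib 0 = 1"
 | "fib (Suc 0) = 1"
 | "fib (Suc (Suc n)) = fib n + fib (Suc n)"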
 
 text \<open>
@@ -35,8 +35,8 @@
   every recursive call. 
   Since HOL is a logic of total functions, termination is a
   fundamental requirement to prevent inconsistencies\footnote{From the
-  \qt{definition} @{text "f(n) = f(n) + 1"} we could prove 
-  @{text "0 = 1"} by subtracting @{text "f(n)"} on both sides.}.
+  \qt{definition} \<open>f(n) = f(n) + 1\<close> we could prove 
+  \<open>0 = 1\<close> by subtracting \<open>f(n)\<close> on both sides.}.
   Isabelle tries to prove termination automatically when a definition
   is made. In \S\ref{termination}, we will look at cases where this
   fails and see what to do then.
@@ -134,19 +134,19 @@
 
 
 \[\left[\;\begin{minipage}{0.25\textwidth}\vspace{6pt}
-\cmd{fun} @{text "f :: \<tau>"}\\%
+\cmd{fun} \<open>f :: \<tau>\<close>\\%
 \cmd{where}\\%
 \hspace*{2ex}{\it equations}\\%
 \hspace*{2ex}\vdots\vspace*{6pt}
 \end{minipage}\right]
 \quad\equiv\quad
 \left[\;\begin{minipage}{0.48\textwidth}\vspace{6pt}
-\cmd{function} @{text "("}\cmd{sequential}@{text ") f :: \<tau>"}\\%
+\cmd{function} \<open>(\<close>\cmd{sequential}\<open>) f :: \<tau>\<close>\\%
 \cmd{where}\\%
 \hspace*{2ex}{\it equations}\\%
 \hspace*{2ex}\vdots\\%
-\cmd{by} @{text "pat_completeness auto"}\\%
-\cmd{termination by} @{text "lexicographic_order"}\vspace{6pt}
+\cmd{by} \<open>pat_completeness auto\<close>\\%
+\cmd{termination by} \<open>lexicographic_order\<close>\vspace{6pt}
 \end{minipage}
 \right]\]
 
@@ -162,8 +162,8 @@
 
   \item A function definition produces a proof obligation which
   expresses completeness and compatibility of patterns (we talk about
-  this later). The combination of the methods @{text "pat_completeness"} and
-  @{text "auto"} is used to solve this proof obligation.
+  this later). The combination of the methods \<open>pat_completeness\<close> and
+  \<open>auto\<close> is used to solve this proof obligation.
 
   \item A termination proof follows the definition, started by the
   \cmd{termination} command. This will be explained in \S\ref{termination}.
@@ -177,7 +177,7 @@
 section \<open>Termination\<close>
 
 text \<open>\label{termination}
-  The method @{text "lexicographic_order"} is the default method for
+  The method \<open>lexicographic_order\<close> is the default method for
   termination proofs. It can prove termination of a
   certain class of functions by searching for a suitable lexicographic
   combination of size measures. Of course, not all functions have such
@@ -188,7 +188,7 @@
 subsection \<open>The {\tt relation} method\<close>
 text\<open>
   Consider the following function, which sums up natural numbers up to
-  @{text "N"}, using a counter @{text "i"}:
+  \<open>N\<close>, using a counter \<open>i\<close>:
 \<close>
 
 function sum :: "nat \<Rightarrow> nat \<Rightarrow> nat"
@@ -197,15 +197,15 @@
 by pat_completeness auto
 
 text \<open>
-  \noindent The @{text "lexicographic_order"} method fails on this example, because none of the
+  \noindent The \<open>lexicographic_order\<close> method fails on this example, because none of the
   arguments decreases in the recursive call, with respect to the standard size ordering.
   To prove termination manually, we must provide a custom wellfounded relation.
 
-  The termination argument for @{text "sum"} is based on the fact that
-  the \emph{difference} between @{text "i"} and @{text "N"} gets
-  smaller in every step, and that the recursion stops when @{text "i"}
-  is greater than @{text "N"}. Phrased differently, the expression 
-  @{text "N + 1 - i"} always decreases.
+  The termination argument for \<open>sum\<close> is based on the fact that
+  the \emph{difference} between \<open>i\<close> and \<open>N\<close> gets
+  smaller in every step, and that the recursion stops when \<open>i\<close>
+  is greater than \<open>N\<close>. Phrased differently, the expression 
+  \<open>N + 1 - i\<close> always decreases.
 
   We can use this expression as a measure function suitable to prove termination.
 \<close>
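
 text \<open>
 \noindent
 The elided proof text presumably begins along these lines:
 \<close>

 termination sum
   apply (relation "measure (\<lambda>(i, N). N + 1 - i)")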
@@ -215,10 +215,10 @@
 
 text \<open>
   The \cmd{termination} command sets up the termination goal for the
-  specified function @{text "sum"}. If the function name is omitted, it
+  specified function \<open>sum\<close>. If the function name is omitted, it
   implicitly refers to the last function definition.
 
-  The @{text relation} method takes a relation of
+  The \<open>relation\<close> method takes a relation of
   type @{typ "('a \<times> 'a) set"}, where @{typ "'a"} is the argument type of
   the function. If the function has multiple curried arguments, then
   these are packed together into a tuple, as happened in the above
@@ -228,14 +228,14 @@
   wellfounded relation from a mapping into the natural numbers (a
   \emph{measure function}). 
 
-  After the invocation of @{text "relation"}, we must prove that (a)
+  After the invocation of \<open>relation\<close>, we must prove (a) that
   the relation we supplied is wellfounded, and (b) that the arguments
   of recursive calls indeed decrease with respect to the
   relation:
 
   @{subgoals[display,indent=0]}
 
-  These goals are all solved by @{text "auto"}:
+  These goals are all solved by \<open>auto\<close>:
 \<close>
 
 apply auto
@@ -254,20 +254,20 @@
 by pat_completeness auto
 
 text \<open>
-  When @{text "i"} has reached @{text "N"}, it starts at zero again
-  and @{text "N"} is decremented.
+  When \<open>i\<close> has reached \<open>N\<close>, it starts at zero again
+  and \<open>N\<close> is decremented.
   This corresponds to a nested
   loop where one index counts up and the other down. Termination can
   be proved using a lexicographic combination of two measures, namely
-  the value of @{text "N"} and the above difference. The @{const
-  "measures"} combinator generalizes @{text "measure"} by taking a
+  the value of \<open>N\<close> and the above difference. The @{const
+  "measures"} combinator generalizes \<open>measure\<close> by taking a
   list of measure functions.  
 \<close>
 
 termination 
 by (relation "measures [\<lambda>(i, N). N, \<lambda>(i,N). N + 1 - i]") auto
 
-subsection \<open>How @{text "lexicographic_order"} works\<close>
+subsection \<open>How \<open>lexicographic_order\<close> works\<close>
 
 (*fun fails :: "nat \<Rightarrow> nat list \<Rightarrow> nat"
 where
@@ -281,10 +281,10 @@
   termination prover, see @{cite bulwahnKN07}}:
 
 \end{isamarkuptext}  
-\cmd{fun} @{text "fails :: \"nat \<Rightarrow> nat list \<Rightarrow> nat\""}\\%
+\cmd{fun} \<open>fails :: "nat \<Rightarrow> nat list \<Rightarrow> nat"\<close>\\%
 \cmd{where}\\%
-\hspace*{2ex}@{text "\"fails a [] = a\""}\\%
-|\hspace*{1.5ex}@{text "\"fails a (x#xs) = fails (x + a) (x#xs)\""}\\
+\hspace*{2ex}\<open>"fails a [] = a"\<close>\\%
+|\hspace*{1.5ex}\<open>"fails a (x#xs) = fails (x + a) (x#xs)"\<close>\\
 \begin{isamarkuptext}
 
 \noindent Isabelle responds with the following error:
@@ -292,16 +292,16 @@
 \begin{isabelle}
 *** Unfinished subgoals:\newline
 *** (a, 1, <):\newline
-*** \ 1.~@{text "\<And>x. x = 0"}\newline
+*** \ 1.~\<open>\<And>x. x = 0\<close>\newline
 *** (a, 1, <=):\newline
 *** \ 1.~False\newline
 *** (a, 2, <):\newline
 *** \ 1.~False\newline
 *** Calls:\newline
-*** a) @{text "(a, x # xs) -->> (x + a, x # xs)"}\newline
+*** a) \<open>(a, x # xs) -->> (x + a, x # xs)\<close>\newline
 *** Measures:\newline
-*** 1) @{text "\<lambda>x. size (fst x)"}\newline
-*** 2) @{text "\<lambda>x. size (snd x)"}\newline
+*** 1) \<open>\<lambda>x. size (fst x)\<close>\newline
+*** 2) \<open>\<lambda>x. size (snd x)\<close>\newline
 *** Result matrix:\newline
 *** \ \ \ \ 1\ \ 2  \newline
 *** a:  ?   <= \newline
@@ -317,26 +317,26 @@
   argument tuple to a natural number). 
 
   The contents of the matrix summarize what is known about argument
-  descents: The second argument has a weak descent (@{text "<="}) at the
+  descents: The second argument has a weak descent (\<open><=\<close>) at the
   recursive call, and for the first argument nothing could be proved,
-  which is expressed by @{text "?"}. In general, there are the values
-  @{text "<"}, @{text "<="} and @{text "?"}.
+  which is expressed by \<open>?\<close>. In general, the possible entries are
+  \<open><\<close>, \<open><=\<close> and \<open>?\<close>.
 
   For the failed proof attempts, the unfinished subgoals are also
   printed. Looking at these will often point to a missing lemma.
 \<close>
 
-subsection \<open>The @{text size_change} method\<close>
+subsection \<open>The \<open>size_change\<close> method\<close>
 
 text \<open>
   Some termination goals that are beyond the powers of
-  @{text lexicographic_order} can be solved automatically by the
-  more powerful @{text size_change} method, which uses a variant of
+  \<open>lexicographic_order\<close> can be solved automatically by the
+  more powerful \<open>size_change\<close> method, which uses a variant of
   the size-change principle, together with some other
   techniques. While the details are discussed
   elsewhere @{cite krauss_phd},
   here are a few typical situations where
-  @{text lexicographic_order} has difficulties and @{text size_change}
+  \<open>lexicographic_order\<close> has difficulties and \<open>size_change\<close>
   may be worth a try:
   \begin{itemize}
   \item Arguments are permuted in a recursive call.
@@ -345,7 +345,7 @@
   occur in sequence).
   \end{itemize}
 
-  Loading the theory @{text Multiset} makes the @{text size_change}
+  Loading the theory \<open>Multiset\<close> makes the \<open>size_change\<close>
   method a bit stronger: it can then use multiset orders internally.
 \<close>
 
@@ -353,7 +353,7 @@
 
 text \<open>
   If two or more functions call one another mutually, they have to be defined
-  in one step. Here are @{text "even"} and @{text "odd"}:
+  in one step. Here are \<open>even\<close> and \<open>odd\<close>:
 \<close>
 
 function even :: "nat \<Rightarrow> bool"
@@ -378,7 +378,7 @@
 by (relation "measure (\<lambda>x. case x of Inl n \<Rightarrow> n | Inr n \<Rightarrow> n)") auto
 
 text \<open>
-  We could also have used @{text lexicographic_order}, which
+  We could also have used \<open>lexicographic_order\<close>, which
   supports termination proofs for mutual recursion to a certain extent.
 \<close>
 
@@ -408,7 +408,7 @@
   definition of @{const even} and @{const odd}:
   @{subgoals[display,indent=0]}
   Simplification solves the first two goals, leaving us with two
-  statements about the @{text "mod"} operation to prove:
+  statements about the \<open>mod\<close> operation to prove:
 \<close>
 
 apply simp_all
@@ -449,7 +449,7 @@
 section \<open>Elimination\<close>
 
 text \<open>
-  A definition of function @{text f} gives rise to two kinds of elimination rules. Rule @{text f.cases}
+  A definition of function \<open>f\<close> gives rise to two kinds of elimination rules. Rule \<open>f.cases\<close>
   simply describes case analysis according to the patterns used in the definition:
 \<close>
 
@@ -486,7 +486,7 @@
 
 
 text \<open>
-  Sometimes it is convenient to derive specialized versions of the @{text elim} rules above and
+  Sometimes it is convenient to derive specialized versions of the \<open>elim\<close> rules above and
   keep them around as facts explicitly. For example, it is natural to show that if 
   @{prop "list_to_option xs = Some y"}, then @{term xs} must be a singleton. The command 
   \cmd{fun\_cases} derives such facts automatically, by instantiating and simplifying the general 
@@ -596,8 +596,8 @@
   the function's input type must match at least one of the patterns\footnote{Completeness could
   be equivalently stated as a disjunction of existential statements: 
 @{term "(\<exists>p. x = (T, p)) \<or> (\<exists>p. x = (p, T)) \<or> (\<exists>p. x = (p, F)) \<or>
-  (\<exists>p. x = (F, p)) \<or> (x = (X, X))"}, and you can use the method @{text atomize_elim} to get that form instead.}. If the patterns just involve
-  datatypes, we can solve it with the @{text "pat_completeness"}
+  (\<exists>p. x = (F, p)) \<or> (x = (X, X))"}, and you can use the method \<open>atomize_elim\<close> to get that form instead.}. If the patterns just involve
+  datatypes, we can solve it with the \<open>pat_completeness\<close>
   method:
 \<close>
 
@@ -609,7 +609,7 @@
   case, the result (i.e.~the right hand sides of the equations) must
   also be equal. For each pair of two patterns, there is one such
   subgoal. Usually this needs injectivity of the constructors, which
-  is used automatically by @{text "auto"}.
+  is used automatically by \<open>auto\<close>.
 \<close>
 
 by auto
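
Assembling the fragments, the three-valued-logic example suggested by the footnote's pattern list reads roughly as follows (datatype and constant names are assumptions; the final line is the usual idiom for a non-recursive termination proof):

    datatype P3 = T | F | X   (* true, false, undefined *)

    function And :: "P3 \<Rightarrow> P3 \<Rightarrow> P3" where
      "And T p = p"
    | "And p T = p"
    | "And p F = F"
    | "And F p = F"
    | "And X X = X"
    by pat_completeness auto              (* completeness, then compatibility *)
    termination by (relation "{}") simp   (* no recursive calls *)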
@@ -646,9 +646,8 @@
   @{subgoals[display,indent=0,goals_limit=1]}
 
   This is an arithmetic triviality, but unfortunately the
-  @{text arith} method cannot handle this specific form of an
-  elimination rule. However, we can use the method @{text
-  "atomize_elim"} to do an ad-hoc conversion to a disjunction of
+  \<open>arith\<close> method cannot handle this specific form of an
+  elimination rule. However, we can use the method \<open>atomize_elim\<close> to do an ad-hoc conversion to a disjunction of
   existentials, which can then be solved by the arithmetic decision procedure.
   Pattern compatibility and termination are automatic as usual.
 \<close>
@@ -720,10 +719,10 @@
   following definition:
 
 \end{isamarkuptext}
-\noindent\cmd{fun} @{text "check :: \"string \<Rightarrow> bool\""}\\%
+\noindent\cmd{fun} \<open>check :: "string \<Rightarrow> bool"\<close>\\%
 \cmd{where}\\%
-\hspace*{2ex}@{text "\"check (''good'') = True\""}\\%
-@{text "| \"check s = False\""}
+\hspace*{2ex}\<open>"check (''good'') = True"\<close>\\%
+\<open>| "check s = False"\<close>
 \begin{isamarkuptext}
 
   \noindent An invocation of the above \cmd{fun} command does not
@@ -733,7 +732,7 @@
   be handled by Isabelle.
 
   There are two things we can do here. Either we write an explicit
-  @{text "if"} on the right hand side, or we can use conditional patterns:
+  \<open>if\<close> on the right hand side, or we can use conditional patterns:
 \<close>
 
 function check :: "string \<Rightarrow> bool"
@@ -766,8 +765,8 @@
 
 text \<open>
   \noindent Clearly, any attempt of a termination proof must fail. And without
-  that, we do not get the usual rules @{text "findzero.simps"} and 
-  @{text "findzero.induct"}. So what was the definition good for at all?
+  that, we do not get the usual rules \<open>findzero.simps\<close> and 
+  \<open>findzero.induct\<close>. So what was the definition good for at all?
 \<close>
 
 subsection \<open>Domain predicates\<close>
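
For reference, the partial function under discussion (its definition precedes this hunk) is, in sketch:

    function findzero :: "(nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat" where
      "findzero f n = (if f n = 0 then n else findzero f (Suc n))"
    by pat_completeness auto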
@@ -779,8 +778,7 @@
   partial function just as a total function with an additional domain
   predicate, we can derive simplification and
   induction rules as we do for total functions. They are guarded
-  by domain conditions and are called @{text psimps} and @{text
-  pinduct}: 
+  by domain conditions and are called \<open>psimps\<close> and \<open>pinduct\<close>: 
 \<close>
 
 text \<open>
@@ -819,7 +817,7 @@
   \noindent The hypothesis in our lemma was used to satisfy the first premise in
   the induction rule. However, we also get @{term
   "findzero_dom (f, n)"} as a local assumption in the induction step. This
-  allows unfolding @{term "findzero f n"} using the @{text psimps}
+  allows unfolding @{term "findzero f n"} using the \<open>psimps\<close>
   rule, and the rest is trivial.
 \<close>
 apply (simp add: findzero.psimps)
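
The lemma whose proof ends here is presumably of the following shape (a sketch; the statement itself falls outside the hunk):

    lemma findzero_zero:
      "findzero_dom (f, n) \<Longrightarrow> f (findzero f n) = 0"
    apply (induction f n rule: findzero.pinduct)
    apply (simp add: findzero.psimps)
    done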
@@ -880,20 +878,18 @@
   actually true for some values. Otherwise we would have just proved
   lemmas with @{term False} as a premise.
 
-  Essentially, we need some introduction rules for @{text
-  findzero_dom}. The function package can prove such domain
+  Essentially, we need some introduction rules for \<open>findzero_dom\<close>. The function package can prove such domain
   introduction rules automatically. But since they are not used very
   often (they are almost never needed if the function is total), this
   functionality is disabled by default for efficiency reasons. So we have to go
-  back and ask for them explicitly by passing the @{text
-  "(domintros)"} option to the function package:
+  back and ask for them explicitly by passing the \<open>(domintros)\<close> option to the function package:
 
 \vspace{1ex}
-\noindent\cmd{function} @{text "(domintros) findzero :: \"(nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat\""}\\%
+\noindent\cmd{function} \<open>(domintros) findzero :: "(nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat"\<close>\\%
 \cmd{where}\isanewline%
 \ \ \ldots\\
 
-  \noindent Now the package has proved an introduction rule for @{text findzero_dom}:
+  \noindent Now the package has proved an introduction rule for \<open>findzero_dom\<close>:
 \<close>
 
 thm findzero.domintros
@@ -915,7 +911,7 @@
   \begin{center}@{thm inc_induct}\hfill(@{thm [source] "inc_induct"})\end{center}
 
   Figure \ref{findzero_term} gives a detailed Isar proof of the fact
-  that @{text findzero} terminates if there is a zero which is greater
+  that \<open>findzero\<close> terminates if there is a zero which is greater
   or equal to @{term n}. First we derive two useful rules which will
   solve the base case and the step case of the induction. The
   induction is then straightforward, except for the unusual induction
@@ -952,7 +948,7 @@
 text_raw \<open>
 \isamarkupfalse\isabellestyle{tt}
 \end{minipage}\vspace{6pt}\hrule
-\caption{Termination proof for @{text findzero}}\label{findzero_term}
+\caption{Termination proof for \<open>findzero\<close>}\label{findzero_term}
 \end{figure}
 \<close>
       
@@ -982,7 +978,7 @@
 
 text \<open>
   Sometimes it is useful to know what the definition of the domain
-  predicate looks like. Actually, @{text findzero_dom} is just an
+  predicate looks like. Actually, \<open>findzero_dom\<close> is just an
   abbreviation:
 
   @{abbrev[display] findzero_dom}
@@ -1005,8 +1001,7 @@
 
   The predicate @{term "Wellfounded.accp findzero_rel"} is the accessible part of
   that relation. An argument belongs to the accessible part, if it can
-  be reached in a finite number of steps (cf.~its definition in @{text
-  "Wellfounded.thy"}).
+  be reached in a finite number of steps (cf.~its definition in \<open>Wellfounded.thy\<close>).
 
   Since the domain predicate is just an abbreviation, you can use
   lemmas for @{const Wellfounded.accp} and @{const findzero_rel} directly. Some
@@ -1056,8 +1051,7 @@
 
 text \<open>
   We formulate this as a partial correctness lemma with the condition
-  @{term "nz_dom n"}. This allows us to prove it with the @{text
-  pinduct} rule before we have proved termination. With this lemma,
+  @{term "nz_dom n"}. This allows us to prove it with the \<open>pinduct\<close> rule before we have proved termination. With this lemma,
   the termination proof works as expected:
 \<close>
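
In sketch form, the surrounding example is the nested-recursive function nz with its partial-correctness lemma and the resulting termination proof (equation and rule names are assumptions):

    function nz :: "nat \<Rightarrow> nat" where
      "nz 0 = 0"
    | "nz (Suc x) = nz (nz x)"
    by pat_completeness auto

    lemma nz_is_zero: "nz_dom n \<Longrightarrow> nz n = 0"
    apply (induction rule: nz.pinduct)
    apply (auto simp: nz.psimps)
    done

    termination nz
      by (relation "measure (\<lambda>n. n)") (auto simp: nz_is_zero)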
 
@@ -1170,8 +1164,8 @@
   needed for each higher-order construct that is used when defining
   new functions. In fact, even basic functions like @{const
   If} and @{const Let} are handled by this mechanism. The congruence
-  rule for @{const If} states that the @{text then} branch is only
-  relevant if the condition is true, and the @{text else} branch only if it
+  rule for @{const If} states that the \<open>then\<close> branch is only
+  relevant if the condition is true, and the \<open>else\<close> branch only if it
   is false:
 
   @{thm[display] if_cong}
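
For readers of the raw sources: the rule displayed by the antiquotation is

    thm if_cong
      (* \<lbrakk>b = c; c \<Longrightarrow> x = u; \<not> c \<Longrightarrow> y = v\<rbrakk>
         \<Longrightarrow> (if b then x else y) = (if c then u else v) *)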
--- a/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -17,10 +17,10 @@
 \end{quote}
 This is often called ``complete induction''. It is applied like this:
 \begin{quote}
-(@{text"induction n rule: less_induct"})
+(\<open>induction n rule: less_induct\<close>)
 \end{quote}
 In fact, it is not restricted to @{typ nat} but works for any wellfounded
-order @{text"<"}.
+order \<open><\<close>.
 
 There are many more special induction rules. You can find all of them
 via the Find button (in Isabelle/jedit) with the following search criteria:
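
(The search criteria themselves follow outside the hunk.) Returning to less_induct, a minimal usage skeleton (the goal is left open with sorry; P is a placeholder):

    lemma "P (n::nat)"
    proof (induction n rule: less_induct)
      case (less n)
      (* less.IH: \<And>m. m < n \<Longrightarrow> P m *)
      show ?case sorry
    qed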
@@ -39,7 +39,7 @@
 lemma fixes x :: int shows "x ^ 3 = x * x * x"
 by (simp add: numeral_eq_Suc)
 
-text\<open>This is a typical situation: function ``@{text"^"}'' is defined
+text\<open>This is a typical situation: function ``\<open>^\<close>'' is defined
 by pattern matching on @{const Suc} but is applied to a numeral.
 
 Note: simplification with @{thm[source] numeral_eq_Suc} will convert all numerals.
@@ -74,8 +74,8 @@
 \section{Algebraic simplification}
 
 On the numeric types @{typ nat}, @{typ int} and @{typ real},
-proof method @{text simp} and friends can deal with a limited amount of linear
-arithmetic (no multiplication except by numerals) and method @{text arith} can
+proof method \<open>simp\<close> and friends can deal with a limited amount of linear
+arithmetic (no multiplication except by numerals) and method \<open>arith\<close> can
 handle full linear arithmetic (on @{typ nat}, @{typ int} including quantifiers).
 But what to do when proper multiplication is involved?
 At this point it can be helpful to simplify with the lemma list
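
(The list's name follows outside the hunk.) A standard lemma list for such goals is algebra_simps, as in this hedged example:

    lemma fixes x y :: int
      shows "(x + y) * (x - y) = x * x - y * y"
    by (simp add: algebra_simps)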
--- a/src/Doc/Locales/Examples.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Locales/Examples.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -21,14 +21,13 @@
   Locales are based on contexts.  A \emph{context} can be seen as a
   formula schema
 \[
-  @{text "\<And>x\<^sub>1\<dots>x\<^sub>n. \<lbrakk> A\<^sub>1; \<dots> ;A\<^sub>m \<rbrakk> \<Longrightarrow> \<dots>"}
+  \<open>\<And>x\<^sub>1\<dots>x\<^sub>n. \<lbrakk> A\<^sub>1; \<dots> ;A\<^sub>m \<rbrakk> \<Longrightarrow> \<dots>\<close>
 \]
-  where the variables~@{text "x\<^sub>1"}, \ldots,~@{text "x\<^sub>n"} are called
-  \emph{parameters} and the premises $@{text "A\<^sub>1"}, \ldots,~@{text
-  "A\<^sub>m"}$ \emph{assumptions}.  A formula~@{text "C"}
+  where the variables~\<open>x\<^sub>1\<close>, \ldots,~\<open>x\<^sub>n\<close> are called
+  \emph{parameters} and the premises $\<open>A\<^sub>1\<close>, \ldots,~\<open>A\<^sub>m\<close>$ \emph{assumptions}.  A formula~\<open>C\<close>
   is a \emph{theorem} in the context if it is a conclusion
 \[
-  @{text "\<And>x\<^sub>1\<dots>x\<^sub>n. \<lbrakk> A\<^sub>1; \<dots> ;A\<^sub>m \<rbrakk> \<Longrightarrow> C"}.
+  \<open>\<And>x\<^sub>1\<dots>x\<^sub>n. \<lbrakk> A\<^sub>1; \<dots> ;A\<^sub>m \<rbrakk> \<Longrightarrow> C\<close>.
 \]
   Isabelle/Isar's notion of context goes beyond this logical view.
   Its contexts record, in a consecutive order, proved
@@ -47,7 +46,7 @@
   \emph{locale declaration} consists of a sequence of context elements
   declaring parameters (keyword \isakeyword{fixes}) and assumptions
   (keyword \isakeyword{assumes}).  The following is the specification of
-  partial orders, as locale @{text partial_order}.
+  partial orders, as locale \<open>partial_order\<close>.
 \<close>
 
   locale partial_order =
@@ -56,8 +55,8 @@
       and anti_sym [intro]: "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> x \<rbrakk> \<Longrightarrow> x = y"
       and trans [trans]: "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"
 
-text (in partial_order) \<open>The parameter of this locale is~@{text le},
-  which is a binary predicate with infix syntax~@{text \<sqsubseteq>}.  The
+text (in partial_order) \<open>The parameter of this locale is~\<open>le\<close>,
+  which is a binary predicate with infix syntax~\<open>\<sqsubseteq>\<close>.  The
   parameter syntax is available in the subsequent
   assumptions, which are the familiar partial order axioms.
 
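For orientation, the complete declaration (its \isakeyword{fixes} line lies just outside the hunks) reads, up to layout:

    locale partial_order =
      fixes le :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "\<sqsubseteq>" 50)
      assumes refl [intro, simp]: "x \<sqsubseteq> x"
        and anti_sym [intro]: "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> x \<rbrakk> \<Longrightarrow> x = y"
        and trans [trans]: "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"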
@@ -146,9 +145,9 @@
     less :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "\<sqsubset>" 50)
     where "(x \<sqsubset> y) = (x \<sqsubseteq> y \<and> x \<noteq> y)"
 
-text (in partial_order) \<open>The strict order @{text less} with infix
-  syntax~@{text \<sqsubset>} is
-  defined in terms of the locale parameter~@{text le} and the general
+text (in partial_order) \<open>The strict order \<open>less\<close> with infix
+  syntax~\<open>\<sqsubset>\<close> is
+  defined in terms of the locale parameter~\<open>le\<close> and the general
   equality of the object logic we work in.  The definition generates a
   \emph{foundational constant}
   @{term partial_order.less} with definition @{thm [source]
@@ -156,9 +155,9 @@
   @{thm [display, indent=2] partial_order.less_def}
   At the same time, the locale is extended by syntax transformations
   hiding this construction in the context of the locale.  Here, the
-  abbreviation @{text less} is available for
-  @{text "partial_order.less le"}, and it is printed
-  and parsed as infix~@{text \<sqsubset>}.  Finally, the conclusion @{thm [source]
+  abbreviation \<open>less\<close> is available for
+  \<open>partial_order.less le\<close>, and it is printed
+  and parsed as infix~\<open>\<sqsubset>\<close>.  Finally, the conclusion @{thm [source]
   less_def} is added to the locale:
   @{thm [display, indent=2] less_def}
 \<close>
@@ -172,8 +171,7 @@
     unfolding %visible less_def by %visible (blast intro: trans)
 
 text \<open>In the context of the proof, conclusions of the
-  locale may be used like theorems.  Attributes are effective: @{text
-  anti_sym} was
+  locale may be used like theorems.  Attributes are effective: \<open>anti_sym\<close> was
   declared as introduction rule, hence it is in the context's set of
   rules used by the classical reasoner by default.\<close>
 
@@ -319,7 +317,7 @@
   begin
 
 text \<open>These assumptions refer to the predicates for infimum
-  and supremum defined for @{text partial_order} in the previous
+  and supremum defined for \<open>partial_order\<close> in the previous
   section.  We now introduce the notions of meet and join,
   together with an example theorem.\<close>
 
@@ -600,10 +598,10 @@
 \begin{center}
 \subfigure[Declared hierarchy]{
 \begin{tikzpicture}
-  \node (po) at (0,0) {@{text partial_order}};
-  \node (lat) at (-1.5,-1) {@{text lattice}};
-  \node (dlat) at (-1.5,-2) {@{text distrib_lattice}};
-  \node (to) at (1.5,-1) {@{text total_order}};
+  \node (po) at (0,0) {\<open>partial_order\<close>};
+  \node (lat) at (-1.5,-1) {\<open>lattice\<close>};
+  \node (dlat) at (-1.5,-2) {\<open>distrib_lattice\<close>};
+  \node (to) at (1.5,-1) {\<open>total_order\<close>};
   \draw (po) -- (lat);
   \draw (lat) -- (dlat);
   \draw (po) -- (to);
@@ -612,10 +610,10 @@
 } \\
 \subfigure[Total orders are lattices]{
 \begin{tikzpicture}
-  \node (po) at (0,0) {@{text partial_order}};
-  \node (lat) at (0,-1) {@{text lattice}};
-  \node (dlat) at (-1.5,-2) {@{text distrib_lattice}};
-  \node (to) at (1.5,-2) {@{text total_order}};
+  \node (po) at (0,0) {\<open>partial_order\<close>};
+  \node (lat) at (0,-1) {\<open>lattice\<close>};
+  \node (dlat) at (-1.5,-2) {\<open>distrib_lattice\<close>};
+  \node (to) at (1.5,-2) {\<open>total_order\<close>};
   \draw (po) -- (lat);
   \draw (lat) -- (dlat);
   \draw (lat) -- (to);
@@ -624,10 +622,10 @@
 } \quad
 \subfigure[Total orders are distributive lattices]{
 \begin{tikzpicture}
-  \node (po) at (0,0) {@{text partial_order}};
-  \node (lat) at (0,-1) {@{text lattice}};
-  \node (dlat) at (0,-2) {@{text distrib_lattice}};
-  \node (to) at (0,-3) {@{text total_order}};
+  \node (po) at (0,0) {\<open>partial_order\<close>};
+  \node (lat) at (0,-1) {\<open>lattice\<close>};
+  \node (dlat) at (0,-2) {\<open>distrib_lattice\<close>};
+  \node (to) at (0,-3) {\<open>total_order\<close>};
   \draw (po) -- (lat);
   \draw (lat) -- (dlat);
   \draw (dlat) -- (to);
@@ -680,33 +678,31 @@
   sublocale %visible total_order \<subseteq> lattice
 
 txt \<open>\normalsize
-  This enters the context of locale @{text total_order}, in
+  This enters the context of locale \<open>total_order\<close>, in
   which the goal @{subgoals [display]} must be shown.
   Now the
   locale predicate needs to be unfolded --- for example, using its
   definition or by introduction rules
   provided by the locale package.  For automation, the locale package
-  provides the methods @{text intro_locales} and @{text
-  unfold_locales}.  They are aware of the
+  provides the methods \<open>intro_locales\<close> and \<open>unfold_locales\<close>.  They are aware of the
   current context and dependencies between locales and automatically
-  discharge goals implied by these.  While @{text unfold_locales}
-  always unfolds locale predicates to assumptions, @{text
-  intro_locales} only unfolds definitions along the locale
+  discharge goals implied by these.  While \<open>unfold_locales\<close>
+  always unfolds locale predicates to assumptions, \<open>intro_locales\<close> only unfolds definitions along the locale
   hierarchy, leaving a goal consisting of predicates defined by the
   locale package.  Occasionally the latter is of advantage since the goal
   is smaller.
 
   For the current goal, we would like to get hold of
-  the assumptions of @{text lattice}, which need to be shown, hence
-  @{text unfold_locales} is appropriate.\<close>
+  the assumptions of \<open>lattice\<close>, which need to be shown, hence
+  \<open>unfold_locales\<close> is appropriate.\<close>
 
   proof unfold_locales
 
 txt \<open>\normalsize
   Since the fact that both lattices and total orders are partial
   orders is already reflected in the locale hierarchy, the assumptions
-  of @{text partial_order} are discharged automatically, and only the
-  assumptions introduced in @{text lattice} remain as subgoals
+  of \<open>partial_order\<close> are discharged automatically, and only the
+  assumptions introduced in \<open>lattice\<close> remain as subgoals
   @{subgoals [display]}
   The proof for the first subgoal is obtained by constructing an
   infimum, whose existence is implied by totality.\<close>
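
Schematically, the proof then closes along these lines (a skeleton only; the witnesses and the exact goal statements are left open with sorry):

    sublocale total_order \<subseteq> lattice
    proof unfold_locales
      fix x y
      show "\<exists>inf. is_inf x y inf" sorry   (* witness: the smaller of x, y *)
      show "\<exists>sup. is_sup x y sup" sorry   (* witness: the larger of x, y *)
    qed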
--- a/src/Doc/Locales/Examples1.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Locales/Examples1.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -32,7 +32,7 @@
 text \<open>
   The command \isakeyword{interpretation} is for the interpretation of
   locale in theories.  In the following example, the parameter of locale
-  @{text partial_order} is replaced by @{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow>
+  \<open>partial_order\<close> is replaced by @{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow>
   bool"} and the locale instance is interpreted in the current
   theory.\<close>
 
@@ -40,7 +40,7 @@
 txt \<open>\normalsize
   The argument of the command is a simple \emph{locale expression}
   consisting of the name of the interpreted locale, which is
-  preceded by the qualifier @{text "int:"} and succeeded by a
+  preceded by the qualifier \<open>int:\<close> and succeeded by a
   white-space-separated list of terms, which provide a full
   instantiation of the locale parameters.  The parameters are referred
   to by order of declaration, which is also the order in which
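
(The sentence continues outside the hunk.) The interpretation being described is, in sketch:

    interpretation int: partial_order "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"
      by unfold_locales auto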
@@ -59,22 +59,21 @@
   int} is named @{thm [source] int.trans} and is the following
   theorem:
   @{thm [display, indent=2] int.trans}
-  It is not possible to reference this theorem simply as @{text
-  trans}.  This prevents unwanted hiding of existing theorems of the
+  It is not possible to reference this theorem simply as \<open>trans\<close>.  This prevents unwanted hiding of existing theorems of the
   theory by an interpretation.\<close>
 
 
 subsection \<open>Second Version: Replacement of Definitions\<close>
 
 text \<open>Not only does the above interpretation qualify theorem names.
-  The prefix @{text int} is applied to all names introduced in locale
+  The prefix \<open>int\<close> is applied to all names introduced in locale
   conclusions including names introduced in definitions.  The
-  qualified name @{text int.less} is short for
-  the interpretation of the definition, which is @{text "partial_order.less (\<le>)"}.
+  qualified name \<open>int.less\<close> is short for
+  the interpretation of the definition, which is \<open>partial_order.less (\<le>)\<close>.
   Qualified name and expanded form may be used almost
   interchangeably.%
-\footnote{Since @{term "(\<le>)"} is polymorphic, for @{text "partial_order.less (\<le>)"} a
-  more general type will be inferred than for @{text int.less} which
+\footnote{Since @{term "(\<le>)"} is polymorphic, for \<open>partial_order.less (\<le>)\<close> a
+  more general type will be inferred than for \<open>int.less\<close> which
   is over type @{typ int}.}
   The former is preferred on output, as for example in the theorem
   @{thm [source] int.less_le_trans}: @{thm [display, indent=2]
--- a/src/Doc/Locales/Examples2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Locales/Examples2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -6,7 +6,7 @@
   proof -
     txt \<open>\normalsize The goals are now:
       @{subgoals [display]}
-      The proof that~@{text \<le>} is a partial order is as above.\<close>
+      The proof that~\<open>\<le>\<close> is a partial order is as above.\<close>
     show "partial_order ((\<le>) :: int \<Rightarrow> int \<Rightarrow> bool)"
       by unfold_locales auto
     txt \<open>\normalsize The second goal is shown by unfolding the
@@ -17,7 +17,6 @@
   qed
 
 text \<open>Note that the above proof is not in the context of the
-  interpreted locale.  Hence, the premise of @{text
-  "partial_order.less_def"} is discharged manually with @{text OF}.
+  interpreted locale.  Hence, the premise of \<open>partial_order.less_def\<close> is discharged manually with \<open>OF\<close>.
 \<close>
 end
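
Assembled from the fragments above, the whole second version reads roughly as follows (a sketch; fact references and type constraints may need adjustment):

    interpretation int: partial_order "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"
      rewrites "int.less x y = (x < y)"
    proof -
      show "partial_order ((\<le>) :: int \<Rightarrow> int \<Rightarrow> bool)"
        by unfold_locales auto
      show "partial_order.less (\<le>) x y = ((x::int) < y)"
        unfolding partial_order.less_def [OF \<open>partial_order (\<le>)\<close>]
        by auto
    qed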
--- a/src/Doc/Locales/Examples3.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Locales/Examples3.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -7,7 +7,7 @@
 
 text \<open>In the above example, the fact that @{term "(\<le>)"} is a partial
   order for the integers was used in the second goal to
-  discharge the premise in the definition of @{text "(\<sqsubset>)"}.  In
+  discharge the premise in the definition of \<open>(\<sqsubset>)\<close>.  In
   general, proofs of the equations not only may involve definitions
   from the interpreted locale but arbitrarily complex arguments in the
   context of the locale.  Therefore it would be convenient to have the
@@ -30,7 +30,7 @@
   and proved by assumption (Isar short hand ``.'').  It enriches the
   local proof context by the theorems
   also obtained in the interpretation from Section~\ref{sec:po-first},
-  and @{text int.less_def} may directly be used to unfold the
+  and \<open>int.less_def\<close> may directly be used to unfold the
   definition.  Theorems from the local interpretation disappear after
   leaving the proof context --- that is, after the succeeding
   \isakeyword{next} or \isakeyword{qed} statement.\<close>
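
Assembled, the variant described here reads roughly as follows (a sketch; the local interpret is proved by assumption with the Isar shorthand "."):

    interpretation int: partial_order "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"
      rewrites "int.less x y = (x < y)"
    proof -
      show "partial_order ((\<le>) :: int \<Rightarrow> int \<Rightarrow> bool)"
        by unfold_locales auto
      then interpret int: partial_order "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool" .
      show "int.less x y = (x < y)"
        unfolding int.less_def by auto
    qed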
@@ -39,8 +39,8 @@
 subsection \<open>Further Interpretations\<close>
 
 text \<open>Further interpretations are necessary for
-  the other locales.  In @{text lattice} the operations~@{text \<sqinter>}
-  and~@{text \<squnion>} are substituted by @{term "min :: int \<Rightarrow> int \<Rightarrow> int"}
+  the other locales.  In \<open>lattice\<close> the operations~\<open>\<sqinter>\<close>
+  and~\<open>\<squnion>\<close> are substituted by @{term "min :: int \<Rightarrow> int \<Rightarrow> int"}
   and @{term "max :: int \<Rightarrow> int \<Rightarrow> int"}.  The entire proof for the
   interpretation is reproduced to give an example of a more
   elaborate interpretation proof.  Note that the equations are named
@@ -57,13 +57,13 @@
       txt \<open>\normalsize hence only the lattice axioms remain to be
         shown.
         @{subgoals [display]}
-        By @{text is_inf} and @{text is_sup},\<close>
+        By \<open>is_inf\<close> and \<open>is_sup\<close>,\<close>
       apply (unfold int.is_inf_def int.is_sup_def)
       txt \<open>\normalsize the goals are transformed to these
         statements:
         @{subgoals [display]}
         This is Presburger arithmetic, which can be solved by the
-        method @{text arith}.\<close>
+        method \<open>arith\<close>.\<close>
       by arith+
     txt \<open>\normalsize In order to show the equations, we put ourselves
       in a situation where the lattice theorems can be used in a
@@ -75,7 +75,7 @@
@@ -75,7 +75,7 @@
       by (bestsimp simp: int.join_def int.is_sup_def)
   qed
 
-text \<open>Next follows that @{text "(\<le>)"} is a total order, again for
+text \<open>Next follows that \<open>(\<le>)\<close> is a total order, again for
   the integers.\<close>
 
   interpretation %visible int: total_order "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"
@@ -89,24 +89,24 @@
 \vspace{2ex}
 \begin{center}
 \begin{tabular}{l}
-  @{thm [source] int.less_def} from locale @{text partial_order}: \\
+  @{thm [source] int.less_def} from locale \<open>partial_order\<close>: \\
   \quad @{thm int.less_def} \\
-  @{thm [source] int.meet_left} from locale @{text lattice}: \\
+  @{thm [source] int.meet_left} from locale \<open>lattice\<close>: \\
   \quad @{thm int.meet_left} \\
-  @{thm [source] int.join_distr} from locale @{text distrib_lattice}: \\
+  @{thm [source] int.join_distr} from locale \<open>distrib_lattice\<close>: \\
   \quad @{thm int.join_distr} \\
-  @{thm [source] int.less_total} from locale @{text total_order}: \\
+  @{thm [source] int.less_total} from locale \<open>total_order\<close>: \\
   \quad @{thm int.less_total}
 \end{tabular}
 \end{center}
 \hrule
-\caption{Interpreted theorems for~@{text \<le>} on the integers.}
+\caption{Interpreted theorems for~\<open>\<le>\<close> on the integers.}
 \label{tab:int-lattice}
 \end{table}
 
 \begin{itemize}
 \item
-  Locale @{text distrib_lattice} was also interpreted.  Since the
+  Locale \<open>distrib_lattice\<close> was also interpreted.  Since the
   locale hierarchy reflects that total orders are distributive
   lattices, the interpretation of the latter was inserted
   automatically with the interpretation of the former.  In general,
@@ -117,8 +117,8 @@
 \item
   The predicate @{term "(<)"} appears in theorem @{thm [source]
   int.less_total}
-  although an equation for the replacement of @{text "(\<sqsubset>)"} was only
-  given in the interpretation of @{text partial_order}.  The
+  although an equation for the replacement of \<open>(\<sqsubset>)\<close> was only
+  given in the interpretation of \<open>partial_order\<close>.  The
   interpretation equations are pushed downwards the hierarchy for
   related interpretations --- that is, for interpretations that share
   the instances of parameters they have in common.
@@ -148,21 +148,19 @@
 section \<open>Locale Expressions \label{sec:expressions}\<close>
 
 text \<open>
-  A map~@{term \<phi>} between partial orders~@{text \<sqsubseteq>} and~@{text \<preceq>}
-  is called order preserving if @{text "x \<sqsubseteq> y"} implies @{text "\<phi> x \<preceq>
-  \<phi> y"}.  This situation is more complex than those encountered so
+  A map~@{term \<phi>} between partial orders~\<open>\<sqsubseteq>\<close> and~\<open>\<preceq>\<close>
+  is called order preserving if \<open>x \<sqsubseteq> y\<close> implies \<open>\<phi> x \<preceq>
+  \<phi> y\<close>.  This situation is more complex than those encountered so
   far: it involves two partial orders, and it is desirable to use the
   existing locale for both.
 
-  A locale for order preserving maps requires three parameters: @{text
-  le}~(\isakeyword{infixl}~@{text \<sqsubseteq>}) and @{text
-  le'}~(\isakeyword{infixl}~@{text \<preceq>}) for the orders and~@{text \<phi>}
+  A locale for order preserving maps requires three parameters: \<open>le\<close>~(\isakeyword{infixl}~\<open>\<sqsubseteq>\<close>) and \<open>le'\<close>~(\isakeyword{infixl}~\<open>\<preceq>\<close>) for the orders and~\<open>\<phi>\<close>
   for the map.
 
   In order to reuse the existing locale for partial orders, which has
-  the single parameter~@{text le}, it must be imported twice, once
-  mapping its parameter to~@{text le} from the new locale and once
-  to~@{text le'}.  This can be achieved with a compound locale
+  the single parameter~\<open>le\<close>, it must be imported twice, once
+  mapping its parameter to~\<open>le\<close> from the new locale and once
+  to~\<open>le'\<close>.  This can be achieved with a compound locale
   expression.
 
   In general, a locale expression is a sequence of \emph{locale instances}
@@ -179,7 +177,7 @@
   different instances of the same locale, and unless designated with a
   ``?'' it must occur in name references.
 
-  Since the parameters~@{text le} and~@{text le'} are to be partial
+  Since the parameters~\<open>le\<close> and~\<open>le'\<close> are to be partial
   orders, our locale for order preserving maps will import these
   instances:
 \begin{small}
@@ -200,7 +198,7 @@
   clause.  The \isakeyword{for} clause is also where the syntax of these
   parameters is declared.
 
-  Two context elements for the map parameter~@{text \<phi>} and the
+  Two context elements for the map parameter~\<open>\<phi>\<close> and the
   assumptions that it is order preserving complete the locale
   declaration.\<close>
 
@@ -221,9 +219,9 @@
   @{thm [display, indent=4] le'.less_le_trans}
   While there is infix syntax for the strict operation associated with
   @{term "(\<sqsubseteq>)"}, there is none for the strict version of @{term
-  "(\<preceq>)"}.  The syntax @{text "\<sqsubset>"} for @{text less} is only
+  "(\<preceq>)"}.  The syntax \<open>\<sqsubset>\<close> for \<open>less\<close> is only
   available for the original instance it was declared for.  We may
-  introduce infix syntax for @{text le'.less} with the following declaration:\<close>
+  introduce infix syntax for \<open>le'.less\<close> with the following declaration:\<close>
 
   notation (in order_preserving) le'.less (infixl "\<prec>" 50)
 
@@ -240,9 +238,8 @@
 text \<open>
   It is possible to omit parameter instantiations.  The
   instantiation then defaults to the name of
-  the parameter itself.  For example, the locale expression @{text
-  partial_order} is short for @{text "partial_order le"}, since the
-  locale's single parameter is~@{text le}.  We took advantage of this
+  the parameter itself.  For example, the locale expression \<open>partial_order\<close> is short for \<open>partial_order le\<close>, since the
+  locale's single parameter is~\<open>le\<close>.  We took advantage of this
   in the \isakeyword{sublocale} declarations of
   Section~\ref{sec:changing-the-hierarchy}.\<close>
 
@@ -267,7 +264,7 @@
   locale parameters for which no parameter instantiation is given are
   implicitly added, with their mixfix syntax, at the beginning of the
   \isakeyword{for} clause.  For example, in a locale declaration, the
-  expression @{text partial_order} is short for
+  expression \<open>partial_order\<close> is short for
 \begin{small}
 \begin{alltt}
   partial_order le \isakeyword{for} le (\isakeyword{infixl} "\(\sqsubseteq\)" 50)\textrm{.}
@@ -279,7 +276,7 @@
 
 text\<open>
   The following locale declarations provide more examples.  A
-  map~@{text \<phi>} is a lattice homomorphism if it preserves meet and
+  map~\<open>\<phi>\<close> is a lattice homomorphism if it preserves meet and
   join.\<close>
 
   locale lattice_hom =
@@ -289,9 +286,9 @@
       and hom_join: "\<phi> (x \<squnion> y) = le'.join (\<phi> x) (\<phi> y)"
 
 text \<open>The parameter instantiation in the first instance of @{term
-  lattice} is omitted.  This causes the parameter~@{text le} to be
+  lattice} is omitted.  This causes the parameter~\<open>le\<close> to be
   added to the \isakeyword{for} clause, and the locale has
-  parameters~@{text le},~@{text le'} and, of course,~@{text \<phi>}.
+  parameters~\<open>le\<close>,~\<open>le'\<close> and, of course,~\<open>\<phi>\<close>.
 
   Before turning to the second example, we complete the locale by
   providing infix syntax for the meet and join operations of the
@@ -310,11 +307,10 @@
 
   locale lattice_end = lattice_hom _ le
 
-text \<open>The notation~@{text _} enables to omit a parameter in a
-  positional instantiation.  The omitted parameter,~@{text le} becomes
+text \<open>The notation~\<open>_\<close> makes it possible to omit a parameter in a
+  positional instantiation.  The omitted parameter,~\<open>le\<close>, becomes
   the parameter of the declared locale and is, in the following
-  position, used to instantiate the second parameter of @{text
-  lattice_hom}.  The effect is that of identifying the first in second
+  position, used to instantiate the second parameter of \<open>lattice_hom\<close>.  The effect is that of identifying the first and second
   parameters of the homomorphism locale.\<close>
 
 text \<open>The inheritance diagram of the situation we have now is shown
@@ -322,9 +318,7 @@
   interpretation which is introduced below.  Parameter instantiations
   are indicated by $\sqsubseteq \mapsto \preceq$ etc.  By looking at
   the inheritance diagram it would seem
-  that two identical copies of each of the locales @{text
-  partial_order} and @{text lattice} are imported by @{text
-  lattice_end}.  This is not the case!  Inheritance paths with
+  that two identical copies of each of the locales \<open>partial_order\<close> and \<open>lattice\<close> are imported by \<open>lattice_end\<close>.  This is not the case!  Inheritance paths with
   identical morphisms are automatically detected and
   the conclusions of the respective locales appear only once.
 
@@ -332,15 +326,15 @@
 \hrule \vspace{2ex}
 \begin{center}
 \begin{tikzpicture}
-  \node (o) at (0,0) {@{text partial_order}};
-  \node (oh) at (1.5,-2) {@{text order_preserving}};
+  \node (o) at (0,0) {\<open>partial_order\<close>};
+  \node (oh) at (1.5,-2) {\<open>order_preserving\<close>};
   \node (oh1) at (1.5,-0.7) {$\scriptscriptstyle \sqsubseteq \mapsto \sqsubseteq$};
   \node (oh2) at (0,-1.3) {$\scriptscriptstyle \sqsubseteq \mapsto \preceq$};
-  \node (l) at (-1.5,-2) {@{text lattice}};
-  \node (lh) at (0,-4) {@{text lattice_hom}};
+  \node (l) at (-1.5,-2) {\<open>lattice\<close>};
+  \node (lh) at (0,-4) {\<open>lattice_hom\<close>};
   \node (lh1) at (0,-2.7) {$\scriptscriptstyle \sqsubseteq \mapsto \sqsubseteq$};
   \node (lh2) at (-1.5,-3.3) {$\scriptscriptstyle \sqsubseteq \mapsto \preceq$};
-  \node (le) at (0,-6) {@{text lattice_end}};
+  \node (le) at (0,-6) {\<open>lattice_end\<close>};
   \node (le1) at (0,-4.8)
     [anchor=west]{$\scriptscriptstyle \sqsubseteq \mapsto \sqsubseteq$};
   \node (le2) at (0,-5.2)
@@ -375,8 +369,7 @@
 
 text (in lattice_hom) \<open>
   Theorems and other declarations --- syntax, in particular --- from
-  the locale @{text order_preserving} are now active in @{text
-  lattice_hom}, for example
+  the locale \<open>order_preserving\<close> are now active in \<open>lattice_hom\<close>, for example
   @{thm [source] hom_le}:
   @{thm [display, indent=2] hom_le}
   This theorem will be useful in the following section.
@@ -388,9 +381,9 @@
 text \<open>There are situations where an interpretation is not possible
   in the general case since the desired property is only valid if
   certain conditions are fulfilled.  Take, for example, the function
-  @{text "\<lambda>i. n * i"} that scales its argument by a constant factor.
+  \<open>\<lambda>i. n * i\<close> that scales its argument by a constant factor.
   This function is order preserving (and even a lattice endomorphism)
-  with respect to @{term "(\<le>)"} provided @{text "n \<ge> 0"}.
+  with respect to @{term "(\<le>)"} provided \<open>n \<ge> 0\<close>.
 
   It is not possible to express this using a global interpretation,
   because it is in general unspecified whether~@{term n} is
--- a/src/Doc/Logics_ZF/FOL_examples.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Logics_ZF/FOL_examples.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -12,7 +12,7 @@
   \<comment> \<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
   \<comment> \<open>@{subgoals[display,indent=0,margin=65]}\<close>
-txt\<open>see below for @{text allI} combined with @{text swap}\<close>
+txt\<open>see below for \<open>allI\<close> combined with \<open>swap\<close>\<close>
 apply (erule allI [THEN [2] swap])
   \<comment> \<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
--- a/src/Doc/Logics_ZF/IFOL_examples.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Logics_ZF/IFOL_examples.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -15,7 +15,7 @@
   \<comment> \<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
   \<comment> \<open>@{subgoals[display,indent=0,margin=65]}\<close>
-txt\<open>Now @{text "apply assumption"} fails\<close>
+txt\<open>Now \<open>apply assumption\<close> fails\<close>
 oops
 
 text\<open>Trying again, with the same first two steps\<close>
--- a/src/Doc/Logics_ZF/If.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Logics_ZF/If.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -40,7 +40,7 @@
   \<comment> \<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
-text\<open>Trying again from the beginning in order to use @{text blast}\<close>
+text\<open>Trying again from the beginning in order to use \<open>blast\<close>\<close>
 declare ifI [intro!]
 declare ifE [elim!]
 
--- a/src/Doc/Logics_ZF/ZF_Isar.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Logics_ZF/ZF_Isar.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -18,9 +18,9 @@
   Simplifier setup.
 
   \begin{matharray}{rcl}
-    @{command_def (ZF) "print_tcset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
-    @{method_def (ZF) typecheck} & : & @{text method} \\
-    @{attribute_def (ZF) TC} & : & @{text attribute} \\
+    @{command_def (ZF) "print_tcset"}\<open>\<^sup>*\<close> & : & \<open>context \<rightarrow>\<close> \\
+    @{method_def (ZF) typecheck} & : & \<open>method\<close> \\
+    @{attribute_def (ZF) TC} & : & \<open>attribute\<close> \\
   \end{matharray}
 
   @{rail \<open>
@@ -52,10 +52,10 @@
   Coinductive definitions are available in both cases, too.
 
   \begin{matharray}{rcl}
-    @{command_def (ZF) "inductive"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (ZF) "coinductive"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (ZF) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
-    @{command_def (ZF) "codatatype"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (ZF) "inductive"} & : & \<open>theory \<rightarrow> theory\<close> \\
+    @{command_def (ZF) "coinductive"} & : & \<open>theory \<rightarrow> theory\<close> \\
+    @{command_def (ZF) "datatype"} & : & \<open>theory \<rightarrow> theory\<close> \\
+    @{command_def (ZF) "codatatype"} & : & \<open>theory \<rightarrow> theory\<close> \\
   \end{matharray}
 
   @{rail \<open>
@@ -78,8 +78,7 @@
     @{syntax_def (ZF) typeelims}: @'type_elims' @{syntax thms}
   \<close>}
 
-  In the following syntax specification @{text "monos"}, @{text
-  typeintros}, and @{text typeelims} are the same as above.
+  In the following syntax specification \<open>monos\<close>, \<open>typeintros\<close>, and \<open>typeelims\<close> are the same as above.
 
   @{rail \<open>
     (@@{command (ZF) datatype} | @@{command (ZF) codatatype}) domain? (dtspec + @'and') hints
@@ -104,7 +103,7 @@
 
 text \<open>
   \begin{matharray}{rcl}
-    @{command_def (ZF) "primrec"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{command_def (ZF) "primrec"} & : & \<open>theory \<rightarrow> theory\<close> \\
   \end{matharray}
 
   @{rail \<open>
@@ -120,10 +119,10 @@
   ported to Isar.  These should not be used in proper proof texts.
 
   \begin{matharray}{rcl}
-    @{method_def (ZF) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (ZF) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
-    @{method_def (ZF) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
-    @{command_def (ZF) "inductive_cases"} & : & @{text "theory \<rightarrow> theory"} \\
+    @{method_def (ZF) case_tac}\<open>\<^sup>*\<close> & : & \<open>method\<close> \\
+    @{method_def (ZF) induct_tac}\<open>\<^sup>*\<close> & : & \<open>method\<close> \\
+    @{method_def (ZF) ind_cases}\<open>\<^sup>*\<close> & : & \<open>method\<close> \\
+    @{command_def (ZF) "inductive_cases"} & : & \<open>theory \<rightarrow> theory\<close> \\
   \end{matharray}
 
   @{rail \<open>
--- a/src/Doc/Logics_ZF/ZF_examples.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Logics_ZF/ZF_examples.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -146,7 +146,7 @@
 apply (assumption+)
 done
 
-text\<open>Trying again from the beginning in order to use @{text blast}\<close>
+text\<open>Trying again from the beginning in order to use \<open>blast\<close>\<close>
 lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
 by blast
 
--- a/src/Doc/Prog_Prove/Basics.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Prog_Prove/Basics.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -20,50 +20,50 @@
 @{typ nat}, the type of natural numbers ($\mathbb{N}$), and \indexed{@{typ int}}{int},
 the type of mathematical integers ($\mathbb{Z}$).
 \item[type constructors,]
- in particular @{text list}, the type of
-lists, and @{text set}, the type of sets. Type constructors are written
+ in particular \<open>list\<close>, the type of
+lists, and \<open>set\<close>, the type of sets. Type constructors are written
 postfix, i.e., after their arguments. For example,
 @{typ "nat list"} is the type of lists whose elements are natural numbers.
 \item[function types,]
-denoted by @{text"\<Rightarrow>"}.
+denoted by \<open>\<Rightarrow>\<close>.
 \item[type variables,]
   denoted by @{typ 'a}, @{typ 'b}, etc., like in ML\@.
 \end{description}
 Note that @{typ"'a \<Rightarrow> 'b list"} means \noquotes{@{typ[source]"'a \<Rightarrow> ('b list)"}},
 not @{typ"('a \<Rightarrow> 'b) list"}: postfix type constructors have precedence
-over @{text"\<Rightarrow>"}.
+over \<open>\<Rightarrow>\<close>.
 
 \conceptidx{Terms}{term} are formed as in functional programming by
-applying functions to arguments. If @{text f} is a function of type
-@{text"\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2"} and @{text t} is a term of type
-@{text"\<tau>\<^sub>1"} then @{term"f t"} is a term of type @{text"\<tau>\<^sub>2"}. We write @{text "t :: \<tau>"} to mean that term @{text t} has type @{text \<tau>}.
+applying functions to arguments. If \<open>f\<close> is a function of type
+\<open>\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2\<close> and \<open>t\<close> is a term of type
+\<open>\<tau>\<^sub>1\<close> then @{term"f t"} is a term of type \<open>\<tau>\<^sub>2\<close>. We write \<open>t :: \<tau>\<close> to mean that term \<open>t\<close> has type \<open>\<tau>\<close>.
 
 \begin{warn}
-There are many predefined infix symbols like @{text "+"} and @{text"\<le>"}.
+There are many predefined infix symbols like \<open>+\<close> and \<open>\<le>\<close>.
 The name of the corresponding binary function is @{term"(+)"},
-not just @{text"+"}. That is, @{term"x + y"} is nice surface syntax
+not just \<open>+\<close>. That is, @{term"x + y"} is nice surface syntax
 (``syntactic sugar'') for \noquotes{@{term[source]"(+) x y"}}.
 \end{warn}
 
 HOL also supports some basic constructs from functional programming:
 \begin{quote}
-@{text "(if b then t\<^sub>1 else t\<^sub>2)"}\\
-@{text "(let x = t in u)"}\\
-@{text "(case t of pat\<^sub>1 \<Rightarrow> t\<^sub>1 | \<dots> | pat\<^sub>n \<Rightarrow> t\<^sub>n)"}
+\<open>(if b then t\<^sub>1 else t\<^sub>2)\<close>\\
+\<open>(let x = t in u)\<close>\\
+\<open>(case t of pat\<^sub>1 \<Rightarrow> t\<^sub>1 | \<dots> | pat\<^sub>n \<Rightarrow> t\<^sub>n)\<close>
 \end{quote}
 \begin{warn}
 The above three constructs must always be enclosed in parentheses
 if they occur inside other constructs.
 \end{warn}
-Terms may also contain @{text "\<lambda>"}-abstractions. For example,
+Terms may also contain \<open>\<lambda>\<close>-abstractions. For example,
 @{term "\<lambda>x. x"} is the identity function.
 
-\conceptidx{Formulas}{formula} are terms of type @{text bool}.
+\conceptidx{Formulas}{formula} are terms of type \<open>bool\<close>.
 There are the basic constants @{term True} and @{term False} and
 the usual logical connectives (in decreasing order of precedence):
-@{text"\<not>"}, @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}.
+\<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>.
 
-\conceptidx{Equality}{equality} is available in the form of the infix function @{text "="}
+\conceptidx{Equality}{equality} is available in the form of the infix function \<open>=\<close>
 of type @{typ "'a \<Rightarrow> 'a \<Rightarrow> bool"}. It also works for formulas, where
 it means ``if and only if''.
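
A few of these points can be replayed interactively, e.g.:

    term "Suc"                     (* :: nat \<Rightarrow> nat *)
    term "\<lambda>x. x"                   (* :: 'a \<Rightarrow> 'a *)
    term "True \<and> \<not> False"         (* a formula, :: bool *)
    value "if True then (1::nat) else 2"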
 
@@ -72,26 +72,26 @@
 Isabelle automatically computes the type of each variable in a term. This is
 called \concept{type inference}.  Despite type inference, it is sometimes
 necessary to attach an explicit \concept{type constraint} (or \concept{type
-annotation}) to a variable or term.  The syntax is @{text "t :: \<tau>"} as in
+annotation}) to a variable or term.  The syntax is \<open>t :: \<tau>\<close> as in
 \mbox{\noquotes{@{term[source] "m + (n::nat)"}}}. Type constraints may be
 needed to
-disambiguate terms involving overloaded functions such as @{text "+"}.
+disambiguate terms involving overloaded functions such as \<open>+\<close>.
 
-Finally there are the universal quantifier @{text"\<And>"}\index{$4@\isasymAnd} and the implication
-@{text"\<Longrightarrow>"}\index{$3@\isasymLongrightarrow}. They are part of the Isabelle framework, not the logic
-HOL. Logically, they agree with their HOL counterparts @{text"\<forall>"} and
-@{text"\<longrightarrow>"}, but operationally they behave differently. This will become
+Finally there are the universal quantifier \<open>\<And>\<close>\index{$4@\isasymAnd} and the implication
+\<open>\<Longrightarrow>\<close>\index{$3@\isasymLongrightarrow}. They are part of the Isabelle framework, not the logic
+HOL. Logically, they agree with their HOL counterparts \<open>\<forall>\<close> and
+\<open>\<longrightarrow>\<close>, but operationally they behave differently. This will become
 clearer as we go along.
 \begin{warn}
 Right-arrows of all kinds always associate to the right. In particular,
 the formula
-@{text"A\<^sub>1 \<Longrightarrow> A\<^sub>2 \<Longrightarrow> A\<^sub>3"} means @{text "A\<^sub>1 \<Longrightarrow> (A\<^sub>2 \<Longrightarrow> A\<^sub>3)"}.
+\<open>A\<^sub>1 \<Longrightarrow> A\<^sub>2 \<Longrightarrow> A\<^sub>3\<close> means \<open>A\<^sub>1 \<Longrightarrow> (A\<^sub>2 \<Longrightarrow> A\<^sub>3)\<close>.
 The (Isabelle-specific\footnote{To display implications in this style in
-Isabelle/jEdit you need to set Plugins $>$ Plugin Options $>$ Isabelle/General $>$ Print Mode to ``\texttt{brackets}'' and restart.}) notation \mbox{@{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}}
-is short for the iterated implication \mbox{@{text"A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> A"}}.
+Isabelle/jEdit you need to set Plugins $>$ Plugin Options $>$ Isabelle/General $>$ Print Mode to ``\texttt{brackets}'' and restart.}) notation \mbox{\<open>\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A\<close>}
+is short for the iterated implication \mbox{\<open>A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> A\<close>}.
 Sometimes we also employ inference rule notation:
-\inferrule{\mbox{@{text "A\<^sub>1"}}\\ \mbox{@{text "\<dots>"}}\\ \mbox{@{text "A\<^sub>n"}}}
-{\mbox{@{text A}}}
+\inferrule{\mbox{\<open>A\<^sub>1\<close>}\\ \mbox{\<open>\<dots>\<close>}\\ \mbox{\<open>A\<^sub>n\<close>}}
+{\mbox{\<open>A\<close>}}
 \end{warn}
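
The bracket notation in action (a small sketch):

    lemma "\<lbrakk> A; B \<rbrakk> \<Longrightarrow> B \<and> A"   (* the same as A \<Longrightarrow> B \<Longrightarrow> B \<and> A *)
    by simp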
 
 
@@ -101,25 +101,25 @@
 Roughly speaking, a \concept{theory} is a named collection of types,
 functions, and theorems, much like a module in a programming language.
 All Isabelle text needs to go into a theory.
-The general format of a theory @{text T} is
+The general format of a theory \<open>T\<close> is
 \begin{quote}
-\indexed{\isacom{theory}}{theory} @{text T}\\
-\indexed{\isacom{imports}}{imports} @{text "T\<^sub>1 \<dots> T\<^sub>n"}\\
+\indexed{\isacom{theory}}{theory} \<open>T\<close>\\
+\indexed{\isacom{imports}}{imports} \<open>T\<^sub>1 \<dots> T\<^sub>n\<close>\\
 \isacom{begin}\\
 \emph{definitions, theorems and proofs}\\
 \isacom{end}
 \end{quote}
-where @{text "T\<^sub>1 \<dots> T\<^sub>n"} are the names of existing
-theories that @{text T} is based on. The @{text "T\<^sub>i"} are the
-direct \conceptidx{parent theories}{parent theory} of @{text T}.
+where \<open>T\<^sub>1 \<dots> T\<^sub>n\<close> are the names of existing
+theories that \<open>T\<close> is based on. The \<open>T\<^sub>i\<close> are the
+direct \conceptidx{parent theories}{parent theory} of \<open>T\<close>.
 Everything defined in the parent theories (and their parents, recursively) is
-automatically visible. Each theory @{text T} must
-reside in a \concept{theory file} named @{text "T.thy"}.
+automatically visible. Each theory \<open>T\<close> must
+reside in a \concept{theory file} named \<open>T.thy\<close>.
 
 \begin{warn}
 HOL contains a theory @{theory Main}\index{Main@@{theory Main}}, the union of all the basic
 predefined theories like arithmetic, lists, sets, etc.
-Unless you know what you are doing, always include @{text Main}
+Unless you know what you are doing, always include \<open>Main\<close>
 as a direct or indirect parent of all your theories.
 \end{warn}
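
The skeleton in concrete form, for a theory file T.thy:

    theory T
    imports Main
    begin

    (* definitions, theorems and proofs *)

    end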
 
--- a/src/Doc/Prog_Prove/Bool_nat_list.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Prog_Prove/Bool_nat_list.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -6,7 +6,7 @@
 
 text\<open>
 \vspace{-4ex}
-\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and @{text list}}{Types bool, nat and list}}
+\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and \<open>list\<close>}{Types bool, nat and list}}
 
 These are the most important predefined types. We go through them one by one.
 Based on examples we learn how to define (possibly recursive) functions and
@@ -17,8 +17,7 @@
 The type of boolean values is a predefined datatype
 @{datatype[display] bool}
 with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
-with many predefined functions:  @{text "\<not>"}, @{text "\<and>"}, @{text "\<or>"}, @{text
-"\<longrightarrow>"}, etc. Here is how conjunction could be defined by pattern matching:
+with many predefined functions:  \<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>, etc. Here is how conjunction could be defined by pattern matching:
 \<close>
 
 fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
@@ -33,10 +32,9 @@
 Natural numbers are another predefined datatype:
 @{datatype[display] nat}\index{Suc@@{const Suc}}
 All values of type @{typ nat} are generated by the constructors
-@{text 0} and @{const Suc}. Thus the values of type @{typ nat} are
-@{text 0}, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
-There are many predefined functions: @{text "+"}, @{text "*"}, @{text
-"\<le>"}, etc. Here is how you could define your own addition:
+\<open>0\<close> and @{const Suc}. Thus the values of type @{typ nat} are
+\<open>0\<close>, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
+There are many predefined functions: \<open>+\<close>, \<open>*\<close>, \<open>\<le>\<close>, etc. Here is how you could define your own addition:
 \<close>
 
 fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
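(The defining equations fall outside the hunk; given the informal proof below, they are presumably:)

    "add 0 n = n" |
    "add (Suc m) n = Suc (add m n)"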
@@ -54,22 +52,22 @@
 apply(induction m)
 (*>*)
 txt\<open>The \isacom{lemma} command starts the proof and gives the lemma
-a name, @{text add_02}. Properties of recursively defined functions
+a name, \<open>add_02\<close>. Properties of recursively defined functions
 need to be established by induction in most cases.
-Command \isacom{apply}@{text"(induction m)"} instructs Isabelle to
-start a proof by induction on @{text m}. In response, it will show the
+Command \isacom{apply}\<open>(induction m)\<close> instructs Isabelle to
+start a proof by induction on \<open>m\<close>. In response, it will show the
 following proof state\ifsem\footnote{See page \pageref{proof-state} for how to
 display the proof state.}\fi:
 @{subgoals[display,indent=0]}
 The numbered lines are known as \emph{subgoals}.
 The first subgoal is the base case, the second one the induction step.
-The prefix @{text"\<And>m."} is Isabelle's way of saying ``for an arbitrary but fixed @{text m}''. The @{text"\<Longrightarrow>"} separates assumptions from the conclusion.
-The command \isacom{apply}@{text"(auto)"} instructs Isabelle to try
+The prefix \<open>\<And>m.\<close> is Isabelle's way of saying ``for an arbitrary but fixed \<open>m\<close>''. The \<open>\<Longrightarrow>\<close> separates assumptions from the conclusion.
+The command \isacom{apply}\<open>(auto)\<close> instructs Isabelle to try
 and prove all subgoals automatically, essentially by simplifying them.
 Because both subgoals are easy, Isabelle can do it.
 The base case @{prop"add 0 0 = 0"} holds by definition of @{const add},
 and the induction step is almost as simple:
-@{text"add\<^latex>\<open>~\<close>(Suc m) 0 = Suc(add m 0) = Suc m"}
+\<open>add\<^latex>\<open>~\<close>(Suc m) 0 = Suc(add m 0) = Suc m\<close>
 using first the definition of @{const add} and then the induction hypothesis.
 In summary, both subproofs rely on simplification with function definitions and
 the induction hypothesis.
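
The full script, assembled from the steps just described:

    lemma add_02: "add m 0 = m"
    apply(induction m)
    apply(auto)
    done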
@@ -80,33 +78,33 @@
 thm add_02
 
 txt\<open>which displays @{thm[show_question_marks,display] add_02} The free
-variable @{text m} has been replaced by the \concept{unknown}
-@{text"?m"}. There is no logical difference between the two but there is an
+variable \<open>m\<close> has been replaced by the \concept{unknown}
+\<open>?m\<close>. There is no logical difference between the two but there is an
 operational one: unknowns can be instantiated, which is what you want after
 some lemma has been proved.
 
-Note that there is also a proof method @{text induct}, which behaves almost
-like @{text induction}; the difference is explained in \autoref{ch:Isar}.
+Note that there is also a proof method \<open>induct\<close>, which behaves almost
+like \<open>induction\<close>; the difference is explained in \autoref{ch:Isar}.
 
 \begin{warn}
 Terminology: We use \concept{lemma}, \concept{theorem} and \concept{rule}
 interchangeably for propositions that have been proved.
 \end{warn}
 \begin{warn}
-  Numerals (@{text 0}, @{text 1}, @{text 2}, \dots) and most of the standard
-  arithmetic operations (@{text "+"}, @{text "-"}, @{text "*"}, @{text"\<le>"},
-  @{text"<"}, etc.) are overloaded: they are available
+  Numerals (\<open>0\<close>, \<open>1\<close>, \<open>2\<close>, \dots) and most of the standard
+  arithmetic operations (\<open>+\<close>, \<open>-\<close>, \<open>*\<close>, \<open>\<le>\<close>,
+  \<open><\<close>, etc.) are overloaded: they are available
   not just for natural numbers but for other types as well.
-  For example, given the goal @{text"x + 0 = x"}, there is nothing to indicate
+  For example, given the goal \<open>x + 0 = x\<close>, there is nothing to indicate
   that you are talking about natural numbers. Hence Isabelle can only infer
-  that @{term x} is of some arbitrary type where @{text 0} and @{text"+"}
+  that @{term x} is of some arbitrary type where \<open>0\<close> and \<open>+\<close>
   exist. As a consequence, you will be unable to prove the goal.
 %  To alert you to such pitfalls, Isabelle flags numerals without a
 %  fixed type in its output: @ {prop"x+0 = x"}.
   In this particular example, you need to include
-  an explicit type constraint, for example @{text"x+0 = (x::nat)"}. If there
+  an explicit type constraint, for example \<open>x+0 = (x::nat)\<close>. If there
   is enough contextual information this may not be necessary: @{prop"Suc x =
-  x"} automatically implies @{text"x::nat"} because @{term Suc} is not
+  x"} automatically implies \<open>x::nat\<close> because @{term Suc} is not
   overloaded.
 \end{warn}
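+For example (a minimal sketch), the constrained statement goes through:
+\begin{quote}
+\isacom{lemma} \<open>"x + 0 = (x::nat)"\<close>\\
+\isacom{apply}\<open>(auto)\<close>\\
+\isacom{done}
+\end{quote}
+Without the constraint \<open>::nat\<close> the same goal is stated for an arbitrary
+type and cannot be proved.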
 
@@ -121,19 +119,19 @@
 \textbf{Lemma} @{prop"add m 0 = m"}
 
 \noindent
-\textbf{Proof} by induction on @{text m}.
+\textbf{Proof} by induction on \<open>m\<close>.
 \begin{itemize}
-\item Case @{text 0} (the base case): @{prop"add 0 0 = 0"}
+\item Case \<open>0\<close> (the base case): @{prop"add 0 0 = 0"}
   holds by definition of @{const add}.
 \item Case @{term"Suc m"} (the induction step):
   We assume @{prop"add m 0 = m"}, the induction hypothesis (IH),
-  and we need to show @{text"add (Suc m) 0 = Suc m"}.
+  and we need to show \<open>add (Suc m) 0 = Suc m\<close>.
   The proof is as follows:\smallskip
 
   \begin{tabular}{@ {}rcl@ {\quad}l@ {}}
-  @{term "add (Suc m) 0"} &@{text"="}& @{term"Suc(add m 0)"}
-  & by definition of @{text add}\\
-              &@{text"="}& @{term "Suc m"} & by IH
+  @{term "add (Suc m) 0"} &\<open>=\<close>& @{term"Suc(add m 0)"}
+  & by definition of \<open>add\<close>\\
+              &\<open>=\<close>& @{term "Suc m"} & by IH
   \end{tabular}
 \end{itemize}
 Throughout this book, \concept{IH} will stand for ``induction hypothesis''.
@@ -149,7 +147,7 @@
 that is closer to traditional informal mathematical language and is often
 directly readable.
 
-\subsection{Type \indexed{@{text list}}{list}}
+\subsection{Type \indexed{\<open>list\<close>}{list}}
 
 Although lists are already predefined, we define our own copy for
 demonstration purposes:
@@ -185,8 +183,8 @@
 "rev Nil = Nil" |
 "rev (Cons x xs) = app (rev xs) (Cons x Nil)"
 
-text\<open>By default, variables @{text xs}, @{text ys} and @{text zs} are of
-@{text list} type.
+text\<open>By default, variables \<open>xs\<close>, \<open>ys\<close> and \<open>zs\<close> are of
+\<open>list\<close> type.
 
 Command \indexed{\isacommand{value}}{value} evaluates a term. For example,\<close>
 
@@ -200,8 +198,8 @@
 \medskip
 
 Figure~\ref{fig:MyList} shows the theory created so far.
-Because @{text list}, @{const Nil}, @{const Cons}, etc.\ are already predefined,
- Isabelle prints qualified (long) names when executing this theory, for example, @{text MyList.Nil}
+Because \<open>list\<close>, @{const Nil}, @{const Cons}, etc.\ are already predefined,
+ Isabelle prints qualified (long) names when executing this theory, for example, \<open>MyList.Nil\<close>
  instead of @{const Nil}.
  To suppress the qualified names you can insert the command
  \texttt{declare [[names\_short]]}.
@@ -225,11 +223,11 @@
 Just as for natural numbers, there is a proof principle of induction for
 lists. Induction over a list is essentially induction over the length of
 the list, although the length remains implicit. To prove that some property
-@{text P} holds for all lists @{text xs}, i.e., \mbox{@{prop"P(xs)"}},
+\<open>P\<close> holds for all lists \<open>xs\<close>, i.e., \mbox{@{prop"P(xs)"}},
 you need to prove
 \begin{enumerate}
 \item the base case @{prop"P(Nil)"} and
-\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed @{text x} and @{text xs}.
+\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed \<open>x\<close> and \<open>xs\<close>.
 \end{enumerate}
 This is often called \concept{structural induction} for lists.
 
@@ -244,7 +242,7 @@
 
 txt\<open>Commands \isacom{theorem} and \isacom{lemma} are
 interchangeable and merely indicate the importance we attach to a
-proposition. Via the bracketed attribute @{text simp} we also tell Isabelle
+proposition. Via the bracketed attribute \<open>simp\<close> we also tell Isabelle
 to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
 involving simplification will replace occurrences of @{term"rev(rev xs)"} by
 @{term"xs"}. The proof is by induction:\<close>
@@ -273,9 +271,9 @@
 (*>*)
 lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
 
-txt\<open>There are two variables that we could induct on: @{text xs} and
-@{text ys}. Because @{const app} is defined by recursion on
-the first argument, @{text xs} is the correct one:
+txt\<open>There are two variables that we could induct on: \<open>xs\<close> and
+\<open>ys\<close>. Because @{const app} is defined by recursion on
+the first argument, \<open>xs\<close> is the correct one:
 \<close>
 
 apply(induction xs)
@@ -310,11 +308,11 @@
 apply(auto)
 
 txt\<open>
-We find that this time @{text"auto"} solves the base case, but the
+We find that this time \<open>auto\<close> solves the base case, but the
 induction step merely simplifies to
 @{subgoals[display,indent=0,goals_limit=1]}
 The missing lemma is associativity of @{const app},
-which we insert in front of the failed lemma @{text rev_app}.
+which we insert in front of the failed lemma \<open>rev_app\<close>.
 
 \subsubsection{Associativity of @{const app}}
 
@@ -350,12 +348,12 @@
 \textbf{Lemma} @{prop"app (app xs ys) zs = app xs (app ys zs)"}
 
 \noindent
-\textbf{Proof} by induction on @{text xs}.
+\textbf{Proof} by induction on \<open>xs\<close>.
 \begin{itemize}
-\item Case @{text Nil}: \ @{prop"app (app Nil ys) zs = app ys zs"} @{text"="}
-  \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of @{text app}.
-\item Case @{text"Cons x xs"}: We assume
-  \begin{center} \hfill @{term"app (app xs ys) zs"} @{text"="}
+\item Case \<open>Nil\<close>: \ @{prop"app (app Nil ys) zs = app ys zs"} \<open>=\<close>
+  \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of \<open>app\<close>.
+\item Case \<open>Cons x xs\<close>: We assume
+  \begin{center} \hfill @{term"app (app xs ys) zs"} \<open>=\<close>
   @{term"app xs (app ys zs)"} \hfill (IH) \end{center}
   and we need to show
   \begin{center} @{prop"app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)"}.\end{center}
@@ -363,10 +361,10 @@
 
   \begin{tabular}{@ {}l@ {\quad}l@ {}}
   @{term"app (app (Cons x xs) ys) zs"}\\
-  @{text"= app (Cons x (app xs ys)) zs"} & by definition of @{text app}\\
-  @{text"= Cons x (app (app xs ys) zs)"} & by definition of @{text app}\\
-  @{text"= Cons x (app xs (app ys zs))"} & by IH\\
-  @{text"= app (Cons x xs) (app ys zs)"} & by definition of @{text app}
+  \<open>= app (Cons x (app xs ys)) zs\<close> & by definition of \<open>app\<close>\\
+  \<open>= Cons x (app (app xs ys) zs)\<close> & by definition of \<open>app\<close>\\
+  \<open>= Cons x (app xs (app ys zs))\<close> & by IH\\
+  \<open>= app (Cons x xs) (app ys zs)\<close> & by definition of \<open>app\<close>
   \end{tabular}
 \end{itemize}
 \medskip
@@ -375,8 +373,8 @@
 in both cases, going from left to right, the last equality step is not a
 simplification at all! In the base case it is @{prop"app ys zs = app Nil (app
 ys zs)"}. It appears almost mysterious because we suddenly complicate the
-term by appending @{text Nil} on the left. What is really going on is this:
-when proving some equality \mbox{@{prop"s = t"}}, both @{text s} and @{text t} are
+term by appending \<open>Nil\<close> on the left. What is really going on is this:
+when proving some equality \mbox{@{prop"s = t"}}, both \<open>s\<close> and \<open>t\<close> are
 simplified until they ``meet in the middle''. This heuristic for equality proofs
 works well for a functional programming context like ours. In the base case
 both @{term"app (app Nil ys) zs"} and @{term"app Nil (app
@@ -388,31 +386,31 @@
 Isabelle's predefined lists are the same as the ones above, but with
 more syntactic sugar:
 \begin{itemize}
-\item @{text "[]"} is \indexed{@{const Nil}}{Nil},
+\item \<open>[]\<close> is \indexed{@{const Nil}}{Nil},
 \item @{term"x # xs"} is @{term"Cons x xs"}\index{Cons@@{const Cons}},
-\item @{text"[x\<^sub>1, \<dots>, x\<^sub>n]"} is @{text"x\<^sub>1 # \<dots> # x\<^sub>n # []"}, and
+\item \<open>[x\<^sub>1, \<dots>, x\<^sub>n]\<close> is \<open>x\<^sub>1 # \<dots> # x\<^sub>n # []\<close>, and
 \item @{term "xs @ ys"} is @{term"app xs ys"}.
 \end{itemize}
 There is also a large library of predefined functions.
 The most important ones are the length function
-@{text"length :: 'a list \<Rightarrow> nat"}\index{length@@{const length}} (with the obvious definition),
+\<open>length :: 'a list \<Rightarrow> nat\<close>\index{length@@{const length}} (with the obvious definition),
 and the \indexed{@{const map}}{map} function that applies a function to all elements of a list:
 \begin{isabelle}
-\isacom{fun} @{const map} @{text"::"} @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
-@{text"\""}@{thm list.map(1) [of f]}@{text"\" |"}\\
-@{text"\""}@{thm list.map(2) [of f x xs]}@{text"\""}
+\isacom{fun} @{const map} \<open>::\<close> @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
+\<open>"\<close>@{thm list.map(1) [of f]}\<open>" |\<close>\\
+\<open>"\<close>@{thm list.map(2) [of f x xs]}\<open>"\<close>
 \end{isabelle}
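+For example (a minimal sketch):
+\<open>map (\<lambda>n. n + 1) [1, 2, 3::nat] = [2, 3, 4]\<close> \ and \ \<open>length [1, 2, 3::nat] = 3\<close>.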
 
 \ifsem
 Also useful are the \concept{head} of a list, its first element,
 and the \concept{tail}, the rest of the list:
 \begin{isabelle}\index{hd@@{const hd}}
-\isacom{fun} @{text"hd :: 'a list \<Rightarrow> 'a"}\\
+\isacom{fun} \<open>hd :: 'a list \<Rightarrow> 'a\<close>\\
 @{prop"hd(x#xs) = x"}
 \end{isabelle}
 \begin{isabelle}\index{tl@@{const tl}}
-\isacom{fun} @{text"tl :: 'a list \<Rightarrow> 'a list"}\\
-@{prop"tl [] = []"} @{text"|"}\\
+\isacom{fun} \<open>tl :: 'a list \<Rightarrow> 'a list\<close>\\
+@{prop"tl [] = []"} \<open>|\<close>\\
 @{prop"tl(x#xs) = xs"}
 \end{isabelle}
 Note that since HOL is a logic of total functions, @{term"hd []"} is defined,
@@ -477,26 +475,26 @@
 \begin{exercise}
 Start from the definition of @{const add} given above.
 Prove that @{const add} is associative and commutative.
-Define a recursive function @{text double} @{text"::"} @{typ"nat \<Rightarrow> nat"}
+Define a recursive function \<open>double\<close> \<open>::\<close> @{typ"nat \<Rightarrow> nat"}
 and prove @{prop"double m = add m m"}.
 \end{exercise}
 
 \begin{exercise}
-Define a function @{text"count ::"} @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
+Define a function \<open>count ::\<close> @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
 that counts the number of occurrences of an element in a list. Prove
 @{prop"count x xs \<le> length xs"}.
 \end{exercise}
 
 \begin{exercise}
-Define a recursive function @{text "snoc ::"} @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
-that appends an element to the end of a list. With the help of @{text snoc}
-define a recursive function @{text "reverse ::"} @{typ"'a list \<Rightarrow> 'a list"}
+Define a recursive function \<open>snoc ::\<close> @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
+that appends an element to the end of a list. With the help of \<open>snoc\<close>
+define a recursive function \<open>reverse ::\<close> @{typ"'a list \<Rightarrow> 'a list"}
 that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}.
 \end{exercise}
 
 \begin{exercise}
-Define a recursive function @{text "sum_upto ::"} @{typ"nat \<Rightarrow> nat"} such that
-\mbox{@{text"sum_upto n"}} @{text"="} @{text"0 + ... + n"} and prove
+Define a recursive function \<open>sum_upto ::\<close> @{typ"nat \<Rightarrow> nat"} such that
+\mbox{\<open>sum_upto n\<close>} \<open>=\<close> \<open>0 + ... + n\<close> and prove
 @{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
 \end{exercise}
 \<close>
--- a/src/Doc/Prog_Prove/Isar.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Prog_Prove/Isar.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -17,11 +17,11 @@
 \<close>text\<open>
 \begin{tabular}{@ {}l}
 \isacom{proof}\\
-\quad\isacom{assume} @{text"\""}$\mathit{formula}_0$@{text"\""}\\
-\quad\isacom{have} @{text"\""}$\mathit{formula}_1$@{text"\""} \quad\isacom{by} @{text simp}\\
+\quad\isacom{assume} \<open>"\<close>$\mathit{formula}_0$\<open>"\<close>\\
+\quad\isacom{have} \<open>"\<close>$\mathit{formula}_1$\<open>"\<close> \quad\isacom{by} \<open>simp\<close>\\
 \quad\vdots\\
-\quad\isacom{have} @{text"\""}$\mathit{formula}_n$@{text"\""} \quad\isacom{by} @{text blast}\\
-\quad\isacom{show} @{text"\""}$\mathit{formula}_{n+1}$@{text"\""} \quad\isacom{by} @{text \<dots>}\\
+\quad\isacom{have} \<open>"\<close>$\mathit{formula}_n$\<open>"\<close> \quad\isacom{by} \<open>blast\<close>\\
+\quad\isacom{show} \<open>"\<close>$\mathit{formula}_{n+1}$\<open>"\<close> \quad\isacom{by} \<open>\<dots>\<close>\\
 \isacom{qed}
 \end{tabular}
 \<close>text\<open>
@@ -46,7 +46,7 @@
 \medskip
 
 \begin{tabular}{@ {}lcl@ {}}
-\textit{proposition} &=& [\textit{name}:] @{text"\""}\textit{formula}@{text"\""}
+\textit{proposition} &=& [\textit{name}:] \<open>"\<close>\textit{formula}\<open>"\<close>
 \end{tabular}
 \medskip
 
@@ -56,32 +56,31 @@
 \medskip
 
 \noindent A proof can either be an atomic \isacom{by} with a single proof
-method which must finish off the statement being proved, for example @{text
-auto},  or it can be a \isacom{proof}--\isacom{qed} block of multiple
+method which must finish off the statement being proved, for example \<open>auto\<close>, or it can be a \isacom{proof}--\isacom{qed} block of multiple
 steps. Such a block can optionally begin with a proof method that indicates
-how to start off the proof, e.g., \mbox{@{text"(induction xs)"}}.
+how to start off the proof, e.g., \mbox{\<open>(induction xs)\<close>}.
 
 A step either assumes a proposition or states a proposition
 together with its proof. The optional \isacom{from} clause
 indicates which facts are to be used in the proof.
 Intermediate propositions are stated with \isacom{have}, the overall goal
 is stated with \isacom{show}. A step can also introduce new local variables with
-\isacom{fix}. Logically, \isacom{fix} introduces @{text"\<And>"}-quantified
+\isacom{fix}. Logically, \isacom{fix} introduces \<open>\<And>\<close>-quantified
 variables, \isacom{assume} introduces the assumption of an implication
-(@{text"\<Longrightarrow>"}) and \isacom{have}/\isacom{show} introduce the conclusion.
+(\<open>\<Longrightarrow>\<close>) and \isacom{have}/\isacom{show} introduce the conclusion.
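+For example (schematically, where \<open>A\<close> and \<open>B\<close> stand for arbitrary
+formulas), the steps
+\begin{quote}
+\isacom{fix} \<open>x\<close> \quad\isacom{assume} \<open>"A(x)"\<close> \quad\isacom{show} \<open>"B(x)"\<close> \ \isasymproof
+\end{quote}
+prove the proposition \<open>\<And>x. A(x) \<Longrightarrow> B(x)\<close>.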
 
 Propositions are optionally named formulas. These names can be referred to in
 later \isacom{from} clauses. In the simplest case, a fact is such a name.
-But facts can also be composed with @{text OF} and @{text of} as shown in
+But facts can also be composed with \<open>OF\<close> and \<open>of\<close> as shown in
 \autoref{sec:forward-proof} --- hence the \dots\ in the above grammar.  Note
 that assumptions, intermediate \isacom{have} statements and global lemmas all
 have the same status and are thus collectively referred to as
 \conceptidx{facts}{fact}.
 
-Fact names can stand for whole lists of facts. For example, if @{text f} is
-defined by command \isacom{fun}, @{text"f.simps"} refers to the whole list of
-recursion equations defining @{text f}. Individual facts can be selected by
-writing @{text"f.simps(2)"}, whole sublists by writing @{text"f.simps(2-4)"}.
+Fact names can stand for whole lists of facts. For example, if \<open>f\<close> is
+defined by command \isacom{fun}, \<open>f.simps\<close> refers to the whole list of
+recursion equations defining \<open>f\<close>. Individual facts can be selected by
+writing \<open>f.simps(2)\<close>, whole sublists by writing \<open>f.simps(2-4)\<close>.
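+For example (a schematic sketch), if \<open>f\<close> has at least two recursion
+equations, one can write
+\begin{quote}
+\isacom{from} \<open>f.simps(2)\<close> \isacom{have} \<open>"f(Suc n) = \<dots>"\<close> \quad\isacom{by} \<open>simp\<close>
+\end{quote}
+to single out the second equation.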
 
 
 \section{Isar by Example}
@@ -102,27 +101,27 @@
 text\<open>
 The \isacom{proof} command lacks an explicit method by which to perform
 the proof. In such cases Isabelle tries to use some standard introduction
-rule, in the above case for @{text"\<not>"}:
+rule, in the above case for \<open>\<not>\<close>:
 \[
 \inferrule{
 \mbox{@{thm (prem 1) notI}}}
 {\mbox{@{thm (concl) notI}}}
 \]
-In order to prove @{prop"~ P"}, assume @{text P} and show @{text False}.
+In order to prove @{prop"~ P"}, assume \<open>P\<close> and show \<open>False\<close>.
 Thus we may assume \mbox{\noquotes{@{prop [source] "surj f"}}}. The proof shows that names of propositions
 may be (single!) digits --- meaningful names are hard to invent and are often
 not necessary. Both \isacom{have} steps are obvious. The second one introduces
 the diagonal set @{term"{x. x \<notin> f x}"}, the key idea in the proof.
-If you wonder why @{text 2} directly implies @{text False}: from @{text 2}
+If you wonder why \<open>2\<close> directly implies \<open>False\<close>: from \<open>2\<close>
 it follows that @{prop"a \<notin> f a \<longleftrightarrow> a \<in> f a"}.
 
-\subsection{\indexed{@{text this}}{this}, \indexed{\isacom{then}}{then}, \indexed{\isacom{hence}}{hence} and \indexed{\isacom{thus}}{thus}}
+\subsection{\indexed{\<open>this\<close>}{this}, \indexed{\isacom{then}}{then}, \indexed{\isacom{hence}}{hence} and \indexed{\isacom{thus}}{thus}}
 
 Labels should be avoided. They interrupt the flow of the reader who has to
 scan the context for the point where the label was introduced. Ideally, the
 proof is a linear flow, where the output of one step becomes the input of the
 next step, piping the previously proved fact into the next proof, like
-in a UNIX pipe. In such cases the predefined name @{text this} can be used
+in a UNIX pipe. In such cases the predefined name \<open>this\<close> can be used
 to refer to the proposition proved in the previous step. This allows us to
 eliminate all labels from our proof (we suppress the \isacom{lemma} statement):
 \<close>
@@ -142,7 +141,7 @@
 \medskip
 
 \begin{tabular}{r@ {\quad=\quad}l}
-\isacom{then} & \isacom{from} @{text this}\\
+\isacom{then} & \isacom{from} \<open>this\<close>\\
 \isacom{thus} & \isacom{then} \isacom{show}\\
 \isacom{hence} & \isacom{then} \isacom{have}
 \end{tabular}
@@ -194,7 +193,7 @@
 assumptions can be separated by \isacom{and}. The
 \isacom{shows} part gives the goal. The actual theorem that will come out of
 the proof is \noquotes{@{prop[source]"surj f \<Longrightarrow> False"}}, but during the proof the assumption
-\noquotes{@{prop[source]"surj f"}} is available under the name @{text s} like any other fact.
+\noquotes{@{prop[source]"surj f"}} is available under the name \<open>s\<close> like any other fact.
 \<close>
 
 proof -
@@ -211,13 +210,13 @@
 there is no such rule and \isacom{proof} would fail.
 \end{warn}
 In the \isacom{have} step the assumption \noquotes{@{prop[source]"surj f"}} is now
-referenced by its name @{text s}. The duplication of \noquotes{@{prop[source]"surj f"}} in the
+referenced by its name \<open>s\<close>. The duplication of \noquotes{@{prop[source]"surj f"}} in the
 above proofs (once in the statement of the lemma, once in its proof) has been
 eliminated.
 
 Stating a lemma with \isacom{assumes}-\isacom{shows} implicitly introduces the
-name \indexed{@{text assms}}{assms} that stands for the list of all assumptions. You can refer
-to individual assumptions by @{text"assms(1)"}, @{text"assms(2)"}, etc.,
+name \indexed{\<open>assms\<close>}{assms} that stands for the list of all assumptions. You can refer
+to individual assumptions by \<open>assms(1)\<close>, \<open>assms(2)\<close>, etc.,
 thus obviating the need to name them individually.
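+For example (a minimal sketch):
+\begin{quote}
+\isacom{lemma} \isacom{assumes} \<open>"A"\<close> \isacom{and} \<open>"B"\<close> \isacom{shows} \<open>"B \<and> A"\<close>\\
+\isacom{proof} -\\
+\quad\isacom{from} \<open>assms(2,1)\<close> \isacom{show} \<open>"B \<and> A"\<close> \isacom{by} \<open>blast\<close>\\
+\isacom{qed}
+\end{quote}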
 
 \section{Proof Patterns}
@@ -232,9 +231,9 @@
 \fi
 
 We start with two forms of \concept{case analysis}:
-starting from a formula @{text P} we have the two cases @{text P} and
+starting from a formula \<open>P\<close> we have the two cases \<open>P\<close> and
 @{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
-we have the two cases @{text P} and @{text Q}:
+we have the two cases \<open>P\<close> and \<open>Q\<close>:
 \<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
@@ -252,7 +251,7 @@
   show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 text_raw \<open>}
-\end{minipage}\index{cases@@{text cases}}
+\end{minipage}\index{cases@\<open>cases\<close>}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
@@ -360,12 +359,12 @@
 \medskip
 \begin{isamarkuptext}%
 In the proof of \noquotes{@{prop[source]"\<forall>x. P(x)"}},
-the step \indexed{\isacom{fix}}{fix}~@{text x} introduces a locally fixed variable @{text x}
+the step \indexed{\isacom{fix}}{fix}~\<open>x\<close> introduces a locally fixed variable \<open>x\<close>
 into the subproof, the proverbial ``arbitrary but fixed value''.
-Instead of @{text x} we could have chosen any name in the subproof.
+Instead of \<open>x\<close> we could have chosen any name in the subproof.
 In the proof of \noquotes{@{prop[source]"\<exists>x. P(x)"}},
-@{text witness} is some arbitrary
-term for which we can prove that it satisfies @{text P}.
+\<open>witness\<close> is some arbitrary
+term for which we can prove that it satisfies \<open>P\<close>.
 
 How to reason forward from \noquotes{@{prop[source] "\<exists>x. P(x)"}}:
 \end{isamarkuptext}%
@@ -375,11 +374,11 @@
 then obtain x where p: "P(x)" by blast
 (*<*)oops(*>*)
 text\<open>
-After the \indexed{\isacom{obtain}}{obtain} step, @{text x} (we could have chosen any name)
+After the \indexed{\isacom{obtain}}{obtain} step, \<open>x\<close> (we could have chosen any name)
 is a fixed local
-variable, and @{text p} is the name of the fact
+variable, and \<open>p\<close> is the name of the fact
 \noquotes{@{prop[source] "P(x)"}}.
-This pattern works for one or more @{text x}.
+This pattern works for one or more \<open>x\<close>.
 As an example of the \isacom{obtain} command, here is the proof of
 Cantor's theorem in more detail:
 \<close>
@@ -516,11 +515,11 @@
 This can make the text harder to read, write and maintain. Pattern matching
 is an abbreviation mechanism to avoid such duplication. Writing
 \begin{quote}
-\isacom{show} \ \textit{formula} @{text"("}\indexed{\isacom{is}}{is} \textit{pattern}@{text")"}
+\isacom{show} \ \textit{formula} \<open>(\<close>\indexed{\isacom{is}}{is} \textit{pattern}\<open>)\<close>
 \end{quote}
 matches the pattern against the formula, thus instantiating the unknowns in
 the pattern for later use. As an example, consider the proof pattern for
-@{text"\<longleftrightarrow>"}:
+\<open>\<longleftrightarrow>\<close>:
 \end{isamarkuptext}%
 \<close>
 (*<*)lemma "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" proof-(*>*)
@@ -535,12 +534,12 @@
   show "?L" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text\<open>Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
-the two abbreviations @{text"?L"} and @{text"?R"} by pattern matching.
+text\<open>Instead of duplicating \<open>formula\<^sub>i\<close> in the text, we introduce
+the two abbreviations \<open>?L\<close> and \<open>?R\<close> by pattern matching.
 Pattern matching works wherever a formula is stated, in particular
 with \isacom{have} and \isacom{lemma}.
 
-The unknown \indexed{@{text"?thesis"}}{thesis} is implicitly matched against any goal stated by
+The unknown \indexed{\<open>?thesis\<close>}{thesis} is implicitly matched against any goal stated by
 \isacom{lemma} or \isacom{show}. Here is a typical example:\<close>
 
 lemma "formula"
@@ -552,34 +551,34 @@
 text\<open>
 Unknowns can also be instantiated with \indexed{\isacom{let}}{let} commands
 \begin{quote}
-\isacom{let} @{text"?t"} = @{text"\""}\textit{some-big-term}@{text"\""}
+\isacom{let} \<open>?t\<close> = \<open>"\<close>\textit{some-big-term}\<open>"\<close>
 \end{quote}
-Later proof steps can refer to @{text"?t"}:
+Later proof steps can refer to \<open>?t\<close>:
 \begin{quote}
-\isacom{have} @{text"\""}\dots @{text"?t"} \dots@{text"\""}
+\isacom{have} \<open>"\<close>\dots \<open>?t\<close> \dots\<open>"\<close>
 \end{quote}
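+For instance (a minimal sketch):
+\begin{quote}
+\isacom{let} \<open>?t\<close> = \<open>"n * (n + 1) div 2"\<close>\\
+\isacom{have} \<open>"\<Sum>{0..n::nat} = ?t"\<close> \ \isasymproof
+\end{quote}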
 \begin{warn}
-Names of facts are introduced with @{text"name:"} and refer to proved
-theorems. Unknowns @{text"?X"} refer to terms or formulas.
+Names of facts are introduced with \<open>name:\<close> and refer to proved
+theorems. Unknowns \<open>?X\<close> refer to terms or formulas.
 \end{warn}
 
 Although abbreviations shorten the text, the reader needs to remember what
-they stand for. Similarly for names of facts. Names like @{text 1}, @{text 2}
-and @{text 3} are not helpful and should only be used in short proofs. For
+they stand for. Similarly for names of facts. Names like \<open>1\<close>, \<open>2\<close>
+and \<open>3\<close> are not helpful and should only be used in short proofs. For
 longer proofs, descriptive names are better. But look at this example:
 \begin{quote}
-\isacom{have} \ @{text"x_gr_0: \"x > 0\""}\\
+\isacom{have} \ \<open>x_gr_0: "x > 0"\<close>\\
 $\vdots$\\
-\isacom{from} @{text "x_gr_0"} \dots
+\isacom{from} \<open>x_gr_0\<close> \dots
 \end{quote}
 The name is longer than the fact it stands for! Short facts do not need names;
 one can refer to them easily by quoting them:
 \begin{quote}
-\isacom{have} \ @{text"\"x > 0\""}\\
+\isacom{have} \ \<open>"x > 0"\<close>\\
 $\vdots$\\
-\isacom{from} @{text "`x>0`"} \dots\index{$IMP053@@{text"`...`"}}
+\isacom{from} \<open>`x>0`\<close> \dots\index{$IMP053@\<open>`...`\<close>}
 \end{quote}
-Note that the quotes around @{text"x>0"} are \conceptnoidx{back quotes}.
+Note that the quotes around \<open>x>0\<close> are \conceptnoidx{back quotes}.
 They refer to the fact not by name but by value.
 
 \subsection{\indexed{\isacom{moreover}}{moreover}}
@@ -633,15 +632,15 @@
 variables at the end. This is simply an extension of the basic
 \indexed{\isacom{have}}{have} construct:
 \begin{quote}
-\indexed{\isacom{have}}{have} @{text"B"}\
- \indexed{\isacom{if}}{if} \<open>name:\<close> @{text"A\<^sub>1 \<dots> A\<^sub>m"}\
- \indexed{\isacom{for}}{for} @{text"x\<^sub>1 \<dots> x\<^sub>n"}\\
+\indexed{\isacom{have}}{have} \<open>B\<close>\
+ \indexed{\isacom{if}}{if} \<open>name:\<close> \<open>A\<^sub>1 \<dots> A\<^sub>m\<close>\
+ \indexed{\isacom{for}}{for} \<open>x\<^sub>1 \<dots> x\<^sub>n\<close>\\
 \isasymproof
 \end{quote}
-proves @{text"\<lbrakk> A\<^sub>1; \<dots> ; A\<^sub>m \<rbrakk> \<Longrightarrow> B"}
-where all @{text"x\<^sub>i"} have been replaced by unknowns @{text"?x\<^sub>i"}.
+proves \<open>\<lbrakk> A\<^sub>1; \<dots> ; A\<^sub>m \<rbrakk> \<Longrightarrow> B\<close>
+where all \<open>x\<^sub>i\<close> have been replaced by unknowns \<open>?x\<^sub>i\<close>.
 As an example we prove a simple fact about divisibility on integers.
-The definition of @{text "dvd"} is @{thm dvd_def}.
+The definition of \<open>dvd\<close> is @{thm dvd_def}.
 \end{isamarkuptext}%
 \<close>
 
@@ -677,8 +676,8 @@
 (*<*)oops(*>*)
 text\<open>
 Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
-such that @{text"take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]"} and
-@{text"drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]"}. Let sledgehammer find and apply
+such that \<open>take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]\<close> and
+\<open>drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]\<close>. Let sledgehammer find and apply
 the relevant @{const take} and @{const drop} lemmas for you.
 \endexercise
 
@@ -689,9 +688,9 @@
 \index{case analysis|(}
 
 We have seen case analysis on formulas. Now we want to distinguish
-which form some term takes: is it @{text 0} or of the form @{term"Suc n"},
+which form some term takes: is it \<open>0\<close> or of the form @{term"Suc n"},
 is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
-proof by case analysis on the form of @{text xs}:
+proof by case analysis on the form of \<open>xs\<close>:
 \<close>
 
 lemma "length(tl xs) = length xs - 1"
@@ -703,22 +702,22 @@
   thus ?thesis by simp
 qed
 
-text\<open>\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
+text\<open>\index{cases@\<open>cases\<close>|(}Function \<open>tl\<close> (``tail'') is defined by @{thm list.sel(2)} and
 @{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
 and @{prop"0 - 1 = (0::nat)"}.
 
-This proof pattern works for any term @{text t} whose type is a datatype.
-The goal has to be proved for each constructor @{text C}:
+This proof pattern works for any term \<open>t\<close> whose type is a datatype.
+The goal has to be proved for each constructor \<open>C\<close>:
 \begin{quote}
-\isacom{fix} \ @{text"x\<^sub>1 \<dots> x\<^sub>n"} \isacom{assume} @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""}
+\isacom{fix} \ \<open>x\<^sub>1 \<dots> x\<^sub>n\<close> \isacom{assume} \<open>"t = C x\<^sub>1 \<dots> x\<^sub>n"\<close>
 \end{quote}\index{case@\isacom{case}|(}
 Each case can be written in a more compact form by means of the \isacom{case}
 command:
 \begin{quote}
-\isacom{case} @{text "(C x\<^sub>1 \<dots> x\<^sub>n)"}
+\isacom{case} \<open>(C x\<^sub>1 \<dots> x\<^sub>n)\<close>
 \end{quote}
 This is equivalent to the explicit \isacom{fix}-\isacom{assume} line
-but also gives the assumption @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""} a name: @{text C},
+but also gives the assumption \<open>"t = C x\<^sub>1 \<dots> x\<^sub>n"\<close> a name: \<open>C\<close>,
 like the constructor.
 Here is the \isacom{case} version of the proof above:
 \<close>
@@ -731,8 +730,8 @@
   thus ?thesis by simp
 qed
 
-text\<open>Remember that @{text Nil} and @{text Cons} are the alphanumeric names
-for @{text"[]"} and @{text"#"}. The names of the assumptions
+text\<open>Remember that \<open>Nil\<close> and \<open>Cons\<close> are the alphanumeric names
+for \<open>[]\<close> and \<open>#\<close>. The names of the assumptions
 are not used because they are directly piped (via \isacom{thus})
 into the proof of the claim.
 \index{case analysis|)}
@@ -742,8 +741,8 @@
 \index{structural induction|(}
 
 We illustrate structural induction with an example based on natural numbers:
-the sum (@{text"\<Sum>"}) of the first @{text n} natural numbers
-(@{text"{0..n::nat}"}) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
+the sum (\<open>\<Sum>\<close>) of the first \<open>n\<close> natural numbers
+(\<open>{0..n::nat}\<close>) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
 Never mind the details, just focus on the pattern:
 \<close>
 
@@ -768,12 +767,12 @@
   thus "?P(Suc n)" by simp
 qed
 
-text\<open>The first line introduces an abbreviation @{text"?P n"} for the goal.
-Pattern matching @{text"?P n"} with the goal instantiates @{text"?P"} to the
+text\<open>The first line introduces an abbreviation \<open>?P n\<close> for the goal.
+Pattern matching \<open>?P n\<close> with the goal instantiates \<open>?P\<close> to the
 function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}.  Now the proposition to
-be proved in the base case can be written as @{text"?P 0"}, the induction
-hypothesis as @{text"?P n"}, and the conclusion of the induction step as
-@{text"?P(Suc n)"}.
+be proved in the base case can be written as \<open>?P 0\<close>, the induction
+hypothesis as \<open>?P n\<close>, and the conclusion of the induction step as
+\<open>?P(Suc n)\<close>.
 
 Induction also provides the \isacom{case} idiom that abbreviates
 the \isacom{fix}-\isacom{assume} step. The above proof becomes
@@ -788,9 +787,9 @@
 qed
 
 text\<open>
-The unknown @{text"?case"}\index{case?@@{text"?case"}|(} is set in each case to the required
-claim, i.e., @{text"?P 0"} and \mbox{@{text"?P(Suc n)"}} in the above proof,
-without requiring the user to define a @{text "?P"}. The general
+The unknown \<open>?case\<close>\index{case?@\<open>?case\<close>|(} is set in each case to the required
+claim, i.e., \<open>?P 0\<close> and \mbox{\<open>?P(Suc n)\<close>} in the above proof,
+without requiring the user to define a \<open>?P\<close>. The general
 pattern for induction over @{typ nat} is shown on the left-hand side:
 \<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
@@ -815,12 +814,12 @@
 \begin{minipage}[t]{.4\textwidth}
 ~\\
 ~\\
-\isacom{let} @{text"?case = \"P(0)\""}\\
+\isacom{let} \<open>?case = "P(0)"\<close>\\
 ~\\
 ~\\
 ~\\[1ex]
-\isacom{fix} @{text n} \isacom{assume} @{text"Suc: \"P(n)\""}\\
-\isacom{let} @{text"?case = \"P(Suc n)\""}\\
+\isacom{fix} \<open>n\<close> \isacom{assume} \<open>Suc: "P(n)"\<close>\\
+\isacom{let} \<open>?case = "P(Suc n)"\<close>\\
 \end{minipage}
 \end{tabular}
 \medskip
@@ -833,36 +832,36 @@
 proposition to be proved in each case is not the whole implication but only
 its conclusion; the premises of the implication are immediately made
 assumptions of that case. That is, if in the above proof we replace
-\isacom{show}~@{text"\"P(n)\""} by
-\mbox{\isacom{show}~@{text"\"A(n) \<Longrightarrow> P(n)\""}}
-then \isacom{case}~@{text 0} stands for
+\isacom{show}~\<open>"P(n)"\<close> by
+\mbox{\isacom{show}~\<open>"A(n) \<Longrightarrow> P(n)"\<close>}
+then \isacom{case}~\<open>0\<close> stands for
 \begin{quote}
-\isacom{assume} \ @{text"0: \"A(0)\""}\\
-\isacom{let} @{text"?case = \"P(0)\""}
+\isacom{assume} \ \<open>0: "A(0)"\<close>\\
+\isacom{let} \<open>?case = "P(0)"\<close>
 \end{quote}
-and \isacom{case}~@{text"(Suc n)"} stands for
+and \isacom{case}~\<open>(Suc n)\<close> stands for
 \begin{quote}
-\isacom{fix} @{text n}\\
-\isacom{assume} @{text"Suc:"}
-  \begin{tabular}[t]{l}@{text"\"A(n) \<Longrightarrow> P(n)\""}\\@{text"\"A(Suc n)\""}\end{tabular}\\
-\isacom{let} @{text"?case = \"P(Suc n)\""}
+\isacom{fix} \<open>n\<close>\\
+\isacom{assume} \<open>Suc:\<close>
+  \begin{tabular}[t]{l}\<open>"A(n) \<Longrightarrow> P(n)"\<close>\\\<open>"A(Suc n)"\<close>\end{tabular}\\
+\isacom{let} \<open>?case = "P(Suc n)"\<close>
 \end{quote}
-The list of assumptions @{text Suc} is actually subdivided
-into @{text"Suc.IH"}, the induction hypotheses (here @{text"A(n) \<Longrightarrow> P(n)"}),
-and @{text"Suc.prems"}, the premises of the goal being proved
-(here @{text"A(Suc n)"}).
+The list of assumptions \<open>Suc\<close> is actually subdivided
+into \<open>Suc.IH\<close>, the induction hypotheses (here \<open>A(n) \<Longrightarrow> P(n)\<close>),
+and \<open>Suc.prems\<close>, the premises of the goal being proved
+(here \<open>A(Suc n)\<close>).
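+For example (schematically, pretending that \<open>A(n)\<close> follows from
+\<open>A(Suc n)\<close>), the two parts can be referenced separately:
+\begin{quote}
+\isacom{from} \<open>Suc.prems\<close> \isacom{have} \<open>"A(n)"\<close> \ \isasymproof\\
+\isacom{from} \<open>Suc.IH\<close> \<open>this\<close> \isacom{have} \<open>"P(n)"\<close> \quad\isacom{by} \<open>blast\<close>
+\end{quote}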
 
 Induction works for any datatype.
-Proving a goal @{text"\<lbrakk> A\<^sub>1(x); \<dots>; A\<^sub>k(x) \<rbrakk> \<Longrightarrow> P(x)"}
-by induction on @{text x} generates a proof obligation for each constructor
-@{text C} of the datatype. The command \isacom{case}~@{text"(C x\<^sub>1 \<dots> x\<^sub>n)"}
+Proving a goal \<open>\<lbrakk> A\<^sub>1(x); \<dots>; A\<^sub>k(x) \<rbrakk> \<Longrightarrow> P(x)\<close>
+by induction on \<open>x\<close> generates a proof obligation for each constructor
+\<open>C\<close> of the datatype. The command \isacom{case}~\<open>(C x\<^sub>1 \<dots> x\<^sub>n)\<close>
 performs the following steps:
 \begin{enumerate}
-\item \isacom{fix} @{text"x\<^sub>1 \<dots> x\<^sub>n"}
-\item \isacom{assume} the induction hypotheses (calling them @{text C.IH}\index{IH@@{text".IH"}})
- and the premises \mbox{@{text"A\<^sub>i(C x\<^sub>1 \<dots> x\<^sub>n)"}} (calling them @{text"C.prems"}\index{prems@@{text".prems"}})
- and calling the whole list @{text C}
-\item \isacom{let} @{text"?case = \"P(C x\<^sub>1 \<dots> x\<^sub>n)\""}
+\item \isacom{fix} \<open>x\<^sub>1 \<dots> x\<^sub>n\<close>
+\item \isacom{assume} the induction hypotheses (calling them \<open>C.IH\<close>\index{IH@\<open>.IH\<close>})
+ and the premises \mbox{\<open>A\<^sub>i(C x\<^sub>1 \<dots> x\<^sub>n)\<close>} (calling them \<open>C.prems\<close>\index{prems@\<open>.prems\<close>})
+ and calling the whole list \<open>C\<close>
+\item \isacom{let} \<open>?case = "P(C x\<^sub>1 \<dots> x\<^sub>n)"\<close>
 \end{enumerate}
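+For example (schematically), when proving \<open>P(xs)\<close> by induction on a list
+\<open>xs\<close> without premises, \isacom{case}~\<open>(Cons x xs)\<close> stands for
+\begin{quote}
+\isacom{fix} \<open>x xs\<close>\\
+\isacom{assume} \<open>Cons: "P(xs)"\<close>\\
+\isacom{let} \<open>?case = "P(Cons x xs)"\<close>
+\end{quote}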
 \index{structural induction|)}
 
@@ -947,13 +946,13 @@
 \begin{minipage}[t]{.5\textwidth}
 ~\\
 ~\\
-\isacom{let} @{text"?case = \"evn 0\""}\\
+\isacom{let} \<open>?case = "evn 0"\<close>\\
 ~\\
 ~\\
-\isacom{fix} @{text n}\\
-\isacom{assume} @{text"evSS:"}
-  \begin{tabular}[t]{l} @{text"\"ev n\""}\\@{text"\"evn n\""}\end{tabular}\\
-\isacom{let} @{text"?case = \"evn(Suc(Suc n))\""}\\
+\isacom{fix} \<open>n\<close>\\
+\isacom{assume} \<open>evSS:\<close>
+  \begin{tabular}[t]{l} \<open>"ev n"\<close>\\\<open>"evn n"\<close>\end{tabular}\\
+\isacom{let} \<open>?case = "evn(Suc(Suc n))"\<close>\\
 \end{minipage}
 \end{tabular}
 \medskip
@@ -974,18 +973,18 @@
 \end{warn}
 
 In the case @{thm[source]evSS} of the proof above we have pretended that the
-system fixes a variable @{text n}.  But unless the user provides the name
-@{text n}, the system will just invent its own name that cannot be referred
+system fixes a variable \<open>n\<close>.  But unless the user provides the name
+\<open>n\<close>, the system will just invent its own name that cannot be referred
 to.  In the above proof, we do not need to refer to it, hence we do not give
 it a specific name. In case one needs to refer to it, one writes
 \begin{quote}
-\isacom{case} @{text"(evSS m)"}
+\isacom{case} \<open>(evSS m)\<close>
 \end{quote}
-like \isacom{case}~@{text"(Suc n)"} in earlier structural inductions.
-The name @{text m} is an arbitrary choice. As a result,
+like \isacom{case}~\<open>(Suc n)\<close> in earlier structural inductions.
+The name \<open>m\<close> is an arbitrary choice. As a result,
 case @{thm[source] evSS} is derived from a renamed version of
-rule @{thm[source] evSS}: @{text"ev m \<Longrightarrow> ev(Suc(Suc m))"}.
-Here is an example with a (contrived) intermediate step that refers to @{text m}:
+rule @{thm[source] evSS}: \<open>ev m \<Longrightarrow> ev(Suc(Suc m))\<close>.
+Here is an example with a (contrived) intermediate step that refers to \<open>m\<close>:
 \<close>
 
 lemma "ev n \<Longrightarrow> evn n"
@@ -999,10 +998,10 @@
 
 text\<open>
 \indent
-In general, let @{text I} be a (for simplicity unary) inductively defined
-predicate and let the rules in the definition of @{text I}
-be called @{text "rule\<^sub>1"}, \dots, @{text "rule\<^sub>n"}. A proof by rule
-induction follows this pattern:\index{inductionrule@@{text"induction ... rule:"}}
+In general, let \<open>I\<close> be a (for simplicity unary) inductively defined
+predicate and let the rules in the definition of \<open>I\<close>
+be called \<open>rule\<^sub>1\<close>, \dots, \<open>rule\<^sub>n\<close>. A proof by rule
+induction follows this pattern:\index{inductionrule@\<open>induction ... rule:\<close>}
 \<close>
 
 (*<*)
@@ -1027,35 +1026,35 @@
 
 text\<open>
 One can provide explicit variable names by writing
-\isacom{case}~@{text"(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)"}, thus renaming the first @{text k}
-free variables in rule @{text i} to @{text"x\<^sub>1 \<dots> x\<^sub>k"},
-going through rule @{text i} from left to right.
+\isacom{case}~\<open>(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)\<close>, thus renaming the first \<open>k\<close>
+free variables in rule \<open>i\<close> to \<open>x\<^sub>1 \<dots> x\<^sub>k\<close>,
+going through rule \<open>i\<close> from left to right.
 
 \subsection{Assumption Naming}
 \label{sec:assm-naming}
 
-In any induction, \isacom{case}~@{text name} sets up a list of assumptions
-also called @{text name}, which is subdivided into three parts:
+In any induction, \isacom{case}~\<open>name\<close> sets up a list of assumptions
+also called \<open>name\<close>, which is subdivided into three parts:
 \begin{description}
-\item[@{text name.IH}]\index{IH@@{text".IH"}} contains the induction hypotheses.
-\item[@{text name.hyps}]\index{hyps@@{text".hyps"}} contains all the other hypotheses of this case in the
+\item[\<open>name.IH\<close>]\index{IH@\<open>.IH\<close>} contains the induction hypotheses.
+\item[\<open>name.hyps\<close>]\index{hyps@\<open>.hyps\<close>} contains all the other hypotheses of this case in the
 induction rule. For rule inductions these are the hypotheses of rule
-@{text name}, for structural inductions these are empty.
-\item[@{text name.prems}]\index{prems@@{text".prems"}} contains the (suitably instantiated) premises
-of the statement being proved, i.e., the @{text A\<^sub>i} when
-proving @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}.
+\<open>name\<close>, for structural inductions these are empty.
+\item[\<open>name.prems\<close>]\index{prems@\<open>.prems\<close>} contains the (suitably instantiated) premises
+of the statement being proved, i.e., the \<open>A\<^sub>i\<close> when
+proving \<open>\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A\<close>.
 \end{description}
 \begin{warn}
-Proof method @{text induct} differs from @{text induction}
-only in this naming policy: @{text induct} does not distinguish
-@{text IH} from @{text hyps} but subsumes @{text IH} under @{text hyps}.
+Proof method \<open>induct\<close> differs from \<open>induction\<close>
+only in this naming policy: \<open>induct\<close> does not distinguish
+\<open>IH\<close> from \<open>hyps\<close> but subsumes \<open>IH\<close> under \<open>hyps\<close>.
 \end{warn}
 
 More complicated inductive proofs than the ones we have seen so far
-often need to refer to specific assumptions --- just @{text name} or even
-@{text name.prems} and @{text name.IH} can be too unspecific.
+often need to refer to specific assumptions --- just \<open>name\<close> or even
+\<open>name.prems\<close> and \<open>name.IH\<close> can be too unspecific.
 This is where the indexing of fact lists comes in handy, e.g.,
-@{text"name.IH(2)"} or @{text"name.prems(1-2)"}.
+\<open>name.IH(2)\<close> or \<open>name.prems(1-2)\<close>.
 
 \subsection{Rule Inversion}
 \label{sec:rule-inversion}
@@ -1089,17 +1088,17 @@
 
 text\<open>The key point here is that a case analysis over some inductively
 defined predicate is triggered by piping the given fact
-(here: \isacom{from}~@{text this}) into a proof by @{text cases}.
-Let us examine the assumptions available in each case. In case @{text ev0}
-we have @{text"n = 0"} and in case @{text evSS} we have @{prop"n = Suc(Suc k)"}
+(here: \isacom{from}~\<open>this\<close>) into a proof by \<open>cases\<close>.
+Let us examine the assumptions available in each case. In case \<open>ev0\<close>
+we have \<open>n = 0\<close> and in case \<open>evSS\<close> we have @{prop"n = Suc(Suc k)"}
 and @{prop"ev k"}. In each case the assumptions are available under the name
 of the case; there is no fine-grained naming schema like there is for induction.
 
 Sometimes some rules could not have been used to derive the given fact
 because constructors clash. As an extreme example consider
-rule inversion applied to @{prop"ev(Suc 0)"}: neither rule @{text ev0} nor
-rule @{text evSS} can yield @{prop"ev(Suc 0)"} because @{text"Suc 0"} unifies
-neither with @{text 0} nor with @{term"Suc(Suc n)"}. Impossible cases do not
+rule inversion applied to @{prop"ev(Suc 0)"}: neither rule \<open>ev0\<close> nor
+rule \<open>evSS\<close> can yield @{prop"ev(Suc 0)"} because \<open>Suc 0\<close> unifies
+neither with \<open>0\<close> nor with @{term"Suc(Suc n)"}. Impossible cases do not
 have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
 \<close>
 (*<*)
@@ -1123,28 +1122,28 @@
 \subsection{Advanced Rule Induction}
 \label{sec:advanced-rule-induction}
 
-So far, rule induction was always applied to goals of the form @{text"I x y z \<Longrightarrow> \<dots>"}
-where @{text I} is some inductively defined predicate and @{text x}, @{text y}, @{text z}
+So far, rule induction was always applied to goals of the form \<open>I x y z \<Longrightarrow> \<dots>\<close>
+where \<open>I\<close> is some inductively defined predicate and \<open>x\<close>, \<open>y\<close>, \<open>z\<close>
 are variables. In some rare situations one needs to deal with an assumption where
-not all arguments @{text r}, @{text s}, @{text t} are variables:
+not all arguments \<open>r\<close>, \<open>s\<close>, \<open>t\<close> are variables:
 \begin{isabelle}
-\isacom{lemma} @{text"\"I r s t \<Longrightarrow> \<dots>\""}
+\isacom{lemma} \<open>"I r s t \<Longrightarrow> \<dots>"\<close>
 \end{isabelle}
 Applying the standard form of
 rule induction in such a situation will lead to strange and typically unprovable goals.
 We can easily reduce this situation to the standard one by introducing
-new variables @{text x}, @{text y}, @{text z} and reformulating the goal like this:
+new variables \<open>x\<close>, \<open>y\<close>, \<open>z\<close> and reformulating the goal like this:
 \begin{isabelle}
-\isacom{lemma} @{text"\"I x y z \<Longrightarrow> x = r \<Longrightarrow> y = s \<Longrightarrow> z = t \<Longrightarrow> \<dots>\""}
+\isacom{lemma} \<open>"I x y z \<Longrightarrow> x = r \<Longrightarrow> y = s \<Longrightarrow> z = t \<Longrightarrow> \<dots>"\<close>
 \end{isabelle}
 Standard rule induction will work fine now, provided the free variables in
-@{text r}, @{text s}, @{text t} are generalized via @{text"arbitrary"}.
+\<open>r\<close>, \<open>s\<close>, \<open>t\<close> are generalized via \<open>arbitrary\<close>.
 
 However, induction can do the above transformation for us, behind the curtains, so we never
 need to see the expanded version of the lemma. This is what we need to write:
 \begin{isabelle}
-\isacom{lemma} @{text"\"I r s t \<Longrightarrow> \<dots>\""}\isanewline
-\isacom{proof}@{text"(induction \"r\" \"s\" \"t\" arbitrary: \<dots> rule: I.induct)"}\index{inductionrule@@{text"induction ... rule:"}}\index{arbitrary@@{text"arbitrary:"}}
+\isacom{lemma} \<open>"I r s t \<Longrightarrow> \<dots>"\<close>\isanewline
+\isacom{proof}\<open>(induction "r" "s" "t" arbitrary: \<dots> rule: I.induct)\<close>\index{inductionrule@\<open>induction ... rule:\<close>}\index{arbitrary@\<open>arbitrary:\<close>}
 \end{isabelle}
 Like for rule inversion, cases that are impossible because of constructor clashes
 will not show up at all. Here is a concrete example:\<close>
@@ -1167,31 +1166,31 @@
 Remarks:
 \begin{itemize}
 \item 
-Instead of the \isacom{case} and @{text ?case} magic we have spelled all formulas out.
+Instead of the \isacom{case} and \<open>?case\<close> magic we have spelled all formulas out.
 This is merely for greater clarity.
 \item
 We only need to deal with one case because the @{thm[source] ev0} case is impossible.
 \item
-The form of the @{text IH} shows us that internally the lemma was expanded as explained
+The form of the \<open>IH\<close> shows us that internally the lemma was expanded as explained
 above: \noquotes{@{prop[source]"ev x \<Longrightarrow> x = Suc m \<Longrightarrow> \<not> ev m"}}.
 \item
 The goal @{prop"\<not> ev (Suc n)"} may surprise. The expanded version of the lemma
-would suggest that we have a \isacom{fix} @{text m} \isacom{assume} @{prop"Suc(Suc n) = Suc m"}
+would suggest that we have a \isacom{fix} \<open>m\<close> \isacom{assume} @{prop"Suc(Suc n) = Suc m"}
 and need to show @{prop"\<not> ev m"}. What happened is that Isabelle immediately
 simplified @{prop"Suc(Suc n) = Suc m"} to @{prop"Suc n = m"} and could then eliminate
-@{text m}. Beware of such nice surprises with this advanced form of induction.
+\<open>m\<close>. Beware of such nice surprises with this advanced form of induction.
 \end{itemize}
 \begin{warn}
-This advanced form of induction does not support the @{text IH}
+This advanced form of induction does not support the \<open>IH\<close>
 naming schema explained in \autoref{sec:assm-naming}:
-the induction hypotheses are instead found under the name @{text hyps},
+the induction hypotheses are instead found under the name \<open>hyps\<close>,
 as they are for the simpler
-@{text induct} method.
+\<open>induct\<close> method.
 \end{warn}
 \index{induction|)}
-\index{cases@@{text"cases"}|)}
+\index{cases@\<open>cases\<close>|)}
 \index{case@\isacom{case}|)}
-\index{case?@@{text"?case"}|)}
+\index{case?@\<open>?case\<close>|)}
 \index{rule induction|)}
 \index{rule inversion|)}
 
@@ -1215,26 +1214,26 @@
 \end{exercise}
 
 \begin{exercise}
-Recall predicate @{text star} from \autoref{sec:star} and @{text iter}
+Recall predicate \<open>star\<close> from \autoref{sec:star} and \<open>iter\<close>
 from Exercise~\ref{exe:iter}. Prove @{prop "iter r n x y \<Longrightarrow> star r x y"}
 in a structured style; do not just sledgehammer each case of the
 required induction.
 \end{exercise}
 
 \begin{exercise}
-Define a recursive function @{text "elems ::"} @{typ"'a list \<Rightarrow> 'a set"}
+Define a recursive function \<open>elems ::\<close> @{typ"'a list \<Rightarrow> 'a set"}
 and prove @{prop "x \<in> elems xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> elems ys"}.
 \end{exercise}
 
 \begin{exercise}
 Extend Exercise~\ref{exe:cfg} with a function that checks if some
-\mbox{@{text "alpha list"}} is a balanced
+\mbox{\<open>alpha list\<close>} is a balanced
 string of parentheses. More precisely, define a \mbox{recursive} function
-@{text "balanced :: nat \<Rightarrow> alpha list \<Rightarrow> bool"} such that @{term"balanced n w"}
-is true iff (informally) @{text"S (a\<^sup>n @ w)"}. Formally, prove that
+\<open>balanced :: nat \<Rightarrow> alpha list \<Rightarrow> bool\<close> such that @{term"balanced n w"}
+is true iff (informally) \<open>S (a\<^sup>n @ w)\<close>. Formally, prove that
 @{prop "balanced n w \<longleftrightarrow> S (replicate n a @ w)"} where
-@{const replicate} @{text"::"} @{typ"nat \<Rightarrow> 'a \<Rightarrow> 'a list"} is predefined
-and @{term"replicate n x"} yields the list @{text"[x, \<dots>, x]"} of length @{text n}.
+@{const replicate} \<open>::\<close> @{typ"nat \<Rightarrow> 'a \<Rightarrow> 'a list"} is predefined
+and @{term"replicate n x"} yields the list \<open>[x, \<dots>, x]\<close> of length \<open>n\<close>.
 \end{exercise}
 \<close>
 
--- a/src/Doc/Prog_Prove/Logic.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Prog_Prove/Logic.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -13,7 +13,7 @@
 \begin{array}{rcl}
 
 \mathit{form} & ::= &
-  @{text"(form)"} ~\mid~
+  \<open>(form)\<close> ~\mid~
   @{const True} ~\mid~
   @{const False} ~\mid~
   @{prop "term = term"}\\
@@ -25,36 +25,36 @@
 \end{array}
 \]
 Terms are the ones we have seen all along, built from constants, variables,
-function application and @{text"\<lambda>"}-abstraction, including all the syntactic
-sugar like infix symbols, @{text "if"}, @{text "case"}, etc.
+function application and \<open>\<lambda>\<close>-abstraction, including all the syntactic
+sugar like infix symbols, \<open>if\<close>, \<open>case\<close>, etc.
 \begin{warn}
-Remember that formulas are simply terms of type @{text bool}. Hence
-@{text "="} also works for formulas. Beware that @{text"="} has a higher
+Remember that formulas are simply terms of type \<open>bool\<close>. Hence
+\<open>=\<close> also works for formulas. Beware that \<open>=\<close> has a higher
 precedence than the other logical operators. Hence @{prop"s = t \<and> A"} means
-@{text"(s = t) \<and> A"}, and @{prop"A\<and>B = B\<and>A"} means @{text"A \<and> (B = B) \<and> A"}.
+\<open>(s = t) \<and> A\<close>, and @{prop"A\<and>B = B\<and>A"} means \<open>A \<and> (B = B) \<and> A\<close>.
 Logical equivalence can also be written with
-@{text "\<longleftrightarrow>"} instead of @{text"="}, where @{text"\<longleftrightarrow>"} has the same low
-precedence as @{text"\<longrightarrow>"}. Hence @{text"A \<and> B \<longleftrightarrow> B \<and> A"} really means
-@{text"(A \<and> B) \<longleftrightarrow> (B \<and> A)"}.
+\<open>\<longleftrightarrow>\<close> instead of \<open>=\<close>, where \<open>\<longleftrightarrow>\<close> has the same low
+precedence as \<open>\<longrightarrow>\<close>. Hence \<open>A \<and> B \<longleftrightarrow> B \<and> A\<close> really means
+\<open>(A \<and> B) \<longleftrightarrow> (B \<and> A)\<close>.
 \end{warn}
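+For example (a minimal sketch), commutativity of conjunction is stated as
+\begin{quote}
+\isacom{lemma} \<open>"A \<and> B \<longleftrightarrow> B \<and> A"\<close> \quad\isacom{by} \<open>auto\<close>
+\end{quote}
+whereas \<open>"A \<and> B = B \<and> A"\<close> states the unprovable \<open>A \<and> (B = B) \<and> A\<close>.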
 \begin{warn}
 Quantifiers need to be enclosed in parentheses if they are nested within
-other constructs (just like @{text "if"}, @{text case} and @{text let}).
+other constructs (just like \<open>if\<close>, \<open>case\<close> and \<open>let\<close>).
 \end{warn}
 The most frequent logical symbols and their ASCII representations are shown
 in Fig.~\ref{fig:log-symbols}.
 \begin{figure}
 \begin{center}
 \begin{tabular}{l@ {\qquad}l@ {\qquad}l}
-@{text "\<forall>"} & \xsymbol{forall} & \texttt{ALL}\\
-@{text "\<exists>"} & \xsymbol{exists} & \texttt{EX}\\
-@{text "\<lambda>"} & \xsymbol{lambda} & \texttt{\%}\\
-@{text "\<longrightarrow>"} & \texttt{-{}->}\\
-@{text "\<longleftrightarrow>"} & \texttt{<->}\\
-@{text "\<and>"} & \texttt{/\char`\\} & \texttt{\&}\\
-@{text "\<or>"} & \texttt{\char`\\/} & \texttt{|}\\
-@{text "\<not>"} & \xsymbol{not} & \texttt{\char`~}\\
-@{text "\<noteq>"} & \xsymbol{noteq} & \texttt{\char`~=}
+\<open>\<forall>\<close> & \xsymbol{forall} & \texttt{ALL}\\
+\<open>\<exists>\<close> & \xsymbol{exists} & \texttt{EX}\\
+\<open>\<lambda>\<close> & \xsymbol{lambda} & \texttt{\%}\\
+\<open>\<longrightarrow>\<close> & \texttt{-{}->}\\
+\<open>\<longleftrightarrow>\<close> & \texttt{<->}\\
+\<open>\<and>\<close> & \texttt{/\char`\\} & \texttt{\&}\\
+\<open>\<or>\<close> & \texttt{\char`\\/} & \texttt{|}\\
+\<open>\<not>\<close> & \xsymbol{not} & \texttt{\char`~}\\
+\<open>\<noteq>\<close> & \xsymbol{noteq} & \texttt{\char`~=}
 \end{tabular}
 \end{center}
 \caption{Logical symbols and their ASCII forms}
@@ -68,59 +68,59 @@
 are special in that they are merely keyboard shortcuts for the interface and
 not logical symbols by themselves.
 \begin{warn}
-The implication @{text"\<Longrightarrow>"} is part of the Isabelle framework. It structures
+The implication \<open>\<Longrightarrow>\<close> is part of the Isabelle framework. It structures
 theorems and proof states, separating assumptions from conclusions.
-The implication @{text"\<longrightarrow>"} is part of the logic HOL and can occur inside the
+The implication \<open>\<longrightarrow>\<close> is part of the logic HOL and can occur inside the
 formulas that make up the assumptions and conclusion.
-Theorems should be of the form @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"},
-not @{text"A\<^sub>1 \<and> \<dots> \<and> A\<^sub>n \<longrightarrow> A"}. Both are logically equivalent
+Theorems should be of the form \<open>\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A\<close>,
+not \<open>A\<^sub>1 \<and> \<dots> \<and> A\<^sub>n \<longrightarrow> A\<close>. Both are logically equivalent
 but the first one works better when using the theorem in further proofs.
 \end{warn}
 
 \section{Sets}
 \label{sec:Sets}
 
-Sets of elements of type @{typ 'a} have type @{typ"'a set"}\index{set@@{text set}}.
+Sets of elements of type @{typ 'a} have type @{typ"'a set"}\index{set@\<open>set\<close>}.
 They can be finite or infinite. Sets come with the usual notation:
 \begin{itemize}
-\item \indexed{@{term"{}"}}{$IMP042},\quad @{text"{e\<^sub>1,\<dots>,e\<^sub>n}"}
+\item \indexed{@{term"{}"}}{$IMP042},\quad \<open>{e\<^sub>1,\<dots>,e\<^sub>n}\<close>
 \item @{prop"e \<in> A"}\index{$HOLSet0@\isasymin},\quad @{prop"A \<subseteq> B"}\index{$HOLSet2@\isasymsubseteq}
 \item @{term"A \<union> B"}\index{$HOLSet4@\isasymunion},\quad @{term"A \<inter> B"}\index{$HOLSet5@\isasyminter},\quad @{term"A - B"},\quad @{term"-A"}
 \end{itemize}
-(where @{term"A-B"} and @{text"-A"} are set difference and complement)
+(where @{term"A-B"} and \<open>-A\<close> are set difference and complement)
 and much more. @{const UNIV} is the set of all elements of some type.
 Set comprehension\index{set comprehension} is written
-@{term"{x. P}"}\index{$IMP042@@{term"{x. P}"}} rather than @{text"{x | P}"}.
+@{term"{x. P}"}\index{$IMP042@@{term"{x. P}"}} rather than \<open>{x | P}\<close>.
 \begin{warn}
-In @{term"{x. P}"} the @{text x} must be a variable. Set comprehension
-involving a proper term @{text t} must be written
-\noquotes{@{term[source] "{t | x y. P}"}}\index{$IMP042@@{text"{t |x. P}"}},
-where @{text "x y"} are those free variables in @{text t}
-that occur in @{text P}.
+In @{term"{x. P}"} the \<open>x\<close> must be a variable. Set comprehension
+involving a proper term \<open>t\<close> must be written
+\noquotes{@{term[source] "{t | x y. P}"}}\index{$IMP042@\<open>{t |x. P}\<close>},
+where \<open>x y\<close> are those free variables in \<open>t\<close>
+that occur in \<open>P\<close>.
 This is just a shorthand for @{term"{v. \<exists>x y. v = t \<and> P}"}, where
-@{text v} is a new variable. For example, @{term"{x+y|x. x \<in> A}"}
+\<open>v\<close> is a new variable. For example, @{term"{x+y|x. x \<in> A}"}
 is short for \noquotes{@{term[source]"{v. \<exists>x. v = x+y \<and> x \<in> A}"}}.
 \end{warn}
 
 Here are the ASCII representations of the mathematical symbols:
 \begin{center}
 \begin{tabular}{l@ {\quad}l@ {\quad}l}
-@{text "\<in>"} & \texttt{\char`\\\char`\<in>} & \texttt{:}\\
-@{text "\<subseteq>"} & \texttt{\char`\\\char`\<subseteq>} & \texttt{<=}\\
-@{text "\<union>"} & \texttt{\char`\\\char`\<union>} & \texttt{Un}\\
-@{text "\<inter>"} & \texttt{\char`\\\char`\<inter>} & \texttt{Int}
+\<open>\<in>\<close> & \texttt{\char`\\\char`\<in>} & \texttt{:}\\
+\<open>\<subseteq>\<close> & \texttt{\char`\\\char`\<subseteq>} & \texttt{<=}\\
+\<open>\<union>\<close> & \texttt{\char`\\\char`\<union>} & \texttt{Un}\\
+\<open>\<inter>\<close> & \texttt{\char`\\\char`\<inter>} & \texttt{Int}
 \end{tabular}
 \end{center}
 Sets also allow bounded quantifications @{prop"\<forall>x \<in> A. P"} and
 @{prop"\<exists>x \<in> A. P"}.
 
-For the more ambitious, there are also @{text"\<Union>"}\index{$HOLSet6@\isasymUnion}
-and @{text"\<Inter>"}\index{$HOLSet7@\isasymInter}:
+For the more ambitious, there are also \<open>\<Union>\<close>\index{$HOLSet6@\isasymUnion}
+and \<open>\<Inter>\<close>\index{$HOLSet7@\isasymInter}:
 \begin{center}
 @{thm Union_eq} \qquad @{thm Inter_eq}
 \end{center}
-The ASCII forms of @{text"\<Union>"} are \texttt{\char`\\\char`\<Union>} and \texttt{Union},
-those of @{text"\<Inter>"} are \texttt{\char`\\\char`\<Inter>} and \texttt{Inter}.
+The ASCII forms of \<open>\<Union>\<close> are \texttt{\char`\\\char`\<Union>} and \texttt{Union},
+those of \<open>\<Inter>\<close> are \texttt{\char`\\\char`\<Inter>} and \texttt{Inter}.
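 text\<open>A tiny illustration of \<open>\<Union>\<close> on an explicitly given set of sets:\<close>
 lemma "\<Union> {{1::nat}, {2}} = {1, 2}"
 by auto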
 There are also indexed unions and intersections:
 \begin{center}
 @{thm[eta_contract=false] UNION_eq} \\ @{thm[eta_contract=false] INTER_eq}
@@ -135,7 +135,7 @@
 @{const_typ set}\index{set@@{const set}} & converts a list to the set of its elements\\
 @{const_typ finite}\index{finite@@{const finite}} & is true iff its argument is finite\\
 \noquotes{@{term[source] "card :: 'a set \<Rightarrow> nat"}}\index{card@@{const card}} & is the cardinality of a finite set\\
- & and is @{text 0} for all infinite sets\\
+ & and is \<open>0\<close> for all infinite sets\\
 @{thm image_def}\index{$IMP042@@{term"f ` A"}} & is the image of a function over a set
 \end{tabular}
 \end{center}
@@ -152,23 +152,23 @@
 datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
 
 text\<open>
-Define a function @{text "set ::"} @{typ "'a tree \<Rightarrow> 'a set"}
+Define a function \<open>set ::\<close> @{typ "'a tree \<Rightarrow> 'a set"}
 that returns the elements in a tree and a function
-@{text "ord ::"} @{typ "int tree \<Rightarrow> bool"}
+\<open>ord ::\<close> @{typ "int tree \<Rightarrow> bool"}
 that tests if an @{typ "int tree"} is ordered.
 
-Define a function @{text ins} that inserts an element into an ordered @{typ "int tree"}
+Define a function \<open>ins\<close> that inserts an element into an ordered @{typ "int tree"}
 while maintaining the order of the tree. If the element is already in the tree, the
-same tree should be returned. Prove correctness of @{text ins}:
+same tree should be returned. Prove correctness of \<open>ins\<close>:
 @{prop "set(ins x t) = {x} \<union> set t"} and @{prop "ord t \<Longrightarrow> ord(ins i t)"}.
 \endexercise
 
 
 \section{Proof Automation}
 
-So far we have only seen @{text simp} and \indexed{@{text auto}}{auto}: Both perform
+So far we have only seen \<open>simp\<close> and \indexed{\<open>auto\<close>}{auto}: Both perform
 rewriting, both can also prove linear arithmetic facts (no multiplication),
-and @{text auto} is also able to prove simple logical or set-theoretic goals:
+and \<open>auto\<close> is also able to prove simple logical or set-theoretic goals:
 \<close>
 
 lemma "\<forall>x. \<exists>y. x = y"
@@ -186,7 +186,7 @@
 \isacom{apply} \textit{proof-method}\\
 \isacom{done}
 \end{quote}
-The key characteristics of both @{text simp} and @{text auto} are
+The key characteristics of both \<open>simp\<close> and \<open>auto\<close> are
 \begin{itemize}
 \item They show you where they got stuck, giving you an idea how to continue.
 \item They perform the obvious steps but are highly incomplete.
@@ -195,10 +195,10 @@
 There is no complete proof method for HOL, not even in theory.
 Hence all our proof methods only differ in how incomplete they are.
 
-A proof method that is still incomplete but tries harder than @{text auto} is
-\indexed{@{text fastforce}}{fastforce}.  It either succeeds or fails, it acts on the first
-subgoal only, and it can be modified like @{text auto}, e.g.,
-with @{text "simp add"}. Here is a typical example of what @{text fastforce}
+A proof method that is still incomplete but tries harder than \<open>auto\<close> is
+\indexed{\<open>fastforce\<close>}{fastforce}.  It either succeeds or fails, it acts on the first
+subgoal only, and it can be modified like \<open>auto\<close>, e.g.,
+with \<open>simp add\<close>. Here is a typical example of what \<open>fastforce\<close>
 can do:
 \<close>
 
@@ -206,15 +206,15 @@
    \<Longrightarrow> \<exists>n. length us = n+n"
 by fastforce
 
-text\<open>This lemma is out of reach for @{text auto} because of the
-quantifiers.  Even @{text fastforce} fails when the quantifier structure
-becomes more complicated. In a few cases, its slow version @{text force}
-succeeds where @{text fastforce} fails.
+text\<open>This lemma is out of reach for \<open>auto\<close> because of the
+quantifiers.  Even \<open>fastforce\<close> fails when the quantifier structure
+becomes more complicated. In a few cases, its slow version \<open>force\<close>
+succeeds where \<open>fastforce\<close> fails.
 
-The method of choice for complex logical goals is \indexed{@{text blast}}{blast}. In the
-following example, @{text T} and @{text A} are two binary predicates. It
-is shown that if @{text T} is total, @{text A} is antisymmetric and @{text T} is
-a subset of @{text A}, then @{text A} is a subset of @{text T}:
+The method of choice for complex logical goals is \indexed{\<open>blast\<close>}{blast}. In the
+following example, \<open>T\<close> and \<open>A\<close> are two binary predicates. It
+is shown that if \<open>T\<close> is total, \<open>A\<close> is antisymmetric and \<open>T\<close> is
+a subset of \<open>A\<close>, then \<open>A\<close> is a subset of \<open>T\<close>:
 \<close>
 
 lemma
@@ -226,7 +226,7 @@
 
 text\<open>
 We leave it to the reader to figure out why this lemma is true.
-Method @{text blast}
+Method \<open>blast\<close>
 \begin{itemize}
 \item is (in principle) a complete proof procedure for first-order formulas,
   a fragment of HOL. In practice there is a search bound.
@@ -257,10 +257,10 @@
 text\<open>We do not explain how the proof was found but what this command
 means. For a start, Isabelle does not trust external tools (and in particular
 not the translations from Isabelle's logic to those tools!)
-and insists on a proof that it can check. This is what \indexed{@{text metis}}{metis} does.
+and insists on a proof that it can check. This is what \indexed{\<open>metis\<close>}{metis} does.
 It is given a list of lemmas and tries to find a proof using just those lemmas
-(and pure logic). In contrast to using @{text simp} and friends who know a lot of
-lemmas already, using @{text metis} manually is tedious because one has
+(and pure logic). In contrast to using \<open>simp\<close> and friends, which know a lot of
+lemmas already, using \<open>metis\<close> manually is tedious because one has
 to find all the relevant lemmas first. But that is precisely what
 \isacom{sledgehammer} does for us.
 In this case lemma @{thm[source]append_eq_conv_conj} alone suffices:
@@ -274,32 +274,30 @@
 Just as for the other proof methods we have seen, there is no guarantee that
 \isacom{sledgehammer} will find a proof if it exists. Nor is
 \isacom{sledgehammer} superior to the other proof methods.  They are
-incomparable. Therefore it is recommended to apply @{text simp} or @{text
-auto} before invoking \isacom{sledgehammer} on what is left.
+incomparable. Therefore it is recommended to apply \<open>simp\<close> or \<open>auto\<close> before invoking \isacom{sledgehammer} on what is left.
 
 \subsection{Arithmetic}
 
-By arithmetic formulas we mean formulas involving variables, numbers, @{text
-"+"}, @{text"-"}, @{text "="}, @{text "<"}, @{text "\<le>"} and the usual logical
-connectives @{text"\<not>"}, @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"},
-@{text"\<longleftrightarrow>"}. Strictly speaking, this is known as \concept{linear arithmetic}
+By arithmetic formulas we mean formulas involving variables, numbers, \<open>+\<close>, \<open>-\<close>, \<open>=\<close>, \<open><\<close>, \<open>\<le>\<close> and the usual logical
+connectives \<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>,
+\<open>\<longleftrightarrow>\<close>. Strictly speaking, this is known as \concept{linear arithmetic}
 because it does not involve multiplication, although multiplication with
-numbers, e.g., @{text"2*n"}, is allowed. Such formulas can be proved by
-\indexed{@{text arith}}{arith}:
+numbers, e.g., \<open>2*n\<close>, is allowed. Such formulas can be proved by
+\indexed{\<open>arith\<close>}{arith}:
 \<close>
 
 lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
 by arith
 
-text\<open>In fact, @{text auto} and @{text simp} can prove many linear
+text\<open>In fact, \<open>auto\<close> and \<open>simp\<close> can prove many linear
 arithmetic formulas already, like the one above, by calling a weak but fast
-version of @{text arith}. Hence it is usually not necessary to invoke
-@{text arith} explicitly.
+version of \<open>arith\<close>. Hence it is usually not necessary to invoke
+\<open>arith\<close> explicitly.
 
 The above example involves natural numbers, but integers (type @{typ int})
-and real numbers (type @{text real}) are supported as well. As are a number
+and real numbers (type \<open>real\<close>) are supported as well. As are a number
 of further operators like @{const min} and @{const max}. On @{typ nat} and
-@{typ int}, @{text arith} can even prove theorems with quantifiers in them,
+@{typ int}, \<open>arith\<close> can even prove theorems with quantifiers in them,
 but we will not enlarge on that here.
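 text\<open>A small sketch: \<open>arith\<close> also copes with \<open>min\<close> and \<open>max\<close>:\<close>
 lemma "min (i::int) j \<le> max i j"
 by arith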
 
 
@@ -313,48 +311,48 @@
 sledgehammer. If desired, specific simplification and introduction rules
 can be added:
 \begin{isabelle}
-\isacom{try0} @{text"simp: \<dots> intro: \<dots>"}
+\isacom{try0} \<open>simp: \<dots> intro: \<dots>\<close>
 \end{isabelle}
 
 \section{Single Step Proofs}
 
 Although automation is nice, it often fails, at least initially, and you need
-to find out why. When @{text fastforce} or @{text blast} simply fail, you have
+to find out why. When \<open>fastforce\<close> or \<open>blast\<close> simply fails, you have
 no clue why. At this point, the stepwise
-application of proof rules may be necessary. For example, if @{text blast}
+application of proof rules may be necessary. For example, if \<open>blast\<close>
 fails on @{prop"A \<and> B"}, you want to attack the two
-conjuncts @{text A} and @{text B} separately. This can
+conjuncts \<open>A\<close> and \<open>B\<close> separately. This can
 be achieved by applying \emph{conjunction introduction}
-\[ @{thm[mode=Rule,show_question_marks]conjI}\ @{text conjI}
+\[ @{thm[mode=Rule,show_question_marks]conjI}\ \<open>conjI\<close>
 \]
 to the proof state. We will now examine the details of this process.
 
 \subsection{Instantiating Unknowns}
 
 We had briefly mentioned earlier that after proving some theorem,
-Isabelle replaces all free variables @{text x} by so called \conceptidx{unknowns}{unknown}
-@{text "?x"}. We can see this clearly in rule @{thm[source] conjI}.
+Isabelle replaces all free variables \<open>x\<close> by so-called \conceptidx{unknowns}{unknown}
+\<open>?x\<close>. We can see this clearly in rule @{thm[source] conjI}.
 These unknowns can later be instantiated explicitly or implicitly:
 \begin{itemize}
-\item By hand, using \indexed{@{text of}}{of}.
-The expression @{text"conjI[of \"a=b\" \"False\"]"}
+\item By hand, using \indexed{\<open>of\<close>}{of}.
+The expression \<open>conjI[of "a=b" "False"]\<close>
 instantiates the unknowns in @{thm[source] conjI} from left to right with the
-two formulas @{text"a=b"} and @{text False}, yielding the rule
+two formulas \<open>a=b\<close> and \<open>False\<close>, yielding the rule
 @{thm[display,mode=Rule,margin=100]conjI[of "a=b" False]}
 
-In general, @{text"th[of string\<^sub>1 \<dots> string\<^sub>n]"} instantiates
-the unknowns in the theorem @{text th} from left to right with the terms
-@{text string\<^sub>1} to @{text string\<^sub>n}.
+In general, \<open>th[of string\<^sub>1 \<dots> string\<^sub>n]\<close> instantiates
+the unknowns in the theorem \<open>th\<close> from left to right with the terms
+\<open>string\<^sub>1\<close> to \<open>string\<^sub>n\<close>.
 
 \item By unification. \conceptidx{Unification}{unification} is the process of making two
 terms syntactically equal by suitable instantiations of unknowns. For example,
-unifying @{text"?P \<and> ?Q"} with \mbox{@{prop"a=b \<and> False"}} instantiates
-@{text "?P"} with @{prop "a=b"} and @{text "?Q"} with @{prop False}.
+unifying \<open>?P \<and> ?Q\<close> with \mbox{@{prop"a=b \<and> False"}} instantiates
+\<open>?P\<close> with @{prop "a=b"} and \<open>?Q\<close> with @{prop False}.
 \end{itemize}
 We need not instantiate all unknowns. If we want to skip a particular one we
-can write @{text"_"} instead, for example @{text "conjI[of _ \"False\"]"}.
-Unknowns can also be instantiated by name using \indexed{@{text "where"}}{where}, for example
-@{text "conjI[where ?P = \"a=b\""} \isacom{and} @{text"?Q = \"False\"]"}.
+can write \<open>_\<close> instead, for example \<open>conjI[of _ "False"]\<close>.
+Unknowns can also be instantiated by name using \indexed{\<open>where\<close>}{where}, for example
+\<open>conjI[where ?P = "a=b"\<close> \isacom{and} \<open>?Q = "False"]\<close>.
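 text\<open>Both styles of instantiation can be inspected with \isacom{thm}
 (\<open>a\<close> and \<open>b\<close> are arbitrary free variables):\<close>
 thm conjI[of "a = b" "False"]
 thm conjI[where ?P = "a = b" and ?Q = "False"]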
 
 
 \subsection{Rule Application}
@@ -362,26 +360,26 @@
 \conceptidx{Rule application}{rule application} means applying a rule backwards to a proof state.
 For example, applying rule @{thm[source]conjI} to a proof state
 \begin{quote}
-@{text"1.  \<dots>  \<Longrightarrow> A \<and> B"}
+\<open>1.  \<dots>  \<Longrightarrow> A \<and> B\<close>
 \end{quote}
 results in two subgoals, one for each premise of @{thm[source]conjI}:
 \begin{quote}
-@{text"1.  \<dots>  \<Longrightarrow> A"}\\
-@{text"2.  \<dots>  \<Longrightarrow> B"}
+\<open>1.  \<dots>  \<Longrightarrow> A\<close>\\
+\<open>2.  \<dots>  \<Longrightarrow> B\<close>
 \end{quote}
-In general, the application of a rule @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}
-to a subgoal \mbox{@{text"\<dots> \<Longrightarrow> C"}} proceeds in two steps:
+In general, the application of a rule \<open>\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A\<close>
+to a subgoal \mbox{\<open>\<dots> \<Longrightarrow> C\<close>} proceeds in two steps:
 \begin{enumerate}
 \item
-Unify @{text A} and @{text C}, thus instantiating the unknowns in the rule.
+Unify \<open>A\<close> and \<open>C\<close>, thus instantiating the unknowns in the rule.
 \item
-Replace the subgoal @{text C} with @{text n} new subgoals @{text"A\<^sub>1"} to @{text"A\<^sub>n"}.
+Replace the subgoal \<open>C\<close> with \<open>n\<close> new subgoals \<open>A\<^sub>1\<close> to \<open>A\<^sub>n\<close>.
 \end{enumerate}
-This is the command to apply rule @{text xyz}:
+This is the command to apply rule \<open>xyz\<close>:
 \begin{quote}
-\isacom{apply}@{text"(rule xyz)"}\index{rule@@{text rule}}
+\isacom{apply}\<open>(rule xyz)\<close>\index{rule@\<open>rule\<close>}
 \end{quote}
-This is also called \concept{backchaining} with rule @{text xyz}.
+This is also called \concept{backchaining} with rule \<open>xyz\<close>.
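 text\<open>A minimal backchaining sketch: two rule applications followed by
 \<open>assumption\<close> steps close the goal:\<close>
 lemma "A \<longrightarrow> A \<and> A"
 apply(rule impI)
 apply(rule conjI)
 apply(assumption)
 apply(assumption)
 done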
 
 \subsection{Introduction Rules}
 
@@ -390,13 +388,13 @@
 premises some logical construct can be introduced. Here are some further
 useful introduction rules:
 \[
-\inferrule*[right=\mbox{@{text impI}}]{\mbox{@{text"?P \<Longrightarrow> ?Q"}}}{\mbox{@{text"?P \<longrightarrow> ?Q"}}}
+\inferrule*[right=\mbox{\<open>impI\<close>}]{\mbox{\<open>?P \<Longrightarrow> ?Q\<close>}}{\mbox{\<open>?P \<longrightarrow> ?Q\<close>}}
 \qquad
-\inferrule*[right=\mbox{@{text allI}}]{\mbox{@{text"\<And>x. ?P x"}}}{\mbox{@{text"\<forall>x. ?P x"}}}
+\inferrule*[right=\mbox{\<open>allI\<close>}]{\mbox{\<open>\<And>x. ?P x\<close>}}{\mbox{\<open>\<forall>x. ?P x\<close>}}
 \]
 \[
-\inferrule*[right=\mbox{@{text iffI}}]{\mbox{@{text"?P \<Longrightarrow> ?Q"}} \\ \mbox{@{text"?Q \<Longrightarrow> ?P"}}}
-  {\mbox{@{text"?P = ?Q"}}}
+\inferrule*[right=\mbox{\<open>iffI\<close>}]{\mbox{\<open>?P \<Longrightarrow> ?Q\<close>} \\ \mbox{\<open>?Q \<Longrightarrow> ?P\<close>}}
+  {\mbox{\<open>?P = ?Q\<close>}}
 \]
 These rules are part of the logical system of \concept{natural deduction}
 (e.g., @{cite HuthRyan}). Although we intentionally de-emphasize the basic rules
@@ -406,23 +404,22 @@
 \begin{itemize}
 \item @{thm[source] conjI} and @{thm[source]iffI} split the goal into two subgoals,
 \item @{thm[source] impI} moves the left-hand side of a HOL implication into the list of assumptions,
-\item and @{thm[source] allI} removes a @{text "\<forall>"} by turning the quantified variable into a fixed local variable of the subgoal.
+\item and @{thm[source] allI} removes a \<open>\<forall>\<close> by turning the quantified variable into a fixed local variable of the subgoal.
 \end{itemize}
 Isabelle knows about these and a number of other introduction rules.
 The command
 \begin{quote}
-\isacom{apply} @{text rule}\index{rule@@{text rule}}
+\isacom{apply} \<open>rule\<close>\index{rule@\<open>rule\<close>}
 \end{quote}
 automatically selects the appropriate rule for the current subgoal.
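 text\<open>For example, on an implication, plain \<open>rule\<close> selects @{thm[source] impI}
 by itself:\<close>
 lemma "A \<longrightarrow> A"
 apply rule
 apply assumption
 done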
 
 You can also turn your own theorems into introduction rules by giving them
-the \indexed{@{text"intro"}}{intro} attribute, analogous to the @{text simp} attribute.  In
-that case @{text blast}, @{text fastforce} and (to a limited extent) @{text
-auto} will automatically backchain with those theorems. The @{text intro}
+the \indexed{\<open>intro\<close>}{intro} attribute, analogous to the \<open>simp\<close> attribute.  In
+that case \<open>blast\<close>, \<open>fastforce\<close> and (to a limited extent) \<open>auto\<close> will automatically backchain with those theorems. The \<open>intro\<close>
 attribute should be used with care because it increases the search space and
 can lead to nontermination.  Sometimes it is better to use it only in
-specific calls of @{text blast} and friends. For example,
-@{thm[source] le_trans}, transitivity of @{text"\<le>"} on type @{typ nat},
+specific calls of \<open>blast\<close> and friends. For example,
+@{thm[source] le_trans}, transitivity of \<open>\<le>\<close> on type @{typ nat},
 is not an introduction rule by default because of the disastrous effect
 on the search space, but can be useful in specific situations:
 \<close>
@@ -431,21 +428,19 @@
 by(blast intro: le_trans)
 
 text\<open>
-Of course this is just an example and could be proved by @{text arith}, too.
+Of course this is just an example and could be proved by \<open>arith\<close>, too.
 
 \subsection{Forward Proof}
 \label{sec:forward-proof}
 
 Forward proof means deriving new theorems from old theorems. We have already
-seen a very simple form of forward proof: the @{text of} operator for
-instantiating unknowns in a theorem. The big brother of @{text of} is
-\indexed{@{text OF}}{OF} for applying one theorem to others. Given a theorem @{prop"A \<Longrightarrow> B"} called
-@{text r} and a theorem @{text A'} called @{text r'}, the theorem @{text
-"r[OF r']"} is the result of applying @{text r} to @{text r'}, where @{text
-r} should be viewed as a function taking a theorem @{text A} and returning
-@{text B}.  More precisely, @{text A} and @{text A'} are unified, thus
-instantiating the unknowns in @{text B}, and the result is the instantiated
-@{text B}. Of course, unification may also fail.
+seen a very simple form of forward proof: the \<open>of\<close> operator for
+instantiating unknowns in a theorem. The big brother of \<open>of\<close> is
+\indexed{\<open>OF\<close>}{OF} for applying one theorem to others. Given a theorem @{prop"A \<Longrightarrow> B"} called
+\<open>r\<close> and a theorem \<open>A'\<close> called \<open>r'\<close>, the theorem \<open>r[OF r']\<close> is the result of applying \<open>r\<close> to \<open>r'\<close>, where \<open>r\<close> should be viewed as a function taking a theorem \<open>A\<close> and returning
+\<open>B\<close>.  More precisely, \<open>A\<close> and \<open>A'\<close> are unified, thus
+instantiating the unknowns in \<open>B\<close>, and the result is the instantiated
+\<open>B\<close>. Of course, unification may also fail.
 \begin{warn}
 Application of rules to other rules operates in the forward direction: from
 the premises to the conclusion of the rule; application of rules to proof
@@ -453,10 +448,10 @@
 premises.
 \end{warn}
 
-In general @{text r} can be of the form @{text"\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A"}
-and there can be multiple argument theorems @{text r\<^sub>1} to @{text r\<^sub>m}
-(with @{text"m \<le> n"}), in which case @{text "r[OF r\<^sub>1 \<dots> r\<^sub>m]"} is obtained
-by unifying and thus proving @{text "A\<^sub>i"} with @{text "r\<^sub>i"}, @{text"i = 1\<dots>m"}.
+In general \<open>r\<close> can be of the form \<open>\<lbrakk> A\<^sub>1; \<dots>; A\<^sub>n \<rbrakk> \<Longrightarrow> A\<close>
+and there can be multiple argument theorems \<open>r\<^sub>1\<close> to \<open>r\<^sub>m\<close>
+(with \<open>m \<le> n\<close>), in which case \<open>r[OF r\<^sub>1 \<dots> r\<^sub>m]\<close> is obtained
+by unifying and thus proving \<open>A\<^sub>i\<close> with \<open>r\<^sub>i\<close>, \<open>i = 1\<dots>m\<close>.
 Here is an example, where @{thm[source]refl} is the theorem
 @{thm[show_question_marks] refl}:
 \<close>
@@ -467,13 +462,13 @@
 The command \isacom{thm} merely displays the result.
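 text\<open>Another sketch along the same lines: composing @{thm[source] Suc_leD}
 (whose statement is displayed below) with itself strips two \<open>Suc\<close>s at once:\<close>
 thm Suc_leD[OF Suc_leD]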
 
 Forward reasoning also makes sense in connection with proof states.
-Therefore @{text blast}, @{text fastforce} and @{text auto} support a modifier
-@{text dest} which instructs the proof method to use certain rules in a
-forward fashion. If @{text r} is of the form \mbox{@{text "A \<Longrightarrow> B"}}, the modifier
-\mbox{@{text"dest: r"}}\index{dest@@{text"dest:"}}
-allows proof search to reason forward with @{text r}, i.e.,
-to replace an assumption @{text A'}, where @{text A'} unifies with @{text A},
-with the correspondingly instantiated @{text B}. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
+Therefore \<open>blast\<close>, \<open>fastforce\<close> and \<open>auto\<close> support a modifier
+\<open>dest\<close> which instructs the proof method to use certain rules in a
+forward fashion. If \<open>r\<close> is of the form \mbox{\<open>A \<Longrightarrow> B\<close>}, the modifier
+\mbox{\<open>dest: r\<close>}\index{dest@\<open>dest:\<close>}
+allows proof search to reason forward with \<open>r\<close>, i.e.,
+to replace an assumption \<open>A'\<close>, where \<open>A'\<close> unifies with \<open>A\<close>,
+with the correspondingly instantiated \<open>B\<close>. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
 \<close>
 
 lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
@@ -514,7 +509,7 @@
 \item If $n$ is even, so is $n+2$.
 \end{itemize}
 The operative word ``inductive'' means that these are the only even numbers.
-In Isabelle we give the two rules the names @{text ev0} and @{text evSS}
+In Isabelle we give the two rules the names \<open>ev0\<close> and \<open>evSS\<close>
 and write
 \<close>
 
@@ -528,7 +523,7 @@
 
 How do we prove that some number is even, e.g., @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
 \begin{quote}
-@{text "ev 0 \<Longrightarrow> ev (0 + 2) \<Longrightarrow> ev((0 + 2) + 2) = ev 4"}
+\<open>ev 0 \<Longrightarrow> ev (0 + 2) \<Longrightarrow> ev((0 + 2) + 2) = ev 4\<close>
 \end{quote}
 
 \subsubsection{Rule Induction}\index{rule induction|(}
@@ -549,11 +544,11 @@
 \begin{description}
 \item[Case @{thm[source]ev0}:]
  @{prop"ev m"} was derived by rule @{prop "ev 0"}: \\
- @{text"\<Longrightarrow>"} @{prop"m=(0::nat)"} @{text"\<Longrightarrow>"} @{text "evn m = evn 0 = True"}
+ \<open>\<Longrightarrow>\<close> @{prop"m=(0::nat)"} \<open>\<Longrightarrow>\<close> \<open>evn m = evn 0 = True\<close>
 \item[Case @{thm[source]evSS}:]
  @{prop"ev m"} was derived by rule @{prop "ev n \<Longrightarrow> ev(n+2)"}: \\
-@{text"\<Longrightarrow>"} @{prop"m=n+(2::nat)"} and by induction hypothesis @{prop"evn n"}\\
-@{text"\<Longrightarrow>"} @{text"evn m = evn(n + 2) = evn n = True"}
+\<open>\<Longrightarrow>\<close> @{prop"m=n+(2::nat)"} and by induction hypothesis @{prop"evn n"}\\
+\<open>\<Longrightarrow>\<close> \<open>evn m = evn(n + 2) = evn n = True\<close>
 \end{description}
 
 What we have just seen is a special case of \concept{rule induction}.
@@ -562,7 +557,7 @@
 @{prop "ev n \<Longrightarrow> P n"}
 \end{quote}
 That is, we want to prove a property @{prop"P n"}
-for all even @{text n}. But if we assume @{prop"ev n"}, then there must be
+for all even \<open>n\<close>. But if we assume @{prop"ev n"}, then there must be
 some derivation of this assumption using the two defining rules for
 @{const ev}. That is, we must prove
 \begin{description}
@@ -578,7 +573,7 @@
 {\mbox{@{thm (concl) ev.induct[of "n"]}}}
 \]
 The first premise @{prop"ev n"} enforces that this rule can only be applied
-in situations where we know that @{text n} is even.
+in situations where we know that \<open>n\<close> is even.
 
 Note that in the induction step we may not just assume @{prop"P n"} but also
 \mbox{@{prop"ev n"}}, which is simply the premise of rule @{thm[source]
@@ -587,8 +582,8 @@
 Case @{thm[source]ev0} requires us to prove @{prop"ev(0 - 2)"}, which follows
 from @{prop"ev 0"} because @{prop"0 - 2 = (0::nat)"} on type @{typ nat}. In
 case @{thm[source]evSS} we have \mbox{@{prop"m = n+(2::nat)"}} and may assume
-@{prop"ev n"}, which implies @{prop"ev (m - 2)"} because @{text"m - 2 = (n +
-2) - 2 = n"}. We did not need the induction hypothesis at all for this proof (it
+@{prop"ev n"}, which implies @{prop"ev (m - 2)"} because \<open>m - 2 = (n +
+2) - 2 = n\<close>. We did not need the induction hypothesis at all for this proof (it
 is just a case analysis of which rule was used) but having @{prop"ev n"}
 at our disposal in case @{thm[source]evSS} was essential.
 This case analysis of rules is also called ``rule inversion''
@@ -599,11 +594,11 @@
 Let us now recast the above informal proofs in Isabelle. For a start,
 we use @{const Suc} terms instead of numerals in rule @{thm[source]evSS}:
 @{thm[display] evSS}
-This avoids the difficulty of unifying @{text"n+2"} with some numeral,
+This avoids the difficulty of unifying \<open>n+2\<close> with some numeral,
 which is not automatic.
 
 The simplest way to prove @{prop"ev(Suc(Suc(Suc(Suc 0))))"} is in a forward
-direction: @{text "evSS[OF evSS[OF ev0]]"} yields the theorem @{thm evSS[OF
+direction: \<open>evSS[OF evSS[OF ev0]]\<close> yields the theorem @{thm evSS[OF
 evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
 fashion. Although this is more verbose, it allows us to demonstrate how each
 rule application changes the proof state:\<close>
@@ -625,14 +620,14 @@
 
 text\<open>\indent
 Rule induction is applied by giving the induction rule explicitly via the
-@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}\<close>
+\<open>rule:\<close> modifier:\index{inductionrule@\<open>induction ... rule:\<close>}\<close>
 
 lemma "ev m \<Longrightarrow> evn m"
 apply(induction rule: ev.induct)
 by(simp_all)
 
 text\<open>Both cases are automatic. Note that if there are multiple assumptions
-of the form @{prop"ev t"}, method @{text induction} will induct on the leftmost
+of the form @{prop"ev t"}, method \<open>induction\<close> will induct on the leftmost
 one.
 
 As a bonus, we also prove the remaining direction of the equivalence of
@@ -642,7 +637,7 @@
 lemma "evn n \<Longrightarrow> ev n"
 apply(induction n rule: evn.induct)
 
-txt\<open>This is a proof by computation induction on @{text n} (see
+txt\<open>This is a proof by computation induction on \<open>n\<close> (see
 \autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
 the three equations for @{const evn}:
 @{subgoals[display,indent=0]}
@@ -655,7 +650,7 @@
 rules because their premises are always smaller than the conclusion. It
 makes sense to turn them into simplification and introduction rules
 permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
-\index{intros@@{text".intros"}} by Isabelle:\<close>
+\index{intros@\<open>.intros\<close>} by Isabelle:\<close>
 
 declare ev.intros[simp,intro]
 
@@ -671,7 +666,7 @@
 expresses both the positive information (which numbers are even) and the
 negative information (which numbers are not even) directly. An inductive
 definition only expresses the positive information directly. The negative
-information, for example, that @{text 1} is not even, has to be proved from
+information, for example, that \<open>1\<close> is not even, has to be proved from
 it (by induction or rule inversion). On the other hand, rule induction is
 tailor-made for proving \mbox{@{prop"ev n \<Longrightarrow> P n"}} because it only asks you
 to prove the positive cases. In the proof of @{prop"evn n \<Longrightarrow> P n"} by
@@ -699,14 +694,14 @@
 some of the semantics considered in the second part of the book.
 \fi
 
-The reflexive transitive closure, called @{text star} below, is a function
-that maps a binary predicate to another binary predicate: if @{text r} is of
-type @{text"\<tau> \<Rightarrow> \<tau> \<Rightarrow> bool"} then @{term "star r"} is again of type @{text"\<tau> \<Rightarrow>
-\<tau> \<Rightarrow> bool"}, and @{prop"star r x y"} means that @{text x} and @{text y} are in
+The reflexive transitive closure, called \<open>star\<close> below, is a function
+that maps a binary predicate to another binary predicate: if \<open>r\<close> is of
+type \<open>\<tau> \<Rightarrow> \<tau> \<Rightarrow> bool\<close> then @{term "star r"} is again of type \<open>\<tau> \<Rightarrow>
+\<tau> \<Rightarrow> bool\<close>, and @{prop"star r x y"} means that \<open>x\<close> and \<open>y\<close> are in
 the relation @{term"star r"}. Think @{term"r\<^sup>*"} when you see @{term"star
-r"}, because @{text"star r"} is meant to be the reflexive transitive closure.
-That is, @{prop"star r x y"} is meant to be true if from @{text x} we can
-reach @{text y} in finitely many @{text r} steps. This concept is naturally
+r"}, because \<open>star r\<close> is meant to be the reflexive transitive closure.
+That is, @{prop"star r x y"} is meant to be true if from \<open>x\<close> we can
+reach \<open>y\<close> in finitely many \<open>r\<close> steps. This concept is naturally
 defined inductively:\<close>
 
 inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"  for r where
@@ -714,11 +709,11 @@
 step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
 
 text\<open>The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
-step case @{thm[source]step} combines an @{text r} step (from @{text x} to
-@{text y}) and a @{term"star r"} step (from @{text y} to @{text z}) into a
-@{term"star r"} step (from @{text x} to @{text z}).
-The ``\isacom{for}~@{text r}'' in the header is merely a hint to Isabelle
-that @{text r} is a fixed parameter of @{const star}, in contrast to the
+step case @{thm[source]step} combines an \<open>r\<close> step (from \<open>x\<close> to
+\<open>y\<close>) and a @{term"star r"} step (from \<open>y\<close> to \<open>z\<close>) into a
+@{term"star r"} step (from \<open>x\<close> to \<open>z\<close>).
+The ``\isacom{for}~\<open>r\<close>'' in the header is merely a hint to Isabelle
+that \<open>r\<close> is a fixed parameter of @{const star}, in contrast to the
 further parameters of @{const star}, which change. As a result, Isabelle
 generates a simpler induction rule.
 
@@ -737,10 +732,10 @@
 which we abbreviate by @{prop"P x y"}. These are our two subgoals:
 @{subgoals[display,indent=0]}
 The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
-and it is trivial:\index{assumption@@{text assumption}}
+and it is trivial:\index{assumption@\<open>assumption\<close>}
 \<close>
 apply(assumption)
-txt\<open>Let us examine subgoal @{text 2}, case @{thm[source] step}.
+txt\<open>Let us examine subgoal \<open>2\<close>, case @{thm[source] step}.
 Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
 are the premises of rule @{thm[source]step}.
 Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
@@ -759,32 +754,32 @@
 
 Inductive definitions have approximately the following general form:
 \begin{quote}
-\isacom{inductive} @{text"I :: \"\<tau> \<Rightarrow> bool\""} \isacom{where}
+\isacom{inductive} \<open>I :: "\<tau> \<Rightarrow> bool"\<close> \isacom{where}
 \end{quote}
 followed by a sequence of (possibly named) rules of the form
 \begin{quote}
-@{text "\<lbrakk> I a\<^sub>1; \<dots>; I a\<^sub>n \<rbrakk> \<Longrightarrow> I a"}
+\<open>\<lbrakk> I a\<^sub>1; \<dots>; I a\<^sub>n \<rbrakk> \<Longrightarrow> I a\<close>
 \end{quote}
-separated by @{text"|"}. As usual, @{text n} can be 0.
+separated by \<open>|\<close>. As usual, \<open>n\<close> can be 0.
 The corresponding rule induction principle
-@{text I.induct} applies to propositions of the form
+\<open>I.induct\<close> applies to propositions of the form
 \begin{quote}
 @{prop "I x \<Longrightarrow> P x"}
 \end{quote}
-where @{text P} may itself be a chain of implications.
+where \<open>P\<close> may itself be a chain of implications.
 \begin{warn}
 Rule induction is always on the leftmost premise of the goal.
-Hence @{text "I x"} must be the first premise.
+Hence \<open>I x\<close> must be the first premise.
 \end{warn}
 Proving @{prop "I x \<Longrightarrow> P x"} by rule induction means proving
-for every rule of @{text I} that @{text P} is invariant:
+for every rule of \<open>I\<close> that \<open>P\<close> is invariant:
 \begin{quote}
-@{text "\<lbrakk> I a\<^sub>1; P a\<^sub>1; \<dots>; I a\<^sub>n; P a\<^sub>n \<rbrakk> \<Longrightarrow> P a"}
+\<open>\<lbrakk> I a\<^sub>1; P a\<^sub>1; \<dots>; I a\<^sub>n; P a\<^sub>n \<rbrakk> \<Longrightarrow> P a\<close>
 \end{quote}
 
 The above format for inductive definitions is simplified in a number of
-respects. @{text I} can have any number of arguments and each rule can have
-additional premises not involving @{text I}, so-called \conceptidx{side
+respects. \<open>I\<close> can have any number of arguments and each rule can have
+additional premises not involving \<open>I\<close>, so-called \conceptidx{side
 conditions}{side condition}. In rule inductions, these side conditions appear as additional
 assumptions. The \isacom{for} clause seen in the definition of the reflexive
 transitive closure simplifies the induction rule.
@@ -796,10 +791,10 @@
 Formalize the following definition of palindromes
 \begin{itemize}
 \item The empty list and a singleton list are palindromes.
-\item If @{text xs} is a palindrome, so is @{term "a # xs @ [a]"}.
+\item If \<open>xs\<close> is a palindrome, so is @{term "a # xs @ [a]"}.
 \end{itemize}
-as an inductive predicate @{text "palindrome ::"} @{typ "'a list \<Rightarrow> bool"}
-and prove that @{prop "rev xs = xs"} if @{text xs} is a palindrome.
+as an inductive predicate \<open>palindrome ::\<close> @{typ "'a list \<Rightarrow> bool"}
+and prove that @{prop "rev xs = xs"} if \<open>xs\<close> is a palindrome.
 \end{exercise}
 
 \exercise
@@ -811,7 +806,7 @@
 step': "star' r x y \<Longrightarrow> r y z \<Longrightarrow> star' r x z"
 
 text\<open>
-The single @{text r} step is performed after rather than before the @{text star'}
+The single \<open>r\<close> step is performed after rather than before the \<open>star'\<close>
 steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
 @{prop "star r x y \<Longrightarrow> star' r x y"}. You may need lemmas.
 Note that rule induction fails
@@ -819,9 +814,9 @@
 \endexercise
 
 \begin{exercise}\label{exe:iter}
-Analogous to @{const star}, give an inductive definition of the @{text n}-fold iteration
-of a relation @{text r}: @{term "iter r n x y"} should hold if there are @{text x\<^sub>0}, \dots, @{text x\<^sub>n}
-such that @{prop"x = x\<^sub>0"}, @{prop"x\<^sub>n = y"} and @{text"r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>"} for
+Analogous to @{const star}, give an inductive definition of the \<open>n\<close>-fold iteration
+of a relation \<open>r\<close>: @{term "iter r n x y"} should hold if there are \<open>x\<^sub>0\<close>, \dots, \<open>x\<^sub>n\<close>
+such that @{prop"x = x\<^sub>0"}, @{prop"x\<^sub>n = y"} and \<open>r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>\<close> for
 all @{prop"i < n"}. Correct and prove the following claim:
 @{prop"star r x y \<Longrightarrow> iter r n x y"}.
 \end{exercise}
@@ -831,9 +826,9 @@
 nonterminal $A$ is an inductively defined predicate on lists of terminal
 symbols: $A(w)$ means that $w$ is in the language generated by $A$.
 For example, the production $S \to a S b$ can be viewed as the implication
-@{prop"S w \<Longrightarrow> S (a # w @ [b])"} where @{text a} and @{text b} are terminal symbols,
+@{prop"S w \<Longrightarrow> S (a # w @ [b])"} where \<open>a\<close> and \<open>b\<close> are terminal symbols,
 i.e., elements of some alphabet. The alphabet can be defined like this:
-\isacom{datatype} @{text"alpha = a | b | \<dots>"}
+\isacom{datatype} \<open>alpha = a | b | \<dots>\<close>
 
 Define the two grammars (where $\varepsilon$ is the empty word)
 \[
@@ -843,7 +838,7 @@
 \end{array}
 \]
 as two inductive predicates.
-If you think of @{text a} and @{text b} as ``@{text "("}'' and  ``@{text ")"}'',
+If you think of \<open>a\<close> and \<open>b\<close> as ``\<open>(\<close>'' and  ``\<open>)\<close>'',
 the grammar defines strings of balanced parentheses.
 Prove @{prop"T w \<Longrightarrow> S w"} and \mbox{@{prop "S w \<Longrightarrow> T w"}} separately and conclude
 @{prop "S w = T w"}.
@@ -852,9 +847,9 @@
 \ifsem
 \begin{exercise}
 In \autoref{sec:AExp} we defined a recursive evaluation function
-@{text "aval :: aexp \<Rightarrow> state \<Rightarrow> val"}.
+\<open>aval :: aexp \<Rightarrow> state \<Rightarrow> val\<close>.
 Define an inductive evaluation predicate
-@{text "aval_rel :: aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool"}
+\<open>aval_rel :: aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool\<close>
 and prove that it agrees with the recursive function:
 @{prop "aval_rel a s v \<Longrightarrow> aval a s = v"}, 
 @{prop "aval a s = v \<Longrightarrow> aval_rel a s v"} and thus
@@ -866,15 +861,15 @@
 and recall the concept of \concept{stack underflow}
 from Exercise~\ref{exe:stack-underflow}.
 Define an inductive predicate
-@{text "ok :: nat \<Rightarrow> instr list \<Rightarrow> nat \<Rightarrow> bool"}
-such that @{text "ok n is n'"} means that with any initial stack of length
-@{text n} the instructions @{text "is"} can be executed
-without stack underflow and that the final stack has length @{text n'}.
-Prove that @{text ok} correctly computes the final stack size
+\<open>ok :: nat \<Rightarrow> instr list \<Rightarrow> nat \<Rightarrow> bool\<close>
+such that \<open>ok n is n'\<close> means that with any initial stack of length
+\<open>n\<close> the instructions \<open>is\<close> can be executed
+without stack underflow and that the final stack has length \<open>n'\<close>.
+Prove that \<open>ok\<close> correctly computes the final stack size
 @{prop[display] "\<lbrakk>ok n is n'; length stk = n\<rbrakk> \<Longrightarrow> length (exec is s stk) = n'"}
-and that instruction sequences generated by @{text comp}
-cannot cause stack underflow: \ @{text "ok n (comp a) ?"} \ for
-some suitable value of @{text "?"}.
+and that instruction sequences generated by \<open>comp\<close>
+cannot cause stack underflow: \ \<open>ok n (comp a) ?\<close> \ for
+some suitable value of \<open>?\<close>.
 \end{exercise}
 \fi
 \<close>
--- a/src/Doc/Prog_Prove/Types_and_funs.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Prog_Prove/Types_and_funs.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,7 +8,7 @@
 \section{Type and Function Definitions}
 
 Type synonyms are abbreviations for existing types, for example
-\index{string@@{text string}}\<close>
+\index{string@\<open>string\<close>}\<close>
 
 type_synonym string = "char list"
 
@@ -21,14 +21,14 @@
 The general form of a datatype definition looks like this:
 \begin{quote}
 \begin{tabular}{@ {}rclcll}
-\indexed{\isacom{datatype}}{datatype} @{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"}
-     & = & $C_1 \ @{text"\""}\tau_{1,1}@{text"\""} \dots @{text"\""}\tau_{1,n_1}@{text"\""}$ \\
+\indexed{\isacom{datatype}}{datatype} \<open>('a\<^sub>1,\<dots>,'a\<^sub>n)t\<close>
+     & = & $C_1 \ \<open>"\<close>\tau_{1,1}\<open>"\<close> \dots \<open>"\<close>\tau_{1,n_1}\<open>"\<close>$ \\
      & $|$ & \dots \\
-     & $|$ & $C_k \ @{text"\""}\tau_{k,1}@{text"\""} \dots @{text"\""}\tau_{k,n_k}@{text"\""}$
+     & $|$ & $C_k \ \<open>"\<close>\tau_{k,1}\<open>"\<close> \dots \<open>"\<close>\tau_{k,n_k}\<open>"\<close>$
 \end{tabular}
 \end{quote}
 It introduces the constructors \
-$C_i :: \tau_{i,1}\Rightarrow \cdots \Rightarrow \tau_{i,n_i} \Rightarrow$~@{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"} \ and expresses that any value of this type is built from these constructors in a unique manner. Uniqueness is implied by the following
+$C_i :: \tau_{i,1}\Rightarrow \cdots \Rightarrow \tau_{i,n_i} \Rightarrow$~\<open>('a\<^sub>1,\<dots>,'a\<^sub>n)t\<close> \ and expresses that any value of this type is built from these constructors in a unique manner. Uniqueness is implied by the following
 properties of the constructors:
 \begin{itemize}
 \item \emph{Distinctness:} $C_i\ \ldots \neq C_j\ \dots$ \quad if $i \neq j$
@@ -40,20 +40,20 @@
 \end{itemize}
 The fact that any value of the datatype is built from the constructors implies
 the \concept{structural induction}\index{induction} rule: to show
-$P~x$ for all $x$ of type @{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"},
+$P~x$ for all $x$ of type \<open>('a\<^sub>1,\<dots>,'a\<^sub>n)t\<close>,
 one needs to show $P(C_i\ x_1 \dots x_{n_i})$ (for each $i$) assuming
-$P(x_j)$ for all $j$ where $\tau_{i,j} =$~@{text "('a\<^sub>1,\<dots>,'a\<^sub>n)t"}.
-Distinctness and injectivity are applied automatically by @{text auto}
+$P(x_j)$ for all $j$ where $\tau_{i,j} =$~\<open>('a\<^sub>1,\<dots>,'a\<^sub>n)t\<close>.
+Distinctness and injectivity are applied automatically by \<open>auto\<close>
 and other proof methods. Induction must be applied explicitly.
 
 Like in functional programming languages, datatype values can be taken apart
-with case expressions\index{case expression}\index{case expression@@{text "case ... of"}}, for example
+with case expressions\index{case expression}\index{case expression@\<open>case ... of\<close>}, for example
 \begin{quote}
 \noquotes{@{term[source] "(case xs of [] \<Rightarrow> 0 | x # _ \<Rightarrow> Suc x)"}}
 \end{quote}
 Case expressions must be enclosed in parentheses.
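 text\<open>For example, a case expression on a list, wrapped in a function
 (\<open>head0\<close> is an illustrative name):\<close>
 fun head0 :: "nat list \<Rightarrow> nat" where
 "head0 xs = (case xs of [] \<Rightarrow> 0 | x # _ \<Rightarrow> x)"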
 
-As an example of a datatype beyond @{typ nat} and @{text list}, consider binary trees:
+As an example of a datatype beyond @{typ nat} and \<open>list\<close>, consider binary trees:
 \<close>
 
 datatype 'a tree = Tip | Node  "'a tree"  'a  "'a tree"
@@ -72,10 +72,10 @@
 txt\<open>yields
 @{subgoals[display]}
 The induction step contains two induction hypotheses, one for each subtree.
-An application of @{text auto} finishes the proof.
+An application of \<open>auto\<close> finishes the proof.
 
 A very simple but also very useful datatype is the predefined
-@{datatype[display] option}\index{option@@{text option}}\index{None@@{const None}}\index{Some@@{const Some}}
+@{datatype[display] option}\index{option@\<open>option\<close>}\index{None@@{const None}}\index{Some@@{const Some}}
 Its sole purpose is to add a new element @{const None} to an existing
 type @{typ 'a}. To make sure that @{const None} is distinct from all the
 elements of @{typ 'a}, you wrap them up in @{const Some} and call
@@ -91,12 +91,12 @@
 "lookup ((a,b) # ps) x = (if a = x then Some b else lookup ps x)"
 
 text\<open>
-Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
+Note that \<open>\<tau>\<^sub>1 * \<tau>\<^sub>2\<close> is the type of pairs, also written \<open>\<tau>\<^sub>1 \<times> \<tau>\<^sub>2\<close>.
 Pairs can be taken apart either by pattern matching (as above) or with the
 projection functions @{const fst} and @{const snd}: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
 Tuples are simulated by pairs nested to the right: @{term"(a,b,c)"}
-is short for @{text"(a, (b, c))"} and @{text "\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3"} is short for
-@{text "\<tau>\<^sub>1 \<times> (\<tau>\<^sub>2 \<times> \<tau>\<^sub>3)"}.
+is short for \<open>(a, (b, c))\<close> and \<open>\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3\<close> is short for
+\<open>\<tau>\<^sub>1 \<times> (\<tau>\<^sub>2 \<times> \<tau>\<^sub>3)\<close>.
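 text\<open>A trivial check of the projections:\<close>
 lemma "fst (a, b) = a \<and> snd (a, b) = b"
 by simp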
 
 \subsection{Definitions}
 
@@ -107,7 +107,7 @@
 "sq n = n * n"
 
 text\<open>Such definitions do not allow pattern matching but only
-@{text"f x\<^sub>1 \<dots> x\<^sub>n = t"}, where @{text f} does not occur in @{text t}.
+\<open>f x\<^sub>1 \<dots> x\<^sub>n = t\<close>, where \<open>f\<close> does not occur in \<open>t\<close>.
 
 \subsection{Abbreviations}
 
@@ -128,7 +128,7 @@
 if abused, they can lead to a confusing discrepancy between the internal and
 external view of a term.
 
-The ASCII representation of @{text"\<equiv>"} is \texttt{==} or \xsymbol{equiv}.
+The ASCII representation of \<open>\<equiv>\<close> is \texttt{==} or \xsymbol{equiv}.
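 text\<open>A sketch of the kind of abbreviation meant here (\<open>sq'\<close> is an
 illustrative name):\<close>
 abbreviation sq' :: "nat \<Rightarrow> nat" where
 "sq' n \<equiv> n * n"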
 
 \subsection{Recursive Functions}
 \label{sec:recursive-funs}
@@ -145,8 +145,7 @@
 recursive calls on the right-hand side must be strictly smaller than the
 arguments on the left-hand side. In the simplest case, this means that one
 fixed argument position decreases in size with each recursive call. The size
-is measured as the number of constructors (excluding 0-ary ones, e.g., @{text
-Nil}). Lexicographic combinations are also recognized. In more complicated
+is measured as the number of constructors (excluding 0-ary ones, e.g., \<open>Nil\<close>). Lexicographic combinations are also recognized. In more complicated
 situations, the user may have to prove termination by hand. For details
 see~@{cite Krauss}.
 
@@ -175,67 +174,67 @@
 lemma "div2(n) = n div 2"
 apply(induction n rule: div2.induct)
 
-txt\<open>(where the infix @{text div} is the predefined division operation)
+txt\<open>(where the infix \<open>div\<close> is the predefined division operation)
 yields the subgoals
 @{subgoals[display,margin=65]}
-An application of @{text auto} finishes the proof.
-Had we used ordinary structural induction on @{text n},
+An application of \<open>auto\<close> finishes the proof.
+Had we used ordinary structural induction on \<open>n\<close>,
 the proof would have needed an additional
 case analysis in the induction step.
 
 This example leads to the following induction heuristic:
 \begin{quote}
-\emph{Let @{text f} be a recursive function.
-If the definition of @{text f} is more complicated
+\emph{Let \<open>f\<close> be a recursive function.
+If the definition of \<open>f\<close> is more complicated
 than having one equation for each constructor of some datatype,
-then properties of @{text f} are best proved via @{text "f.induct"}.\index{inductionrule@@{text".induct"}}}
+then properties of \<open>f\<close> are best proved via \<open>f.induct\<close>.\index{inductionrule@\<open>.induct\<close>}}
 \end{quote}
 
 The general case is often called \concept{computation induction},
 because the induction follows the (terminating!) computation.
 For every defining equation
 \begin{quote}
-@{text "f(e) = \<dots> f(r\<^sub>1) \<dots> f(r\<^sub>k) \<dots>"}
+\<open>f(e) = \<dots> f(r\<^sub>1) \<dots> f(r\<^sub>k) \<dots>\<close>
 \end{quote}
-where @{text"f(r\<^sub>i)"}, @{text"i=1\<dots>k"}, are all the recursive calls,
-the induction rule @{text"f.induct"} contains one premise of the form
+where \<open>f(r\<^sub>i)\<close>, \<open>i=1\<dots>k\<close>, are all the recursive calls,
+the induction rule \<open>f.induct\<close> contains one premise of the form
 \begin{quote}
-@{text"P(r\<^sub>1) \<Longrightarrow> \<dots> \<Longrightarrow> P(r\<^sub>k) \<Longrightarrow> P(e)"}
+\<open>P(r\<^sub>1) \<Longrightarrow> \<dots> \<Longrightarrow> P(r\<^sub>k) \<Longrightarrow> P(e)\<close>
 \end{quote}
-If @{text "f :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> \<tau>"} then @{text"f.induct"} is applied like this:
+If \<open>f :: \<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> \<tau>\<close> then \<open>f.induct\<close> is applied like this:
 \begin{quote}
-\isacom{apply}@{text"(induction x\<^sub>1 \<dots> x\<^sub>n rule: f.induct)"}\index{inductionrule@@{text"induction ... rule:"}}
+\isacom{apply}\<open>(induction x\<^sub>1 \<dots> x\<^sub>n rule: f.induct)\<close>\index{inductionrule@\<open>induction ... rule:\<close>}
 \end{quote}
-where typically there is a call @{text"f x\<^sub>1 \<dots> x\<^sub>n"} in the goal.
-But note that the induction rule does not mention @{text f} at all,
-except in its name, and is applicable independently of @{text f}.
+where typically there is a call \<open>f x\<^sub>1 \<dots> x\<^sub>n\<close> in the goal.
+But note that the induction rule does not mention \<open>f\<close> at all,
+except in its name, and is applicable independently of \<open>f\<close>.
 
 
 \subsection*{Exercises}
 
 \begin{exercise}
-Starting from the type @{text "'a tree"} defined in the text, define
-a function @{text "contents ::"} @{typ "'a tree \<Rightarrow> 'a list"}
+Starting from the type \<open>'a tree\<close> defined in the text, define
+a function \<open>contents ::\<close> @{typ "'a tree \<Rightarrow> 'a list"}
 that collects all values in a tree in a list, in any order,
 without removing duplicates.
-Then define a function @{text "sum_tree ::"} @{typ "nat tree \<Rightarrow> nat"}
+Then define a function \<open>sum_tree ::\<close> @{typ "nat tree \<Rightarrow> nat"}
 that sums up all values in a tree of natural numbers
 and prove @{prop "sum_tree t = sum_list(contents t)"}
 (where @{const sum_list} is predefined).
 \end{exercise}
 
 \begin{exercise}
-Define a new type @{text "'a tree2"} of binary trees where values are also
+Define a new type \<open>'a tree2\<close> of binary trees where values are also
 stored in the leaves of the tree.  Also reformulate the
 @{const mirror} function accordingly. Define two functions
-@{text "pre_order"} and @{text "post_order"} of type @{text "'a tree2 \<Rightarrow> 'a list"}
+\<open>pre_order\<close> and \<open>post_order\<close> of type \<open>'a tree2 \<Rightarrow> 'a list\<close>
 that traverse a tree and collect all stored values in the respective order in
 a list. Prove @{prop "pre_order (mirror t) = rev (post_order t)"}.
 \end{exercise}
 
 \begin{exercise}
-Define a function @{text "intersperse ::"} @{typ "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"}
-such that @{text "intersperse a [x\<^sub>1, ..., x\<^sub>n] = [x\<^sub>1, a, x\<^sub>2, a, ..., a, x\<^sub>n]"}.
+Define a function \<open>intersperse ::\<close> @{typ "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"}
+such that \<open>intersperse a [x\<^sub>1, ..., x\<^sub>n] = [x\<^sub>1, a, x\<^sub>2, a, ..., a, x\<^sub>n]\<close>.
 Now prove that @{prop "map f (intersperse a xs) = intersperse (f a) (map f xs)"}.
 \end{exercise}
 
@@ -259,7 +258,7 @@
 because it calls append for each element of the list and
 append is linear in its first argument.  A linear time version of
 @{const rev} requires an extra argument where the result is accumulated
-gradually, using only~@{text"#"}:
+gradually, using only~\<open>#\<close>:
 \<close>
 (*<*)
 apply auto
@@ -304,29 +303,29 @@
 lemma "itrev xs ys = rev xs @ ys"
 (*<*)apply(induction xs, auto)(*>*)
 txt\<open>
-If @{text ys} is replaced by @{term"[]"}, the right-hand side simplifies to
+If \<open>ys\<close> is replaced by @{term"[]"}, the right-hand side simplifies to
 @{term"rev xs"}, as required.
 In this instance it was easy to guess the right generalization.
 Other situations can require a good deal of creativity.  
 
-Although we now have two variables, only @{text xs} is suitable for
+Although we now have two variables, only \<open>xs\<close> is suitable for
 induction, and we repeat our proof attempt. Unfortunately, we are still
 not there:
 @{subgoals[display,margin=65]}
 The induction hypothesis is still too weak, but this time it takes no
-intuition to generalize: the problem is that the @{text ys}
+intuition to generalize: the problem is that the \<open>ys\<close>
 in the induction hypothesis is fixed,
 but the induction hypothesis needs to be applied with
-@{term"a # ys"} instead of @{text ys}. Hence we prove the theorem
-for all @{text ys} instead of a fixed one. We can instruct induction
-to perform this generalization for us by adding @{text "arbitrary: ys"}\index{arbitrary@@{text"arbitrary:"}}.
+@{term"a # ys"} instead of \<open>ys\<close>. Hence we prove the theorem
+for all \<open>ys\<close> instead of a fixed one. We can instruct induction
+to perform this generalization for us by adding \<open>arbitrary: ys\<close>\index{arbitrary@\<open>arbitrary:\<close>}.
 \<close>
 (*<*)oops
 lemma "itrev xs ys = rev xs @ ys"
 (*>*)
 apply(induction xs arbitrary: ys)
 
-txt\<open>The induction hypothesis in the induction step is now universally quantified over @{text ys}:
+txt\<open>The induction hypothesis in the induction step is now universally quantified over \<open>ys\<close>:
 @{subgoals[display,margin=65]}
 Thus the proof succeeds:
 \<close>
@@ -340,7 +339,7 @@
 \emph{Generalize induction by generalizing all free
 variables\\ {\em(except the induction variable itself)}.}
 \end{quote}
-Generalization is best performed with @{text"arbitrary: y\<^sub>1 \<dots> y\<^sub>k"}. 
+Generalization is best performed with \<open>arbitrary: y\<^sub>1 \<dots> y\<^sub>k\<close>. 
 This heuristic prevents trivial failures like the one above.
 However, it should not be applied blindly.
 It is not always required, and the additional quantifiers can complicate
@@ -351,10 +350,10 @@
 \subsection*{Exercises}
 
 \begin{exercise}
-Write a tail-recursive variant of the @{text add} function on @{typ nat}:
+Write a tail-recursive variant of the \<open>add\<close> function on @{typ nat}:
 @{term "itadd :: nat \<Rightarrow> nat \<Rightarrow> nat"}.
-Tail-recursive means that in the recursive case, @{text itadd} needs to call
-itself directly: \mbox{@{term"itadd (Suc m) n"}} @{text"= itadd \<dots>"}.
+Tail-recursive means that in the recursive case, \<open>itadd\<close> needs to call
+itself directly: \mbox{@{term"itadd (Suc m) n"}} \<open>= itadd \<dots>\<close>.
 Prove @{prop "itadd m n = add m n"}.
 \end{exercise}
 
@@ -368,30 +367,30 @@
 \item as long as possible.
 \end{itemize}
 To emphasize the directionality, equations that have been given the
-@{text"simp"} attribute are called \conceptidx{simplification rules}{simplification rule}.
+\<open>simp\<close> attribute are called \conceptidx{simplification rules}{simplification rule}.
 Logically, they are still symmetric, but proofs by
 simplification use them only in the left-to-right direction.  The proof tool
 that performs simplifications is called the \concept{simplifier}. It is the
-basis of @{text auto} and other related proof methods.
+basis of \<open>auto\<close> and other related proof methods.
 
 The idea of simplification is best explained by an example. Given the
 simplification rules
 \[
 \begin{array}{rcl@ {\quad}l}
-@{term"0 + n::nat"} &@{text"="}& @{text n} & (1) \\
-@{term"(Suc m) + n"} &@{text"="}& @{term"Suc (m + n)"} & (2) \\
-@{text"(Suc m \<le> Suc n)"} &@{text"="}& @{text"(m \<le> n)"} & (3)\\
-@{text"(0 \<le> m)"} &@{text"="}& @{const True} & (4)
+@{term"0 + n::nat"} &\<open>=\<close>& \<open>n\<close> & (1) \\
+@{term"(Suc m) + n"} &\<open>=\<close>& @{term"Suc (m + n)"} & (2) \\
+\<open>(Suc m \<le> Suc n)\<close> &\<open>=\<close>& \<open>(m \<le> n)\<close> & (3)\\
+\<open>(0 \<le> m)\<close> &\<open>=\<close>& @{const True} & (4)
 \end{array}
 \]
 the formula @{prop"0 + Suc 0 \<le> Suc 0 + x"} is simplified to @{const True}
 as follows:
 \[
 \begin{array}{r@ {}c@ {}l@ {\quad}l}
-@{text"(0 + Suc 0"} & \leq & @{text"Suc 0 + x)"}  & \stackrel{(1)}{=} \\
-@{text"(Suc 0"}     & \leq & @{text"Suc 0 + x)"}  & \stackrel{(2)}{=} \\
-@{text"(Suc 0"}     & \leq & @{text"Suc (0 + x))"} & \stackrel{(3)}{=} \\
-@{text"(0"}         & \leq & @{text"0 + x)"}      & \stackrel{(4)}{=} \\[1ex]
+\<open>(0 + Suc 0\<close> & \leq & \<open>Suc 0 + x)\<close>  & \stackrel{(1)}{=} \\
+\<open>(Suc 0\<close>     & \leq & \<open>Suc 0 + x)\<close>  & \stackrel{(2)}{=} \\
+\<open>(Suc 0\<close>     & \leq & \<open>Suc (0 + x))\<close> & \stackrel{(3)}{=} \\
+\<open>(0\<close>         & \leq & \<open>0 + x)\<close>      & \stackrel{(4)}{=} \\[1ex]
  & @{const True}
 \end{array}
 \]
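 This chain can be replayed mechanically: rules (1)--(4) are among the
 default simplification rules of HOL, so a bare \<open>simp\<close> should suffice.
 A small check:
 \<close>
 
 lemma "0 + Suc 0 \<le> Suc 0 + x"
 by simp
 
 text\<open>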
@@ -400,7 +399,7 @@
 
 \subsection{Simplification Rules}
 
-The attribute @{text"simp"} declares theorems to be simplification rules,
+The attribute \<open>simp\<close> declares theorems to be simplification rules,
 which the simplifier will use automatically. In addition,
 \isacom{datatype} and \isacom{fun} commands implicitly declare some
 simplification rules: \isacom{datatype} the distinctness and injectivity
@@ -454,39 +453,39 @@
 leads to nontermination: when trying to rewrite @{prop"n<m"} to @{const True}
 one first has to prove \mbox{@{prop"Suc n < m"}}, which can be rewritten to @{const True} provided @{prop"Suc(Suc n) < m"}, \emph{ad infinitum}.
 
-\subsection{The \indexed{@{text simp}}{simp} Proof Method}
+\subsection{The \indexed{\<open>simp\<close>}{simp} Proof Method}
 \label{sec:simp}
 
-So far we have only used the proof method @{text auto}.  Method @{text simp}
-is the key component of @{text auto}, but @{text auto} can do much more. In
-some cases, @{text auto} is overeager and modifies the proof state too much.
-In such cases the more predictable @{text simp} method should be used.
+So far we have only used the proof method \<open>auto\<close>.  Method \<open>simp\<close>
+is the key component of \<open>auto\<close>, but \<open>auto\<close> can do much more. In
+some cases, \<open>auto\<close> is overeager and modifies the proof state too much.
+In such cases the more predictable \<open>simp\<close> method should be used.
 Given a goal
 \begin{quote}
-@{text"1. \<lbrakk> P\<^sub>1; \<dots>; P\<^sub>m \<rbrakk> \<Longrightarrow> C"}
+\<open>1. \<lbrakk> P\<^sub>1; \<dots>; P\<^sub>m \<rbrakk> \<Longrightarrow> C\<close>
 \end{quote}
 the command
 \begin{quote}
-\isacom{apply}@{text"(simp add: th\<^sub>1 \<dots> th\<^sub>n)"}
+\isacom{apply}\<open>(simp add: th\<^sub>1 \<dots> th\<^sub>n)\<close>
 \end{quote}
-simplifies the assumptions @{text "P\<^sub>i"} and the conclusion @{text C} using
+simplifies the assumptions \<open>P\<^sub>i\<close> and the conclusion \<open>C\<close> using
 \begin{itemize}
 \item all simplification rules, including the ones coming from \isacom{datatype} and \isacom{fun},
-\item the additional lemmas @{text"th\<^sub>1 \<dots> th\<^sub>n"}, and
+\item the additional lemmas \<open>th\<^sub>1 \<dots> th\<^sub>n\<close>, and
 \item the assumptions.
 \end{itemize}
-In addition to or instead of @{text add} there is also @{text del} for removing
-simplification rules temporarily. Both are optional. Method @{text auto}
+In addition to or instead of \<open>add\<close> there is also \<open>del\<close> for removing
+simplification rules temporarily. Both are optional. Method \<open>auto\<close>
 can be modified similarly:
 \begin{quote}
-\isacom{apply}@{text"(auto simp add: \<dots> simp del: \<dots>)"}
+\isacom{apply}\<open>(auto simp add: \<dots> simp del: \<dots>)\<close>
 \end{quote}
-Here the modifiers are @{text"simp add"} and @{text"simp del"}
-instead of just @{text add} and @{text del} because @{text auto}
+Here the modifiers are \<open>simp add\<close> and \<open>simp del\<close>
+instead of just \<open>add\<close> and \<open>del\<close> because \<open>auto\<close>
 does not just perform simplification.
 
-Note that @{text simp} acts only on subgoal 1, @{text auto} acts on all
-subgoals. There is also @{text simp_all}, which applies @{text simp} to
+Note that \<open>simp\<close> acts only on subgoal 1, whereas \<open>auto\<close> acts on all
+subgoals. There is also \<open>simp_all\<close>, which applies \<open>simp\<close> to
 all subgoals.
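 
 For example, in the following goal the second assumption simplifies to
 \<open>as = []\<close>, after which the first assumption yields the conclusion;
 a single \<open>simp\<close> finds this chain by itself (a small sketch):
 \<close>
 
 lemma "\<lbrakk> as @ cs = bs @ as; [] @ as = [] @ [] \<rbrakk> \<Longrightarrow> bs = cs"
 by simp
 
 text\<open>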
 
 \subsection{Rewriting with Definitions}
@@ -502,41 +501,41 @@
 robust: if the definition has to be changed, only the proofs of the abstract
 properties will be affected.
 
-The definition of a function @{text f} is a theorem named @{text f_def}
-and can be added to a call of @{text simp} like any other theorem:
+The definition of a function \<open>f\<close> is a theorem named \<open>f_def\<close>
+and can be added to a call of \<open>simp\<close> like any other theorem:
 \begin{quote}
-\isacom{apply}@{text"(simp add: f_def)"}
+\isacom{apply}\<open>(simp add: f_def)\<close>
 \end{quote}
 In particular, let-expressions can be unfolded by
 making @{thm[source] Let_def} a simplification rule.
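 
 A minimal sketch of this pattern, assuming the name \<open>square\<close> is still
 free at this point:
 \<close>
 
 definition square :: "nat \<Rightarrow> nat" where
 "square n = n * n"
 
 lemma "square (square 2) = 16"
 by (simp add: square_def)
 
 text\<open>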
 
-\subsection{Case Splitting With @{text simp}}
+\subsection{Case Splitting With \<open>simp\<close>}
 
 Goals containing if-expressions are automatically split into two cases by
-@{text simp} using the rule
+\<open>simp\<close> using the rule
 \begin{quote}
 @{prop"P(if A then s else t) = ((A \<longrightarrow> P(s)) \<and> (~A \<longrightarrow> P(t)))"}
 \end{quote}
-For example, @{text simp} can prove
+For example, \<open>simp\<close> can prove
 \begin{quote}
 @{prop"(A \<and> B) = (if A then B else False)"}
 \end{quote}
 because both @{prop"A \<longrightarrow> (A & B) = B"} and @{prop"~A \<longrightarrow> (A & B) = False"}
 simplify to @{const True}.
 
-We can split case-expressions similarly. For @{text nat} the rule looks like this:
+We can split case-expressions similarly. For \<open>nat\<close> the rule looks like this:
 @{prop[display,margin=65,indent=4]"P(case e of 0 \<Rightarrow> a | Suc n \<Rightarrow> b n) = ((e = 0 \<longrightarrow> P a) \<and> (\<forall>n. e = Suc n \<longrightarrow> P(b n)))"}
-Case expressions are not split automatically by @{text simp}, but @{text simp}
+Case expressions are not split automatically by \<open>simp\<close>, but \<open>simp\<close>
 can be instructed to do so:
 \begin{quote}
-\isacom{apply}@{text"(simp split: nat.split)"}
+\isacom{apply}\<open>(simp split: nat.split)\<close>
 \end{quote}
 splits all case-expressions over natural numbers. For an arbitrary
-datatype @{text t} it is @{text "t.split"}\index{split@@{text".split"}} instead of @{thm[source] nat.split}.
-Method @{text auto} can be modified in exactly the same way.
-The modifier \indexed{@{text "split:"}}{split} can be followed by multiple names.
+datatype \<open>t\<close> it is \<open>t.split\<close>\index{split@\<open>.split\<close>} instead of @{thm[source] nat.split}.
+Method \<open>auto\<close> can be modified in exactly the same way.
+The modifier \indexed{\<open>split:\<close>}{split} can be followed by multiple names.
 Splitting if or case-expressions in the assumptions requires 
-@{text "split: if_splits"} or @{text "split: t.splits"}.
+\<open>split: if_splits\<close> or \<open>split: t.splits\<close>.
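 
 To illustrate the conclusion-level case (a small sketch):
 \<close>
 
 lemma "0 < (case n of 0 \<Rightarrow> Suc 0 | Suc m \<Rightarrow> Suc m)"
 by (simp split: nat.split)
 
 text\<open>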
 
 \ifsem\else
 \subsection{Rewriting \<open>let\<close> and Numerals}
@@ -547,16 +546,16 @@
 Numerals of type @{typ nat} can be converted to @{const Suc} terms with the simplification rule
 @{thm[source] numeral_eq_Suc}. This is required, for example, when a function that is defined
 by pattern matching with @{const Suc} is applied to a numeral: if \<open>f\<close> is defined by
-@{text "f 0 = ..."} and  @{text "f (Suc n) = ..."}, the simplifier cannot simplify \<open>f 2\<close> unless
+\<open>f 0 = ...\<close> and \<open>f (Suc n) = ...\<close>, the simplifier cannot simplify \<open>f 2\<close> unless
 \<open>2\<close> is converted to @{term "Suc(Suc 0)"} (via @{thm[source] numeral_eq_Suc}).
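 For instance (a hedged sketch; the function name \<open>pred2\<close> is invented
 here):
 \<close>
 
 fun pred2 :: "nat \<Rightarrow> nat" where
 "pred2 0 = 0" |
 "pred2 (Suc n) = n"
 
 lemma "pred2 2 = 1"
 by (simp add: numeral_eq_Suc)
 
 text\<open>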
 \fi
 
 \subsection*{Exercises}
 
 \exercise\label{exe:tree0}
-Define a datatype @{text tree0} of binary tree skeletons which do not store
+Define a datatype \<open>tree0\<close> of binary tree skeletons which do not store
 any information, neither in the inner nodes nor in the leaves.
-Define a function @{text "nodes :: tree0 \<Rightarrow> nat"} that counts the number of
+Define a function \<open>nodes :: tree0 \<Rightarrow> nat\<close> that counts the number of
 all nodes (inner nodes and leaves) in such a tree.
 Consider the following recursive function:
 \<close>
@@ -570,9 +569,9 @@
 text \<open>
 Find an equation expressing the size of a tree after exploding it
 (\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
-of @{term "nodes t"} and @{text n}. Prove your equation.
+of @{term "nodes t"} and \<open>n\<close>. Prove your equation.
 You may use the usual arithmetic operators, including the exponentiation
-operator ``@{text"^"}''. For example, \noquotes{@{prop [source] "2 ^ 2 = 4"}}.
+operator ``\<open>^\<close>''. For example, \noquotes{@{prop [source] "2 ^ 2 = 4"}}.
 
 Hint: simplifying with the list of theorems @{thm[source] algebra_simps}
 takes care of common algebraic properties of the arithmetic operators.
@@ -587,8 +586,8 @@
 
 text\<open>
 Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
-such that @{term"eval e x"} evaluates @{text e} at the value
-@{text x}.
+such that @{term"eval e x"} evaluates \<open>e\<close> at the value
+\<open>x\<close>.
 
 A polynomial can be represented as a list of coefficients, starting with
 the constant. For example, @{term "[4, 2, -1, 3::int]"} represents the
@@ -597,7 +596,7 @@
 that evaluates a polynomial at the given value.
 Define a function \noquotes{@{term[source] "coeffs :: exp \<Rightarrow> int list"}}
 that transforms an expression into a polynomial. This may require auxiliary
-functions. Prove that @{text coeffs} preserves the value of the expression:
+functions. Prove that \<open>coeffs\<close> preserves the value of the expression:
 \mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
 Hint: consider the hint in Exercise~\ref{exe:tree0}.
 \endexercise
--- a/src/Doc/Sugar/Sugar.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Sugar/Sugar.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -27,9 +27,9 @@
 awe at the beauty unfolding in front of their eyes. Until one day you
 come across that very critical of readers known as the ``common referee''.
 He has the nasty habit of refusing to understand unfamiliar notation
-like Isabelle's infamous @{text"\<lbrakk> \<rbrakk> \<Longrightarrow>"} no matter how many times you
-explain it in your paper. Even worse, he thinks that using @{text"\<lbrakk>
-\<rbrakk>"} for anything other than denotational semantics is a cardinal sin
+like Isabelle's infamous \<open>\<lbrakk> \<rbrakk> \<Longrightarrow>\<close> no matter how many times you
+explain it in your paper. Even worse, he thinks that using \<open>\<lbrakk>
+\<rbrakk>\<close> for anything other than denotational semantics is a cardinal sin
 that must be punished by instant rejection.
 
 
@@ -47,7 +47,7 @@
 typically in \texttt{root.tex}. For a start, you should
 \verb!\usepackage{amssymb}! --- otherwise typesetting
 @{prop[source]"\<not>(\<exists>x. P x)"} will fail because the AMS symbol
-@{text"\<nexists>"} is missing.
+\<open>\<nexists>\<close> is missing.
 \end{itemize}
 
 
@@ -57,14 +57,14 @@
 
 The formula @{prop[source]"\<not>(\<exists>x. P x)"} is typeset as @{prop"\<not>(\<exists>x. P x)"}.
 
-The predefined constructs @{text"if"}, @{text"let"} and
-@{text"case"} are set in sans serif font to distinguish them from
+The predefined constructs \<open>if\<close>, \<open>let\<close> and
+\<open>case\<close> are set in sans serif font to distinguish them from
 other functions. This improves readability:
 \begin{itemize}
-\item @{term"if b then e\<^sub>1 else e\<^sub>2"} instead of @{text"if b then e\<^sub>1 else e\<^sub>2"}.
-\item @{term"let x = e\<^sub>1 in e\<^sub>2"} instead of @{text"let x = e\<^sub>1 in e\<^sub>2"}.
+\item @{term"if b then e\<^sub>1 else e\<^sub>2"} instead of \<open>if b then e\<^sub>1 else e\<^sub>2\<close>.
+\item @{term"let x = e\<^sub>1 in e\<^sub>2"} instead of \<open>let x = e\<^sub>1 in e\<^sub>2\<close>.
 \item @{term"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"} instead of\\
-      @{text"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"}.
+      \<open>case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2\<close>.
 \end{itemize}
 
 \subsection{Sets}
@@ -72,11 +72,11 @@
 Although set syntax in HOL is already close to
 standard, we provide a few further improvements:
 \begin{itemize}
-\item @{term"{x. P}"} instead of @{text"{x. P}"}.
-\item @{term"{}"} instead of @{text"{}"}, where
+\item @{term"{x. P}"} instead of \<open>{x. P}\<close>.
+\item @{term"{}"} instead of \<open>{}\<close>, where
  @{term"{}"} is also input syntax.
-\item @{term"insert a (insert b (insert c M))"} instead of @{text"insert a (insert b (insert c M))"}.
-\item @{term"card A"} instead of @{text"card A"}.
+\item @{term"insert a (insert b (insert c M))"} instead of \<open>insert a (insert b (insert c M))\<close>.
+\item @{term"card A"} instead of \<open>card A\<close>.
 \end{itemize}
 
 
@@ -84,21 +84,21 @@
 
 If lists are used heavily, the following notations increase readability:
 \begin{itemize}
-\item @{term"x # xs"} instead of @{text"x # xs"},
+\item @{term"x # xs"} instead of \<open>x # xs\<close>,
       where @{term"x # xs"} is also input syntax.
-\item @{term"length xs"} instead of @{text"length xs"}.
-\item @{term"nth xs n"} instead of @{text"nth xs n"},
-      the $n$th element of @{text xs}.
+\item @{term"length xs"} instead of \<open>length xs\<close>.
+\item @{term"nth xs n"} instead of \<open>nth xs n\<close>,
+      the $n$th element of \<open>xs\<close>.
 
 \item Human readers are good at converting automatically from lists to
 sets. Hence \texttt{OptionalSugar} contains syntax for suppressing the
 conversion function @{const set}: for example, @{prop[source]"x \<in> set xs"}
 becomes @{prop"x \<in> set xs"}.
 
-\item The @{text"@"} operation associates implicitly to the right,
+\item The \<open>@\<close> operation associates implicitly to the right,
 which leads to unpleasant line breaks if the term is too long for one
 line. To avoid this, \texttt{OptionalSugar} contains syntax to group
-@{text"@"}-terms to the left before printing, which leads to better
+\<open>@\<close>-terms to the left before printing, which leads to better
 line breaking behaviour:
 @{term[display]"term\<^sub>0 @ term\<^sub>1 @ term\<^sub>2 @ term\<^sub>3 @ term\<^sub>4 @ term\<^sub>5 @ term\<^sub>6 @ term\<^sub>7 @ term\<^sub>8 @ term\<^sub>9 @ term\<^sub>1\<^sub>0"}
 
@@ -110,10 +110,10 @@
 Coercions between numeric types are alien to mathematicians who
 consider, for example, @{typ nat} as a subset of @{typ int}.
 \texttt{OptionalSugar} contains syntax for suppressing numeric coercions such
-as @{const int} @{text"::"} @{typ"nat \<Rightarrow> int"}. For example,
+as @{const int} \<open>::\<close> @{typ"nat \<Rightarrow> int"}. For example,
 @{term[source]"int 5"} is printed as @{term "int 5"}. Embeddings of types
 @{typ nat}, @{typ int}, @{typ real} are covered; non-injective coercions such
-as @{const nat} @{text"::"} @{typ"int \<Rightarrow> nat"} are not and should not be
+as @{const nat} \<open>::\<close> @{typ"int \<Rightarrow> nat"} are not and should not be
 hidden.
 
 
@@ -161,7 +161,7 @@
 subsection \<open>Qualified names\<close>
 
 text\<open>If there are multiple declarations of the same name, Isabelle prints
-the qualified name, for example @{text "T.length"}, where @{text T} is the
+the qualified name, for example \<open>T.length\<close>, where \<open>T\<close> is the
 theory it is defined in, to distinguish it from the predefined @{const[source]
 "List.length"}. In case there is no danger of confusion, you can insist on
 short names (no qualifiers) by setting the \verb!names_short!
@@ -184,12 +184,12 @@
 \showout @{thm fst_conv[of _ DUMMY]}
 \end{quote}
 As expected, the second argument has been replaced by ``\_'',
-but the first argument is the ugly @{text "x1.0"}, a schematic variable
+but the first argument is the ugly \<open>x1.0\<close>, a schematic variable
 with suppressed question mark. Schematic variables that end in digits,
-e.g. @{text"x1"}, are still printed with a trailing @{text".0"},
-e.g. @{text"x1.0"}, their internal index. This can be avoided by
+e.g. \<open>x1\<close>, are still printed with a trailing \<open>.0\<close>,
+e.g. \<open>x1.0\<close>, their internal index. This can be avoided by
 turning the last digit into a subscript: write \<^verbatim>\<open>x\<^sub>1\<close> and
-obtain the much nicer @{text"x\<^sub>1"}. Alternatively, you can display trailing digits of
+obtain the much nicer \<open>x\<^sub>1\<close>. Alternatively, you can display trailing digits of
 schematic and free variables as subscripts with the \texttt{sub} style:
 \begin{quote}
 \verb!@!\verb!{thm (sub) fst_conv[of _ DUMMY]}!\\
@@ -277,7 +277,7 @@
 \end{center}
 
 Limitations: 1. Premises and conclusion must each not be longer than
-the line.  2. Premises that are @{text"\<Longrightarrow>"}-implications are again
+the line.  2. Premises that are \<open>\<Longrightarrow>\<close>-implications are again
 displayed with a horizontal line, which looks at least unusual.
 
 
@@ -392,7 +392,7 @@
 
 The \verb!thm! antiquotation works nicely for single theorems, but
 sets of equations as used in definitions are more difficult to
-typeset nicely: people tend to prefer aligned @{text "="} signs.
+typeset nicely: people tend to prefer aligned \<open>=\<close> signs.
 
 To deal with such cases where it is desirable to dive into the structure
 of terms and theorems, Isabelle offers antiquotations featuring ``styles'':
@@ -409,7 +409,7 @@
  styles, namely \verb!lhs! and \verb!rhs!, \verb!prem! with one argument, and \verb!concl!.
 For example, the output
 \begin{center}
-\begin{tabular}{l@ {~~@{text "="}~~}l}
+\begin{tabular}{l@ {~~\<open>=\<close>~~}l}
 @{thm (lhs) append_Nil} & @{thm (rhs) append_Nil}\\
 @{thm (lhs) append_Cons} & @{thm (rhs) append_Cons}
 \end{tabular}
@@ -428,7 +428,7 @@
 as an antiquotation. The styles \verb!lhs! and \verb!rhs!
 extract the left hand side (or right hand side respectively) from the
 conclusion of propositions consisting of a binary operator
-(e.~g.~@{text "="}, @{text "\<equiv>"}, @{text "<"}).
+(e.~g.~\<open>=\<close>, \<open>\<equiv>\<close>, \<open><\<close>).
 
 Likewise, \verb!concl! may be used as a style to show just the
 conclusion of a proposition. For example, take \verb!hd_Cons_tl!:
--- a/src/Doc/Tutorial/Advanced/Partial.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/Partial.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -4,7 +4,7 @@
 that all functions in HOL are total.  We cannot hope to define
 truly partial functions, but must make them total.  A straightforward
 method is to lift the result type of the function from $\tau$ to
-$\tau$~@{text option} (see \ref{sec:option}), where @{term None} is
+$\tau$~\<open>option\<close> (see \ref{sec:option}), where @{term None} is
 returned if the function is applied to an argument not in its
 domain. Function @{term assoc} in \S\ref{sec:Trie} is a simple example.
 We do not pursue this schema further because it should be clear
@@ -68,7 +68,7 @@
 
 text\<open>\noindent Of course we could also have defined
 @{term"divi(m,0)"} to be some specific number, for example 0. The
-latter option is chosen for the predefined @{text div} function, which
+latter option is chosen for the predefined \<open>div\<close> function, which
 simplifies proofs at the expense of deviating from the
 standard mathematical division function.
 
@@ -150,7 +150,7 @@
 % which is not part of {text Main} but needs to
 % be included explicitly among the ancestor theories.
 
-Constant @{term while} is of type @{text"('a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a"}
+Constant @{term while} is of type \<open>('a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a\<close>
 and satisfies the recursion equation @{thm[display]while_unfold[no_vars]}
 That is, @{term"while b c s"} is equivalent to the imperative program
 \begin{verbatim}
@@ -184,7 +184,7 @@
 Let us now prove that @{const find2} does indeed find a fixed point. Instead
 of induction we apply the above while rule, suitably instantiated.
 Only the final premise of @{thm[source]while_rule} is left unproved
-by @{text auto} but falls to @{text simp}:
+by \<open>auto\<close> but falls to \<open>simp\<close>:
 \<close>
 
 lemma lem: "wf(step1 f) \<Longrightarrow>
--- a/src/Doc/Tutorial/Advanced/WFrec.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/WFrec.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -5,7 +5,7 @@
 functions. Sometimes this can be inconvenient or
 impossible. Fortunately, \isacommand{recdef} supports much more
 general definitions. For example, termination of Ackermann's function
-can be shown by means of the \rmindex{lexicographic product} @{text"<*lex*>"}:
+can be shown by means of the \rmindex{lexicographic product} \<open><*lex*>\<close>:
 \<close>
 
 consts ack :: "nat\<times>nat \<Rightarrow> nat"
@@ -108,7 +108,7 @@
 (hints recdef_wf: wf_greater)
 
 text\<open>\noindent
-Alternatively, we could have given @{text "measure (\<lambda>k::nat. 10-k)"} for the
+Alternatively, we could have given \<open>measure (\<lambda>k::nat. 10-k)\<close> for the
 well-founded relation in our \isacommand{recdef}.  However, the arithmetic
 goal in the lemma above would have arisen instead in the \isacommand{recdef}
 termination proof, where we have less control.  A tailor-made termination
--- a/src/Doc/Tutorial/Advanced/simp2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/simp2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -23,7 +23,7 @@
 True} because we may use @{prop"xs = []"} when simplifying @{prop"xs@xs =
 xs"}. The generation of contextual information during simplification is
 controlled by so-called \bfindex{congruence rules}. This is the one for
-@{text"\<longrightarrow>"}:
+\<open>\<longrightarrow>\<close>:
 @{thm[display]imp_cong[no_vars]}
 It should be read as follows:
 In order to simplify @{prop"P-->Q"} to @{prop"P'-->Q'"},
@@ -34,7 +34,7 @@
 quantifiers supply contextual information about the bound variable:
 @{thm[display,eta_contract=false,margin=60]ball_cong[no_vars]}
 One congruence rule for conditional expressions supplies contextual
-information for simplifying the @{text then} and @{text else} cases:
+information for simplifying the \<open>then\<close> and \<open>else\<close> cases:
 @{thm[display]if_cong[no_vars]}
 An alternative congruence rule for conditional expressions
 actually \emph{prevents} simplification of some arguments:
@@ -42,19 +42,19 @@
 Only the first argument is simplified; the others remain unchanged.
 This makes simplification much faster and is faithful to the evaluation
 strategy in programming languages, which is why this is the default
-congruence rule for @{text "if"}. Analogous rules control the evaluation of
-@{text case} expressions.
+congruence rule for \<open>if\<close>. Analogous rules control the evaluation of
+\<open>case\<close> expressions.
 
 You can declare your own congruence rules with the attribute \attrdx{cong},
 either globally, in the usual manner,
 \begin{quote}
-\isacommand{declare} \textit{theorem-name} @{text"[cong]"}
+\isacommand{declare} \textit{theorem-name} \<open>[cong]\<close>
 \end{quote}
-or locally in a @{text"simp"} call by adding the modifier
+or locally in a \<open>simp\<close> call by adding the modifier
 \begin{quote}
-@{text"cong:"} \textit{list of theorem names}
+\<open>cong:\<close> \textit{list of theorem names}
 \end{quote}
-The effect is reversed by @{text"cong del"} instead of @{text cong}.
+The effect is reversed by \<open>cong del\<close> instead of \<open>cong\<close>.
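 
 For example, with @{thm[source] if_cong} the \<open>then\<close>-branch below may be
 simplified under the assumption \<open>x = 0\<close> (a small sketch; plain \<open>simp\<close>
 would also succeed here via splitting):
 \<close>
 
 lemma "(if x = 0 then x + y else x) = (if x = 0 then y else (x::nat))"
 by (simp cong: if_cong)
 
 text\<open>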
 
 \begin{warn}
 The congruence rule @{thm[source]conj_cong}
@@ -80,7 +80,7 @@
 lexicographic ordering on terms. For example, commutativity rewrites
 @{term"b+a"} to @{term"a+b"}, but then stops because @{term"a+b"} is strictly
 smaller than @{term"b+a"}.  Permutative rewrite rules can be turned into
-simplification rules in the usual manner via the @{text simp} attribute; the
+simplification rules in the usual manner via the \<open>simp\<close> attribute; the
 simplifier recognizes their special status automatically.
 
 Permutative rewrite rules are most effective in the case of
@@ -102,7 +102,7 @@
 \[\def\maps#1{~\stackrel{#1}{\leadsto}~}
  f(f(b,c),a) \maps{A} f(b,f(c,a)) \maps{C} f(b,f(a,c)) \maps{LC} f(a,f(b,c)) \]
 
-Note that ordered rewriting for @{text"+"} and @{text"*"} on numbers is rarely
+Note that ordered rewriting for \<open>+\<close> and \<open>*\<close> on numbers is rarely
 necessary because the built-in arithmetic prover often succeeds without
 such tricks.
 \<close>
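 
 text\<open>
 For example, the associativity, commutativity and left-commutativity
 rules for \<open>+\<close> and \<open>*\<close> are collected under the name
 @{thm[source] ac_simps}, so goals of the following kind fall to
 ordered rewriting (a small sketch):
 \<close>
 
 lemma "b + (c + a) = a + (b + (c::nat))"
 by (simp add: ac_simps)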
@@ -132,21 +132,21 @@
 variables. Thus all ordinary rewrite rules, where all unknowns are
 of base type, for example @{thm add.assoc}, are acceptable: if an unknown is
 of base type, it cannot have any arguments. Additionally, the rule
-@{text"(\<forall>x. ?P x \<and> ?Q x) = ((\<forall>x. ?P x) \<and> (\<forall>x. ?Q x))"} is also acceptable, in
-both directions: all arguments of the unknowns @{text"?P"} and
-@{text"?Q"} are distinct bound variables.
+\<open>(\<forall>x. ?P x \<and> ?Q x) = ((\<forall>x. ?P x) \<and> (\<forall>x. ?Q x))\<close> is also acceptable, in
+both directions: all arguments of the unknowns \<open>?P\<close> and
+\<open>?Q\<close> are distinct bound variables.
 
 If the left-hand side is not a higher-order pattern, all is not lost.
 The simplifier will still try to apply the rule provided it
 matches directly: without much $\lambda$-calculus hocus
-pocus.  For example, @{text"(?f ?x \<in> range ?f) = True"} rewrites
+pocus.  For example, \<open>(?f ?x \<in> range ?f) = True\<close> rewrites
 @{term"g a \<in> range g"} to @{const True}, but will fail to match
-@{text"g(h b) \<in> range(\<lambda>x. g(h x))"}.  However, you can
+\<open>g(h b) \<in> range(\<lambda>x. g(h x))\<close>.  However, you can
 eliminate the offending subterms --- those that are not patterns ---
 by adding new variables and conditions.
-In our example, we eliminate @{text"?f ?x"} and obtain
- @{text"?y =
-?f ?x \<Longrightarrow> (?y \<in> range ?f) = True"}, which is fine
+In our example, we eliminate \<open>?f ?x\<close> and obtain
+ \<open>?y =
+?f ?x \<Longrightarrow> (?y \<in> range ?f) = True\<close>, which is fine
 as a conditional rewrite rule since conditions can be arbitrary
 terms.  However, this trick is not a panacea because the newly
 introduced conditions may be hard to solve.
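 A hedged rendering of the transformed rule as an actual lemma (the
 name \<open>range_eqI2\<close> is invented here):
 \<close>
 
 lemma range_eqI2: "y = f x \<Longrightarrow> (y \<in> range f) = True"
 by (auto intro: rangeI)
 
 text\<open>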
@@ -170,7 +170,7 @@
 P \land Q &\mapsto& P,\ Q \nonumber\\
 \forall x.~P~x &\mapsto& P~\Var{x}\nonumber\\
 \forall x \in A.\ P~x &\mapsto& \Var{x} \in A \Longrightarrow P~\Var{x} \nonumber\\
-@{text "if"}\ P\ @{text then}\ Q\ @{text else}\ R &\mapsto&
+\<open>if\<close>\ P\ \<open>then\<close>\ Q\ \<open>else\<close>\ R &\mapsto&
  P \Longrightarrow Q,\ \neg P \Longrightarrow R \nonumber
 \end{eqnarray}
 Once this conversion process is finished, all remaining non-equations
--- a/src/Doc/Tutorial/CTL/Base.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/CTL/Base.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -12,7 +12,7 @@
 we consider a simple modal logic called propositional dynamic
 logic (PDL)\@.  We then proceed to the temporal logic CTL, which is
 used in many real
-model checkers. In each case we give both a traditional semantics (@{text \<Turnstile>}) and a
+model checkers. In each case we give both a traditional semantics (\<open>\<Turnstile>\<close>) and a
 recursive function @{term mc} that maps a formula into the set of all states of
 the system where the formula is valid. If the system has a finite number of
 states, @{term mc} is directly executable: it is a model checker, albeit an
--- a/src/Doc/Tutorial/CTL/CTL.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/CTL/CTL.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -7,7 +7,7 @@
 The semantics of PDL only needs reflexive transitive closure.
 Let us be adventurous and introduce a more expressive temporal operator.
 We extend the datatype
-@{text formula} by a new constructor
+\<open>formula\<close> by a new constructor
 \<close>
 (*<*)
 datatype formula = Atom "atom"
@@ -98,7 +98,7 @@
 (*>*)
 text\<open>
 All we need to prove now is  @{prop"mc(AF f) = {s. s \<Turnstile> AF f}"}, which states
-that @{term mc} and @{text"\<Turnstile>"} agree for @{const AF}\@.
+that @{term mc} and \<open>\<Turnstile>\<close> agree for @{const AF}\@.
 This time we prove the two inclusions separately, starting
 with the easy one:
 \<close>
@@ -123,7 +123,7 @@
 In this remaining case, we set @{term t} to @{term"p(1::nat)"}.
 The rest is automatic, which is surprising because it involves
 finding the instantiation @{term"\<lambda>i::nat. p(i+1)"}
-for @{text"\<forall>p"}.
+for \<open>\<forall>p\<close>.
 \<close>
 
 apply(erule_tac x = "p 1" in allE)
@@ -167,7 +167,7 @@
 
 text\<open>\noindent
 Element @{term"n+1::nat"} on this path is some arbitrary successor
-@{term t} of element @{term n} such that @{term"Q t"} holds.  Remember that @{text"SOME t. R t"}
+@{term t} of element @{term n} such that @{term"Q t"} holds.  Remember that \<open>SOME t. R t\<close>
 is some arbitrary but fixed @{term t} such that @{prop"R t"} holds (see \S\ref{sec:SOME}). Of
 course, such a @{term t} need not exist, but that is of no
 concern to us since we will only use @{const path} when a
@@ -218,28 +218,28 @@
 holds. However, we first have to show that such a @{term t} actually exists! This reasoning
 is embodied in the theorem @{thm[source]someI2_ex}:
 @{thm[display,eta_contract=false]someI2_ex}
-When we apply this theorem as an introduction rule, @{text"?P x"} becomes
-@{prop"(s, x) \<in> M \<and> Q x"} and @{text"?Q x"} becomes @{prop"(s,x) \<in> M"} and we have to prove
+When we apply this theorem as an introduction rule, \<open>?P x\<close> becomes
+@{prop"(s, x) \<in> M \<and> Q x"} and \<open>?Q x\<close> becomes @{prop"(s,x) \<in> M"} and we have to prove
 two subgoals: @{prop"\<exists>a. (s, a) \<in> M \<and> Q a"}, which follows from the assumptions, and
 @{prop"(s, x) \<in> M \<and> Q x \<Longrightarrow> (s,x) \<in> M"}, which is trivial. Thus it is not surprising that
-@{text fast} can prove the base case quickly:
+\<open>fast\<close> can prove the base case quickly:
 \<close>
 
  apply(fast intro: someI2_ex)
 
 txt\<open>\noindent
 What is worth noting here is that we have used \methdx{fast} rather than
-@{text blast}.  The reason is that @{text blast} would fail because it cannot
+\<open>blast\<close>.  The reason is that \<open>blast\<close> would fail because it cannot
 cope with @{thm[source]someI2_ex}: unifying its conclusion with the current
 subgoal is non-trivial because of the nested schematic variables. For
-efficiency reasons @{text blast} does not even attempt such unifications.
-Although @{text fast} can in principle cope with complicated unification
+efficiency reasons \<open>blast\<close> does not even attempt such unifications.
+Although \<open>fast\<close> can in principle cope with complicated unification
 problems, in practice the number of unifiers arising is often prohibitive and
 the offending rule may need to be applied explicitly rather than
 automatically. This is what happens in the step case.
 
 The induction step is similar, but more involved, because now we face nested
-occurrences of @{text SOME}. As a result, @{text fast} is no longer able to
+occurrences of \<open>SOME\<close>. As a result, \<open>fast\<close> is no longer able to
 solve the subgoal and we apply @{thm[source]someI2_ex} by hand.  We merely
 show the proof commands but do not describe the details:
 \<close>
@@ -320,7 +320,7 @@
 simpler arguments.
 
 The main theorem is proved as for PDL, except that we also derive the
-necessary equality @{text"lfp(af A) = ..."} by combining
+necessary equality \<open>lfp(af A) = ...\<close> by combining
 @{thm[source]AF_lemma1} and @{thm[source]AF_lemma2} on the spot:
 \<close>
 
@@ -439,7 +439,7 @@
 our model checkers.  It is clear that if all sets are finite, they can be
 represented as lists and the usual set operations are easily
 implemented. Only @{const lfp} requires a little thought.  Fortunately, theory
-@{text While_Combinator} in the Library~@{cite "HOL-Library"} provides a
+\<open>While_Combinator\<close> in the Library~@{cite "HOL-Library"} provides a
 theorem stating that in the case of finite sets and a monotone
 function~@{term F}, the value of \mbox{@{term"lfp F"}} can be computed by
 iterated application of @{term F} to~@{term"{}"} until a fixed point is
--- a/src/Doc/Tutorial/CTL/CTLind.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/CTL/CTLind.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -10,7 +10,7 @@
 In \S\ref{sec:CTL} we gave a fairly involved proof of the correctness of a
 model checker for CTL\@. In particular the proof of the
 @{thm[source]infinity_lemma} on the way to @{thm[source]AF_lemma2} is not as
-simple as one might expect, due to the @{text SOME} operator
+simple as one might expect, due to the \<open>SOME\<close> operator
 involved. Below we give a simpler proof of @{thm[source]AF_lemma2}
 based on an auxiliary inductive definition.
 
@@ -39,7 +39,7 @@
 The proof is by induction on @{prop"f(0::nat) \<in> Avoid s A"}. However,
 this requires the following
 reformulation, as explained in \S\ref{sec:ind-var-in-prems} above;
-the @{text rule_format} directive undoes the reformulation after the proof.
+the \<open>rule_format\<close> directive undoes the reformulation after the proof.
 \<close>
 
 lemma ex_infinite_path[rule_format]:
@@ -53,10 +53,10 @@
 done
 
 text\<open>\noindent
-The base case (@{prop"t = s"}) is trivial and proved by @{text blast}.
+The base case (@{prop"t = s"}) is trivial and proved by \<open>blast\<close>.
 In the induction step, we have an infinite @{term A}-avoiding path @{term f}
 starting from @{term u}, a successor of @{term t}. Now we simply instantiate
-the @{text"\<forall>f\<in>Paths t"} in the induction hypothesis by the path starting with
+the \<open>\<forall>f\<in>Paths t\<close> in the induction hypothesis by the path starting with
 @{term t} and continuing with @{term f}. That is what the above $\lambda$-term
 expresses.  Simplification shows that this is a path starting with @{term t} 
 and that the instantiated induction hypothesis implies the conclusion.
@@ -129,11 +129,11 @@
 done
 
 text\<open>
-The @{text"(no_asm)"} modifier of the @{text"rule_format"} directive in the
+The \<open>(no_asm)\<close> modifier of the \<open>rule_format\<close> directive in the
 statement of the lemma means
-that the assumption is left unchanged; otherwise the @{text"\<forall>p"} 
+that the assumption is left unchanged; otherwise the \<open>\<forall>p\<close> 
 would be turned
-into a @{text"\<And>p"}, which would complicate matters below. As it is,
+into a \<open>\<And>p\<close>, which would complicate matters below. As it is,
 @{thm[source]Avoid_in_lfp} is now
 @{thm[display]Avoid_in_lfp[no_vars]}
 The main theorem is simply the corollary where @{prop"t = s"},
--- a/src/Doc/Tutorial/CTL/PDL.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/CTL/PDL.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -5,7 +5,7 @@
 text\<open>\index{PDL|(}
 The formulae of PDL are built up from atomic propositions via
 negation and conjunction and the two temporal
-connectives @{text AX} and @{text EF}\@. Since formulae are essentially
+connectives \<open>AX\<close> and \<open>EF\<close>\@. Since formulae are essentially
 syntax trees, they are naturally modelled as a datatype:%
 \footnote{The customary definition of PDL
 @{cite "HarelKT-DL"} looks quite different from ours, but the two are easily
@@ -22,8 +22,8 @@
 This resembles the boolean expression case study in
 \S\ref{sec:boolex}.
 A validity relation between states and formulae specifies the semantics.
-The syntax annotation allows us to write @{text"s \<Turnstile> f"} instead of
-\hbox{@{text"valid s f"}}. The definition is by recursion over the syntax:
+The syntax annotation allows us to write \<open>s \<Turnstile> f\<close> instead of
+\hbox{\<open>valid s f\<close>}. The definition is by recursion over the syntax:
 \<close>
 
 primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool"   ("(_ \<Turnstile> _)" [80,80] 80)
@@ -38,7 +38,7 @@
 The first three equations should be self-explanatory. The temporal formula
 @{term"AX f"} means that @{term f} is true in \emph{A}ll ne\emph{X}t states whereas
 @{term"EF f"} means that there \emph{E}xists some \emph{F}uture state in which @{term f} is
-true. The future is expressed via @{text"\<^sup>*"}, the reflexive transitive
+true. The future is expressed via \<open>\<^sup>*\<close>, the reflexive transitive
 closure. Because of reflexivity, the future includes the present.
 
 Now we come to the model checker itself. It maps a formula into the
@@ -54,7 +54,7 @@
 
 text\<open>\noindent
 Only the equation for @{term EF} deserves some comments. Remember that the
-postfix @{text"\<inverse>"} and the infix @{text"``"} are predefined and denote the
+postfix \<open>\<inverse>\<close> and the infix \<open>``\<close> are predefined and denote the
 converse of a relation and the image of a set under a relation.  Thus
 @{term "M\<inverse> `` T"} is the set of all predecessors of @{term T} and the least
 fixed point (@{term lfp}) of @{term"\<lambda>T. mc f \<union> M\<inverse> `` T"} is the least set
@@ -73,7 +73,7 @@
 done
 
 text\<open>\noindent
-Now we can relate model checking and semantics. For the @{text EF} case we need
+Now we can relate model checking and semantics. For the \<open>EF\<close> case we need
 a separate lemma:
 \<close>
 
@@ -106,7 +106,7 @@
 \ \ \ \ \ \ \ \ \ x\ {\isasymin}\ M{\isasyminverse}\ {\isacharbackquote}{\isacharbackquote}\ {\isacharparenleft}lfp\ {\isacharparenleft}\dots{\isacharparenright}\ {\isasyminter}\ {\isacharbraceleft}x{\isachardot}\ {\isasymexists}t{\isachardot}\ {\isacharparenleft}x{\isacharcomma}\ t{\isacharparenright}\ {\isasymin}\ M\isactrlsup {\isacharasterisk}\ {\isasymand}\ t\ {\isasymin}\ A{\isacharbraceright}{\isacharparenright}\isanewline
 \ \ \ \ \ \ \ \ {\isasymLongrightarrow}\ {\isasymexists}t{\isachardot}\ {\isacharparenleft}x{\isacharcomma}\ t{\isacharparenright}\ {\isasymin}\ M\isactrlsup {\isacharasterisk}\ {\isasymand}\ t\ {\isasymin}\ A
 \end{isabelle}
-It is proved by @{text blast}, using the transitivity of 
+It is proved by \<open>blast\<close>, using the transitivity of 
 \isa{M\isactrlsup {\isacharasterisk}}.
 \<close>
 
@@ -161,7 +161,7 @@
 
 text\<open>
 The main theorem is proved in the familiar manner: induction followed by
-@{text auto} augmented with the lemma as a simplification rule.
+\<open>auto\<close> augmented with the lemma as a simplification rule.
 \<close>
 
 theorem "mc f = {s. s \<Turnstile> f}"
@@ -173,8 +173,8 @@
 \begin{exercise}
 @{term AX} has a dual operator @{term EN} 
 (``there exists a next state such that'')%
-\footnote{We cannot use the customary @{text EX}: it is reserved
-as the \textsc{ascii}-equivalent of @{text"\<exists>"}.}
+\footnote{We cannot use the customary \<open>EX\<close>: it is reserved
+as the \textsc{ascii}-equivalent of \<open>\<exists>\<close>.}
 with the intended semantics
 @{prop[display]"(s \<Turnstile> EN f) = (\<exists>t. (s,t) \<in> M \<and> t \<Turnstile> f)"}
 Fortunately, @{term"EN f"} can already be expressed as a PDL formula. How?
--- a/src/Doc/Tutorial/CodeGen/CodeGen.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/CodeGen/CodeGen.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -37,7 +37,7 @@
 The stack machine has three instructions: load a constant value onto the
 stack, load the contents of an address onto the stack, and apply a
 binary operation to the two topmost elements of the stack, replacing them by
-the result. As for @{text"expr"}, addresses and values are type parameters:
+the result. As for \<open>expr\<close>, addresses and values are type parameters:
 \<close>
 
 datatype (dead 'a, 'v) instr = Const 'v
@@ -46,7 +46,7 @@
 
 text\<open>
 The execution of the stack machine is modelled by a function
-@{text"exec"} that takes a list of instructions, a store (modelled as a
+\<open>exec\<close> that takes a list of instructions, a store (modelled as a
 function from addresses to values, just like the environment for
 evaluating expressions), and a stack (modelled as a list) of values,
 and returns the stack at the end of the execution --- the store remains
@@ -103,14 +103,14 @@
 txt\<open>\noindent
 This requires induction on @{term"xs"} and ordinary simplification for the
 base cases. In the induction step, simplification leaves us with a formula
-that contains two @{text"case"}-expressions over instructions. Thus we add
+that contains two \<open>case\<close>-expressions over instructions. Thus we add
 automatic case splitting, which finishes the proof:
 \<close>
 apply(induct_tac xs, simp, simp split: instr.split)
 (*<*)done(*>*)
 text\<open>\noindent
 Note that because both \methdx{simp_all} and \methdx{auto} perform simplification, they can
-be modified in the same way as @{text simp}.  Thus the proof can be
+be modified in the same way as \<open>simp\<close>.  Thus the proof can be
 rewritten as
 \<close>
 (*<*)
--- a/src/Doc/Tutorial/Datatype/ABexpr.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/ABexpr.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -27,10 +27,10 @@
                  | Neg  "'a bexp"
 
 text\<open>\noindent
-Type @{text"aexp"} is similar to @{text"expr"} in \S\ref{sec:ExprCompiler},
-except that we have added an @{text IF} constructor,
+Type \<open>aexp\<close> is similar to \<open>expr\<close> in \S\ref{sec:ExprCompiler},
+except that we have added an \<open>IF\<close> constructor,
 fixed the values to be of type @{typ"nat"} and declared the two binary
-operations @{text Sum} and @{term"Diff"}.  Boolean
+operations \<open>Sum\<close> and @{term"Diff"}.  Boolean
 expressions can be arithmetic comparisons, conjunctions and negations.
 The semantics is given by two evaluation functions:
 \<close>
@@ -107,19 +107,19 @@
 \[ P@1(x@1)\ \land \dots \land P@n(x@n) \]
 where each variable $x@i$ is of type $\tau@i$. Induction is started by
 \begin{isabelle}
-\isacommand{apply}@{text"(induct_tac"} $x@1$ \isacommand{and} \dots\ \isacommand{and} $x@n$@{text ")"}
+\isacommand{apply}\<open>(induct_tac\<close> $x@1$ \isacommand{and} \dots\ \isacommand{and} $x@n$\<open>)\<close>
 \end{isabelle}
 
 \begin{exercise}
-  Define a function @{text"norma"} of type @{typ"'a aexp => 'a aexp"} that
+  Define a function \<open>norma\<close> of type @{typ"'a aexp => 'a aexp"} that
   replaces @{term"IF"}s with complex boolean conditions by nested
   @{term"IF"}s; it should eliminate the constructors
   @{term"And"} and @{term"Neg"}, leaving only @{term"Less"}.
-  Prove that @{text"norma"}
-  preserves the value of an expression and that the result of @{text"norma"}
+  Prove that \<open>norma\<close>
+  preserves the value of an expression and that the result of \<open>norma\<close>
   is really normal, i.e.\ no more @{term"And"}s and @{term"Neg"}s occur in
   it.  ({\em Hint:} proceed as in \S\ref{sec:boolex} and read the discussion
-  of type annotations following lemma @{text subst_id} below).
+  of type annotations following lemma \<open>subst_id\<close> below).
 \end{exercise}
 \<close>
 (*<*)
--- a/src/Doc/Tutorial/Datatype/Fundata.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/Fundata.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -16,7 +16,7 @@
 root is labeled with 0 and whose $i$th subtree is labeled with $i$ and
 has merely @{term"Tip"}s as further subtrees.
 
-Function @{term"map_bt"} applies a function to all labels in a @{text"bigtree"}:
+Function @{term"map_bt"} applies a function to all labels in a \<open>bigtree\<close>:
 \<close>
 
 primrec map_bt :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a,'i)bigtree \<Rightarrow> ('b,'i)bigtree"
--- a/src/Doc/Tutorial/Datatype/Nested.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/Nested.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -15,7 +15,7 @@
 datatype ('v,'f)"term" = Var 'v | App 'f "('v,'f)term list"
 
 text\<open>\noindent
-Note that we need to quote @{text term} on the left to avoid confusion with
+Note that we need to quote \<open>term\<close> on the left to avoid confusion with
 the Isabelle command \isacommand{term}.
 Parameter @{typ"'v"} is the type of variables and @{typ"'f"} the type of
 function symbols.
@@ -23,10 +23,10 @@
   [Var y]]"}, where @{term f}, @{term g}, @{term x}, @{term y} are
 suitable values, e.g.\ numbers or strings.
 
-What complicates the definition of @{text term} is the nested occurrence of
-@{text term} inside @{text list} on the right-hand side. In principle,
+What complicates the definition of \<open>term\<close> is the nested occurrence of
+\<open>term\<close> inside \<open>list\<close> on the right-hand side. In principle,
 nested recursion can be eliminated in favour of mutual recursion by unfolding
-the offending datatypes, here @{text list}. The result for @{text term}
+the offending datatypes, here \<open>list\<close>. The result for \<open>term\<close>
 would be something like
 \medskip
 
@@ -36,7 +36,7 @@
 \noindent
 Although we do not recommend this unfolding to the user, it shows how to
 simulate nested recursion by mutual recursion.
-Now we return to the initial definition of @{text term} using
+Now we return to the initial definition of \<open>term\<close> using
 nested recursion.
 
 Let us define a substitution function on terms. Because terms involve term
@@ -75,7 +75,7 @@
 leaves variables unchanged: @{prop"subst Var (Var x) = Var x"}. Note also
 that the type annotations are necessary because otherwise there is nothing in
 the goal to enforce that both halves of the goal talk about the same type
-parameters @{text"('v,'f)"}. As a result, induction would fail
+parameters \<open>('v,'f)\<close>. As a result, induction would fail
 because the two halves of the goal would be unrelated.
 
 \begin{exercise}
@@ -83,7 +83,7 @@
 roughly as follows:
 @{text[display]"subst (f \<circ> g) t = subst f (subst g t)"}
 Correct this statement (you will find that it does not type-check),
-strengthen it, and prove it. (Note: @{text"\<circ>"} is function composition;
+strengthen it, and prove it. (Note: \<open>\<circ>\<close> is function composition;
 its definition is found in theorem @{thm[source]o_def}).
 \end{exercise}
 \begin{exercise}\label{ex:trev-trev}
@@ -97,7 +97,7 @@
 unnecessary. The @{term App}-case can be defined directly as
 @{term[display]"subst s (App f ts) = App f (map (subst s) ts)"}
 where @{term"map"} is the standard list function such that
-@{text"map f [x1,...,xn] = [f x1,...,f xn]"}. This is true, but Isabelle
+\<open>map f [x1,...,xn] = [f x1,...,f xn]\<close>. This is true, but Isabelle
 insists on the conjunctive format. Fortunately, we can easily \emph{prove}
 that the suggested equation holds:
 \<close>
@@ -143,17 +143,17 @@
 text\<open>\noindent The advantage is that now we have replaced @{const
 substs} by @{const map}, we can profit from the large number of
 pre-proved lemmas about @{const map}.  Unfortunately, inductive proofs
-about type @{text term} are still awkward because they expect a
+about type \<open>term\<close> are still awkward because they expect a
 conjunction. One could derive a new induction principle as well (see
 \S\ref{sec:derive-ind}), but simpler is to stop using
 \isacommand{primrec} and to define functions with \isacommand{fun}
 instead.  Simple uses of \isacommand{fun} are described in
 \S\ref{sec:fun} below.  Advanced applications, including functions
-over nested datatypes like @{text term}, are discussed in a
+over nested datatypes like \<open>term\<close>, are discussed in a
 separate tutorial~@{cite "isabelle-function"}.
 
 Of course, you may also combine mutual and nested recursion of datatypes. For example,
-constructor @{text Sum} in \S\ref{sec:datatype-mut-rec} could take a list of
-expressions as its argument: @{text Sum}~@{typ[quotes]"'a aexp list"}.
+constructor \<open>Sum\<close> in \S\ref{sec:datatype-mut-rec} could take a list of
+expressions as its argument: \<open>Sum\<close>~@{typ[quotes]"'a aexp list"}.
 \<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Documents/Documents.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Documents/Documents.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -29,7 +29,7 @@
   such as \isacommand{definition} and \isacommand{primrec} --- and also
   \isacommand{datatype}, which declares constructor operations.
   Type-constructors may be annotated as well, although this is less
-  frequently encountered in practice (the infix type @{text "\<times>"} comes
+  frequently encountered in practice (the infix type \<open>\<times>\<close> comes
   to mind).
 
   Infix declarations\index{infix annotations} provide a useful special
@@ -41,38 +41,38 @@
 where "A [+] B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 
 text \<open>
-  \noindent Now @{text "xor A B"} and @{text "A [+] B"} refer to the
+  \noindent Now \<open>xor A B\<close> and \<open>A [+] B\<close> refer to the
   same expression internally.  Any curried function with at least two
   arguments may be given infix syntax.  For partial applications with
   fewer than two operands, the operator is enclosed in parentheses.
-  For instance, @{text xor} without arguments is represented as
-  @{text "([+])"}; together with ordinary function application, this
-  turns @{text "xor A"} into @{text "([+]) A"}.
+  For instance, \<open>xor\<close> without arguments is represented as
+  \<open>([+])\<close>; together with ordinary function application, this
+  turns \<open>xor A\<close> into \<open>([+]) A\<close>.
 
   The keyword \isakeyword{infixl} seen above specifies an
   infix operator that is nested to the \emph{left}: in iterated
   applications the more complex expression appears on the left-hand
-  side, and @{term "A [+] B [+] C"} stands for @{text "(A [+] B) [+]
-  C"}.  Similarly, \isakeyword{infixr} means nesting to the
-  \emph{right}, reading @{term "A [+] B [+] C"} as @{text "A [+] (B
-  [+] C)"}.  A \emph{non-oriented} declaration via \isakeyword{infix}
+  side, and @{term "A [+] B [+] C"} stands for \<open>(A [+] B) [+]
+  C\<close>.  Similarly, \isakeyword{infixr} means nesting to the
+  \emph{right}, reading @{term "A [+] B [+] C"} as \<open>A [+] (B
+  [+] C)\<close>.  A \emph{non-oriented} declaration via \isakeyword{infix}
   would render @{term "A [+] B [+] C"} illegal, but demand explicit
   parentheses to indicate the intended grouping.
 
   The string @{text [source] "[+]"} in our annotation refers to the
   concrete syntax to represent the operator (a literal token), while
-  the number @{text 60} determines the precedence of the construct:
+  the number \<open>60\<close> determines the precedence of the construct:
   the syntactic priorities of the arguments and result.  Isabelle/HOL
   already uses up many popular combinations of ASCII symbols for its
-  own use, including both @{text "+"} and @{text "++"}.  Longer
+  own use, including both \<open>+\<close> and \<open>++\<close>.  Longer
   character combinations are more likely to be still available for
-  user extensions, such as our~@{text "[+]"}.
+  user extensions, such as our~\<open>[+]\<close>.
 
   Operator precedences have a range of 0--1000.  Very low or high
   priorities are reserved for the meta-logic.  HOL syntax mainly uses
-  the range of 10--100: the equality infix @{text "="} is centered at
-  50; logical connectives (like @{text "\<or>"} and @{text "\<and>"}) are
-  below 50; algebraic ones (like @{text "+"} and @{text "*"}) are
+  the range of 10--100: the equality infix \<open>=\<close> is centered at
+  50; logical connectives (like \<open>\<or>\<close> and \<open>\<and>\<close>) are
+  below 50; algebraic ones (like \<open>+\<close> and \<open>*\<close>) are
   above 50.  User syntax should strive to coexist with common HOL
   forms, or use the mostly unused range 100--900.
 \<close>
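 
 text \<open>
   For instance, a conjunction-like connective sits naturally at the
   same priority 35 as \<open>\<and>\<close> itself; the name \<open>[&]\<close> is invented for
   this sketch:
 \<close>
 
 definition conj2 :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixr "[&]" 35)
 where "A [&] B \<equiv> A \<and> B"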
@@ -103,7 +103,7 @@
   This results in an infinite store of symbols, whose
   interpretation is left to further front-end tools.  For example, the
   Isabelle document processor (see \S\ref{sec:document-preparation})
-  display the \verb,\,\verb,<forall>, symbol as~@{text \<forall>}.
+  displays the \verb,\,\verb,<forall>, symbol as~\<open>\<forall>\<close>.
 
   A list of standard Isabelle symbols is given in
   @{cite "isabelle-isar-ref"}.  You may introduce your own
@@ -113,14 +113,13 @@
   few predefined control symbols, such as \verb,\,\verb,<^sub>, and
   \verb,\,\verb,<^sup>, for sub- and superscript of the subsequent
   printable symbol, respectively.  For example, \<^verbatim>\<open>A\<^sup>\<star>\<close> is
-  output as @{text "A\<^sup>\<star>"}.
+  output as \<open>A\<^sup>\<star>\<close>.
 
   A number of symbols are considered letters by the Isabelle lexer and
   can be used as part of identifiers. These are the Greek letters
-  @{text "\<alpha>"} (\verb+\+\verb+<alpha>+), @{text "\<beta>"}
-  (\verb+\+\verb+<beta>+), etc. (excluding @{text "\<lambda>"}),
-  special letters like @{text "\<A>"} (\verb+\+\verb+<A>+) and @{text
-  "\<AA>"} (\verb+\+\verb+<AA>+).  Moreover the control symbol
+  \<open>\<alpha>\<close> (\verb+\+\verb+<alpha>+), \<open>\<beta>\<close>
+  (\verb+\+\verb+<beta>+), etc. (excluding \<open>\<lambda>\<close>), and
+  special letters like \<open>\<A>\<close> (\verb+\+\verb+<A>+) and \<open>\<AA>\<close> (\verb+\+\verb+<AA>+).  Moreover, the control symbol
   \verb+\+\verb+<^sub>+ may be used to subscript a single letter or digit
   in the trailing part of an identifier. This means that the input
 
@@ -131,7 +130,7 @@
   \noindent is recognized as the term @{term "\<forall>\<alpha>\<^sub>1. \<alpha>\<^sub>1 = \<Pi>\<^sub>\<A>"} 
   by Isabelle.
 
-  Replacing our previous definition of @{text xor} by the
+  Replacing our previous definition of \<open>xor\<close> by the
   following specifies an Isabelle symbol for the new operator:
 \<close>
 
@@ -150,7 +149,7 @@
   through the \bfindex{print mode} concept~@{cite "isabelle-isar-ref"}.  By
   convention, the mode of ``$xsymbols$'' is enabled whenever
   Proof~General's X-Symbol mode or {\LaTeX} output is active.  Now
-  consider the following hybrid declaration of @{text xor}:
+  consider the following hybrid declaration of \<open>xor\<close>:
 \<close>
 
 (*<*)
@@ -168,9 +167,9 @@
 text \<open>\noindent
 The \commdx{notation} command associates a mixfix
 annotation with a known constant.  The print mode specification,
-here @{text "(xsymbols)"}, is optional.
+here \<open>(xsymbols)\<close>, is optional.
 
-We may now write @{text "A [+] B"} or @{text "A \<oplus> B"} in input, while
+We may now write \<open>A [+] B\<close> or \<open>A \<oplus> B\<close> in input, while
 output uses the nicer syntax of $xsymbols$ whenever that print mode is
 active.  Such an arrangement is particularly useful for interactive
 development, where users may type ASCII text and see mathematical
@@ -196,8 +195,8 @@
   \noindent Here the mixfix annotations on the rightmost column happen
   to consist of a single Isabelle symbol each: \verb,\,\verb,<euro>,,
   \verb,\,\verb,<pounds>,, \verb,\,\verb,<yen>,, and \verb,$,.  Recall
-  that a constructor like @{text Euro} actually is a function @{typ
-  "nat \<Rightarrow> currency"}.  The expression @{text "Euro 10"} will be
+  that a constructor like \<open>Euro\<close> is actually a function @{typ
+  "nat \<Rightarrow> currency"}.  The expression \<open>Euro 10\<close> will be
   printed as @{term "\<euro> 10"}; only the head of the application is
   subject to our concrete syntax.  This rather simple form already
   achieves conformance with notational standards of the European
@@ -211,7 +210,7 @@
 
 text\<open>Mixfix syntax annotations merely decorate particular constant
 application forms with concrete syntax, for instance replacing
-@{text "xor A B"} by @{text "A \<oplus> B"}.  Occasionally, the relationship
+\<open>xor A B\<close> by \<open>A \<oplus> B\<close>.  Occasionally, the relationship
 between some piece of notation and its internal form is more
 complicated.  Here we need \emph{abbreviations}.
 
@@ -221,22 +220,22 @@
 simple mechanism for syntactic macros.
 
 A typical use of abbreviations is to introduce relational notation for
-membership in a set of pairs, replacing @{text "(x, y) \<in> sim"} by
-@{text "x \<approx> y"}. We assume that a constant @{text sim } of type
+membership in a set of pairs, replacing \<open>(x, y) \<in> sim\<close> by
+\<open>x \<approx> y\<close>. We assume that a constant \<open>sim\<close> of type
 @{typ"('a \<times> 'a) set"} has been introduced at this point.\<close>
 (*<*)consts sim :: "('a \<times> 'a) set"(*>*)
 abbreviation sim2 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"   (infix "\<approx>" 50)
 where "x \<approx> y  \<equiv>  (x, y) \<in> sim"
 
 text \<open>\noindent The given meta-equality is used as a rewrite rule
-after parsing (replacing \mbox{@{prop"x \<approx> y"}} by @{text"(x,y) \<in>
-sim"}) and before printing (turning @{text"(x,y) \<in> sim"} back into
-\mbox{@{prop"x \<approx> y"}}). The name of the dummy constant @{text "sim2"}
+after parsing (replacing \mbox{@{prop"x \<approx> y"}} by \<open>(x,y) \<in>
+sim\<close>) and before printing (turning \<open>(x,y) \<in> sim\<close> back into
+\mbox{@{prop"x \<approx> y"}}). The name of the dummy constant \<open>sim2\<close>
 does not matter, as long as it is unique.
 
 Another common application of abbreviations is to
 provide variant versions of fundamental relational expressions, such
-as @{text \<noteq>} for negated equalities.  The following declaration
+as \<open>\<noteq>\<close> for negated equalities.  The following declaration
 stems from Isabelle/HOL itself:
 \<close>
 
@@ -245,7 +244,7 @@
 
 notation (xsymbols) not_equal (infix "\<noteq>\<ignore>" 50)
 
-text \<open>\noindent The notation @{text \<noteq>} is introduced separately to restrict it
+text \<open>\noindent The notation \<open>\<noteq>\<close> is introduced separately to restrict it
 to the \emph{xsymbols} mode.
 
 Abbreviations are appropriate when the defined concept is a
@@ -284,7 +283,7 @@
 text_raw \<open>\begin{quotation}\<close>
 
 text \<open>
-  The following datatype definition of @{text "'a bintree"} models
+  The following datatype definition of \<open>'a bintree\<close> models
   binary trees with nodes being decorated by elements of type @{typ
   'a}.
 \<close>
@@ -425,8 +424,8 @@
   in corresponding {\LaTeX} elements.
 
   From the Isabelle perspective, each markup command takes a single
-  $text$ argument (delimited by \verb,",~@{text \<dots>}~\verb,", or
-  \verb,{,\verb,*,~@{text \<dots>}~\verb,*,\verb,},).  After stripping any
+  $text$ argument (delimited by \verb,",~\<open>\<dots>\<close>~\verb,", or
+  \verb,{,\verb,*,~\<open>\<dots>\<close>~\verb,*,\verb,},).  After stripping any
   surrounding white space, the argument is passed to a {\LaTeX} macro
   \verb,\isamarkupXYZ, for command \isakeyword{XYZ}.  These macros are
   defined in \verb,isabelle.sty, according to the meaning given in the
@@ -467,7 +466,7 @@
 
 text \<open>
   Isabelle \bfindex{source comments}, which are of the form
-  \verb,(,\verb,*,~@{text \<dots>}~\verb,*,\verb,),, essentially act like
+  \verb,(,\verb,*,~\<open>\<dots>\<close>~\verb,*,\verb,),, essentially act like
   white space and do not really contribute to the content.  They
   mainly serve technical purposes to mark certain oddities in the raw
   input text.  In contrast, \bfindex{formal comments} are portions of
@@ -477,8 +476,8 @@
 
   \medskip Marginal comments are part of each command's concrete
   syntax @{cite "isabelle-isar-ref"}; the common form is ``\verb,--,~$text$''
-  where $text$ is delimited by \verb,",@{text \<dots>}\verb,", or
-  \verb,{,\verb,*,~@{text \<dots>}~\verb,*,\verb,}, as before.  Multiple
+  where $text$ is delimited by \verb,",\<open>\<dots>\<close>\verb,", or
+  \verb,{,\verb,*,~\<open>\<dots>\<close>~\verb,*,\verb,}, as before.  Multiple
   marginal comments may be given at the same time.  Here is a simple
   example:
 \<close>
@@ -541,7 +540,7 @@
   \end{ttbox}\vspace{-\medskipamount}
 
   The notational change from the ASCII character~\verb,%, to the
-  symbol~@{text \<lambda>} reveals that Isabelle printed this term, after
+  symbol~\<open>\<lambda>\<close> reveals that Isabelle printed this term, after
   parsing and type-checking.  Document preparation enables symbolic
   output by default.
 
@@ -586,8 +585,7 @@
   virtue is that the string $s$ is processed as Isabelle output,
   interpreting Isabelle symbols appropriately.
 
-  For example, \texttt{\at}\verb,{text "\<forall>\<exists>"}, produces @{text
-  "\<forall>\<exists>"}, according to the standard interpretation of these symbol
+  For example, \texttt{\at}\verb,{text "\<forall>\<exists>"}, produces \<open>\<forall>\<exists>\<close>, according to the standard interpretation of these symbols
   (cf.\ \S\ref{sec:doc-prep-symbols}).  Thus we achieve consistent
   mathematical notation in both the formal and informal parts of the
   document very easily, independently of the term language of
--- a/src/Doc/Tutorial/Fun/fun0.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Fun/fun0.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -68,7 +68,7 @@
 text\<open>
 After a function~$f$ has been defined via \isacommand{fun},
 its defining equations (or variants derived from them) are available
-under the name $f$@{text".simps"} as theorems.
+under the name $f$\<open>.simps\<close> as theorems.
 For example, look (via \isacommand{thm}) at
 @{thm[source]sep.simps} and @{thm[source]sep1.simps} to see that they define
 the same function. What is more, those equations are automatically declared as
@@ -105,7 +105,7 @@
 simplification rules, just as with \isacommand{primrec}.
 In most cases this works fine, but there is a subtle
 problem that must be mentioned: simplification may not
-terminate because of automatic splitting of @{text "if"}.
+terminate because of automatic splitting of \<open>if\<close>.
 \index{*if expressions!splitting of}
 Let us look at an example:
 \<close>
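
The example elided at this point is a \isacommand{fun} definition of \<open>gcd\<close>; judging from the equations quoted below, it reads roughly as follows (a sketch, not part of this changeset):

fun gcd :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "gcd m n = (if n = 0 then m else gcd n (m mod n))"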
@@ -120,10 +120,10 @@
 is proved automatically because it is already present as a lemma in
 HOL\@.  Thus the recursion equation becomes a simplification
 rule. Of course the equation is nonterminating if we are allowed to unfold
-the recursive call inside the @{text else} branch, which is why programming
+the recursive call inside the \<open>else\<close> branch, which is why programming
 languages and our simplifier don't do that. Unfortunately the simplifier does
 something else that leads to the same problem: it splits 
-each @{text "if"}-expression unless its
+each \<open>if\<close>-expression unless its
 condition simplifies to @{term True} or @{term False}.  For
 example, simplification reduces
 @{prop[display]"gcd m n = k"}
@@ -132,7 +132,7 @@
 where the condition cannot be reduced further, and splitting leads to
 @{prop[display]"(n=0 --> m=k) & (n ~= 0 --> gcd n (m mod n)=k)"}
 Since the recursive call @{term"gcd n (m mod n)"} is no longer protected by
-an @{text "if"}, it is unfolded again, which leads to an infinite chain of
+an \<open>if\<close>, it is unfolded again, which leads to an infinite chain of
 simplification steps. Fortunately, this problem can be avoided in many
 different ways.
 
@@ -140,10 +140,10 @@
 @{thm[source]if_split},
 as shown in \S\ref{sec:AutoCaseSplits}.  However, we do not recommend this
 approach: you will often have to invoke the rule explicitly when
-@{text "if"} is involved.
+\<open>if\<close> is involved.
 
 If possible, the definition should be given by pattern matching on the left
-rather than @{text "if"} on the right. In the case of @{term gcd} the
+rather than \<open>if\<close> on the right. In the case of @{term gcd} the
 following alternative definition suggests itself:
 \<close>
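
In sketch form (not part of this changeset; the name \<open>gcd1\<close> is illustrative), the pattern-matching variant reads:

fun gcd1 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "gcd1 m 0 = m" |
  "gcd1 m n = gcd1 n (m mod n)"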
 
@@ -156,7 +156,7 @@
 @{prop"n ~= (0::nat)"}.  Unfortunately, not all conditionals can be
 expressed by pattern matching.
 
-A simple alternative is to replace @{text "if"} by @{text case}, 
+A simple alternative is to replace \<open>if\<close> by \<open>case\<close>, 
 which is also available for @{typ bool} and is not split automatically:
 \<close>
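
A sketch of this \<open>case\<close>-based variant (the name \<open>gcd2\<close> is illustrative, not part of this changeset):

fun gcd2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "gcd2 m n = (case n of 0 \<Rightarrow> m | Suc _ \<Rightarrow> gcd2 n (m mod n))"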
 
@@ -181,8 +181,7 @@
 done
 
 text\<open>\noindent
-Simplification terminates for these proofs because the condition of the @{text
-"if"} simplifies to @{term True} or @{term False}.
+Simplification terminates for these proofs because the condition of the \<open>if\<close> simplifies to @{term True} or @{term False}.
 Now we can disable the original simplification rule:
 \<close>
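
Given the $f$\<open>.simps\<close> naming convention explained above, the disabling step presumably takes this form (a sketch, not part of this changeset):

declare gcd.simps [simp del]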
 
@@ -200,7 +199,7 @@
 again induction. But this time the structural form of induction that comes
 with datatypes is unlikely to work well --- otherwise we could have defined the
 function by \isacommand{primrec}. Therefore \isacommand{fun} automatically
-proves a suitable induction rule $f$@{text".induct"} that follows the
+proves a suitable induction rule $f$\<open>.induct\<close> that follows the
 recursion pattern of the particular function $f$. We call this
 \textbf{recursion induction}. Roughly speaking, it
 requires you to prove for each \isacommand{fun} equation that the property
@@ -236,7 +235,7 @@
 
 In general, the format of invoking recursion induction is
 \begin{quote}
-\isacommand{apply}@{text"(induct_tac"} $x@1 \dots x@n$ @{text"rule:"} $f$@{text".induct)"}
+\isacommand{apply}\<open>(induct_tac\<close> $x@1 \dots x@n$ \<open>rule:\<close> $f$\<open>.induct)\<close>
 \end{quote}\index{*induct_tac (method)}%
 where $x@1~\dots~x@n$ is a list of free variables in the subgoal and $f$ the
 name of a function that takes $n$ arguments. Usually the subgoal will
--- a/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -31,7 +31,7 @@
 \subsubsection{The Value of a Boolean Expression}
 
 The value of a boolean expression depends on the value of its variables.
-Hence the function @{text"value"} takes an additional parameter, an
+Hence the function \<open>value\<close> takes an additional parameter, an
 \emph{environment} of type @{typ"nat => bool"}, which maps variables to their
 values:
 \<close>
@@ -139,7 +139,7 @@
 (*>*)
 text\<open>\noindent
 Note that the lemma does not have a name, but is implicitly used in the proof
-of the theorem shown above because of the @{text"[simp]"} attribute.
+of the theorem shown above because of the \<open>[simp]\<close> attribute.
 
 But how can we be sure that @{term"norm"} really produces a normal form in
 the above sense? We define a function that tests If-expressions for normality:
@@ -168,17 +168,17 @@
 
 text\<open>\medskip
 How do we come up with the required lemmas? Try to prove the main theorems
-without them and study carefully what @{text auto} leaves unproved. This 
+without them and study carefully what \<open>auto\<close> leaves unproved. This 
 can provide the clue.  The necessity of universal quantification
-(@{text"\<forall>t e"}) in the two lemmas is explained in
+(\<open>\<forall>t e\<close>) in the two lemmas is explained in
 \S\ref{sec:InductionHeuristics}.
 
 \begin{exercise}
   We strengthen the definition of a @{const normal} If-expression as follows:
   the first argument of all @{term IF}s must be a variable. Adapt the above
   development to this changed requirement. (Hint: you may need to formulate
-  some of the goals as implications (@{text"\<longrightarrow>"}) rather than
-  equalities (@{text"="}).)
+  some of the goals as implications (\<open>\<longrightarrow>\<close>) rather than
+  equalities (\<open>=\<close>).)
 \end{exercise}
 \index{boolean expressions example|)}
 \<close>
--- a/src/Doc/Tutorial/Inductive/AB.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/AB.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -70,9 +70,9 @@
 
 txt\<open>\noindent
 These propositions are expressed with the help of the predefined @{term
-filter} function on lists, which has the convenient syntax @{text"[x\<leftarrow>xs. P
-x]"}, the list of all elements @{term x} in @{term xs} such that @{prop"P x"}
-holds. Remember that on lists @{text size} and @{text length} are synonymous.
+filter} function on lists, which has the convenient syntax \<open>[x\<leftarrow>xs. P
+x]\<close>, the list of all elements @{term x} in @{term xs} such that @{prop"P x"}
+holds. Remember that on lists \<open>size\<close> and \<open>length\<close> are synonymous.
 
 The proof itself is by rule induction and afterwards automatic:
 \<close>
@@ -99,7 +99,7 @@
 intermediate value theorem @{thm[source]nat0_intermed_int_val}
 @{thm[display,margin=60]nat0_intermed_int_val[no_vars]}
 where @{term f} is of type @{typ"nat \<Rightarrow> int"}, @{typ int} are the integers,
-@{text"\<bar>.\<bar>"} is the absolute value function\footnote{See
+\<open>\<bar>.\<bar>\<close> is the absolute value function\footnote{See
 Table~\ref{tab:ascii} in the Appendix for the correct \textsc{ascii}
 syntax.}, and @{term"1::int"} is the integer 1 (see \S\ref{sec:numbers}).
 
@@ -117,7 +117,7 @@
 
 txt\<open>\noindent
 The lemma is a bit hard to read because of the coercion function
-@{text"int :: nat \<Rightarrow> int"}. It is required because @{term size} returns
+\<open>int :: nat \<Rightarrow> int\<close>. It is required because @{term size} returns
 a natural number, but subtraction on type~@{typ nat} will do the wrong thing.
 Function @{term take} is predefined and @{term"take i xs"} is the prefix of
 length @{term i} of @{term xs}; below we also need @{term"drop i xs"}, which
@@ -141,7 +141,7 @@
   \<exists>i\<le>size w. size[x\<leftarrow>take i w. P x] = size[x\<leftarrow>take i w. \<not>P x]+1"
 
 txt\<open>\noindent
-This is proved by @{text force} with the help of the intermediate value theorem,
+This is proved by \<open>force\<close> with the help of the intermediate value theorem,
 instantiated appropriately and with its first premise disposed of by lemma
 @{thm[source]step1}:
 \<close>
@@ -202,11 +202,11 @@
 apply(rename_tac w)
 
 txt\<open>\noindent
-The @{text rule} parameter tells @{text induct_tac} explicitly which induction
+The \<open>rule\<close> parameter tells \<open>induct_tac\<close> explicitly which induction
 rule to use. For details see \S\ref{sec:complete-ind} below.
 In this case the result is that we may assume the lemma already
 holds for all words shorter than @{term w}. Because the induction step renames
-the induction variable we rename it back to @{text w}.
+the induction variable, we rename it back to \<open>w\<close>.
 
 The proof continues with a case distinction on @{term w},
 on whether @{term w} is empty or not.
@@ -292,8 +292,8 @@
 separating the two directions, they perform one induction on the
 length of a word. This deprives them of the beauty of rule induction,
 and in the easy direction (correctness) their reasoning is more
-detailed than our @{text auto}. For the hard part (completeness), they
-consider just one of the cases that our @{text simp_all} disposes of
+detailed than our \<open>auto\<close>. For the hard part (completeness), they
+consider just one of the cases that our \<open>simp_all\<close> disposes of
 automatically. Then they conclude the proof by saying about the
 remaining cases: ``We do this in a manner similar to our method of
 proof for part (1); this part is left to the reader''. But this is
--- a/src/Doc/Tutorial/Inductive/Advanced.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Advanced.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -21,7 +21,7 @@
 terms}: terms constructed from constant and function 
 symbols but not variables. To simplify matters further, we regard a
 constant as a function applied to the null argument  list.  Let us declare a
-datatype @{text gterm} for the type of ground  terms. It is a type constructor
+datatype \<open>gterm\<close> for the type of ground  terms. It is a type constructor
 whose argument is a type of  function symbols. 
 \<close>
 
@@ -39,22 +39,22 @@
 Now the type @{typ "integer_op gterm"} denotes the ground 
 terms built over those symbols.
 
-The type constructor @{text gterm} can be generalized to a function 
+The type constructor \<open>gterm\<close> can be generalized to a function 
 over sets.  It returns 
-the set of ground terms that can be formed over a set @{text F} of function symbols. For
+the set of ground terms that can be formed over a set \<open>F\<close> of function symbols. For
 example,  we could consider the set of ground terms formed from the finite 
-set @{text "{Number 2, UnaryMinus, Plus}"}.
+set \<open>{Number 2, UnaryMinus, Plus}\<close>.
 
-This concept is inductive. If we have a list @{text args} of ground terms 
-over~@{text F} and a function symbol @{text f} in @{text F}, then we 
-can apply @{text f} to @{text args} to obtain another ground term. 
+This concept is inductive. If we have a list \<open>args\<close> of ground terms 
+over~\<open>F\<close> and a function symbol \<open>f\<close> in \<open>F\<close>, then we 
+can apply \<open>f\<close> to \<open>args\<close> to obtain another ground term. 
 The only difficulty is that the argument list may be of any length. Hitherto, 
 each rule in an inductive definition referred to the inductively 
 defined set a fixed number of times, typically once or twice. 
 A universal quantifier in the premise of the introduction rule 
-expresses that every element of @{text args} belongs
+expresses that every element of \<open>args\<close> belongs
 to our inductively defined set: it is a ground term 
-over~@{text F}.  The function @{term set} denotes the set of elements in a given 
+over~\<open>F\<close>.  The function @{term set} denotes the set of elements in a given 
 list. 
 \<close>
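
The definition elided here can be sketched as follows (not part of this changeset; the constructor \<open>Apply\<close> is the one appearing in the rule-inversion discussion below, and the rule name \<open>step\<close> is illustrative):

inductive_set
  gterms :: "'f set \<Rightarrow> 'f gterm set"
  for F :: "'f set"
where
  step[intro!]: "\<lbrakk>\<forall>t \<in> set args. t \<in> gterms F; f \<in> F\<rbrakk>
                 \<Longrightarrow> (Apply f args) \<in> gterms F"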
 
@@ -85,22 +85,22 @@
 Intuitively, this theorem says that
 enlarging the set of function symbols enlarges the set of ground 
 terms. The proof is a trivial rule induction.
-First we use the @{text clarify} method to assume the existence of an element of
-@{term "gterms F"}.  (We could have used @{text "intro subsetI"}.)  We then
+First we use the \<open>clarify\<close> method to assume the existence of an element of
+@{term "gterms F"}.  (We could have used \<open>intro subsetI\<close>.)  We then
 apply rule induction. Here is the resulting subgoal:
 @{subgoals[display,indent=0]}
-The assumptions state that @{text f} belongs 
-to~@{text F}, which is included in~@{text G}, and that every element of the list @{text args} is
-a ground term over~@{text G}.  The @{text blast} method finds this chain of reasoning easily.  
+The assumptions state that \<open>f\<close> belongs 
+to~\<open>F\<close>, which is included in~\<open>G\<close>, and that every element of the list \<open>args\<close> is
+a ground term over~\<open>G\<close>.  The \<open>blast\<close> method finds this chain of reasoning easily.  
 \<close>
 (*<*)oops(*>*)
 text \<open>
 \begin{warn}
-Why do we call this function @{text gterms} instead 
-of @{text gterm}?  A constant may have the same name as a type.  However,
+Why do we call this function \<open>gterms\<close> instead 
+of \<open>gterm\<close>?  A constant may have the same name as a type.  However,
 name  clashes could arise in the theorems that Isabelle generates. 
-Our choice of names keeps @{text gterms.induct} separate from 
-@{text gterm.induct}.
+Our choice of names keeps \<open>gterms.induct\<close> separate from 
+\<open>gterm.induct\<close>.
 \end{warn}
 
 Call a term \textbf{well-formed} if each symbol occurring in it is applied
@@ -108,10 +108,10 @@
 \textbf{arity}.)  We can express well-formedness by
 generalizing the inductive definition of
 \isa{gterms}.
-Suppose we are given a function called @{text arity}, specifying the arities
-of all symbols.  In the inductive step, we have a list @{text args} of such
-terms and a function  symbol~@{text f}. If the length of the list matches the
-function's arity  then applying @{text f} to @{text args} yields a well-formed
+Suppose we are given a function called \<open>arity\<close>, specifying the arities
+of all symbols.  In the inductive step, we have a list \<open>args\<close> of such
+terms and a function  symbol~\<open>f\<close>. If the length of the list matches the
+function's arity  then applying \<open>f\<close> to \<open>args\<close> yields a well-formed
 term.
 \<close>
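
In sketch form (again not part of the changeset; the rule name matches the reference to \<open>well_formed_gterm.step\<close> further below), the arity-checking definition reads:

inductive_set
  well_formed_gterm :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
  for arity :: "'f \<Rightarrow> nat"
where
  step[intro!]: "\<lbrakk>\<forall>t \<in> set args. t \<in> well_formed_gterm arity;
                  length args = arity f\<rbrakk>
                 \<Longrightarrow> (Apply f args) \<in> well_formed_gterm arity"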
 
@@ -126,7 +126,7 @@
 text \<open>
 The inductive definition neatly captures the reasoning above.
 The universal quantification over the
-@{text set} of arguments expresses that all of them are well-formed.%
+\<open>set\<close> of arguments expresses that all of them are well-formed.%
 \index{quantifiers!and inductive definitions|)}
 \<close>
 
@@ -140,13 +140,13 @@
 change the  inductive definition above, replacing the
 quantifier by a use of the function @{term lists}. This
 function, from the Isabelle theory of lists, is analogous to the
-function @{term gterms} declared above: if @{text A} is a set then
+function @{term gterms} declared above: if \<open>A\<close> is a set then
 @{term "lists A"} is the set of lists whose elements belong to
 @{term A}.  
 
 In the inductive definition of well-formed terms, examine the one
-introduction rule.  The first premise states that @{text args} belongs to
-the @{text lists} of well-formed terms.  This formulation is more
+introduction rule.  The first premise states that \<open>args\<close> belongs to
+the \<open>lists\<close> of well-formed terms.  This formulation is more
 direct, if more obscure, than using a universal quantifier.
 \<close>
 
@@ -160,7 +160,7 @@
 monos lists_mono
 
 text \<open>
-We cite the theorem @{text lists_mono} to justify 
+We cite the theorem \<open>lists_mono\<close> to justify 
 using the function @{term lists}.%
 \footnote{This particular theorem is installed by default already, but we
 include the \isakeyword{monos} declaration in order to illustrate its syntax.}
@@ -215,7 +215,7 @@
 apply (erule well_formed_gterm.induct)
 (*>*)
 txt \<open>
-The @{text clarify} method gives
+The \<open>clarify\<close> method gives
 us an element of @{term "well_formed_gterm arity"} on which to perform 
 induction.  The resulting subgoal can be proved automatically:
 @{subgoals[display,indent=0]}
@@ -249,8 +249,8 @@
 \item @{term "args \<in> lists (well_formed_gterm' arity)"}
 \item @{term "args \<in> lists (well_formed_gterm arity)"}
 \end{trivlist}
-Invoking the rule @{text well_formed_gterm.step} completes the proof.  The
-call to @{text auto} does all this work.
+Invoking the rule \<open>well_formed_gterm.step\<close> completes the proof.  The
+call to \<open>auto\<close> does all this work.
 
 This example is typical of how monotone functions
 \index{monotone functions} can be used.  In particular, many of them
@@ -266,7 +266,7 @@
 text \<open>
 \index{rule inversion|(}%
 Does @{term gterms} distribute over intersection?  We have proved that this
-function is monotone, so @{text mono_Int} gives one of the inclusions.  The
+function is monotone, so \<open>mono_Int\<close> gives one of the inclusions.  The
 opposite inclusion asserts that if @{term t} is a ground term over both of the
 sets
 @{term F} and~@{term G} then it is also a ground term over their intersection,
@@ -292,7 +292,7 @@
 No cases are discarded (there was only one to begin
 with) but the rule applies specifically to the pattern @{term "Apply f args"}.
 It can be applied repeatedly as an elimination rule without looping, so we
-have given the @{text "elim!"} attribute. 
+have given it the \<open>elim!\<close> attribute. 
 
 Now we can prove the other half of that distributive law.
 \<close>
@@ -311,12 +311,12 @@
 @{term gterms}, which leaves a single subgoal:  
 @{subgoals[display,indent=0,margin=65]}
 To prove this, we assume @{term "Apply f args \<in> gterms G"}.  Rule inversion,
-in the form of @{text gterm_Apply_elim}, infers
+in the form of \<open>gterm_Apply_elim\<close>, infers
 that every element of @{term args} belongs to 
 @{term "gterms G"}; hence (by the induction hypothesis) it belongs
 to @{term "gterms (F \<inter> G)"}.  Rule inversion also yields
 @{term "f \<in> G"} and hence @{term "f \<in> F \<inter> G"}. 
-All of this reasoning is done by @{text blast}.
+All of this reasoning is done by \<open>blast\<close>.
 
 \smallskip
 Our distributive law is a trivial consequence of previously-proved results:
--- a/src/Doc/Tutorial/Inductive/Even.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Even.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,7 +8,7 @@
 \index{even numbers!defining inductively|(}%
 The set of even numbers can be inductively defined as the least set
 containing 0 and closed under the operation $+2$.  Obviously,
-\emph{even} can also be expressed using the divides relation (@{text dvd}). 
+\emph{even} can also be expressed using the divides relation (\<open>dvd\<close>). 
 We shall prove below that the two formulations coincide.  On the way we
 shall examine the primary means of reasoning about inductively defined
 sets: rule induction.
@@ -17,7 +17,7 @@
 subsection\<open>Making an Inductive Definition\<close>
 
 text \<open>
-Using \commdx{inductive\protect\_set}, we declare the constant @{text even} to be
+Using \commdx{inductive\protect\_set}, we declare the constant \<open>even\<close> to be
 a set of natural numbers with the desired properties.
 \<close>
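
The elided declaration can be sketched as follows (not part of this changeset; the rule names \<open>zero\<close> and \<open>step\<close> and the \<open>intro!\<close> attributes fit the discussion that follows):

inductive_set even :: "nat set" where
zero[intro!]: "0 \<in> even" |
step[intro!]: "n \<in> even \<Longrightarrow> (Suc (Suc n)) \<in> even"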
 
@@ -42,7 +42,7 @@
 \index{intro"!@\isa {intro"!} (attribute)}
 directing the classical reasoner to 
 apply them aggressively. Obviously, regarding 0 as even is safe.  The
-@{text step} rule is also safe because $n+2$ is even if and only if $n$ is
+\<open>step\<close> rule is also safe because $n+2$ is even if and only if $n$ is
 even.  We prove this equivalence later.
 \<close>
 
@@ -65,16 +65,16 @@
 (*>*)
 txt \<open>
 \noindent
-The first step is induction on the natural number @{text k}, which leaves
+The first step is induction on the natural number \<open>k\<close>, which leaves
 two subgoals:
 @{subgoals[display,indent=0,margin=65]}
-Here @{text auto} simplifies both subgoals so that they match the introduction
+Here \<open>auto\<close> simplifies both subgoals so that they match the introduction
 rules, which are then applied automatically.
 
 Our ultimate goal is to prove the equivalence between the traditional
-definition of @{text even} (using the divides relation) and our inductive
+definition of \<open>even\<close> (using the divides relation) and our inductive
 definition.  One direction of this equivalence is immediate by the lemma
-just proved, whose @{text "intro!"} attribute ensures it is applied automatically.
+just proved, whose \<open>intro!\<close> attribute ensures it is applied automatically.
 \<close>
 (*<*)oops(*>*)
 lemma dvd_imp_even: "2 dvd n \<Longrightarrow> n \<in> even"
@@ -89,7 +89,7 @@
 generated an induction rule:
 @{named_thms [display,indent=0,margin=40] even.induct [no_vars] (even.induct)}
 A property @{term P} holds for every even number provided it
-holds for~@{text 0} and is closed under the operation
+holds for~\<open>0\<close> and is closed under the operation
 \isa{Suc(Suc \(\cdot\))}.  Then @{term P} is closed under the introduction
 rules for @{term even}, which is the least set closed under those rules. 
 This type of inductive argument is called \textbf{rule induction}. 
@@ -97,7 +97,7 @@
 Apart from the double application of @{term Suc}, the induction rule above
 resembles the familiar mathematical induction, which indeed is an instance
 of rule induction; the natural numbers can be defined inductively to be
-the least set containing @{text 0} and closed under~@{term Suc}.
+the least set containing \<open>0\<close> and closed under~@{term Suc}.
 
 Induction is the usual way of proving a property of the elements of an
 inductively defined set.  Let us prove that all members of the set
@@ -106,34 +106,34 @@
 
 lemma even_imp_dvd: "n \<in> even \<Longrightarrow> 2 dvd n"
 txt \<open>
-We begin by applying induction.  Note that @{text even.induct} has the form
-of an elimination rule, so we use the method @{text erule}.  We get two
+We begin by applying induction.  Note that \<open>even.induct\<close> has the form
+of an elimination rule, so we use the method \<open>erule\<close>.  We get two
 subgoals:
 \<close>
 apply (erule even.induct)
 txt \<open>
 @{subgoals[display,indent=0]}
-We unfold the definition of @{text dvd} in both subgoals, proving the first
+We unfold the definition of \<open>dvd\<close> in both subgoals, proving the first
 one and simplifying the second:
 \<close>
 apply (simp_all add: dvd_def)
 txt \<open>
 @{subgoals[display,indent=0]}
 The next command eliminates the existential quantifier from the assumption
-and replaces @{text n} by @{text "2 * k"}.
+and replaces \<open>n\<close> by \<open>2 * k\<close>.
 \<close>
 apply clarify
 txt \<open>
 @{subgoals[display,indent=0]}
 To conclude, we tell Isabelle that the desired value is
-@{term "Suc k"}.  With this hint, the subgoal falls to @{text simp}.
+@{term "Suc k"}.  With this hint, the subgoal falls to \<open>simp\<close>.
 \<close>
 apply (rule_tac x = "Suc k" in exI, simp)
 (*<*)done(*>*)
 
 text \<open>
 Combining the previous two results yields our objective, the
-equivalence relating @{term even} and @{text dvd}. 
+equivalence relating @{term even} and \<open>dvd\<close>. 
 %
 %we don't want [iff]: discuss?
 \<close>
@@ -163,7 +163,7 @@
 txt \<open>
 Rule induction finds no occurrences of @{term "Suc(Suc n)"} in the
 conclusion, which it therefore leaves unchanged.  (Look at
-@{text even.induct} to see why this happens.)  We have these subgoals:
+\<open>even.induct\<close> to see why this happens.)  We have these subgoals:
 @{subgoals[display,indent=0]}
 The first one is hopeless.  Rule induction on
 a non-variable term discards information, and usually fails.
@@ -184,7 +184,7 @@
 txt \<open>
 This lemma is trivially inductive.  Here are the subgoals:
 @{subgoals[display,indent=0]}
-The first is trivial because @{text "0 - 2"} simplifies to @{text 0}, which is
+The first is trivial because \<open>0 - 2\<close> simplifies to \<open>0\<close>, which is
 even.  The second is trivial too: @{term "Suc (Suc n) - 2"} simplifies to
 @{term n}, matching the assumption.%
 \index{rule induction|)}  %the sequel isn't really about induction
@@ -197,7 +197,7 @@
 by (drule even_imp_even_minus_2, simp)
 
 text \<open>
-We have just proved the converse of the introduction rule @{text even.step}.
+We have just proved the converse of the introduction rule \<open>even.step\<close>.
 This suggests proving the following equivalence.  We give it the
 \attrdx{iff} attribute because of its obvious value for simplification.
 \<close>
@@ -221,7 +221,7 @@
 Minimality means that @{term even} contains only the elements that these
 rules force it to contain.  If we are told that @{term a}
 belongs to
-@{term even} then there are only two possibilities.  Either @{term a} is @{text 0}
+@{term even} then there are only two possibilities.  Either @{term a} is \<open>0\<close>
 or else @{term a} has the form @{term "Suc(Suc n)"}, for some suitable @{term n}
 that belongs to
 @{term even}.  That is the gist of the @{term cases} rule, which Isabelle proves
@@ -238,29 +238,29 @@
 
 text \<open>
 The \commdx{inductive\protect\_cases} command generates an instance of
-the @{text cases} rule for the supplied pattern and gives it the supplied name:
+the \<open>cases\<close> rule for the supplied pattern and gives it the supplied name:
 @{named_thms [display,indent=0] Suc_Suc_cases [no_vars] (Suc_Suc_cases)}
-Applying this as an elimination rule yields one case where @{text even.cases}
+Applying this as an elimination rule yields one case where \<open>even.cases\<close>
 would yield two.  Rule inversion works well when the conclusions of the
-introduction rules involve datatype constructors like @{term Suc} and @{text "#"}
+introduction rules involve datatype constructors like @{term Suc} and \<open>#\<close>
 (list ``cons''); freeness reasoning discards all but one or two cases.
 
 In the \isacommand{inductive\_cases} command we supplied an
-attribute, @{text "elim!"},
+attribute, \<open>elim!\<close>,
 \index{elim"!@\isa {elim"!} (attribute)}%
 indicating that this elimination rule can be
 applied aggressively.  The original
 @{term cases} rule would loop if used in that manner because the
 pattern~@{term a} matches everything.
 
-The rule @{text Suc_Suc_cases} is equivalent to the following implication:
+The rule \<open>Suc_Suc_cases\<close> is equivalent to the following implication:
 @{term [display,indent=0] "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"}
 Just above we devoted some effort to reaching precisely
 this result.  Yet we could have obtained it by a one-line declaration,
-dispensing with the lemma @{text even_imp_even_minus_2}. 
+dispensing with the lemma \<open>even_imp_even_minus_2\<close>. 
 This example also justifies the terminology
 \textbf{rule inversion}: the new rule inverts the introduction rule
-@{text even.step}.  In general, a rule can be inverted when the set of elements
+\<open>even.step\<close>.  In general, a rule can be inverted when the set of elements
 it introduces is disjoint from those of the other introduction rules.
 
 For one-off applications of rule inversion, use the \methdx{ind_cases} method. 
@@ -272,13 +272,13 @@
 (*<*)oops(*>*)
 
 text \<open>
-The specified instance of the @{text cases} rule is generated, then applied
+The specified instance of the \<open>cases\<close> rule is generated, then applied
 as an elimination rule.
 
-To summarize, every inductive definition produces a @{text cases} rule.  The
+To summarize, every inductive definition produces a \<open>cases\<close> rule.  The
 \commdx{inductive\protect\_cases} command stores an instance of the
-@{text cases} rule for a given pattern.  Within a proof, the
-@{text ind_cases} method applies an instance of the @{text cases}
+\<open>cases\<close> rule for a given pattern.  Within a proof, the
+\<open>ind_cases\<close> method applies an instance of the \<open>cases\<close>
 rule.
 
 The even numbers example has shown how inductive definitions can be
--- a/src/Doc/Tutorial/Inductive/Mutual.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Mutual.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -32,7 +32,7 @@
 
 txt\<open>\noindent
 The proof is by rule induction. Because of the form of the induction theorem,
-it is applied by @{text rule} rather than @{text erule} as for ordinary
+it is applied by \<open>rule\<close> rather than \<open>erule\<close> as for ordinary
 inductive definitions:
 \<close>
 
@@ -69,11 +69,11 @@
 you write \commdx{inductive} instead of \isacommand{inductive\_set} and
 @{prop"evn n"} instead of @{prop"n \<in> Even"}.
 When defining an n-ary relation as a predicate, it is recommended to curry
-the predicate: its type should be \mbox{@{text"\<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> bool"}}
+the predicate: its type should be \mbox{\<open>\<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> bool\<close>}
 rather than
-@{text"\<tau>\<^sub>1 \<times> \<dots> \<times> \<tau>\<^sub>n \<Rightarrow> bool"}. The curried version facilitates inductions.
+\<open>\<tau>\<^sub>1 \<times> \<dots> \<times> \<tau>\<^sub>n \<Rightarrow> bool\<close>. The curried version facilitates inductions.
 
-When should you choose sets and when predicates? If you intend to combine your notion with set theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the @{text"\<in>"} notation. But note that predicates of more than one argument cannot be combined with the usual set theoretic operators: @{term"P \<union> Q"} is not well-typed if @{text"P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool"}, you have to write @{term"%x y. P x y & Q x y"} instead.
+When should you choose sets and when predicates? If you intend to combine your notion with set-theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the \<open>\<in>\<close> notation. But note that predicates of more than one argument cannot be combined with the usual set-theoretic operators: @{term"P \<union> Q"} is not well-typed if \<open>P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool\<close>; you have to write @{term"%x y. P x y & Q x y"} instead.
 \index{inductive predicates|)}
 \<close>
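
A predicate version of the even numbers from the previous section would be declared roughly as follows (a sketch, not part of this changeset; the rule names are illustrative):

inductive evn :: "nat \<Rightarrow> bool" where
zero[intro!]: "evn 0" |
step[intro!]: "evn n \<Longrightarrow> evn (Suc (Suc n))"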
 
--- a/src/Doc/Tutorial/Inductive/Star.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Star.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -9,7 +9,7 @@
 Relations too can be defined inductively, since they are just sets of pairs.
 A perfect example is the function that maps a relation to its
 reflexive transitive closure.  This concept was already
-introduced in \S\ref{sec:Relations}, where the operator @{text"\<^sup>*"} was
+introduced in \S\ref{sec:Relations}, where the operator \<open>\<^sup>*\<close> was
 defined as a least fixed point because inductive definitions were not yet
 available. But now they are:
 \<close>
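
A sketch of the definition under discussion (not part of this changeset; the rule names and the \<open>_*\<close> syntax annotation follow the references in the next paragraphs, while the mixfix priorities are illustrative):

inductive_set
  rtc :: "('a \<times> 'a) set \<Rightarrow> ('a \<times> 'a) set"   ("_*" [1000] 999)
  for r :: "('a \<times> 'a) set"
where
  rtc_refl[iff]: "(x, x) \<in> r*" |
  rtc_step: "\<lbrakk>(x, y) \<in> r; (y, z) \<in> r*\<rbrakk> \<Longrightarrow> (x, z) \<in> r*"
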
@@ -23,9 +23,9 @@
 
 text\<open>\noindent
 The function @{term rtc} is annotated with concrete syntax: instead of
-@{text"rtc r"} we can write @{term"r*"}. The actual definition
+\<open>rtc r\<close> we can write @{term"r*"}. The actual definition
 consists of two rules. Reflexivity is obvious and is immediately given the
-@{text iff} attribute to increase automation. The
+\<open>iff\<close> attribute to increase automation. The
 second rule, @{thm[source]rtc_step}, says that we can always add one more
 @{term r}-step to the left. Although we could make @{thm[source]rtc_step} an
 introduction rule, this is dangerous: the recursion in the second premise
@@ -47,16 +47,15 @@
 danger of killing the automatic tactics because @{term"r*"} occurs only in
 the conclusion and not in the premise. Thus some proofs that would otherwise
 need @{thm[source]rtc_step} can now be found automatically. The proof also
-shows that @{text blast} is able to handle @{thm[source]rtc_step}. But
-some of the other automatic tactics are more sensitive, and even @{text
-blast} can be lead astray in the presence of large numbers of rules.
+shows that \<open>blast\<close> is able to handle @{thm[source]rtc_step}. But
+some of the other automatic tactics are more sensitive, and even \<open>blast\<close> can be led astray in the presence of large numbers of rules.
 
 To prove transitivity, we need rule induction, i.e.\ theorem
 @{thm[source]rtc.induct}:
 @{thm[display]rtc.induct}
-It says that @{text"?P"} holds for an arbitrary pair @{thm (prem 1) rtc.induct}
-if @{text"?P"} is preserved by all rules of the inductive definition,
-i.e.\ if @{text"?P"} holds for the conclusion provided it holds for the
+It says that \<open>?P\<close> holds for an arbitrary pair @{thm (prem 1) rtc.induct}
+if \<open>?P\<close> is preserved by all rules of the inductive definition,
+i.e.\ if \<open>?P\<close> holds for the conclusion provided it holds for the
 premises. In general, rule induction for an $n$-ary inductive relation $R$
 expects a premise of the form $(x@1,\dots,x@n) \in R$.
 
@@ -71,16 +70,16 @@
 @{subgoals[display,indent=0,goals_limit=1]}
 We have to abandon this proof attempt.
 To understand what is going on, let us look again at @{thm[source]rtc.induct}.
-In the above application of @{text erule}, the first premise of
+In the above application of \<open>erule\<close>, the first premise of
 @{thm[source]rtc.induct} is unified with the first suitable assumption, which
 is @{term"(x,y) \<in> r*"} rather than @{term"(y,z) \<in> r*"}. Although that
 is what we want, it is merely due to the order in which the assumptions occur
 in the subgoal, which it is not good practice to rely on. As a result,
-@{text"?xb"} becomes @{term x}, @{text"?xa"} becomes
-@{term y} and @{text"?P"} becomes @{term"\<lambda>u v. (u,z) \<in> r*"}, thus
+\<open>?xb\<close> becomes @{term x}, \<open>?xa\<close> becomes
+@{term y} and \<open>?P\<close> becomes @{term"\<lambda>u v. (u,z) \<in> r*"}, thus
 yielding the above subgoal. So what went wrong?
 
-When looking at the instantiation of @{text"?P"} we see that it does not
+When looking at the instantiation of \<open>?P\<close> we see that it does not
 depend on its second parameter at all. The reason is that in our original
 goal, of the pair @{term"(x,y)"} only @{term x} appears also in the
 conclusion, but not @{term y}. Thus our induction statement is too
@@ -98,8 +97,8 @@
 using $\longrightarrow$.
 \end{quote}
 A similar heuristic for other kinds of inductions is formulated in
-\S\ref{sec:ind-var-in-prems}. The @{text rule_format} directive turns
-@{text"\<longrightarrow>"} back into @{text"\<Longrightarrow>"}: in the end we obtain the original
+\S\ref{sec:ind-var-in-prems}. The \<open>rule_format\<close> directive turns
+\<open>\<longrightarrow>\<close> back into \<open>\<Longrightarrow>\<close>: in the end we obtain the original
 statement of our lemma.
 \<close>
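
Following this heuristic, the transitivity lemma and its proof can be sketched as follows (not part of this changeset):

lemma rtc_trans[rule_format]:
  "(x, y) \<in> r* \<Longrightarrow> \<forall>z. (y, z) \<in> r* \<longrightarrow> (x, z) \<in> r*"
apply(erule rtc.induct)
 apply(blast)
apply(blast intro: rtc_step)
done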
 
--- a/src/Doc/Tutorial/Misc/AdvancedInd.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/AdvancedInd.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -45,12 +45,12 @@
 heuristic applies to rule inductions; see \S\ref{sec:rtc}.}
 \begin{quote}
 \emph{Pull all occurrences of the induction variable into the conclusion
-using @{text"\<longrightarrow>"}.}
+using \<open>\<longrightarrow>\<close>.}
 \end{quote}
 Thus we should state the lemma as an ordinary 
-implication~(@{text"\<longrightarrow>"}), letting
+implication~(\<open>\<longrightarrow>\<close>), letting
 \attrdx{rule_format} (\S\ref{sec:forward}) convert the
-result to the usual @{text"\<Longrightarrow>"} form:
+result to the usual \<open>\<Longrightarrow>\<close> form:
 \<close>
 (*<*)oops(*>*)
 lemma hd_rev [rule_format]: "xs \<noteq> [] \<longrightarrow> hd(rev xs) = last xs"
@@ -61,15 +61,15 @@
 txt\<open>\noindent
 This time, induction leaves us with a trivial base case:
 @{subgoals[display,indent=0,goals_limit=1]}
-And @{text"auto"} completes the proof.
+And \<open>auto\<close> completes the proof.
 
 If there are multiple premises $A@1$, \dots, $A@n$ containing the
 induction variable, you should turn the conclusion $C$ into
 \[ A@1 \longrightarrow \cdots \longrightarrow A@n \longrightarrow C. \]
 Additionally, you may also have to universally quantify some other variables,
-which can yield a fairly complex conclusion.  However, @{text rule_format} 
-can remove any number of occurrences of @{text"\<forall>"} and
-@{text"\<longrightarrow>"}.
+which can yield a fairly complex conclusion.  However, \<open>rule_format\<close> 
+can remove any number of occurrences of \<open>\<forall>\<close> and
+\<open>\<longrightarrow>\<close>.
 
 \index{induction!on a term}%
 A second reason why your proposition may not be amenable to induction is that
@@ -92,7 +92,7 @@
 For an example see \S\ref{sec:CTL-revisited} below.
 
 Of course, all premises that share free variables with $t$ need to be pulled into
-the conclusion as well, under the @{text"\<forall>"}, again using @{text"\<longrightarrow>"} as shown above.
+the conclusion as well, under the \<open>\<forall>\<close>, again using \<open>\<longrightarrow>\<close> as shown above.
 
 Readers who are puzzled by the form of statement
 (\ref{eqn:ind-over-term}) above should remember that the
@@ -138,7 +138,7 @@
 text\<open>
 \begin{warn}
 We discourage the use of axioms because of the danger of
-inconsistencies.  Axiom @{text f_ax} does
+inconsistencies.  Axiom \<open>f_ax\<close> does
 not introduce an inconsistency because, for example, the identity function
 satisfies it.  Axioms can be useful in exploratory developments, say when 
 you assume some well-known theorems so that you can quickly demonstrate some
@@ -163,7 +163,7 @@
 txt\<open>\noindent
 We get the following proof state:
 @{subgoals[display,indent=0,margin=65]}
-After stripping the @{text"\<forall>i"}, the proof continues with a case
+After stripping the \<open>\<forall>i\<close>, the proof continues with a case
 distinction on @{term"i"}. The case @{prop"i = (0::nat)"} is trivial and we focus on
 the other case:
 \<close>
@@ -208,7 +208,7 @@
 lemmas f_incr = f_incr_lem[rule_format, OF refl]
 
 text\<open>\noindent
-The final @{thm[source]refl} gets rid of the premise @{text"?k = f ?i"}. 
+The final @{thm[source]refl} gets rid of the premise \<open>?k = f ?i\<close>. 
 We could have included this derivation in the original statement of the lemma:
 \<close>
 
@@ -225,7 +225,7 @@
 whose conclusion is of the form ${?}P~?x@1 \dots ?x@n$, in which case the
 format is
 \begin{quote}
-\isacommand{apply}@{text"(induct_tac"} $y@1 \dots y@n$ @{text"rule:"} $r$@{text")"}
+\isacommand{apply}\<open>(induct_tac\<close> $y@1 \dots y@n$ \<open>rule:\<close> $r$\<open>)\<close>
 \end{quote}
 where $y@1, \dots, y@n$ are variables in the conclusion of the first subgoal.
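
For instance, the complete-induction proof about @{term f} above presumably opened with a step of this shape (a sketch, not part of this changeset):

apply(induct_tac k rule: nat_less_induct)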
 
@@ -279,7 +279,7 @@
 HOL already provides the mother of
 all inductions, well-founded induction (see \S\ref{sec:Well-founded}).  For
 example, theorem @{thm[source]nat_less_induct} is
-a special case of @{thm[source]wf_induct} where @{term r} is @{text"<"} on
+a special case of @{thm[source]wf_induct} where @{term r} is \<open><\<close> on
 @{typ nat}. The details can be found in theory \isa{Wellfounded_Recursion}.
 \<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Misc/Itrev.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Itrev.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -20,15 +20,15 @@
 \emph{Do induction on argument number $i$ if the function is defined by
 recursion in argument number $i$.}
 \end{quote}
-When we look at the proof of @{text"(xs@ys) @ zs = xs @ (ys@zs)"}
+When we look at the proof of \<open>(xs@ys) @ zs = xs @ (ys@zs)\<close>
 in \S\ref{sec:intro-proof} we find
 \begin{itemize}
-\item @{text"@"} is recursive in
+\item \<open>@\<close> is recursive in
 the first argument
 \item @{term xs}  occurs only as the first argument of
-@{text"@"}
+\<open>@\<close>
 \item both @{term ys} and @{term zs} occur at least once as
-the second argument of @{text"@"}
+the second argument of \<open>@\<close>
 \end{itemize}
 Hence it is natural to perform induction on~@{term xs}.
 
@@ -39,10 +39,10 @@
 step to go through. Let us illustrate the idea with an example.
 
 Function \cdx{rev} has quadratic worst-case running time
-because it calls function @{text"@"} for each element of the list and
-@{text"@"} is linear in its first argument.  A linear time version of
+because it calls function \<open>@\<close> for each element of the list and
+\<open>@\<close> is linear in its first argument.  A linear-time version of
 @{term"rev"} requires an extra argument where the result is accumulated
-gradually, using only~@{text"#"}:
+gradually, using only~\<open>#\<close>:
 \<close>
 
 primrec itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
--- a/src/Doc/Tutorial/Misc/Option2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Option2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -13,13 +13,13 @@
 
 text\<open>\noindent
 Frequently one needs to add a distinguished element to some existing type.
-For example, type @{text"t option"} can model the result of a computation that
+For example, type \<open>t option\<close> can model the result of a computation that
 may either terminate with an error (represented by @{const None}) or return
 some value @{term v} (represented by @{term"Some v"}).
 Similarly, @{typ nat} extended with $\infty$ can be modeled by type
 @{typ"nat option"}. In both cases one could define a new datatype with
 customized constructors like @{term Error} and @{term Infinity},
-but it is often simpler to use @{text option}. For an application see
+but it is often simpler to use \<open>option\<close>. For an application see
 \S\ref{sec:Trie}.
 \<close>
 (*<*)
--- a/src/Doc/Tutorial/Misc/Tree2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Tree2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -4,7 +4,7 @@
 
 text\<open>\noindent In Exercise~\ref{ex:Tree} we defined a function
 @{term"flatten"} from trees to lists. The straightforward version of
-@{term"flatten"} is based on @{text"@"} and is thus, like @{term"rev"},
+@{term"flatten"} is based on \<open>@\<close> and is thus, like @{term"rev"},
 quadratic. A linear-time version of @{term"flatten"} again requires an extra
 argument, the accumulator. Define\<close>
 (*<*)primrec(*>*)flatten2 :: "'a tree \<Rightarrow> 'a list \<Rightarrow> 'a list"(*<*)where
--- a/src/Doc/Tutorial/Misc/case_exprs.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/case_exprs.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -16,12 +16,12 @@
 In general, case expressions are of the form
 \[
 \begin{array}{c}
-@{text"case"}~e~@{text"of"}\ pattern@1~@{text"\<Rightarrow>"}~e@1\ @{text"|"}\ \dots\
- @{text"|"}~pattern@m~@{text"\<Rightarrow>"}~e@m
+\<open>case\<close>~e~\<open>of\<close>\ pattern@1~\<open>\<Rightarrow>\<close>~e@1\ \<open>|\<close>\ \dots\
+ \<open>|\<close>~pattern@m~\<open>\<Rightarrow>\<close>~e@m
 \end{array}
 \]
 As in functional programming, patterns are expressions consisting of
-datatype constructors (e.g. @{term"[]"} and @{text"#"})
+datatype constructors (e.g. @{term"[]"} and \<open>#\<close>)
 and variables, including the wildcard ``\verb$_$''.
 Not all cases need to be covered and the order of cases matters.
 However, one is well-advised not to wallow in complex patterns because
@@ -39,7 +39,7 @@
 \end{warn}
 
 \begin{warn}
-Like @{text"if"}, @{text"case"}-expressions may need to be enclosed in
+Like \<open>if\<close>, \<open>case\<close>-expressions may need to be enclosed in
 parentheses to indicate their scope.
 \end{warn}
 
@@ -73,11 +73,11 @@
   Induction is only allowed on free (or \isasymAnd-bound) variables that
   should not occur among the assumptions of the subgoal; see
   \S\ref{sec:ind-var-in-prems} for details. Case distinction
-  (@{text"case_tac"}) works for arbitrary terms, which need to be
-  quoted if they are non-atomic. However, apart from @{text"\<And>"}-bound
+  (\<open>case_tac\<close>) works for arbitrary terms, which need to be
+  quoted if they are non-atomic. However, apart from \<open>\<And>\<close>-bound
   variables, the terms must not contain variables that are bound outside.
   For example, given the goal @{prop"\<forall>xs. xs = [] \<or> (\<exists>y ys. xs = y#ys)"},
-  @{text"case_tac xs"} will not work as expected because Isabelle interprets
+  \<open>case_tac xs\<close> will not work as expected because Isabelle interprets
   the @{term xs} as a new free variable distinct from the bound
   @{term xs} in the goal.
 \end{warn}
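
Within these restrictions, \<open>case_tac\<close> is easy to use; a typical application is sketched below (the lemma is illustrative, not part of this changeset):

lemma "(case xs of [] \<Rightarrow> [] | y#ys \<Rightarrow> xs) = xs"
apply(case_tac xs)
apply(auto)
done
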
--- a/src/Doc/Tutorial/Misc/natsum.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/natsum.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -2,7 +2,7 @@
 theory natsum imports Main begin
 (*>*)
 text\<open>\noindent
-In particular, there are @{text"case"}-expressions, for example
+In particular, there are \<open>case\<close>-expressions, for example
 @{term[display]"case n of 0 => 0 | Suc m => m"}
 primitive recursion, for example
 \<close>
@@ -38,17 +38,17 @@
   \cdx{max}, \isadxboldpos{\isasymle}{$HOL2arithrel} and
   \isadxboldpos{<}{$HOL2arithrel} are overloaded: they are available
   not just for natural numbers but for other types as well.
-  For example, given the goal @{text"x + 0 = x"}, there is nothing to indicate
+  For example, given the goal \<open>x + 0 = x\<close>, there is nothing to indicate
   that you are talking about natural numbers. Hence Isabelle can only infer
-  that @{term x} is of some arbitrary type where @{text 0} and @{text"+"} are
+  that @{term x} is of some arbitrary type where \<open>0\<close> and \<open>+\<close> are
   declared. As a consequence, you will be unable to prove the
   goal. To alert you to such pitfalls, Isabelle flags numerals without a
   fixed type in its output: @{prop"x+0 = x"}. (In the absence of a numeral,
   it may take you some time to realize what has happened if \pgmenu{Show
   Types} is not set.)  In this particular example, you need to include
-  an explicit type constraint, for example @{text"x+0 = (x::nat)"}. If there
+  an explicit type constraint, for example \<open>x+0 = (x::nat)\<close>. If there
   is enough contextual information this may not be necessary: @{prop"Suc x =
-  x"} automatically implies @{text"x::nat"} because @{term Suc} is not
+  x"} automatically implies \<open>x::nat\<close> because @{term Suc} is not
   overloaded.
 
   For details on overloading see \S\ref{sec:overloading}.
@@ -57,20 +57,20 @@
 \end{warn}
 \begin{warn}
   The symbols \isadxboldpos{>}{$HOL2arithrel} and
-  \isadxboldpos{\isasymge}{$HOL2arithrel} are merely syntax: @{text"x > y"}
-  stands for @{prop"y < x"} and similary for @{text"\<ge>"} and
-  @{text"\<le>"}.
+  \isadxboldpos{\isasymge}{$HOL2arithrel} are merely syntax: \<open>x > y\<close>
+  stands for @{prop"y < x"} and similarly for \<open>\<ge>\<close> and
+  \<open>\<le>\<close>.
 \end{warn}
 \begin{warn}
-  Constant @{text"1::nat"} is defined to equal @{term"Suc 0"}. This definition
+  Constant \<open>1::nat\<close> is defined to equal @{term"Suc 0"}. This definition
   (see \S\ref{sec:ConstDefinitions}) is unfolded automatically by some
-  tactics (like @{text auto}, @{text simp} and @{text arith}) but not by
+  tactics (like \<open>auto\<close>, \<open>simp\<close> and \<open>arith\<close>) but not by
   others (especially the single step tactics in Chapter~\ref{chap:rules}).
   If you need the full set of numerals, see~\S\ref{sec:numerals}.
   \emph{Novices are advised to stick to @{term"0::nat"} and @{term Suc}.}
 \end{warn}
 
-Both @{text auto} and @{text simp}
+Both \<open>auto\<close> and \<open>simp\<close>
 (a method introduced below, \S\ref{sec:Simplification}) prove 
 simple arithmetic goals automatically:
 \<close>
@@ -81,7 +81,7 @@
 text\<open>\noindent
 For efficiency's sake, this built-in prover ignores quantified formulae,
 many logical connectives, and all arithmetic operations apart from addition.
-In consequence, @{text auto} and @{text simp} cannot prove this slightly more complex goal:
+In consequence, \<open>auto\<close> and \<open>simp\<close> cannot prove this slightly more complex goal:
 \<close>
 
 lemma "m \<noteq> (n::nat) \<Longrightarrow> m < n \<or> n < m"
@@ -89,10 +89,10 @@
 
 text\<open>\noindent The method \methdx{arith} is more general.  It attempts to
 prove the first subgoal provided it is a \textbf{linear arithmetic} formula.
-Such formulas may involve the usual logical connectives (@{text"\<not>"},
-@{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}, @{text"="},
-@{text"\<forall>"}, @{text"\<exists>"}), the relations @{text"="},
-@{text"\<le>"} and @{text"<"}, and the operations @{text"+"}, @{text"-"},
+Such formulas may involve the usual logical connectives (\<open>\<not>\<close>,
+\<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>, \<open>=\<close>,
+\<open>\<forall>\<close>, \<open>\<exists>\<close>), the relations \<open>=\<close>,
+\<open>\<le>\<close> and \<open><\<close>, and the operations \<open>+\<close>, \<open>-\<close>,
 @{term min} and @{term max}.  For example,\<close>
 
 lemma "min i (max j (k*k)) = max (min (k*k) i) (min i (j::nat))"
@@ -107,19 +107,19 @@
 (*<*)oops(*>*)
 
 text\<open>\noindent
-is not proved by @{text arith} because the proof relies 
+is not proved by \<open>arith\<close> because the proof relies 
 on properties of multiplication. Only multiplication by numerals (which is
 the same as iterated addition) is taken into account.
 
-\begin{warn} The running time of @{text arith} is exponential in the number
+\begin{warn} The running time of \<open>arith\<close> is exponential in the number
   of occurrences of \ttindexboldpos{-}{$HOL2arithfun}, \cdx{min} and
   \cdx{max} because they are first eliminated by case distinctions.
 
-If @{text k} is a numeral, \sdx{div}~@{text k}, \sdx{mod}~@{text k} and
-@{text k}~\sdx{dvd} are also supported, where the former two are eliminated
+If \<open>k\<close> is a numeral, \sdx{div}~\<open>k\<close>, \sdx{mod}~\<open>k\<close> and
+\<open>k\<close>~\sdx{dvd} are also supported, where the former two are eliminated
 by case distinctions, again blowing up the running time.
 
-If the formula involves quantifiers, @{text arith} may take
+If the formula involves quantifiers, \<open>arith\<close> may take
 super-exponential time and space.
 \end{warn}
 \<close>
--- a/src/Doc/Tutorial/Misc/pairs2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/pairs2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -20,7 +20,7 @@
 as a degenerate product with 0 components.
 \item
 Products, like type @{typ nat}, are datatypes, which means
-in particular that @{text induct_tac} and @{text case_tac} are applicable to
+in particular that \<open>induct_tac\<close> and \<open>case_tac\<close> are applicable to
 terms of product type.
 Both split the term into a number of variables corresponding to the tuple structure
 (up to 7 components).
--- a/src/Doc/Tutorial/Misc/prime_def.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/prime_def.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -6,7 +6,7 @@
 \begin{warn}
 A common mistake when writing definitions is to introduce extra free
 variables on the right-hand side.  Consider the following, flawed definition
-(where @{text"dvd"} means ``divides''):
+(where \<open>dvd\<close> means ``divides''):
 @{term[display,quotes]"prime(p) \<equiv> 1 < p \<and> (m dvd p \<longrightarrow> (m=1 \<or> m=p))"}
 \par\noindent\hangindent=0pt
 Isabelle rejects this ``definition'' because of the extra @{term"m"} on the
--- a/src/Doc/Tutorial/Misc/simp.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/simp.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -6,7 +6,7 @@
 
 text\<open>\index{simplification rules}
 To facilitate simplification,  
-the attribute @{text"[simp]"}\index{*simp (attribute)}
+the attribute \<open>[simp]\<close>\index{*simp (attribute)}
 declares theorems to be simplification rules, which the simplifier
 will use automatically.  In addition, \isacommand{datatype} and
 \isacommand{primrec} declarations (and a few others) 
@@ -23,8 +23,8 @@
 The simplification attribute of theorems can be turned on and off:%
 \index{*simp del (attribute)}
 \begin{quote}
-\isacommand{declare} \textit{theorem-name}@{text"[simp]"}\\
-\isacommand{declare} \textit{theorem-name}@{text"[simp del]"}
+\isacommand{declare} \textit{theorem-name}\<open>[simp]\<close>\\
+\isacommand{declare} \textit{theorem-name}\<open>[simp del]\<close>
 \end{quote}
 Only equations that really simplify, like \isa{rev\
 {\isacharparenleft}rev\ xs{\isacharparenright}\ {\isacharequal}\ xs} and
@@ -56,15 +56,15 @@
 text\<open>\index{*simp (method)|bold}
 The general format of the simplification method is
 \begin{quote}
-@{text simp} \textit{list of modifiers}
+\<open>simp\<close> \textit{list of modifiers}
 \end{quote}
 where the list of \emph{modifiers} fine tunes the behaviour and may
 be empty. Specific modifiers are discussed below.  Most if not all of the
 proofs seen so far could have been performed
-with @{text simp} instead of \isa{auto}, except that @{text simp} attacks
+with \<open>simp\<close> instead of \isa{auto}, except that \<open>simp\<close> attacks
 only the first subgoal and may thus need to be repeated --- use
 \methdx{simp_all} to simplify all subgoals.
-If nothing changes, @{text simp} fails.
+If nothing changes, \<open>simp\<close> fails.
 \<close>
 
 subsection\<open>Adding and Deleting Simplification Rules\<close>
@@ -76,17 +76,17 @@
 the set of simplification rules used in a simplification step by adding rules
 to it and/or deleting rules from it. The two modifiers for this are
 \begin{quote}
-@{text"add:"} \textit{list of theorem names}\index{*add (modifier)}\\
-@{text"del:"} \textit{list of theorem names}\index{*del (modifier)}
+\<open>add:\<close> \textit{list of theorem names}\index{*add (modifier)}\\
+\<open>del:\<close> \textit{list of theorem names}\index{*del (modifier)}
 \end{quote}
 Or you can use a specific list of theorems and omit all others:
 \begin{quote}
-@{text"only:"} \textit{list of theorem names}\index{*only (modifier)}
+\<open>only:\<close> \textit{list of theorem names}\index{*only (modifier)}
 \end{quote}
 In this example, we invoke the simplifier, adding two distributive
 laws:
 \begin{quote}
-\isacommand{apply}@{text"(simp add: mod_mult_distrib add_mult_distrib)"}
+\isacommand{apply}\<open>(simp add: mod_mult_distrib add_mult_distrib)\<close>
 \end{quote}
 \<close>
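
A sketch of the \<open>add:\<close> modifier in action, using the distributive law named just above (the lemma statement itself is illustrative only):

(* distributivity is not a default simp rule, so it must be added explicitly *)
lemma "(a + b) * c = a * c + b * (c::nat)"
by (simp add: add_mult_distrib)
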
 
@@ -112,7 +112,7 @@
 lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
 
 txt\<open>\noindent
-An unmodified application of @{text"simp"} loops.  The culprit is the
+An unmodified application of \<open>simp\<close> loops.  The culprit is the
 simplification rule @{term"f x = g (f (g x))"}, which is extracted from
 the assumption.  (Isabelle notices certain simple forms of
 nontermination but not this one.)  The problem can be circumvented by
@@ -125,12 +125,12 @@
 text\<open>\noindent
 Three modifiers influence the treatment of assumptions:
 \begin{description}
-\item[@{text"(no_asm)"}]\index{*no_asm (modifier)}
+\item[\<open>(no_asm)\<close>]\index{*no_asm (modifier)}
  means that assumptions are completely ignored.
-\item[@{text"(no_asm_simp)"}]\index{*no_asm_simp (modifier)}
+\item[\<open>(no_asm_simp)\<close>]\index{*no_asm_simp (modifier)}
  means that the assumptions are not simplified but
   are used in the simplification of the conclusion.
-\item[@{text"(no_asm_use)"}]\index{*no_asm_use (modifier)}
+\item[\<open>(no_asm_use)\<close>]\index{*no_asm_use (modifier)}
  means that the assumptions are simplified but are not
   used in the simplification of each other or the conclusion.
 \end{description}
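
A hedged sketch of how the looping goal above can then be closed (an assumed continuation, not quoted from the sources):

lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
apply(simp (no_asm))  (* rewrites f [] @ [] to f [] without touching the looping assumption *)
done
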
@@ -205,9 +205,9 @@
 
 text\<open>\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
 Proving a goal containing \isa{let}-expressions almost invariably requires the
-@{text"let"}-con\-structs to be expanded at some point. Since
-@{text"let"}\ldots\isa{=}\ldots@{text"in"}{\ldots} is just syntactic sugar for
-the predefined constant @{term"Let"}, expanding @{text"let"}-constructs
+\<open>let\<close>-con\-structs to be expanded at some point. Since
+\<open>let\<close>\ldots\isa{=}\ldots\<open>in\<close>{\ldots} is just syntactic sugar for
+the predefined constant @{term"Let"}, expanding \<open>let\<close>-constructs
 means rewriting with \tdx{Let_def}:\<close>
 
 lemma "(let xs = [] in xs@ys@xs) = ys"
@@ -216,7 +216,7 @@
 
 text\<open>
 If, in a particular context, there is no danger of a combinatorial explosion
-of nested @{text"let"}s, you could even simplify with @{thm[source]Let_def} by
+of nested \<open>let\<close>s, you could even simplify with @{thm[source]Let_def} by
 default:
 \<close>
 declare Let_def [simp]
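
A minimal sketch of the effect (illustrative only):

(* with Let_def the let-construct vanishes and numeral arithmetic finishes the job *)
lemma "(let x = 2 in x + x) = (4::nat)"
by (simp add: Let_def)
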
@@ -257,7 +257,7 @@
 subsection\<open>Automatic Case Splits\<close>
 
 text\<open>\label{sec:AutoCaseSplits}\indexbold{case splits}%
-Goals containing @{text"if"}-expressions\index{*if expressions!splitting of}
+Goals containing \<open>if\<close>-expressions\index{*if expressions!splitting of}
 are usually proved by case
 distinction on the boolean condition.  Here is an example:
 \<close>
@@ -273,12 +273,12 @@
 txt\<open>\noindent
 @{subgoals[display,indent=0]}
 where \tdx{if_split} is a theorem that expresses splitting of
-@{text"if"}s. Because
-splitting the @{text"if"}s is usually the right proof strategy, the
-simplifier does it automatically.  Try \isacommand{apply}@{text"(simp)"}
+\<open>if\<close>s. Because
+splitting the \<open>if\<close>s is usually the right proof strategy, the
+simplifier does it automatically.  Try \isacommand{apply}\<open>(simp)\<close>
 on the initial goal above.
 
-This splitting idea generalizes from @{text"if"} to \sdx{case}.
+This splitting idea generalizes from \<open>if\<close> to \sdx{case}.
 Let us simplify a case analysis over lists:\index{*list.split (theorem)}
 \<close>(*<*)by simp(*>*)
 lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
@@ -287,10 +287,10 @@
 txt\<open>
 @{subgoals[display,indent=0]}
 The simplifier does not split
-@{text"case"}-expressions, as it does @{text"if"}-expressions, 
+\<open>case\<close>-expressions, as it does \<open>if\<close>-expressions, 
 because with recursive datatypes it could lead to nontermination.
 Instead, the simplifier has a modifier
-@{text split}\index{*split (modifier)} 
+\<open>split\<close>\index{*split (modifier)} 
 for adding splitting rules explicitly.  The
 lemma above can be proved in one step by
 \<close>
@@ -300,17 +300,17 @@
 apply(simp split: list.split)
 (*<*)done(*>*)
 text\<open>\noindent
-whereas \isacommand{apply}@{text"(simp)"} alone will not succeed.
+whereas \isacommand{apply}\<open>(simp)\<close> alone will not succeed.
 
 Every datatype $t$ comes with a theorem
-$t$@{text".split"} which can be declared to be a \bfindex{split rule} either
+$t$\<open>.split\<close> which can be declared to be a \bfindex{split rule} either
 locally as above, or by giving it the \attrdx{split} attribute globally:
 \<close>
 
 declare list.split [split]
 
 text\<open>\noindent
-The @{text"split"} attribute can be removed with the @{text"del"} modifier,
+The \<open>split\<close> attribute can be removed with the \<open>del\<close> modifier,
 either locally
 \<close>
 (*<*)
@@ -326,27 +326,27 @@
 declare list.split [split del]
 
 text\<open>
-Polished proofs typically perform splitting within @{text simp} rather than 
-invoking the @{text split} method.  However, if a goal contains
-several @{text "if"} and @{text case} expressions, 
-the @{text split} method can be
+Polished proofs typically perform splitting within \<open>simp\<close> rather than 
+invoking the \<open>split\<close> method.  However, if a goal contains
+several \<open>if\<close> and \<open>case\<close> expressions, 
+the \<open>split\<close> method can be
 helpful in selectively exploring the effects of splitting.
 
 The split rules shown above are intended to affect only the subgoal's
-conclusion.  If you want to split an @{text"if"} or @{text"case"}-expression
+conclusion.  If you want to split an \<open>if\<close> or \<open>case\<close>-expression
 in the assumptions, you have to apply \tdx{if_split_asm} or
-$t$@{text".split_asm"}:\<close>
+$t$\<open>.split_asm\<close>:\<close>
 
 lemma "if xs = [] then ys \<noteq> [] else ys = [] \<Longrightarrow> xs @ ys \<noteq> []"
 apply(split if_split_asm)
 
 txt\<open>\noindent
 Unlike splitting the conclusion, this step creates two
-separate subgoals, which here can be solved by @{text"simp_all"}:
+separate subgoals, which here can be solved by \<open>simp_all\<close>:
 @{subgoals[display,indent=0]}
 If you need to split both in the assumptions and the conclusion,
-use $t$@{text".splits"} which subsumes $t$@{text".split"} and
-$t$@{text".split_asm"}. Analogously, there is @{thm[source]if_splits}.
+use $t$\<open>.splits\<close> which subsumes $t$\<open>.split\<close> and
+$t$\<open>.split_asm\<close>. Analogously, there is @{thm[source]if_splits}.
 
 \begin{warn}
   The simplifier merely simplifies the condition of an 
@@ -476,7 +476,7 @@
 @{text[display]"_ * (_ + _) = \<dots>"}
 It only finds equations that can simplify the given pattern
 at the root, not somewhere inside: for example, equations of the form
-@{text"_ + _ = \<dots>"} do not match.
+\<open>_ + _ = \<dots>\<close> do not match.
 
 You may also search for theorems by name---you merely
 need to specify a substring. For example, you could search for all
@@ -502,7 +502,7 @@
 "_ + _"  -"_ - _"  -simp: "_ * (_ + _)"  name: assoc
 \end{ttbox}
 looks for theorems containing plus but not minus, and which do not simplify
-\mbox{@{text"_ * (_ + _)"}} at the root, and whose name contains \texttt{assoc}.
+\mbox{\<open>_ * (_ + _)\<close>} at the root, and whose name contains \texttt{assoc}.
 
 Further search criteria are explained in \S\ref{sec:find2}.
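
In present-day Isabelle the same search can presumably be issued with the \<open>find_theorems\<close> command; the criteria below mirror the \texttt{ttbox} example above:

find_theorems "_ + _" -"_ - _" -simp: "_ * (_ + _)" name: assoc
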
 
--- a/src/Doc/Tutorial/Misc/types.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Misc/types.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -14,7 +14,7 @@
 
 text\<open>\label{sec:ConstDefinitions}\indexbold{definitions}%
 Nonrecursive definitions can be made with the \commdx{definition}
-command, for example @{text nand} and @{text xor} gates
+command, for example \<open>nand\<close> and \<open>xor\<close> gates
 (based on type @{typ gate} above):
 \<close>
 
@@ -27,6 +27,6 @@
 Pattern-matching is not allowed: each definition must be of
 the form $f\,x@1\,\dots\,x@n~\isasymequiv~t$.
 Section~\ref{sec:Simp-with-Defs} explains how definitions are used
-in proofs. The default name of each definition is $f$@{text"_def"}, where
+in proofs. The default name of each definition is $f$\<open>_def\<close>, where
 $f$ is the name of the defined constant.\<close>
 (*<*)end(*>*)
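
A sketch of such a definition and its automatically named theorem (assuming that type \<open>gate\<close> abbreviates \<open>bool \<Rightarrow> bool \<Rightarrow> bool\<close>, as the surrounding theory suggests):

definition nand :: gate where "nand A B \<equiv> \<not>(A \<and> B)"
(* the defining equation is now available under the default name nand_def *)
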
--- a/src/Doc/Tutorial/Protocol/Event.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Event.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -275,7 +275,7 @@
 done
 
 
-text\<open>For proving @{text new_keys_not_used}\<close>
+text\<open>For proving \<open>new_keys_not_used\<close>\<close>
 lemma keysFor_parts_insert:
      "[| K \<in> keysFor (parts (insert X G));  X \<in> synth (analz H) |] 
       ==> K \<in> keysFor (parts (G \<union> H)) | Key (invKey K) \<in> parts H" 
@@ -350,29 +350,29 @@
 
 text \<open>
 The system's behaviour is formalized as a set of traces of
-\emph{events}.  The most important event, @{text "Says A B X"}, expresses
+\emph{events}.  The most important event, \<open>Says A B X\<close>, expresses
 $A\to B : X$, which is the attempt by~$A$ to send~$B$ the message~$X$.
 A trace is simply a list, constructed in reverse
-using~@{text "#"}.  Other event types include reception of messages (when
+using~\<open>#\<close>.  Other event types include reception of messages (when
 we want to make it explicit) and an agent's storing a fact.
 
 Sometimes the protocol requires an agent to generate a new nonce. The
 probability that a 20-byte random number has appeared before is effectively
 zero.  To formalize this important property, the set @{term "used evs"}
-denotes the set of all items mentioned in the trace~@{text evs}.
-The function @{text used} has a straightforward
-recursive definition.  Here is the case for @{text Says} event:
+denotes the set of all items mentioned in the trace~\<open>evs\<close>.
+The function \<open>used\<close> has a straightforward
+recursive definition.  Here is the case for the \<open>Says\<close> event:
 @{thm [display,indent=5] used_Says [no_vars]}
 
-The function @{text knows} formalizes an agent's knowledge.  Mostly we only
+The function \<open>knows\<close> formalizes an agent's knowledge.  Mostly we only
 care about the spy's knowledge, and @{term "knows Spy evs"} is the set of items
-available to the spy in the trace~@{text evs}.  Already in the empty trace,
+available to the spy in the trace~\<open>evs\<close>.  Already in the empty trace,
 the spy starts with some secrets at his disposal, such as the private keys
-of compromised users.  After each @{text Says} event, the spy learns the
+of compromised users.  After each \<open>Says\<close> event, the spy learns the
 message that was sent:
 @{thm [display,indent=5] knows_Spy_Says [no_vars]}
 Combinations of functions express other important
-sets of messages derived from~@{text evs}:
+sets of messages derived from~\<open>evs\<close>:
 \begin{itemize}
 \item @{term "analz (knows Spy evs)"} is everything that the spy could
 learn by decryption
--- a/src/Doc/Tutorial/Protocol/Message.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Message.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -20,15 +20,15 @@
 text \<open>
 All protocol specifications refer to a syntactic theory of messages. 
 Datatype
-@{text agent} introduces the constant @{text Server} (a trusted central
+\<open>agent\<close> introduces the constant \<open>Server\<close> (a trusted central
 machine, needed for some protocols), an infinite population of
-friendly agents, and the~@{text Spy}:
+friendly agents, and the~\<open>Spy\<close>:
 \<close>
 
 datatype agent = Server | Friend nat | Spy
 
 text \<open>
-Keys are just natural numbers.  Function @{text invKey} maps a public key to
+Keys are just natural numbers.  Function \<open>invKey\<close> maps a public key to
 the matching private key, and vice versa:
 \<close>
 
@@ -52,7 +52,7 @@
 
 text \<open>
 Datatype
-@{text msg} introduces the message forms, which include agent names, nonces,
+\<open>msg\<close> introduces the message forms, which include agent names, nonces,
 keys, compound messages, and encryptions.  
 \<close>
 
@@ -179,7 +179,7 @@
 declare MPair_parts [elim!]  parts.Body [dest!]
 text\<open>NB These two rules are UNSAFE in the formal sense, as they discard the
      compound message.  They work well on THIS FILE.  
-  @{text MPair_parts} is left as SAFE because it speeds up proofs.
+  \<open>MPair_parts\<close> is left as SAFE because it speeds up proofs.
   The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.\<close>
 
 lemma parts_increasing: "H \<subseteq> parts(H)"
@@ -241,7 +241,7 @@
   NOTE: the UN versions are no longer used!\<close>
 
 
-text\<open>This allows @{text blast} to simplify occurrences of 
+text\<open>This allows \<open>blast\<close> to simplify occurrences of 
   @{term "parts(G\<union>H)"} in the assumption.\<close>
 lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE] 
 declare in_parts_UnE [elim!]
@@ -344,9 +344,9 @@
 Thus he accumulates additional keys and nonces.  These he can use to
 compose new messages, which he may send to anybody.  
 
-Two functions enable us to formalize this behaviour: @{text analz} and
-@{text synth}.  Each function maps a sets of messages to another set of
-messages. The set @{text "analz H"} formalizes what the adversary can learn
+Two functions enable us to formalize this behaviour: \<open>analz\<close> and
+\<open>synth\<close>.  Each function maps a set of messages to another set of
+messages. The set \<open>analz H\<close> formalizes what the adversary can learn
 from the set of messages~$H$.  The closure properties of this set are
 defined inductively.
 \<close>
@@ -483,8 +483,8 @@
 by (intro equalityI lemma1 lemma2)
 
 text\<open>Case analysis: either the message is secure, or it is not! Effective,
-but can cause subgoals to blow up! Use with @{text "if_split"}; apparently
-@{text "split_tac"} does not cope with patterns such as @{term"analz (insert
+but can cause subgoals to blow up! Use with \<open>if_split\<close>; apparently
+\<open>split_tac\<close> does not cope with patterns such as @{term"analz (insert
 (Crypt K X) H)"}\<close> 
 lemma analz_Crypt_if [simp]:
      "analz (insert (Crypt K X) H) =                 
@@ -577,13 +577,13 @@
 by (blast intro: analz_UN_analz_lemma analz_mono [THEN [2] rev_subsetD])
 (*>*)
 text \<open>
-Note the @{text Decrypt} rule: the spy can decrypt a
+Note the \<open>Decrypt\<close> rule: the spy can decrypt a
 message encrypted with key~$K$ if he has the matching key,~$K^{-1}$. 
 Properties proved by rule induction include the following:
 @{named_thms [display,indent=0] analz_mono [no_vars] (analz_mono) analz_idem [no_vars] (analz_idem)}
 
 The set of fake messages that an intruder could invent
-starting from~@{text H} is @{text "synth(analz H)"}, where @{text "synth H"}
+starting from~\<open>H\<close> is \<open>synth(analz H)\<close>, where \<open>synth H\<close>
 formalizes what the adversary can build from the set of messages~$H$.  
 \<close>
 
@@ -624,10 +624,10 @@
 elements of @{term "synth H"} can be combined, and an element can be encrypted
 using a key present in~$H$.
 
-Like @{text analz}, this set operator is monotone and idempotent.  It also
-satisfies an interesting equation involving @{text analz}:
+Like \<open>analz\<close>, this set operator is monotone and idempotent.  It also
+satisfies an interesting equation involving \<open>analz\<close>:
 @{named_thms [display,indent=0] analz_synth [no_vars] (analz_synth)}
-Rule inversion plays a major role in reasoning about @{text synth}, through
+Rule inversion plays a major role in reasoning about \<open>synth\<close>, through
 declarations such as this one:
 \<close>
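
The declaration elided between these hunks is presumably a rule-inversion setup of roughly this shape (the name and attribute are assumed, not quoted):

inductive_cases Nonce_synth [elim!]: "Nonce n \<in> synth H"
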
 
@@ -639,16 +639,16 @@
 @{term "Nonce n \<in> synth H"} by @{term "Nonce n \<in> H"},
 expressing that a nonce cannot be guessed.  
 
-A third operator, @{text parts}, is useful for stating correctness
+A third operator, \<open>parts\<close>, is useful for stating correctness
 properties.  The set
 @{term "parts H"} consists of the components of elements of~$H$.  This set
-includes~@{text H} and is closed under the projections from a compound
+includes~\<open>H\<close> and is closed under the projections from a compound
 message to its immediate parts. 
-Its definition resembles that of @{text analz} except in the rule
-corresponding to the constructor @{text Crypt}: 
+Its definition resembles that of \<open>analz\<close> except in the rule
+corresponding to the constructor \<open>Crypt\<close>: 
 @{thm [display,indent=5] parts.Body [no_vars]}
 The body of an encrypted message is always regarded as part of it.  We can
-use @{text parts} to express general well-formedness properties of a protocol,
+use \<open>parts\<close> to express general well-formedness properties of a protocol,
 for example, that an uncompromised agent's private key will never be
 included as a component of any message.
 \<close>
@@ -780,7 +780,7 @@
 
 
 text\<open>Rewrites to push in Key and Crypt messages, so that other messages can
-    be pulled out using the @{text analz_insert} rules\<close>
+    be pulled out using the \<open>analz_insert\<close> rules\<close>
 
 lemmas pushKeys =
   insert_commute [of "Key K" "Agent C"]
@@ -796,7 +796,7 @@
   insert_commute [of "Crypt X K" "MPair X' Y"]
   for X K C N X' Y
 
-text\<open>Cannot be added with @{text "[simp]"} -- messages should not always be
+text\<open>Cannot be added with \<open>[simp]\<close> -- messages should not always be
   re-ordered.\<close>
 lemmas pushes = pushKeys pushCrypts
 
@@ -856,7 +856,7 @@
        DEPTH_SOLVE (atomic_spy_analz_tac ctxt 1)]) i);
 \<close>
 
-text\<open>By default only @{text o_apply} is built-in.  But in the presence of
+text\<open>By default only \<open>o_apply\<close> is built-in.  But in the presence of
 eta-expansion this means that some terms displayed as @{term "f o g"} will be
 rewritten, and others will not!\<close>
 declare o_def [simp]
@@ -879,7 +879,7 @@
 apply (rule synth_analz_mono, blast)   
 done
 
-text\<open>Two generalizations of @{text analz_insert_eq}\<close>
+text\<open>Two generalizations of \<open>analz_insert_eq\<close>\<close>
 lemma gen_analz_insert_eq [rule_format]:
      "X \<in> analz H \<Longrightarrow> \<forall>G. H \<subseteq> G \<longrightarrow> analz (insert X G) = analz G"
 by (blast intro: analz_cut analz_insertI analz_mono [THEN [2] rev_subsetD])
--- a/src/Doc/Tutorial/Protocol/NS_Public.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/NS_Public.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -58,27 +58,27 @@
 }
 
 Each protocol step is specified by a rule of an inductive definition.  An
-event trace has type @{text "event list"}, so we declare the constant
-@{text ns_public} to be a set of such traces.
+event trace has type \<open>event list\<close>, so we declare the constant
+\<open>ns_public\<close> to be a set of such traces.
 
 Figure~\ref{fig:ns_public} presents the inductive definition.  The
-@{text Nil} rule introduces the empty trace.  The @{text Fake} rule models the
+\<open>Nil\<close> rule introduces the empty trace.  The \<open>Fake\<close> rule models the
 adversary's sending a message built from components taken from past
-traffic, expressed using the functions @{text synth} and
-@{text analz}. 
+traffic, expressed using the functions \<open>synth\<close> and
+\<open>analz\<close>. 
 The next three rules model how honest agents would perform the three
 protocol steps.  
 
-Here is a detailed explanation of rule @{text NS2}.
+Here is a detailed explanation of rule \<open>NS2\<close>.
 A trace containing an event of the form
 @{term [display,indent=5] "Says A' B (Crypt (pubK B) \<lbrace>Nonce NA, Agent A\<rbrace>)"}
 may be extended by an event of the form
 @{term [display,indent=5] "Says B A (Crypt (pubK A) \<lbrace>Nonce NA, Nonce NB, Agent B\<rbrace>)"}
-where @{text NB} is a fresh nonce: @{term "Nonce NB \<notin> used evs2"}.
-Writing the sender as @{text A'} indicates that @{text B} does not 
-know who sent the message.  Calling the trace variable @{text evs2} rather
-than simply @{text evs} helps us know where we are in a proof after many
-case-splits: every subgoal mentioning @{text evs2} involves message~2 of the
+where \<open>NB\<close> is a fresh nonce: @{term "Nonce NB \<notin> used evs2"}.
+Writing the sender as \<open>A'\<close> indicates that \<open>B\<close> does not 
+know who sent the message.  Calling the trace variable \<open>evs2\<close> rather
+than simply \<open>evs\<close> helps us know where we are in a proof after many
+case-splits: every subgoal mentioning \<open>evs2\<close> involves message~2 of the
 protocol.
 
 Benefits of this approach are simplicity and clarity.  The semantic model
@@ -113,16 +113,16 @@
 Secrecy properties can be hard to prove.  The conclusion of a typical
 secrecy theorem is 
 @{term "X \<notin> analz (knows Spy evs)"}.  The difficulty arises from
-having to reason about @{text analz}, or less formally, showing that the spy
-can never learn~@{text X}.  Much easier is to prove that @{text X} can never
+having to reason about \<open>analz\<close>, or less formally, showing that the spy
+can never learn~\<open>X\<close>.  Much easier is to prove that \<open>X\<close> can never
 occur at all.  Such \emph{regularity} properties are typically expressed
-using @{text parts} rather than @{text analz}.
+using \<open>parts\<close> rather than \<open>analz\<close>.
 
-The following lemma states that @{text A}'s private key is potentially
-known to the spy if and only if @{text A} belongs to the set @{text bad} of
-compromised agents.  The statement uses @{text parts}: the very presence of
-@{text A}'s private key in a message, whether protected by encryption or
-not, is enough to confirm that @{text A} is compromised.  The proof, like
+The following lemma states that \<open>A\<close>'s private key is potentially
+known to the spy if and only if \<open>A\<close> belongs to the set \<open>bad\<close> of
+compromised agents.  The statement uses \<open>parts\<close>: the very presence of
+\<open>A\<close>'s private key in a message, whether protected by encryption or
+not, is enough to confirm that \<open>A\<close> is compromised.  The proof, like
 nearly all protocol proofs, is by induction over traces.
 \<close>
 
@@ -132,14 +132,14 @@
 apply (erule ns_public.induct, simp_all)
 txt \<open>
 The induction yields five subgoals, one for each rule in the definition of
-@{text ns_public}.  The idea is to prove that the protocol property holds initially
-(rule @{text Nil}), is preserved by each of the legitimate protocol steps (rules
-@{text NS1}--@{text 3}), and even is preserved in the face of anything the
-spy can do (rule @{text Fake}).  
+\<open>ns_public\<close>.  The idea is to prove that the protocol property holds initially
+(rule \<open>Nil\<close>), is preserved by each of the legitimate protocol steps (rules
+\<open>NS1\<close>--\<open>3\<close>), and even is preserved in the face of anything the
+spy can do (rule \<open>Fake\<close>).  
 
 The proof is trivial.  No legitimate protocol rule sends any keys
-at all, so only @{text Fake} is relevant. Indeed, simplification leaves
-only the @{text Fake} case, as indicated by the variable name @{text evsf}:
+at all, so only \<open>Fake\<close> is relevant. Indeed, simplification leaves
+only the \<open>Fake\<close> case, as indicated by the variable name \<open>evsf\<close>:
 @{subgoals[display,indent=0,margin=65]}
 \<close>
 by blast
@@ -150,7 +150,7 @@
 (*>*)
 
 text \<open>
-The @{text Fake} case is proved automatically.  If
+The \<open>Fake\<close> case is proved automatically.  If
 @{term "priK A"} is in the extended trace then either (1) it was already in the
 original trace or (2) it was
 generated by the spy, who must have known this key already. 
@@ -162,8 +162,8 @@
 it is known to the spy.  Intuitively, it holds because honest agents
 always choose fresh values as nonces; only the spy might reuse a value,
 and he doesn't know this particular value.  The proof script is short:
-induction, simplification, @{text blast}.  The first line uses the rule
-@{text rev_mp} to prepare the induction by moving two assumptions into the 
+induction, simplification, \<open>blast\<close>.  The first line uses the rule
+\<open>rev_mp\<close> to prepare the induction by moving two assumptions into the 
 induction formula.
 \<close>
 
@@ -277,7 +277,7 @@
   \<Longrightarrow> Nonce NB \<notin> analz (knows Spy evs)"
 txt \<open>
 To prove it, we must formulate the induction properly (one of the
-assumptions mentions~@{text evs}), apply induction, and simplify:
+assumptions mentions~\<open>evs\<close>), apply induction, and simplify:
 \<close>
 
 apply (erule rev_mp, erule ns_public.induct, simp_all)
@@ -299,14 +299,14 @@
 refers to a past instance of message~2, while this subgoal
 concerns message~1 being sent just now.
 In the Isabelle subgoal, instead of primed variables like $B'$ and $Na'$
-we have @{text Ba} and~@{text NAa}:
+we have \<open>Ba\<close> and~\<open>NAa\<close>:
 @{subgoals[display,indent=0]}
 The simplifier has used a 
 default simplification rule that does a case
 analysis for each encrypted message on whether or not the decryption key
 is compromised.
 @{named_thms [display,indent=0,margin=50] analz_Crypt_if [no_vars] (analz_Crypt_if)}
-The simplifier has also used @{text Spy_see_priK}, proved in
+The simplifier has also used \<open>Spy_see_priK\<close>, proved in
 {\S}\ref{sec:regularity} above, to yield @{term "Ba \<in> bad"}.
 
 Recall that this subgoal concerns the case
@@ -315,11 +315,11 @@
 This message can compromise $Nb$ only if $Nb=Na'$ and $B'$ is compromised,
 allowing the spy to decrypt the message.  The Isabelle subgoal says
 precisely this, if we allow for its choice of variable names.
-Proving @{term "NB \<noteq> NAa"} is easy: @{text NB} was
-sent earlier, while @{text NAa} is fresh; formally, we have
+Proving @{term "NB \<noteq> NAa"} is easy: \<open>NB\<close> was
+sent earlier, while \<open>NAa\<close> is fresh; formally, we have
 the assumption @{term "Nonce NAa \<notin> used evs1"}. 
 
-Note that our reasoning concerned @{text B}'s participation in another
+Note that our reasoning concerned \<open>B\<close>'s participation in another
 run.  Agents may engage in several runs concurrently, and some attacks work
 by interleaving the messages of two runs.  With model checking, this
 possibility can cause a state-space explosion, and for us it
@@ -328,12 +328,12 @@
 the very message mentioned in the theorem statement.
 Some of the cases are proved by unicity, others by
 the induction hypothesis.  For all those complications, the proofs are
-automatic by @{text blast} with the theorem @{text no_nonce_NS1_NS2}.
+automatic by \<open>blast\<close> with the theorem \<open>no_nonce_NS1_NS2\<close>.
 
 The remaining theorems about the protocol are not hard to prove.  The
 following one asserts a form of \emph{authenticity}: if
-@{text B} has sent an instance of message~2 to~@{text A} and has received the
-expected reply, then that reply really originated with~@{text A}.  The
+\<open>B\<close> has sent an instance of message~2 to~\<open>A\<close> and has received the
+expected reply, then that reply really originated with~\<open>A\<close>.  The
 proof is a simple induction.
 \<close>
 
@@ -369,13 +369,13 @@
 (*>*)
 
 text \<open>
-From similar assumptions, we can prove that @{text A} started the protocol
-run by sending an instance of message~1 involving the nonce~@{text NA}\@. 
+From similar assumptions, we can prove that \<open>A\<close> started the protocol
+run by sending an instance of message~1 involving the nonce~\<open>NA\<close>\@. 
 For this theorem, the conclusion is 
 @{thm [display] (concl) B_trusts_protocol [no_vars]}
-Analogous theorems can be proved for~@{text A}, stating that nonce~@{text NA}
-remains secret and that message~2 really originates with~@{text B}.  Even the
-flawed protocol establishes these properties for~@{text A};
+Analogous theorems can be proved for~\<open>A\<close>, stating that nonce~\<open>NA\<close>
+remains secret and that message~2 really originates with~\<open>B\<close>.  Even the
+flawed protocol establishes these properties for~\<open>A\<close>;
 the flaw only harms the second participant.
 
 \medskip
--- a/src/Doc/Tutorial/Protocol/Public.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Public.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -12,10 +12,10 @@
 
 text \<open>
 The function
-@{text pubK} maps agents to their public keys.  The function
-@{text priK} maps agents to their private keys.  It is merely
+\<open>pubK\<close> maps agents to their public keys.  The function
+\<open>priK\<close> maps agents to their private keys.  It is merely
 an abbreviation (cf.\ \S\ref{sec:abbreviations}) defined in terms of
-@{text invKey} and @{text pubK}.
+\<open>invKey\<close> and \<open>pubK\<close>.
 \<close>
 
 consts pubK :: "agent \<Rightarrow> key"
@@ -39,7 +39,7 @@
 
 text \<open>
 \noindent
-The set @{text bad} consists of those agents whose private keys are known to
+The set \<open>bad\<close> consists of those agents whose private keys are known to
 the spy.
 
 Two axioms are asserted about the public-key cryptosystem. 
--- a/src/Doc/Tutorial/Recdef/Induction.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Induction.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -10,7 +10,7 @@
 again induction. But this time the structural form of induction that comes
 with datatypes is unlikely to work well --- otherwise we could have defined the
 function by \isacommand{primrec}. Therefore \isacommand{recdef} automatically
-proves a suitable induction rule $f$@{text".induct"} that follows the
+proves a suitable induction rule $f$\<open>.induct\<close> that follows the
 recursion pattern of the particular function $f$. We call this
 \textbf{recursion induction}. Roughly speaking, it
 requires you to prove for each \isacommand{recdef} equation that the property
@@ -47,7 +47,7 @@
 
 In general, the format of invoking recursion induction is
 \begin{quote}
-\isacommand{apply}@{text"(induct_tac"} $x@1 \dots x@n$ @{text"rule:"} $f$@{text".induct)"}
+\isacommand{apply}\<open>(induct_tac\<close> $x@1 \dots x@n$ \<open>rule:\<close> $f$\<open>.induct)\<close>
 \end{quote}\index{*induct_tac (method)}%
 where $x@1~\dots~x@n$ is a list of free variables in the subgoal and $f$ the
 name of a function that takes an $n$-tuple. Usually the subgoal will
--- a/src/Doc/Tutorial/Recdef/Nested1.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested1.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -23,9 +23,9 @@
 unproved termination condition
 @{prop[display]"t : set ts --> size t < Suc (size_term_list ts)"}
 where @{term set} returns the set of elements of a list
-and @{text"size_term_list :: term list \<Rightarrow> nat"} is an auxiliary
+and \<open>size_term_list :: term list \<Rightarrow> nat\<close> is an auxiliary
 function automatically defined by Isabelle
-(while processing the declaration of @{text term}).  Why does the
+(while processing the declaration of \<open>term\<close>).  Why does the
 recursive call of @{const trev} lead to this
 condition?  Because \isacommand{recdef} knows that @{term map}
 will apply @{const trev} only to elements of @{term ts}. Thus the 
--- a/src/Doc/Tutorial/Recdef/Nested2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -14,7 +14,7 @@
 applies it automatically and the definition of @{term"trev"}
 succeeds now. As a reward for our effort, we can now prove the desired
 lemma directly.  We no longer need the verbose
-induction schema for type @{text"term"} and can use the simpler one arising from
+induction schema for type \<open>term\<close> and can use the simpler one arising from
 @{term"trev"}:
 \<close>
 
@@ -30,7 +30,7 @@
 text\<open>\noindent
 If the proof of the induction step mystifies you, we recommend that you go through
 the chain of simplification steps in detail; you will probably need the help of
-@{text"simp_trace"}. Theorem @{thm[source]map_cong} is discussed below.
+\<open>simp_trace\<close>. Theorem @{thm[source]map_cong} is discussed below.
 %\begin{quote}
 %{term[display]"trev(trev(App f ts))"}\\
 %{term[display]"App f (rev(map trev (rev(map trev ts))))"}\\
@@ -81,7 +81,7 @@
 declare map_cong[recdef_cong]
 
 text\<open>
-The @{text cong} and @{text recdef_cong} attributes are
+The \<open>cong\<close> and \<open>recdef_cong\<close> attributes are
 intentionally kept apart because they control different activities, namely
 simplification and making recursive definitions.
 %The simplifier's congruence rules cannot be used by recdef.
--- a/src/Doc/Tutorial/Recdef/examples.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/examples.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -36,7 +36,7 @@
 text\<open>\noindent
 This time the measure is the length of the list, which decreases with the
 recursive call; the first component of the argument tuple is irrelevant.
-The details of tupled $\lambda$-abstractions @{text"\<lambda>(x\<^sub>1,\<dots>,x\<^sub>n)"} are
+The details of tupled $\lambda$-abstractions \<open>\<lambda>(x\<^sub>1,\<dots>,x\<^sub>n)\<close> are
 explained in \S\ref{sec:products}, but for now your intuition is all you need.
 
 Pattern matching\index{pattern matching!and \isacommand{recdef}}
--- a/src/Doc/Tutorial/Recdef/simplification.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/simplification.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -7,7 +7,7 @@
 recursion equations become simplification rules, just as with
 \isacommand{primrec}. In most cases this works fine, but there is a subtle
 problem that must be mentioned: simplification may not
-terminate because of automatic splitting of @{text "if"}.
+terminate because of automatic splitting of \<open>if\<close>.
 \index{*if expressions!splitting of}
 Let us look at an example:
 \<close>
@@ -23,10 +23,10 @@
 is proved automatically because it is already present as a lemma in
 HOL\@.  Thus the recursion equation becomes a simplification
 rule. Of course the equation is nonterminating if we are allowed to unfold
-the recursive call inside the @{text else} branch, which is why programming
+the recursive call inside the \<open>else\<close> branch, which is why programming
 languages and our simplifier don't do that. Unfortunately the simplifier does
 something else that leads to the same problem: it splits 
-each @{text "if"}-expression unless its
+each \<open>if\<close>-expression unless its
 condition simplifies to @{term True} or @{term False}.  For
 example, simplification reduces
 @{term[display]"gcd(m,n) = k"}
@@ -35,7 +35,7 @@
 where the condition cannot be reduced further, and splitting leads to
 @{term[display]"(n=0 --> m=k) & (n ~= 0 --> gcd(n, m mod n)=k)"}
 Since the recursive call @{term"gcd(n, m mod n)"} is no longer protected by
-an @{text "if"}, it is unfolded again, which leads to an infinite chain of
+an \<open>if\<close>, it is unfolded again, which leads to an infinite chain of
 simplification steps. Fortunately, this problem can be avoided in many
 different ways.
 
@@ -43,10 +43,10 @@
 @{thm[source]if_split},
 as shown in \S\ref{sec:AutoCaseSplits}.  However, we do not recommend this
 approach: you will often have to invoke the rule explicitly when
-@{text "if"} is involved.
+\<open>if\<close> is involved.
 
 If possible, the definition should be given by pattern matching on the left
-rather than @{text "if"} on the right. In the case of @{term gcd} the
+rather than \<open>if\<close> on the right. In the case of @{term gcd} the
 following alternative definition suggests itself:
 \<close>
 
@@ -61,7 +61,7 @@
 @{prop"n ~= (0::nat)"}.  Unfortunately, in general the case distinction
 may not be expressible by pattern matching.
 
-A simple alternative is to replace @{text "if"} by @{text case}, 
+A simple alternative is to replace \<open>if\<close> by \<open>case\<close>, 
 which is also available for @{typ bool} and is not split automatically:
 \<close>
 
@@ -87,8 +87,7 @@
 done
 
 text\<open>\noindent
-Simplification terminates for these proofs because the condition of the @{text
-"if"} simplifies to @{term True} or @{term False}.
+Simplification terminates for these proofs because the condition of the \<open>if\<close> simplifies to @{term True} or @{term False}.
 Now we can disable the original simplification rule:
 \<close>
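
The disabling step itself falls outside this hunk; it is presumably of the form

declare gcd.simps [simp del]  (* remove the looping recursion equation from the simpset *)
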
 
--- a/src/Doc/Tutorial/Recdef/termination.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/termination.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -7,7 +7,7 @@
 its termination with the help of the user-supplied measure.  Each of the examples
 above is simple enough that Isabelle can automatically prove that the
 argument's measure decreases in each recursive call. As a result,
-$f$@{text".simps"} will contain the defining equations (or variants derived
+$f$\<open>.simps\<close> will contain the defining equations (or variants derived
 from them) as theorems. For example, look (via \isacommand{thm}) at
 @{thm[source]sep.simps} and @{thm[source]sep1.simps} to see that they define
 the same function. What is more, those equations are automatically declared as
@@ -29,9 +29,9 @@
 We can either prove this as a separate lemma, or try to figure out which
 existing lemmas may help. We opt for the second alternative. The theory of
 lists contains the simplification rule @{thm length_filter_le[no_vars]},
-which is what we need, provided we turn \mbox{@{text"< Suc"}}
+which is what we need, provided we turn \mbox{\<open>< Suc\<close>}
 into
-@{text"\<le>"} so that the rule applies. Lemma
+\<open>\<le>\<close> so that the rule applies. Lemma
 @{thm[source]less_Suc_eq_le} does just that: @{thm less_Suc_eq_le[no_vars]}.
 
 Now we retry the above definition but supply the lemma(s) just found (or
@@ -48,7 +48,7 @@
 (*<*)local(*>*)
 text\<open>\noindent
 This time everything works fine. Now @{thm[source]qs.simps} contains precisely
-the stated recursion equations for @{text qs} and they have become
+the stated recursion equations for \<open>qs\<close> and they have become
 simplification rules.
 Thus we can automatically prove results such as this one:
 \<close>
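
For instance (a sketched instance; the concrete list is illustrative):

theorem "qs [2,3,0,3,(0::nat)] = [0,0,2,3,3]"
by simp  (* ground evaluation using the newly declared simplification rules *)
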
--- a/src/Doc/Tutorial/Rules/find2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Rules/find2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -13,7 +13,7 @@
 
 For example, given the goal @{subgoals[display,indent=0,margin=65]}
 you can click on \pgmenu{Find} and type in the search expression
-\texttt{intro}. You will be shown a few rules ending in @{text"\<Longrightarrow> ?P \<and> ?Q"},
+\texttt{intro}. You will be shown a few rules ending in \<open>\<Longrightarrow> ?P \<and> ?Q\<close>,
 among them @{thm[source]conjI}\@. You may even discover that
 the very theorem you are trying to prove is already in the
 database.  Given the goal\<close>
@@ -31,7 +31,7 @@
 "_ \at\ _"  intro
 \end{ttbox}
 searches for all introduction rules that match the current goal and
-mention the @{text"@"} function.
+mention the \<open>@\<close> function.
 
 Searching for elimination and destruction rules via \texttt{elim} and
 \texttt{dest} is analogous to \texttt{intro} but takes the assumptions
--- a/src/Doc/Tutorial/ToyList/ToyList.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -3,8 +3,8 @@
 begin
 
 text\<open>\noindent
-HOL already has a predefined theory of lists called @{text List} ---
-@{text ToyList} is merely a small fragment of it chosen as an example.
+HOL already has a predefined theory of lists called \<open>List\<close> ---
+\<open>ToyList\<close> is merely a small fragment of it chosen as an example.
 To avoid some ambiguities caused by defining lists twice, we manipulate
 the concrete syntax and name space of theory @{theory Main} as follows.
 \<close>
@@ -31,19 +31,19 @@
 alternative syntax is the familiar one.  Thus the list \isa{Cons True
 (Cons False Nil)} becomes @{term"True # False # []"}. The annotation
 \isacommand{infixr}\index{infixr@\isacommand{infixr} (annotation)} 
-means that @{text"#"} associates to
-the right: the term @{term"x # y # z"} is read as @{text"x # (y # z)"}
-and not as @{text"(x # y) # z"}.
-The @{text 65} is the priority of the infix @{text"#"}.
+means that \<open>#\<close> associates to
+the right: the term @{term"x # y # z"} is read as \<open>x # (y # z)\<close>
+and not as \<open>(x # y) # z\<close>.
+The \<open>65\<close> is the priority of the infix \<open>#\<close>.
 
 \begin{warn}
   Syntax annotations can be powerful, but they are difficult to master and 
   are never necessary.  You
-  could drop them from theory @{text"ToyList"} and go back to the identifiers
+  could drop them from theory \<open>ToyList\<close> and go back to the identifiers
   @{term[source]Nil} and @{term[source]Cons}.  Novices should avoid using
   syntax annotations in their own theories.
 \end{warn}
-Next, two functions @{text"app"} and \cdx{rev} are defined recursively,
+Next, two functions \<open>app\<close> and \cdx{rev} are defined recursively,
 in this order, because Isabelle insists on definition before use:
 \<close>
 
@@ -58,18 +58,18 @@
 text\<open>\noindent
 Each function definition is of the form
 \begin{center}
-\isacommand{primrec} \textit{name} @{text"::"} \textit{type} \textit{(optional syntax)} \isakeyword{where} \textit{equations}
+\isacommand{primrec} \textit{name} \<open>::\<close> \textit{type} \textit{(optional syntax)} \isakeyword{where} \textit{equations}
 \end{center}
-The equations must be separated by @{text"|"}.
+The equations must be separated by \<open>|\<close>.
 %
-Function @{text"app"} is annotated with concrete syntax. Instead of the
-prefix syntax @{text"app xs ys"} the infix
+Function \<open>app\<close> is annotated with concrete syntax. Instead of the
+prefix syntax \<open>app xs ys\<close> the infix
 @{term"xs @ ys"}\index{$HOL2list@\isa{\at}|bold} becomes the preferred
 form.
 
 \index{*rev (constant)|(}\index{append function|(}
-The equations for @{text"app"} and @{term"rev"} hardly need comments:
-@{text"app"} appends two lists and @{term"rev"} reverses a list.  The
+The equations for \<open>app\<close> and @{term"rev"} hardly need comments:
+\<open>app\<close> appends two lists and @{term"rev"} reverses a list.  The
 keyword \commdx{primrec} indicates that the recursion is
 of a particularly primitive kind where each recursive call peels off a datatype
 constructor from one of the arguments.  Thus the
@@ -151,7 +151,7 @@
 \item
 It establishes a new theorem to be proved, namely @{prop"rev(rev xs) = xs"}.
 \item
-It gives that theorem the name @{text"rev_rev"}, for later reference.
+It gives that theorem the name \<open>rev_rev\<close>, for later reference.
 \item
 It tells Isabelle (via the bracketed attribute \attrdx{simp}) to take the eventual theorem as a simplification rule: future proofs involving
 simplification will replace occurrences of @{term"rev(rev xs)"} by
@@ -217,7 +217,7 @@
 
 txt\<open>\noindent
 This command tells Isabelle to apply a proof strategy called
-@{text"auto"} to all subgoals. Essentially, @{text"auto"} tries to
+\<open>auto\<close> to all subgoals. Essentially, \<open>auto\<close> tries to
 simplify the subgoals.  In our case, subgoal~1 is solved completely (thanks
 to the equation @{prop"rev [] = []"}) and disappears; the simplified version
 of subgoal~2 becomes the new subgoal~1:
@@ -244,7 +244,7 @@
 \emph{theorem} and \emph{lemma} pretty much interchangeably, too.
 
 There are two variables that we could induct on: @{term"xs"} and
-@{term"ys"}. Because @{text"@"} is defined by recursion on
+@{term"ys"}. Because \<open>@\<close> is defined by recursion on
 the first argument, @{term"xs"} is the correct one:
 \<close>
 
@@ -278,7 +278,7 @@
 
 txt\<open>
 \noindent
-It works, yielding the desired message @{text"No subgoals!"}:
+It works, yielding the desired message \<open>No subgoals!\<close>:
 @{goals[display,indent=0]}
 We still need to confirm that the proof is now finished:
 \<close>
@@ -294,7 +294,7 @@
 % \isacommand{by}\indexbold{by}, which we do most of the time.
 Notice that in lemma @{thm[source]app_Nil2},
 as printed out after the final \isacommand{done}, the free variable @{term"xs"} has been
-replaced by the unknown @{text"?xs"}, just as explained in
+replaced by the unknown \<open>?xs\<close>, just as explained in
 \S\ref{sec:variables}.
 
 Going back to the proof of the first lemma
@@ -306,16 +306,16 @@
 
 txt\<open>
 \noindent
-we find that this time @{text"auto"} solves the base case, but the
+we find that this time \<open>auto\<close> solves the base case, but the
 induction step merely simplifies to
 @{subgoals[display,indent=0,goals_limit=1]}
-Now we need to remember that @{text"@"} associates to the right, and that
-@{text"#"} and @{text"@"} have the same priority (namely the @{text"65"}
+Now we need to remember that \<open>@\<close> associates to the right, and that
+\<open>#\<close> and \<open>@\<close> have the same priority (namely the \<open>65\<close>
 in their \isacommand{infixr} annotation). Thus the conclusion really is
 \begin{isabelle}
 ~~~~~(rev~ys~@~rev~list)~@~(a~\#~[])~=~rev~ys~@~(rev~list~@~(a~\#~[]))
 \end{isabelle}
-and the missing lemma is associativity of @{text"@"}.
+and the missing lemma is associativity of \<open>@\<close>.
 \<close>
 (*<*)oops(*>*)
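
The missing lemma can be sketched in the style of the surrounding theory (the name \<open>app_assoc\<close> is assumed):

lemma app_assoc [simp]: "(xs @ ys) @ zs = xs @ (ys @ zs)"
apply(induct_tac xs)  (* @ recurses on its first argument, so induct on xs *)
apply(auto)
done
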
 
--- a/src/Doc/Tutorial/Trie/Trie.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Trie/Trie.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -26,7 +26,7 @@
 
 text\<open>\noindent
 Association lists come with a generic lookup function.  Its result
-involves type @{text option} because a lookup can fail:
+involves type \<open>option\<close> because a lookup can fail:
 \<close>
 
 primrec assoc :: "('key * 'val)list \<Rightarrow> 'key \<Rightarrow> 'val option" where
@@ -79,7 +79,7 @@
 optimizations!
 
 Before we start on any proofs about @{const update} we tell the simplifier to
-expand all @{text let}s and to split all @{text case}-constructs over
+expand all \<open>let\<close>s and to split all \<open>case\<close>-constructs over
 options:
 \<close>
 
@@ -88,7 +88,7 @@
 text\<open>\noindent
 The reason becomes clear when looking (probably after a failed proof
 attempt) at the body of @{const update}: it contains both
-@{text let} and a case distinction over type @{text option}.
+\<open>let\<close> and a case distinction over type \<open>option\<close>.
 
 Our main goal is to prove the correct interaction of @{const update} and
 @{const lookup}:
@@ -124,15 +124,15 @@
 
 text\<open>\noindent
 \index{subgoal numbering}%
-All methods ending in @{text tac} take an optional first argument that
-specifies the range of subgoals they are applied to, where @{text"[!]"} means
-all subgoals, i.e.\ @{text"[1-3]"} in our case. Individual subgoal numbers,
-e.g. @{text"[2]"} are also allowed.
+All methods ending in \<open>tac\<close> take an optional first argument that
+specifies the range of subgoals they are applied to, where \<open>[!]\<close> means
+all subgoals, i.e.\ \<open>[1-3]\<close> in our case. Individual subgoal numbers,
+e.g.\ \<open>[2]\<close>, are also allowed.
 
 This proof may look surprisingly straightforward. However, note that this
 comes at a cost: the proof script is unreadable because the intermediate
 proof states are invisible, and we rely on the (possibly brittle) magic of
-@{text auto} (@{text simp_all} will not do --- try it) to split the subgoals
+\<open>auto\<close> (\<open>simp_all\<close> will not do --- try it) to split the subgoals
 of the induction up in such a way that case distinction on @{term bs} makes
 sense and solves the proof. 
 
--- a/src/Doc/Tutorial/Types/Axioms.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Types/Axioms.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -80,9 +80,9 @@
 
 subsubsection \<open>Monoids\<close>
 
-text \<open>We define a subclass @{text monoidl} (a semigroup with a
+text \<open>We define a subclass \<open>monoidl\<close> (a semigroup with a
 left-hand neutral) by extending @{class semigroup} with one additional
-parameter @{text neutral} together with its property:\<close>
+parameter \<open>neutral\<close> together with its property:\<close>
 
 class monoidl = semigroup +
   fixes neutral :: "'a" ("\<zero>")
@@ -138,8 +138,7 @@
 
 subsubsection \<open>Groups\<close>
 
-text \<open>\noindent To finish our small algebra example, we add a @{text
-group} class:\<close>
+text \<open>\noindent To finish our small algebra example, we add a \<open>group\<close> class:\<close>
 
 class group = monoidl +
   fixes inv :: "'a \<Rightarrow> 'a" ("\<div> _" [81] 80)
@@ -161,7 +160,7 @@
   then show "x \<oplus> y = x \<oplus> z" by simp
 qed
 
-text \<open>\noindent Any @{text "group"} is also a @{text "monoid"}; this
+text \<open>\noindent Any \<open>group\<close> is also a \<open>monoid\<close>; this
 can be made explicit by claiming an additional subclass relation,
 together with a proof of the logical difference:\<close>
 
@@ -175,7 +174,7 @@
 qed
 
 text \<open>\noindent The proof result is propagated to the type system,
-making @{text group} an instance of @{text monoid} by adding an
+making \<open>group\<close> an instance of \<open>monoid\<close> by adding an
 additional edge to the graph of subclass relation; see also
 Figure~\ref{fig:subclass}.
 
@@ -184,27 +183,27 @@
    \small
    \unitlength 0.6mm
    \begin{picture}(40,60)(0,0)
-     \put(20,60){\makebox(0,0){@{text semigroup}}}
-     \put(20,40){\makebox(0,0){@{text monoidl}}}
-     \put(00,20){\makebox(0,0){@{text monoid}}}
-     \put(40,00){\makebox(0,0){@{text group}}}
+     \put(20,60){\makebox(0,0){\<open>semigroup\<close>}}
+     \put(20,40){\makebox(0,0){\<open>monoidl\<close>}}
+     \put(00,20){\makebox(0,0){\<open>monoid\<close>}}
+     \put(40,00){\makebox(0,0){\<open>group\<close>}}
      \put(20,55){\vector(0,-1){10}}
      \put(15,35){\vector(-1,-1){10}}
      \put(25,35){\vector(1,-3){10}}
    \end{picture}
    \hspace{8em}
    \begin{picture}(40,60)(0,0)
-     \put(20,60){\makebox(0,0){@{text semigroup}}}
-     \put(20,40){\makebox(0,0){@{text monoidl}}}
-     \put(00,20){\makebox(0,0){@{text monoid}}}
-     \put(40,00){\makebox(0,0){@{text group}}}
+     \put(20,60){\makebox(0,0){\<open>semigroup\<close>}}
+     \put(20,40){\makebox(0,0){\<open>monoidl\<close>}}
+     \put(00,20){\makebox(0,0){\<open>monoid\<close>}}
+     \put(40,00){\makebox(0,0){\<open>group\<close>}}
      \put(20,55){\vector(0,-1){10}}
      \put(15,35){\vector(-1,-1){10}}
      \put(05,15){\vector(3,-1){30}}
    \end{picture}
    \caption{Subclass relationship of monoids and groups:
       before and after establishing the relationship
-      @{text "group \<subseteq> monoid"};  transitive edges are left out.}
+      \<open>group \<subseteq> monoid\<close>;  transitive edges are left out.}
    \label{fig:subclass}
  \end{center}
 \end{figure}
@@ -238,7 +237,7 @@
 text \<open>In our algebra example, we have started with a \emph{syntactic
 class} @{class plus} which only specifies operations but no axioms; it
 would have been also possible to start immediately with class @{class
-semigroup}, specifying the @{text "\<oplus>"} operation and associativity at
+semigroup}, specifying the \<open>\<oplus>\<close> operation and associativity at
 the same time.
 
 Which approach is more appropriate depends.  Usually it is more
--- a/src/Doc/Tutorial/Types/Overloading.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Types/Overloading.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -7,7 +7,7 @@
 
 subsubsection \<open>Overloading\<close>
 
-text \<open>We can introduce a binary infix addition operator @{text "\<oplus>"}
+text \<open>We can introduce a binary infix addition operator \<open>\<oplus>\<close>
 for arbitrary types by means of a type class:\<close>
 
 class plus =
@@ -34,9 +34,9 @@
   | "Suc m \<oplus> n = Suc (m \<oplus> n)"
 
 text \<open>\noindent Note that the name @{const [source] plus} carries a
-suffix @{text "_nat"}; by default, the local name of a class operation
-@{text f} to be instantiated on type constructor @{text \<kappa>} is mangled
-as @{text f_\<kappa>}.  In case of uncertainty, these names may be inspected
+suffix \<open>_nat\<close>; by default, the local name of a class operation
+\<open>f\<close> to be instantiated on type constructor \<open>\<kappa>\<close> is mangled
+as \<open>f_\<kappa>\<close>.  In case of uncertainty, these names may be inspected
 using the @{command "print_context"} command.
 
 Although class @{class [source] plus} has no axioms, the instantiation must be
--- a/src/Doc/Tutorial/Types/Pairs.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Types/Pairs.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -15,15 +15,15 @@
 
 text\<open>
 Tuples may be used as patterns in $\lambda$-abstractions,
-for example @{text"\<lambda>(x,y,z).x+y+z"} and @{text"\<lambda>((x,y),z).x+y+z"}. In fact,
+for example \<open>\<lambda>(x,y,z).x+y+z\<close> and \<open>\<lambda>((x,y),z).x+y+z\<close>. In fact,
 tuple patterns can be used in most variable binding constructs,
 and they can be nested. Here are
 some typical examples:
 \begin{quote}
 @{term"let (x,y) = f z in (y,x)"}\\
 @{term"case xs of [] => (0::nat) | (x,y)#zs => x+y"}\\
-@{text"\<forall>(x,y)\<in>A. x=y"}\\
-@{text"{(x,y,z). x=z}"}\\
+\<open>\<forall>(x,y)\<in>A. x=y\<close>\\
+\<open>{(x,y,z). x=z}\<close>\\
 @{term"\<Union>(x,y)\<in>A. {x+y}"}
 \end{quote}
 The intuitive meanings of these expressions should be obvious.
@@ -33,12 +33,12 @@
 internal representation.  Thus the internal and external form of a term may
 differ, which can affect proofs. If you want to avoid this complication,
 stick to @{term fst} and @{term snd} and write @{term"%p. fst p + snd p"}
-instead of @{text"\<lambda>(x,y). x+y"}.  These terms are distinct even though they
+instead of \<open>\<lambda>(x,y). x+y\<close>.  These terms are distinct even though they
 denote the same function.
 
-Internally, @{term"%(x,y). t"} becomes @{text"case_prod (\<lambda>x y. t)"}, where
-\cdx{split} is the uncurrying function of type @{text"('a \<Rightarrow> 'b
-\<Rightarrow> 'c) \<Rightarrow> 'a \<times> 'b \<Rightarrow> 'c"} defined as
+Internally, @{term"%(x,y). t"} becomes \<open>case_prod (\<lambda>x y. t)\<close>, where
+\cdx{case_prod} is the uncurrying function of type \<open>('a \<Rightarrow> 'b
+\<Rightarrow> 'c) \<Rightarrow> 'a \<times> 'b \<Rightarrow> 'c\<close> defined as
 \begin{center}
 @{thm split_def}
 \hfill(@{thm[source]split_def})
@@ -98,7 +98,7 @@
 
 txt\<open>
 @{subgoals[display,indent=0]}
-A paired @{text let} reduces to a paired $\lambda$-abstraction, which
+A paired \<open>let\<close> reduces to a paired $\lambda$-abstraction, which
 can be split as above. The same is true for paired set comprehension:
 \<close>
 
@@ -110,7 +110,7 @@
 @{subgoals[display,indent=0]}
 Again, simplification produces a term suitable for @{thm[source]prod.split}
 as above. If you are worried about the strange form of the premise:
-@{text"case_prod (=)"} is short for @{term"\<lambda>(x,y). x=y"}.
+\<open>case_prod (=)\<close> is short for @{term"\<lambda>(x,y). x=y"}.
 The same proof procedure works for
 \<close>
 
@@ -130,7 +130,7 @@
 
 text\<open>\noindent
 Note that the above \isacommand{primrec} definition is admissible
-because @{text"\<times>"} is a datatype. When we now try to prove
+because \<open>\<times>\<close> is a datatype. When we now try to prove
 \<close>
 
 lemma "swap(swap p) = p"
@@ -145,14 +145,14 @@
 
 txt\<open>\noindent
 @{subgoals[display,indent=0]}
-Again, \methdx{case_tac} is applicable because @{text"\<times>"} is a datatype.
-The subgoal is easily proved by @{text simp}.
+Again, \methdx{case_tac} is applicable because \<open>\<times>\<close> is a datatype.
+The subgoal is easily proved by \<open>simp\<close>.
 
-Splitting by @{text case_tac} also solves the previous examples and may thus
+Splitting by \<open>case_tac\<close> also solves the previous examples and may thus
 appear preferable to the more arcane methods introduced first. However, see
-the warning about @{text case_tac} in \S\ref{sec:struct-ind-case}.
+the warning about \<open>case_tac\<close> in \S\ref{sec:struct-ind-case}.
 
-Alternatively, you can split \emph{all} @{text"\<And>"}-quantified variables
+Alternatively, you can split \emph{all} \<open>\<And>\<close>-quantified variables
 in a goal with the rewrite rule @{thm[source]split_paired_all}:
 \<close>
 
@@ -184,8 +184,8 @@
 apply(simp add: split_paired_all)
 (*<*)done(*>*)
 text\<open>\noindent
-Finally, the simplifier automatically splits all @{text"\<forall>"} and
-@{text"\<exists>"}-quantified variables:
+Finally, the simplifier automatically splits all \<open>\<forall>\<close> and
+\<open>\<exists>\<close>-quantified variables:
 \<close>
 
 lemma "\<forall>p. \<exists>q. swap p = swap q"
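
The proof of this final lemma is cut off by the hunk; plain simplification
suffices, because the simplifier performs the splitting itself (a sketch,
assuming the swap function defined above; the tutorial's actual proof
script may differ):

  lemma "\<forall>p. \<exists>q. swap p = swap q"
    by simp
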
--- a/src/Doc/Tutorial/Types/Records.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Types/Records.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -55,7 +55,7 @@
 
 text \<open>\noindent
   We see above the ASCII notation for record brackets.  You can also
-  use the symbolic brackets @{text \<lparr>} and @{text \<rparr>}.  Record type
+  use the symbolic brackets \<open>\<lparr>\<close> and \<open>\<rparr>\<close>.  Record type
   expressions can also be written directly with individual fields.
   The type name above is merely an abbreviation.
 \<close>
@@ -65,9 +65,8 @@
 
 text \<open>
   For each field, there is a \emph{selector}\index{selector!record}
-  function of the same name.  For example, if @{text p} has type @{typ
-  point} then @{text "Xcoord p"} denotes the value of the @{text
-  Xcoord} field of~@{text p}.  Expressions involving field selection
+  function of the same name.  For example, if \<open>p\<close> has type @{typ
+  point} then \<open>Xcoord p\<close> denotes the value of the \<open>Xcoord\<close> field of~\<open>p\<close>.  Expressions involving field selection
   of explicit records are simplified automatically:
 \<close>
 
@@ -77,8 +76,7 @@
 text \<open>
   The \emph{update}\index{update!record} operation is functional.  For
   example, @{term "p\<lparr>Xcoord := 0\<rparr>"} is a record whose @{const Xcoord}
-  value is zero and whose @{const Ycoord} value is copied from~@{text
-  p}.  Updates of explicit records are also simplified automatically:
+  value is zero and whose @{const Ycoord} value is copied from~\<open>p\<close>.  Updates of explicit records are also simplified automatically:
 \<close>
 
 lemma "\<lparr>Xcoord = a, Ycoord = b\<rparr>\<lparr>Xcoord := 0\<rparr> =
@@ -89,7 +87,7 @@
   \begin{warn}
   Field names are declared as constants and can no longer be used as
   variables.  It would be unwise, for example, to call the fields of
-  type @{typ point} simply @{text x} and~@{text y}.
+  type @{typ point} simply \<open>x\<close> and~\<open>y\<close>.
   \end{warn}
 \<close>
 
@@ -99,8 +97,8 @@
 text \<open>
   \index{records!extensible|(}%
 
-  Now, let us define coloured points (type @{text cpoint}) to be
-  points extended with a field @{text col} of type @{text colour}:
+  Now, let us define coloured points (type \<open>cpoint\<close>) to be
+  points extended with a field \<open>col\<close> of type \<open>colour\<close>:
 \<close>
 
 datatype colour = Red | Green | Blue
@@ -109,8 +107,8 @@
   col :: colour
 
 text \<open>\noindent
-  The fields of this new type are @{const Xcoord}, @{text Ycoord} and
-  @{text col}, in that order.
+  The fields of this new type are @{const Xcoord}, \<open>Ycoord\<close> and
+  \<open>col\<close>, in that order.
 \<close>
 
 definition cpt1 :: cpoint where
@@ -123,24 +121,23 @@
   implicit pseudo-field, \cdx{more}, that keeps the extension as an
   explicit value.  Its type is declared as completely
   polymorphic:~@{typ 'a}.  When a fixed record value is expressed
-  using just its standard fields, the value of @{text more} is
-  implicitly set to @{text "()"}, the empty tuple, which has type
+  using just its standard fields, the value of \<open>more\<close> is
+  implicitly set to \<open>()\<close>, the empty tuple, which has type
   @{typ unit}.  Within the record brackets, you can refer to the
-  @{text more} field by writing ``@{text "\<dots>"}'' (three dots):
+  \<open>more\<close> field by writing ``\<open>\<dots>\<close>'' (three dots):
 \<close>
 
 lemma "Xcoord \<lparr>Xcoord = a, Ycoord = b, \<dots> = p\<rparr> = a"
   by simp
 
 text \<open>
-  This lemma applies to any record whose first two fields are @{text
-  Xcoord} and~@{const Ycoord}.  Note that @{text "\<lparr>Xcoord = a, Ycoord
-  = b, \<dots> = ()\<rparr>"} is exactly the same as @{text "\<lparr>Xcoord = a, Ycoord
-  = b\<rparr>"}.  Selectors and updates are always polymorphic wrt.\ the
-  @{text more} part of a record scheme, its value is just ignored (for
+  This lemma applies to any record whose first two fields are \<open>Xcoord\<close> and~@{const Ycoord}.  Note that \<open>\<lparr>Xcoord = a, Ycoord
+  = b, \<dots> = ()\<rparr>\<close> is exactly the same as \<open>\<lparr>Xcoord = a, Ycoord
+  = b\<rparr>\<close>.  Selectors and updates are always polymorphic wrt.\ the
+  \<open>more\<close> part of a record scheme; its value is just ignored (for
   select) or copied (for update).
 
-  The @{text more} pseudo-field may be manipulated directly as well,
+  The \<open>more\<close> pseudo-field may be manipulated directly as well,
   but the identifier needs to be qualified:
 \<close>
 
@@ -149,10 +146,10 @@
 
 text \<open>\noindent
   We see that the colour part attached to this @{typ point} is a
-  rudimentary record in its own right, namely @{text "\<lparr>col =
-  Green\<rparr>"}.  In order to select or update @{text col}, this fragment
+  rudimentary record in its own right, namely \<open>\<lparr>col =
+  Green\<rparr>\<close>.  In order to select or update \<open>col\<close>, this fragment
   needs to be put back into the context of the parent type scheme, say
-  as @{text more} part of another @{typ point}.
+  as \<open>more\<close> part of another @{typ point}.
 
   To define generic operations, we need to know a bit more about
   records.  Our definition of @{typ point} above has generated two
@@ -160,22 +157,21 @@
 
   \medskip
   \begin{tabular}{l}
-  @{typ point}~@{text "="}~@{text "\<lparr>Xcoord :: int, Ycoord :: int\<rparr>"} \\
-  @{typ "'a point_scheme"}~@{text "="}~@{text "\<lparr>Xcoord :: int, Ycoord :: int, \<dots> :: 'a\<rparr>"} \\
+  @{typ point}~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int\<rparr>\<close> \\
+  @{typ "'a point_scheme"}~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int, \<dots> :: 'a\<rparr>\<close> \\
   \end{tabular}
   \medskip
   
 \noindent
   Type @{typ point} is for fixed records having exactly the two fields
-  @{const Xcoord} and~@{text Ycoord}, while the polymorphic type @{typ
+  @{const Xcoord} and~\<open>Ycoord\<close>, while the polymorphic type @{typ
   "'a point_scheme"} comprises all possible extensions to those two
   fields.  Note that @{typ "unit point_scheme"} coincides with @{typ
   point}, and @{typ "\<lparr>col :: colour\<rparr> point_scheme"} with @{typ
   cpoint}.
 
   In the following example we define two operations --- methods, if we
-  regard records as objects --- to get and set any point's @{text
-  Xcoord} field.
+  regard records as objects --- to get and set any point's \<open>Xcoord\<close> field.
 \<close>
 
 definition getX :: "'a point_scheme \<Rightarrow> int" where
@@ -185,7 +181,7 @@
 
 text \<open>
   Here is a generic method that modifies a point, incrementing its
-  @{const Xcoord} field.  The @{text Ycoord} and @{text more} fields
+  @{const Xcoord} field.  The \<open>Ycoord\<close> and \<open>more\<close> fields
   are copied across.  It works for any record type scheme derived from
   @{typ point} (including @{typ cpoint} etc.):
 \<close>
@@ -196,7 +192,7 @@
 
 text \<open>
   Generic theorems can be proved about generic methods.  This trivial
-  lemma relates @{const incX} to @{text getX} and @{text setX}:
+  lemma relates @{const incX} to \<open>getX\<close> and \<open>setX\<close>:
 \<close>
 
 lemma "incX r = setX r (getX r + 1)"
@@ -204,9 +200,9 @@
 
 text \<open>
   \begin{warn}
-  If you use the symbolic record brackets @{text \<lparr>} and @{text \<rparr>},
-  then you must also use the symbolic ellipsis, ``@{text \<dots>}'', rather
-  than three consecutive periods, ``@{text "..."}''.  Mixing the ASCII
+  If you use the symbolic record brackets \<open>\<lparr>\<close> and \<open>\<rparr>\<close>,
+  then you must also use the symbolic ellipsis, ``\<open>\<dots>\<close>'', rather
+  than three consecutive periods, ``\<open>...\<close>''.  Mixing the ASCII
   and symbolic versions causes a syntax error.  (The two versions are
   more distinct on screen than they are on paper.)
   \end{warn}%
@@ -227,7 +223,7 @@
   by simp
 
 text \<open>
-  The following equality is similar, but generic, in that @{text r}
+  The following equality is similar, but generic, in that \<open>r\<close>
   can be any instance of @{typ "'a point_scheme"}:
 \<close>
 
@@ -236,8 +232,8 @@
 
 text \<open>\noindent
   We see above the syntax for iterated updates.  We could equivalently
-  have written the left-hand side as @{text "r\<lparr>Xcoord := a\<rparr>\<lparr>Ycoord :=
-  b\<rparr>"}.
+  have written the left-hand side as \<open>r\<lparr>Xcoord := a\<rparr>\<lparr>Ycoord :=
+  b\<rparr>\<close>.
 
   Record equality is \emph{extensional}:
   \index{extensionality!for records} a record is determined entirely
@@ -249,7 +245,7 @@
 
 text \<open>\noindent
   The generic version of this equality includes the pseudo-field
-  @{text more}:
+  \<open>more\<close>:
 \<close>
 
 lemma "r = \<lparr>Xcoord = Xcoord r, Ycoord = Ycoord r, \<dots> = point.more r\<rparr>"
@@ -275,13 +271,13 @@
 lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
   apply (drule_tac f = Xcoord in arg_cong)
   txt \<open>@{subgoals [display, indent = 0, margin = 65]}
-    Now, @{text simp} will reduce the assumption to the desired
+    Now, \<open>simp\<close> will reduce the assumption to the desired
     conclusion.\<close>
   apply simp
   done
 
 text \<open>
-  The @{text cases} method is preferable to such a forward proof.  We
+  The \<open>cases\<close> method is preferable to such a forward proof.  We
   state the desired lemma again:
 \<close>
 
@@ -289,13 +285,12 @@
 
   txt \<open>The \methdx{cases} method adds an equality to replace the
   named record term by an explicit record expression, listing all
-  fields.  It even includes the pseudo-field @{text more}, since the
+  fields.  It even includes the pseudo-field \<open>more\<close>, since the
   record equality stated here is generic for all extensions.\<close>
 
   apply (cases r)
 
-  txt \<open>@{subgoals [display, indent = 0, margin = 65]} Again, @{text
-  simp} finishes the proof.  Because @{text r} is now represented as
+  txt \<open>@{subgoals [display, indent = 0, margin = 65]} Again, \<open>simp\<close> finishes the proof.  Because \<open>r\<close> is now represented as
   an explicit record construction, the updates can be applied and the
   record equality can be replaced by equality of the corresponding
   fields (due to injectivity).\<close>
@@ -306,10 +301,10 @@
 text \<open>
   The generic cases method does not admit references to locally bound
   parameters of a goal.  In longer proof scripts one might have to
-  fall back on the primitive @{text rule_tac} used together with the
+  fall back on the primitive \<open>rule_tac\<close> used together with the
   internal field representation rules of records.  The above use of
-  @{text "(cases r)"} would become @{text "(rule_tac r = r in
-  point.cases_scheme)"}.
+  \<open>(cases r)\<close> would become \<open>(rule_tac r = r in
+  point.cases_scheme)\<close>.
 \<close>
 
 
@@ -330,7 +325,7 @@
 
   \item Function \cdx{fields} takes the record's very own fields and
   returns a record fragment consisting of just those fields.  This may
-  be filled into the @{text more} part of the parent record scheme.
+  be filled into the \<open>more\<close> part of the parent record scheme.
 
   \item Function \cdx{extend} takes two arguments: a record to be
   extended and a record containing the new fields.
@@ -343,33 +338,32 @@
   These functions provide useful abbreviations for standard
   record expressions involving constructors and selectors.  The
   definitions, which are \emph{not} unfolded by default, are made
-  available by the collective name of @{text defs} (@{text
-  point.defs}, @{text cpoint.defs}, etc.).
+  available by the collective name of \<open>defs\<close> (\<open>point.defs\<close>, \<open>cpoint.defs\<close>, etc.).
   For example, here are the versions of those functions generated for
-  record @{typ point}.  We omit @{text point.fields}, which happens to
-  be the same as @{text point.make}.
+  record @{typ point}.  We omit \<open>point.fields\<close>, which happens to
+  be the same as \<open>point.make\<close>.
 
   @{thm [display, indent = 0, margin = 65] point.make_def [no_vars]
   point.extend_def [no_vars] point.truncate_def [no_vars]}
   Contrast those with the corresponding functions for record @{typ
-  cpoint}.  Observe @{text cpoint.fields} in particular.
+  cpoint}.  Observe \<open>cpoint.fields\<close> in particular.
   @{thm [display, indent = 0, margin = 65] cpoint.make_def [no_vars]
   cpoint.fields_def [no_vars] cpoint.extend_def [no_vars]
   cpoint.truncate_def [no_vars]}
 
   To demonstrate these functions, we declare a new coloured point by
-  extending an ordinary point.  Function @{text point.extend} augments
-  @{text pt1} with a colour value, which is converted into an
-  appropriate record fragment by @{text cpoint.fields}.
+  extending an ordinary point.  Function \<open>point.extend\<close> augments
+  \<open>pt1\<close> with a colour value, which is converted into an
+  appropriate record fragment by \<open>cpoint.fields\<close>.
 \<close>
 
 definition cpt2 :: cpoint where
 "cpt2 \<equiv> point.extend pt1 (cpoint.fields Green)"
 
 text \<open>
-  The coloured points @{const cpt1} and @{text cpt2} are equal.  The
+  The coloured points @{const cpt1} and \<open>cpt2\<close> are equal.  The
   proof is trivial, by unfolding all the definitions.  We deliberately
-  omit the definition of~@{text pt1} in order to reveal the underlying
+  omit the definition of~\<open>pt1\<close> in order to reveal the underlying
   comparison on type @{typ point}.
 \<close>
 
@@ -381,7 +375,7 @@
 
 text \<open>
   In the example below, a coloured point is truncated to leave a
-  point.  We use the @{text truncate} function of the target record.
+  point.  We use the \<open>truncate\<close> function of the target record.
 \<close>
 
 lemma "point.truncate cpt2 = pt1"
@@ -389,10 +383,9 @@
 
 text \<open>
   \begin{exercise}
-  Extend record @{typ cpoint} to have a further field, @{text
-  intensity}, of type~@{typ nat}.  Experiment with generic operations
+  Extend record @{typ cpoint} to have a further field, \<open>intensity\<close>, of type~@{typ nat}.  Experiment with generic operations
   (using polymorphic selectors and updates) and explicit coercions
-  (using @{text extend}, @{text truncate} etc.) among the three record
+  (using \<open>extend\<close>, \<open>truncate\<close> etc.) among the three record
   types.
   \end{exercise}
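
A possible first step for this exercise, using the record-extension syntax
shown earlier in this theory (the record name icpoint is ours):

  record icpoint = cpoint +
    intensity :: nat
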
 
--- a/src/Doc/Tutorial/Types/Typedefs.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Tutorial/Types/Typedefs.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -4,7 +4,7 @@
 
 text\<open>\label{sec:adv-typedef}
 For most applications, a combination of predefined types like @{typ bool} and
-@{text"\<Rightarrow>"} with recursive datatypes and records is quite sufficient. Very
+\<open>\<Rightarrow>\<close> with recursive datatypes and records is quite sufficient. Very
 occasionally you may feel the need for a more advanced type.  If you
 are certain that your type is not definable by any of the
 standard means, then read on.
@@ -111,7 +111,7 @@
 \end{center}
 %
 From this example it should be clear what \isacommand{typedef} does
-in general given a name (here @{text three}) and a set
+in general given a name (here \<open>three\<close>) and a set
 (here @{term"{0::nat,1,2}"}).
 
 Our next step is to define the basic functions expected on the new type.
@@ -148,13 +148,13 @@
 \begin{tabular}{@ {}r@ {\qquad}l@ {}}
 @{thm Rep_three_inject[no_vars]} & (@{thm[source]Rep_three_inject}) \\
 \begin{tabular}{@ {}l@ {}}
-@{text"\<lbrakk>x \<in> {0, 1, 2}; y \<in> {0, 1, 2} \<rbrakk>"} \\
-@{text"\<Longrightarrow> (Abs_three x = Abs_three y) = (x = y)"}
+\<open>\<lbrakk>x \<in> {0, 1, 2}; y \<in> {0, 1, 2} \<rbrakk>\<close> \\
+\<open>\<Longrightarrow> (Abs_three x = Abs_three y) = (x = y)\<close>
 \end{tabular} & (@{thm[source]Abs_three_inject}) \\
 \end{tabular}
 \end{center}
-The following ones allow to replace some @{text"x::three"} by
-@{text"Abs_three(y::nat)"}, and conversely @{term y} by @{term"Rep_three x"}:
+The following ones allow us to replace some \<open>x::three\<close> by
+\<open>Abs_three(y::nat)\<close>, and conversely @{term y} by @{term"Rep_three x"}:
 \begin{center}
 \begin{tabular}{@ {}r@ {\qquad}l@ {}}
 @{thm Rep_three_cases[no_vars]} & (@{thm[source]Rep_three_cases}) \\
@@ -163,7 +163,7 @@
 @{thm Abs_three_induct[no_vars]} & (@{thm[source]Abs_three_induct}) \\
 \end{tabular}
 \end{center}
-These theorems are proved for any type definition, with @{text three}
+These theorems are proved for any type definition, with \<open>three\<close>
 replaced by the name of the type in question.
 
 Distinctness of @{term A}, @{term B} and @{term C} follows immediately
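
For readers without the full theory at hand, the definitions this
discussion refers to look roughly as follows (a reconstruction from the
surrounding text; the non-emptiness obligation of typedef is discharged
inline):

  typedef three = "{0::nat, 1, 2}"
    by blast

  definition A :: three where "A \<equiv> Abs_three 0"
  definition B :: three where "B \<equiv> Abs_three 1"
  definition C :: three where "C \<equiv> Abs_three 2"
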
--- a/src/Doc/Typeclass_Hierarchy/Typeclass_Hierarchy.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/Doc/Typeclass_Hierarchy/Typeclass_Hierarchy.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -39,46 +39,46 @@
   always have been numerical types, which form an inclusion chain:
   
   \begin{center}
-    @{typ nat} @{text \<sqsubset>} @{typ int} @{text \<sqsubset>} @{typ rat}
-      @{text \<sqsubset>} @{typ real} @{text \<sqsubset>} @{typ complex}
+    @{typ nat} \<open>\<sqsubset>\<close> @{typ int} \<open>\<sqsubset>\<close> @{typ rat}
+      \<open>\<sqsubset>\<close> @{typ real} \<open>\<sqsubset>\<close> @{typ complex}
   \end{center}
 
-  \noindent The inclusion @{text \<sqsubset>} means that any value of the numerical
+  \noindent The inclusion \<open>\<sqsubset>\<close> means that any value of the numerical
   type on the left-hand side can be transferred mathematically
   to the numerical type on the right-hand side.
 
   How to accomplish this given the quite restrictive type system
   of {Isabelle/HOL}?  Paulson @{cite "paulson-numerical"} explains
   that each numerical type has some characteristic properties
-  which define an characteristic algebraic structure;  @{text \<sqsubset>}
+  which define a characteristic algebraic structure;  \<open>\<sqsubset>\<close>
   then corresponds to specialization of the corresponding
   characteristic algebraic structures.  These algebraic structures
   are expressed using algebraic type classes and embeddings
   of numerical types into them:
 
   \begin{center}\begin{tabular}{lccc}
-    @{term of_nat} @{text "::"}  & @{typ nat}  & @{text \<Rightarrow>} & @{typ [source] "'a::semiring_1"} \\
-                                 & @{text \<sqinter>}   &            & @{text \<up>} \\
-    @{term of_int} @{text "::"}  & @{typ int}  & @{text \<Rightarrow>} & @{typ [source] "'a::ring_1"} \\
-                                 & @{text \<sqinter>}   &            & @{text \<up>} \\
-    @{term of_rat} @{text "::"}  & @{typ rat}  & @{text \<Rightarrow>} & @{typ [source] "'a::field_char_0"} \\
-                                 & @{text \<sqinter>}   &            & @{text \<up>} \\
-    @{term of_real} @{text "::"} & @{typ real} & @{text \<Rightarrow>} & @{typ [source] "'a::real_algebra_1"} \\
-                                 & @{text \<sqinter>} \\
+    @{term of_nat} \<open>::\<close>  & @{typ nat}  & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::semiring_1"} \\
+                                 & \<open>\<sqinter>\<close>   &            & \<open>\<up>\<close> \\
+    @{term of_int} \<open>::\<close>  & @{typ int}  & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::ring_1"} \\
+                                 & \<open>\<sqinter>\<close>   &            & \<open>\<up>\<close> \\
+    @{term of_rat} \<open>::\<close>  & @{typ rat}  & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::field_char_0"} \\
+                                 & \<open>\<sqinter>\<close>   &            & \<open>\<up>\<close> \\
+    @{term of_real} \<open>::\<close> & @{typ real} & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::real_algebra_1"} \\
+                                 & \<open>\<sqinter>\<close> \\
                                  & @{typ complex}
   \end{tabular}\end{center}
 
-  \noindent @{text "d \<leftarrow> c"} means that @{text c} is subclass of @{text d}.
-  Hence each characteristic embedding @{text of_num} can transform
-  any value of type @{text num} to any numerical type further
+  \noindent \<open>d \<leftarrow> c\<close> means that \<open>c\<close> is a subclass of \<open>d\<close>.
+  Hence each characteristic embedding \<open>of_num\<close> can transform
+  any value of type \<open>num\<close> to any numerical type further
   up in the inclusion chain.
 
   This canonical example exhibits key strengths of type classes:
 
     \<^item> Sharing of operations and facts among different
       types, hence also sharing of notation and names: there
-      is only one plus operation using infix syntax @{text "+"},
-      only one zero written @{text 0}, and neutrality
+      is only one plus operation using infix syntax \<open>+\<close>,
+      only one zero written \<open>0\<close>, and neutrality
       (@{thm (frugal_sorts) add_0_left [all, no_vars]} and
       @{thm (frugal_sorts) add_0_right [all, no_vars]})
       is referred to
@@ -109,7 +109,7 @@
     \<^item> Type classes are not apt for meta-theory.  There
       is no practically usable way to express that the units
       of an integral domain form a multiplicative group using
-      type classes.  But see session @{text "HOL-Algebra"}
+      type classes.  But see session \<open>HOL-Algebra\<close>
       which provides locales with an explicit carrier.
 \<close>
 
@@ -152,9 +152,9 @@
 
     \<^item> Syntactic type classes allow generic notation to be used
       regardless of a particular logical interpretation; e.g.
-      although multiplication @{text "*"} is usually associative,
+      although multiplication \<open>*\<close> is usually associative,
       there are examples where it is not (e.g. octonions), and
+      leaving \<open>*\<close> without axioms allows one to re-use this
+      leaving \<open>*\<close> without axioms allows to re-use this
       syntax by means of type class instantiation also for such
       exotic examples.
 
@@ -178,8 +178,8 @@
 
 text \<open>
   In common literature, notation for semigroups and monoids
-  is either multiplicative @{text "(*, 1)"} or additive
-  @{text "(+, 0)"} with underlying properties isomorphic.
+  is either multiplicative \<open>(*, 1)\<close> or additive
+  \<open>(+, 0)\<close>, with isomorphic underlying properties.
   In {Isabelle/HOL}, this is accomplished using the following
   abstract setup:
 
@@ -190,7 +190,7 @@
       with a neutral element.
 
     \<^item> Both @{locale semigroup} and @{locale monoid} provide
-      dedicated syntax for their operations @{text "(\<^bold>*, \<^bold>1)"}.
+      dedicated syntax for their operations \<open>(\<^bold>*, \<^bold>1)\<close>.
       This syntax is not visible on the global theory level
       but only for abstract reasoning inside the respective
       locale.
@@ -209,8 +209,8 @@
 
       Locales @{locale semigroup} and @{locale monoid} are
       interpreted (using @{command sublocale}) into their
-      corresponding type classes, with prefixes @{text add}
-      and @{text mult}; hence facts derived in @{locale semigroup}
+      corresponding type classes, with prefixes \<open>add\<close>
+      and \<open>mult\<close>; hence facts derived in @{locale semigroup}
       and @{locale monoid} are propagated simultaneously to
       \<^emph>\<open>both\<close> using a consistent naming policy, i.e.
 
@@ -285,7 +285,7 @@
   that there are weaker algebraic structures with only a partial
   inverse operation.  E.g.\ the natural numbers have bounded
   subtraction @{term "m - (n::nat)"} which is only an inverse
-  operation if @{term "m \<ge> (n::nat)"};  unary minus @{text "-"}
+  operation if @{term "m \<ge> (n::nat)"};  unary minus \<open>-\<close>
   is pointless on the natural numbers.
 
   Hence for both additive and multiplicative notation there
--- a/src/HOL/Data_Structures/AA_Set.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/Data_Structures/AA_Set.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -75,7 +75,7 @@
 text\<open>In the paper, the last case of @{const adjust} is expressed with the help of an
 incorrect auxiliary function \texttt{nlvl}.
 
-Function @{text split_max} below is called \texttt{dellrg} in the paper.
+Function \<open>split_max\<close> below is called \texttt{dellrg} in the paper.
 The latter is incorrect for two reasons: \texttt{dellrg} is meant to delete the largest
 element but recurses on the left instead of the right subtree; the invariant
 is not restored.\<close>
--- a/src/HOL/Data_Structures/AList_Upd_Del.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/Data_Structures/AList_Upd_Del.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,7 +8,7 @@
 
 abbreviation "sorted1 ps \<equiv> sorted(map fst ps)"
 
-text\<open>Define own @{text map_of} function to avoid pulling in an unknown
+text\<open>Define our own \<open>map_of\<close> function to avoid pulling in an unknown
 amount of lemmas implicitly (via the simpset).\<close>
 
 hide_const (open) map_of
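
The local replacement that follows this hiding in the full theory is a
plain recursive definition, roughly (a sketch reconstructed from the
stated purpose; the theory's actual equations may differ in detail):

  fun map_of :: "('a \<times> 'b) list \<Rightarrow> 'a \<Rightarrow> 'b option" where
  "map_of [] = (\<lambda>x. None)" |
  "map_of ((a,b) # ps) = (\<lambda>x. if x = a then Some b else map_of ps x)"
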
--- a/src/HOL/Data_Structures/Sorted_Less.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/Data_Structures/Sorted_Less.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,7 +8,7 @@
 
 hide_const sorted
 
-text \<open>Is a list sorted without duplicates, i.e., wrt @{text"<"}?.\<close>
+text \<open>Is a list sorted without duplicates, i.e., wrt \<open><\<close>?\<close>
 
 abbreviation sorted :: "'a::linorder list \<Rightarrow> bool" where
 "sorted \<equiv> sorted_wrt (<)"
--- a/src/HOL/IMP/AExp.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/AExp.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -36,7 +36,7 @@
   "_State (_updbinds b bs)" <= "_Update (_State b) bs"
 
 text \<open>\noindent
-  We can now write a series of updates to the function @{text "\<lambda>x. 0"} compactly:
+  We can now write a series of updates to the function \<open>\<lambda>x. 0\<close> compactly:
 \<close>
 lemma "<a := 1, b := 2> = (<> (a := 1)) (b := (2::int))"
   by (rule refl)
@@ -48,8 +48,8 @@
 \<close>
 value "aval (Plus (V ''x'') (N 5)) <''y'' := 7>"
 
-text\<open>Note that this @{text"<\<dots>>"} syntax works for any function space
-@{text"\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2"} where @{text "\<tau>\<^sub>2"} has a @{text 0}.\<close>
+text\<open>Note that this \<open><\<dots>>\<close> syntax works for any function space
+\<open>\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2\<close> where \<open>\<tau>\<^sub>2\<close> has a \<open>0\<close>.\<close>
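
For instance, evaluating the expression from the value command above in a
state that actually binds ''x'' (the expected results are our reading of
the definitions):

  value "aval (Plus (V ''x'') (N 5)) <''x'' := 7>"
  (* yields 12; with <''y'' := 7> as above, ''x'' defaults to 0 and the result is 5 *)
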
 
 
 subsection "Constant Folding"
--- a/src/HOL/IMP/Abs_Int0.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Abs_Int0.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -353,8 +353,8 @@
 assumes m2: "x < y \<Longrightarrow> m x > m y"
 begin
 
-text\<open>The predicates @{text "top_on_ty a X"} that follow describe that any abstract
-state in @{text a} maps all variables in @{text X} to @{term \<top>}.
+text\<open>The predicates \<open>top_on_ty a X\<close> that follow describe that any abstract
+state in \<open>a\<close> maps all variables in \<open>X\<close> to @{term \<top>}.
 This is an important invariant for the termination proof where we argue that only
 the finitely many variables in the program change. That the others do not change
 follows because they remain @{term \<top>}.\<close>
--- a/src/HOL/IMP/Abs_Int1.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Abs_Int1.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -6,7 +6,7 @@
 imports Abs_State
 begin
 
-text\<open>Abstract interpretation over type @{text st} instead of functions.\<close>
+text\<open>Abstract interpretation over type \<open>st\<close> instead of functions.\<close>
 
 context Gamma_semilattice
 begin
--- a/src/HOL/IMP/Abs_Int1_parity.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Abs_Int1_parity.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -13,15 +13,14 @@
 instantiation parity :: order
 begin
 
-text\<open>First the definition of the interface function @{text"\<le>"}. Note that
+text\<open>First the definition of the interface function \<open>\<le>\<close>. Note that
 the header of the definition must refer to the ASCII name @{const less_eq} of the
-constants as @{text less_eq_parity} and the definition is named @{text
-less_eq_parity_def}.  Inside the definition the symbolic names can be used.\<close>
+constant as \<open>less_eq_parity\<close> and the definition is named \<open>less_eq_parity_def\<close>.  Inside the definition the symbolic names can be used.\<close>
 
 definition less_eq_parity where
 "x \<le> y = (y = Either \<or> x=y)"
 
-text\<open>We also need @{text"<"}, which is defined canonically:\<close>
+text\<open>We also need \<open><\<close>, which is defined canonically:\<close>
 
 definition less_parity where
 "x < y = (x \<le> y \<and> \<not> y \<le> (x::parity))"
@@ -59,9 +58,9 @@
 "\<top> = Either"
 
 text\<open>Now the instance proof. This time we take a shortcut with the help of
-proof method @{text goal_cases}: it creates cases 1 ... n for the subgoals
+proof method \<open>goal_cases\<close>: it creates cases 1 ... n for the subgoals
 1 ... n; in case i, i is also the name of the assumptions of subgoal i and
-@{text "case?"} refers to the conclusion of subgoal i.
+\<open>case?\<close> refers to the conclusion of subgoal i.
 The class axioms are presented in the same order as in the class definition.\<close>
 
 instance
@@ -123,7 +122,7 @@
 
 text\<open>Instantiating the abstract interpretation locale requires no more
 proofs (they happened in the instantiation above) but delivers the
-instantiated abstract interpreter which we call @{text AI_parity}:\<close>
+instantiated abstract interpreter which we call \<open>AI_parity\<close>:\<close>
 
 global_interpretation Abs_Int
 where \<gamma> = \<gamma>_parity and num' = num_parity and plus' = plus_parity
--- a/src/HOL/IMP/Abs_Int2_ivl.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Abs_Int2_ivl.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -405,7 +405,7 @@
 
 value "show_acom_opt (AI_ivl test1_ivl)"
 
-text\<open>Better than @{text AI_const}:\<close>
+text\<open>Better than \<open>AI_const\<close>:\<close>
 value "show_acom_opt (AI_ivl test3_const)"
 value "show_acom_opt (AI_ivl test4_const)"
 value "show_acom_opt (AI_ivl test6_const)"
--- a/src/HOL/IMP/Abs_Int3.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Abs_Int3.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -322,7 +322,7 @@
 by(auto simp add: narrow_acom_def top_on_acom_def)(metis top_on_opt_narrow in_set_zipE)
 
 text\<open>The assumptions for widening and narrowing differ because during
-narrowing we have the invariant @{prop"y \<le> x"} (where @{text y} is the next
+narrowing we have the invariant @{prop"y \<le> x"} (where \<open>y\<close> is the next
 iterate), but during widening there is no such invariant; there we only know
 that @{prop"y \<le> x"} does not hold yet. This complicates the termination proof for
 widening.\<close>
--- a/src/HOL/IMP/Big_Step.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Big_Step.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -7,7 +7,7 @@
 text \<open>
 The big-step semantics is a straightforward inductive definition
 with concrete syntax. Note that the first parameter is a tuple,
-so the syntax becomes @{text "(c,s) \<Rightarrow> s'"}.
+so the syntax becomes \<open>(c,s) \<Rightarrow> s'\<close>.
 \<close>
 
 text_raw\<open>\snip{BigStepdef}{0}{1}{%\<close>
@@ -73,8 +73,8 @@
 text\<open>
 This induction schema is almost perfect for our purposes, but
 our trick for reusing the tuple syntax means that the induction
-schema has two parameters instead of the @{text c}, @{text s},
-and @{text s'} that we are likely to encounter. Splitting
+schema has two parameters instead of the \<open>c\<close>, \<open>s\<close>,
+and \<open>s'\<close> that we are likely to encounter. Splitting
 the tuple parameter fixes this:
 \<close>
 lemmas big_step_induct = big_step.induct[split_format(complete)]
@@ -156,10 +156,10 @@
 subsection "Command Equivalence"
 
 text \<open>
-  We call two statements @{text c} and @{text c'} equivalent wrt.\ the
-  big-step semantics when \emph{@{text c} started in @{text s} terminates
-  in @{text s'} iff @{text c'} started in the same @{text s} also terminates
-  in the same @{text s'}}. Formally:
+  We call two statements \<open>c\<close> and \<open>c'\<close> equivalent wrt.\ the
+  big-step semantics when \emph{\<open>c\<close> started in \<open>s\<close> terminates
+  in \<open>s'\<close> iff \<open>c'\<close> started in the same \<open>s\<close> also terminates
+  in the same \<open>s'\<close>}. Formally:
 \<close>
 text_raw\<open>\snip{BigStepEquiv}{0}{1}{%\<close>
 abbreviation
@@ -168,7 +168,7 @@
 text_raw\<open>}%endsnip\<close>
 
 text \<open>
-Warning: @{text"\<sim>"} is the symbol written \verb!\ < s i m >! (without spaces).
+Warning: \<open>\<sim>\<close> is the symbol written \verb!\ < s i m >! (without spaces).
 
   As an example, we show that loop unfolding is an equivalence
   transformation on programs:
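
The unfolding lemma itself lies outside this excerpt. As a smaller
illustration of the \<sim> notation, the following equivalence is provable
automatically, assuming the [intro]/[elim!] declarations for the big-step
rules made earlier in the theory (the lemma is ours, not the tutorial's):

  lemma "(SKIP;; c) \<sim> c"
    by blast
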
--- a/src/HOL/IMP/Compiler.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Compiler.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -123,9 +123,9 @@
   by (induction rule: exec_induct) (blast intro: star.step exec1_appendL)+
 
 text\<open>Now we specialise the above lemmas to enable automatic proofs of
-@{prop "P \<turnstile> c \<rightarrow>* c'"} where @{text P} is a mixture of concrete instructions and
+@{prop "P \<turnstile> c \<rightarrow>* c'"} where \<open>P\<close> is a mixture of concrete instructions and
 pieces of code whose execution we already know (by induction), combined
-by @{text "@"} and @{text "#"}. Backward jumps are not supported.
+by \<open>@\<close> and \<open>#\<close>. Backward jumps are not supported.
 The details should be skipped on a first reading.
 
 If we have just executed the first instruction of the program, drop it:\<close>
--- a/src/HOL/IMP/Compiler2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Compiler2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,7 +8,7 @@
 
 text \<open>
 The preservation of the source code semantics is already shown in the 
-parent theory @{text "Compiler"}. This here shows the second direction.
+parent theory \<open>Compiler\<close>. This theory shows the second direction.
 \<close>
 
 subsection \<open>Definitions\<close>
--- a/src/HOL/IMP/Hoare_Examples.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Hoare_Examples.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -6,7 +6,7 @@
 
 hide_const (open) sum
 
-text\<open>Summing up the first @{text x} natural numbers in variable @{text y}.\<close>
+text\<open>Summing up the first \<open>x\<close> natural numbers in variable \<open>y\<close>.\<close>
 
 fun sum :: "int \<Rightarrow> int" where
 "sum i = (if i \<le> 0 then 0 else sum (i - 1) + i)"
--- a/src/HOL/IMP/Hoare_Total.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Hoare_Total.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,7 +8,7 @@
 imports Hoare_Examples
 begin
 
-text\<open>Note that this definition of total validity @{text"\<Turnstile>\<^sub>t"} only
+text\<open>Note that this definition of total validity \<open>\<Turnstile>\<^sub>t\<close> only
 works if execution is deterministic (which it is in our case).\<close>
 
 definition hoare_tvalid :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool"
@@ -16,9 +16,9 @@
 "\<Turnstile>\<^sub>t {P}c{Q}  \<longleftrightarrow>  (\<forall>s. P s \<longrightarrow> (\<exists>t. (c,s) \<Rightarrow> t \<and> Q t))"
 
 text\<open>Provability of Hoare triples in the proof system for total
-correctness is written @{text"\<turnstile>\<^sub>t {P}c{Q}"} and defined
-inductively. The rules for @{text"\<turnstile>\<^sub>t"} differ from those for
-@{text"\<turnstile>"} only in the one place where nontermination can arise: the
+correctness is written \<open>\<turnstile>\<^sub>t {P}c{Q}\<close> and defined
+inductively. The rules for \<open>\<turnstile>\<^sub>t\<close> differ from those for
+\<open>\<turnstile>\<close> only in the one place where nontermination can arise: the
 @{term While}-rule.\<close>
 
 inductive
@@ -134,7 +134,7 @@
 
 
 text\<open>Now we define the number of iterations @{term "WHILE b DO c"} needs to
-terminate when started in state @{text s}. Because this is a truly partial
+terminate when started in state \<open>s\<close>. Because this is a truly partial
 function, we define it as an (inductive) relation first:\<close>
 
 inductive Its :: "bexp \<Rightarrow> com \<Rightarrow> state \<Rightarrow> nat \<Rightarrow> bool" where
--- a/src/HOL/IMP/Hoare_Total_EX.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Hoare_Total_EX.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -91,7 +91,7 @@
 done
 
 
-text\<open>Function @{text wpw} computes the weakest precondition of a While-loop
+text\<open>Function \<open>wpw\<close> computes the weakest precondition of a While-loop
 that is unfolded a fixed number of times.\<close>
 
 fun wpw :: "bexp \<Rightarrow> com \<Rightarrow> nat \<Rightarrow> assn \<Rightarrow> assn" where
--- a/src/HOL/IMP/Hoare_Total_EX2.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Hoare_Total_EX2.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -92,7 +92,7 @@
 by (auto simp: wpt_def fun_eq_iff)
 
 
-text\<open>Function @{text wpw} computes the weakest precondition of a While-loop
+text\<open>Function \<open>wpw\<close> computes the weakest precondition of a While-loop
 that is unfolded a fixed number of times.\<close>
 
 fun wpw :: "bexp \<Rightarrow> com \<Rightarrow> nat \<Rightarrow> assn2 \<Rightarrow> assn2" where
--- a/src/HOL/IMP/Live_True.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Live_True.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -48,7 +48,7 @@
 lemma L_While_X: "X \<subseteq> L (WHILE b DO c) X"
 using L_While_unfold by blast
 
-text\<open>Disable @{text "L WHILE"} equation and reason only with @{text "L WHILE"} constraints:\<close>
+text\<open>Disable the \<open>L WHILE\<close> equation and reason only with \<open>L WHILE\<close> constraints:\<close>
 declare L.simps(5)[simp del]
 
 
@@ -149,7 +149,7 @@
    in while (\<lambda>Y. f Y \<noteq> Y) f {})"
 by(rule L_While_let, simp)
 
-text\<open>Replace the equation for @{text "L (WHILE \<dots>)"} by the executable @{thm[source] L_While_set}:\<close>
+text\<open>Replace the equation for \<open>L (WHILE \<dots>)\<close> by the executable @{thm[source] L_While_set}:\<close>
 lemmas [code] = L.simps(1-4) L_While_set
 text\<open>Sorry, this syntax is odd.\<close>
 
--- a/src/HOL/IMP/Sec_Type_Expr.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Sec_Type_Expr.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -12,7 +12,7 @@
 
 text\<open>The security/confidentiality level of each variable is globally fixed
 for simplicity. For the sake of examples --- the general theory does not rely
-on it! --- a variable of length @{text n} has security level @{text n}:\<close>
+on it! --- a variable of length \<open>n\<close> has security level \<open>n\<close>:\<close>
 
 instantiation list :: (type)sec
 begin
--- a/src/HOL/IMP/Small_Step.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Small_Step.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -37,10 +37,10 @@
 subsubsection\<open>Induction rules\<close>
 
 text\<open>The default induction rule @{thm[source] small_step.induct} only works
-for lemmas of the form @{text"a \<rightarrow> b \<Longrightarrow> \<dots>"} where @{text a} and @{text b} are
-not already pairs @{text"(DUMMY,DUMMY)"}. We can generate a suitable variant
+for lemmas of the form \<open>a \<rightarrow> b \<Longrightarrow> \<dots>\<close> where \<open>a\<close> and \<open>b\<close> are
+not already pairs \<open>(DUMMY,DUMMY)\<close>. We can generate a suitable variant
 of @{thm[source] small_step.induct} for pairs by ``splitting'' the arguments
-@{text"\<rightarrow>"} into pairs:\<close>
+\<open>\<rightarrow>\<close> into pairs:\<close>
 lemmas small_step_induct = small_step.induct[split_format(complete)]
 
 
@@ -84,7 +84,7 @@
 by(blast intro: star.step star_seq2 star_trans)
 
 text\<open>The following proof corresponds to one on the board where one would
-show chains of @{text "\<rightarrow>"} and @{text "\<rightarrow>*"} steps.\<close>
+show chains of \<open>\<rightarrow>\<close> and \<open>\<rightarrow>*\<close> steps.\<close>
 
 lemma big_to_small:
   "cs \<Rightarrow> t \<Longrightarrow> cs \<rightarrow>* (SKIP,t)"
@@ -184,7 +184,7 @@
 lemma final_iff_SKIP: "final (c,s) = (c = SKIP)"
 by (metis SkipE finalD final_def)
 
-text\<open>Now we can show that @{text"\<Rightarrow>"} yields a final state iff @{text"\<rightarrow>"}
+text\<open>Now we can show that \<open>\<Rightarrow>\<close> yields a final state iff \<open>\<rightarrow>\<close>
 terminates:\<close>
 
 lemma big_iff_small_termination:
@@ -193,7 +193,7 @@
 
 text\<open>This is the same as saying that the absence of a big step result is
 equivalent to the absence of a terminating small step sequence, i.e.\ with
-nontermination.  Since @{text"\<rightarrow>"} is determininistic, there is no difference
+nontermination.  Since \<open>\<rightarrow>\<close> is deterministic, there is no difference
 between may and must terminate.\<close>
 
 end
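
Tying the last results together, a small sanity check (illustrative,
assuming final_iff_SKIP as stated above):

  lemma "final (SKIP, s)"
    by (simp add: final_iff_SKIP)
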
--- a/src/HOL/IMP/Vars.thy	Wed Dec 26 16:07:28 2018 +0100
+++ b/src/HOL/IMP/Vars.thy	Wed Dec 26 16:25:20 2018 +0100
@@ -8,8 +8,7 @@
 subsection "The Variables in an Expression"
 
 text\<open>We need to collect the variables in both arithmetic and boolean
-expressions. For a change we do not introduce two functions, e.g.\ @{text
-avars} and @{text bvars}, but we overload the name @{text vars}
+expressions. For a change we do not introduce two functions, e.g.\ \<open>avars\<close> and \<open>bvars\<close>, but we overload the name \<open>vars\<close>
 via a \emph{type class}, a device that originated with Haskell:\<close>
  
 class vars =