author wenzelm
Mon, 12 Oct 2015 21:15:10 +0200
changeset 61419 3c3f8b182e4b
parent 61418 44d9d55b9ef4
child 61420 ee42cba50933
isabelle update_cartouches;
src/Doc/Classes/Classes.thy
src/Doc/Classes/Setup.thy
src/Doc/Functions/Functions.thy
src/Doc/Locales/Examples.thy
src/Doc/Locales/Examples1.thy
src/Doc/Locales/Examples2.thy
src/Doc/Locales/Examples3.thy
--- a/src/Doc/Classes/Classes.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Classes/Classes.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -2,9 +2,9 @@
 imports Main Setup
 begin
 
-section {* Introduction *}
+section \<open>Introduction\<close>
 
-text {*
+text \<open>
   Type classes were introduced by Wadler and Blott @{cite wadler89how}
   into the Haskell language to allow for a reasonable implementation
   of overloading\footnote{throughout this tutorial, we are referring
@@ -89,23 +89,23 @@
   algebraic hierarchy of semigroups, monoids and groups.  Our
   background theory is that of Isabelle/HOL @{cite "isa-tutorial"}, for
   which some familiarity is assumed.
-*}
+\<close>
 
-section {* A simple algebra example \label{sec:example} *}
+section \<open>A simple algebra example \label{sec:example}\<close>
 
-subsection {* Class definition *}
+subsection \<open>Class definition\<close>
 
-text {*
+text \<open>
   Depending on an arbitrary type @{text "\<alpha>"}, class @{text
   "semigroup"} introduces a binary operator @{text "(\<otimes>)"} that is
   assumed to be associative:
-*}
+\<close>
 
 class %quote semigroup =
   fixes mult :: "\<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"    (infixl "\<otimes>" 70)
   assumes assoc: "(x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
 
-text {*
+text \<open>
   \noindent This @{command class} specification consists of two parts:
   the \qn{operational} part names the class parameter (@{element
   "fixes"}), the \qn{logical} part specifies properties on them
@@ -114,17 +114,17 @@
   parameter @{term [source] "mult :: \<alpha>::semigroup \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"} and the
   global theorem @{fact "semigroup.assoc"}:~@{prop [source] "\<And>x y z ::
   \<alpha>::semigroup. (x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"}.
-*}
+\<close>
 
 
-subsection {* Class instantiation \label{sec:class_inst} *}
+subsection \<open>Class instantiation \label{sec:class_inst}\<close>
 
-text {*
+text \<open>
   The concrete type @{typ int} is made a @{class semigroup} instance
   by providing a suitable definition for the class parameter @{text
   "(\<otimes>)"} and a proof for the specification of @{fact assoc}.  This is
   accomplished by the @{command instantiation} target:
-*}
+\<close>
 
 instantiation %quote int :: semigroup
 begin
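
  [The instantiation body is elided by the hunk context; in the underlying theory it reads approximately as follows, a sketch for orientation:

    definition %quote mult_int_def: "i \<otimes> j = i + (j::int)"

    instance %quote proof
      fix i j k :: int
      have "(i + j) + k = i + (j + k)" by simp
      then show "(i \<otimes> j) \<otimes> k = i \<otimes> (j \<otimes> k)"
        unfolding mult_int_def .
    qed]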
@@ -140,7 +140,7 @@
 
 end %quote
 
-text {*
+text \<open>
   \noindent @{command instantiation} defines class parameters at a
   particular instance using common specification tools (here,
   @{command definition}).  The concluding @{command instance} opens a
@@ -157,7 +157,7 @@
 
   \medskip Another instance of @{class semigroup} yields the natural
   numbers:
-*}
+\<close>
 
 instantiation %quote nat :: semigroup
 begin
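
  [The elided body defines the operation by primitive recursion, approximately as sketched below; the instance proof is analogous to the previous one. Note the mangled constant name mult_nat, discussed in the next hunk:

    primrec %quote mult_nat :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
      "(0::nat) \<otimes> n = n"
    | "Suc m \<otimes> n = Suc (m \<otimes> n)"]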
@@ -174,21 +174,21 @@
 
 end %quote
 
-text {*
+text \<open>
   \noindent Note the occurrence of the name @{text mult_nat} in the
   primrec declaration; by default, the local name of a class operation
   @{text f} to be instantiated on type constructor @{text \<kappa>} is
   mangled as @{text f_\<kappa>}.  In case of uncertainty, these names may be
   inspected using the @{command "print_context"} command.
-*}
+\<close>
 
-subsection {* Lifting and parametric types *}
+subsection \<open>Lifting and parametric types\<close>
 
-text {*
+text \<open>
   Overloaded definitions given at a class instantiation may include
   recursion over the syntactic structure of types.  As a canonical
   example, we model product semigroups using our simple algebra:
-*}
+\<close>
 
 instantiation %quote prod :: (semigroup, semigroup) semigroup
 begin
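
  [The elided definition recurses over the syntactic structure of the product type, roughly as follows (instance proof omitted); a sketch:

    definition %quote mult_prod_def:
      "p\<^sub>1 \<otimes> p\<^sub>2 = (fst p\<^sub>1 \<otimes> fst p\<^sub>2, snd p\<^sub>1 \<otimes> snd p\<^sub>2)"]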
@@ -204,34 +204,34 @@
 
 end %quote
 
-text {*
+text \<open>
   \noindent Associativity of product semigroups is established using
   the definition of @{text "(\<otimes>)"} on products and the hypothetical
   associativity of the type components; these hypotheses are
   legitimate due to the @{class semigroup} constraints imposed on the
   type components by the @{command instance} proposition.  Indeed,
   this pattern often occurs with parametric types and type classes.
-*}
+\<close>
 
 
-subsection {* Subclassing *}
+subsection \<open>Subclassing\<close>
 
-text {*
+text \<open>
   We define a subclass @{text monoidl} (a semigroup with a left-hand
   neutral) by extending @{class semigroup} with one additional
   parameter @{text neutral} together with its characteristic property:
-*}
+\<close>
 
 class %quote monoidl = semigroup +
   fixes neutral :: "\<alpha>" ("\<one>")
   assumes neutl: "\<one> \<otimes> x = x"
 
-text {*
+text \<open>
   \noindent Again, we prove some instances by providing suitable
   parameter definitions and proofs for the additional specifications.
   Observe that instantiations for types with the same arity may be
   simultaneous:
-*}
+\<close>
 
 instantiation %quote nat and int :: monoidl
 begin
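
  [The elided body gives one definition of the neutral element per type, roughly (instance proofs omitted); a sketch:

    definition %quote neutral_nat_def: "\<one> = (0::nat)"
    definition %quote neutral_int_def: "\<one> = (0::int)"]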
@@ -268,10 +268,10 @@
 
 end %quote
 
-text {*
+text \<open>
   \noindent Fully-fledged monoids are modelled by another subclass,
   which does not add new parameters but tightens the specification:
-*}
+\<close>
 
 class %quote monoid = monoidl +
   assumes neutr: "x \<otimes> \<one> = x"
@@ -302,10 +302,10 @@
 
 end %quote
 
-text {*
+text \<open>
   \noindent To finish our small algebra example, we add a @{text
   group} class with a corresponding instance:
-*}
+\<close>
 
 class %quote group = monoidl +
   fixes inverse :: "\<alpha> \<Rightarrow> \<alpha>"    ("(_\<div>)" [1000] 999)
@@ -327,57 +327,57 @@
 end %quote
 
 
-section {* Type classes as locales *}
+section \<open>Type classes as locales\<close>
 
-subsection {* A look behind the scenes *}
+subsection \<open>A look behind the scenes\<close>
 
-text {*
+text \<open>
   The example above gives an impression how Isar type classes work in
   practice.  As stated in the introduction, classes also provide a
   link to Isar's locale system.  Indeed, the logical core of a class
   is nothing other than a locale:
-*}
+\<close>
 
 class %quote idem =
   fixes f :: "\<alpha> \<Rightarrow> \<alpha>"
   assumes idem: "f (f x) = f x"
 
-text {*
+text \<open>
   \noindent essentially introduces the locale
-*} (*<*)setup %invisible {* Sign.add_path "foo" *}
+\<close> (*<*)setup %invisible \<open>Sign.add_path "foo"\<close>
 (*>*)
 locale %quote idem =
   fixes f :: "\<alpha> \<Rightarrow> \<alpha>"
   assumes idem: "f (f x) = f x"
 
-text {* \noindent together with corresponding constant(s): *}
+text \<open>\noindent together with corresponding constant(s):\<close>
 
 consts %quote f :: "\<alpha> \<Rightarrow> \<alpha>"
 
-text {*
+text \<open>
   \noindent The connection to the type system is done by means of a
   primitive type class @{text "idem"}, together with a corresponding
   interpretation:
-*}
+\<close>
 
 interpretation %quote idem_class:
   idem "f :: (\<alpha>::idem) \<Rightarrow> \<alpha>"
 (*<*)sorry(*>*)
 
-text {*
+text \<open>
   \noindent This gives you the full power of the Isabelle module system;
   conclusions in locale @{text idem} are implicitly propagated
   to class @{text idem}.
-*} (*<*)setup %invisible {* Sign.parent_path *}
+\<close> (*<*)setup %invisible \<open>Sign.parent_path\<close>
 (*>*)
-subsection {* Abstract reasoning *}
+subsection \<open>Abstract reasoning\<close>
 
-text {*
+text \<open>
   Isabelle locales enable reasoning at a general level, while results
   are implicitly transferred to all instances.  For example, we can
   now establish the @{text "left_cancel"} lemma for groups, which
   states that the function @{text "(x \<otimes>)"} is injective:
-*}
+\<close>
 
 lemma %quote (in group) left_cancel: "x \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"
 proof
@@ -390,7 +390,7 @@
   then show "x \<otimes> y = x \<otimes> z" by simp
 qed
 
-text {*
+text \<open>
   \noindent Here the \qt{@{keyword "in"} @{class group}} target
   specification indicates that the result is recorded within that
   context for later use.  This local theorem is also lifted to the
@@ -399,20 +399,20 @@
   made an instance of @{text "group"} before, we may refer to that
   fact as well: @{prop [source] "\<And>x y z :: int. x \<otimes> y = x \<otimes> z \<longleftrightarrow> y =
   z"}.
-*}
+\<close>
 
 
-subsection {* Derived definitions *}
+subsection \<open>Derived definitions\<close>
 
-text {*
+text \<open>
   Isabelle locales are targets which support local definitions:
-*}
+\<close>
 
 primrec %quote (in monoid) pow_nat :: "nat \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>" where
   "pow_nat 0 x = \<one>"
   | "pow_nat (Suc n) x = x \<otimes> pow_nat n x"
 
-text {*
+text \<open>
   \noindent If the locale @{text group} is also a class, this local
   definition is propagated onto a global definition of @{term [source]
   "pow_nat :: nat \<Rightarrow> \<alpha>::monoid \<Rightarrow> \<alpha>::monoid"} with corresponding theorems
@@ -423,12 +423,12 @@
   you may use any specification tool which works together with
   locales, such as Krauss's recursive function package
   @{cite krauss2006}.
-*}
+\<close>
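
  [A small consequence, provable once in the locale and inherited by every instance; a sketch, assuming only the definitions above:

    lemma (in monoid) "pow_nat n \<one> = \<one>"
      by (induct n) (simp_all add: neutl)]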
 
 
-subsection {* A functor analogy *}
+subsection \<open>A functor analogy\<close>
 
-text {*
+text \<open>
   We introduced Isar classes by analogy to type classes in functional
   programming; if we reconsider this in the context of what has been
   said about type classes and locales, we can drive this analogy
@@ -440,18 +440,18 @@
   lists the same operations as for genuinely algebraic types.  In such
   a case, we can simply make a particular interpretation of monoids
   for lists:
-*}
+\<close>
 
 interpretation %quote list_monoid: monoid append "[]"
   proof qed auto
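
  [For instance, the monoid fact neutr now specializes to lists under the interpretation prefix; a sketch:

    lemma "xs @ [] = xs"
      by (fact list_monoid.neutr)]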
 
-text {*
+text \<open>
   \noindent This enables us to apply facts on monoids
   to lists, e.g. @{thm list_monoid.neutl [no_vars]}.
 
   When using this interpretation pattern, it may also
   be appropriate to map derived definitions accordingly:
-*}
+\<close>
 
 primrec %quote replicate :: "nat \<Rightarrow> \<alpha> list \<Rightarrow> \<alpha> list" where
   "replicate 0 _ = []"
@@ -469,7 +469,7 @@
   qed
 qed intro_locales
 
-text {*
+text \<open>
   \noindent This pattern is also helpful for reusing abstract
   specifications on the \emph{same} type.  For example, think of a
   class @{text preorder}; for type @{typ nat}, there are at least two
@@ -478,15 +478,15 @@
   @{command instantiation}; using the locale behind the class @{text
   preorder}, it is still possible to utilise the same abstract
   specification again using @{command interpretation}.
-*}
+\<close>
 
-subsection {* Additional subclass relations *}
+subsection \<open>Additional subclass relations\<close>
 
-text {*
+text \<open>
   Any @{text "group"} is also a @{text "monoid"}; this can be made
   explicit by claiming an additional subclass relation, together with
   a proof of the logical difference:
-*}
+\<close>
 
 subclass %quote (in group) monoid
 proof
@@ -496,7 +496,7 @@
   with left_cancel show "x \<otimes> \<one> = x" by simp
 qed
 
-text {*
+text \<open>
   The logical proof is carried out on the locale level.  Afterwards it
   is propagated to the type system, making @{text group} an instance
   of @{text monoid} by adding an additional edge to the graph of
@@ -534,50 +534,50 @@
 
   For illustration, a derived definition in @{text group} using @{text
   pow_nat}
-*}
+\<close>
 
 definition %quote (in group) pow_int :: "int \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>" where
   "pow_int k x = (if k >= 0
     then pow_nat (nat k) x
     else (pow_nat (nat (- k)) x)\<div>)"
 
-text {*
+text \<open>
   \noindent yields the global definition of @{term [source] "pow_int ::
   int \<Rightarrow> \<alpha>::group \<Rightarrow> \<alpha>::group"} with the corresponding theorem @{thm
   pow_int_def [no_vars]}.
-*}
+\<close>
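
  [For example, the following is then provable directly from pow_int_def and the pow_nat equations; a sketch:

    lemma (in group) "pow_int 0 x = \<one>"
      by (simp add: pow_int_def)]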
 
-subsection {* A note on syntax *}
+subsection \<open>A note on syntax\<close>
 
-text {*
+text \<open>
   As a convenience, class context syntax allows references to local
   class operations and their global counterparts uniformly; type
   inference resolves ambiguities.  For example:
-*}
+\<close>
 
 context %quote semigroup
 begin
 
-term %quote "x \<otimes> y" -- {* example 1 *}
-term %quote "(x::nat) \<otimes> y" -- {* example 2 *}
+term %quote "x \<otimes> y" -- \<open>example 1\<close>
+term %quote "(x::nat) \<otimes> y" -- \<open>example 2\<close>
 
 end  %quote
 
-term %quote "x \<otimes> y" -- {* example 3 *}
+term %quote "x \<otimes> y" -- \<open>example 3\<close>
 
-text {*
+text \<open>
   \noindent Here in example 1, the term refers to the local class
   operation @{text "mult [\<alpha>]"}, whereas in example 2 the type
   constraint enforces the global class operation @{text "mult [nat]"}.
   In the global context in example 3, the reference is to the
   polymorphic global class operation @{text "mult [?\<alpha> :: semigroup]"}.
-*}
+\<close>
 
-section {* Further issues *}
+section \<open>Further issues\<close>
 
-subsection {* Type classes and code generation *}
+subsection \<open>Type classes and code generation\<close>
 
-text {*
+text \<open>
   Turning back to the first motivation for type classes, namely
   overloading, it is obvious that overloading stemming from @{command
   class} statements and @{command instantiation} targets naturally
@@ -586,37 +586,37 @@
   language (e.g.~SML) lacks type classes, then they are implemented by
   an explicit dictionary construction.  As example, let's go back to
   the power function:
-*}
+\<close>
 
 definition %quote example :: int where
   "example = pow_int 10 (-2)"
 
-text {*
+text \<open>
   \noindent This maps to Haskell as follows:
-*}
-text %quotetypewriter {*
+\<close>
+text %quotetypewriter \<open>
   @{code_stmts example (Haskell)}
-*}
+\<close>
 
-text {*
+text \<open>
   \noindent The code in SML has explicit dictionary passing:
-*}
-text %quotetypewriter {*
+\<close>
+text %quotetypewriter \<open>
   @{code_stmts example (SML)}
-*}
+\<close>
 
 
-text {*
+text \<open>
   \noindent In Scala, implicits are used as dictionaries:
-*}
-text %quotetypewriter {*
+\<close>
+text %quotetypewriter \<open>
   @{code_stmts example (Scala)}
-*}
+\<close>
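
  [Outside the document antiquotations, the same programs can be written to disk with the export_code command; a sketch, with module name and target path hypothetical:

    export_code example in Haskell module_name Example file "examples/"]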
 
 
-subsection {* Inspecting the type class universe *}
+subsection \<open>Inspecting the type class universe\<close>
 
-text {*
+text \<open>
   To facilitate orientation in complex subclass structures, two
   diagnostic commands are provided:
 
@@ -631,7 +631,7 @@
       an optional second sort argument to all superclasses of this sort.
 
   \end{description}
-*}
+\<close>
 
 end
 
--- a/src/Doc/Classes/Setup.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Classes/Setup.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -13,7 +13,7 @@
   "_beta" :: "type"  ("\<beta>")
   "_beta_ofsort" :: "sort \<Rightarrow> type"  ("\<beta>()::_" [0] 1000)
 
-parse_ast_translation {*
+parse_ast_translation \<open>
   let
     fun alpha_ast_tr [] = Ast.Variable "'a"
       | alpha_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
@@ -31,6 +31,6 @@
     (@{syntax_const "_beta"}, K beta_ast_tr),
     (@{syntax_const "_beta_ofsort"}, K beta_ofsort_ast_tr)]
   end
-*}
+\<close>
 
 end
--- a/src/Doc/Functions/Functions.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Functions/Functions.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -8,11 +8,11 @@
 imports Main
 begin
 
-section {* Function Definitions for Dummies *}
+section \<open>Function Definitions for Dummies\<close>
 
-text {*
+text \<open>
   In most cases, defining a recursive function is just as simple as other definitions:
-*}
+\<close>
 
 fun fib :: "nat \<Rightarrow> nat"
 where
@@ -20,7 +20,7 @@
 | "fib (Suc 0) = 1"
 | "fib (Suc (Suc n)) = fib n + fib (Suc n)"
 
-text {*
+text \<open>
   The syntax is rather self-explanatory: We introduce a function by
   giving its name, its type, 
   and a set of defining recursive equations.
@@ -28,9 +28,9 @@
   inferred, which can sometimes lead to surprises: Since both @{term
   "1::nat"} and @{text "+"} are overloaded, we would end up
   with @{text "fib :: nat \<Rightarrow> 'a::{one,plus}"}.
-*}
+\<close>
 
-text {*
+text \<open>
   The function always terminates, since its argument gets smaller in
   every recursive call. 
   Since HOL is a logic of total functions, termination is a
@@ -40,11 +40,11 @@
   Isabelle tries to prove termination automatically when a definition
   is made. In \S\ref{termination}, we will look at cases where this
   fails and see what to do then.
-*}
+\<close>
 
-subsection {* Pattern matching *}
+subsection \<open>Pattern matching\<close>
 
-text {* \label{patmatch}
+text \<open>\label{patmatch}
   Like in functional programming, we can use pattern matching to
   define functions. At the moment we will only consider \emph{constructor
   patterns}, which only consist of datatype constructors and
@@ -55,35 +55,35 @@
   If patterns overlap, the order of the equations is taken into
   account. The following function inserts a fixed element between any
   two elements of a list:
-*}
+\<close>
 
 fun sep :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"
 where
   "sep a (x#y#xs) = x # a # sep a (y # xs)"
 | "sep a xs       = xs"
 
-text {* 
+text \<open>
   Overlapping patterns are interpreted as \qt{increments} to what is
   already there: The second equation is only meant for the cases where
   the first one does not match. Consequently, Isabelle replaces it
   internally by the remaining cases, making the patterns disjoint:
-*}
+\<close>
 
 thm sep.simps
 
-text {* @{thm [display] sep.simps[no_vars]} *}
+text \<open>@{thm [display] sep.simps[no_vars]}\<close>
 
-text {* 
+text \<open>
   \noindent The equations from function definitions are automatically used in
   simplification:
-*}
+\<close>
 
 lemma "sep 0 [1, 2, 3] = [1, 0, 2, 0, 3]"
 by simp
 
-subsection {* Induction *}
+subsection \<open>Induction\<close>
 
-text {*
+text \<open>
 
   Isabelle provides customized induction rules for recursive
   functions. These rules follow the recursive structure of the
@@ -95,30 +95,30 @@
   We have a step case for list with at least two elements, and two
   base cases for the zero- and the one-element list. Here is a simple
   proof about @{const sep} and @{const map}
-*}
+\<close>
 
 lemma "map f (sep x ys) = sep (f x) (map f ys)"
 apply (induct x ys rule: sep.induct)
 
-text {*
+text \<open>
   We get three cases, like in the definition.
 
   @{subgoals [display]}
-*}
+\<close>
 
 apply auto 
 done
-text {*
+text \<open>
 
   With the \cmd{fun} command, you can define about 80\% of the
   functions that occur in practice. The rest of this tutorial explains
   the remaining 20\%.
-*}
+\<close>
 
 
-section {* fun vs.\ function *}
+section \<open>fun vs.\ function\<close>
 
-text {* 
+text \<open>
   The \cmd{fun} command provides a
   convenient shorthand notation for simple function definitions. In
   this mode, Isabelle tries to solve all the necessary proof obligations
@@ -171,32 +171,32 @@
   Whenever a \cmd{fun} command fails, it is usually a good idea to
   expand the syntax to the more verbose \cmd{function} form, to see
   what is actually going on.
- *}
+\<close>
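
  [For example, the fib definition from the introduction, written out in the verbose form that \cmd{fun} abbreviates, becomes roughly:

    function (sequential) fib :: "nat \<Rightarrow> nat"
    where
      "fib 0 = 0"
    | "fib (Suc 0) = 1"
    | "fib (Suc (Suc n)) = fib n + fib (Suc n)"
    by pat_completeness auto
    termination by lexicographic_order]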
 
 
-section {* Termination *}
+section \<open>Termination\<close>
 
-text {*\label{termination}
+text \<open>\label{termination}
   The method @{text "lexicographic_order"} is the default method for
   termination proofs. It can prove termination of a
   certain class of functions by searching for a suitable lexicographic
   combination of size measures. Of course, not all functions have such
   a simple termination argument. For them, we can specify the termination
   relation manually.
-*}
+\<close>
 
-subsection {* The {\tt relation} method *}
-text{*
+subsection \<open>The {\tt relation} method\<close>
+text\<open>
   Consider the following function, which sums up natural numbers up to
   @{text "N"}, using a counter @{text "i"}:
-*}
+\<close>
 
 function sum :: "nat \<Rightarrow> nat \<Rightarrow> nat"
 where
   "sum i N = (if i > N then 0 else i + sum (Suc i) N)"
 by pat_completeness auto
 
-text {*
+text \<open>
   \noindent The @{text "lexicographic_order"} method fails on this example, because none of the
   arguments decreases in the recursive call, with respect to the standard size ordering.
   To prove termination manually, we must provide a custom wellfounded relation.
@@ -208,12 +208,12 @@
   @{text "N + 1 - i"} always decreases.
 
   We can use this expression as a measure function suitable to prove termination.
-*}
+\<close>
 
 termination sum
 apply (relation "measure (\<lambda>(i,N). N + 1 - i)")
 
-text {*
+text \<open>
   The \cmd{termination} command sets up the termination goal for the
   specified function @{text "sum"}. If the function name is omitted, it
   implicitly refers to the last function definition.
@@ -236,15 +236,15 @@
   @{subgoals[display,indent=0]}
 
   These goals are all solved by @{text "auto"}:
-*}
+\<close>
 
 apply auto
 done
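
  [Once termination is proved, the defining equation becomes an unconditional simp rule, so concrete instances compute; a sketch:

    lemma "sum 1 3 = 6"
      by simp]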
 
-text {*
+text \<open>
   Let us complicate the function a little, by adding some more
   recursive calls: 
-*}
+\<close>
 
 function foo :: "nat \<Rightarrow> nat \<Rightarrow> nat"
 where
@@ -253,7 +253,7 @@
               else i + foo (Suc i) N)"
 by pat_completeness auto
 
-text {*
+text \<open>
   When @{text "i"} has reached @{text "N"}, it starts at zero again
   and @{text "N"} is decremented.
   This corresponds to a nested
@@ -262,12 +262,12 @@
   the value of @{text "N"} and the above difference. The @{const
   "measures"} combinator generalizes @{text "measure"} by taking a
   list of measure functions.  
-*}
+\<close>
 
 termination 
 by (relation "measures [\<lambda>(i, N). N, \<lambda>(i,N). N + 1 - i]") auto
 
-subsection {* How @{text "lexicographic_order"} works *}
+subsection \<open>How @{text "lexicographic_order"} works\<close>
 
 (*fun fails :: "nat \<Rightarrow> nat list \<Rightarrow> nat"
 where
@@ -275,7 +275,7 @@
 | "fails a (x#xs) = fails (x + a) (x # xs)"
 *)
 
-text {*
+text \<open>
   To see how the automatic termination proofs work, let's look at an
   example where it fails\footnote{For a detailed discussion of the
   termination prover, see @{cite bulwahnKN07}}:
@@ -308,8 +308,8 @@
 *** Could not find lexicographic termination order.\newline
 *** At command "fun".\newline
 \end{isabelle}
-*}
-text {*
+\<close>
+text \<open>
   The key to this error message is the matrix at the bottom. The rows
   of that matrix correspond to the different recursive calls (in our
   case, there is just one). The columns are the function's arguments 
@@ -324,11 +324,11 @@
 
   For the failed proof attempts, the unfinished subgoals are also
   printed. Looking at these will often point to a missing lemma.
-*}
+\<close>
 
-subsection {* The @{text size_change} method *}
+subsection \<open>The @{text size_change} method\<close>
 
-text {*
+text \<open>
   Some termination goals that are beyond the powers of
   @{text lexicographic_order} can be solved automatically by the
   more powerful @{text size_change} method, which uses a variant of
@@ -347,14 +347,14 @@
 
   Loading the theory @{text Multiset} makes the @{text size_change}
   method a bit stronger: it can then use multiset orders internally.
-*}
+\<close>
 
-section {* Mutual Recursion *}
+section \<open>Mutual Recursion\<close>
 
-text {*
+text \<open>
   If two or more functions call one another mutually, they have to be defined
   in one step. Here are @{text "even"} and @{text "odd"}:
-*}
+\<close>
 
 function even :: "nat \<Rightarrow> bool"
     and odd  :: "nat \<Rightarrow> bool"
@@ -365,93 +365,93 @@
 | "odd (Suc n) = even n"
 by pat_completeness auto
 
-text {*
+text \<open>
   To eliminate the mutual dependencies, Isabelle internally
   creates a single function operating on the sum
   type @{typ "nat + nat"}. Then, @{const even} and @{const odd} are
   defined as projections. Consequently, termination has to be proved
   simultaneously for both functions, by specifying a measure on the
   sum type: 
-*}
+\<close>
 
 termination 
 by (relation "measure (\<lambda>x. case x of Inl n \<Rightarrow> n | Inr n \<Rightarrow> n)") auto
 
-text {* 
+text \<open>
   We could also have used @{text lexicographic_order}, which
   supports mutual recursive termination proofs to a certain extent.
-*}
+\<close>
 
-subsection {* Induction for mutual recursion *}
+subsection \<open>Induction for mutual recursion\<close>
 
-text {*
+text \<open>
 
   When functions are mutually recursive, proving properties about them
   generally requires simultaneous induction. The induction rule @{thm [source] "even_odd.induct"}
   generated from the above definition reflects this.
 
   Let us prove something about @{const even} and @{const odd}:
-*}
+\<close>
 
 lemma even_odd_mod2:
   "even n = (n mod 2 = 0)"
   "odd n = (n mod 2 = 1)"
 
-text {* 
+text \<open>
   We apply simultaneous induction, specifying the induction variable
-  for both goals, separated by \cmd{and}:  *}
+  for both goals, separated by \cmd{and}:\<close>
 
 apply (induct n and n rule: even_odd.induct)
 
-text {* 
+text \<open>
   We get four subgoals, which correspond to the clauses in the
   definition of @{const even} and @{const odd}:
   @{subgoals[display,indent=0]}
   Simplification solves the first two goals, leaving us with two
   statements about the @{text "mod"} operation to prove:
-*}
+\<close>
 
 apply simp_all
 
-text {* 
+text \<open>
   @{subgoals[display,indent=0]} 
 
   \noindent These can be handled by Isabelle's arithmetic decision procedures.
   
-*}
+\<close>
 
 apply arith
 apply arith
 done
 
-text {*
+text \<open>
   In proofs like this, the simultaneous induction is really essential:
   Even if we are just interested in one of the results, the other
   one is necessary to strengthen the induction hypothesis. If we leave
   out the statement about @{const odd} and just write @{term True} instead,
   the same proof fails:
-*}
+\<close>
 
 lemma failed_attempt:
   "even n = (n mod 2 = 0)"
   "True"
 apply (induct n rule: even_odd.induct)
 
-text {*
+text \<open>
   \noindent Now the third subgoal is a dead end, since we have no
   useful induction hypothesis available:
 
   @{subgoals[display,indent=0]} 
-*}
+\<close>
 
 oops
 
-section {* Elimination *}
+section \<open>Elimination\<close>
 
-text {* 
+text \<open>
   A definition of function @{text f} gives rise to two kinds of elimination rules. Rule @{text f.cases}
   simply describes case analysis according to the patterns used in the definition:
-*}
+\<close>
 
 fun list_to_option :: "'a list \<Rightarrow> 'a option"
 where
@@ -459,21 +459,21 @@
 | "list_to_option _ = None"
 
 thm list_to_option.cases
-text {*
+text \<open>
   @{thm[display] list_to_option.cases}
 
   Note that this rule does not mention the function at all, but only describes the cases used for
   defining it. In contrast, the rule @{thm[source] list_to_option.elims} also tells us what the function
   value will be in each case:
-*}
+\<close>
 thm list_to_option.elims
-text {*
+text \<open>
   @{thm[display] list_to_option.elims}
 
   \noindent
   This lets us eliminate an assumption of the form @{prop "list_to_option xs = y"} and replace it
   with the two cases, e.g.:
-*}
+\<close>
 
 lemma "list_to_option xs = y \<Longrightarrow> P"
 proof (erule list_to_option.elims)
@@ -485,28 +485,28 @@
 qed
 
 
-text {*
+text \<open>
   Sometimes it is convenient to derive specialized versions of the @{text elim} rules above and
   keep them around as facts explicitly. For example, it is natural to show that if 
   @{prop "list_to_option xs = Some y"}, then @{term xs} must be a singleton. The command 
   \cmd{fun\_cases} derives such facts automatically, by instantiating and simplifying the general 
   elimination rules given some pattern:
-*}
+\<close>
 
 fun_cases list_to_option_SomeE[elim]: "list_to_option xs = Some y"
 
 thm list_to_option_SomeE
-text {*
+text \<open>
   @{thm[display] list_to_option_SomeE}
-*}
+\<close>
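
  [Since the rule is declared [elim], the automated tools pick it up, e.g. (a sketch):

    lemma "list_to_option xs = Some y \<Longrightarrow> xs = [y]"
      by auto]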
 
 
-section {* General pattern matching *}
-text{*\label{genpats} *}
+section \<open>General pattern matching\<close>
+text\<open>\label{genpats}\<close>
 
-subsection {* Avoiding automatic pattern splitting *}
+subsection \<open>Avoiding automatic pattern splitting\<close>
 
-text {*
+text \<open>
 
   Up to now, we used pattern matching only on datatypes, and the
   patterns were always disjoint and complete, and if they weren't,
@@ -520,11 +520,11 @@
   Suppose we are modeling incomplete knowledge about the world by a
   three-valued datatype, which has values @{term "T"}, @{term "F"}
   and @{term "X"} for true, false and uncertain propositions, respectively. 
-*}
+\<close>
 
 datatype P3 = T | F | X
 
-text {* \noindent Then the conjunction of such values can be defined as follows: *}
+text \<open>\noindent Then the conjunction of such values can be defined as follows:\<close>
 
 fun And :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
 where
@@ -535,17 +535,17 @@
 | "And X X = X"
 
 
-text {* 
+text \<open>
   This definition is useful, because the equations can directly be used
   as simplification rules. But the patterns overlap: For example,
   the expression @{term "And T T"} is matched by both the first and
   the second equation. By default, Isabelle makes the patterns disjoint by
   splitting them up, producing instances:
-*}
+\<close>
 
 thm And.simps
 
-text {*
+text \<open>
   @{thm[indent=4] And.simps}
   
   \vspace*{1em}
@@ -575,7 +575,7 @@
   prove that our pattern matching is consistent\footnote{This prevents
   us from defining something like @{term "f x = True"} and @{term "f x
   = False"} simultaneously.}:
-*}
+\<close>
 
 function And2 :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
 where
@@ -585,7 +585,7 @@
 | "And2 F p = F"
 | "And2 X X = X"
 
-text {*
+text \<open>
   \noindent Now let's look at the proof obligations generated by a
   function definition. In this case, they are:
 
@@ -599,26 +599,26 @@
   (\<exists>p. x = (F, p)) \<or> (x = (X, X))"}, and you can use the method @{text atomize_elim} to get that form instead.}. If the patterns just involve
   datatypes, we can solve it with the @{text "pat_completeness"}
   method:
-*}
+\<close>
 
 apply pat_completeness
 
-text {*
+text \<open>
   The remaining subgoals express \emph{pattern compatibility}. We do
   allow an input value to match multiple patterns, but in this
   case, the result (i.e.~the right hand sides of the equations) must
   also be equal. For each pair of two patterns, there is one such
   subgoal. Usually this needs injectivity of the constructors, which
   is used automatically by @{text "auto"}.
-*}
+\<close>
 
 by auto
 termination by (relation "{}") simp
 
 
-subsection {* Non-constructor patterns *}
+subsection \<open>Non-constructor patterns\<close>
 
-text {*
+text \<open>
   Most of Isabelle's basic types take the form of inductive datatypes,
   and usually pattern matching works on the constructors of such types. 
   However, this need not be always the case, and the \cmd{function}
@@ -628,7 +628,7 @@
   so-called \emph{$n+k$-patterns}, which are a little controversial in
   the functional programming world. Here is the initial fibonacci
   example with $n+k$-patterns:
-*}
+\<close>
 
 function fib2 :: "nat \<Rightarrow> nat"
 where
@@ -636,7 +636,7 @@
 | "fib2 1 = 1"
 | "fib2 (n + 2) = fib2 n + fib2 (Suc n)"
 
-text {*
+text \<open>
   This kind of matching is again justified by the proof of pattern
   completeness and compatibility. 
   The proof obligation for pattern completeness states that every natural number is
@@ -651,17 +651,17 @@
   "atomize_elim"} to do an ad-hoc conversion to a disjunction of
   existentials, which can then be solved by the arithmetic decision procedure.
   Pattern compatibility and termination are automatic as usual.
-*}
+\<close>
 apply atomize_elim
 apply arith
 apply auto
 done
 termination by lexicographic_order
-text {*
+text \<open>
   We can stretch the notion of pattern matching even more. The
   following function is not a sensible functional program, but a
   perfectly valid mathematical definition:
-*}
+\<close>
 
 function ev :: "nat \<Rightarrow> bool"
 where
@@ -671,7 +671,7 @@
 by arith+
 termination by (relation "{}") simp
 
-text {*
+text \<open>
   This general notion of pattern matching gives you a certain freedom
   in writing down specifications. However, as always, such freedom should
   be used with care:
@@ -682,17 +682,17 @@
   code generator, and expect it to generate ML code for our
   definitions. Also, such a specification might not work very well together with
   simplification. Your mileage may vary.
-*}
+\<close>
 
 
-subsection {* Conditional equations *}
+subsection \<open>Conditional equations\<close>
 
-text {* 
+text \<open>
   The function package also supports conditional equations, which are
   similar to guards in a language like Haskell. Here is Euclid's
   algorithm written with conditional patterns\footnote{Note that the
   patterns are also overlapping in the base case}:
-*}
+\<close>
 
 function gcd :: "nat \<Rightarrow> nat \<Rightarrow> nat"
 where
@@ -703,18 +703,18 @@
 by (atomize_elim, auto, arith)
 termination by lexicographic_order
 
-text {*
+text \<open>
   By now, you can probably guess what the proof obligations for the
   pattern completeness and compatibility look like. 
 
   Again, functions with conditional patterns are not supported by the
   code generator.
-*}
+\<close>
 
 
-subsection {* Pattern matching on strings *}
+subsection \<open>Pattern matching on strings\<close>
 
-text {*
+text \<open>
   As strings (as lists of characters) are normal datatypes, pattern
   matching on them is possible, but somewhat problematic. Consider the
   following definition:
@@ -734,7 +734,7 @@
 
   There are two things we can do here. Either we write an explicit
   @{text "if"} on the right hand side, or we can use conditional patterns:
-*}
+\<close>
 
 function check :: "string \<Rightarrow> bool"
 where
@@ -744,9 +744,9 @@
 termination by (relation "{}") simp
 
 
-section {* Partiality *}
+section \<open>Partiality\<close>
 
-text {* 
+text \<open>
   In HOL, all functions are total. A function @{term "f"} applied to
   @{term "x"} always has the value @{term "f x"}, and there is no notion
   of undefinedness. 
@@ -757,22 +757,22 @@
   However, the \cmd{function} package does support partiality to a
   certain extent. Let's look at the following function which looks
   for a zero of a given function f. 
-*}
+\<close>
 
 function (*<*)(domintros)(*>*)findzero :: "(nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat"
 where
   "findzero f n = (if f n = 0 then n else findzero f (Suc n))"
 by pat_completeness auto
 
-text {*
+text \<open>
   \noindent Clearly, any attempt at a termination proof must fail. And without
   that, we do not get the usual rules @{text "findzero.simps"} and 
   @{text "findzero.induct"}. So what was the definition good for at all?
-*}
+\<close>
 
-subsection {* Domain predicates *}
+subsection \<open>Domain predicates\<close>
 
-text {*
+text \<open>
   The trick is that Isabelle has not only defined the function @{const findzero}, but also
   a predicate @{term "findzero_dom"} that characterizes the values where the function
   terminates: the \emph{domain} of the function. If we treat a
@@ -781,18 +781,18 @@
   induction rules as we do for total functions. They are guarded
   by domain conditions and are called @{text psimps} and @{text
   pinduct}: 
-*}
+\<close>
 
-text {*
+text \<open>
   \noindent\begin{minipage}{0.79\textwidth}@{thm[display,margin=85] findzero.psimps}\end{minipage}
   \hfill(@{thm [source] "findzero.psimps"})
   \vspace{1em}
 
   \noindent\begin{minipage}{0.79\textwidth}@{thm[display,margin=85] findzero.pinduct}\end{minipage}
   \hfill(@{thm [source] "findzero.pinduct"})
-*}
+\<close>
 
-text {*
+text \<open>
   Remember that all we
   are doing here is use some tricks to make a total function appear
   as if it were partial.  We can still write the term @{term "findzero
@@ -803,16 +803,16 @@
   But it is defined enough to prove something interesting about it. We
   can prove that if @{term "findzero f n"}
   terminates, it indeed returns a zero of @{term f}:
-*}
+\<close>
 
 lemma findzero_zero: "findzero_dom (f, n) \<Longrightarrow> f (findzero f n) = 0"
 
-text {* \noindent We apply induction as usual, but using the partial induction
-  rule: *}
+text \<open>\noindent We apply induction as usual, but using the partial induction
+  rule:\<close>
 
 apply (induct f n rule: findzero.pinduct)
 
-text {* \noindent This gives the following subgoals:
+text \<open>\noindent This gives the following subgoals:
 
   @{subgoals[display,indent=0]}
 
@@ -821,26 +821,26 @@
   "findzero_dom (f, n)"} as a local assumption in the induction step. This
   allows unfolding @{term "findzero f n"} using the @{text psimps}
   rule, and the rest is trivial.
- *}
+\<close>
 apply (simp add: findzero.psimps)
 done
 
-text {*
+text \<open>
   Proofs about partial functions are often not harder than for total
   functions. Fig.~\ref{findzero_isar} shows a slightly more
   complicated proof written in Isar. It is verbose enough to show how
   partiality comes into play: From the partial induction, we get an
   additional domain condition hypothesis. Observe how this condition
   is applied when calls to @{term findzero} are unfolded.
-*}
+\<close>
 
-text_raw {*
+text_raw \<open>
 \begin{figure}
 \hrule\vspace{6pt}
 \begin{minipage}{0.8\textwidth}
 \isabellestyle{it}
 \isastyle\isamarkuptrue
-*}
+\<close>
 lemma "\<lbrakk>findzero_dom (f, n); x \<in> {n ..< findzero f n}\<rbrakk> \<Longrightarrow> f x \<noteq> 0"
 proof (induct rule: findzero.pinduct)
   fix f n assume dom: "findzero_dom (f, n)"
@@ -857,24 +857,24 @@
   thus "f x \<noteq> 0"
   proof
     assume "x = n"
-    with `f n \<noteq> 0` show ?thesis by simp
+    with \<open>f n \<noteq> 0\<close> show ?thesis by simp
   next
     assume "x \<in> {Suc n ..< findzero f n}"
-    with dom and `f n \<noteq> 0` have "x \<in> {Suc n ..< findzero f (Suc n)}" by (simp add: findzero.psimps)
-    with IH and `f n \<noteq> 0`
+    with dom and \<open>f n \<noteq> 0\<close> have "x \<in> {Suc n ..< findzero f (Suc n)}" by (simp add: findzero.psimps)
+    with IH and \<open>f n \<noteq> 0\<close>
     show ?thesis by simp
   qed
 qed
-text_raw {*
+text_raw \<open>
 \isamarkupfalse\isabellestyle{tt}
 \end{minipage}\vspace{6pt}\hrule
 \caption{A proof about a partial function}\label{findzero_isar}
 \end{figure}
-*}
+\<close>
 
-subsection {* Partial termination proofs *}
+subsection \<open>Partial termination proofs\<close>
 
-text {*
+text \<open>
   Now that we have proved some interesting properties about our
   function, we should turn to the domain predicate and see if it is
   actually true for some values. Otherwise we would have just proved
@@ -894,11 +894,11 @@
 \ \ \ldots\\
 
   \noindent Now the package has proved an introduction rule for @{text findzero_dom}:
-*}
+\<close>
 
 thm findzero.domintros
 
-text {*
+text \<open>
   @{thm[display] findzero.domintros}
 
   Domain introduction rules allow us to show that a given value lies in the
@@ -921,27 +921,27 @@
   induction is then straightforward, except for the unusual induction
   principle.
 
-*}
+\<close>
 
-text_raw {*
+text_raw \<open>
 \begin{figure}
 \hrule\vspace{6pt}
 \begin{minipage}{0.8\textwidth}
 \isabellestyle{it}
 \isastyle\isamarkuptrue
-*}
+\<close>
 lemma findzero_termination:
   assumes "x \<ge> n" and "f x = 0"
   shows "findzero_dom (f, n)"
 proof - 
   have base: "findzero_dom (f, x)"
-    by (rule findzero.domintros) (simp add:`f x = 0`)
+    by (rule findzero.domintros) (simp add:\<open>f x = 0\<close>)
 
   have step: "\<And>i. findzero_dom (f, Suc i) 
     \<Longrightarrow> findzero_dom (f, i)"
     by (rule findzero.domintros) simp
 
-  from `x \<ge> n` show ?thesis
+  from \<open>x \<ge> n\<close> show ?thesis
   proof (induct rule:inc_induct)
     show "findzero_dom (f, x)" by (rule base)
   next
@@ -949,18 +949,18 @@
     thus "findzero_dom (f, i)" by (rule step)
   qed
 qed      
-text_raw {*
+text_raw \<open>
 \isamarkupfalse\isabellestyle{tt}
 \end{minipage}\vspace{6pt}\hrule
 \caption{Termination proof for @{text findzero}}\label{findzero_term}
 \end{figure}
-*}
+\<close>
       
-text {*
+text \<open>
   Again, the proof given in Fig.~\ref{findzero_term} has a lot of
   detail in order to explain the principles. Using more automation, we
   can also have a short proof:
-*}
+\<close>
 
 lemma findzero_termination_short:
   assumes zero: "x >= n" 
@@ -969,18 +969,18 @@
 using zero
 by (induct rule:inc_induct) (auto intro: findzero.domintros)
     
-text {*
+text \<open>
   \noindent It is simple to combine the partial correctness result with the
   termination lemma:
-*}
+\<close>
 
 lemma findzero_total_correctness:
   "f x = 0 \<Longrightarrow> f (findzero f 0) = 0"
 by (blast intro: findzero_zero findzero_termination)
 
-subsection {* Definition of the domain predicate *}
+subsection \<open>Definition of the domain predicate\<close>
 
-text {*
+text \<open>
   Sometimes it is useful to know what the definition of the domain
   predicate looks like. Actually, @{text findzero_dom} is just an
   abbreviation:
@@ -1014,17 +1014,17 @@
   accp_downward}, and of course the introduction and elimination rules
   for the recursion relation @{thm [source] "findzero_rel.intros"} and @{thm
   [source] "findzero_rel.cases"}.
-*}
+\<close>
 
-section {* Nested recursion *}
+section \<open>Nested recursion\<close>
 
-text {*
+text \<open>
   Recursive calls which are nested in one another frequently cause
   complications, since their termination proof can depend on a partial
   correctness property of the function itself. 
 
   As a small example, we define the \qt{nested zero} function:
-*}
+\<close>
 
 function nz :: "nat \<Rightarrow> nat"
 where
@@ -1032,16 +1032,16 @@
 | "nz (Suc n) = nz (nz n)"
 by pat_completeness auto
 
-text {*
+text \<open>
   If we attempt to prove termination using the identity measure on
   naturals, this fails:
-*}
+\<close>
 
 termination
   apply (relation "measure (\<lambda>n. n)")
   apply auto
 
-text {*
+text \<open>
   We get stuck with the subgoal
 
   @{subgoals[display]}
@@ -1049,36 +1049,36 @@
   Of course this statement is true, since we know that @{const nz} is
   the zero function. And in fact we have no problem proving this
   property by induction.
-*}
+\<close>
 (*<*)oops(*>*)
 lemma nz_is_zero: "nz_dom n \<Longrightarrow> nz n = 0"
   by (induct rule:nz.pinduct) (auto simp: nz.psimps)
 
-text {*
+text \<open>
   We formulate this as a partial correctness lemma with the condition
   @{term "nz_dom n"}. This allows us to prove it with the @{text
   pinduct} rule before we have proved termination. With this lemma,
   the termination proof works as expected:
-*}
+\<close>
 
 termination
   by (relation "measure (\<lambda>n. n)") (auto simp: nz_is_zero)
 
-text {*
+text \<open>
   As a general strategy, one should prove the statements needed for
   termination as a partial property first. Then they can be used to do
   the termination proof. This also works for less trivial
   examples. Figure \ref{f91} defines the 91-function, a well-known
   challenge problem due to John McCarthy, and proves its termination.
-*}
+\<close>
 
-text_raw {*
+text_raw \<open>
 \begin{figure}
 \hrule\vspace{6pt}
 \begin{minipage}{0.8\textwidth}
 \isabellestyle{it}
 \isastyle\isamarkuptrue
-*}
+\<close>
 
 function f91 :: "nat \<Rightarrow> nat"
 where
@@ -1101,46 +1101,46 @@
 
   assume inner_trm: "f91_dom (n + 11)" -- "Outer call"
   with f91_estimate have "n + 11 < f91 (n + 11) + 11" .
-  with `\<not> 100 < n` show "(f91 (n + 11), n) \<in> ?R" by simp
+  with \<open>\<not> 100 < n\<close> show "(f91 (n + 11), n) \<in> ?R" by simp
 qed
 
-text_raw {*
+text_raw \<open>
 \isamarkupfalse\isabellestyle{tt}
 \end{minipage}
 \vspace{6pt}\hrule
 \caption{McCarthy's 91-function}\label{f91}
 \end{figure}
-*}
+\<close>
 
 
-section {* Higher-Order Recursion *}
+section \<open>Higher-Order Recursion\<close>
 
-text {*
+text \<open>
   Higher-order recursion occurs when recursive calls
   are passed as arguments to higher-order combinators such as @{const
   map}, @{term filter} etc.
   As an example, imagine a datatype of n-ary trees:
-*}
+\<close>
 
 datatype 'a tree = 
   Leaf 'a 
 | Branch "'a tree list"
 
 
-text {* \noindent We can define a function which swaps the left and right subtrees recursively, using the 
-  list functions @{const rev} and @{const map}: *}
+text \<open>\noindent We can define a function which swaps the left and right subtrees recursively, using the 
+  list functions @{const rev} and @{const map}:\<close>
 
 fun mirror :: "'a tree \<Rightarrow> 'a tree"
 where
   "mirror (Leaf n) = Leaf n"
 | "mirror (Branch l) = Branch (rev (map mirror l))"
 
-text {*
+text \<open>
   Although the definition is accepted without problems, let us look at the termination proof:
-*}
+\<close>
 
 termination proof
-  text {*
+  text \<open>
 
   As usual, we have to give a wellfounded relation, such that the
   arguments of the recursive calls get smaller. But what exactly are
@@ -1184,12 +1184,12 @@
   But if you define your own higher-order functions, you may have to
   state and prove the required congruence rules yourself, if you want to use your
   functions in recursive definitions. 
-*}
+\<close>
 (*<*)oops(*>*)
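
  [For a home-grown combinator, the congruence rule takes the same shape as the one for @{const map}; a sketch for a hypothetical clone mapL, proof indicative only:

    fun mapL :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"
    where
      "mapL f [] = []"
    | "mapL f (x # xs) = f x # mapL f xs"

    lemma mapL_cong [fundef_cong]:
      "xs = ys \<Longrightarrow> (\<And>x. x \<in> set ys \<Longrightarrow> f x = g x) \<Longrightarrow> mapL f xs = mapL g ys"
      by (induct ys arbitrary: xs) auto]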
 
-subsection {* Congruence Rules and Evaluation Order *}
+subsection \<open>Congruence Rules and Evaluation Order\<close>
 
-text {* 
+text \<open>
   Higher order logic differs from functional programming languages in
   that it has no built-in notion of evaluation order. A program is
   just a set of equations, and it is not specified how they must be
@@ -1201,14 +1201,14 @@
   consistent with the logical definition. 
 
   Consider the following function.
-*}
+\<close>
 
 function f :: "nat \<Rightarrow> bool"
 where
   "f n = (n = 0 \<or> f (n - 1))"
 (*<*)by pat_completeness auto(*>*)
 
-text {*
+text \<open>
   For this definition, the termination proof fails. The default configuration
   specifies no congruence rule for disjunction. We have to add a
   congruence rule that specifies left-to-right evaluation order:
@@ -1224,7 +1224,7 @@
   However, as evaluation is not a hard-wired concept, we
   could just turn everything around by declaring a different
   congruence rule. Then we can make the reverse definition:
-*}
+\<close>
 
 lemma disj_cong2[fundef_cong]: 
   "(\<not> Q' \<Longrightarrow> P = P') \<Longrightarrow> (Q = Q') \<Longrightarrow> (P \<or> Q) = (P' \<or> Q')"
@@ -1234,13 +1234,13 @@
 where
   "f' n = (f' (n - 1) \<or> n = 0)"
 
-text {*
+text \<open>
   \noindent These examples show that, in general, there is no \qt{best} set of
   congruence rules.
 
   However, such tweaking should rarely be necessary in
   practice, as most of the time, the default set of congruence rules
   works well.
-*}
+\<close>
 
 end
--- a/src/Doc/Locales/Examples.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Locales/Examples.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -15,9 +15,9 @@
 *}
 *)
 
-section {* Introduction *}
+section \<open>Introduction\<close>
 
-text {*
+text \<open>
   Locales are based on contexts.  A \emph{context} can be seen as a
   formula schema
 \[
@@ -38,17 +38,17 @@
   have been made persistent.  To the user, though, they provide
   powerful means for declaring and combining contexts, and for the
   reuse of theorems proved in these contexts.
-  *}
+\<close>
 
-section {* Simple Locales *}
+section \<open>Simple Locales\<close>
 
-text {*
+text \<open>
   In its simplest form, a
   \emph{locale declaration} consists of a sequence of context elements
   declaring parameters (keyword \isakeyword{fixes}) and assumptions
   (keyword \isakeyword{assumes}).  The following is the specification of
   partial orders, as locale @{text partial_order}.
-  *}
+\<close>
 
   locale partial_order =
     fixes le :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "\<sqsubseteq>" 50)
@@ -56,7 +56,7 @@
       and anti_sym [intro]: "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> x \<rbrakk> \<Longrightarrow> x = y"
       and trans [trans]: "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"
 
-text (in partial_order) {* The parameter of this locale is~@{text le},
+text (in partial_order) \<open>The parameter of this locale is~@{text le},
   which is a binary predicate with infix syntax~@{text \<sqsubseteq>}.  The
   parameter syntax is available in the subsequent
   assumptions, which are the familiar partial order axioms.
@@ -107,11 +107,11 @@
   of context and conclusion.  For the transitivity theorem, this is
   @{thm [source] partial_order.trans}:
   @{thm [display, indent=2] partial_order.trans}
-*}
+\<close>
 
-subsection {* Targets: Extending Locales *}
+subsection \<open>Targets: Extending Locales\<close>
 
-text {*
+text \<open>
   The specification of a locale is fixed, but its list of conclusions
   may be extended through Isar commands that take a \emph{target} argument.
   In the following, \isakeyword{definition} and 
@@ -140,13 +140,13 @@
 \caption{Isar commands that accept a target.}
 \label{tab:commands-with-target}
 \end{table}
-  *}
+\<close>
 
   definition (in partial_order)
     less :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "\<sqsubset>" 50)
     where "(x \<sqsubset> y) = (x \<sqsubseteq> y \<and> x \<noteq> y)"
 
-text (in partial_order) {* The strict order @{text less} with infix
+text (in partial_order) \<open>The strict order @{text less} with infix
   syntax~@{text \<sqsubset>} is
   defined in terms of the locale parameter~@{text le} and the general
   equality of the object logic we work in.  The definition generates a
@@ -161,25 +161,25 @@
   and parsed as infix~@{text \<sqsubset>}.  Finally, the conclusion @{thm [source]
   less_def} is added to the locale:
   @{thm [display, indent=2] less_def}
-*}
+\<close>
 
-text {* The treatment of theorem statements is more straightforward.
+text \<open>The treatment of theorem statements is more straightforward.
   As an example, here is the derivation of a transitivity law for the
-  strict order relation. *}
+  strict order relation.\<close>
 
   lemma (in partial_order) less_le_trans [trans]:
     "\<lbrakk> x \<sqsubset> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubset> z"
     unfolding %visible less_def by %visible (blast intro: trans)
 
-text {* In the context of the proof, conclusions of the
+text \<open>In the context of the proof, conclusions of the
   locale may be used like theorems.  Attributes are effective: @{text
   anti_sym} was
   declared as introduction rule, hence it is in the context's set of
-  rules used by the classical reasoner by default.  *}
+  rules used by the classical reasoner by default.\<close>
 
-subsection {* Context Blocks *}
+subsection \<open>Context Blocks\<close>
 
-text {* When working with locales, sequences of commands with the same
+text \<open>When working with locales, sequences of commands with the same
   target are frequent.  A block of commands, delimited by
   \isakeyword{begin} and \isakeyword{end}, makes a theory-like style
   of working possible.  All commands inside the block refer to the
@@ -190,7 +190,7 @@
 
   This style of working is illustrated in the block below, where
   notions of infimum and supremum for partial orders are introduced,
-  together with theorems about their uniqueness.  *}
+  together with theorems about their uniqueness.\<close>
 
   context partial_order
   begin
@@ -290,37 +290,37 @@
 
   end
 
-text {* The syntax of the locale commands discussed in this tutorial is
+text \<open>The syntax of the locale commands discussed in this tutorial is
   shown in Table~\ref{tab:commands}.  The grammar is complete with the
   exception of the context elements \isakeyword{constrains} and
   \isakeyword{defines}, which are provided for backward
   compatibility.  See the Isabelle/Isar Reference
-  Manual @{cite IsarRef} for full documentation.  *}
+  Manual @{cite IsarRef} for full documentation.\<close>
 
 
-section {* Import \label{sec:import} *}
+section \<open>Import \label{sec:import}\<close>
 
-text {* 
+text \<open>
   Algebraic structures are commonly defined by adding operations and
   properties to existing structures.  For example, partial orders
   are extended to lattices and total orders.  Lattices are extended to
-  distributive lattices. *}
+  distributive lattices.\<close>
 
-text {*
+text \<open>
   With locales, this kind of inheritance is achieved through
   \emph{import} of locales.  The import part of a locale declaration,
   if present, precedes the context elements.  Here is an example,
   where partial orders are extended to lattices.
-  *}
+\<close>
 
   locale lattice = partial_order +
     assumes ex_inf: "\<exists>inf. is_inf x y inf"
       and ex_sup: "\<exists>sup. is_sup x y sup"
   begin
 
-text {* These assumptions refer to the predicates for infimum
+text \<open>These assumptions refer to the predicates for infimum
   and supremum defined for @{text partial_order} in the previous
-  section.  We now introduce the notions of meet and join.  *}
+  section.  We now introduce the notions of meet and join.\<close>
 
   definition
     meet (infixl "\<sqinter>" 70) where "x \<sqinter> y = (THE inf. is_inf x y inf)"
@@ -331,7 +331,7 @@
   proof (unfold meet_def)
     assume "is_inf x y i"
     then show "(THE i. is_inf x y i) = i"
-      by (rule the_equality) (rule is_inf_uniq [OF _ `is_inf x y i`])
+      by (rule the_equality) (rule is_inf_uniq [OF _ \<open>is_inf x y i\<close>])
   qed
 
   lemma %invisible meetI [intro?]:
@@ -342,7 +342,7 @@
   proof (unfold meet_def)
     from ex_inf obtain i where "is_inf x y i" ..
     then show "is_inf x y (THE i. is_inf x y i)"
-      by (rule theI) (rule is_inf_uniq [OF _ `is_inf x y i`])
+      by (rule theI) (rule is_inf_uniq [OF _ \<open>is_inf x y i\<close>])
   qed
 
   lemma %invisible meet_left [intro?]:
@@ -361,7 +361,7 @@
   proof (unfold join_def)
     assume "is_sup x y s"
     then show "(THE s. is_sup x y s) = s"
-      by (rule the_equality) (rule is_sup_uniq [OF _ `is_sup x y s`])
+      by (rule the_equality) (rule is_sup_uniq [OF _ \<open>is_sup x y s\<close>])
   qed
 
   lemma %invisible joinI [intro?]: "x \<sqsubseteq> s \<Longrightarrow> y \<sqsubseteq> s \<Longrightarrow>
@@ -372,7 +372,7 @@
   proof (unfold join_def)
     from ex_sup obtain s where "is_sup x y s" ..
     then show "is_sup x y (THE s. is_sup x y s)"
-      by (rule theI) (rule is_sup_uniq [OF _ `is_sup x y s`])
+      by (rule theI) (rule is_sup_uniq [OF _ \<open>is_sup x y s\<close>])
   qed
 
   lemma %invisible join_left [intro?]:
@@ -558,7 +558,7 @@
   theorem %invisible join_connection2: "(x \<sqsubseteq> y) = (x \<squnion> y = y)"
     using join_commute join_connection by simp
 
-  text %invisible {* Naming according to Jacobson I, p.\ 459. *}
+  text %invisible \<open>Naming according to Jacobson I, p.\ 459.\<close>
   lemmas %invisible L1 = join_commute meet_commute
   lemmas %invisible L2 = join_assoc meet_assoc
   (* lemmas L3 = join_idem meet_idem *)
@@ -566,10 +566,10 @@
 
   end
 
-text {* Locales for total orders and distributive lattices follow to
+text \<open>Locales for total orders and distributive lattices follow to
   establish a sufficiently rich landscape of locales for
   further examples in this tutorial.  Each comes with an example
-  theorem. *}
+  theorem.\<close>
 
   locale total_order = partial_order +
     assumes total: "x \<sqsubseteq> y \<or> y \<sqsubseteq> x"
@@ -592,7 +592,7 @@
     finally show ?thesis .
   qed
 
-text {*
+text \<open>
   The locale hierarchy obtained through these declarations is shown in
   Figure~\ref{fig:lattices}(a).
 
@@ -639,12 +639,12 @@
 \caption{Hierarchy of Lattice Locales.}
 \label{fig:lattices}
 \end{figure}
-  *}
+\<close>
 
-section {* Changing the Locale Hierarchy
-  \label{sec:changing-the-hierarchy} *}
+section \<open>Changing the Locale Hierarchy
+  \label{sec:changing-the-hierarchy}\<close>
 
-text {*
+text \<open>
   Locales enable us to prove theorems abstractly, relative to
   sets of assumptions.  These theorems can then be used in other
   contexts where the assumptions themselves, or
@@ -676,11 +676,11 @@
   lattices.  Therefore the \isakeyword{sublocale} command generates a
   goal, which must be discharged by the user.  This is illustrated in
   the following paragraphs.  First the sublocale relation is stated.
-*}
+\<close>
 
   sublocale %visible total_order \<subseteq> lattice
 
-txt {* \normalsize
+txt \<open>\normalsize
   This enters the context of locale @{text total_order}, in
   which the goal @{subgoals [display]} must be shown.
   Now the
@@ -699,40 +699,40 @@
 
   For the current goal, we would like to get hold of
   the assumptions of @{text lattice}, which need to be shown, hence
-  @{text unfold_locales} is appropriate. *}
+  @{text unfold_locales} is appropriate.\<close>
 
   proof unfold_locales
 
-txt {* \normalsize
+txt \<open>\normalsize
   Since the fact that both lattices and total orders are partial
   orders is already reflected in the locale hierarchy, the assumptions
   of @{text partial_order} are discharged automatically, and only the
   assumptions introduced in @{text lattice} remain as subgoals
   @{subgoals [display]}
   The proof for the first subgoal is obtained by constructing an
-  infimum, whose existence is implied by totality. *}
+  infimum, whose existence is implied by totality.\<close>
 
     fix x y
     from total have "is_inf x y (if x \<sqsubseteq> y then x else y)"
       by (auto simp: is_inf_def)
     then show "\<exists>inf. is_inf x y inf" ..
-txt {* \normalsize
+txt \<open>\normalsize
    The proof for the second subgoal is analogous and not
-  reproduced here. *}
+  reproduced here.\<close>
   next %invisible
     fix x y
     from total have "is_sup x y (if x \<sqsubseteq> y then y else x)"
       by (auto simp: is_sup_def)
     then show "\<exists>sup. is_sup x y sup" .. qed %visible
 
-text {* Similarly, we may establish that total orders are distributive
-  lattices with a second \isakeyword{sublocale} statement. *}
+text \<open>Similarly, we may establish that total orders are distributive
+  lattices with a second \isakeyword{sublocale} statement.\<close>
 
   sublocale total_order \<subseteq> distrib_lattice
     proof unfold_locales
     fix %"proof" x y z
     show "x \<sqinter> (y \<squnion> z) = x \<sqinter> y \<squnion> x \<sqinter> z" (is "?l = ?r")
-      txt {* Jacobson I, p.\ 462 *}
+      txt \<open>Jacobson I, p.\ 462\<close>
     proof -
       { assume c: "y \<sqsubseteq> x" "z \<sqsubseteq> x"
         from c have "?l = y \<squnion> z"
@@ -751,10 +751,10 @@
     qed
   qed
 
-text {* The locale hierarchy is now as shown in
-  Figure~\ref{fig:lattices}(c). *}
+text \<open>The locale hierarchy is now as shown in
+  Figure~\ref{fig:lattices}(c).\<close>
 
-text {*
+text \<open>
   Locale interpretation is \emph{dynamic}.  The statement
   \isakeyword{sublocale} $l_1 \subseteq l_2$ will not just add the
   current conclusions of $l_2$ to $l_1$.  Rather the dependency is
@@ -764,6 +764,6 @@
   effect along chains of sublocales.  Even cycles in the sublocale relation are
   supported, as long as these cycles do not lead to infinite chains.
   Details are discussed in the technical report @{cite Ballarin2006a}.
-  See also Section~\ref{sec:infinite-chains} of this tutorial.  *}
+  See also Section~\ref{sec:infinite-chains} of this tutorial.\<close>
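
text \<open>A minimal sketch of this dynamic behaviour: once the sublocale
  relations above are in place, lattice facts such as @{thm [source]
  meet_left} and @{thm [source] join_left} can be referred to directly
  inside @{text total_order}.\<close>

  context total_order
  begin
    thm meet_left join_left  (* inherited through the sublocale relation *)
  end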
 
 end
--- a/src/Doc/Locales/Examples1.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Locales/Examples1.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -2,10 +2,10 @@
 imports Examples
 begin
 
-section {* Use of Locales in Theories and Proofs
-  \label{sec:interpretation} *}
+section \<open>Use of Locales in Theories and Proofs
+  \label{sec:interpretation}\<close>
 
-text {*
+text \<open>
   Locales can be interpreted in the contexts of theories and
   structured proofs.  These interpretations are dynamic, too.
   Conclusions of locales will be propagated to the current theory or
@@ -23,21 +23,21 @@
   with the interpretation that @{term "op \<le>"} is a partial order.  The
   facilities of the interpretation command are explored gradually in
   three versions.
-  *}
+\<close>
 
 
-subsection {* First Version: Replacement of Parameters Only
-  \label{sec:po-first} *}
+subsection \<open>First Version: Replacement of Parameters Only
+  \label{sec:po-first}\<close>
 
-text {*
+text \<open>
   The command \isakeyword{interpretation} is for the interpretation of
   locales in theories.  In the following example, the parameter of locale
   @{text partial_order} is replaced by @{term "op \<le> :: int \<Rightarrow> int \<Rightarrow>
   bool"} and the locale instance is interpreted in the current
-  theory. *}
+  theory.\<close>
 
   interpretation %visible int: partial_order "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool"
-txt {* \normalsize
+txt \<open>\normalsize
   The argument of the command is a simple \emph{locale expression}
   consisting of the name of the interpreted locale, which is
   preceded by the qualifier @{text "int:"} and succeeded by a
@@ -50,10 +50,10 @@
 
   The command creates the goal
   @{subgoals [display]} which can be shown easily:
- *}
+\<close>
     by unfold_locales auto
 
-text {*  The effect of the command is that instances of all
+text \<open>The effect of the command is that instances of all
   conclusions of the locale are available in the theory, where names
   are prefixed by the qualifier.  For example, transitivity for @{typ
   int} is named @{thm [source] int.trans} and is the following
@@ -61,12 +61,12 @@
   @{thm [display, indent=2] int.trans}
   It is not possible to reference this theorem simply as @{text
   trans}.  This prevents unwanted hiding of existing theorems of the
-  theory by an interpretation. *}
+  theory by an interpretation.\<close>
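
text \<open>A minimal sketch of using such an interpreted theorem (the lemma
  name is hypothetical): @{thm [source] int.trans} discharges a concrete
  transitivity goal on @{typ int}.\<close>

  lemma int_trans_sketch:
    fixes i j k :: int
    assumes "i \<le> j" and "j \<le> k"
    shows "i \<le> k"
    using assms by (rule int.trans)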
 
 
-subsection {* Second Version: Replacement of Definitions *}
+subsection \<open>Second Version: Replacement of Definitions\<close>
 
-text {* Not only does the above interpretation qualify theorem names.
+text \<open>Not only does the above interpretation qualify theorem names.
   The prefix @{text int} is applied to all names introduced in locale
   conclusions, including names introduced in definitions.  The
   qualified name @{text int.less} is short for
@@ -85,5 +85,5 @@
   accepts \emph{equations} in addition to the parameter instantiation.
   These follow the locale expression and are indicated with the
   keyword \isakeyword{where}.  This is the revised interpretation:
-  *}
+\<close>
 end
--- a/src/Doc/Locales/Examples2.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Locales/Examples2.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -4,20 +4,20 @@
   interpretation %visible int: partial_order "op \<le> :: [int, int] \<Rightarrow> bool"
     where "int.less x y = (x < y)"
   proof -
-    txt {* \normalsize The goals are now:
+    txt \<open>\normalsize The goals are now:
       @{subgoals [display]}
-      The proof that~@{text \<le>} is a partial order is as above. *}
+      The proof that~@{text \<le>} is a partial order is as above.\<close>
     show "partial_order (op \<le> :: int \<Rightarrow> int \<Rightarrow> bool)"
       by unfold_locales auto
-    txt {* \normalsize The second goal is shown by unfolding the
-      definition of @{term "partial_order.less"}. *}
+    txt \<open>\normalsize The second goal is shown by unfolding the
+      definition of @{term "partial_order.less"}.\<close>
     show "partial_order.less op \<le> x y = (x < y)"
-      unfolding partial_order.less_def [OF `partial_order op \<le>`]
+      unfolding partial_order.less_def [OF \<open>partial_order op \<le>\<close>]
       by auto
   qed
 
-text {* Note that the above proof is not in the context of the
+text \<open>Note that the above proof is not in the context of the
   interpreted locale.  Hence, the premise of @{text
   "partial_order.less_def"} is discharged manually with @{text OF}.
-  *}
+\<close>
 end
--- a/src/Doc/Locales/Examples3.thy	Mon Oct 12 21:11:48 2015 +0200
+++ b/src/Doc/Locales/Examples3.thy	Mon Oct 12 21:15:10 2015 +0200
@@ -2,10 +2,10 @@
 imports Examples
 begin
 
-subsection {* Third Version: Local Interpretation
-  \label{sec:local-interpretation} *}
+subsection \<open>Third Version: Local Interpretation
+  \label{sec:local-interpretation}\<close>
 
-text {* In the above example, the fact that @{term "op \<le>"} is a partial
+text \<open>In the above example, the fact that @{term "op \<le>"} is a partial
   order for the integers was used in the second goal to
   discharge the premise in the definition of @{text "op \<sqsubset>"}.  In
   general, proofs of the equations may not only involve definitions
@@ -14,7 +14,7 @@
   interpreted locale conclusions temporarily available in the proof.
   This can be achieved by a locale interpretation in the proof body.
   The command for local interpretations is \isakeyword{interpret}.  We
-  repeat the example from the previous section to illustrate this. *}
+  repeat the example from the previous section to illustrate this.\<close>
 
   interpretation %visible int: partial_order "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool"
     where "int.less x y = (x < y)"
@@ -26,48 +26,48 @@
       unfolding int.less_def by auto
   qed
 
-text {* The inner interpretation is immediate from the preceding fact
+text \<open>The inner interpretation is immediate from the preceding fact
   and proved by assumption (Isar short hand ``.'').  It enriches the
   local proof context by the theorems
   also obtained in the interpretation from Section~\ref{sec:po-first},
   and @{text int.less_def} may directly be used to unfold the
   definition.  Theorems from the local interpretation disappear after
   leaving the proof context --- that is, after the succeeding
-  \isakeyword{next} or \isakeyword{qed} statement. *}
+  \isakeyword{next} or \isakeyword{qed} statement.\<close>
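
text \<open>A minimal sketch of \isakeyword{interpret} at work; the lemma
  name and the qualifier @{text ge} are hypothetical.  The dual order on
  @{typ int} is interpreted only inside the proof, so @{text ge.trans}
  is no longer available after the final \isakeyword{qed}.\<close>

  lemma dual_trans_sketch:
    fixes i j k :: int
    assumes "j \<le> i" and "k \<le> j"
    shows "k \<le> i"
  proof -
    interpret ge: partial_order "\<lambda>i j :: int. j \<le> i"
      by unfold_locales auto
    from assms show ?thesis by (rule ge.trans)
  qed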
 
 
-subsection {* Further Interpretations *}
+subsection \<open>Further Interpretations\<close>
 
-text {* Further interpretations are necessary for
+text \<open>Further interpretations are necessary for
   the other locales.  In @{text lattice} the operations~@{text \<sqinter>}
   and~@{text \<squnion>} are replaced by @{term "min :: int \<Rightarrow> int \<Rightarrow> int"}
   and @{term "max :: int \<Rightarrow> int \<Rightarrow> int"}.  The entire proof for the
   interpretation is reproduced to give an example of a more
   elaborate interpretation proof.  Note that the equations are named
-  so they can be used in a later example.  *}
+  so they can be used in a later example.\<close>
 
   interpretation %visible int: lattice "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool"
     where int_min_eq: "int.meet x y = min x y"
       and int_max_eq: "int.join x y = max x y"
   proof -
     show "lattice (op \<le> :: int \<Rightarrow> int \<Rightarrow> bool)"
-      txt {* \normalsize We have already shown that this is a partial
-        order, *}
+      txt \<open>\normalsize We have already shown that this is a partial
+        order,\<close>
       apply unfold_locales
-      txt {* \normalsize hence only the lattice axioms remain to be
+      txt \<open>\normalsize hence only the lattice axioms remain to be
         shown.
         @{subgoals [display]}
-        By @{text is_inf} and @{text is_sup}, *}
+        By @{text is_inf} and @{text is_sup},\<close>
       apply (unfold int.is_inf_def int.is_sup_def)
-      txt {* \normalsize the goals are transformed to these
+      txt \<open>\normalsize the goals are transformed to these
         statements:
         @{subgoals [display]}
         This is Presburger arithmetic, which can be solved by the
-        method @{text arith}. *}
+        method @{text arith}.\<close>
       by arith+
-    txt {* \normalsize In order to show the equations, we put ourselves
+    txt \<open>\normalsize In order to show the equations, we put ourselves
       in a situation where the lattice theorems can be used in a
-      convenient way. *}
+      convenient way.\<close>
     then interpret int: lattice "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool" .
     show "int.meet x y = min x y"
       by (bestsimp simp: int.meet_def int.is_inf_def)
@@ -75,13 +75,13 @@
       by (bestsimp simp: int.join_def int.is_sup_def)
   qed
 
-text {* Next follows that @{text "op \<le>"} is a total order, again for
-  the integers. *}
+text \<open>Next we establish that @{text "op \<le>"} is a total order, again for
+  the integers.\<close>
 
   interpretation %visible int: total_order "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool"
     by unfold_locales arith
 
-text {* Theorems that are available in the theory at this point are shown in
+text \<open>Theorems that are available in the theory at this point are shown in
   Table~\ref{tab:int-lattice}.  Two points are worth noting:
 
 \begin{table}
@@ -123,9 +123,9 @@
   related interpretations --- that is, for interpretations that share
   the instances of parameters they have in common.
 \end{itemize}
-  *}
+\<close>
 
-text {* The interpretations for a locale $n$ within the current
+text \<open>The interpretations for a locale $n$ within the current
   theory may be inspected with \isakeyword{print\_interps}~$n$.  This
   prints the list of instances of $n$ for which interpretations exist.
   For example, \isakeyword{print\_interps} @{term partial_order}
@@ -144,12 +144,12 @@
   prevent accidental hiding of names, while optional qualifiers can be
   more convenient to use.  For \isakeyword{interpretation}, the
   default is ``!''.
-*}
+\<close>
 
 
-section {* Locale Expressions \label{sec:expressions} *}
+section \<open>Locale Expressions \label{sec:expressions}\<close>
 
-text {*
+text \<open>
   A map~@{term \<phi>} between partial orders~@{text \<sqsubseteq>} and~@{text \<preceq>}
   is called order preserving if @{text "x \<sqsubseteq> y"} implies @{text "\<phi> x \<preceq>
   \<phi> y"}.  This situation is more complex than those encountered so
@@ -206,7 +206,7 @@
 
   Two context elements for the map parameter~@{text \<phi>} and the
   assumptions that it is order preserving complete the locale
-  declaration. *}
+  declaration.\<close>
 
   locale order_preserving =
     le: partial_order le + le': partial_order le'
@@ -214,7 +214,7 @@
     fixes \<phi>
     assumes hom_le: "x \<sqsubseteq> y \<Longrightarrow> \<phi> x \<preceq> \<phi> y"
 
-text (in order_preserving) {* Here are examples of theorems that are
+text (in order_preserving) \<open>Here are examples of theorems that are
   available in the locale:
 
   \hspace*{1em}@{thm [source] hom_le}: @{thm hom_le}
@@ -228,34 +228,34 @@
   "op \<preceq>"}.  The abbreviation @{text less} with its infix syntax is only
   available for the original instance it was declared for.  We may
   introduce the abbreviation @{text less'} with infix syntax~@{text \<prec>}
-  with the following declaration: *}
+  with the following declaration:\<close>
 
   abbreviation (in order_preserving)
     less' (infixl "\<prec>" 50) where "less' \<equiv> partial_order.less le'"
 
-text (in order_preserving) {* Now the theorem is displayed nicely as
+text (in order_preserving) \<open>Now the theorem is displayed nicely as
   @{thm [source]  le'.less_le_trans}:
-  @{thm [display, indent=2] le'.less_le_trans} *}
+  @{thm [display, indent=2] le'.less_le_trans}\<close>
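
text \<open>A minimal sketch of the new syntax in use; the lemma name is
  hypothetical, and @{thm [source] le'.less_def} is assumed to be the
  qualified name of the definition of @{text less'}.\<close>

  lemma (in order_preserving) hom_less_sketch:
    assumes "x \<sqsubseteq> y" and "\<phi> x \<noteq> \<phi> y"
    shows "\<phi> x \<prec> \<phi> y"
    using assms unfolding le'.less_def by (blast intro: hom_le)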
 
-text {* There are short notations for locale expressions.  These are
-  discussed in the following. *}
+text \<open>There are short notations for locale expressions.  These are
+  discussed in the following.\<close>
 
 
-subsection {* Default Instantiations *}
+subsection \<open>Default Instantiations\<close>
 
-text {* 
+text \<open>
   It is possible to omit parameter instantiations.  The
   instantiation then defaults to the name of
   the parameter itself.  For example, the locale expression @{text
   partial_order} is short for @{text "partial_order le"}, since the
   locale's single parameter is~@{text le}.  We took advantage of this
   in the \isakeyword{sublocale} declarations of
-  Section~\ref{sec:changing-the-hierarchy}. *}
+  Section~\ref{sec:changing-the-hierarchy}.\<close>
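
text \<open>A brief sketch with hypothetical locale names: the following two
  declarations are equivalent, the first relying on the default
  instantiation of the parameter @{text le}.\<close>

  locale partial_order_copy1 = partial_order
  locale partial_order_copy2 = partial_order le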
 
 
-subsection {* Implicit Parameters \label{sec:implicit-parameters} *}
+subsection \<open>Implicit Parameters \label{sec:implicit-parameters}\<close>
 
-text {* In a locale expression that occurs within a locale
+text \<open>In a locale expression that occurs within a locale
   declaration, omitted parameters additionally extend the (possibly
   empty) \isakeyword{for} clause.
 
@@ -281,12 +281,12 @@
 \end{small}
   This short hand was used in the locale declarations throughout
   Section~\ref{sec:import}.
- *}
+\<close>
 
-text{*
+text\<open>
   The following locale declarations provide more examples.  A
   map~@{text \<phi>} is a lattice homomorphism if it preserves meet and
-  join. *}
+  join.\<close>
 
   locale lattice_hom =
     le: lattice + le': lattice le' for le' (infixl "\<preceq>" 50) +
@@ -294,7 +294,7 @@
     assumes hom_meet: "\<phi> (x \<sqinter> y) = le'.meet (\<phi> x) (\<phi> y)"
       and hom_join: "\<phi> (x \<squnion> y) = le'.join (\<phi> x) (\<phi> y)"
 
-text {* The parameter instantiation in the first instance of @{term
+text \<open>The parameter instantiation in the first instance of @{term
   lattice} is omitted.  This causes the parameter~@{text le} to be
   added to the \isakeyword{for} clause, and the locale has
   parameters~@{text le},~@{text le'} and, of course,~@{text \<phi>}.
@@ -302,7 +302,7 @@
   Before turning to the second example, we complete the locale by
   providing infix syntax for the meet and join operations of the
   second lattice.
-*}
+\<close>
 
   context lattice_hom
   begin
@@ -310,20 +310,20 @@
   abbreviation join' (infixl "\<squnion>''" 50) where "join' \<equiv> le'.join"
   end
 
-text {* The next example makes radical use of the short hand
+text \<open>The next example makes radical use of the short hand
   facilities.  A homomorphism is an endomorphism if both orders
-  coincide. *}
+  coincide.\<close>
 
   locale lattice_end = lattice_hom _ le
 
-text {* The notation~@{text _} enables to omit a parameter in a
+text \<open>The notation~@{text _} makes it possible to omit a parameter in a
   positional instantiation.  The omitted parameter,~@{text le}, becomes
   the parameter of the declared locale and is, in the following
   position, used to instantiate the second parameter of @{text
   lattice_hom}.  The effect is that of identifying the first and second
-  parameter of the homomorphism locale. *}
+  parameters of the homomorphism locale.\<close>
 
-text {* The inheritance diagram of the situation we have now is shown
+text \<open>The inheritance diagram of the situation we have now is shown
   in Figure~\ref{fig:hom}, where the dashed line depicts an
   interpretation which is introduced below.  Parameter instantiations
   are indicated by $\sqsubseteq \mapsto \preceq$ etc.  By looking at
@@ -364,11 +364,11 @@
 \caption{Hierarchy of Homomorphism Locales.}
 \label{fig:hom}
 \end{figure}
-  *}
+\<close>
 
-text {* It can be shown easily that a lattice homomorphism is order
+text \<open>It can be shown easily that a lattice homomorphism is order
   preserving.  As the final example of this section, a locale
-  interpretation is used to assert this: *}
+  interpretation is used to assert this:\<close>
 
   sublocale lattice_hom \<subseteq> order_preserving
   proof unfold_locales
@@ -379,19 +379,19 @@
     then show "\<phi> x \<preceq> \<phi> y" by (simp add: le'.join_connection)
   qed
 
-text (in lattice_hom) {*
+text (in lattice_hom) \<open>
   Theorems and other declarations --- syntax, in particular --- from
   the locale @{text order_preserving} are now active in @{text
   lattice_hom}, for example
   @{thm [source] hom_le}:
   @{thm [display, indent=2] hom_le}
   This theorem will be useful in the following section.
-  *}
+\<close>
 
 
-section {* Conditional Interpretation *}
+section \<open>Conditional Interpretation\<close>
 
-text {* There are situations where an interpretation is not possible
+text \<open>There are situations where an interpretation is not possible
   in the general case since the desired property is only valid if
   certain conditions are fulfilled.  Take, for example, the function
   @{text "\<lambda>i. n * i"} that scales its argument by a constant factor.
@@ -407,58 +407,58 @@
   contexts.  What is
   required is an interpretation that depends on the condition --- and
   this can be done with the \isakeyword{sublocale} command.  For this
-  purpose, we introduce a locale for the condition. *}
+  purpose, we introduce a locale for the condition.\<close>
 
   locale non_negative =
     fixes n :: int
     assumes non_neg: "0 \<le> n"
 
-text {* It is again convenient to make the interpretation in an
+text \<open>It is again convenient to make the interpretation in an
   incremental fashion, first for order preserving maps, then for
-  lattice endomorphisms. *}
+  lattice endomorphisms.\<close>
 
   sublocale non_negative \<subseteq>
       order_preserving "op \<le>" "op \<le>" "\<lambda>i. n * i"
     using non_neg by unfold_locales (rule mult_left_mono)
 
-text {* While the proof of the previous interpretation
+text \<open>While the proof of the previous interpretation
   is straightforward from monotonicity lemmas for~@{term "op *"}, the
-  second proof follows a useful pattern. *}
+  second proof follows a useful pattern.\<close>
 
   sublocale %visible non_negative \<subseteq> lattice_end "op \<le>" "\<lambda>i. n * i"
   proof (unfold_locales, unfold int_min_eq int_max_eq)
-    txt {* \normalsize Unfolding the locale predicates \emph{and} the
+    txt \<open>\normalsize Unfolding the locale predicates \emph{and} the
       interpretation equations immediately yields two subgoals that
       reflect the core conjecture.
       @{subgoals [display]}
       It is now necessary to show, in the context of @{term
       non_negative}, that multiplication by~@{term n} commutes with
-      @{term min} and @{term max}. *}
+      @{term min} and @{term max}.\<close>
   qed (auto simp: hom_le)
 
-text (in order_preserving) {* The lemma @{thm [source] hom_le}
+text (in order_preserving) \<open>The lemma @{thm [source] hom_le}
   simplifies a proof that would have otherwise been lengthy and we may
-  consider making it a default rule for the simplifier: *}
+  consider making it a default rule for the simplifier:\<close>
 
   lemmas (in order_preserving) hom_le [simp]
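
text \<open>A brief sketch of the effect (unnamed lemma, for illustration
  only): with @{thm [source] hom_le} declared as a default simp rule,
  goals of the following shape are discharged automatically.\<close>

  lemma (in order_preserving) "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> \<phi> x \<preceq> \<phi> y \<and> \<phi> y \<preceq> \<phi> z"
    by simp  (* both conjuncts are instances of hom_le *)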
 
 
-subsection {* Avoiding Infinite Chains of Interpretations
-  \label{sec:infinite-chains} *}
+subsection \<open>Avoiding Infinite Chains of Interpretations
+  \label{sec:infinite-chains}\<close>
 
-text {* Similar situations arise frequently in formalisations of
+text \<open>Similar situations arise frequently in formalisations of
   abstract algebra where it is desirable to express that certain
   constructions preserve certain properties.  For example, polynomials
   over rings are rings, or --- an example from the domain where the
   illustrations of this tutorial are taken --- a partial order
   may be obtained for a function space by point-wise lifting of the
   partial order of the co-domain.  This corresponds to the following
-  interpretation: *}
+  interpretation:\<close>
 
   sublocale %visible partial_order \<subseteq> f: partial_order "\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x"
     oops
 
-text {* Unfortunately this is a cyclic interpretation that leads to an
+text \<open>Unfortunately this is a cyclic interpretation that leads to an
   infinite chain, namely
   @{text [display, indent=2] "partial_order \<subseteq> partial_order (\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x) \<subseteq>
   partial_order (\<lambda>f g. \<forall>x y. f x y \<sqsubseteq> g x y) \<subseteq>  \<dots>"}
@@ -467,7 +467,7 @@
   Instead it is necessary to declare a locale that is logically
   equivalent to @{term partial_order} but serves to collect facts
   about function spaces where the co-domain is a partial order, and
-  to make the interpretation in its context: *}
+  to make the interpretation in its context:\<close>
 
   locale fun_partial_order = partial_order
 
@@ -475,10 +475,10 @@
       f: partial_order "\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x"
     by unfold_locales (fast,rule,fast,blast intro: trans)
 
-text {* It is quite common in abstract algebra that such a construction
+text \<open>It is quite common in abstract algebra that such a construction
   maps a hierarchy of algebraic structures (or specifications) to a
   related hierarchy.  By means of the same lifting, a function space
-  is a lattice if its co-domain is a lattice: *}
+  is a lattice if its co-domain is a lattice:\<close>
 
   locale fun_lattice = fun_partial_order + lattice
 
@@ -498,9 +498,9 @@
   qed
 
 
-section {* Further Reading *}
+section \<open>Further Reading\<close>
 
-text {* More information on locales and their interpretation is
+text \<open>More information on locales and their interpretation is
   available.  For the locale hierarchy of import and interpretation
   dependencies see~@{cite Ballarin2006a}; interpretations in theories
   and proofs are covered in~@{cite Ballarin2006b}.  In the latter, I
@@ -532,9 +532,9 @@
   The sources of this tutorial, which include all proofs, are
   available with the Isabelle distribution at
   @{url "http://isabelle.in.tum.de"}.
-  *}
+\<close>
 
-text {*
+text \<open>
 \begin{table}
 \hrule
 \vspace{2ex}
@@ -635,18 +635,18 @@
 \caption{Syntax of Locale Commands.}
 \label{tab:commands}
 \end{table}
-  *}
+\<close>
 
-text {* \textbf{Revision History.}  For the present third revision of
+text \<open>\textbf{Revision History.}  For the present third revision of
   the tutorial, much of the explanatory text
   was rewritten.  Inheritance of interpretation equations is
   available with the forthcoming release of Isabelle, which at the
   time of editing these notes is expected for the end of 2009.
   The second revision accommodates changes introduced by the locales
   reimplementation for Isabelle 2009.  Most notably locale expressions
-  have been generalised from renaming to instantiation.  *}
+  have been generalised from renaming to instantiation.\<close>
 
-text {* \textbf{Acknowledgements.}  Alexander Krauss, Tobias Nipkow,
+text \<open>\textbf{Acknowledgements.}  Alexander Krauss, Tobias Nipkow,
   Randy Pollack, Andreas Schropp, Christian Sternagel and Makarius Wenzel
   have made
   useful comments on earlier versions of this document.  The section
@@ -654,6 +654,6 @@
   enquiries the author received from locale users, and which suggested
   that this use case is important enough to deserve explicit
   explanation.  The term \emph{conditional interpretation} is due to
-  Larry Paulson. *}
+  Larry Paulson.\<close>
 
 end