author wenzelm
date Fri, 12 Jan 2018 14:08:53 +0100
changeset 67406 23307fd33906
parent 67405 e9ab4ad7bd15
child 67407 dbaa38bd223a
isabelle update_cartouches -c;
src/Doc/Classes/Classes.thy
src/Doc/Functions/Functions.thy
src/Doc/How_to_Prove_it/How_to_Prove_it.thy
src/Doc/Logics_ZF/FOL_examples.thy
src/Doc/Logics_ZF/IFOL_examples.thy
src/Doc/Logics_ZF/If.thy
src/Doc/Logics_ZF/ZF_Isar.thy
src/Doc/Logics_ZF/ZF_examples.thy
src/Doc/Prog_Prove/Basics.thy
src/Doc/Prog_Prove/Bool_nat_list.thy
src/Doc/Prog_Prove/Isar.thy
src/Doc/Prog_Prove/LaTeXsugar.thy
src/Doc/Prog_Prove/Logic.thy
src/Doc/Prog_Prove/Types_and_funs.thy
src/Doc/Sugar/Sugar.thy
src/Doc/Tutorial/Advanced/Partial.thy
src/Doc/Tutorial/Advanced/WFrec.thy
src/Doc/Tutorial/Advanced/simp2.thy
src/Doc/Tutorial/CTL/Base.thy
src/Doc/Tutorial/CTL/CTL.thy
src/Doc/Tutorial/CTL/CTLind.thy
src/Doc/Tutorial/CTL/PDL.thy
src/Doc/Tutorial/CodeGen/CodeGen.thy
src/Doc/Tutorial/Datatype/ABexpr.thy
src/Doc/Tutorial/Datatype/Fundata.thy
src/Doc/Tutorial/Datatype/Nested.thy
src/Doc/Tutorial/Documents/Documents.thy
src/Doc/Tutorial/Fun/fun0.thy
src/Doc/Tutorial/Ifexpr/Ifexpr.thy
src/Doc/Tutorial/Inductive/AB.thy
src/Doc/Tutorial/Inductive/Advanced.thy
src/Doc/Tutorial/Inductive/Even.thy
src/Doc/Tutorial/Inductive/Mutual.thy
src/Doc/Tutorial/Inductive/Star.thy
src/Doc/Tutorial/Misc/AdvancedInd.thy
src/Doc/Tutorial/Misc/Itrev.thy
src/Doc/Tutorial/Misc/Option2.thy
src/Doc/Tutorial/Misc/Plus.thy
src/Doc/Tutorial/Misc/Tree.thy
src/Doc/Tutorial/Misc/Tree2.thy
src/Doc/Tutorial/Misc/appendix.thy
src/Doc/Tutorial/Misc/case_exprs.thy
src/Doc/Tutorial/Misc/fakenat.thy
src/Doc/Tutorial/Misc/natsum.thy
src/Doc/Tutorial/Misc/pairs2.thy
src/Doc/Tutorial/Misc/prime_def.thy
src/Doc/Tutorial/Misc/simp.thy
src/Doc/Tutorial/Misc/types.thy
src/Doc/Tutorial/Protocol/Event.thy
src/Doc/Tutorial/Protocol/Message.thy
src/Doc/Tutorial/Protocol/NS_Public.thy
src/Doc/Tutorial/Protocol/Public.thy
src/Doc/Tutorial/Recdef/Induction.thy
src/Doc/Tutorial/Recdef/Nested0.thy
src/Doc/Tutorial/Recdef/Nested1.thy
src/Doc/Tutorial/Recdef/Nested2.thy
src/Doc/Tutorial/Recdef/examples.thy
src/Doc/Tutorial/Recdef/simplification.thy
src/Doc/Tutorial/Recdef/termination.thy
src/Doc/Tutorial/Rules/Basic.thy
src/Doc/Tutorial/Rules/Blast.thy
src/Doc/Tutorial/Rules/Force.thy
src/Doc/Tutorial/Rules/Forward.thy
src/Doc/Tutorial/Rules/TPrimes.thy
src/Doc/Tutorial/Rules/Tacticals.thy
src/Doc/Tutorial/Rules/find2.thy
src/Doc/Tutorial/Sets/Examples.thy
src/Doc/Tutorial/Sets/Functions.thy
src/Doc/Tutorial/Sets/Recur.thy
src/Doc/Tutorial/Sets/Relations.thy
src/Doc/Tutorial/ToyList/ToyList.thy
src/Doc/Tutorial/ToyList/ToyList_Test.thy
src/Doc/Tutorial/Trie/Trie.thy
src/Doc/Tutorial/Types/Axioms.thy
src/Doc/Tutorial/Types/Numbers.thy
src/Doc/Tutorial/Types/Overloading.thy
src/Doc/Tutorial/Types/Pairs.thy
src/Doc/Tutorial/Types/Records.thy
src/Doc/Tutorial/Types/Typedefs.thy
src/HOL/Data_Structures/AA_Map.thy
src/HOL/Data_Structures/AA_Set.thy
src/HOL/Data_Structures/AList_Upd_Del.thy
src/HOL/Data_Structures/AVL_Map.thy
src/HOL/Data_Structures/AVL_Set.thy
src/HOL/Data_Structures/Brother12_Map.thy
src/HOL/Data_Structures/Brother12_Set.thy
src/HOL/Data_Structures/Cmp.thy
src/HOL/Data_Structures/Leftist_Heap.thy
src/HOL/Data_Structures/Less_False.thy
src/HOL/Data_Structures/List_Ins_Del.thy
src/HOL/Data_Structures/Map_by_Ordered.thy
src/HOL/Data_Structures/Set_by_Ordered.thy
src/HOL/Data_Structures/Sorted_Less.thy
src/HOL/Data_Structures/Tree234.thy
src/HOL/Data_Structures/Tree234_Set.thy
src/HOL/Data_Structures/Tree23_Set.thy
src/HOL/IMP/ACom.thy
src/HOL/IMP/AExp.thy
src/HOL/IMP/ASM.thy
src/HOL/IMP/Abs_Int0.thy
src/HOL/IMP/Abs_Int1.thy
src/HOL/IMP/Abs_Int1_const.thy
src/HOL/IMP/Abs_Int1_parity.thy
src/HOL/IMP/Abs_Int2.thy
src/HOL/IMP/Abs_Int2_ivl.thy
src/HOL/IMP/Abs_Int3.thy
src/HOL/IMP/Abs_Int_Tests.thy
src/HOL/IMP/Abs_Int_init.thy
src/HOL/IMP/Abs_State.thy
src/HOL/IMP/BExp.thy
src/HOL/IMP/Big_Step.thy
src/HOL/IMP/C_like.thy
src/HOL/IMP/Collecting.thy
src/HOL/IMP/Collecting1.thy
src/HOL/IMP/Collecting_Examples.thy
src/HOL/IMP/Compiler.thy
src/HOL/IMP/Compiler2.thy
src/HOL/IMP/Def_Init_Big.thy
src/HOL/IMP/Def_Init_Small.thy
src/HOL/IMP/Denotational.thy
src/HOL/IMP/Finite_Reachable.thy
src/HOL/IMP/Hoare.thy
src/HOL/IMP/Hoare_Examples.thy
src/HOL/IMP/Hoare_Total.thy
src/HOL/IMP/Hoare_Total_EX.thy
src/HOL/IMP/Hoare_Total_EX2.thy
src/HOL/IMP/Live.thy
src/HOL/IMP/Live_True.thy
src/HOL/IMP/OO.thy
src/HOL/IMP/Poly_Types.thy
src/HOL/IMP/Sec_Type_Expr.thy
src/HOL/IMP/Sec_Typing.thy
src/HOL/IMP/Sec_TypingT.thy
src/HOL/IMP/Sem_Equiv.thy
src/HOL/IMP/Small_Step.thy
src/HOL/IMP/Star.thy
src/HOL/IMP/Types.thy
src/HOL/IMP/VCG.thy
src/HOL/IMP/VCG_Total_EX.thy
src/HOL/IMP/VCG_Total_EX2.thy
src/HOL/IMP/Vars.thy
--- a/src/Doc/Classes/Classes.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Classes/Classes.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -558,12 +558,12 @@
 context %quote semigroup
 begin
 
-term %quote "x \<otimes> y" -- \<open>example 1\<close>
-term %quote "(x::nat) \<otimes> y" -- \<open>example 2\<close>
+term %quote "x \<otimes> y" \<comment> \<open>example 1\<close>
+term %quote "(x::nat) \<otimes> y" \<comment> \<open>example 2\<close>
 
 end  %quote
 
-term %quote "x \<otimes> y" -- \<open>example 3\<close>
+term %quote "x \<otimes> y" \<comment> \<open>example 3\<close>
 
 text \<open>
   \<^noindent> Here in example 1, the term refers to the local class
--- a/src/Doc/Functions/Functions.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Functions/Functions.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1095,11 +1095,11 @@
   let ?R = "measure (\<lambda>x. 101 - x)"
   show "wf ?R" ..
 
-  fix n :: nat assume "\<not> 100 < n" -- "Assumptions for both calls"
+  fix n :: nat assume "\<not> 100 < n" \<comment> "Assumptions for both calls"
 
-  thus "(n + 11, n) \<in> ?R" by simp -- "Inner call"
+  thus "(n + 11, n) \<in> ?R" by simp \<comment> "Inner call"
 
-  assume inner_trm: "f91_dom (n + 11)" -- "Outer call"
+  assume inner_trm: "f91_dom (n + 11)" \<comment> "Outer call"
   with f91_estimate have "n + 11 < f91 (n + 11) + 11" .
   with \<open>\<not> 100 < n\<close> show "(f91 (n + 11), n) \<in> ?R" by simp
 qed
--- a/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 imports Complex_Main
 begin
 (*>*)
-text{*
+text\<open>
 \chapter{@{theory Main}}
 
 \section{Natural numbers}
@@ -34,12 +34,12 @@
 
 \noindent
 Example:
-*}
+\<close>
 
 lemma fixes x :: int shows "x ^ 3 = x * x * x"
 by (simp add: numeral_eq_Suc)
 
-text{* This is a typical situation: function ``@{text"^"}'' is defined
+text\<open>This is a typical situation: function ``@{text"^"}'' is defined
 by pattern matching on @{const Suc} but is applied to a numeral.
 
 Note: simplification with @{thm[source] numeral_eq_Suc} will convert all numerals.
@@ -80,7 +80,7 @@
 But what to do when proper multiplication is involved?
 At this point it can be helpful to simplify with the lemma list
 @{thm [source] algebra_simps}. Examples:
-*}
+\<close>
 
 lemma fixes x :: int
   shows "(x + y) * (y - z) = (y - z) * x + y * (y-z)"
@@ -90,7 +90,7 @@
   shows "(x + y) * (y - z) = (y - z) * x + y * (y-z)"
 by(simp add: algebra_simps)
 
-text{*
+text\<open>
 Rewriting with @{thm[source] algebra_simps} has the following effect:
 terms are rewritten into a normal form by multiplying out,
 rearranging sums and products into some canonical order.
@@ -101,33 +101,33 @@
 and @{class comm_ring}) this yields a decision procedure for equality.
 
 Additional function and predicate symbols are not a problem either:
-*}
+\<close>
 
 lemma fixes f :: "int \<Rightarrow> int" shows "2 * f(x*y) - f(y*x) < f(y*x) + 1"
 by(simp add: algebra_simps)
 
-text{* Here @{thm[source]algebra_simps} merely has the effect of rewriting
+text\<open>Here @{thm[source]algebra_simps} merely has the effect of rewriting
 @{term"y*x"} to @{term"x*y"} (or the other way around). This yields
 a problem of the form @{prop"2*t - t < t + (1::int)"} and we are back in the
 realm of linear arithmetic.
 
 Because @{thm[source]algebra_simps} multiplies out, terms can explode.
 If one merely wants to bring sums or products into a canonical order
-it suffices to rewrite with @{thm [source] ac_simps}: *}
+it suffices to rewrite with @{thm [source] ac_simps}:\<close>
 
 lemma fixes f :: "int \<Rightarrow> int" shows "f(x*y*z) - f(z*x*y) = 0"
 by(simp add: ac_simps)
 
-text{* The lemmas @{thm[source]algebra_simps} take care of addition, subtraction
+text\<open>The lemmas @{thm[source]algebra_simps} take care of addition, subtraction
 and multiplication (algebraic structures up to rings) but ignore division (fields).
 The lemmas @{thm[source]field_simps} also deal with division:
-*}
+\<close>
 
 lemma fixes x :: real shows "x+z \<noteq> 0 \<Longrightarrow> 1 + y/(x+z) = (x+y+z)/(x+z)"
 by(simp add: field_simps)
 
-text{* Warning: @{thm[source]field_simps} can blow up your terms
-beyond recognition. *}
+text\<open>Warning: @{thm[source]field_simps} can blow up your terms
+beyond recognition.\<close>
 
 (*<*)
 end
--- a/src/Doc/Logics_ZF/FOL_examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/FOL_examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,30 +1,30 @@
-section{*Examples of Classical Reasoning*}
+section\<open>Examples of Classical Reasoning\<close>
 
 theory FOL_examples imports FOL begin
 
 lemma "EX y. ALL x. P(y)-->P(x)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule exCI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{*see below for @{text allI} combined with @{text swap}*}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+txt\<open>see below for @{text allI} combined with @{text swap}\<close>
 apply (erule allI [THEN [2] swap])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule notE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption
 done
 
-text {*
+text \<open>
 @{thm[display] allI [THEN [2] swap]}
-*}
+\<close>
 
 lemma "EX y. ALL x. P(y)-->P(x)"
 by blast
--- a/src/Doc/Logics_ZF/IFOL_examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/IFOL_examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,56 +1,56 @@
-section{*Examples of Intuitionistic Reasoning*}
+section\<open>Examples of Intuitionistic Reasoning\<close>
 
 theory IFOL_examples imports IFOL begin
 
-text{*Quantifier example from the book Logic and Computation*}
+text\<open>Quantifier example from the book Logic and Computation\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule exI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{*Now @{text "apply assumption"} fails*}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+txt\<open>Now @{text "apply assumption"} fails\<close>
 oops
 
-text{*Trying again, with the same first two steps*}
+text\<open>Trying again, with the same first two steps\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule exI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 done
 
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-by (tactic {*IntPr.fast_tac @{context} 1*})
+by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*Example of Dyckhoff's method*}
+text\<open>Example of Dyckhoff's method\<close>
 lemma "~ ~ ((P-->Q) | (Q-->P))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (unfold not_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule disj_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule imp_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
  apply (erule imp_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
 apply (erule FalseE)+
 done
--- a/src/Doc/Logics_ZF/If.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/If.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -12,35 +12,35 @@
 
 lemma ifI:
     "[| P ==> Q; ~P ==> R |] ==> if(P,Q,R)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply blast
 done
 
 lemma ifE:
    "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~P; R |] ==> S |] ==> S"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply blast
 done
 
 lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule iffI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule ifE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule ifE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule ifI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule ifI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
-text{*Trying again from the beginning in order to use @{text blast}*}
+text\<open>Trying again from the beginning in order to use @{text blast}\<close>
 declare ifI [intro!]
 declare ifE [elim!]
 
@@ -49,34 +49,34 @@
 
 
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by blast
 
-text{*Trying again from the beginning in order to prove from the definitions*}
+text\<open>Trying again from the beginning in order to prove from the definitions\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply blast
 done
 
 
-text{*An invalid formula.  High-level rules permit a simpler diagnosis*}
+text\<open>An invalid formula.  High-level rules permit a simpler diagnosis\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply auto
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 (*The next step will fail unless subgoals remain*)
 apply (tactic all_tac)
 oops
 
-text{*Trying again from the beginning in order to prove from the definitions*}
+text\<open>Trying again from the beginning in order to prove from the definitions\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (auto) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 (*The next step will fail unless subgoals remain*)
 apply (tactic all_tac)
 oops
--- a/src/Doc/Logics_ZF/ZF_Isar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/ZF_Isar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,11 +6,11 @@
 ML_file "../antiquote_setup.ML"
 (*>*)
 
-chapter {* Some Isar language elements *}
+chapter \<open>Some Isar language elements\<close>
 
-section {* Type checking *}
+section \<open>Type checking\<close>
 
-text {*
+text \<open>
   The ZF logic is essentially untyped, so the concept of ``type
   checking'' is performed as logical reasoning about set-membership
   statements.  A special method assists users in this task; a version
@@ -39,14 +39,14 @@
   the context.
 
   \end{description}
-*}
+\<close>
 
 
-section {* (Co)Inductive sets and datatypes *}
+section \<open>(Co)Inductive sets and datatypes\<close>
 
-subsection {* Set definitions *}
+subsection \<open>Set definitions\<close>
 
-text {*
+text \<open>
   In ZF everything is a set.  The generic inductive package also
   provides a specific view for ``datatype'' specifications.
   Coinductive definitions are available in both cases, too.
@@ -97,12 +97,12 @@
   See @{cite "isabelle-ZF"} for further information on inductive
   definitions in ZF, but note that this covers the old-style theory
   format.
-*}
+\<close>
 
 
-subsection {* Primitive recursive functions *}
+subsection \<open>Primitive recursive functions\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (ZF) "primrec"} & : & @{text "theory \<rightarrow> theory"} \\
   \end{matharray}
@@ -110,12 +110,12 @@
   @{rail \<open>
     @@{command (ZF) primrec} (@{syntax thmdecl}? @{syntax prop} +)
   \<close>}
-*}
+\<close>
 
 
-subsection {* Cases and induction: emulating tactic scripts *}
+subsection \<open>Cases and induction: emulating tactic scripts\<close>
 
-text {*
+text \<open>
   The following important tactical tools of Isabelle/ZF have been
   ported to Isar.  These should not be used in proper proof texts.
 
@@ -133,6 +133,6 @@
     ;
     @@{command (ZF) inductive_cases} (@{syntax thmdecl}? (@{syntax prop} +) + @'and')
   \<close>}
-*}
+\<close>
 
 end
--- a/src/Doc/Logics_ZF/ZF_examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/ZF_examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
-section{*Examples of Reasoning in ZF Set Theory*}
+section\<open>Examples of Reasoning in ZF Set Theory\<close>
 
 theory ZF_examples imports ZFC begin
 
-subsection {* Binary Trees *}
+subsection \<open>Binary Trees\<close>
 
 consts
   bt :: "i => i"
@@ -12,11 +12,11 @@
 
 declare bt.intros [simp]
 
-text{*Induction via tactic emulation*}
+text\<open>Induction via tactic emulation\<close>
 lemma Br_neq_left [rule_format]: "l \<in> bt(A) ==> \<forall>x r. Br(x, l, r) \<noteq> l"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply (induct_tac l)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply auto
   done
 
@@ -25,26 +25,26 @@
   apply (tactic {*exhaust_tac "l" 1*})
 *)
 
-text{*The new induction method, which I don't understand*}
+text\<open>The new induction method, which I don't understand\<close>
 lemma Br_neq_left': "l \<in> bt(A) ==> (!!x r. Br(x, l, r) \<noteq> l)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply (induct set: bt)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply auto
   done
 
 lemma Br_iff: "Br(a,l,r) = Br(a',l',r') <-> a=a' & l=l' & r=r'"
-  -- "Proving a freeness theorem."
+  \<comment> "Proving a freeness theorem."
   by (blast elim!: bt.free_elims)
 
 inductive_cases Br_in_bt: "Br(a,l,r) \<in> bt(A)"
-  -- "An elimination rule, for type-checking."
+  \<comment> "An elimination rule, for type-checking."
 
-text {*
+text \<open>
 @{thm[display] Br_in_bt[no_vars]}
-*}
+\<close>
 
-subsection{*Primitive recursion*}
+subsection\<open>Primitive recursion\<close>
 
 consts  n_nodes :: "i => i"
 primrec
@@ -71,7 +71,7 @@
  by (simp add: n_nodes_tail_def n_nodes_aux_eq) 
 
 
-subsection {*Inductive definitions*}
+subsection \<open>Inductive definitions\<close>
 
 consts  Fin       :: "i=>i"
 inductive
@@ -114,7 +114,7 @@
   type_intros  llist.intros
 
 
-subsection{*Powerset example*}
+subsection\<open>Powerset example\<close>
 
 lemma Pow_mono: "A\<subseteq>B  ==>  Pow(A) \<subseteq> Pow(B)"
 apply (rule subsetI)
@@ -124,78 +124,78 @@
 done
 
 lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule equalityI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_greatest)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_lower1 [THEN Pow_mono])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_lower2 [THEN Pow_mono])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule subsetI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule IntE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule PowI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule PowD)+
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_greatest)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (assumption+)
 done
 
-text{*Trying again from the beginning in order to use @{text blast}*}
+text\<open>Trying again from the beginning in order to use @{text blast}\<close>
 lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
 by blast
 
 
 lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule subsetI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule UnionE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule UnionI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule subsetD)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
 done
 
-text{*A more abstract version of the same proof*}
+text\<open>A more abstract version of the same proof\<close>
 
 lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Union_least)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Union_upper)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule subsetD, assumption)
 done
 
 
 lemma "[| a \<in> A;  f \<in> A->B;  g \<in> C->D;  A \<inter> C = 0 |] ==> (f \<union> g)`a = f`a"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule apply_equality)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule UnI1)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule apply_Pair)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule fun_disjoint_Un)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
 done
 
--- a/src/Doc/Prog_Prove/Basics.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Basics.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 imports Main
 begin
 (*>*)
-text{*
+text\<open>
 This chapter introduces HOL as a functional programming language and shows
 how to prove properties of functional programs by induction.
 
@@ -149,7 +149,7 @@
 to see the proof state in the output window.
 \end{warn}
 \fi
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Bool_nat_list.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Bool_nat_list.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,7 +4,7 @@
 begin
 (*>*)
 
-text{*
+text\<open>
 \vspace{-4ex}
 \section{\texorpdfstring{Types @{typ bool}, @{typ nat} and @{text list}}{Types bool, nat and list}}
 
@@ -19,13 +19,13 @@
 with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
 with many predefined functions:  @{text "\<not>"}, @{text "\<and>"}, @{text "\<or>"}, @{text
 "\<longrightarrow>"}, etc. Here is how conjunction could be defined by pattern matching:
-*}
+\<close>
 
 fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
 "conj True True = True" |
 "conj _ _ = False"
 
-text{* Both the datatype and function definitions roughly follow the syntax
+text\<open>Both the datatype and function definitions roughly follow the syntax
 of functional programming languages.
 
 \subsection{Type \indexed{@{typ nat}}{nat}}
@@ -37,13 +37,13 @@
 @{text 0}, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
 There are many predefined functions: @{text "+"}, @{text "*"}, @{text
 "\<le>"}, etc. Here is how you could define your own addition:
-*}
+\<close>
 
 fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "add 0 n = n" |
 "add (Suc m) n = Suc(add m n)"
 
-text{* And here is a proof of the fact that @{prop"add m 0 = m"}: *}
+text\<open>And here is a proof of the fact that @{prop"add m 0 = m"}:\<close>
 
 lemma add_02: "add m 0 = m"
 apply(induction m)
@@ -53,7 +53,7 @@
 lemma "add m 0 = m"
 apply(induction m)
 (*>*)
-txt{* The \isacom{lemma} command starts the proof and gives the lemma
+txt\<open>The \isacom{lemma} command starts the proof and gives the lemma
 a name, @{text add_02}. Properties of recursively defined functions
 need to be established by induction in most cases.
 Command \isacom{apply}@{text"(induction m)"} instructs Isabelle to
@@ -75,11 +75,11 @@
 the induction hypothesis.
 As a result of that final \isacom{done}, Isabelle associates the lemma
 just proved with its name. You can now inspect the lemma with the command
-*}
+\<close>
 
 thm add_02
 
-txt{* which displays @{thm[show_question_marks,display] add_02} The free
+txt\<open>which displays @{thm[show_question_marks,display] add_02} The free
 variable @{text m} has been replaced by the \concept{unknown}
 @{text"?m"}. There is no logical difference between the two but there is an
 operational one: unknowns can be instantiated, which is what you want after
@@ -153,7 +153,7 @@
 
 Although lists are already predefined, we define our own copy for
 demonstration purposes:
-*}
+\<close>
 (*<*)
 apply(auto)
 done 
@@ -164,7 +164,7 @@
 for map: map
 (*>*)
 
-text{*
+text\<open>
 \begin{itemize}
 \item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
 \item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
@@ -175,7 +175,7 @@
 types of a constructor needs to be enclosed in quotation marks, unless
 it is just an identifier (e.g., @{typ nat} or @{typ 'a}).
 \end{itemize}
-We also define two standard functions, append and reverse: *}
+We also define two standard functions, append and reverse:\<close>
 
 fun app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "app Nil ys = ys" |
@@ -185,18 +185,18 @@
 "rev Nil = Nil" |
 "rev (Cons x xs) = app (rev xs) (Cons x Nil)"
 
-text{* By default, variables @{text xs}, @{text ys} and @{text zs} are of
+text\<open>By default, variables @{text xs}, @{text ys} and @{text zs} are of
 @{text list} type.
 
-Command \indexed{\isacommand{value}}{value} evaluates a term. For example, *}
+Command \indexed{\isacommand{value}}{value} evaluates a term. For example,\<close>
 
 value "rev(Cons True (Cons False Nil))"
 
-text{* yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too: *}
+text\<open>yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too:\<close>
 
 value "rev(Cons a (Cons b Nil))"
 
-text{* yields @{value "rev(Cons a (Cons b Nil))"}.
+text\<open>yields @{value "rev(Cons a (Cons b Nil))"}.
 \medskip
 
 Figure~\ref{fig:MyList} shows the theory created so far.
@@ -238,28 +238,28 @@
 We will now demonstrate the typical proof process, which involves
 the formulation and proof of auxiliary lemmas.
 Our goal is to show that reversing a list twice produces the original
-list. *}
+list.\<close>
 
 theorem rev_rev [simp]: "rev(rev xs) = xs"
 
-txt{* Commands \isacom{theorem} and \isacom{lemma} are
+txt\<open>Commands \isacom{theorem} and \isacom{lemma} are
 interchangeable and merely indicate the importance we attach to a
 proposition. Via the bracketed attribute @{text simp} we also tell Isabelle
 to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
 involving simplification will replace occurrences of @{term"rev(rev xs)"} by
-@{term"xs"}. The proof is by induction: *}
+@{term"xs"}. The proof is by induction:\<close>
 
 apply(induction xs)
 
-txt{*
+txt\<open>
 As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
 @{subgoals[display,indent=0,margin=65]}
 Let us try to solve both goals automatically:
-*}
+\<close>
 
 apply(auto)
 
-txt{*Subgoal~1 is proved, and disappears; the simplified version
+txt\<open>Subgoal~1 is proved, and disappears; the simplified version
 of subgoal~2 becomes the new subgoal~1:
 @{subgoals[display,indent=0,margin=70]}
 In order to simplify this subgoal further, a lemma suggests itself.
@@ -267,22 +267,22 @@
 \subsubsection{A First Lemma}
 
 We insert the following lemma in front of the main theorem:
-*}
+\<close>
 (*<*)
 oops
 (*>*)
 lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
 
-txt{* There are two variables that we could induct on: @{text xs} and
+txt\<open>There are two variables that we could induct on: @{text xs} and
 @{text ys}. Because @{const app} is defined by recursion on
 the first argument, @{text xs} is the correct one:
-*}
+\<close>
 
 apply(induction xs)
 
-txt{* This time not even the base case is solved automatically: *}
+txt\<open>This time not even the base case is solved automatically:\<close>
 apply(auto)
-txt{*
+txt\<open>
 \vspace{-5ex}
 @{subgoals[display,goals_limit=1]}
 Again, we need to abandon this proof attempt and prove another simple lemma
@@ -291,7 +291,7 @@
 \subsubsection{A Second Lemma}
 
 We again try the canonical proof procedure:
-*}
+\<close>
 (*<*)
 oops
 (*>*)
@@ -300,16 +300,16 @@
 apply(auto)
 done
 
-text{*
+text\<open>
 Thankfully, this worked.
 Now we can continue with our stuck proof attempt of the first lemma:
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
 apply(induction xs)
 apply(auto)
 
-txt{*
+txt\<open>
 We find that this time @{text"auto"} solves the base case, but the
 induction step merely simplifies to
 @{subgoals[display,indent=0,goals_limit=1]}
@@ -319,7 +319,7 @@
 \subsubsection{Associativity of @{const app}}
 
 The canonical proof procedure succeeds without further ado:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"
 apply(induction xs)
@@ -336,7 +336,7 @@
 apply(auto)
 done
 (*>*)
-text{*
+text\<open>
 Finally the proofs of @{thm[source] rev_app} and @{thm[source] rev_rev}
 succeed, too.
 
@@ -457,7 +457,7 @@
 \mbox{@{text"sum_upto n"}} @{text"="} @{text"0 + ... + n"} and prove
 @{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
 \end{exercise}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Isar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Isar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,7 +4,7 @@
 begin
 declare [[quick_and_dirty]]
 (*>*)
-text{*
+text\<open>
 Apply-scripts are unreadable and hard to maintain. The language of choice
 for larger proofs is \concept{Isar}. The two key features of Isar are:
 \begin{itemize}
@@ -14,7 +14,7 @@
 \end{itemize}
 Whereas apply-scripts are like assembly language programs, Isar proofs
 are like structured programs with comments. A typical Isar proof looks like this:
-*}text{*
+\<close>text\<open>
 \begin{tabular}{@ {}l}
 \isacom{proof}\\
 \quad\isacom{assume} @{text"\""}$\mathit{formula}_0$@{text"\""}\\
@@ -24,7 +24,7 @@
 \quad\isacom{show} @{text"\""}$\mathit{formula}_{n+1}$@{text"\""} \quad\isacom{by} @{text \<dots>}\\
 \isacom{qed}
 \end{tabular}
-*}text{*
+\<close>text\<open>
 It proves $\mathit{formula}_0 \Longrightarrow \mathit{formula}_{n+1}$
 (provided each proof step succeeds).
 The intermediate \isacom{have} statements are merely stepping stones
@@ -89,7 +89,7 @@
 We show a number of proofs of Cantor's theorem that a function from a set to
 its powerset cannot be surjective, illustrating various features of Isar. The
 constant @{const surj} is predefined.
-*}
+\<close>
 
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 proof
@@ -99,7 +99,7 @@
   from 2 show "False" by blast
 qed
 
-text{*
+text\<open>
 The \isacom{proof} command lacks an explicit method by which to perform
 the proof. In such cases Isabelle tries to use some standard introduction
 rule, in the above case for @{text"\<not>"}:
@@ -125,7 +125,7 @@
 in a UNIX pipe. In such cases the predefined name @{text this} can be used
 to refer to the proposition proved in the previous step. This allows us to
 eliminate all labels from our proof (we suppress the \isacom{lemma} statement):
-*}
+\<close>
 (*<*)
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 (*>*)
@@ -135,7 +135,7 @@
   from this show "False" by blast
 qed
 
-text{* We have also taken the opportunity to compress the two \isacom{have}
+text\<open>We have also taken the opportunity to compress the two \isacom{have}
 steps into one.
 
 To compact the text further, Isar has a few convenient abbreviations:
@@ -150,7 +150,7 @@
 
 \noindent
 With the help of these abbreviations the proof becomes
-*}
+\<close>
 (*<*)
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 (*>*)
@@ -159,7 +159,7 @@
   hence "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
   thus "False" by blast
 qed
-text{*
+text\<open>
 
 There are two further linguistic variations:
 \medskip
@@ -180,14 +180,14 @@
 Lemmas can also be stated in a more structured fashion. To demonstrate this
 feature with Cantor's theorem, we rephrase \noquotes{@{prop[source]"\<not> surj f"}}
 a little:
-*}
+\<close>
 
 lemma
   fixes f :: "'a \<Rightarrow> 'a set"
   assumes s: "surj f"
   shows "False"
 
-txt{* The optional \isacom{fixes} part allows you to state the types of
+txt\<open>The optional \isacom{fixes} part allows you to state the types of
 variables up front rather than by decorating one of their occurrences in the
 formula with a type constraint. The key advantage of the structured format is
 the \isacom{assumes} part that allows you to name each assumption; multiple
@@ -195,7 +195,7 @@
 \isacom{shows} part gives the goal. The actual theorem that will come out of
 the proof is \noquotes{@{prop[source]"surj f \<Longrightarrow> False"}}, but during the proof the assumption
 \noquotes{@{prop[source]"surj f"}} is available under the name @{text s} like any other fact.
-*}
+\<close>
 
 proof -
   have "\<exists> a. {x. x \<notin> f x} = f a" using s
@@ -203,7 +203,7 @@
   thus "False" by blast
 qed
 
-text{*
+text\<open>
 \begin{warn}
 Note the hyphen after the \isacom{proof} command.
 It is the null method that does nothing to the goal. Leaving it out would be asking
@@ -235,42 +235,42 @@
 starting from a formula @{text P} we have the two cases @{text P} and
 @{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
 we have the two cases @{text P} and @{text Q}:
-*}text_raw{*
+\<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "R" proof-(*>*)
 show "R"
 proof cases
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "\<not> P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
-text_raw {* }
+text_raw \<open>}
 \end{minipage}\index{cases@@{text cases}}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "R" proof-(*>*)
-have "P \<or> Q" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+have "P \<or> Q" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 then show "R"
 proof
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "Q"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \medskip
@@ -278,19 +278,19 @@
 How to prove a logical equivalence:
 \end{isamarkuptext}%
 \isa{%
-*}
+\<close>
 (*<*)lemma "P\<longleftrightarrow>Q" proof-(*>*)
 show "P \<longleftrightarrow> Q"
 proof
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "Q" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "Q" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "Q"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "P" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
-text_raw {* }
+text_raw \<open>}
 \medskip
 \begin{isamarkuptext}%
 Proofs by contradiction (@{thm[source] ccontr} stands for ``classical contradiction''):
@@ -298,30 +298,30 @@
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "\<not> P" proof-(*>*)
 show "\<not> P"
 proof
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "False" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "False" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P" proof-(*>*)
 show "P"
 proof (rule ccontr)
   assume "\<not>P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "False" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "False" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \medskip
@@ -331,30 +331,30 @@
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "ALL x. P x" proof-(*>*)
 show "\<forall>x. P(x)"
 proof
   fix x
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P(x)" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "P(x)" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "EX x. P(x)" proof-(*>*)
 show "\<exists>x. P(x)"
 proof
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P(witness)" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "P(witness)" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed
 (*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \medskip
@@ -369,12 +369,12 @@
 
 How to reason forward from \noquotes{@{prop[source] "\<exists>x. P(x)"}}:
 \end{isamarkuptext}%
-*}
+\<close>
 (*<*)lemma True proof- assume 1: "EX x. P x"(*>*)
-have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)text_raw{*\ \isasymproof\\*}
+have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)text_raw\<open>\ \isasymproof\\\<close>
 then obtain x where p: "P(x)" by blast
 (*<*)oops(*>*)
-text{*
+text\<open>
 After the \indexed{\isacom{obtain}}{obtain} step, @{text x} (we could have chosen any name)
 is a fixed local
 variable, and @{text p} is the name of the fact
@@ -382,7 +382,7 @@
 This pattern works for one or more @{text x}.
 As an example of the \isacom{obtain} command, here is the proof of
 Cantor's theorem in more detail:
-*}
+\<close>
 
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 proof
@@ -393,7 +393,7 @@
   thus "False" by blast
 qed
 
-text_raw{*
+text_raw\<open>
 \begin{isamarkuptext}%
 
 Finally, how to prove set equality and subset relationship:
@@ -401,31 +401,31 @@
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "A = (B::'a set)" proof-(*>*)
 show "A = B"
 proof
-  show "A \<subseteq> B" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  show "A \<subseteq> B" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
-  show "B \<subseteq> A" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  show "B \<subseteq> A" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "A <= (B::'a set)" proof-(*>*)
 show "A \<subseteq> B"
 proof
   fix x
   assume "x \<in> A"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "x \<in> B" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "x \<in> B" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \begin{isamarkuptext}%
@@ -522,34 +522,34 @@
 the pattern for later use. As an example, consider the proof pattern for
 @{text"\<longleftrightarrow>"}:
 \end{isamarkuptext}%
-*}
+\<close>
 (*<*)lemma "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" proof-(*>*)
 show "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" (is "?L \<longleftrightarrow> ?R")
 proof
   assume "?L"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "?R" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "?R" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "?R"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "?L" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "?L" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text{* Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
+text\<open>Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
 the two abbreviations @{text"?L"} and @{text"?R"} by pattern matching.
 Pattern matching works wherever a formula is stated, in particular
 with \isacom{have} and \isacom{lemma}.
 
 The unknown \indexed{@{text"?thesis"}}{thesis} is implicitly matched against any goal stated by
-\isacom{lemma} or \isacom{show}. Here is a typical example: *}
+\isacom{lemma} or \isacom{show}. Here is a typical example:\<close>
 
 lemma "formula"
 proof -
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show ?thesis (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show ?thesis (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed
 
-text{* 
+text\<open>
 Unknowns can also be instantiated with \indexed{\isacom{let}}{let} commands
 \begin{quote}
 \isacom{let} @{text"?t"} = @{text"\""}\textit{some-big-term}@{text"\""}
@@ -588,37 +588,37 @@
 Sometimes one needs a number of facts to enable some deduction. Of course
 one can name these facts individually, as shown on the right,
 but one can also combine them with \isacom{moreover}, as shown on the left:
-*}text_raw{*
+\<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P" proof-(*>*)
-have "P\<^sub>1" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-moreover have "P\<^sub>2" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+have "P\<^sub>1" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+moreover have "P\<^sub>2" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 moreover
-text_raw{*\\$\vdots$\\\hspace{-1.4ex}*}(*<*)have "True" ..(*>*)
-moreover have "P\<^sub>n" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-ultimately have "P"  (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+text_raw\<open>\\$\vdots$\\\hspace{-1.4ex}\<close>(*<*)have "True" ..(*>*)
+moreover have "P\<^sub>n" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+ultimately have "P"  (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 (*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \qquad
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P" proof-(*>*)
-have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)text_raw{*\ \isasymproof*}
-text_raw{*\\$\vdots$\\\hspace{-1.4ex}*}
-have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-from lab\<^sub>1 lab\<^sub>2 text_raw{*\ $\dots$\\*}
-have "P"  (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\<close>
+text_raw\<open>\\$\vdots$\\\hspace{-1.4ex}\<close>
+have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+from lab\<^sub>1 lab\<^sub>2 text_raw\<open>\ $\dots$\\\<close>
+have "P"  (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 (*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \begin{isamarkuptext}%
@@ -643,7 +643,7 @@
 As an example we prove a simple fact about divisibility on integers.
 The definition of @{text "dvd"} is @{thm dvd_def}.
 \end{isamarkuptext}%
-*}
+\<close>
 
 lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
 proof -
@@ -654,28 +654,28 @@
   then show ?thesis using assms by(auto simp add: dvd_def)
 qed
 
-text{*
+text\<open>
 
 \subsection*{Exercises}
 
 \exercise
 Give a readable, structured proof of the following lemma:
-*}
+\<close>
 lemma assumes T: "\<forall>x y. T x y \<or> T y x"
   and A: "\<forall>x y. A x y \<and> A y x \<longrightarrow> x = y"
   and TA: "\<forall>x y. T x y \<longrightarrow> A x y" and "A x y"
   shows "T x y"
 (*<*)oops(*>*)
-text{*
+text\<open>
 \endexercise
 
 \exercise
 Give a readable, structured proof of the following lemma:
-*}
+\<close>
 lemma "\<exists>ys zs. xs = ys @ zs \<and>
             (length ys = length zs \<or> length ys = length zs + 1)"
 (*<*)oops(*>*)
-text{*
+text\<open>
 Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
 such that @{text"take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]"} and
 @{text"drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]"}. Let sledgehammer find and apply
@@ -692,7 +692,7 @@
 which form some term takes: is it @{text 0} or of the form @{term"Suc n"},
 is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
 proof by case analysis on the form of @{text xs}:
-*}
+\<close>
 
 lemma "length(tl xs) = length xs - 1"
 proof (cases xs)
@@ -703,7 +703,7 @@
   thus ?thesis by simp
 qed
 
-text{*\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
+text\<open>\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
 @{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
 and @{prop"0 - 1 = (0::nat)"}.
 
@@ -721,7 +721,7 @@
 but also gives the assumption @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""} a name: @{text C},
 like the constructor.
 Here is the \isacom{case} version of the proof above:
-*}
+\<close>
 (*<*)lemma "length(tl xs) = length xs - 1"(*>*)
 proof (cases xs)
   case Nil
@@ -731,7 +731,7 @@
   thus ?thesis by simp
 qed
 
-text{* Remember that @{text Nil} and @{text Cons} are the alphanumeric names
+text\<open>Remember that @{text Nil} and @{text Cons} are the alphanumeric names
 for @{text"[]"} and @{text"#"}. The names of the assumptions
 are not used because they are directly piped (via \isacom{thus})
 into the proof of the claim.
@@ -745,7 +745,7 @@
 the sum (@{text"\<Sum>"}) of the first @{text n} natural numbers
 (@{text"{0..n::nat}"}) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
 Never mind the details, just focus on the pattern:
-*}
+\<close>
 
 lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"
 proof (induction n)
@@ -755,10 +755,10 @@
   thus "\<Sum>{0..Suc n} = Suc n*(Suc n+1) div 2" by simp
 qed
 
-text{* Except for the rewrite steps, everything is explicitly given. This
+text\<open>Except for the rewrite steps, everything is explicitly given. This
 makes the proof easily readable, but the duplication means it is tedious to
 write and maintain. Here is how pattern
-matching can completely avoid any duplication: *}
+matching can completely avoid any duplication:\<close>
 
 lemma "\<Sum>{0..n::nat} = n*(n+1) div 2" (is "?P n")
 proof (induction n)
@@ -768,7 +768,7 @@
   thus "?P(Suc n)" by simp
 qed
 
-text{* The first line introduces an abbreviation @{text"?P n"} for the goal.
+text\<open>The first line introduces an abbreviation @{text"?P n"} for the goal.
 Pattern matching @{text"?P n"} with the goal instantiates @{text"?P"} to the
 function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}.  Now the proposition to
 be proved in the base case can be written as @{text"?P 0"}, the induction
@@ -777,7 +777,7 @@
 
 Induction also provides the \isacom{case} idiom that abbreviates
 the \isacom{fix}-\isacom{assume} step. The above proof becomes
-*}
+\<close>
 (*<*)lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"(*>*)
 proof (induction n)
   case 0
@@ -787,29 +787,29 @@
   thus ?case by simp
 qed
 
-text{*
+text\<open>
 The unknown @{text"?case"}\index{case?@@{text"?case"}|(} is set in each case to the required
 claim, i.e., @{text"?P 0"} and \mbox{@{text"?P(Suc n)"}} in the above proof,
 without requiring the user to define a @{text "?P"}. The general
 pattern for induction over @{typ nat} is shown on the left-hand side:
-*}text_raw{*
+\<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P(n::nat)" proof -(*>*)
 show "P(n)"
 proof (induction n)
   case 0
-  text_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 next
   case (Suc n)
-  text_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
@@ -824,8 +824,8 @@
 \end{minipage}
 \end{tabular}
 \medskip
-*}
-text{*
+\<close>
+text\<open>
 On the right side you can see what the \isacom{case} command
 on the left stands for.
 
@@ -910,7 +910,7 @@
 
 Recall the inductive and recursive definitions of even numbers in
 \autoref{sec:inductive-defs}:
-*}
+\<close>
 
 inductive ev :: "nat \<Rightarrow> bool" where
 ev0: "ev 0" |
@@ -921,13 +921,13 @@
 "evn (Suc 0) = False" |
 "evn (Suc(Suc n)) = evn n"
 
-text{* We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
+text\<open>We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
 left column shows the actual proof text, the right column shows
-the implicit effect of the two \isacom{case} commands:*}text_raw{*
+the implicit effect of the two \isacom{case} commands:\<close>text_raw\<open>
 \begin{tabular}{@ {}l@ {\qquad}l@ {}}
 \begin{minipage}[t]{.5\textwidth}
 \isa{%
-*}
+\<close>
 
 lemma "ev n \<Longrightarrow> evn n"
 proof(induction rule: ev.induct)
@@ -941,7 +941,7 @@
   thus ?case by simp
 qed
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.5\textwidth}
@@ -957,8 +957,8 @@
 \end{minipage}
 \end{tabular}
 \medskip
-*}
-text{*
+\<close>
+text\<open>
 The proof resembles structural induction, but the induction rule is given
 explicitly and the names of the cases are the names of the rules in the
 inductive definition.
@@ -986,7 +986,7 @@
 case @{thm[source] evSS} is derived from a renamed version of
 rule @{thm[source] evSS}: @{text"ev m \<Longrightarrow> ev(Suc(Suc m))"}.
 Here is an example with a (contrived) intermediate step that refers to @{text m}:
-*}
+\<close>
 
 lemma "ev n \<Longrightarrow> evn n"
 proof(induction rule: ev.induct)
@@ -994,16 +994,16 @@
 next
   case (evSS m)
   have "evn(Suc(Suc m)) = evn m" by simp
-  thus ?case using `evn m` by blast
+  thus ?case using \<open>evn m\<close> by blast
 qed
 
-text{*
+text\<open>
 \indent
 In general, let @{text I} be a (for simplicity unary) inductively defined
 predicate and let the rules in the definition of @{text I}
 be called @{text "rule\<^sub>1"}, \dots, @{text "rule\<^sub>n"}. A proof by rule
 induction follows this pattern:\index{inductionrule@@{text"induction ... rule:"}}
-*}
+\<close>
 
 (*<*)
 inductive I where rule\<^sub>1: "I()" |  rule\<^sub>2: "I()" |  rule\<^sub>n: "I()"
@@ -1011,21 +1011,21 @@
 show "I x \<Longrightarrow> P x"
 proof(induction rule: I.induct)
   case rule\<^sub>1
-  text_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
-  text_raw{*\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
+  text_raw\<open>\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
 (*<*)
   case rule\<^sub>2
   show ?case sorry
 (*>*)
 next
   case rule\<^sub>n
-  text_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text{*
+text\<open>
 One can provide explicit variable names by writing
 \isacom{case}~@{text"(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)"}, thus renaming the first @{text k}
 free variables in rule @{text i} to @{text"x\<^sub>1 \<dots> x\<^sub>k"},
@@ -1071,7 +1071,7 @@
 A simple example is the proof that @{prop"ev n \<Longrightarrow> ev (n - 2)"}. We
 already went through the details informally in \autoref{sec:Logic:even}. This
 is the Isar proof:
-*}
+\<close>
 (*<*)
 notepad
 begin fix n
@@ -1087,7 +1087,7 @@
 end
 (*>*)
 
-text{* The key point here is that a case analysis over some inductively
+text\<open>The key point here is that a case analysis over some inductively
 defined predicate is triggered by piping the given fact
 (here: \isacom{from}~@{text this}) into a proof by @{text cases}.
 Let us examine the assumptions available in each case. In case @{text ev0}
@@ -1101,7 +1101,7 @@
 rule @{text evSS} can yield @{prop"ev(Suc 0)"} because @{text"Suc 0"} unifies
 neither with @{text 0} nor with @{term"Suc(Suc n)"}. Impossible cases do not
 have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
-*}
+\<close>
 (*<*)
 notepad begin fix P
 (*>*)
@@ -1110,14 +1110,14 @@
 end
 (*>*)
 
-text{* That is, @{prop"ev(Suc 0)"} is simply not provable: *}
+text\<open>That is, @{prop"ev(Suc 0)"} is simply not provable:\<close>
 
 lemma "\<not> ev(Suc 0)"
 proof
   assume "ev(Suc 0)" then show False by cases
 qed
 
-text{* Normally not all cases will be impossible. As a simple exercise,
+text\<open>Normally not all cases will be impossible. As a simple exercise,
 prove that \mbox{@{prop"\<not> ev(Suc(Suc(Suc 0)))"}.}
 
 \subsection{Advanced Rule Induction}
@@ -1147,23 +1147,23 @@
 \isacom{proof}@{text"(induction \"r\" \"s\" \"t\" arbitrary: \<dots> rule: I.induct)"}\index{inductionrule@@{text"induction ... rule:"}}\index{arbitrary@@{text"arbitrary:"}}
 \end{isabelle}
 Like for rule inversion, cases that are impossible because of constructor clashes
-will not show up at all. Here is a concrete example: *}
+will not show up at all. Here is a concrete example:\<close>
 
 lemma "ev (Suc m) \<Longrightarrow> \<not> ev m"
 proof(induction "Suc m" arbitrary: m rule: ev.induct)
   fix n assume IH: "\<And>m. n = Suc m \<Longrightarrow> \<not> ev m"
   show "\<not> ev (Suc n)"
-  proof --"contradiction"
+  proof \<comment>"contradiction"
     assume "ev(Suc n)"
     thus False
-    proof cases --"rule inversion"
+    proof cases \<comment>"rule inversion"
       fix k assume "n = Suc k" "ev k"
       thus False using IH by auto
     qed
   qed
 qed
 
-text{*
+text\<open>
 Remarks:
 \begin{itemize}
 \item 
@@ -1200,12 +1200,12 @@
 
 \exercise
 Give a structured proof by rule inversion:
-*}
+\<close>
 
 lemma assumes a: "ev(Suc(Suc n))" shows "ev n"
 (*<*)oops(*>*)
 
-text{*
+text\<open>
 \endexercise
 
 \begin{exercise}
@@ -1236,7 +1236,7 @@
 @{const replicate} @{text"::"} @{typ"nat \<Rightarrow> 'a \<Rightarrow> 'a list"} is predefined
 and @{term"replicate n x"} yields the list @{text"[x, \<dots>, x]"} of length @{text n}.
 \end{exercise}
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Prog_Prove/LaTeXsugar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/LaTeXsugar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -43,7 +43,7 @@
   "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("_ /\<^latex>\<open>{\\normalsize \\,\<close>and\<^latex>\<open>\\,}\<close>/ _")
   "_asm" :: "prop \<Rightarrow> asms" ("_")
 
-setup{*
+setup\<open>
   let
     fun pretty ctxt c =
       let val tc = Proof_Context.read_const {proper = true, strict = false} ctxt c
@@ -57,7 +57,7 @@
           Thy_Output.output ctxt
             (Thy_Output.maybe_pretty_source pretty ctxt src [arg]))
   end;
-*}
+\<close>
 
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Logic.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Logic.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 imports LaTeXsugar
 begin
 (*>*)
-text{*
+text\<open>
 \vspace{-5ex}
 \section{Formulas}
 
@@ -147,11 +147,11 @@
 
 \exercise
 Start from the data type of binary trees defined earlier:
-*}
+\<close>
 
 datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
 
-text{*
+text\<open>
 Define a function @{text "set ::"} @{typ "'a tree \<Rightarrow> 'a set"}
 that returns the elements in a tree and a function
 @{text "ord ::"} @{typ "int tree \<Rightarrow> bool"}
@@ -169,7 +169,7 @@
 So far we have only seen @{text simp} and \indexed{@{text auto}}{auto}: Both perform
 rewriting, both can also prove linear arithmetic facts (no multiplication),
 and @{text auto} is also able to prove simple logical or set-theoretic goals:
-*}
+\<close>
 
 lemma "\<forall>x. \<exists>y. x = y"
 by auto
@@ -177,7 +177,7 @@
 lemma "A \<subseteq> B \<inter> C \<Longrightarrow> A \<subseteq> B \<union> C"
 by auto
 
-text{* where
+text\<open>where
 \begin{quote}
 \isacom{by} \textit{proof-method}
 \end{quote}
@@ -200,13 +200,13 @@
 subgoal only, and it can be modified like @{text auto}, e.g.,
 with @{text "simp add"}. Here is a typical example of what @{text fastforce}
 can do:
-*}
+\<close>
 
 lemma "\<lbrakk> \<forall>xs \<in> A. \<exists>ys. xs = ys @ ys;  us \<in> A \<rbrakk>
    \<Longrightarrow> \<exists>n. length us = n+n"
 by fastforce
 
-text{* This lemma is out of reach for @{text auto} because of the
+text\<open>This lemma is out of reach for @{text auto} because of the
 quantifiers.  Even @{text fastforce} fails when the quantifier structure
 becomes more complicated. In a few cases, its slow version @{text force}
 succeeds where @{text fastforce} fails.
@@ -215,7 +215,7 @@
 following example, @{text T} and @{text A} are two binary predicates. It
 is shown that if @{text T} is total, @{text A} is antisymmetric and @{text T} is
 a subset of @{text A}, then @{text A} is a subset of @{text T}:
-*}
+\<close>
 
 lemma
   "\<lbrakk> \<forall>x y. T x y \<or> T y x;
@@ -224,7 +224,7 @@
    \<Longrightarrow> \<forall>x y. A x y \<longrightarrow> T x y"
 by blast
 
-text{*
+text\<open>
 We leave it to the reader to figure out why this lemma is true.
 Method @{text blast}
 \begin{itemize}
@@ -245,16 +245,16 @@
 queried over the internet. If successful, a proof command is generated and can
 be inserted into your proof.  The biggest win of \isacom{sledgehammer} is
 that it will take into account the whole lemma library and you do not need to
-feed in any lemma explicitly. For example,*}
+feed in any lemma explicitly. For example,\<close>
 
 lemma "\<lbrakk> xs @ ys = ys @ xs;  length xs = length ys \<rbrakk> \<Longrightarrow> xs = ys"
 
-txt{* cannot be solved by any of the standard proof methods, but
-\isacom{sledgehammer} finds the following proof: *}
+txt\<open>cannot be solved by any of the standard proof methods, but
+\isacom{sledgehammer} finds the following proof:\<close>
 
 by (metis append_eq_conv_conj)
 
-text{* We do not explain how the proof was found but what this command
+text\<open>We do not explain how the proof was found but what this command
 means. For a start, Isabelle does not trust external tools (and in particular
 not the translations from Isabelle's logic to those tools!)
 and insists on a proof that it can check. This is what \indexed{@{text metis}}{metis} does.
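text\<open>As an illustrative sketch of such a generated one-liner (assuming the
standard library names @{thm[source] length_append} and
@{thm[source] add.commute}):\<close>

lemma "length (xs @ ys) = length ys + length xs"
by (metis length_append add.commute)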
@@ -286,12 +286,12 @@
 because it does not involve multiplication, although multiplication with
 numbers, e.g., @{text"2*n"}, is allowed. Such formulas can be proved by
 \indexed{@{text arith}}{arith}:
-*}
+\<close>
 
 lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
 by arith
 
-text{* In fact, @{text auto} and @{text simp} can prove many linear
+text\<open>In fact, @{text auto} and @{text simp} can prove many linear
 arithmetic formulas already, like the one above, by calling a weak but fast
 version of @{text arith}. Hence it is usually not necessary to invoke
 @{text arith} explicitly.
@@ -425,12 +425,12 @@
 @{thm[source] le_trans}, transitivity of @{text"\<le>"} on type @{typ nat},
 is not an introduction rule by default because of the disastrous effect
 on the search space, but can be useful in specific situations:
-*}
+\<close>
 
 lemma "\<lbrakk> (a::nat) \<le> b; b \<le> c; c \<le> d; d \<le> e \<rbrakk> \<Longrightarrow> a \<le> e"
 by(blast intro: le_trans)
 
-text{*
+text\<open>
 Of course this is just an example and could be proved by @{text arith}, too.
 
 \subsection{Forward Proof}
@@ -459,11 +459,11 @@
 by unifying and thus proving @{text "A\<^sub>i"} with @{text "r\<^sub>i"}, @{text"i = 1\<dots>m"}.
 Here is an example, where @{thm[source]refl} is the theorem
 @{thm[show_question_marks] refl}:
-*}
+\<close>
 
 thm conjI[OF refl[of "a"] refl[of "b"]]
 
-text{* yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
+text\<open>yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
 The command \isacom{thm} merely displays the result.
 
 Forward reasoning also makes sense in connection with proof states.
@@ -474,12 +474,12 @@
 allows proof search to reason forward with @{text r}, i.e.,
 to replace an assumption @{text A'}, where @{text A'} unifies with @{text A},
 with the correspondingly instantiated @{text B}. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
-*}
+\<close>
 
 lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
 by(blast dest: Suc_leD)
 
-text{* In this particular example we could have backchained with
+text\<open>In this particular example we could have backchained with
 @{thm[source] Suc_leD}, too, but because the premise is more complicated than the conclusion this can easily lead to nontermination.
 
 %\subsection{Finding Theorems}
@@ -516,14 +516,14 @@
 The operative word ``inductive'' means that these are the only even numbers.
 In Isabelle we give the two rules the names @{text ev0} and @{text evSS}
 and write
-*}
+\<close>
 
 inductive ev :: "nat \<Rightarrow> bool" where
 ev0:    "ev 0" |
 evSS:  (*<*)"ev n \<Longrightarrow> ev (Suc(Suc n))"(*>*)
-text_raw{* @{prop[source]"ev n \<Longrightarrow> ev (n + 2)"} *}
+text_raw\<open>@{prop[source]"ev n \<Longrightarrow> ev (n + 2)"}\<close>
 
-text{* To get used to inductive definitions, we will first prove a few
+text\<open>To get used to inductive definitions, we will first prove a few
 properties of @{const ev} informally before we descend to the Isabelle level.
 
 How do we prove that some number is even, e.g., @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
@@ -535,14 +535,14 @@
 
 Showing that all even numbers have some property is more complicated.  For
 example, let us prove that the inductive definition of even numbers agrees
-with the following recursive one:*}
+with the following recursive one:\<close>
 
 fun evn :: "nat \<Rightarrow> bool" where
 "evn 0 = True" |
 "evn (Suc 0) = False" |
 "evn (Suc(Suc n)) = evn n"
 
-text{* We prove @{prop"ev m \<Longrightarrow> evn m"}.  That is, we
+text\<open>We prove @{prop"ev m \<Longrightarrow> evn m"}.  That is, we
 assume @{prop"ev m"} and by induction on the form of its derivation
 prove @{prop"evn m"}. There are two cases corresponding to the two rules
 for @{const ev}:
@@ -606,60 +606,60 @@
 direction: @{text "evSS[OF evSS[OF ev0]]"} yields the theorem @{thm evSS[OF
 evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
 fashion. Although this is more verbose, it allows us to demonstrate how each
-rule application changes the proof state: *}
+rule application changes the proof state:\<close>
 
 lemma "ev(Suc(Suc(Suc(Suc 0))))"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
-*}
+\<close>
 apply(rule evSS)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
-*}
+\<close>
 apply(rule evSS)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
-*}
+\<close>
 apply(rule ev0)
 done
 
-text{* \indent
+text\<open>\indent
 Rule induction is applied by giving the induction rule explicitly via the
-@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}*}
+@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}\<close>
 
 lemma "ev m \<Longrightarrow> evn m"
 apply(induction rule: ev.induct)
 by(simp_all)
 
-text{* Both cases are automatic. Note that if there are multiple assumptions
+text\<open>Both cases are automatic. Note that if there are multiple assumptions
 of the form @{prop"ev t"}, method @{text induction} will induct on the leftmost
 one.
 
 As a bonus, we also prove the remaining direction of the equivalence of
 @{const ev} and @{const evn}:
-*}
+\<close>
 
 lemma "evn n \<Longrightarrow> ev n"
 apply(induction n rule: evn.induct)
 
-txt{* This is a proof by computation induction on @{text n} (see
+txt\<open>This is a proof by computation induction on @{text n} (see
 \autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
 the three equations for @{const evn}:
 @{subgoals[display,indent=0]}
 The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because @{prop"evn(Suc 0)"} is @{const False}:
-*}
+\<close>
 
 by (simp_all add: ev0 evSS)
 
-text{* The rules for @{const ev} make perfect simplification and introduction
+text\<open>The rules for @{const ev} make perfect simplification and introduction
 rules because their premises are always smaller than the conclusion. It
 makes sense to turn them into simplification and introduction rules
 permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
-\index{intros@@{text".intros"}} by Isabelle: *}
+\index{intros@@{text".intros"}} by Isabelle:\<close>
 
 declare ev.intros[simp,intro]
 
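text\<open>As a quick illustration, the declaration just made lets the earlier
backward proof collapse to a single @{text simp} call (a sketch):\<close>

lemma "ev(Suc(Suc(Suc(Suc 0))))"
by simp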
-text{* The rules of an inductive definition are not simplification rules by
+text\<open>The rules of an inductive definition are not simplification rules by
 default because, in contrast to recursive functions, there is no termination
 requirement for inductive definitions.
 
@@ -707,13 +707,13 @@
 r"}, because @{text"star r"} is meant to be the reflexive transitive closure.
 That is, @{prop"star r x y"} is meant to be true if from @{text x} we can
 reach @{text y} in finitely many @{text r} steps. This concept is naturally
-defined inductively: *}
+defined inductively:\<close>
 
 inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"  for r where
 refl:  "star r x x" |
 step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
 
-text{* The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
+text\<open>The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
 step case @{thm[source]step} combines an @{text r} step (from @{text x} to
 @{text y}) and a @{term"star r"} step (from @{text y} to @{text z}) into a
 @{term"star r"} step (from @{text x} to @{text z}).
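text\<open>A small sanity check, as a sketch (via the qualified rule names
@{thm[source] star.refl} and @{thm[source] star.step}): a single @{text r}
step is already a @{term"star r"} step:\<close>

lemma "r x y \<Longrightarrow> star r x y"
by (metis star.refl star.step)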
@@ -723,7 +723,7 @@
 generates a simpler induction rule.
 
 By definition @{term"star r"} is reflexive. It is also transitive, but we
-need rule induction to prove that: *}
+need rule induction to prove that:\<close>
 
 lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
 apply(induction rule: star.induct)
@@ -732,15 +732,15 @@
 apply(rename_tac u x y)
 defer
 (*>*)
-txt{* The induction is over @{prop"star r x y"} (the first matching assumption)
+txt\<open>The induction is over @{prop"star r x y"} (the first matching assumption)
 and we try to prove \mbox{@{prop"star r y z \<Longrightarrow> star r x z"}},
 which we abbreviate by @{prop"P x y"}. These are our two subgoals:
 @{subgoals[display,indent=0]}
 The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
 and it is trivial:\index{assumption@@{text assumption}}
-*}
+\<close>
 apply(assumption)
-txt{* Let us examine subgoal @{text 2}, case @{thm[source] step}.
+txt\<open>Let us examine subgoal @{text 2}, case @{thm[source] step}.
 Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
 are the premises of rule @{thm[source]step}.
 Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
@@ -749,11 +749,11 @@
 The proof itself is straightforward: from \mbox{@{prop"star r y z"}} the IH
 leads to @{prop"star r x z"} which, together with @{prop"r u x"},
 leads to \mbox{@{prop"star r u z"}} via rule @{thm[source]step}:
-*}
+\<close>
 apply(metis step)
 done
 
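text\<open>For comparison, a condensed sketch of the same induction, with
@{thm[source] star.step} supplied as an introduction rule:\<close>

lemma "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
by (induction rule: star.induct) (auto intro: star.step)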
-text{*\index{rule induction|)}
+text\<open>\index{rule induction|)}
 
 \subsection{The General Case}
 
@@ -804,13 +804,13 @@
 
 \exercise
 We could also have defined @{const star} as follows:
-*}
+\<close>
 
 inductive star' :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" for r where
 refl': "star' r x x" |
 step': "star' r x y \<Longrightarrow> r y z \<Longrightarrow> star' r x z"
 
-text{*
+text\<open>
 The single @{text r} step is performed after rather than before the @{text star'}
 steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
 @{prop "star r x y \<Longrightarrow> star' r x y"}. You may need lemmas.
@@ -877,7 +877,7 @@
 some suitable value of @{text "?"}.
 \end{exercise}
 \fi
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Types_and_funs.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Types_and_funs.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,16 +3,16 @@
 imports Main
 begin
 (*>*)
-text{*
+text\<open>
 \vspace{-5ex}
 \section{Type and Function Definitions}
 
 Type synonyms are abbreviations for existing types, for example
-\index{string@@{text string}}*}
+\index{string@@{text string}}\<close>
 
 type_synonym string = "char list"
 
-text{*
+text\<open>
 Type synonyms are expanded after parsing and are not present in the internal representation or in the output. They are mere conveniences for the reader.
 
 \subsection{Datatypes}
@@ -54,22 +54,22 @@
 Case expressions must be enclosed in parentheses.
 
 As an example of a datatype beyond @{typ nat} and @{text list}, consider binary trees:
-*}
+\<close>
 
 datatype 'a tree = Tip | Node  "'a tree"  'a  "'a tree"
 
-text{* with a mirror function: *}
+text\<open>with a mirror function:\<close>
 
 fun mirror :: "'a tree \<Rightarrow> 'a tree" where
 "mirror Tip = Tip" |
 "mirror (Node l a r) = Node (mirror r) a (mirror l)"
 
-text{* The following lemma illustrates induction: *}
+text\<open>The following lemma illustrates induction:\<close>
 
 lemma "mirror(mirror t) = t"
 apply(induction t)
 
-txt{* yields
+txt\<open>yields
 @{subgoals[display]}
 The induction step contains two induction hypotheses, one for each subtree.
 An application of @{text auto} finishes the proof.
@@ -81,7 +81,7 @@
 elements of @{typ 'a}, you wrap them up in @{const Some} and call
 the new type @{typ"'a option"}. A typical application is a lookup function
 on a list of key-value pairs, often called an association list:
-*}
+\<close>
 (*<*)
 apply auto
 done
@@ -90,7 +90,7 @@
 "lookup [] x = None" |
 "lookup ((a,b) # ps) x = (if a = x then Some b else lookup ps x)"
 
-text{*
+text\<open>
 Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
 Pairs can be taken apart either by pattern matching (as above) or with the
 projection functions @{const fst} and @{const snd}: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
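text\<open>A small sanity check for @{const lookup}, evaluated purely by
simplification (an illustrative sketch):\<close>

lemma "lookup [(1::nat, False), (2, True)] 2 = Some True"
by simp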
@@ -101,23 +101,23 @@
 \subsection{Definitions}
 
 Non-recursive functions can be defined as in the following example:
-\index{definition@\isacom{definition}}*}
+\index{definition@\isacom{definition}}\<close>
 
 definition sq :: "nat \<Rightarrow> nat" where
 "sq n = n * n"
 
-text{* Such definitions do not allow pattern matching but only
+text\<open>Such definitions do not allow pattern matching but only
 @{text"f x\<^sub>1 \<dots> x\<^sub>n = t"}, where @{text f} does not occur in @{text t}.
 
 \subsection{Abbreviations}
 
 Abbreviations are similar to definitions:
-\index{abbreviation@\isacom{abbreviation}}*}
+\index{abbreviation@\isacom{abbreviation}}\<close>
 
 abbreviation sq' :: "nat \<Rightarrow> nat" where
 "sq' n \<equiv> n * n"
 
-text{* The key difference is that @{const sq'} is only syntactic sugar:
+text\<open>The key difference is that @{const sq'} is only syntactic sugar:
 after parsing, @{term"sq' t"} is replaced by \mbox{@{term"t*t"}};
 before printing, every occurrence of @{term"u*u"} is replaced by
 \mbox{@{term"sq' u"}}.  Internally, @{const sq'} does not exist.
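text\<open>A sketch of the contrast: the abbreviation unfolds by itself, whereas
the definition must be unfolded explicitly, here via the generated equation
@{thm[source] sq_def}:\<close>

lemma "sq' n = sq n"
by (simp add: sq_def)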
@@ -153,14 +153,14 @@
 Functions defined with \isacom{fun} come with their own induction schema
 that mirrors the recursion schema and is derived from the termination
 order. For example,
-*}
+\<close>
 
 fun div2 :: "nat \<Rightarrow> nat" where
 "div2 0 = 0" |
 "div2 (Suc 0) = 0" |
 "div2 (Suc(Suc n)) = Suc(div2 n)"
 
-text{* does not just define @{const div2} but also proves a
+text\<open>does not just define @{const div2} but also proves a
 customized induction rule:
 \[
 \inferrule{
@@ -170,12 +170,12 @@
 {\mbox{@{thm (concl) div2.induct[of _ "m"]}}}
 \]
 This customized induction rule can simplify inductive proofs. For example,
-*}
+\<close>
 
 lemma "div2(n) = n div 2"
 apply(induction n rule: div2.induct)
 
-txt{* (where the infix @{text div} is the predefined division operation)
+txt\<open>(where the infix @{text div} is the predefined division operation)
 yields the subgoals
 @{subgoals[display,margin=65]}
 An application of @{text auto} finishes the proof.
@@ -260,7 +260,7 @@
 append is linear in its first argument.  A linear time version of
 @{const rev} requires an extra argument where the result is accumulated
 gradually, using only~@{text"#"}:
-*}
+\<close>
 (*<*)
 apply auto
 done
@@ -269,7 +269,7 @@
 "itrev []        ys = ys" |
 "itrev (x#xs) ys = itrev xs (x#ys)"
 
-text{* The behaviour of @{const itrev} is simple: it reverses
+text\<open>The behaviour of @{const itrev} is simple: it reverses
 its first argument by stacking its elements onto the second argument,
 and it returns that second argument when the first one becomes
 empty. Note that @{const itrev} is tail-recursive: it can be
@@ -277,17 +277,17 @@
 
 Naturally, we would like to show that @{const itrev} does indeed reverse
 its first argument provided the second one is empty:
-*}
+\<close>
 
 lemma "itrev xs [] = rev xs"
 
-txt{* There is no choice as to the induction variable:
-*}
+txt\<open>There is no choice as to the induction variable:
+\<close>
 
 apply(induction xs)
 apply(auto)
 
-txt{*
+txt\<open>
 Unfortunately, this attempt does not prove
 the induction step:
 @{subgoals[display,margin=70]}
@@ -299,11 +299,11 @@
 \end{quote}
 Of course one cannot do this naively: @{prop"itrev xs ys = rev xs"} is
 just not true.  The correct generalization is
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "itrev xs ys = rev xs @ ys"
 (*<*)apply(induction xs, auto)(*>*)
-txt{*
+txt\<open>
 If @{text ys} is replaced by @{term"[]"}, the right-hand side simplifies to
 @{term"rev xs"}, as required.
 In this instance it was easy to guess the right generalization.
@@ -320,21 +320,21 @@
 @{term"a # ys"} instead of @{text ys}. Hence we prove the theorem
 for all @{text ys} instead of a fixed one. We can instruct induction
 to perform this generalization for us by adding @{text "arbitrary: ys"}\index{arbitrary@@{text"arbitrary:"}}.
-*}
+\<close>
 (*<*)oops
 lemma "itrev xs ys = rev xs @ ys"
 (*>*)
 apply(induction xs arbitrary: ys)
 
-txt{* The induction hypothesis in the induction step is now universally quantified over @{text ys}:
+txt\<open>The induction hypothesis in the induction step is now universally quantified over @{text ys}:
 @{subgoals[display,margin=65]}
 Thus the proof succeeds:
-*}
+\<close>
 
 apply auto
 done
 
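text\<open>For reference, the generalized statement and its proof fit on a single
line; a condensed sketch of the induction just performed:\<close>

lemma "itrev xs ys = rev xs @ ys"
by (induction xs arbitrary: ys) auto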
-text{*
+text\<open>
 This leads to another heuristic for generalization:
 \begin{quote}
 \emph{Generalize induction by generalizing all free
@@ -547,7 +547,7 @@
 Define a function @{text "nodes :: tree0 \<Rightarrow> nat"} that counts the number of
 all nodes (inner nodes and leaves) in such a tree.
 Consider the following recursive function:
-*}
+\<close>
 (*<*)
 datatype tree0 = Tip | Node tree0 tree0
 (*>*)
@@ -555,7 +555,7 @@
 "explode 0 t = t" |
 "explode (Suc n) t = explode n (Node t t)"
 
-text {*
+text \<open>
 Find an equation expressing the size of a tree after exploding it
 (\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
 of @{term "nodes t"} and @{text n}. Prove your equation.
@@ -569,11 +569,11 @@
 \exercise
 Define arithmetic expressions in one variable over integers (type @{typ int})
 as a data type:
-*}
+\<close>
 
 datatype exp = Var | Const int | Add exp exp | Mult exp exp
 
-text{*
+text\<open>
 Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
 such that @{term"eval e x"} evaluates @{text e} at the value
 @{text x}.
@@ -589,7 +589,7 @@
 \mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
 Hint: consider the hint in Exercise~\ref{exe:tree0}.
 \endexercise
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Sugar/Sugar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Sugar/Sugar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,7 +7,7 @@
 no_translations
   ("prop") "P \<and> Q \<Longrightarrow> R" <= ("prop") "P \<Longrightarrow> Q \<Longrightarrow> R"
 (*>*)
-text{*
+text\<open>
 \section{Introduction}
 
 This document is for those Isabelle users who have mastered
@@ -142,13 +142,13 @@
 \end{quote}
 into the relevant \texttt{ROOT} file, just before the \texttt{theories} for that session.
 The rest of this document is produced with this flag set to \texttt{false}.
-*}
+\<close>
 
 (*<*)declare [[show_question_marks = false]](*>*)
 
-subsection {*Qualified names*}
+subsection \<open>Qualified names\<close>
 
-text{* If there are multiple declarations of the same name, Isabelle prints
+text\<open>If there are multiple declarations of the same name, Isabelle prints
 the qualified name, for example @{text "T.length"}, where @{text T} is the
 theory it is defined in, to distinguish it from the predefined @{const[source]
 "List.length"}. In case there is no danger of confusion, you can insist on
@@ -201,10 +201,10 @@
 \end{quote}
 
 Sometimes Isabelle $\eta$-contracts terms, for example in the following definition:
-*}
+\<close>
 fun eta where
 "eta (x \<cdot> xs) = (\<forall>y \<in> set xs. x < y)"
-text{*
+text\<open>
 \noindent
 If you now print the defining equation, the result is not what you hoped for:
 \begin{quote}
@@ -287,12 +287,12 @@
 When displaying theorems with the \texttt{display} option, for example as in
 \verb!@!\verb!{thm[display] refl}! @{thm[display] refl} the theorem is
 set in small font. It uses the \LaTeX-macro \verb!\isastyle!,
-which is also the style that regular theory text is set in, e.g. *}
+which is also the style that regular theory text is set in, e.g.\<close>
 
 lemma "t = t"
 (*<*)oops(*>*)
 
-text{* \noindent Otherwise \verb!\isastyleminor! is used,
+text\<open>\noindent Otherwise \verb!\isastyleminor! is used,
 which does not modify the font size (assuming you stick to the default
 \verb!\isabellestyle{it}! in \texttt{root.tex}). If you prefer
 normal font size throughout your text, include
@@ -447,23 +447,23 @@
 papers, but some key lemmas might be of interest.
 It is usually easiest to put them in figures like the one in Fig.\
 \ref{fig:proof}. This was achieved with the \isakeyword{text\_raw} command:
-*}
-text_raw {*
+\<close>
+text_raw \<open>
   \begin{figure}
   \begin{center}\begin{minipage}{0.6\textwidth}  
   \isastyleminor\isamarkuptrue
-*}
+\<close>
 lemma True
 proof -
-  -- "pretty trivial"
+  \<comment> "pretty trivial"
   show True by force
 qed
-text_raw {*    
+text_raw \<open>
   \end{minipage}\end{center}
   \caption{Example proof in a figure.}\label{fig:proof}
   \end{figure}
-*}
-text {*
+\<close>
+text \<open>
 
 \begin{quote}
 \small
@@ -574,7 +574,7 @@
 \texttt{const\_typ} defined in \texttt{LaTeXsugar}. For example,
 \verb!@!\verb!{const_typ length}! produces @{const_typ length}.
 
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Advanced/Partial.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/Partial.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (*<*)theory Partial imports While_Combinator begin(*>*)
 
-text{*\noindent Throughout this tutorial, we have emphasized
+text\<open>\noindent Throughout this tutorial, we have emphasized
 that all functions in HOL are total.  We cannot hope to define
 truly partial functions, but must make them total.  A straightforward
 method is to lift the result type of the function from $\tau$ to
@@ -23,29 +23,29 @@
 We have already seen an instance of underdefinedness by means of
 non-exhaustive pattern matching: the definition of @{term last} in
 \S\ref{sec:fun}. The same is allowed for \isacommand{primrec}
-*}
+\<close>
 
 consts hd :: "'a list \<Rightarrow> 'a"
 primrec "hd (x#xs) = x"
 
-text{*\noindent
+text\<open>\noindent
 although it generates a warning.
 Even ordinary definitions allow underdefinedness, this time by means of
 preconditions:
-*}
+\<close>
 
 definition subtract :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "n \<le> m \<Longrightarrow> subtract m n \<equiv> m - n"
 
-text{*
+text\<open>
 The rest of this section is devoted to the question of how to define
 partial recursive functions by other means than non-exhaustive pattern
 matching.
-*}
+\<close>
 
-subsubsection{*Guarded Recursion*}
+subsubsection\<open>Guarded Recursion\<close>
 
-text{* 
+text\<open>
 \index{recursion!guarded}%
 Neither \isacommand{primrec} nor \isacommand{recdef} allows prefixing
 an equation with a condition in the way ordinary definitions do
@@ -59,14 +59,14 @@
 which is ideal for specifying underdefined functions on top of it.
 
 As a simple example we define division on @{typ nat}:
-*}
+\<close>
 
 consts divi :: "nat \<times> nat \<Rightarrow> nat"
 recdef divi "measure(\<lambda>(m,n). m)"
   "divi(m,0) = arbitrary"
   "divi(m,n) = (if m < n then 0 else divi(m-n,n)+1)"
 
-text{*\noindent Of course we could also have defined
+text\<open>\noindent Of course we could also have defined
 @{term"divi(m,0)"} to be some specific number, for example 0. The
 latter option is chosen for the predefined @{text div} function, which
 simplifies proofs at the expense of deviating from the
@@ -83,14 +83,14 @@
 known \emph{Union-Find} algorithm.
 The snag is that it may not terminate if @{term f} has non-trivial cycles.
 Phrased differently, the relation
-*}
+\<close>
 
 definition step1 :: "('a \<Rightarrow> 'a) \<Rightarrow> ('a \<times> 'a)set" where
   "step1 f \<equiv> {(y,x). y = f x \<and> y \<noteq> x}"
 
-text{*\noindent
+text\<open>\noindent
 must be well-founded. Thus we make the following definition:
-*}
+\<close>
 
 consts find :: "('a \<Rightarrow> 'a) \<times> 'a \<Rightarrow> 'a"
 recdef find "same_fst (\<lambda>f. wf(step1 f)) step1"
@@ -99,7 +99,7 @@
                 else arbitrary)"
 (hints recdef_simp: step1_def)
 
-text{*\noindent
+text\<open>\noindent
 The recursion equation itself should be clear enough: it is our aborted
 first attempt augmented with a check that there are no non-trivial loops.
 To express the required well-founded relation we employ the
@@ -122,29 +122,29 @@
 
 Normally you will then derive the following conditional variant from
 the recursion equation:
-*}
+\<close>
 
 lemma [simp]:
   "wf(step1 f) \<Longrightarrow> find(f,x) = (if f x = x then x else find(f, f x))"
 by simp
 
-text{*\noindent Then you should disable the original recursion equation:*}
+text\<open>\noindent Then you should disable the original recursion equation:\<close>
 
 declare find.simps[simp del]
 
-text{*
+text\<open>
 Reasoning about such underdefined functions is like that for other
 recursive functions.  Here is a simple example of recursion induction:
-*}
+\<close>
 
 lemma "wf(step1 f) \<longrightarrow> f(find(f,x)) = find(f,x)"
 apply(induct_tac f x rule: find.induct)
 apply simp
 done
 
-subsubsection{*The {\tt\slshape while} Combinator*}
+subsubsection\<open>The {\tt\slshape while} Combinator\<close>
 
-text{*If the recursive function happens to be tail recursive, its
+text\<open>If the recursive function happens to be tail recursive, its
 definition becomes a triviality if based on the predefined \cdx{while}
 combinator.  The latter lives in the Library theory \thydx{While_Combinator}.
 % which is not part of {text Main} but needs to
@@ -158,13 +158,13 @@
 \end{verbatim}
 In general, @{term s} will be a tuple or record.  As an example
 consider the following definition of function @{const find}:
-*}
+\<close>
 
 definition find2 :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a" where
   "find2 f x \<equiv>
    fst(while (\<lambda>(x,x'). x' \<noteq> x) (\<lambda>(x,x'). (x',f x')) (x,f x))"
 
-text{*\noindent
+text\<open>\noindent
 The loop operates on two ``local variables'' @{term x} and @{term x'}
 containing the ``current'' and the ``next'' value of function @{term f}.
 They are initialized with the global @{term x} and @{term"f x"}. At the
@@ -185,7 +185,7 @@
 of induction we apply the above while rule, suitably instantiated.
 Only the final premise of @{thm[source]while_rule} is left unproved
 by @{text auto} but falls to @{text simp}:
-*}
+\<close>
 
 lemma lem: "wf(step1 f) \<Longrightarrow>
   \<exists>y. while (\<lambda>(x,x'). x' \<noteq> x) (\<lambda>(x,x'). (x',f x')) (x,f x) = (y,y) \<and>
@@ -196,16 +196,16 @@
 apply(simp add: inv_image_def step1_def)
 done
 
-text{*
+text\<open>
 The theorem itself is a simple consequence of this lemma:
-*}
+\<close>
 
 theorem "wf(step1 f) \<Longrightarrow> f(find2 f x) = find2 f x"
 apply(drule_tac x = x in lem)
 apply(auto simp add: find2_def)
 done
 
-text{* Let us conclude this section on partial functions by a
+text\<open>Let us conclude this section on partial functions by a
 discussion of the merits of the @{term while} combinator. We have
 already seen that the advantage of not having to
 provide a termination argument when defining a function via @{term
@@ -219,6 +219,6 @@
 definition that is impossible to execute or prohibitively slow.
 Thus, if you are aiming for an efficiently executable definition
 of a partial function, you are likely to need @{term while}.
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Advanced/WFrec.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/WFrec.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 (*<*)theory WFrec imports Main begin(*>*)
 
-text{*\noindent
+text\<open>\noindent
 So far, all recursive definitions were shown to terminate via measure
 functions. Sometimes this can be inconvenient or
 impossible. Fortunately, \isacommand{recdef} supports much more
 general definitions. For example, termination of Ackermann's function
 can be shown by means of the \rmindex{lexicographic product} @{text"<*lex*>"}:
-*}
+\<close>
 
 consts ack :: "nat\<times>nat \<Rightarrow> nat"
 recdef ack "measure(\<lambda>m. m) <*lex*> measure(\<lambda>n. n)"
@@ -14,7 +14,7 @@
   "ack(Suc m,0)     = ack(m, 1)"
   "ack(Suc m,Suc n) = ack(m,ack(Suc m,n))"
 
-text{*\noindent
+text\<open>\noindent
 The lexicographic product decreases if either its first component
 decreases (as in the second equation and in the outer call in the
 third equation) or its first component stays the same and the second
@@ -39,7 +39,7 @@
 product of two well-founded relations is again well-founded, which we relied
 on when defining Ackermann's function above.
 Of course the lexicographic product can also be iterated:
-*}
+\<close>
 
 consts contrived :: "nat \<times> nat \<times> nat \<Rightarrow> nat"
 recdef contrived
@@ -49,7 +49,7 @@
 "contrived(Suc i,0,0) = contrived(i,i,i)"
 "contrived(0,0,0)     = 0"
 
-text{*
+text\<open>
 Lexicographic products of measure functions already go a long
 way. Furthermore, you may embed a type in an
 existing well-founded relation via the inverse image construction @{term
@@ -64,42 +64,42 @@
 \isacommand{recdef}.  For example, the greater-than relation can be made
 well-founded by cutting it off at a certain point.  Here is an example
 of a recursive function that calls itself with increasing values up to ten:
-*}
+\<close>
 
 consts f :: "nat \<Rightarrow> nat"
 recdef (*<*)(permissive)(*>*)f "{(i,j). j<i \<and> i \<le> (10::nat)}"
 "f i = (if 10 \<le> i then 0 else i * f(Suc i))"
 
-text{*\noindent
+text\<open>\noindent
 Since \isacommand{recdef} is not prepared for the relation supplied above,
 Isabelle rejects the definition.  We should first have proved that
 our relation was well-founded:
-*}
+\<close>
 
 lemma wf_greater: "wf {(i,j). j<i \<and> i \<le> (N::nat)}"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by showing that our relation is a subset of another well-founded
 relation: one given by a measure function.\index{*wf_subset (theorem)}
-*}
+\<close>
 
 apply (rule wf_subset [of "measure (\<lambda>k::nat. N-k)"], blast)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 \noindent
 The inclusion remains to be proved. After unfolding some definitions, 
 we are left with simple arithmetic that is dispatched automatically.
-*}
+\<close>
 
 by (clarify, simp add: measure_def inv_image_def)
 
-text{*\noindent
+text\<open>\noindent
 
 Armed with this lemma, we use the \attrdx{recdef_wf} attribute to attach a
 crucial hint\cmmdx{hints} to our definition:
-*}
+\<close>
 (*<*)
 consts g :: "nat \<Rightarrow> nat"
 recdef g "{(i,j). j<i \<and> i \<le> (10::nat)}"
@@ -107,13 +107,13 @@
 (*>*)
 (hints recdef_wf: wf_greater)
 
-text{*\noindent
+text\<open>\noindent
 Alternatively, we could have given @{text "measure (\<lambda>k::nat. 10-k)"} for the
 well-founded relation in our \isacommand{recdef}.  However, the arithmetic
 goal in the lemma above would have arisen instead in the \isacommand{recdef}
 termination proof, where we have less control.  A tailor-made termination
 relation makes even more sense when it can be used in several function
 declarations.
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Advanced/simp2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/simp2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,19 +2,19 @@
 theory simp2 imports Main begin
 (*>*)
 
-section{*Simplification*}
+section\<open>Simplification\<close>
 
-text{*\label{sec:simplification-II}\index{simplification|(}
+text\<open>\label{sec:simplification-II}\index{simplification|(}
 This section describes features not covered until now.  It also
 outlines the simplification process itself, which can be helpful
 when the simplifier does not do what you expect of it.
-*}
+\<close>
 
-subsection{*Advanced Features*}
+subsection\<open>Advanced Features\<close>
 
-subsubsection{*Congruence Rules*}
+subsubsection\<open>Congruence Rules\<close>
 
-text{*\label{sec:simp-cong}
+text\<open>\label{sec:simp-cong}
 While simplifying the conclusion $Q$
 of $P \Imp Q$, it is legal to use the assumption $P$.
 For $\Imp$ this policy is hardwired, but 
@@ -62,11 +62,11 @@
 \par\noindent
 is occasionally useful but is not a default rule; you have to declare it explicitly.
 \end{warn}
-*}
+\<close>
 
-subsubsection{*Permutative Rewrite Rules*}
+subsubsection\<open>Permutative Rewrite Rules\<close>
 
-text{*
+text\<open>
 \index{rewrite rules!permutative|bold}%
 An equation is a \textbf{permutative rewrite rule} if the left-hand
 side and right-hand side are the same up to renaming of variables.  The most
@@ -105,20 +105,20 @@
 Note that ordered rewriting for @{text"+"} and @{text"*"} on numbers is rarely
 necessary because the built-in arithmetic prover often succeeds without
 such tricks.
-*}
+\<close>
 
-subsection{*How the Simplifier Works*}
+subsection\<open>How the Simplifier Works\<close>
 
-text{*\label{sec:SimpHow}
+text\<open>\label{sec:SimpHow}
 Roughly speaking, the simplifier proceeds bottom-up: subterms are simplified
 first.  A conditional equation is only applied if its condition can be
 proved, again by simplification.  Below we explain some special features of
 the rewriting process. 
-*}
+\<close>
 
-subsubsection{*Higher-Order Patterns*}
+subsubsection\<open>Higher-Order Patterns\<close>
 
-text{*\index{simplification rule|(}
+text\<open>\index{simplification rule|(}
 So far we have pretended the simplifier can deal with arbitrary
 rewrite rules. This is not quite true.  For reasons of feasibility,
 the simplifier expects the
@@ -153,11 +153,11 @@
   
 There is no restriction on the form of the right-hand
 sides.  They may not contain extraneous term or type variables, though.
-*}
+\<close>
 
-subsubsection{*The Preprocessor*}
+subsubsection\<open>The Preprocessor\<close>
 
-text{*\label{sec:simp-preprocessor}
+text\<open>\label{sec:simp-preprocessor}
 When a theorem is declared a simplification rule, it need not be a
 conditional equation already.  The simplifier will turn it into a set of
 conditional equations automatically.  For example, @{prop"f x =
@@ -183,7 +183,7 @@
 \end{center}
 \index{simplification rule|)}
 \index{simplification|)}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/CTL/Base.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/Base.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory Base imports Main begin(*>*)
 
-section{*Case Study: Verified Model Checking*}
+section\<open>Case Study: Verified Model Checking\<close>
 
-text{*\label{sec:VMC}
+text\<open>\label{sec:VMC}
 This chapter ends with a case study concerning model checking for 
 Computation Tree Logic (CTL), a temporal logic.
 Model checking is a popular technique for the verification of finite
@@ -54,11 +54,11 @@
 
 Abstracting from this concrete example, we assume there is a type of
 states:
-*}
+\<close>
 
 typedecl state
 
-text{*\noindent
+text\<open>\noindent
 Command \commdx{typedecl} merely declares a new type without
 defining it (see \S\ref{sec:typedecl}). Thus we know nothing
 about the type other than its existence. That is exactly what we need
@@ -67,25 +67,25 @@
 parameter of everything but declaring @{typ state} globally as above
 parameter of everything but declaring @{typ state} globally as above
 reduces clutter.  Similarly we declare an arbitrary but fixed
 transition system, i.e.\ a relation between states:
-*}
+\<close>
 
 consts M :: "(state \<times> state)set"
 
-text{*\noindent
+text\<open>\noindent
 This is Isabelle's way of declaring a constant without defining it.
 Finally we introduce a type of atomic propositions
-*}
+\<close>
 
 typedecl "atom"
 
-text{*\noindent
+text\<open>\noindent
 and a \emph{labelling function}
-*}
+\<close>
 
 consts L :: "state \<Rightarrow> atom set"
 
-text{*\noindent
+text\<open>\noindent
 telling us which atomic propositions are true in each state.
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/CTL/CTL.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/CTL.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,14 +1,14 @@
 (*<*)theory CTL imports Base begin(*>*)
 
-subsection{*Computation Tree Logic --- CTL*}
+subsection\<open>Computation Tree Logic --- CTL\<close>
 
-text{*\label{sec:CTL}
+text\<open>\label{sec:CTL}
 \index{CTL|(}%
 The semantics of PDL only needs reflexive transitive closure.
 Let us be adventurous and introduce a more expressive temporal operator.
 We extend the datatype
 @{text formula} by a new constructor
-*}
+\<close>
 (*<*)
 datatype formula = Atom "atom"
                   | Neg formula
@@ -17,23 +17,23 @@
                   | EF formula(*>*)
                   | AF formula
 
-text{*\noindent
+text\<open>\noindent
 which stands for ``\emph{A}lways in the \emph{F}uture'':
 on all infinite paths, at some point the formula holds.
 Formalizing the notion of an infinite path is easy
 in HOL: it is simply a function from @{typ nat} to @{typ state}.
-*}
+\<close>
 
 definition Paths :: "state \<Rightarrow> (nat \<Rightarrow> state)set" where
 "Paths s \<equiv> {p. s = p 0 \<and> (\<forall>i. (p i, p(i+1)) \<in> M)}"
 
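text\<open>For instance, as a sketch: any function whose successive values are
always related by @{const M} is a path starting at its own first value:\<close>

lemma "(\<forall>i. (p i, p(i+1)) \<in> M) \<Longrightarrow> p \<in> Paths (p 0)"
by (simp add: Paths_def)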
-text{*\noindent
+text\<open>\noindent
 This definition allows a succinct statement of the semantics of @{const AF}:
 \footnote{Do not be misled: neither datatypes nor recursive functions can be
 extended by new constructors or equations. This is just a trick of the
 presentation (see \S\ref{sec:doc-prep-suppress}). In reality one has to define
 a new datatype and a new function.}
-*}
+\<close>
 (*<*)
 primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool" ("(_ \<Turnstile> _)" [80,80] 80) where
 "s \<Turnstile> Atom a  =  (a \<in> L s)" |
@@ -44,18 +44,18 @@
 (*>*)
 "s \<Turnstile> AF f    = (\<forall>p \<in> Paths s. \<exists>i. p i \<Turnstile> f)"
 
-text{*\noindent
+text\<open>\noindent
 Model checking @{const AF} involves a function which
 is just complicated enough to warrant a separate definition:
-*}
+\<close>
 
 definition af :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
 "af A T \<equiv> A \<union> {s. \<forall>t. (s, t) \<in> M \<longrightarrow> t \<in> T}"
 
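text\<open>A first trivial property, as a sketch: @{term A} is always contained
in @{term"af A T"}:\<close>

lemma "A \<subseteq> af A T"
by (auto simp add: af_def)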
-text{*\noindent
+text\<open>\noindent
 Now we define @{term "mc(AF f)"} as the least set @{term T} that includes
 @{term"mc f"} and all states all of whose direct successors are in @{term T}:
-*}
+\<close>
 (*<*)
 primrec mc :: "formula \<Rightarrow> state set" where
 "mc(Atom a)  = {s. a \<in> L s}" |
@@ -65,10 +65,10 @@
 "mc(EF f)    = lfp(\<lambda>T. mc f \<union> M\<inverse> `` T)"|(*>*)
 "mc(AF f)    = lfp(af(mc f))"
 
-text{*\noindent
+text\<open>\noindent
 Because @{const af} is monotone in its second argument (and also its first, but
 that is irrelevant), @{term"af A"} has a least fixed point:
-*}
+\<close>
 
 lemma mono_af: "mono(af A)"
 apply(simp add: mono_def af_def)
@@ -96,16 +96,16 @@
 apply(subst lfp_unfold[OF mono_ef])
 by(blast)
 (*>*)
-text{*
+text\<open>
 All we need to prove now is  @{prop"mc(AF f) = {s. s \<Turnstile> AF f}"}, which states
 that @{term mc} and @{text"\<Turnstile>"} agree for @{const AF}\@.
 This time we prove the two inclusions separately, starting
 with the easy one:
-*}
+\<close>
 
 theorem AF_lemma1: "lfp(af A) \<subseteq> {s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A}"
 
-txt{*\noindent
+txt\<open>\noindent
 In contrast to the analogous proof for @{const EF}, and just
 for a change, we do not use fixed point induction.  Park-induction,
 named after David Park, is weaker but sufficient for this proof:
@@ -114,24 +114,24 @@
 \end{center}
 The instance of the premise @{prop"f S \<subseteq> S"} is proved pointwise,
 a decision that \isa{auto} takes for us:
-*}
+\<close>
 apply(rule lfp_lowerbound)
 apply(auto simp add: af_def Paths_def)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=70,goals_limit=1]}
 In this remaining case, we set @{term t} to @{term"p(1::nat)"}.
 The rest is automatic, which is surprising because it involves
 finding the instantiation @{term"\<lambda>i::nat. p(i+1)"}
 for @{text"\<forall>p"}.
-*}
+\<close>
 
 apply(erule_tac x = "p 1" in allE)
 apply(auto)
 done
 
 
-text{*
+text\<open>
 The opposite inclusion is proved by contradiction: if some state
 @{term s} is not in @{term"lfp(af A)"}, then we can construct an
 infinite @{term A}-avoiding path starting from~@{term s}. The reason is
@@ -143,7 +143,7 @@
 
 The one-step argument in the sketch above
 is proved by a variant of contraposition:
-*}
+\<close>
 
 lemma not_in_lfp_afD:
  "s \<notin> lfp(af A) \<Longrightarrow> s \<notin> A \<and> (\<exists> t. (s,t) \<in> M \<and> t \<notin> lfp(af A))"
@@ -152,20 +152,20 @@
 apply(simp add: af_def)
 done
 
-text{*\noindent
+text\<open>\noindent
 We assume the negation of the conclusion and prove @{term"s : lfp(af A)"}.
 Unfolding @{const lfp} once and
 simplifying with the definition of @{const af} finishes the proof.
 
 Now we iterate this process. The following construction of the desired
 path is parameterized by a predicate @{term Q} that should hold along the path:
-*}
+\<close>
 
 primrec path :: "state \<Rightarrow> (state \<Rightarrow> bool) \<Rightarrow> (nat \<Rightarrow> state)" where
 "path s Q 0 = s" |
 "path s Q (Suc n) = (SOME t. (path s Q n,t) \<in> M \<and> Q t)"
 
-text{*\noindent
+text\<open>\noindent
 Element @{term"n+1::nat"} on this path is some arbitrary successor
 @{term t} of element @{term n} such that @{term"Q t"} holds.  Remember that @{text"SOME t. R t"}
 is some arbitrary but fixed @{term t} such that @{prop"R t"} holds (see \S\ref{sec:SOME}). Of
@@ -175,43 +175,43 @@
 
 Let us show that if each state @{term s} that satisfies @{term Q}
 has a successor that again satisfies @{term Q}, then there exists an infinite @{term Q}-path:
-*}
+\<close>
 
 lemma infinity_lemma:
   "\<lbrakk> Q s; \<forall>s. Q s \<longrightarrow> (\<exists> t. (s,t) \<in> M \<and> Q t) \<rbrakk> \<Longrightarrow>
    \<exists>p\<in>Paths s. \<forall>i. Q(p i)"
 
-txt{*\noindent
+txt\<open>\noindent
 First we rephrase the conclusion slightly because we need to prove simultaneously
 both the path property and the fact that @{term Q} holds:
-*}
+\<close>
 
 apply(subgoal_tac
   "\<exists>p. s = p 0 \<and> (\<forall>i::nat. (p i, p(i+1)) \<in> M \<and> Q(p i))")
 
-txt{*\noindent
+txt\<open>\noindent
 From this proposition the original goal follows easily:
-*}
+\<close>
 
  apply(simp add: Paths_def, blast)
 
-txt{*\noindent
+txt\<open>\noindent
 The new subgoal is proved by providing the witness @{term "path s Q"} for @{term p}:
-*}
+\<close>
 
 apply(rule_tac x = "path s Q" in exI)
 apply(clarsimp)
 
-txt{*\noindent
+txt\<open>\noindent
 After simplification and clarification, the subgoal has the following form:
 @{subgoals[display,indent=0,margin=70,goals_limit=1]}
 It invites a proof by induction on @{term i}:
-*}
+\<close>
 
 apply(induct_tac i)
  apply(simp)
 
-txt{*\noindent
+txt\<open>\noindent
 After simplification, the base case boils down to
 @{subgoals[display,indent=0,margin=70,goals_limit=1]}
 The conclusion looks exceedingly trivial: after all, @{term t} is chosen such that @{prop"(s,t):M"}
@@ -223,11 +223,11 @@
 two subgoals: @{prop"EX a. (s, a) : M & Q a"}, which follows from the assumptions, and
 @{prop"(s, x) : M & Q x ==> (s,x) : M"}, which is trivial. Thus it is not surprising that
 @{text fast} can prove the base case quickly:
-*}
+\<close>
 
  apply(fast intro: someI2_ex)
 
-txt{*\noindent
+txt\<open>\noindent
 What is worth noting here is that we have used \methdx{fast} rather than
 @{text blast}.  The reason is that @{text blast} would fail because it cannot
 cope with @{thm[source]someI2_ex}: unifying its conclusion with the current
@@ -242,7 +242,7 @@
 occurrences of @{text SOME}. As a result, @{text fast} is no longer able to
 solve the subgoal and we apply @{thm[source]someI2_ex} by hand.  We merely
 show the proof commands but do not describe the details:
-*}
+\<close>
 
 apply(simp)
 apply(rule someI2_ex)
@@ -252,7 +252,7 @@
 apply(blast)
 done
 
-text{*
+text\<open>
 Function @{const path} has fulfilled its purpose now and can be forgotten.
 It was merely defined to provide the witness in the proof of the
 @{thm[source]infinity_lemma}. Aficionados of minimal proofs might like to know
@@ -261,7 +261,7 @@
 @{term[display]"rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u)"}
 is extensionally equal to @{term"path s Q"},
 where @{term rec_nat} is the predefined primitive recursor on @{typ nat}.
-*}
+\<close>
 (*<*)
 lemma
 "\<lbrakk> Q s; \<forall> s. Q s \<longrightarrow> (\<exists> t. (s,t)\<in>M \<and> Q t) \<rbrakk> \<Longrightarrow>
@@ -284,37 +284,37 @@
 by(blast)
 (*>*)
 
-text{*
+text\<open>
 At last we can prove the opposite direction of @{thm[source]AF_lemma1}:
-*}
+\<close>
 
 theorem AF_lemma2: "{s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A} \<subseteq> lfp(af A)"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is again pointwise and then by contraposition:
-*}
+\<close>
 
 apply(rule subsetI)
 apply(erule contrapos_pp)
 apply simp
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
 Applying the @{thm[source]infinity_lemma} as a destruction rule leaves two subgoals, the second
 premise of @{thm[source]infinity_lemma} and the original subgoal:
-*}
+\<close>
 
 apply(drule infinity_lemma)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 Both are solved automatically:
-*}
+\<close>
 
  apply(auto dest: not_in_lfp_afD)
 done
 
-text{*
+text\<open>
 If you find these proofs too complicated, we recommend that you read
 \S\ref{sec:CTL-revisited}, where we show how inductive definitions lead to
 simpler arguments.
@@ -322,20 +322,20 @@
 The main theorem is proved as for PDL, except that we also derive the
 necessary equality @{text"lfp(af A) = ..."} by combining
 @{thm[source]AF_lemma1} and @{thm[source]AF_lemma2} on the spot:
-*}
+\<close>
 
 theorem "mc f = {s. s \<Turnstile> f}"
 apply(induct_tac f)
 apply(auto simp add: EF_lemma equalityI[OF AF_lemma1 AF_lemma2])
 done
 
-text{*
+text\<open>
 
 The language defined above is not quite CTL\@. The latter also includes an
 until-operator @{term"EU f g"} with semantics ``there \emph{E}xists a path
 where @{term f} is true \emph{U}ntil @{term g} becomes true''.  We need
 an auxiliary function:
-*}
+\<close>
 
 primrec
 until:: "state set \<Rightarrow> state set \<Rightarrow> state \<Rightarrow> state list \<Rightarrow> bool" where
@@ -345,7 +345,7 @@
  eusem :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
 "eusem A B \<equiv> {s. \<exists>p. until A B s p}"(*>*)
 
-text{*\noindent
+text\<open>\noindent
 Expressing the semantics of @{term EU} is now straightforward:
 @{prop[display]"s \<Turnstile> EU f g = (\<exists>p. until {t. t \<Turnstile> f} {t. t \<Turnstile> g} s p)"}
 Note that @{term EU} is not definable in terms of the other operators!
@@ -362,7 +362,7 @@
 %which enables you to read and write {text"E[f U g]"} instead of {term"EU f g"}.
 \end{exercise}
 For more CTL exercises see, for example, Huth and Ryan @{cite "Huth-Ryan-book"}.
-*}
+\<close>
 
 (*<*)
 definition eufix :: "state set \<Rightarrow> state set \<Rightarrow> state set \<Rightarrow> state set" where
@@ -435,7 +435,7 @@
 *)
 (*>*)
 
-text{* Let us close this section with a few words about the executability of
+text\<open>Let us close this section with a few words about the executability of
 our model checkers.  It is clear that if all sets are finite, they can be
 represented as lists and the usual set operations are easily
 implemented. Only @{const lfp} requires a little thought.  Fortunately, theory
@@ -445,5 +445,5 @@
 iterated application of @{term F} to~@{term"{}"} until a fixed point is
 reached. It is actually possible to generate executable functional programs
 from HOL definitions, but that is beyond the scope of the tutorial.%
-\index{CTL|)} *}
+\index{CTL|)}\<close>
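To illustrate the iteration idea: the key fact is presumably the
following lemma from the Library's While_Combinator theory.  This
sketch is not part of the changeset, and the exact name and statement
are quoted from memory, so treat both as assumptions:

  thm lfp_while
  (* roughly: mono f \<Longrightarrow> (\<And>X. X \<subseteq> C \<Longrightarrow> f X \<subseteq> C) \<Longrightarrow> finite C \<Longrightarrow>
     lfp f = while (\<lambda>A. f A \<noteq> A) f {} *)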
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/CTL/CTLind.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/CTLind.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory CTLind imports CTL begin(*>*)
 
-subsection{*CTL Revisited*}
+subsection\<open>CTL Revisited\<close>
 
-text{*\label{sec:CTL-revisited}
+text\<open>\label{sec:CTL-revisited}
 \index{CTL|(}%
 The purpose of this section is twofold: to demonstrate
 some of the induction principles and heuristics discussed above and to
@@ -22,7 +22,7 @@
 A}-avoiding path:
 % Second proof of opposite direction, directly by well-founded induction
 % on the initial segment of M that avoids A.
-*}
+\<close>
 
 inductive_set
   Avoid :: "state \<Rightarrow> state set \<Rightarrow> state set"
@@ -31,7 +31,7 @@
     "s \<in> Avoid s A"
   | "\<lbrakk> t \<in> Avoid s A; t \<notin> A; (t,u) \<in> M \<rbrakk> \<Longrightarrow> u \<in> Avoid s A"
 
-text{*
+text\<open>
 It is easy to see that for any infinite @{term A}-avoiding path @{term f}
 with @{prop"f(0::nat) \<in> Avoid s A"} there is an infinite @{term A}-avoiding path
 starting with @{term s} because (by definition of @{const Avoid}) there is a
@@ -40,7 +40,7 @@
 this requires the following
 reformulation, as explained in \S\ref{sec:ind-var-in-prems} above;
 the @{text rule_format} directive undoes the reformulation after the proof.
-*}
+\<close>
 
 lemma ex_infinite_path[rule_format]:
   "t \<in> Avoid s A  \<Longrightarrow>
@@ -52,7 +52,7 @@
 apply(simp_all add: Paths_def split: nat.split)
 done
 
-text{*\noindent
+text\<open>\noindent
 The base case (@{prop"t = s"}) is trivial and proved by @{text blast}.
 In the induction step, we have an infinite @{term A}-avoiding path @{term f}
 starting from @{term u}, a successor of @{term t}. Now we simply instantiate
@@ -66,12 +66,12 @@
 inductive proof this must be generalized to the statement that every point @{term t}
 ``between'' @{term s} and @{term A}, in other words all of @{term"Avoid s A"},
 is contained in @{term"lfp(af A)"}:
-*}
+\<close>
 
 lemma Avoid_in_lfp[rule_format(no_asm)]:
   "\<forall>p\<in>Paths s. \<exists>i. p i \<in> A \<Longrightarrow> t \<in> Avoid s A \<longrightarrow> t \<in> lfp(af A)"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by induction on the ``distance'' between @{term t} and @{term
 A}. Remember that @{prop"lfp(af A) = A \<union> M\<inverse> `` lfp(af A)"}.
 If @{term t} is already in @{term A}, then @{prop"t \<in> lfp(af A)"} is
@@ -85,14 +85,14 @@
 As we shall see presently, the absence of infinite @{term A}-avoiding paths
 starting from @{term s} implies well-foundedness of this relation. For the
 moment we assume this and proceed with the induction:
-*}
+\<close>
 
 apply(subgoal_tac "wf{(y,x). (x,y) \<in> M \<and> x \<in> Avoid s A \<and> x \<notin> A}")
  apply(erule_tac a = t in wf_induct)
  apply(clarsimp)
 (*<*)apply(rename_tac t)(*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 @{subgoals[display,indent=0,margin=65]}
 Now the induction hypothesis states that if @{prop"t \<notin> A"}
 then all successors of @{term t} that are in @{term"Avoid s A"} are in
@@ -104,13 +104,13 @@
 @{term"Avoid s A"}, because we also assume @{prop"t \<in> Avoid s A"}.
 Hence, by the induction hypothesis, all successors of @{term t} are indeed in
 @{term"lfp(af A)"}. Mechanically:
-*}
+\<close>
 
  apply(subst lfp_unfold[OF mono_af])
  apply(simp (no_asm) add: af_def)
  apply(blast intro: Avoid.intros)
 
-txt{*
+txt\<open>
 Having proved the main goal, we return to the proof obligation that the 
 relation used above is indeed well-founded. This is proved by contradiction: if
 the relation is not well-founded then there exists an infinite @{term
@@ -119,7 +119,7 @@
 @{thm[display]wf_iff_no_infinite_down_chain[no_vars]}
 From lemma @{thm[source]ex_infinite_path} the existence of an infinite
 @{term A}-avoiding path starting in @{term s} follows, contradiction.
-*}
+\<close>
 
 apply(erule contrapos_pp)
 apply(simp add: wf_iff_no_infinite_down_chain)
@@ -128,7 +128,7 @@
 apply(auto simp add: Paths_def)
 done
 
-text{*
+text\<open>
 The @{text"(no_asm)"} modifier of the @{text"rule_format"} directive in the
 statement of the lemma means
 that the assumption is left unchanged; otherwise the @{text"\<forall>p"} 
@@ -139,7 +139,7 @@
 The main theorem is simply the corollary where @{prop"t = s"},
 when the assumption @{prop"t \<in> Avoid s A"} is trivially true
 by the first @{const Avoid}-rule. Isabelle confirms this:%
-\index{CTL|)}*}
+\index{CTL|)}\<close>
 
 theorem AF_lemma2:  "{s. \<forall>p \<in> Paths s. \<exists> i. p i \<in> A} \<subseteq> lfp(af A)"
 by(auto elim: Avoid_in_lfp intro: Avoid.intros)
--- a/src/Doc/Tutorial/CTL/PDL.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/PDL.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory PDL imports Base begin(*>*)
 
-subsection{*Propositional Dynamic Logic --- PDL*}
+subsection\<open>Propositional Dynamic Logic --- PDL\<close>
 
-text{*\index{PDL|(}
+text\<open>\index{PDL|(}
 The formulae of PDL are built up from atomic propositions via
 negation and conjunction and the two temporal
 connectives @{text AX} and @{text EF}\@. Since formulae are essentially
@@ -10,7 +10,7 @@
 \footnote{The customary definition of PDL
 @{cite "HarelKT-DL"} looks quite different from ours, but the two are easily
 shown to be equivalent.}
-*}
+\<close>
 
 datatype formula = Atom "atom"
                   | Neg formula
@@ -18,13 +18,13 @@
                   | AX formula
                   | EF formula
 
-text{*\noindent
+text\<open>\noindent
 This resembles the boolean expression case study in
 \S\ref{sec:boolex}.
 A validity relation between states and formulae specifies the semantics.
 The syntax annotation allows us to write @{text"s \<Turnstile> f"} instead of
 \hbox{@{text"valid s f"}}. The definition is by recursion over the syntax:
-*}
+\<close>
 
 primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool"   ("(_ \<Turnstile> _)" [80,80] 80)
 where
@@ -34,7 +34,7 @@
 "s \<Turnstile> AX f    = (\<forall>t. (s,t) \<in> M \<longrightarrow> t \<Turnstile> f)" |
 "s \<Turnstile> EF f    = (\<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<Turnstile> f)"
 
-text{*\noindent
+text\<open>\noindent
 The first three equations should be self-explanatory. The temporal formula
 @{term"AX f"} means that @{term f} is true in \emph{A}ll ne\emph{X}t states whereas
 @{term"EF f"} means that there \emph{E}xists some \emph{F}uture state in which @{term f} is
@@ -43,7 +43,7 @@
 
 Now we come to the model checker itself. It maps a formula into the
 set of states where the formula is true.  It too is defined by
-recursion over the syntax: *}
+recursion over the syntax:\<close>
 
 primrec mc :: "formula \<Rightarrow> state set" where
 "mc(Atom a)  = {s. a \<in> L s}" |
@@ -52,7 +52,7 @@
 "mc(AX f)    = {s. \<forall>t. (s,t) \<in> M  \<longrightarrow> t \<in> mc f}" |
 "mc(EF f)    = lfp(\<lambda>T. mc f \<union> (M\<inverse> `` T))"
 
-text{*\noindent
+text\<open>\noindent
 Only the equation for @{term EF} deserves some comments. Remember that the
 postfix @{text"\<inverse>"} and the infix @{text"``"} are predefined and denote the
 converse of a relation and the image of a set under a relation.  Thus
@@ -65,40 +65,40 @@
 
 First we prove monotonicity of the function inside @{term lfp}
 in order to make sure it really has a least fixed point.
-*}
+\<close>
 
 lemma mono_ef: "mono(\<lambda>T. A \<union> (M\<inverse> `` T))"
 apply(rule monoI)
 apply blast
 done
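As a reminder of the @{text"\<inverse>"} and @{text"``"} notation used in the
@{term EF} clause above, here is a one-line sanity check; it is
illustrative only and not part of this changeset:

  lemma "(s, t) \<in> M \<Longrightarrow> s \<in> M\<inverse> `` {t}"
  by blast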
 
-text{*\noindent
+text\<open>\noindent
 Now we can relate model checking and semantics. For the @{text EF} case we need
 a separate lemma:
-*}
+\<close>
 
 lemma EF_lemma:
   "lfp(\<lambda>T. A \<union> (M\<inverse> `` T)) = {s. \<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<in> A}"
 
-txt{*\noindent
+txt\<open>\noindent
 The equality is proved in the canonical fashion by proving that each set
 includes the other; the inclusion is shown pointwise:
-*}
+\<close>
 
 apply(rule equalityI)
  apply(rule subsetI)
  apply(simp)(*<*)apply(rename_tac s)(*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 Simplification leaves us with the following first subgoal
 @{subgoals[display,indent=0,goals_limit=1]}
 which is proved by @{term lfp}-induction:
-*}
+\<close>
 
  apply(erule lfp_induct_set)
   apply(rule mono_ef)
  apply(simp)
-txt{*\noindent
+txt\<open>\noindent
 Having disposed of the monotonicity subgoal,
 simplification leaves us with the following goal:
 \begin{isabelle}
@@ -108,19 +108,19 @@
 \end{isabelle}
 It is proved by @{text blast}, using the transitivity of 
 \isa{M\isactrlsup {\isacharasterisk}}.
-*}
+\<close>
 
  apply(blast intro: rtrancl_trans)
 
-txt{*
+txt\<open>
 We now return to the second set inclusion subgoal, which is again proved
 pointwise:
-*}
+\<close>
 
 apply(rule subsetI)
 apply(simp, clarify)
 
-txt{*\noindent
+txt\<open>\noindent
 After simplification and clarification we are left with
 @{subgoals[display,indent=0,goals_limit=1]}
 This goal is proved by induction on @{term"(s,t)\<in>M\<^sup>*"}. But since the model
@@ -132,44 +132,44 @@
 It says that if @{prop"(a,b):r\<^sup>*"} and we know @{prop"P b"} then we can infer
 @{prop"P a"} provided each step backwards from a predecessor @{term z} of
 @{term b} preserves @{term P}.
-*}
+\<close>
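For reference, the rule reads roughly as follows.  The statement in the
comment is quoted from memory and should be checked against theory
Transitive_Closure; it is not part of this changeset:

  thm converse_rtrancl_induct
  (* \<lbrakk>(a, b) \<in> r\<^sup>*; P b;
      \<And>y z. \<lbrakk>(y, z) \<in> r; (z, b) \<in> r\<^sup>*; P z\<rbrakk> \<Longrightarrow> P y\<rbrakk> \<Longrightarrow> P a *)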
 
 apply(erule converse_rtrancl_induct)
 
-txt{*\noindent
+txt\<open>\noindent
 The base case
 @{subgoals[display,indent=0,goals_limit=1]}
 is solved by unrolling @{term lfp} once
-*}
+\<close>
 
  apply(subst lfp_unfold[OF mono_ef])
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
 and disposing of the resulting trivial subgoal automatically:
-*}
+\<close>
 
  apply(blast)
 
-txt{*\noindent
+txt\<open>\noindent
 The proof of the induction step is identical to the one for the base case:
-*}
+\<close>
 
 apply(subst lfp_unfold[OF mono_ef])
 apply(blast)
 done
 
-text{*
+text\<open>
 The main theorem is proved in the familiar manner: induction followed by
 @{text auto} augmented with the lemma as a simplification rule.
-*}
+\<close>
 
 theorem "mc f = {s. s \<Turnstile> f}"
 apply(induct_tac f)
 apply(auto simp add: EF_lemma)
 done
 
-text{*
+text\<open>
 \begin{exercise}
 @{term AX} has a dual operator @{term EN} 
 (``there exists a next state such that'')%
@@ -183,7 +183,7 @@
 @{prop[display]"(s \<Turnstile> EF f) = (s \<Turnstile> f | s \<Turnstile> EN(EF f))"}
 \end{exercise}
 \index{PDL|)}
-*}
+\<close>
 (*<*)
 theorem main: "mc f = {s. s \<Turnstile> f}"
 apply(induct_tac f)
--- a/src/Doc/Tutorial/CodeGen/CodeGen.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CodeGen/CodeGen.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory CodeGen imports Main begin
 (*>*)
 
-section{*Case Study: Compiling Expressions*}
+section\<open>Case Study: Compiling Expressions\<close>
 
-text{*\label{sec:ExprCompiler}
+text\<open>\label{sec:ExprCompiler}
 \index{compiling expressions example|(}%
 The task is to develop a compiler from a generic type of expressions (built
 from variables, constants and binary operations) to a stack machine.  This
@@ -13,45 +13,45 @@
 type of variables or values but make them type parameters.  Neither is there
 a fixed set of binary operations: instead the expression contains the
 appropriate function itself.
-*}
+\<close>
 
 type_synonym 'v binop = "'v \<Rightarrow> 'v \<Rightarrow> 'v"
 datatype (dead 'a, 'v) expr = Cex 'v
                       | Vex 'a
                       | Bex "'v binop"  "('a,'v)expr"  "('a,'v)expr"
 
-text{*\noindent
+text\<open>\noindent
 The three constructors represent constants, variables and the application of
 a binary operation to two subexpressions.
 
 The value of an expression with respect to an environment that maps variables to
 values is easily defined:
-*}
+\<close>
 
 primrec "value" :: "('a,'v)expr \<Rightarrow> ('a \<Rightarrow> 'v) \<Rightarrow> 'v" where
 "value (Cex v) env = v" |
 "value (Vex a) env = env a" |
 "value (Bex f e1 e2) env = f (value e1 env) (value e2 env)"
 
-text{*
+text\<open>
 The stack machine has three instructions: load a constant value onto the
 stack, load the contents of an address onto the stack, and apply a
 binary operation to the two topmost elements of the stack, replacing them by
 the result. As for @{text"expr"}, addresses and values are type parameters:
-*}
+\<close>
 
 datatype (dead 'a, 'v) instr = Const 'v
                        | Load 'a
                        | Apply "'v binop"
 
-text{*
+text\<open>
 The execution of the stack machine is modelled by a function
 @{text"exec"} that takes a list of instructions, a store (modelled as a
 function from addresses to values, just like the environment for
 evaluating expressions), and a stack (modelled as a list) of values,
 and returns the stack at the end of the execution --- the store remains
 unchanged:
-*}
+\<close>
 
 primrec exec :: "('a,'v)instr list \<Rightarrow> ('a\<Rightarrow>'v) \<Rightarrow> 'v list \<Rightarrow> 'v list"
 where
@@ -61,7 +61,7 @@
   | Load a   \<Rightarrow> exec is s ((s a)#vs)
   | Apply f  \<Rightarrow> exec is s ((f (hd vs) (hd(tl vs)))#(tl(tl vs))))"
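A small concrete run may help.  The check below is illustrative only
and not part of this changeset; the store @{text"(\<lambda>a. 2)"} maps every
address to 2:

  lemma "exec [Load ''x'', Const (1::nat), Apply (\<lambda>x y. x + y)] (\<lambda>a. 2) [] = [3]"
  by simp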
 
-text{*\noindent
+text\<open>\noindent
 Recall that @{term"hd"} and @{term"tl"}
 return the first element and the remainder of a list.
 Because all functions are total, \cdx{hd} is defined even for the empty
@@ -72,54 +72,54 @@
 
 The compiler is a function from expressions to a list of instructions. Its
 definition is obvious:
-*}
+\<close>
 
 primrec compile :: "('a,'v)expr \<Rightarrow> ('a,'v)instr list" where
 "compile (Cex v)       = [Const v]" |
 "compile (Vex a)       = [Load a]" |
 "compile (Bex f e1 e2) = (compile e2) @ (compile e1) @ [Apply f]"
 
-text{*
+text\<open>
 Now we have to prove the correctness of the compiler, i.e.\ that the
 execution of a compiled expression results in the value of the expression:
-*}
+\<close>
 theorem "exec (compile e) s [] = [value e s]"
 (*<*)oops(*>*)
-text{*\noindent
+text\<open>\noindent
 This theorem needs to be generalized:
-*}
+\<close>
 
 theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
 
-txt{*\noindent
+txt\<open>\noindent
 It will be proved by induction on @{term"e"} followed by simplification.  
 First, we must prove a lemma about executing the concatenation of two
 instruction sequences:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma exec_app[simp]:
   "\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)" 
 
-txt{*\noindent
+txt\<open>\noindent
 This requires induction on @{term"xs"} and ordinary simplification for the
 base cases. In the induction step, simplification leaves us with a formula
 that contains two @{text"case"}-expressions over instructions. Thus we add
 automatic case splitting, which finishes the proof:
-*}
+\<close>
 apply(induct_tac xs, simp, simp split: instr.split)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Note that because both \methdx{simp_all} and \methdx{auto} perform simplification, they can
 be modified in the same way as @{text simp}.  Thus the proof can be
 rewritten as
-*}
+\<close>
 (*<*)
 declare exec_app[simp del]
 lemma [simp]: "\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)" 
 (*>*)
 apply(induct_tac xs, simp_all split: instr.split)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Although this is more compact, it is less clear for the reader of the proof.
 
 We could now go back and prove @{prop"exec (compile e) s [] = [value e s]"}
@@ -127,7 +127,7 @@
 However, this is unnecessary because the generalized version fully subsumes
 its instance.%
 \index{compiling expressions example|)}
-*}
+\<close>
 (*<*)
 theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
 by(induct_tac e, auto)
--- a/src/Doc/Tutorial/Datatype/ABexpr.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/ABexpr.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory ABexpr imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \index{datatypes!mutually recursive}%
 Sometimes it is necessary to define two datatypes that depend on each
 other. This is called \textbf{mutual recursion}. As an example consider a
@@ -15,7 +15,7 @@
   comparisons like ``$m<n$''.
 \end{itemize}
 In Isabelle this becomes
-*}
+\<close>
 
 datatype 'a aexp = IF   "'a bexp" "'a aexp" "'a aexp"
                  | Sum  "'a aexp" "'a aexp"
@@ -26,14 +26,14 @@
                  | And  "'a bexp" "'a bexp"
                  | Neg  "'a bexp"
 
-text{*\noindent
+text\<open>\noindent
 Type @{text"aexp"} is similar to @{text"expr"} in \S\ref{sec:ExprCompiler},
 except that we have added an @{text IF} constructor,
 fixed the values to be of type @{typ"nat"} and declared the two binary
 operations @{text Sum} and @{term"Diff"}.  Boolean
 expressions can be arithmetic comparisons, conjunctions and negations.
 The semantics is given by two evaluation functions:
-*}
+\<close>
 
 primrec evala :: "'a aexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> nat" and
          evalb :: "'a bexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> bool" where
@@ -48,7 +48,7 @@
 "evalb (And b1 b2) env = (evalb b1 env \<and> evalb b2 env)" |
 "evalb (Neg b) env = (\<not> evalb b env)"
 
-text{*\noindent
+text\<open>\noindent
 
 Both take an expression and an environment (a mapping from variables
 @{typ"'a"} to values @{typ"nat"}) and return its arithmetic/boolean
@@ -60,7 +60,7 @@
 the empty line is purely for readability.
 
 In the same fashion we also define two functions that perform substitution:
-*}
+\<close>
 
 primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp" and
          substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp" where
@@ -75,7 +75,7 @@
 "substb s (And b1 b2) = And (substb s b1) (substb s b2)" |
 "substb s (Neg b) = Neg (substb s b)"
 
-text{*\noindent
+text\<open>\noindent
 Their first argument is a function mapping variables to expressions, the
 substitution. It is applied to all variables in the second argument. As a
 result, the type of variables in the expression may change from @{typ"'a"}
@@ -89,19 +89,19 @@
 boolean expressions (by induction), you find that you always need the other
 theorem in the induction step. Therefore you need to state and prove both
 theorems simultaneously:
-*}
+\<close>
 
 lemma "evala (substa s a) env = evala a (\<lambda>x. evala (s x) env) \<and>
         evalb (substb s b) env = evalb b (\<lambda>x. evala (s x) env)"
 apply(induct_tac a and b)
 
-txt{*\noindent The resulting 8 goals (one for each constructor) are proved in one fell swoop:
-*}
+txt\<open>\noindent The resulting 8 goals (one for each constructor) are proved in one fell swoop:
+\<close>
 
 apply simp_all
 (*<*)done(*>*)
 
-text{*
+text\<open>
 In general, given $n$ mutually recursive datatypes $\tau@1$, \dots, $\tau@n$,
 an inductive proof expects a goal of the form
 \[ P@1(x@1)\ \land \dots \land P@n(x@n) \]
@@ -121,7 +121,7 @@
   it.  ({\em Hint:} proceed as in \S\ref{sec:boolex} and read the discussion
   of type annotations following lemma @{text subst_id} below).
 \end{exercise}
-*}
+\<close>
 (*<*)
 primrec norma :: "'a aexp \<Rightarrow> 'a aexp" and
         normb :: "'a bexp \<Rightarrow> 'a aexp \<Rightarrow> 'a aexp \<Rightarrow> 'a aexp" where
--- a/src/Doc/Tutorial/Datatype/Fundata.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/Fundata.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 (*>*)
 datatype (dead 'a,'i) bigtree = Tip | Br 'a "'i \<Rightarrow> ('a,'i)bigtree"
 
-text{*\noindent
+text\<open>\noindent
 Parameter @{typ"'a"} is the type of values stored in
 the @{term Br}anches of the tree, whereas @{typ"'i"} is the index
 type over which the tree branches. If @{typ"'i"} is instantiated to
@@ -17,14 +17,14 @@
 has merely @{term"Tip"}s as further subtrees.
 
 Function @{term"map_bt"} applies a function to all labels in a @{text"bigtree"}:
-*}
+\<close>
 
 primrec map_bt :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a,'i)bigtree \<Rightarrow> ('b,'i)bigtree"
 where
 "map_bt f Tip      = Tip" |
 "map_bt f (Br a F) = Br (f a) (\<lambda>i. map_bt f (F i))"
 
-text{*\noindent This is a valid \isacommand{primrec} definition because the
+text\<open>\noindent This is a valid \isacommand{primrec} definition because the
 recursive calls of @{term"map_bt"} involve only subtrees of
 @{term"F"}, which is itself a subterm of the left-hand side. Thus termination
 is assured.  The seasoned functional programmer might try expressing
@@ -32,18 +32,18 @@
 however will reject.  Applying @{term"map_bt"} to only one of its arguments
 makes the termination proof less obvious.
 
-The following lemma has a simple proof by induction:  *}
+The following lemma has a simple proof by induction:\<close>
 
 lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
 apply(induct_tac T, simp_all)
 done
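On a concrete one-level tree, @{term map_bt} behaves as expected.  The
check is illustrative only and not part of this changeset:

  lemma "map_bt Suc (Br 0 (\<lambda>i::nat. Tip)) = Br (Suc 0) (\<lambda>i. Tip)"
  by simp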
 (*<*)lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
 apply(induct_tac T, rename_tac[2] F)(*>*)
-txt{*\noindent
+txt\<open>\noindent
 Because of the function type, the proof state after induction looks unusual.
 Notice the quantified induction hypothesis:
 @{subgoals[display,indent=0]}
-*}
+\<close>
 (*<*)
 oops
 end
--- a/src/Doc/Tutorial/Datatype/Nested.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/Nested.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory Nested imports ABexpr begin
 (*>*)
 
-text{*
+text\<open>
 \index{datatypes!and nested recursion}%
 So far, all datatypes had the property that on the right-hand side of their
 definition they occurred only at the top-level: directly below a
@@ -10,11 +10,11 @@
 datatype occurs nested in some other datatype (but not inside itself!).
 Consider the following model of terms
 where function symbols can be applied to a list of arguments:
-*}
+\<close>
 (*<*)hide_const Var(*>*)
 datatype ('v,'f)"term" = Var 'v | App 'f "('v,'f)term list"
 
-text{*\noindent
+text\<open>\noindent
 Note that we need to quote @{text term} on the left to avoid confusion with
 the Isabelle command \isacommand{term}.
 Parameter @{typ"'v"} is the type of variables and @{typ"'f"} the type of
@@ -41,7 +41,7 @@
 
 Let us define a substitution function on terms. Because terms involve term
 lists, we need to define two substitution functions simultaneously:
-*}
+\<close>
 
 primrec
 subst :: "('v\<Rightarrow>('v,'f)term) \<Rightarrow> ('v,'f)term      \<Rightarrow> ('v,'f)term" and
@@ -54,7 +54,7 @@
 "substs s [] = []" |
 "substs s (t # ts) = subst s t # substs s ts"
 
-text{*\noindent
+text\<open>\noindent
 Individual equations in a \commdx{primrec} definition may be
 named as shown for @{thm[source]subst_App}.
 The significance of this device will become apparent below.
@@ -63,14 +63,14 @@
 to prove a related statement about term lists simultaneously. For example,
 the fact that the identity substitution does not change a term needs to be
 strengthened and proved as follows:
-*}
+\<close>
 
 lemma subst_id(*<*)(*referred to from ABexpr*)(*>*): "subst  Var t  = (t ::('v,'f)term)  \<and>
                   substs Var ts = (ts::('v,'f)term list)"
 apply(induct_tac t and ts rule: subst.induct substs.induct, simp_all)
 done
 
-text{*\noindent
+text\<open>\noindent
 Note that @{term Var} is the identity substitution because by definition it
 leaves variables unchanged: @{prop"subst Var (Var x) = Var x"}. Note also
 that the type annotations are necessary because otherwise there is nothing in
@@ -100,7 +100,7 @@
 @{text"map f [x1,...,xn] = [f x1,...,f xn]"}. This is true, but Isabelle
 insists on the conjunctive format. Fortunately, we can easily \emph{prove}
 that the suggested equation holds:
-*}
+\<close>
 (*<*)
 (* Exercise 1: *)
 lemma "subst  ((subst f) \<circ> g) t  = subst  f (subst g t) \<and>
@@ -133,14 +133,14 @@
 apply(induct_tac ts, simp_all)
 done
 
-text{*\noindent
+text\<open>\noindent
 What is more, we can now disable the old defining equation as a
 simplification rule:
-*}
+\<close>
 
 declare subst_App [simp del]
 
-text{*\noindent The advantage is that, now that we have replaced @{const
+text\<open>\noindent The advantage is that, now that we have replaced @{const
 substs} by @{const map}, we can profit from the large number of
 pre-proved lemmas about @{const map}.  Unfortunately, inductive proofs
 about type @{text term} are still awkward because they expect a
@@ -155,5 +155,5 @@
 Of course, you may also combine mutual and nested recursion of datatypes. For example,
 constructor @{text Sum} in \S\ref{sec:datatype-mut-rec} could take a list of
 expressions as its argument: @{text Sum}~@{typ[quotes]"'a aexp list"}.
-*}
+\<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Documents/Documents.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Documents/Documents.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory Documents imports Main begin
 (*>*)
 
-section {* Concrete Syntax \label{sec:concrete-syntax} *}
+section \<open>Concrete Syntax \label{sec:concrete-syntax}\<close>
 
-text {*
+text \<open>
   The core concept of Isabelle's framework for concrete syntax is that
   of \bfindex{mixfix annotations}.  Associated with any kind of
   constant declaration, mixfixes affect both the grammar productions
@@ -19,12 +19,12 @@
 
   Below we introduce a few simple syntax declaration
   forms that already cover many common situations fairly well.
-*}
+\<close>
 
 
-subsection {* Infix Annotations *}
+subsection \<open>Infix Annotations\<close>
 
-text {*
+text \<open>
   Syntax annotations may be included wherever constants are declared,
   such as \isacommand{definition} and \isacommand{primrec} --- and also
   \isacommand{datatype}, which declares constructor operations.
@@ -35,12 +35,12 @@
   Infix declarations\index{infix annotations} provide a useful special
   case of mixfixes.  The following example of the exclusive-or
   operation on boolean values illustrates typical infix declarations.
-*}
+\<close>
 
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]" 60)
 where "A [+] B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 
-text {*
+text \<open>
   \noindent Now @{text "xor A B"} and @{text "A [+] B"} refer to the
   same expression internally.  Any curried function with at least two
   arguments may be given infix syntax.  For partial applications with
@@ -75,12 +75,12 @@
   below 50; algebraic ones (like @{text "+"} and @{text "*"}) are
   above 50.  User syntax should strive to coexist with common HOL
   forms, or use the mostly unused range 100--900.
-*}
+\<close>
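Since @{text"[+]"} was declared @{text infixl}, nested occurrences
group to the left, so the following check holds purely syntactically;
it is illustrative only and not part of this changeset:

  lemma "a [+] b [+] c = (a [+] b) [+] c"
  by (rule refl)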
 
 
-subsection {* Mathematical Symbols \label{sec:syntax-symbols} *}
+subsection \<open>Mathematical Symbols \label{sec:syntax-symbols}\<close>
 
-text {*
+text \<open>
   Concrete syntax based on ASCII characters has inherent limitations.
   Mathematical notation demands a larger repertoire of glyphs.
   Several standards of extended character sets have been proposed over
@@ -133,39 +133,39 @@
 
   Replacing our previous definition of @{text xor} by the
   following specifies an Isabelle symbol for the new operator:
-*}
+\<close>
 
 (*<*)
 hide_const xor
-setup {* Sign.add_path "version1" *}
+setup \<open>Sign.add_path "version1"\<close>
 (*>*)
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "\<oplus>" 60)
 where "A \<oplus> B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 (*<*)
-setup {* Sign.local_path *}
+setup \<open>Sign.local_path\<close>
 (*>*)
 
-text {*
+text \<open>
   It is possible to provide alternative syntax forms
   through the \bfindex{print mode} concept~@{cite "isabelle-isar-ref"}.  By
   convention, the mode of ``$xsymbols$'' is enabled whenever
   Proof~General's X-Symbol mode or {\LaTeX} output is active.  Now
   consider the following hybrid declaration of @{text xor}:
-*}
+\<close>
 
 (*<*)
 hide_const xor
-setup {* Sign.add_path "version2" *}
+setup \<open>Sign.add_path "version2"\<close>
 (*>*)
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]\<ignore>" 60)
 where "A [+]\<ignore> B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 
 notation (xsymbols) xor (infixl "\<oplus>\<ignore>" 60)
 (*<*)
-setup {* Sign.local_path *}
+setup \<open>Sign.local_path\<close>
 (*>*)
 
-text {*\noindent
+text \<open>\noindent
 The \commdx{notation} command associates a mixfix
 annotation with a known constant.  The print mode specification,
 here @{text "(xsymbols)"}, is optional.
@@ -174,17 +174,17 @@
 output uses the nicer syntax of $xsymbols$ whenever that print mode is
 active.  Such an arrangement is particularly useful for interactive
 development, where users may type ASCII text and see mathematical
-symbols displayed during proofs.  *}
+symbols displayed during proofs.\<close>
 
 
-subsection {* Prefix Annotations *}
+subsection \<open>Prefix Annotations\<close>
 
-text {*
+text \<open>
   Prefix syntax annotations\index{prefix annotation} are another form
   of mixfixes @{cite "isabelle-isar-ref"}, without any template arguments or
   priorities --- just some literal syntax.  The following example
   associates common symbols with the constructors of a datatype.
-*}
+\<close>
 
 datatype currency =
     Euro nat    ("\<euro>")
@@ -192,7 +192,7 @@
   | Yen nat     ("\<yen>")
   | Dollar nat  ("$")
 
-text {*
+text \<open>
   \noindent Here the mixfix annotations on the rightmost column happen
   to consist of a single Isabelle symbol each: \verb,\,\verb,<euro>,,
   \verb,\,\verb,<pounds>,, \verb,\,\verb,<yen>,, and \verb,$,.  Recall
@@ -204,12 +204,12 @@
   Commission.
 
   Prefix syntax works the same way for other commands that introduce new constants, e.g. \isakeyword{primrec}.
-*}
+\<close>
 
 
-subsection {* Abbreviations \label{sec:abbreviations} *}
+subsection \<open>Abbreviations \label{sec:abbreviations}\<close>
 
-text{* Mixfix syntax annotations merely decorate particular constant
+text\<open>Mixfix syntax annotations merely decorate particular constant
 application forms with concrete syntax, for instance replacing
 @{text "xor A B"} by @{text "A \<oplus> B"}.  Occasionally, the relationship
 between some piece of notation and its internal form is more
@@ -223,12 +223,12 @@
 A typical use of abbreviations is to introduce relational notation for
 membership in a set of pairs, replacing @{text "(x, y) \<in> sim"} by
 @{text "x \<approx> y"}. We assume that a constant @{text sim } of type
-@{typ"('a \<times> 'a) set"} has been introduced at this point. *}
+@{typ"('a \<times> 'a) set"} has been introduced at this point.\<close>
 (*<*)consts sim :: "('a \<times> 'a) set"(*>*)
 abbreviation sim2 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"   (infix "\<approx>" 50)
 where "x \<approx> y  \<equiv>  (x, y) \<in> sim"
 
-text {* \noindent The given meta-equality is used as a rewrite rule
+text \<open>\noindent The given meta-equality is used as a rewrite rule
 after parsing (replacing \mbox{@{prop"x \<approx> y"}} by @{text"(x,y) \<in>
 sim"}) and before printing (turning @{text"(x,y) \<in> sim"} back into
 \mbox{@{prop"x \<approx> y"}}). The name of the dummy constant @{text "sim2"}
@@ -238,14 +238,14 @@
 provide variant versions of fundamental relational expressions, such
 as @{text \<noteq>} for negated equalities.  The following declaration
 stems from Isabelle/HOL itself:
-*}
+\<close>
 
 abbreviation not_equal :: "'a \<Rightarrow> 'a \<Rightarrow> bool"    (infixl "~=\<ignore>" 50)
 where "x ~=\<ignore> y  \<equiv>  \<not> (x = y)"
 
 notation (xsymbols) not_equal (infix "\<noteq>\<ignore>" 50)
 
-text {* \noindent The notation @{text \<noteq>} is introduced separately to restrict it
+text \<open>\noindent The notation @{text \<noteq>} is introduced separately to restrict it
 to the \emph{xsymbols} mode.
 
 Abbreviations are appropriate when the defined concept is a
@@ -257,12 +257,12 @@
 Abbreviations are a simplified form of the general concept of
 \emph{syntax translations}; even heavier transformations may be
 written in ML @{cite "isabelle-isar-ref"}.
-*}
+\<close>
 
 
-section {* Document Preparation \label{sec:document-preparation} *}
+section \<open>Document Preparation \label{sec:document-preparation}\<close>
 
-text {*
+text \<open>
   Isabelle/Isar is centered around the concept of \bfindex{formal
   proof documents}\index{documents|bold}.  The outcome of a formal
   development effort is meant to be a human-readable record, presented
@@ -279,27 +279,27 @@
 
   Here is an example to illustrate the idea of Isabelle document
   preparation.
-*}
+\<close>
 
-text_raw {* \begin{quotation} *}
+text_raw \<open>\begin{quotation}\<close>
 
-text {*
+text \<open>
   The following datatype definition of @{text "'a bintree"} models
   binary trees with nodes being decorated by elements of type @{typ
   'a}.
-*}
+\<close>
 
 datatype 'a bintree =
      Leaf | Branch 'a  "'a bintree"  "'a bintree"
 
-text {*
+text \<open>
   \noindent The datatype induction rule generated here is of the form
   @{thm [indent = 1, display] bintree.induct [no_vars]}
-*}
+\<close>
 
-text_raw {* \end{quotation} *}
+text_raw \<open>\end{quotation}\<close>
 
-text {*
+text \<open>
   \noindent The above document output has been produced as follows:
 
   \begin{ttbox}
@@ -324,12 +324,12 @@
   to formal entities by means of ``antiquotations'' (such as
   \texttt{\at}\verb,{text "'a bintree"}, or
   \texttt{\at}\verb,{typ 'a},), see also \S\ref{sec:doc-prep-text}.
-*}
+\<close>
 
 
-subsection {* Isabelle Sessions *}
+subsection \<open>Isabelle Sessions\<close>
 
-text {*
+text \<open>
   In contrast to the highly interactive mode of Isabelle/Isar theory
   development, the document preparation stage essentially works in
   batch-mode.  An Isabelle \bfindex{session} consists of a collection
@@ -412,12 +412,12 @@
   Isabelle batch session leaves the generated sources in their target
   location, identified by the accompanying error message.  This lets
   you trace {\LaTeX} problems with the generated files at hand.
-*}
+\<close>
 
 
-subsection {* Structure Markup *}
+subsection \<open>Structure Markup\<close>
 
-text {*
+text \<open>
   The large-scale structure of Isabelle documents follows existing
   {\LaTeX} conventions, with chapters, sections, subsubsections etc.
   The Isar language includes separate \bfindex{markup commands}, which
@@ -460,12 +460,12 @@
 
   end
   \end{ttbox}
-*}
+\<close>
 
 
-subsection {* Formal Comments and Antiquotations \label{sec:doc-prep-text} *}
+subsection \<open>Formal Comments and Antiquotations \label{sec:doc-prep-text}\<close>
 
-text {*
+text \<open>
   Isabelle \bfindex{source comments}, which are of the form
   \verb,(,\verb,*,~@{text \<dots>}~\verb,*,\verb,),, essentially act like
   white space and do not really contribute to the content.  They
@@ -481,14 +481,14 @@
   \verb,{,\verb,*,~@{text \<dots>}~\verb,*,\verb,}, as before.  Multiple
   marginal comments may be given at the same time.  Here is a simple
   example:
-*}
+\<close>
 
 lemma "A --> A"
-  -- "a triviality of propositional logic"
-  -- "(should not really bother)"
-  by (rule impI) -- "implicit assumption step involved here"
+  \<comment> "a triviality of propositional logic"
+  \<comment> "(should not really bother)"
+  by (rule impI) \<comment> "implicit assumption step involved here"
 
-text {*
+text \<open>
   \noindent The above output has been produced as follows:
 
 \begin{verbatim}
@@ -593,12 +593,12 @@
   document very easily, independently of the term language of
   Isabelle.  Manual {\LaTeX} code would leave more control over the
   typesetting, but is also slightly more tedious.
-*}
+\<close>
 
 
-subsection {* Interpretation of Symbols \label{sec:doc-prep-symbols} *}
+subsection \<open>Interpretation of Symbols \label{sec:doc-prep-symbols}\<close>
 
-text {*
+text \<open>
   As has been pointed out before (\S\ref{sec:syntax-symbols}),
   Isabelle symbols are the smallest syntactic entities --- a
   straightforward generalization of ASCII characters.  While Isabelle
@@ -640,12 +640,12 @@
   quotes are not printed at all.  The resulting quality of typesetting
   is quite good, so this should be the default style for work that
   gets distributed to a broader audience.
-*}
+\<close>
 
 
-subsection {* Suppressing Output \label{sec:doc-prep-suppress} *}
+subsection \<open>Suppressing Output \label{sec:doc-prep-suppress}\<close>
 
-text {*
+text \<open>
   By default, Isabelle's document system generates a {\LaTeX} file for
   each theory that gets loaded while running the session.  The
   generated \texttt{session.tex} will include all of these in order of
@@ -683,11 +683,11 @@
   commands involving ML code).  Users may add their own tags using the
   \verb,%,\emph{tag} notation right after a command name.  In the
   subsequent example we hide a particularly irrelevant proof:
-*}
+\<close>
 
 lemma "x = x" by %invisible (simp)
 
-text {*
+text \<open>
   The original source has been ``\verb,lemma "x = x" by %invisible (simp),''.
   Tags observe the structure of proofs; adjacent commands with the
   same tag are joined into a single region.  The Isabelle document
@@ -705,12 +705,12 @@
   of the theory, of course.  For example, we may hide parts of a proof
   that seem unfit for general public inspection.  The following
   ``fully automatic'' proof is actually a fake:
-*}
+\<close>
 
 lemma "x \<noteq> (0::int) \<Longrightarrow> 0 < x * x"
   by (auto(*<*)simp add: zero_less_mult_iff(*>*))
 
-text {*
+text \<open>
   \noindent The real source of the proof has been as follows:
 
 \begin{verbatim}
@@ -722,7 +722,7 @@
   should not misrepresent the underlying theory development.  It is
   easy to invalidate the visible text by hiding references to
   questionable axioms, for example.
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Fun/fun0.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Fun/fun0.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,19 +2,19 @@
 theory fun0 imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \subsection{Definition}
 \label{sec:fun-examples}
 
 Here is a simple example, the \rmindex{Fibonacci function}:
-*}
+\<close>
 
 fun fib :: "nat \<Rightarrow> nat" where
 "fib 0 = 0" |
 "fib (Suc 0) = 1" |
 "fib (Suc(Suc x)) = fib x + fib (Suc x)"
 
-text{*\noindent
+text\<open>\noindent
 This resembles ordinary functional programming languages. Note the obligatory
 \isacommand{where} and \isa{|}. Command \isacommand{fun} declares and
 defines the function in one go. Isabelle establishes termination automatically
@@ -22,35 +22,35 @@
 
 Slightly more interesting is the insertion of a fixed element
 between any two elements of a list:
-*}
+\<close>
 
 fun sep :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "sep a []     = []" |
 "sep a [x]    = [x]" |
 "sep a (x#y#zs) = x # a # sep a (y#zs)"
 
-text{*\noindent
+text\<open>\noindent
 This time the length of the list decreases with the
 recursive call; the first argument is irrelevant for termination.
 
 Pattern matching\index{pattern matching!and \isacommand{fun}}
 need not be exhaustive and may employ wildcards:
-*}
+\<close>
 
 fun last :: "'a list \<Rightarrow> 'a" where
 "last [x]      = x" |
 "last (_#y#zs) = last (y#zs)"
 
-text{*
+text\<open>
 Overlapping patterns are disambiguated by taking the order of equations into
 account, just as in functional programming:
-*}
+\<close>
 
 fun sep1 :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "sep1 a (x#y#zs) = x # a # sep1 a (y#zs)" |
 "sep1 _ xs       = xs"
 
-text{*\noindent
+text\<open>\noindent
 To guarantee that the second equation can only be applied if the first
 one does not match, Isabelle internally replaces the second equation
 by the two possibilities that are left: @{prop"sep1 a [] = []"} and
@@ -59,13 +59,13 @@
 
 Because of its pattern matching syntax, \isacommand{fun} is also useful
 for the definition of non-recursive functions:
-*}
+\<close>
 
 fun swap12 :: "'a list \<Rightarrow> 'a list" where
 "swap12 (x#y#zs) = y#x#zs" |
 "swap12 zs       = zs"
 
-text{*
+text\<open>
 After a function~$f$ has been defined via \isacommand{fun},
 its defining equations (or variants derived from them) are available
 under the name $f$@{text".simps"} as theorems.
@@ -87,14 +87,14 @@
 More generally, \isacommand{fun} allows any \emph{lexicographic
 combination} of size measures in case there are multiple
 arguments. For example, the following version of \rmindex{Ackermann's
-function} is accepted: *}
+function} is accepted:\<close>
 
 fun ack2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "ack2 n 0 = Suc n" |
 "ack2 0 (Suc m) = ack2 (Suc 0) m" |
 "ack2 (Suc n) (Suc m) = ack2 (ack2 n (Suc m)) m"
 
-text{* The order of arguments has no influence on whether
+text\<open>The order of arguments has no influence on whether
 \isacommand{fun} can prove termination of a function. For more details
 see elsewhere~@{cite bulwahnKN07}.
 
@@ -108,12 +108,12 @@
 terminate because of automatic splitting of @{text "if"}.
 \index{*if expressions!splitting of}
 Let us look at an example:
-*}
+\<close>
 
 fun gcd :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "gcd m n = (if n=0 then m else gcd n (m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 The second argument decreases with each recursive call.
 The termination condition
 @{prop[display]"n ~= (0::nat) ==> m mod n < n"}
@@ -145,32 +145,32 @@
 If possible, the definition should be given by pattern matching on the left
 rather than @{text "if"} on the right. In the case of @{term gcd} the
 following alternative definition suggests itself:
-*}
+\<close>
 
 fun gcd1 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "gcd1 m 0 = m" |
 "gcd1 m n = gcd1 n (m mod n)"
 
-text{*\noindent
+text\<open>\noindent
 The order of equations is important: it hides the side condition
 @{prop"n ~= (0::nat)"}.  Unfortunately, not all conditionals can be
 expressed by pattern matching.
 
 A simple alternative is to replace @{text "if"} by @{text case}, 
 which is also available for @{typ bool} and is not split automatically:
-*}
+\<close>
 
 fun gcd2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "gcd2 m n = (case n=0 of True \<Rightarrow> m | False \<Rightarrow> gcd2 n (m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 This is probably the neatest solution next to pattern matching, and it is
 always available.
 
 A final alternative is to replace the offending simplification rules by
 derived conditional ones. For @{term gcd} it means we have to prove
 these lemmas:
-*}
+\<close>
 
 lemma [simp]: "gcd m 0 = m"
 apply(simp)
@@ -180,15 +180,15 @@
 apply(simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 Simplification terminates for these proofs because the condition of the @{text
 "if"} simplifies to @{term True} or @{term False}.
 Now we can disable the original simplification rule:
-*}
+\<close>
 
 declare gcd.simps [simp del]
 
-text{*
+text\<open>
 \index{induction!recursion|(}
 \index{recursion induction|(}
 
@@ -207,29 +207,29 @@
 you are trying to establish holds for the left-hand side provided it holds
 for all recursive calls on the right-hand side. Here is a simple example
 involving the predefined @{term"map"} functional on lists:
-*}
+\<close>
 
 lemma "map f (sep x xs) = sep (f x) (map f xs)"
 
-txt{*\noindent
+txt\<open>\noindent
 Note that @{term"map f xs"}
 is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
 this lemma by recursion induction over @{term"sep"}:
-*}
+\<close>
 
 apply(induct_tac x xs rule: sep.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 The resulting proof state has three subgoals corresponding to the three
 clauses for @{term"sep"}:
 @{subgoals[display,indent=0]}
 The rest is pure simplification:
-*}
+\<close>
 
 apply simp_all
 done
 
-text{*\noindent The proof goes smoothly because the induction rule
+text\<open>\noindent The proof goes smoothly because the induction rule
 follows the recursion of @{const sep}.  Try proving the above lemma by
 structural induction, and you find that you need an additional case
 distinction.
@@ -255,7 +255,7 @@
 holds for the tail of that list.
 \index{induction!recursion|)}
 \index{recursion induction|)}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,26 +2,26 @@
 theory Ifexpr imports Main begin
 (*>*)
 
-subsection{*Case Study: Boolean Expressions*}
+subsection\<open>Case Study: Boolean Expressions\<close>
 
-text{*\label{sec:boolex}\index{boolean expressions example|(}
+text\<open>\label{sec:boolex}\index{boolean expressions example|(}
 The aim of this case study is twofold: it shows how to model boolean
 expressions and some algorithms for manipulating them, and it demonstrates
 the constructs introduced above.
-*}
+\<close>
 
-subsubsection{*Modelling Boolean Expressions*}
+subsubsection\<open>Modelling Boolean Expressions\<close>
 
-text{*
+text\<open>
 We want to represent boolean expressions built up from variables and
 constants by negation and conjunction. The following datatype serves exactly
 that purpose:
-*}
+\<close>
 
 datatype boolex = Const bool | Var nat | Neg boolex
                 | And boolex boolex
 
-text{*\noindent
+text\<open>\noindent
 The two constants are represented by @{term"Const True"} and
 @{term"Const False"}. Variables are represented by terms of the form
 @{term"Var n"}, where @{term"n"} is a natural number (type @{typ"nat"}).
@@ -34,7 +34,7 @@
 Hence the function @{text"value"} takes an additional parameter, an
 \emph{environment} of type @{typ"nat => bool"}, which maps variables to their
 values:
-*}
+\<close>
 
 primrec "value" :: "boolex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
 "value (Const b) env = b" |
@@ -42,20 +42,20 @@
 "value (Neg b)   env = (\<not> value b env)" |
 "value (And b c) env = (value b env \<and> value c env)"
 
-text{*\noindent
+text\<open>\noindent
 \subsubsection{If-Expressions}
 
 An alternative and often more efficient (because in a certain sense
 canonical) representation are so-called \emph{If-expressions} built up
 from constants (@{term"CIF"}), variables (@{term"VIF"}) and conditionals
 (@{term"IF"}):
-*}
+\<close>
 
 datatype ifex = CIF bool | VIF nat | IF ifex ifex ifex
 
-text{*\noindent
+text\<open>\noindent
 The evaluation of If-expressions proceeds as for @{typ"boolex"}:
-*}
+\<close>
 
 primrec valif :: "ifex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
 "valif (CIF b)    env = b" |
@@ -63,13 +63,13 @@
 "valif (IF b t e) env = (if valif b env then valif t env
                                         else valif e env)"
 
-text{*
+text\<open>
 \subsubsection{Converting Boolean and If-Expressions}
 
 The type @{typ"boolex"} is close to the customary representation of logical
 formulae, whereas @{typ"ifex"} is designed for efficiency. It is easy to
 translate from @{typ"boolex"} into @{typ"ifex"}:
-*}
+\<close>
 
 primrec bool2if :: "boolex \<Rightarrow> ifex" where
 "bool2if (Const b) = CIF b" |
@@ -77,22 +77,22 @@
 "bool2if (Neg b)   = IF (bool2if b) (CIF False) (CIF True)" |
 "bool2if (And b c) = IF (bool2if b) (bool2if c) (CIF False)"
 
-text{*\noindent
+text\<open>\noindent
 At last, we have something we can verify: that @{term"bool2if"} preserves the
 value of its argument:
-*}
+\<close>
 
 lemma "valif (bool2if b) env = value b env"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is canonical:
-*}
+\<close>
 
 apply(induct_tac b)
 apply(auto)
 done
 
-text{*\noindent
+text\<open>\noindent
 In fact, all proofs in this case study look exactly like this. Hence we do
 not show them below.
 
@@ -102,7 +102,7 @@
 repeatedly replacing a subterm of the form @{term"IF (IF b x y) z u"} by
 @{term"IF b (IF x z u) (IF y z u)"}, which has the same value. The following
 primitive recursive functions perform this task:
-*}
+\<close>
 
 primrec normif :: "ifex \<Rightarrow> ifex \<Rightarrow> ifex \<Rightarrow> ifex" where
 "normif (CIF b)    t e = IF (CIF b) t e" |
@@ -114,18 +114,18 @@
 "norm (VIF x)    = VIF x" |
 "norm (IF b t e) = normif b (norm t) (norm e)"
 
-text{*\noindent
+text\<open>\noindent
 Their interplay is tricky; we leave it to you to develop an
 intuitive understanding. Fortunately, Isabelle can help us to verify that the
 transformation preserves the value of the expression:
-*}
+\<close>
 
 theorem "valif (norm b) env = valif b env"(*<*)oops(*>*)
 
-text{*\noindent
+text\<open>\noindent
 The proof is canonical, provided we first show the following simplification
 lemma, which also helps to understand what @{term"normif"} does:
-*}
+\<close>
 
 lemma [simp]:
   "\<forall>t e. valif (normif b t e) env = valif (IF b t e) env"
@@ -137,13 +137,13 @@
 apply(induct_tac b)
 by(auto)
 (*>*)
-text{*\noindent
+text\<open>\noindent
 Note that the lemma does not have a name, but is implicitly used in the proof
 of the theorem shown above because of the @{text"[simp]"} attribute.
 
 But how can we be sure that @{term"norm"} really produces a normal form in
 the above sense? We define a function that tests If-expressions for normality:
-*}
+\<close>
 
 primrec normal :: "ifex \<Rightarrow> bool" where
 "normal(CIF b) = True" |
@@ -151,10 +151,10 @@
 "normal(IF b t e) = (normal t \<and> normal e \<and>
      (case b of CIF b \<Rightarrow> True | VIF x \<Rightarrow> True | IF x y z \<Rightarrow> False))"
 
-text{*\noindent
+text\<open>\noindent
 Now we prove @{term"normal(norm b)"}. Of course, this requires a lemma about
 normality of @{term"normif"}:
-*}
+\<close>
 
 lemma [simp]: "\<forall>t e. normal(normif b t e) = (normal t \<and> normal e)"
 (*<*)
@@ -166,7 +166,7 @@
 by(auto)
 (*>*)
 
-text{*\medskip
+text\<open>\medskip
 How do we come up with the required lemmas? Try to prove the main theorems
 without them and study carefully what @{text auto} leaves unproved. This 
 can provide the clue.  The necessity of universal quantification
@@ -181,7 +181,7 @@
   equalities (@{text"="}).)
 \end{exercise}
 \index{boolean expressions example|)}
-*}
+\<close>
 (*<*)
 
 primrec normif2 :: "ifex => ifex => ifex => ifex" where
--- a/src/Doc/Tutorial/Inductive/AB.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/AB.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory AB imports Main begin(*>*)
 
-section{*Case Study: A Context Free Grammar*}
+section\<open>Case Study: A Context Free Grammar\<close>
 
-text{*\label{sec:CFG}
+text\<open>\label{sec:CFG}
 \index{grammars!defining inductively|(}%
 Grammars are nothing but shorthands for inductive definitions of nonterminals
 which represent sets of strings. For example, the production
@@ -21,24 +21,24 @@
 
 We start by fixing the alphabet, which consists only of @{term a}'s
 and~@{term b}'s:
-*}
+\<close>
 
 datatype alfa = a | b
 
-text{*\noindent
+text\<open>\noindent
 For convenience we include the following easy lemmas as simplification rules:
-*}
+\<close>
 
 lemma [simp]: "(x \<noteq> a) = (x = b) \<and> (x \<noteq> b) = (x = a)"
 by (case_tac x, auto)
 
-text{*\noindent
+text\<open>\noindent
 Words over this alphabet are of type @{typ"alfa list"}, and
 the three nonterminals are declared as sets of such words.
 The productions above are recast as a \emph{mutual} inductive
 definition\index{inductive definition!simultaneous}
 of @{term S}, @{term A} and~@{term B}:
-*}
+\<close>
 
 inductive_set
   S :: "alfa list set" and
@@ -55,31 +55,31 @@
 | "w \<in> S            \<Longrightarrow> b#w   \<in> B"
 | "\<lbrakk> v \<in> B; w \<in> B \<rbrakk> \<Longrightarrow> a#v@w \<in> B"
 
-text{*\noindent
+text\<open>\noindent
 First we show that all words in @{term S} contain the same number of @{term
 a}'s and @{term b}'s. Since the definition of @{term S} is by mutual
 induction, so is the proof: we show at the same time that all words in
 @{term A} contain one more @{term a} than @{term b} and all words in @{term
 B} contain one more @{term b} than @{term a}.
-*}
+\<close>
 
 lemma correctness:
   "(w \<in> S \<longrightarrow> size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b])     \<and>
    (w \<in> A \<longrightarrow> size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b] + 1) \<and>
    (w \<in> B \<longrightarrow> size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1)"
 
-txt{*\noindent
+txt\<open>\noindent
 These propositions are expressed with the help of the predefined @{term
 filter} function on lists, which has the convenient syntax @{text"[x\<leftarrow>xs. P
 x]"}, the list of all elements @{term x} in @{term xs} such that @{prop"P x"}
 holds. Remember that on lists @{text size} and @{text length} are synonymous.
 
 The proof itself is by rule induction and afterwards automatic:
-*}
+\<close>
 
 by (rule S_A_B.induct, auto)
 
-text{*\noindent
+text\<open>\noindent
 This may seem surprising at first, and is indeed an indication of the power
 of inductive definitions. But it is also quite straightforward. For example,
 consider the production $A \to b A A$: if $v,w \in A$ and the elements of $A$
@@ -109,13 +109,13 @@
 and @{term b}'s to an arbitrary property @{term P}. Otherwise we would have
 to prove the desired lemma twice, once as stated above and once with the
 roles of @{term a}'s and @{term b}'s interchanged.
-*}
+\<close>
 
 lemma step1: "\<forall>i < size w.
   \<bar>(int(size[x\<leftarrow>take (i+1) w. P x])-int(size[x\<leftarrow>take (i+1) w. \<not>P x]))
    - (int(size[x\<leftarrow>take i w. P x])-int(size[x\<leftarrow>take i w. \<not>P x]))\<bar> \<le> 1"
 
-txt{*\noindent
+txt\<open>\noindent
 The lemma is a bit hard to read because of the coercion function
 @{text"int :: nat \<Rightarrow> int"}. It is required because @{term size} returns
 a natural number, but subtraction on type~@{typ nat} will do the wrong thing.
@@ -126,34 +126,34 @@
 The proof is by induction on @{term w}, with a trivial base case, and a not
 so trivial induction step. Since it is essentially just arithmetic, we do not
 discuss it.
-*}
+\<close>
 
 apply(induct_tac w)
 apply(auto simp add: abs_if take_Cons split: nat.split)
 done
 
-text{*
+text\<open>
 Finally we come to the above-mentioned lemma about cutting in half a word with two more elements of one sort than of the other sort:
-*}
+\<close>
 
 lemma part1:
  "size[x\<leftarrow>w. P x] = size[x\<leftarrow>w. \<not>P x]+2 \<Longrightarrow>
   \<exists>i\<le>size w. size[x\<leftarrow>take i w. P x] = size[x\<leftarrow>take i w. \<not>P x]+1"
 
-txt{*\noindent
+txt\<open>\noindent
 This is proved by @{text force} with the help of the intermediate value theorem,
 instantiated appropriately and with its first premise disposed of by lemma
 @{thm[source]step1}:
-*}
+\<close>
 
 apply(insert nat0_intermed_int_val[OF step1, of "P" "w" "1"])
 by force
 
-text{*\noindent
+text\<open>\noindent
 
 Lemma @{thm[source]part1} tells us only about the prefix @{term"take i w"}.
 An easy lemma deals with the suffix @{term"drop i w"}:
-*}
+\<close>
 
 
 lemma part2:
@@ -163,7 +163,7 @@
    \<Longrightarrow> size[x\<leftarrow>drop i w. P x] = size[x\<leftarrow>drop i w. \<not>P x]+1"
 by(simp del: append_take_drop_id)
 
-text{*\noindent
+text\<open>\noindent
 In the proof we have disabled the normally useful lemma
 \begin{isabelle}
 @{thm append_take_drop_id[no_vars]}
@@ -174,34 +174,34 @@
 
 To dispose of trivial cases automatically, the rules of the inductive
 definition are declared simplification rules:
-*}
+\<close>
 
 declare S_A_B.intros[simp]
 
-text{*\noindent
+text\<open>\noindent
 This could have been done earlier but was not necessary so far.
 
 The completeness theorem tells us that if a word has the same number of
 @{term a}'s and @{term b}'s, then it is in @{term S}, and similarly 
 for @{term A} and @{term B}:
-*}
+\<close>
 
 theorem completeness:
   "(size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b]     \<longrightarrow> w \<in> S) \<and>
    (size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b] + 1 \<longrightarrow> w \<in> A) \<and>
    (size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1 \<longrightarrow> w \<in> B)"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by induction on @{term w}. Structural induction would fail here
 because, as we can see from the grammar, we need to make bigger steps than
 merely appending a single letter at the front. Hence we induct on the length
 of @{term w}, using the induction rule @{thm[source]length_induct}:
-*}
+\<close>
 
 apply(induct_tac w rule: length_induct)
 apply(rename_tac w)
 
-txt{*\noindent
+txt\<open>\noindent
 The @{text rule} parameter tells @{text induct_tac} explicitly which induction
 rule to use. For details see \S\ref{sec:complete-ind} below.
 In this case the result is that we may assume the lemma already
@@ -210,13 +210,13 @@
 
 The proof continues with a case distinction on @{term w},
 on whether @{term w} is empty or not.
-*}
+\<close>
 
 apply(case_tac w)
  apply(simp_all)
 (*<*)apply(rename_tac x v)(*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 Simplification disposes of the base case and leaves only a conjunction
 of two step cases to be proved:
 if @{prop"w = a#v"} and @{prop[display]"size[x\<in>v. x=a] = size[x\<in>v. x=b]+2"} then
@@ -226,49 +226,49 @@
 After breaking the conjunction up into two cases, we can apply
 @{thm[source]part1} to the assumption that @{term w} contains two more @{term
 a}'s than @{term b}'s.
-*}
+\<close>
 
 apply(rule conjI)
  apply(clarify)
  apply(frule part1[of "\<lambda>x. x=a", simplified])
  apply(clarify)
-txt{*\noindent
+txt\<open>\noindent
 This yields an index @{prop"i \<le> length v"} such that
 @{prop[display]"length [x\<leftarrow>take i v . x = a] = length [x\<leftarrow>take i v . x = b] + 1"}
 With the help of @{thm[source]part2} it follows that
 @{prop[display]"length [x\<leftarrow>drop i v . x = a] = length [x\<leftarrow>drop i v . x = b] + 1"}
-*}
+\<close>
 
  apply(drule part2[of "\<lambda>x. x=a", simplified])
   apply(assumption)
 
-txt{*\noindent
+txt\<open>\noindent
 Now it is time to decompose @{term v} in the conclusion @{prop"b#v \<in> A"}
 into @{term"take i v @ drop i v"},
-*}
+\<close>
 
  apply(rule_tac n1=i and t=v in subst[OF append_take_drop_id])
 
-txt{*\noindent
+txt\<open>\noindent
 (the variables @{term n1} and @{term t} are the result of composing the
 theorems @{thm[source]subst} and @{thm[source]append_take_drop_id})
 after which the appropriate rule of the grammar reduces the goal
 to the two subgoals @{prop"take i v \<in> A"} and @{prop"drop i v \<in> A"}:
-*}
+\<close>
 
  apply(rule S_A_B.intros)
 
-txt{*
+txt\<open>
 Both subgoals follow from the induction hypothesis because both @{term"take i
 v"} and @{term"drop i v"} are shorter than @{term w}:
-*}
+\<close>
 
   apply(force simp add: min_less_iff_disj)
  apply(force split: nat_diff_split)
 
-txt{*
+txt\<open>
 The case @{prop"w = b#v"} is proved analogously:
-*}
+\<close>
 
 apply(clarify)
 apply(frule part1[of "\<lambda>x. x=b", simplified])
@@ -280,7 +280,7 @@
  apply(force simp add: min_less_iff_disj)
 by(force simp add: min_less_iff_disj split: nat_diff_split)
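
As a quick sanity check (an added illustration, not part of the original proof), the grammar rules now derive concrete membership goals directly; the word $ab$, with one $a$ and one $b$, is parsed via the productions $S \to aB$ and $B \to bS$:

lemma "[a,b] \<in> S"
by (blast intro: S_A_B.intros)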
 
-text{*
+text\<open>
 We conclude this section with a comparison of our proof with 
 Hopcroft\index{Hopcroft, J. E.} and Ullman's\index{Ullman, J. D.}
 @{cite \<open>p.\ts81\<close> HopcroftUllman}.
@@ -304,6 +304,6 @@
 cases.  Such errors are found in many pen-and-paper proofs when they
 are scrutinized formally.%
 \index{grammars!defining inductively|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Inductive/Advanced.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Advanced.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 ML_file "../../antiquote_setup.ML"
 (*>*)
 
-text {*
+text \<open>
 The premises of introduction rules may contain universal quantifiers and
 monotone functions.  A universal quantifier lets the rule 
 refer to any number of instances of 
@@ -10,11 +10,11 @@
 to existing constructions (such as ``list of'') over the inductively defined
 set.  The examples below show how to use the additional expressiveness
 and how to reason from the resulting definitions.
-*}
+\<close>
 
-subsection{* Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype} *}
+subsection\<open>Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype}\<close>
 
-text {*
+text \<open>
 \index{ground terms example|(}%
 \index{quantifiers!and inductive definitions|(}%
 As a running example, this section develops the theory of \textbf{ground
@@ -23,19 +23,19 @@
 constant as a function applied to the null argument  list.  Let us declare a
 datatype @{text gterm} for the type of ground  terms. It is a type constructor
 whose argument is a type of  function symbols. 
-*}
+\<close>
 
 datatype 'f gterm = Apply 'f "'f gterm list"
 
-text {*
+text \<open>
 To try it out, we declare a datatype of some integer operations: 
 integer constants, the unary minus operator and the addition 
 operator.
-*}
+\<close>
 
 datatype integer_op = Number int | UnaryMinus | Plus
 
-text {*
+text \<open>
 Now the type @{typ "integer_op gterm"} denotes the ground 
 terms built over those symbols.
 
@@ -56,7 +56,7 @@
 to our inductively defined set: it is a ground term 
 over~@{text F}.  The function @{term set} denotes the set of elements in a given 
 list. 
-*}
+\<close>
 
 inductive_set
   gterms :: "'f set \<Rightarrow> 'f gterm set"
@@ -65,11 +65,11 @@
 step[intro!]: "\<lbrakk>\<forall>t \<in> set args. t \<in> gterms F;  f \<in> F\<rbrakk>
                \<Longrightarrow> (Apply f args) \<in> gterms F"
 
-text {*
+text \<open>
 To demonstrate a proof from this definition, let us 
 show that the function @{term gterms}
 is \textbf{monotone}.  We shall need this concept shortly.
-*}
+\<close>
 
 lemma gterms_mono: "F\<subseteq>G \<Longrightarrow> gterms F \<subseteq> gterms G"
 apply clarify
@@ -81,7 +81,7 @@
 apply clarify
 apply (erule gterms.induct)
 (*>*)
-txt{*
+txt\<open>
 Intuitively, this theorem says that
 enlarging the set of function symbols enlarges the set of ground 
 terms. The proof is a trivial rule induction.
@@ -92,9 +92,9 @@
 The assumptions state that @{text f} belongs 
 to~@{text F}, which is included in~@{text G}, and that every element of the list @{text args} is
 a ground term over~@{text G}.  The @{text blast} method finds this chain of reasoning easily.  
-*}
+\<close>
 (*<*)oops(*>*)
-text {*
+text \<open>
 \begin{warn}
 Why do we call this function @{text gterms} instead 
 of @{text gterm}?  A constant may have the same name as a type.  However,
@@ -113,7 +113,7 @@
 terms and a function  symbol~@{text f}. If the length of the list matches the
 function's arity  then applying @{text f} to @{text args} yields a well-formed
 term.
-*}
+\<close>
 
 inductive_set
   well_formed_gterm :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
@@ -123,16 +123,16 @@
                 length args = arity f\<rbrakk>
                \<Longrightarrow> (Apply f args) \<in> well_formed_gterm arity"
 
-text {*
+text \<open>
 The inductive definition neatly captures the reasoning above.
 The universal quantification over the
 @{text set} of arguments expresses that all of them are well-formed.%
 \index{quantifiers!and inductive definitions|)}
-*}
+\<close>
 
-subsection{* Alternative Definition Using a Monotone Function *}
+subsection\<open>Alternative Definition Using a Monotone Function\<close>
 
-text {*
+text \<open>
 \index{monotone functions!and inductive definitions|(}% 
 An inductive definition may refer to the
 inductively defined  set through an arbitrary monotone function.  To
@@ -148,7 +148,7 @@
 introduction rule.  The first premise states that @{text args} belongs to
 the @{text lists} of well-formed terms.  This formulation is more
 direct, if more obscure, than using a universal quantifier.
-*}
+\<close>
 
 inductive_set
   well_formed_gterm' :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
@@ -159,7 +159,7 @@
                \<Longrightarrow> (Apply f args) \<in> well_formed_gterm' arity"
 monos lists_mono
 
-text {*
+text \<open>
 We cite the theorem @{text lists_mono} to justify 
 using the function @{term lists}.%
 \footnote{This particular theorem is installed by default already, but we
@@ -194,15 +194,15 @@
 Further lists of well-formed
 terms become available and none are taken away.%
 \index{monotone functions!and inductive definitions|)} 
-*}
+\<close>
 
-subsection{* A Proof of Equivalence *}
+subsection\<open>A Proof of Equivalence\<close>
 
-text {*
+text \<open>
 We naturally hope that these two inductive definitions of ``well-formed'' 
 coincide.  The equality can be proved by separate inclusions in 
 each direction.  Each is a trivial rule induction. 
-*}
+\<close>
 
 lemma "well_formed_gterm arity \<subseteq> well_formed_gterm' arity"
 apply clarify
@@ -214,7 +214,7 @@
 apply clarify
 apply (erule well_formed_gterm.induct)
 (*>*)
-txt {*
+txt \<open>
 The @{text clarify} method gives
 us an element of @{term "well_formed_gterm arity"} on which to perform 
 induction.  The resulting subgoal can be proved automatically:
@@ -222,7 +222,7 @@
 This proof resembles the one given in
 {\S}\ref{sec:gterm-datatype} above, especially in the form of the
 induction hypothesis.  Next, we consider the opposite inclusion:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "well_formed_gterm' arity \<subseteq> well_formed_gterm arity"
 apply clarify
@@ -234,7 +234,7 @@
 apply clarify
 apply (erule well_formed_gterm'.induct)
 (*>*)
-txt {*
+txt \<open>
 The proof script is virtually identical,
 but the subgoal after applying induction may be surprising:
 @{subgoals[display,indent=0,margin=65]}
@@ -257,13 +257,13 @@
 distribute over intersection.  Monotonicity implies one direction of
 this set equality; we have this theorem:
 @{named_thms [display,indent=0] mono_Int [no_vars] (mono_Int)}
-*}
+\<close>
 (*<*)oops(*>*)
 
 
-subsection{* Another Example of Rule Inversion *}
+subsection\<open>Another Example of Rule Inversion\<close>
 
-text {*
+text \<open>
 \index{rule inversion|(}%
 Does @{term gterms} distribute over intersection?  We have proved that this
 function is monotone, so @{text mono_Int} gives one of the inclusions.  The
@@ -271,20 +271,20 @@
 sets
 @{term F} and~@{term G} then it is also a ground term over their intersection,
 @{term "F \<inter> G"}.
-*}
+\<close>
 
 lemma gterms_IntI:
      "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
 (*<*)oops(*>*)
-text {*
+text \<open>
 Attempting this proof, we get the assumption 
 @{term "Apply f args \<in> gterms G"}, which cannot be broken down. 
 It looks like a job for rule inversion:\cmmdx{inductive\protect\_cases}
-*}
+\<close>
 
 inductive_cases gterm_Apply_elim [elim!]: "Apply f args \<in> gterms F"
 
-text {*
+text \<open>
 Here is the result.
 @{named_thms [display,indent=0,margin=50] gterm_Apply_elim [no_vars] (gterm_Apply_elim)}
 This rule replaces an assumption about @{term "Apply f args"} by 
@@ -295,7 +295,7 @@
 have given the @{text "elim!"} attribute. 
 
 Now we can prove the other half of that distributive law.
-*}
+\<close>
 
 lemma gterms_IntI [rule_format, intro!]:
      "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
@@ -306,7 +306,7 @@
 lemma "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
 apply (erule gterms.induct)
 (*>*)
-txt {*
+txt \<open>
 The proof begins with rule induction over the definition of
 @{term gterms}, which leaves a single subgoal:  
 @{subgoals[display,indent=0,margin=65]}
@@ -320,13 +320,13 @@
 
 \smallskip
 Our distributive law is a trivial consequence of previously-proved results:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma gterms_Int_eq [simp]:
      "gterms (F \<inter> G) = gterms F \<inter> gterms G"
 by (blast intro!: mono_Int monoI gterms_mono)
 
-text_raw {*
+text_raw \<open>
 \index{rule inversion|)}%
 \index{ground terms example|)}
 
@@ -339,7 +339,7 @@
 list of argument types paired with the result type. 
 Complete this inductive definition:
 \begin{isabelle}
-*}
+\<close>
 
 inductive_set
   well_typed_gterm :: "('f \<Rightarrow> 't list * 't) \<Rightarrow> ('f gterm * 't)set"
@@ -352,15 +352,15 @@
      \<Longrightarrow> (Apply f (map fst args), rtype) 
          \<in> well_typed_gterm sig"
 (*>*)
-text_raw {*
+text_raw \<open>
 \end{isabelle}
 \end{exercise}
 \end{isamarkuptext}
-*}
+\<close>
 
 (*<*)
 
-text{*the following declaration isn't actually used*}
+text\<open>the following declaration isn't actually used\<close>
 primrec
   integer_arity :: "integer_op \<Rightarrow> nat"
 where
@@ -368,7 +368,7 @@
 | "integer_arity UnaryMinus        = 1"
 | "integer_arity Plus              = 2"
 
-text{* the rest isn't used: too complicated.  OK for an exercise though.*}
+text\<open>the rest isn't used: too complicated.  OK for an exercise though.\<close>
 
 inductive_set
   integer_signature :: "(integer_op * (unit list * unit)) set"
--- a/src/Doc/Tutorial/Inductive/Even.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Even.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 ML_file "../../antiquote_setup.ML" 
 (*>*)
 
-section{* The Set of Even Numbers *}
+section\<open>The Set of Even Numbers\<close>
 
-text {*
+text \<open>
 \index{even numbers!defining inductively|(}%
 The set of even numbers can be inductively defined as the least set
 containing 0 and closed under the operation $+2$.  Obviously,
@@ -12,20 +12,20 @@
 We shall prove below that the two formulations coincide.  On the way we
 shall examine the primary means of reasoning about inductively defined
 sets: rule induction.
-*}
+\<close>
 
-subsection{* Making an Inductive Definition *}
+subsection\<open>Making an Inductive Definition\<close>
 
-text {*
+text \<open>
 Using \commdx{inductive\protect\_set}, we declare the constant @{text even} to be
 a set of natural numbers with the desired properties.
-*}
+\<close>
 
 inductive_set even :: "nat set" where
 zero[intro!]: "0 \<in> even" |
 step[intro!]: "n \<in> even \<Longrightarrow> (Suc (Suc n)) \<in> even"
 
-text {*
+text \<open>
 An inductive definition consists of introduction rules.  The first one
 above states that 0 is even; the second states that if $n$ is even, then so
 is~$n+2$.  Given this declaration, Isabelle generates a fixed point
@@ -44,16 +44,16 @@
 apply them aggressively. Obviously, regarding 0 as even is safe.  The
 @{text step} rule is also safe because $n+2$ is even if and only if $n$ is
 even.  We prove this equivalence later.
-*}
+\<close>
 
-subsection{*Using Introduction Rules*}
+subsection\<open>Using Introduction Rules\<close>
 
-text {*
+text \<open>
 Our first lemma states that numbers of the form $2\times k$ are even.
 Introduction rules are used to show that specific values belong to the
 inductive set.  Such proofs typically involve 
 induction, perhaps over some other inductive set.
-*}
+\<close>
 
 lemma two_times_even[intro!]: "2*k \<in> even"
 apply (induct_tac k)
@@ -63,7 +63,7 @@
 lemma "2*k \<in> even"
 apply (induct_tac k)
 (*>*)
-txt {*
+txt \<open>
 \noindent
 The first step is induction on the natural number @{text k}, which leaves
 two subgoals:
@@ -75,14 +75,14 @@
 definition of @{text even} (using the divides relation) and our inductive
 definition.  One direction of this equivalence is immediate by the lemma
 just proved, whose @{text "intro!"} attribute ensures it is applied automatically.
-*}
+\<close>
 (*<*)oops(*>*)
 lemma dvd_imp_even: "2 dvd n \<Longrightarrow> n \<in> even"
 by (auto simp add: dvd_def)
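
As an added concrete illustration, the two introduction rules derive membership of a specific number, here 4, peeling off two at a time:

lemma "Suc (Suc (Suc (Suc 0))) \<in> even"
by (rule even.step, rule even.step, rule even.zero)

Because both rules carry the @{text "intro!"} attribute, @{text blast} finds this derivation automatically as well.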
 
-subsection{* Rule Induction \label{sec:rule-induction} *}
+subsection\<open>Rule Induction \label{sec:rule-induction}\<close>
 
-text {*
+text \<open>
 \index{rule induction|(}%
 From the definition of the set
 @{term even}, Isabelle has
@@ -102,56 +102,56 @@
 Induction is the usual way of proving a property of the elements of an
 inductively defined set.  Let us prove that all members of the set
 @{term even} are multiples of two.
-*}
+\<close>
 
 lemma even_imp_dvd: "n \<in> even \<Longrightarrow> 2 dvd n"
-txt {*
+txt \<open>
 We begin by applying induction.  Note that @{text even.induct} has the form
 of an elimination rule, so we use the method @{text erule}.  We get two
 subgoals:
-*}
+\<close>
 apply (erule even.induct)
-txt {*
+txt \<open>
 @{subgoals[display,indent=0]}
 We unfold the definition of @{text dvd} in both subgoals, proving the first
 one and simplifying the second:
-*}
+\<close>
 apply (simp_all add: dvd_def)
-txt {*
+txt \<open>
 @{subgoals[display,indent=0]}
 The next command eliminates the existential quantifier from the assumption
 and replaces @{text n} by @{text "2 * k"}.
-*}
+\<close>
 apply clarify
-txt {*
+txt \<open>
 @{subgoals[display,indent=0]}
 To conclude, we tell Isabelle that the desired value is
 @{term "Suc k"}.  With this hint, the subgoal falls to @{text simp}.
-*}
+\<close>
 apply (rule_tac x = "Suc k" in exI, simp)
 (*<*)done(*>*)
 
-text {*
+text \<open>
 Combining the previous two results yields our objective, the
 equivalence relating @{term even} and @{text dvd}. 
 %
 %we don't want [iff]: discuss?
-*}
+\<close>
 
 theorem even_iff_dvd: "(n \<in> even) = (2 dvd n)"
 by (blast intro: dvd_imp_even even_imp_dvd)
 
 
-subsection{* Generalization and Rule Induction \label{sec:gen-rule-induction} *}
+subsection\<open>Generalization and Rule Induction \label{sec:gen-rule-induction}\<close>
 
-text {*
+text \<open>
 \index{generalizing for induction}%
 Before applying induction, we typically must generalize
 the induction formula.  With rule induction, the required generalization
 can be hard to find and sometimes requires a complete reformulation of the
 problem.  In this  example, our first attempt uses the obvious statement of
 the result.  It fails:
-*}
+\<close>
 
 lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
 apply (erule even.induct)
@@ -160,7 +160,7 @@
 lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
 apply (erule even.induct)
 (*>*)
-txt {*
+txt \<open>
 Rule induction finds no occurrences of @{term "Suc(Suc n)"} in the
 conclusion, which it therefore leaves unchanged.  (Look at
 @{text even.induct} to see why this happens.)  We have these subgoals:
@@ -171,7 +171,7 @@
 in general is described in {\S}\ref{sec:ind-var-in-prems} below.
 In the current case the solution is easy because
 we have the necessary inverse, subtraction:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma even_imp_even_minus_2: "n \<in> even \<Longrightarrow> n - 2 \<in> even"
 apply (erule even.induct)
@@ -181,7 +181,7 @@
 lemma "n \<in>  even \<Longrightarrow> n - 2 \<in> even"
 apply (erule even.induct)
 (*>*)
-txt {*
+txt \<open>
 This lemma is trivially inductive.  Here are the subgoals:
 @{subgoals[display,indent=0]}
 The first is trivial because @{text "0 - 2"} simplifies to @{text 0}, which is
@@ -191,24 +191,24 @@
 
 \medskip
 Using our lemma, we can easily prove the result we originally wanted:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma Suc_Suc_even_imp_even: "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
 by (drule even_imp_even_minus_2, simp)
 
-text {*
+text \<open>
 We have just proved the converse of the introduction rule @{text even.step}.
 This suggests proving the following equivalence.  We give it the
 \attrdx{iff} attribute because of its obvious value for simplification.
-*}
+\<close>
 
 lemma [iff]: "((Suc (Suc n)) \<in> even) = (n \<in> even)"
 by (blast dest: Suc_Suc_even_imp_even)
 
 
-subsection{* Rule Inversion \label{sec:rule-inversion} *}
+subsection\<open>Rule Inversion \label{sec:rule-inversion}\<close>
 
-text {*
+text \<open>
 \index{rule inversion|(}%
 Case analysis on an inductive definition is called \textbf{rule
 inversion}.  It is frequently used in proofs about operational
@@ -232,11 +232,11 @@
 @{term "Suc(Suc n)"} then the first case becomes irrelevant, while the second
 case tells us that @{term n} belongs to @{term even}.  Isabelle will generate
 this instance for us:
-*}
+\<close>
 
 inductive_cases Suc_Suc_cases [elim!]: "Suc(Suc n) \<in> even"
 
-text {*
+text \<open>
 The \commdx{inductive\protect\_cases} command generates an instance of
 the @{text cases} rule for the supplied pattern and gives it the supplied name:
 @{named_thms [display,indent=0] Suc_Suc_cases [no_vars] (Suc_Suc_cases)}
@@ -265,13 +265,13 @@
 
 For one-off applications of rule inversion, use the \methdx{ind_cases} method. 
 Here is an example:
-*}
+\<close>
 
 (*<*)lemma "Suc(Suc n) \<in> even \<Longrightarrow> P"(*>*)
 apply (ind_cases "Suc(Suc n) \<in> even")
 (*<*)oops(*>*)
 
-text {*
+text \<open>
 The specified instance of the @{text cases} rule is generated, then applied
 as an elimination rule.
 
@@ -285,6 +285,6 @@
 used.  Later examples will show that they are actually worth using.%
 \index{rule inversion|)}%
 \index{even numbers!defining inductively|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Inductive/Mutual.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Mutual.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 (*<*)theory Mutual imports Main begin(*>*)
 
-subsection{*Mutually Inductive Definitions*}
+subsection\<open>Mutually Inductive Definitions\<close>
 
-text{*
+text\<open>
 Just as there are datatypes defined by mutual recursion, there are sets defined
 by mutual induction. As a trivial example we consider the even and odd
 natural numbers:
-*}
+\<close>
 
 inductive_set
   Even :: "nat set" and
@@ -16,7 +16,7 @@
 | EvenI: "n \<in> Odd \<Longrightarrow> Suc n \<in> Even"
 | OddI:  "n \<in> Even \<Longrightarrow> Suc n \<in> Odd"
 
-text{*\noindent
+text\<open>\noindent
 The mutually inductive definition of multiple sets is no different from
 that of a single set, except for induction: just as for mutually recursive
 datatypes, induction needs to involve all the simultaneously defined sets. In
@@ -26,25 +26,25 @@
 
 If we want to prove that all even numbers are divisible by two, we have to
 generalize the statement as follows:
-*}
+\<close>
 
 lemma "(m \<in> Even \<longrightarrow> 2 dvd m) \<and> (n \<in> Odd \<longrightarrow> 2 dvd (Suc n))"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by rule induction. Because of the form of the induction theorem,
 it is applied by @{text rule} rather than @{text erule} as for ordinary
 inductive definitions:
-*}
+\<close>
 
 apply(rule Even_Odd.induct)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 The first two subgoals are proved by simplification and the final one can be
 proved in the same manner as in \S\ref{sec:rule-induction}
 where the same subgoal was encountered before.
 We do not show the proof script.
-*}
+\<close>
 (*<*)
   apply simp
  apply simp
@@ -55,17 +55,17 @@
 done
 (*>*)
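
As a small added check, the introduction rules establish membership of concrete numbers, alternating between the two simultaneously defined sets:

lemma "Suc (Suc 0) \<in> Even"
by (blast intro: Even_Odd.intros)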
 
-subsection{*Inductively Defined Predicates\label{sec:ind-predicates}*}
+subsection\<open>Inductively Defined Predicates\label{sec:ind-predicates}\<close>
 
-text{*\index{inductive predicates|(}
+text\<open>\index{inductive predicates|(}
 Instead of a set of even numbers one can also define a predicate on @{typ nat}:
-*}
+\<close>
 
 inductive evn :: "nat \<Rightarrow> bool" where
 zero: "evn 0" |
 step: "evn n \<Longrightarrow> evn(Suc(Suc n))"
 
-text{*\noindent Everything works as before, except that
+text\<open>\noindent Everything works as before, except that
 you write \commdx{inductive} instead of \isacommand{inductive\_set} and
 @{prop"evn n"} instead of @{prop"n : Even"}.
 When defining an n-ary relation as a predicate, it is recommended to curry
@@ -75,6 +75,6 @@
 
 When should you choose sets and when predicates? If you intend to combine your notion with set theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the @{text"\<in>"} notation. But note that predicates of more than one argument cannot be combined with the usual set theoretic operators: @{term"P \<union> Q"} is not well-typed if @{text"P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool"}; you have to write @{term"%x y. P x y & Q x y"} instead.
 \index{inductive predicates|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Inductive/Star.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Star.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory Star imports Main begin(*>*)
 
-section{*The Reflexive Transitive Closure*}
+section\<open>The Reflexive Transitive Closure\<close>
 
-text{*\label{sec:rtc}
+text\<open>\label{sec:rtc}
 \index{reflexive transitive closure!defining inductively|(}%
 An inductive definition may accept parameters, so it can express 
 functions that yield sets.
@@ -12,7 +12,7 @@
 introduced in \S\ref{sec:Relations}, where the operator @{text"\<^sup>*"} was
 defined as a least fixed point because inductive definitions were not yet
 available. But now they are:
-*}
+\<close>
 
 inductive_set
   rtc :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"   ("_*" [1000] 999)
@@ -21,7 +21,7 @@
   rtc_refl[iff]:  "(x,x) \<in> r*"
 | rtc_step:       "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
 
-text{*\noindent
+text\<open>\noindent
 The function @{term rtc} is annotated with concrete syntax: instead of
 @{text"rtc r"} we can write @{term"r*"}. The actual definition
 consists of two rules. Reflexivity is obvious and is immediately given the
@@ -36,12 +36,12 @@
 for a start, it does not even mention transitivity.
 The rest of this section is devoted to proving that it is equivalent to
 the standard definition. We start with a simple lemma:
-*}
+\<close>
 
 lemma [intro]: "(x,y) \<in> r \<Longrightarrow> (x,y) \<in> r*"
 by(blast intro: rtc_step)
 
-text{*\noindent
+text\<open>\noindent
 Although the lemma itself is an unremarkable consequence of the basic rules,
 it has the advantage that it can be declared an introduction rule without the
 danger of killing the automatic tactics because @{term"r*"} occurs only in
@@ -61,12 +61,12 @@
 expects a premise of the form $(x@1,\dots,x@n) \in R$.
 
 Now we turn to the inductive proof of transitivity:
-*}
+\<close>
 
 lemma rtc_trans: "\<lbrakk> (x,y) \<in> r*; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
 apply(erule rtc.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 Unfortunately, even the base case is a problem:
 @{subgoals[display,indent=0,goals_limit=1]}
 We have to abandon this proof attempt.
@@ -85,12 +85,12 @@
 goal, of the pair @{term"(x,y)"} only @{term x} appears also in the
 conclusion, but not @{term y}. Thus our induction statement is too
 general. Fortunately, it can easily be specialized:
-transfer the additional premise @{prop"(y,z):r*"} into the conclusion:*}
+transfer the additional premise @{prop"(y,z):r*"} into the conclusion:\<close>
 (*<*)oops(*>*)
 lemma rtc_trans[rule_format]:
   "(x,y) \<in> r* \<Longrightarrow> (y,z) \<in> r* \<longrightarrow> (x,z) \<in> r*"
 
-txt{*\noindent
+txt\<open>\noindent
 This is not an obscure trick but a generally applicable heuristic:
 \begin{quote}\em
 When proving a statement by rule induction on $(x@1,\dots,x@n) \in R$,
@@ -101,24 +101,24 @@
 \S\ref{sec:ind-var-in-prems}. The @{text rule_format} directive turns
 @{text"\<longrightarrow>"} back into @{text"\<Longrightarrow>"}: in the end we obtain the original
 statement of our lemma.
-*}
+\<close>
 
 apply(erule rtc.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 Now induction produces two subgoals which are both proved automatically:
 @{subgoals[display,indent=0]}
-*}
+\<close>
 
  apply(blast)
 apply(blast intro: rtc_step)
 done
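
To see the closure in action (an added example), two single @{term r}-steps chain into @{term"r*"} without appealing to transitivity:

lemma "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
by (blast intro: rtc_step)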
 
-text{*
+text\<open>
 Let us now prove that @{term"r*"} is really the reflexive transitive closure
 of @{term r}, i.e.\ the least reflexive and transitive
 relation containing @{term r}. The latter is easily formalized
-*}
+\<close>
 
 inductive_set
   rtc2 :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"
@@ -128,10 +128,10 @@
 | "(x,x) \<in> rtc2 r"
 | "\<lbrakk> (x,y) \<in> rtc2 r; (y,z) \<in> rtc2 r \<rbrakk> \<Longrightarrow> (x,z) \<in> rtc2 r"
 
-text{*\noindent
+text\<open>\noindent
 and the equivalence of the two definitions is easily shown by the obvious rule
 inductions:
-*}
+\<close>
 
 lemma "(x,y) \<in> rtc2 r \<Longrightarrow> (x,y) \<in> r*"
 apply(erule rtc2.induct)
@@ -146,7 +146,7 @@
 apply(blast intro: rtc2.intros)
 done
 
-text{*
+text\<open>
 So why did we start with the first definition? Because it is simpler. It
 contains only two rules, and the single step rule is simpler than
 transitivity.  As a consequence, @{thm[source]rtc.induct} is simpler than
@@ -164,7 +164,7 @@
 @{term rtc} where @{thm[source]rtc_step} is replaced by its converse as shown
 in exercise~\ref{ex:converse-rtc-step}.
 \end{exercise}
-*}
+\<close>
 (*<*)
 lemma rtc_step2[rule_format]: "(x,y) : r* \<Longrightarrow> (y,z) : r --> (x,z) : r*"
 apply(erule rtc.induct)
--- a/src/Doc/Tutorial/Misc/AdvancedInd.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/AdvancedInd.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,29 +2,29 @@
 theory AdvancedInd imports Main begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Now that we have learned about rules and logic, we take another look at the
 finer points of induction.  We consider two questions: what to do if the
 proposition to be proved is not directly amenable to induction
 (\S\ref{sec:ind-var-in-prems}), and how to utilize (\S\ref{sec:complete-ind})
 and even derive (\S\ref{sec:derive-ind}) new induction schemas. We conclude
 with an extended example of induction (\S\ref{sec:CTL-revisited}).
-*}
+\<close>
 
-subsection{*Massaging the Proposition*}
+subsection\<open>Massaging the Proposition\<close>
 
-text{*\label{sec:ind-var-in-prems}
+text\<open>\label{sec:ind-var-in-prems}
 Often we have assumed that the theorem to be proved is already in a form
 that is amenable to induction, but sometimes it isn't.
 Here is an example.
 Since @{term"hd"} and @{term"last"} return the first and last element of a
 non-empty list, this lemma looks easy to prove:
-*}
+\<close>
 
 lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) = last xs"
 apply(induct_tac xs)
 
-txt{*\noindent
+txt\<open>\noindent
 But induction produces the warning
 \begin{quote}\tt
 Induction variable occurs also among premises!
@@ -51,14 +51,14 @@
 implication~(@{text"\<longrightarrow>"}), letting
 \attrdx{rule_format} (\S\ref{sec:forward}) convert the
 result to the usual @{text"\<Longrightarrow>"} form:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma hd_rev [rule_format]: "xs \<noteq> [] \<longrightarrow> hd(rev xs) = last xs"
 (*<*)
 apply(induct_tac xs)
 (*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 This time, induction leaves us with a trivial base case:
 @{subgoals[display,indent=0,goals_limit=1]}
 And @{text"auto"} completes the proof.
@@ -109,12 +109,12 @@
 Unfortunately, this induction schema cannot be expressed as a
 single theorem because it depends on the number of free variables in $t$ ---
 the notation $\overline{y}$ is merely an informal device.
-*}
+\<close>
 (*<*)by auto(*>*)
 
-subsection{*Beyond Structural and Recursion Induction*}
+subsection\<open>Beyond Structural and Recursion Induction\<close>
 
-text{*\label{sec:complete-ind}
+text\<open>\label{sec:complete-ind}
 So far, inductive proofs were by structural induction for
 primitive recursive functions and recursion induction for total recursive
 functions. But sometimes structural induction is awkward and there is no
@@ -130,12 +130,12 @@
 @{thm[display]"nat_less_induct"[no_vars]}
 As an application, we prove a property of the following
 function:
-*}
+\<close>
 
 axiomatization f :: "nat \<Rightarrow> nat"
   where f_ax: "f(f(n)) < f(Suc(n))" for n :: nat
 
-text{*
+text\<open>
 \begin{warn}
 We discourage the use of axioms because of the danger of
 inconsistencies.  Axiom @{text f_ax} does
@@ -148,35 +148,35 @@
 The axiom for @{term"f"} implies @{prop"n <= f n"}, which can
 be proved by induction on \mbox{@{term"f n"}}. Following the recipe outlined
 above, we have to phrase the proposition as follows to allow induction:
-*}
+\<close>
 
 lemma f_incr_lem: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
 
-txt{*\noindent
+txt\<open>\noindent
 To perform induction on @{term k} using @{thm[source]nat_less_induct}, we use
 the same general induction method as for recursion induction (see
 \S\ref{sec:fun-induction}):
-*}
+\<close>
 
 apply(induct_tac k rule: nat_less_induct)
 
-txt{*\noindent
+txt\<open>\noindent
 We get the following proof state:
 @{subgoals[display,indent=0,margin=65]}
 After stripping the @{text"\<forall>i"}, the proof continues with a case
 distinction on @{term"i"}. The case @{prop"i = (0::nat)"} is trivial and we focus on
 the other case:
-*}
+\<close>
 
 apply(rule allI)
 apply(case_tac i)
  apply(simp)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
-*}
+\<close>
 by(blast intro!: f_ax Suc_leI intro: le_less_trans)
 
-text{*\noindent
+text\<open>\noindent
 If you find the last step puzzling, here are the two lemmas it employs:
 \begin{isabelle}
 @{thm Suc_leI[no_vars]}
@@ -203,19 +203,19 @@
 proofs are easy to write but hard to read and understand.
 
 The desired result, @{prop"i <= f i"}, follows from @{thm[source]f_incr_lem}:
-*}
+\<close>
 
 lemmas f_incr = f_incr_lem[rule_format, OF refl]
 
-text{*\noindent
+text\<open>\noindent
 The final @{thm[source]refl} gets rid of the premise @{text"?k = f ?i"}. 
 We could have included this derivation in the original statement of the lemma:
-*}
+\<close>
 
 lemma f_incr[rule_format, OF refl]: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
 (*<*)oops(*>*)
 
-text{*
+text\<open>
 \begin{exercise}
 From the axiom and lemma for @{term"f"}, show that @{term"f"} is the
 identity function.
@@ -235,32 +235,32 @@
 which is a special case of @{thm[source]measure_induct}
 @{thm[display]measure_induct[no_vars]}
 where @{term f} may be any function into type @{typ nat}.
-*}
+\<close>
 
-subsection{*Derivation of New Induction Schemas*}
+subsection\<open>Derivation of New Induction Schemas\<close>
 
-text{*\label{sec:derive-ind}
+text\<open>\label{sec:derive-ind}
 \index{induction!deriving new schemas}%
 Induction schemas are ordinary theorems and you can derive new ones
 whenever you wish.  This section shows you how, using the example
 of @{thm[source]nat_less_induct}. Assume we only have structural induction
 available for @{typ"nat"} and want to derive complete induction.  We
 must generalize the statement as shown:
-*}
+\<close>
 
 lemma induct_lem: "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> \<forall>m<n. P m"
 apply(induct_tac n)
 
-txt{*\noindent
+txt\<open>\noindent
 The base case is vacuously true. For the induction step (@{prop"m <
 Suc n"}) we distinguish two cases: case @{prop"m < n"} is true by induction
 hypothesis and case @{prop"m = n"} follows from the assumption, again using
 the induction hypothesis:
-*}
+\<close>
  apply(blast)
 by(blast elim: less_SucE)
 
-text{*\noindent
+text\<open>\noindent
 The elimination rule @{thm[source]less_SucE} expresses the case distinction:
 @{thm[display]"less_SucE"[no_vars]}
 
@@ -270,16 +270,16 @@
 and remove the trivial condition @{prop"n < Suc n"}. Fortunately, this
 happens automatically when we add the lemma as a new premise to the
 desired goal:
-*}
+\<close>
 
 theorem nat_less_induct: "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> P n"
 by(insert induct_lem, blast)
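
As a deliberately trivial added check, the freshly derived schema can be handed to @{text induct_tac} just like a library rule:

lemma "n < Suc n"
apply(induct_tac n rule: nat_less_induct)
by simp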
 
-text{*
+text\<open>
 HOL already provides the mother of
 all inductions, well-founded induction (see \S\ref{sec:Well-founded}).  For
 example, theorem @{thm[source]nat_less_induct} is
 a special case of @{thm[source]wf_induct} where @{term r} is @{text"<"} on
 @{typ nat}. The details can be found in theory \isa{Wellfounded_Recursion}.
-*}
+\<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Misc/Itrev.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Itrev.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -5,9 +5,9 @@
 declare [[names_unique = false]]
 (*>*)
 
-section{*Induction Heuristics*}
+section\<open>Induction Heuristics\<close>
 
-text{*\label{sec:InductionHeuristics}
+text\<open>\label{sec:InductionHeuristics}
 \index{induction heuristics|(}%
 The purpose of this section is to illustrate some simple heuristics for
 inductive proofs. The first one we have already mentioned in our initial
@@ -43,13 +43,13 @@
 @{text"@"} is linear in its first argument.  A linear time version of
 @{term"rev"} reqires an extra argument where the result is accumulated
 gradually, using only~@{text"#"}:
-*}
+\<close>
 
 primrec itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "itrev []     ys = ys" |
 "itrev (x#xs) ys = itrev xs (x#ys)"
 
-text{*\noindent
+text\<open>\noindent
 The behaviour of \cdx{itrev} is simple: it reverses
 its first argument by stacking its elements onto the second argument,
 and returning that second argument when the first one becomes
@@ -58,17 +58,17 @@
 
 Naturally, we would like to show that @{term"itrev"} does indeed reverse
 its first argument provided the second one is empty:
-*}
+\<close>
 
 lemma "itrev xs [] = rev xs"
 
-txt{*\noindent
+txt\<open>\noindent
 There is no choice as to the induction variable, and we immediately simplify:
-*}
+\<close>
 
 apply(induct_tac xs, simp_all)
 
-txt{*\noindent
+txt\<open>\noindent
 Unfortunately, this attempt does not prove
 the induction step:
 @{subgoals[display,indent=0,margin=70]}
@@ -80,11 +80,11 @@
 \end{quote}
 Of course one cannot do this na\"{\i}vely: @{term"itrev xs ys = rev xs"} is
 just not true.  The correct generalization is
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "itrev xs ys = rev xs @ ys"
 (*<*)apply(induct_tac xs, simp_all)(*>*)
-txt{*\noindent
+txt\<open>\noindent
 If @{term"ys"} is replaced by @{term"[]"}, the right-hand side simplifies to
 @{term"rev xs"}, as required.
 
@@ -100,14 +100,14 @@
 the subgoal, but the induction hypothesis needs to be applied with
 @{term"a # ys"} instead of @{term"ys"}. Hence we prove the theorem
 for all @{term"ys"} instead of a fixed one:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "\<forall>ys. itrev xs ys = rev xs @ ys"
 (*<*)
 by(induct_tac xs, simp_all)
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 This time induction on @{term"xs"} followed by simplification succeeds. This
 leads to another heuristic for generalization:
 \begin{quote}
@@ -139,7 +139,7 @@
 Additionally, you can read \S\ref{sec:advanced-ind}
 to learn about some advanced techniques for inductive proofs.%
 \index{induction heuristics|)}
-*}
+\<close>
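
The original statement now falls out by instantiating @{term ys} with @{term"[]"}; the name @{text itrev_rev} below is invented here for illustration:

lemma itrev_rev: "\<forall>ys. itrev xs ys = rev xs @ ys"
by(induct_tac xs, simp_all)

lemma "itrev xs [] = rev xs"
by(simp add: itrev_rev)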
 (*<*)
 declare [[names_unique = true]]
 end
--- a/src/Doc/Tutorial/Misc/Option2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Option2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,14 +4,14 @@
 hide_type option
 (*>*)
 
-text{*\indexbold{*option (type)}\indexbold{*None (constant)}%
+text\<open>\indexbold{*option (type)}\indexbold{*None (constant)}%
 \indexbold{*Some (constant)}
 Our final datatype is very simple but still eminently useful:
-*}
+\<close>
 
 datatype 'a option = None | Some 'a
 
-text{*\noindent
+text\<open>\noindent
 Frequently one needs to add a distinguished element to some existing type.
 For example, type @{text"t option"} can model the result of a computation that
 may either terminate with an error (represented by @{const None}) or return
@@ -21,7 +21,7 @@
 customized constructors like @{term Error} and @{term Infinity},
 but it is often simpler to use @{text option}. For an application see
 \S\ref{sec:Trie}.
-*}
+\<close>
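
For a flavour of typical usage (a hypothetical helper, not part of this theory), a lookup function on association lists returns @{const None} when the key is absent:

primrec lookup :: "('k \<times> 'v) list \<Rightarrow> 'k \<Rightarrow> 'v option" where
"lookup [] k = None" |
"lookup (p#ps) k = (if fst p = k then Some (snd p) else lookup ps k)"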
 (*<*)
 (*
 definition infplus :: "nat option \<Rightarrow> nat option \<Rightarrow> nat option" where
--- a/src/Doc/Tutorial/Misc/Plus.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Plus.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,13 +2,13 @@
 theory Plus imports Main begin
 (*>*)
 
-text{*\noindent Define the following addition function *}
+text\<open>\noindent Define the following addition function\<close>
 
 primrec add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "add m 0 = m" |
 "add m (Suc n) = add (Suc m) n"
 
-text{*\noindent and prove*}
+text\<open>\noindent and prove\<close>
 (*<*)
 lemma [simp]: "!m. add m n = m+n"
 apply(induct_tac n)
--- a/src/Doc/Tutorial/Misc/Tree.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Tree.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory Tree imports Main begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Define the datatype of \rmindex{binary trees}:
-*}
+\<close>
 
 datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"(*<*)
 
@@ -12,10 +12,10 @@
 "mirror Tip = Tip" |
 "mirror (Node l x r) = Node (mirror r) x (mirror l)"(*>*)
 
-text{*\noindent
+text\<open>\noindent
 Define a function @{term"mirror"} that mirrors a binary tree
 by swapping subtrees recursively. Prove
-*}
+\<close>
 
 lemma mirror_mirror: "mirror(mirror t) = t"
 (*<*)
@@ -27,10 +27,10 @@
 "flatten (Node l x r) = flatten l @ [x] @ flatten r"
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Define a function @{term"flatten"} that flattens a tree into a list
 by traversing it in infix order. Prove
-*}
+\<close>
 
 lemma "flatten(mirror t) = rev(flatten t)"
 (*<*)
--- a/src/Doc/Tutorial/Misc/Tree2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Tree2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,17 +2,17 @@
 theory Tree2 imports Tree begin
 (*>*)
 
-text{*\noindent In Exercise~\ref{ex:Tree} we defined a function
+text\<open>\noindent In Exercise~\ref{ex:Tree} we defined a function
 @{term"flatten"} from trees to lists. The straightforward version of
 @{term"flatten"} is based on @{text"@"} and is thus, like @{term"rev"},
 quadratic. A linear time version of @{term"flatten"} again requires an extra
-argument, the accumulator. Define *}
+argument, the accumulator. Define\<close>
 (*<*)primrec(*>*)flatten2 :: "'a tree \<Rightarrow> 'a list \<Rightarrow> 'a list"(*<*)where
 "flatten2 Tip xs = xs" |
 "flatten2 (Node l x r) xs = flatten2 l (x#(flatten2 r xs))"
 (*>*)
 
-text{*\noindent and prove*}
+text\<open>\noindent and prove\<close>
 (*<*)
 lemma [simp]: "!xs. flatten2 t xs = flatten t @ xs"
 apply(induct_tac t)
--- a/src/Doc/Tutorial/Misc/appendix.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/appendix.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 imports Main
 begin(*>*)
 
-text{*
+text\<open>
 \begin{table}[htbp]
 \begin{center}
 \begin{tabular}{lll}
@@ -28,6 +28,6 @@
 \label{tab:overloading}
 \end{center}
 \end{table}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Misc/case_exprs.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/case_exprs.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory case_exprs imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \subsection{Case Expressions}
 \label{sec:case-expressions}\index{*case expressions}%
 HOL also features \isa{case}-expressions for analyzing
@@ -50,20 +50,20 @@
 it works for any datatype.  In some cases, induction is overkill and a case
 distinction over all constructors of the datatype suffices.  This is performed
 by \methdx{case_tac}.  Here is a trivial example:
-*}
+\<close>
 
 lemma "(case xs of [] \<Rightarrow> [] | y#ys \<Rightarrow> xs) = xs"
 apply(case_tac xs)
 
-txt{*\noindent
+txt\<open>\noindent
 results in the proof state
 @{subgoals[display,indent=0,margin=65]}
 which is solved automatically:
-*}
+\<close>
 
 apply(auto)
 (*<*)done(*>*)
-text{*
+text\<open>
 Note that we do not need to give a lemma a name if we do not intend to refer
 to it explicitly in the future.
 Other basic laws about a datatype are applied automatically during
@@ -81,7 +81,7 @@
   the @{term xs} as a new free variable distinct from the bound
   @{term xs} in the goal.
 \end{warn}
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Misc/fakenat.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/fakenat.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,11 +2,11 @@
 theory fakenat imports Main begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 The type \tydx{nat} of natural
 numbers is predefined to have the constructors \cdx{0} and~\cdx{Suc}.
 It behaves approximately as if it were declared like this:
-*}
+\<close>
 
 datatype nat = zero ("0") | Suc nat
 (*<*)
--- a/src/Doc/Tutorial/Misc/natsum.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/natsum.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,26 +1,26 @@
 (*<*)
 theory natsum imports Main begin
 (*>*)
-text{*\noindent
+text\<open>\noindent
 In particular, there are @{text"case"}-expressions, for example
 @{term[display]"case n of 0 => 0 | Suc m => m"}
 primitive recursion, for example
-*}
+\<close>
 
 primrec sum :: "nat \<Rightarrow> nat" where
 "sum 0 = 0" |
 "sum (Suc n) = Suc n + sum n"
 
-text{*\noindent
+text\<open>\noindent
 and induction, for example
-*}
+\<close>
 
 lemma "sum n + sum n = n*(Suc n)"
 apply(induct_tac n)
 apply(auto)
 done
 
-text{*\newcommand{\mystar}{*%
+text\<open>\newcommand{\mystar}{*%
 }
 \index{arithmetic operations!for \protect\isa{nat}}%
 The arithmetic operations \isadxboldpos{+}{$HOL2arithfun},
@@ -73,40 +73,40 @@
 Both @{text auto} and @{text simp}
 (a method introduced below, \S\ref{sec:Simplification}) prove 
 simple arithmetic goals automatically:
-*}
+\<close>
 
 lemma "\<lbrakk> \<not> m < n; m < n + (1::nat) \<rbrakk> \<Longrightarrow> m = n"
 (*<*)by(auto)(*>*)
 
-text{*\noindent
+text\<open>\noindent
 For efficiency's sake, this built-in prover ignores quantified formulae,
 many logical connectives, and all arithmetic operations apart from addition.
 In consequence, @{text auto} and @{text simp} cannot prove this slightly more complex goal:
-*}
+\<close>
 
 lemma "m \<noteq> (n::nat) \<Longrightarrow> m < n \<or> n < m"
 (*<*)by(arith)(*>*)
 
-text{*\noindent The method \methdx{arith} is more general.  It attempts to
+text\<open>\noindent The method \methdx{arith} is more general.  It attempts to
 prove the first subgoal provided it is a \textbf{linear arithmetic} formula.
 Such formulas may involve the usual logical connectives (@{text"\<not>"},
 @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}, @{text"="},
 @{text"\<forall>"}, @{text"\<exists>"}), the relations @{text"="},
 @{text"\<le>"} and @{text"<"}, and the operations @{text"+"}, @{text"-"},
-@{term min} and @{term max}.  For example, *}
+@{term min} and @{term max}.  For example,\<close>
 
 lemma "min i (max j (k*k)) = max (min (k*k) i) (min i (j::nat))"
 apply(arith)
 (*<*)done(*>*)
 
-text{*\noindent
+text\<open>\noindent
 succeeds because @{term"k*k"} can be treated as atomic. In contrast,
-*}
+\<close>
 
 lemma "n*n = n+1 \<Longrightarrow> n=0"
 (*<*)oops(*>*)
 
-text{*\noindent
+text\<open>\noindent
 is not proved by @{text arith} because the proof relies 
 on properties of multiplication. Only multiplication by numerals (which is
 the same as iterated addition) is taken into account.
@@ -122,7 +122,7 @@
 If the formula involves quantifiers, @{text arith} may take
 super-exponential time and space.
 \end{warn}
-*}
+\<close>
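
For instance (an added example), a goal whose only products carry numeral coefficients stays within the linear fragment:

lemma "2*n + n = 3*(n::nat)"
by arith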
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Misc/pairs2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/pairs2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,7 +1,7 @@
 (*<*)
 theory pairs2 imports Main begin
 (*>*)
-text{*\label{sec:pairs}\index{pairs and tuples}
+text\<open>\label{sec:pairs}\index{pairs and tuples}
 HOL also has ordered pairs: \isa{($a@1$,$a@2$)} is of type $\tau@1$
 \indexboldpos{\isasymtimes}{$Isatype} $\tau@2$ provided each $a@i$ is of type
 $\tau@i$. The functions \cdx{fst} and
@@ -29,7 +29,7 @@
 records are preferable.
 \end{itemize}
 For more information on pairs and records see Chapter~\ref{ch:more-types}.
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Misc/prime_def.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/prime_def.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory prime_def imports Main begin
 consts prime :: "nat \<Rightarrow> bool"
 (*>*)
-text{*
+text\<open>
 \begin{warn}
 A common mistake when writing definitions is to introduce extra free
 variables on the right-hand side.  Consider the following, flawed definition
@@ -14,7 +14,7 @@
 The correct version is
 @{term[display,quotes]"prime(p) == 1 < p & (!m. m dvd p --> (m=1 | m=p))"}
 \end{warn}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Misc/simp.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/simp.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory simp imports Main begin
 (*>*)
 
-subsection{*Simplification Rules*}
+subsection\<open>Simplification Rules\<close>
 
-text{*\index{simplification rules}
+text\<open>\index{simplification rules}
 To facilitate simplification,  
 the attribute @{text"[simp]"}\index{*simp (attribute)}
 declares theorems to be simplification rules, which the simplifier
@@ -49,11 +49,11 @@
   different path) on $A$, it is not defined what the simplification attribute
   of that theorem will be in $C$: it could be either.
 \end{warn}
-*} 
+\<close> 
 
-subsection{*The {\tt\slshape simp}  Method*}
+subsection\<open>The {\tt\slshape simp}  Method\<close>
 
-text{*\index{*simp (method)|bold}
+text\<open>\index{*simp (method)|bold}
 The general format of the simplification method is
 \begin{quote}
 @{text simp} \textit{list of modifiers}
@@ -65,11 +65,11 @@
 only the first subgoal and may thus need to be repeated --- use
 \methdx{simp_all} to simplify all subgoals.
 If nothing changes, @{text simp} fails.
-*}
+\<close>
 
-subsection{*Adding and Deleting Simplification Rules*}
+subsection\<open>Adding and Deleting Simplification Rules\<close>
 
-text{*
+text\<open>
 \index{simplification rules!adding and deleting}%
 If a certain theorem is merely needed in a few proofs by simplification,
 we do not need to make it a global simplification rule. Instead we can modify
@@ -88,41 +88,41 @@
 \begin{quote}
 \isacommand{apply}@{text"(simp add: mod_mult_distrib add_mult_distrib)"}
 \end{quote}
-*}
+\<close>
 
-subsection{*Assumptions*}
+subsection\<open>Assumptions\<close>
 
-text{*\index{simplification!with/of assumptions}
+text\<open>\index{simplification!with/of assumptions}
 By default, assumptions are part of the simplification process: they are used
 as simplification rules and are simplified themselves. For example:
-*}
+\<close>
 
 lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs"
 apply simp
 done
 
-text{*\noindent
+text\<open>\noindent
 The second assumption simplifies to @{term"xs = []"}, which in turn
 simplifies the first assumption to @{term"zs = ys"}, thus reducing the
 conclusion to @{term"ys = ys"} and hence to @{term"True"}.
 
 In some cases, using the assumptions can lead to nontermination:
-*}
+\<close>
 
 lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
 
-txt{*\noindent
+txt\<open>\noindent
 An unmodified application of @{text"simp"} loops.  The culprit is the
 simplification rule @{term"f x = g (f (g x))"}, which is extracted from
 the assumption.  (Isabelle notices certain simple forms of
 nontermination but not this one.)  The problem can be circumvented by
 telling the simplifier to ignore the assumptions:
-*}
+\<close>
 
 apply(simp (no_asm))
 done
 
-text{*\noindent
+text\<open>\noindent
 Three modifiers influence the treatment of assumptions:
 \begin{description}
 \item[@{text"(no_asm)"}]\index{*no_asm (modifier)}
@@ -145,11 +145,11 @@
 %positive, and from left to right, if $n$ is negative.
 %Beware that such rotations make proofs quite brittle.
 %\end{warn}
-*}
+\<close>
 
-subsection{*Rewriting with Definitions*}
+subsection\<open>Rewriting with Definitions\<close>
 
-text{*\label{sec:Simp-with-Defs}\index{simplification!with definitions}
+text\<open>\label{sec:Simp-with-Defs}\index{simplification!with definitions}
 Constant definitions (\S\ref{sec:ConstDefinitions}) can be used as
 simplification rules, but by default they are not: the simplifier does not
 expand them automatically.  Definitions are intended for introducing abstract
@@ -159,32 +159,32 @@
 proofs more robust: if the definition has to be changed,
 only the proofs of the abstract properties will be affected.
 
-For example, given *}
+For example, given\<close>
 
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
 "xor A B \<equiv> (A \<and> \<not>B) \<or> (\<not>A \<and> B)"
 
-text{*\noindent
+text\<open>\noindent
 we may want to prove
-*}
+\<close>
 
 lemma "xor A (\<not>A)"
 
-txt{*\noindent
+txt\<open>\noindent
 Typically, we begin by unfolding some definitions:
 \indexbold{definitions!unfolding}
-*}
+\<close>
 
 apply(simp only: xor_def)
 
-txt{*\noindent
+txt\<open>\noindent
 In this particular case, the resulting goal
 @{subgoals[display,indent=0]}
 can be proved by simplification. Thus we could have proved the lemma outright by
-*}(*<*)oops lemma "xor A (\<not>A)"(*>*)
+\<close>(*<*)oops lemma "xor A (\<not>A)"(*>*)
 apply(simp add: xor_def)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Of course we can also unfold definitions in the middle of a proof.
 
 \begin{warn}
@@ -199,78 +199,78 @@
 one or several definitions, as in \isacommand{apply}\isa{(unfold xor_def)}.
 This can be useful in situations where \isa{simp} does too much.
 Warning: \isa{unfold} acts on all subgoals!
-*}
+\<close>
 
-subsection{*Simplifying {\tt\slshape let}-Expressions*}
+subsection\<open>Simplifying {\tt\slshape let}-Expressions\<close>
 
-text{*\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
+text\<open>\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
 Proving a goal containing \isa{let}-expressions almost invariably requires the
 @{text"let"}-con\-structs to be expanded at some point. Since
 @{text"let"}\ldots\isa{=}\ldots@{text"in"}{\ldots} is just syntactic sugar for
 the predefined constant @{term"Let"}, expanding @{text"let"}-constructs
-means rewriting with \tdx{Let_def}: *}
+means rewriting with \tdx{Let_def}:\<close>
 
 lemma "(let xs = [] in xs@ys@xs) = ys"
 apply(simp add: Let_def)
 done
 
-text{*
+text\<open>
 If, in a particular context, there is no danger of a combinatorial explosion
 of nested @{text"let"}s, you could even simplify with @{thm[source]Let_def} by
 default:
-*}
+\<close>
 declare Let_def [simp]
 
-subsection{*Conditional Simplification Rules*}
+subsection\<open>Conditional Simplification Rules\<close>
 
-text{*
+text\<open>
 \index{conditional simplification rules}%
 So far all examples of rewrite rules were equations. The simplifier also
 accepts \emph{conditional} equations, for example
-*}
+\<close>
 
 lemma hd_Cons_tl[simp]: "xs \<noteq> []  \<Longrightarrow>  hd xs # tl xs = xs"
 apply(case_tac xs, simp, simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 Note the use of ``\ttindexboldpos{,}{$Isar}'' to string together a
 sequence of methods. Assuming that the simplification rule
 @{term"(rev xs = []) = (xs = [])"}
 is present as well,
 the lemma below is proved by plain simplification:
-*}
+\<close>
 
 lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) # tl(rev xs) = rev xs"
 (*<*)
 by(simp)
 (*>*)
-text{*\noindent
+text\<open>\noindent
 The conditional equation @{thm[source]hd_Cons_tl} above
 can simplify @{term"hd(rev xs) # tl(rev xs)"} to @{term"rev xs"}
 because the corresponding precondition @{term"rev xs ~= []"}
 simplifies to @{term"xs ~= []"}, which is exactly the local
 assumption of the subgoal.
-*}
+\<close>
 
 
-subsection{*Automatic Case Splits*}
+subsection\<open>Automatic Case Splits\<close>
 
-text{*\label{sec:AutoCaseSplits}\indexbold{case splits}%
+text\<open>\label{sec:AutoCaseSplits}\indexbold{case splits}%
 Goals containing @{text"if"}-expressions\index{*if expressions!splitting of}
 are usually proved by case
 distinction on the boolean condition.  Here is an example:
-*}
+\<close>
 
 lemma "\<forall>xs. if xs = [] then rev xs = [] else rev xs \<noteq> []"
 
-txt{*\noindent
+txt\<open>\noindent
 The goal can be split by a special method, \methdx{split}:
-*}
+\<close>
 
 apply(split if_split)
 
-txt{*\noindent
+txt\<open>\noindent
 @{subgoals[display,indent=0]}
 where \tdx{if_split} is a theorem that expresses splitting of
 @{text"if"}s. Because
@@ -280,11 +280,11 @@
 
 This splitting idea generalizes from @{text"if"} to \sdx{case}.
 Let us simplify a case analysis over lists:\index{*list.split (theorem)}
-*}(*<*)by simp(*>*)
+\<close>(*<*)by simp(*>*)
 lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
 apply(split list.split)
  
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 The simplifier does not split
 @{text"case"}-expressions, as it does @{text"if"}-expressions, 
@@ -293,26 +293,26 @@
 @{text split}\index{*split (modifier)} 
 for adding splitting rules explicitly.  The
 lemma above can be proved in one step by
-*}
+\<close>
 (*<*)oops
 lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
 (*>*)
 apply(simp split: list.split)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 whereas \isacommand{apply}@{text"(simp)"} alone will not succeed.
 
 Every datatype $t$ comes with a theorem
 $t$@{text".split"} which can be declared to be a \bfindex{split rule} either
 locally as above, or by giving it the \attrdx{split} attribute globally:
-*}
+\<close>
 
 declare list.split [split]
 
-text{*\noindent
+text\<open>\noindent
 The @{text"split"} attribute can be removed with the @{text"del"} modifier,
 either locally
-*}
+\<close>
 (*<*)
 lemma "dummy=dummy"
 (*>*)
@@ -320,12 +320,12 @@
 (*<*)
 oops
 (*>*)
-text{*\noindent
+text\<open>\noindent
 or globally:
-*}
+\<close>
 declare list.split [split del]
 
-text{*
+text\<open>
 Polished proofs typically perform splitting within @{text simp} rather than 
 invoking the @{text split} method.  However, if a goal contains
 several @{text "if"} and @{text case} expressions, 
@@ -335,12 +335,12 @@
 The split rules shown above are intended to affect only the subgoal's
 conclusion.  If you want to split an @{text"if"} or @{text"case"}-expression
 in the assumptions, you have to apply \tdx{if_split_asm} or
-$t$@{text".split_asm"}: *}
+$t$@{text".split_asm"}:\<close>
 
 lemma "if xs = [] then ys \<noteq> [] else ys = [] \<Longrightarrow> xs @ ys \<noteq> []"
 apply(split if_split_asm)
 
-txt{*\noindent
+txt\<open>\noindent
 Unlike splitting the conclusion, this step creates two
 separate subgoals, which here can be solved by @{text"simp_all"}:
 @{subgoals[display,indent=0]}
@@ -357,22 +357,22 @@
   simplified at first, until either the expression reduces to one of the
   cases or it is split.
 \end{warn}
-*}
+\<close>
 (*<*)
 by(simp_all)
 (*>*)
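 
 text\<open>\noindent
 As an aside (an illustrative sketch, not from the original theory), the
 same lemma is proved in one step by handing the split rule to the
 simplifier directly:\<close>
 
 lemma "if xs = [] then ys \<noteq> [] else ys = [] \<Longrightarrow> xs @ ys \<noteq> []"
 by (simp split: if_split_asm)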
 
-subsection{*Tracing*}
-text{*\indexbold{tracing the simplifier}
+subsection\<open>Tracing\<close>
+text\<open>\indexbold{tracing the simplifier}
 Using the simplifier effectively may take a bit of experimentation.  Set the
 Proof General flag \pgmenu{Isabelle} $>$ \pgmenu{Settings} $>$ \pgmenu{Trace Simplifier} to get a better idea of what is going on:
-*}
+\<close>
 
 lemma "rev [a] = []"
 apply(simp)
 (*<*)oops(*>*)
 
-text{*\noindent
+text\<open>\noindent
 produces the following trace in Proof General's \pgmenu{Trace} buffer:
 
 \begin{ttbox}\makeatother
@@ -418,7 +418,7 @@
 obtained the desired trace.
 Since this is easily forgotten (and may have the unpleasant effect of
 swamping the interface with trace information), here is how you can switch
-the trace on locally in a proof: *}
+the trace on locally in a proof:\<close>
 
 (*<*)lemma "x=x"
 (*>*)
@@ -426,14 +426,14 @@
 apply simp
 (*<*)oops(*>*)
 
-text{* \noindent
+text\<open>\noindent
 Within the current proof, all simplifications in subsequent proof steps
 will be traced, but the text reminds you to remove the \isa{using} clause
-after it has done its job. *}
+after it has done its job.\<close>
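 
 text\<open>\noindent
 Concretely, the idiom looks like this (an illustrative sketch, not from
 the original theory):\<close>
 
 lemma "rev [a] = [a]"
 using [[simp_trace]]
 by simp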
 
-subsection{*Finding Theorems\label{sec:find}*}
+subsection\<open>Finding Theorems\label{sec:find}\<close>
 
-text{*\indexbold{finding theorems}\indexbold{searching theorems}
+text\<open>\indexbold{finding theorems}\indexbold{searching theorems}
 Isabelle's large database of proved theorems 
 offers a powerful search engine. Its chief limitation is
 its restriction to the theories currently loaded.
@@ -512,7 +512,7 @@
 through previous searches and just modify them. This saves you having
 to type in lengthy expressions again and again.
 \end{pgnote}
-*}
+\<close>
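 
 text\<open>\noindent
 For example (an illustrative sketch, not from the original theory), the
 following queries search for theorems matching a pattern and for
 rewrite rules applicable to a given term:\<close>
 
 find_theorems "_ @ _ = _ @ _"
 find_theorems simp: "rev (rev _)"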
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Misc/types.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/types.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,30 +3,30 @@
 type_synonym gate = "bool \<Rightarrow> bool \<Rightarrow> bool"
 type_synonym ('a, 'b) alist = "('a \<times> 'b) list"
 
-text{*\noindent
+text\<open>\noindent
 Internally all synonyms are fully expanded.  As a consequence Isabelle's
 output never contains synonyms.  Their main purpose is to improve the
 readability of theories.  Synonyms can be used just like any other
 type.
-*}
+\<close>
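 
 text\<open>\noindent
 For instance (an illustrative addition, not from the original theory),
 a list of pairs can be ascribed the synonym type directly:\<close>
 
 term "[(a, b)] :: ('a, 'b) alist"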
 
-subsection{*Constant Definitions*}
+subsection\<open>Constant Definitions\<close>
 
-text{*\label{sec:ConstDefinitions}\indexbold{definitions}%
+text\<open>\label{sec:ConstDefinitions}\indexbold{definitions}%
 Nonrecursive definitions can be made with the \commdx{definition}
 command, for example @{text nand} and @{text xor} gates
 (based on type @{typ gate} above):
-*}
+\<close>
 
 definition nand :: gate where "nand A B \<equiv> \<not>(A \<and> B)"
 definition xor  :: gate where "xor  A B \<equiv> A \<and> \<not>B \<or> \<not>A \<and> B"
 
-text{*\noindent%
+text\<open>\noindent%
 The symbol \indexboldpos{\isasymequiv}{$IsaEq} is a special form of equality
 that must be used in constant definitions.
 Pattern-matching is not allowed: each definition must be of
 the form $f\,x@1\,\dots\,x@n~\isasymequiv~t$.
 Section~\ref{sec:Simp-with-Defs} explains how definitions are used
 in proofs. The default name of each definition is $f$@{text"_def"}, where
-$f$ is the name of the defined constant.*}
+$f$ is the name of the defined constant.\<close>
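 
 text\<open>\noindent
 For example (an illustrative addition, not from the original theory),
 unfolding @{thm[source]nand_def} proves a simple property of
 @{text nand}:\<close>
 
 lemma "nand A A = (\<not> A)"
 by (simp add: nand_def)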
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Protocol/Event.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Event.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,7 +7,7 @@
     stores are visible to him
 *)(*<*)
 
-section{*Theory of Events for Security Protocols*}
+section\<open>Theory of Events for Security Protocols\<close>
 
 theory Event imports Message begin
 
@@ -20,10 +20,10 @@
         | Notes agent       msg
        
 consts 
-  bad    :: "agent set"                         -- {* compromised agents *}
+  bad    :: "agent set"                         \<comment> \<open>compromised agents\<close>
 
 
-text{*The constant "spies" is retained for compatibility's sake*}
+text\<open>The constant "spies" is retained for compatibility's sake\<close>
 
 primrec
   knows :: "agent => event list => msg set"
@@ -50,7 +50,7 @@
   spies  :: "event list => msg set" where
   "spies == knows Spy"
 
-text{*Spy has access to his own key for spoof messages, but Server is secure*}
+text\<open>Spy has access to his own key for spoof messages, but Server is secure\<close>
 specification (bad)
   Spy_in_bad     [iff]: "Spy \<in> bad"
   Server_not_bad [iff]: "Server \<notin> bad"
@@ -73,9 +73,9 @@
                         Says A B X => parts {X} \<union> used evs
                       | Gets A X   => used evs
                       | Notes A X  => parts {X} \<union> used evs)"
-    --{*The case for @{term Gets} seems anomalous, but @{term Gets} always
+    \<comment>\<open>The case for @{term Gets} seems anomalous, but @{term Gets} always
         follows @{term Says} in real protocols.  Seems difficult to change.
-        See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}. *}
+        See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}.\<close>
 
 lemma Notes_imp_used [rule_format]: "Notes A X \<in> set evs --> X \<in> used evs"
 apply (induct_tac evs)
@@ -88,7 +88,7 @@
 done
 
 
-subsection{*Function @{term knows}*}
+subsection\<open>Function @{term knows}\<close>
 
 (*Simplifying   
  parts(insert X (knows Spy evs)) = parts{X} \<union> parts(knows Spy evs).
@@ -99,8 +99,8 @@
      "knows Spy (Says A B X # evs) = insert X (knows Spy evs)"
 by simp
 
-text{*Letting the Spy see "bad" agents' notes avoids redundant case-splits
-      on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}*}
+text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
+      on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
 lemma knows_Spy_Notes [simp]:
      "knows Spy (Notes A X # evs) =  
           (if A:bad then insert X (knows Spy evs) else knows Spy evs)"
@@ -121,7 +121,7 @@
      "knows Spy evs \<subseteq> knows Spy (Gets A X # evs)"
 by (simp add: subset_insertI)
 
-text{*Spy sees what is sent on the traffic*}
+text\<open>Spy sees what is sent on the traffic\<close>
 lemma Says_imp_knows_Spy [rule_format]:
      "Says A B X \<in> set evs --> X \<in> knows Spy evs"
 apply (induct_tac "evs")
@@ -135,21 +135,21 @@
 done
 
 
-text{*Elimination rules: derive contradictions from old Says events containing
-  items known to be fresh*}
+text\<open>Elimination rules: derive contradictions from old Says events containing
+  items known to be fresh\<close>
 lemmas knows_Spy_partsEs =
      Says_imp_knows_Spy [THEN parts.Inj, elim_format] 
      parts.Body [elim_format]
 
 lemmas Says_imp_analz_Spy = Says_imp_knows_Spy [THEN analz.Inj]
 
-text{*Compatibility for the old "spies" function*}
+text\<open>Compatibility for the old "spies" function\<close>
 lemmas spies_partsEs = knows_Spy_partsEs
 lemmas Says_imp_spies = Says_imp_knows_Spy
 lemmas parts_insert_spies = parts_insert_knows_A [of _ Spy]
 
 
-subsection{*Knowledge of Agents*}
+subsection\<open>Knowledge of Agents\<close>
 
 lemma knows_Says: "knows A (Says A B X # evs) = insert X (knows A evs)"
 by simp
@@ -171,21 +171,21 @@
 lemma knows_subset_knows_Gets: "knows A evs \<subseteq> knows A (Gets A' X # evs)"
 by (simp add: subset_insertI)
 
-text{*Agents know what they say*}
+text\<open>Agents know what they say\<close>
 lemma Says_imp_knows [rule_format]: "Says A B X \<in> set evs --> X \<in> knows A evs"
 apply (induct_tac "evs")
 apply (simp_all (no_asm_simp) split: event.split)
 apply blast
 done
 
-text{*Agents know what they note*}
+text\<open>Agents know what they note\<close>
 lemma Notes_imp_knows [rule_format]: "Notes A X \<in> set evs --> X \<in> knows A evs"
 apply (induct_tac "evs")
 apply (simp_all (no_asm_simp) split: event.split)
 apply blast
 done
 
-text{*Agents know what they receive*}
+text\<open>Agents know what they receive\<close>
 lemma Gets_imp_knows_agents [rule_format]:
      "A \<noteq> Spy --> Gets A X \<in> set evs --> X \<in> knows A evs"
 apply (induct_tac "evs")
@@ -193,8 +193,8 @@
 done
 
 
-text{*What agents DIFFERENT FROM Spy know 
-  was either said, or noted, or got, or known initially*}
+text\<open>What agents DIFFERENT FROM Spy know 
+  was either said, or noted, or got, or known initially\<close>
 lemma knows_imp_Says_Gets_Notes_initState [rule_format]:
      "[| X \<in> knows A evs; A \<noteq> Spy |] ==> EX B.  
   Says A B X \<in> set evs | Gets A X \<in> set evs | Notes A X \<in> set evs | X \<in> initState A"
@@ -204,8 +204,8 @@
 apply blast
 done
 
-text{*What the Spy knows -- for the time being --
-  was either said or noted, or known initially*}
+text\<open>What the Spy knows -- for the time being --
+  was either said or noted, or known initially\<close>
 lemma knows_Spy_imp_Says_Notes_initState [rule_format]:
      "[| X \<in> knows Spy evs |] ==> EX A B.  
   Says A B X \<in> set evs | Notes A X \<in> set evs | X \<in> initState Spy"
@@ -241,15 +241,15 @@
 apply (blast intro: initState_into_used)
 done
 
-text{*NOTE REMOVAL--laws above are cleaner, as they don't involve "case"*}
+text\<open>NOTE REMOVAL -- the laws above are cleaner, as they don't involve "case"\<close>
 declare knows_Cons [simp del]
         used_Nil [simp del] used_Cons [simp del]
 
 
-text{*For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) --> P"}
+text\<open>For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) --> P"}.
   New events added by induction to "evs" are discarded.  Provided 
   this information isn't needed, the proof will be much shorter, since
-  it will omit complicated reasoning about @{term analz}.*}
+  it will omit complicated reasoning about @{term analz}.\<close>
 
 lemmas analz_mono_contra =
        knows_Spy_subset_knows_Spy_Says [THEN analz_mono, THEN contra_subsetD]
@@ -259,12 +259,12 @@
 lemmas analz_impI = impI [where P = "Y \<notin> analz (knows Spy evs)"] for Y evs
 
 ML
-{*
+\<open>
 fun analz_mono_contra_tac ctxt =
   resolve_tac ctxt @{thms analz_impI} THEN' 
   REPEAT1 o (dresolve_tac ctxt @{thms analz_mono_contra})
   THEN' mp_tac ctxt
-*}
+\<close>
 
 lemma knows_subset_knows_Cons: "knows A evs \<subseteq> knows A (e # evs)"
 by (induct e, auto simp: knows_Cons)
@@ -275,7 +275,7 @@
 done
 
 
-text{*For proving @{text new_keys_not_used}*}
+text\<open>For proving @{text new_keys_not_used}\<close>
 lemma keysFor_parts_insert:
      "[| K \<in> keysFor (parts (insert X G));  X \<in> synth (analz H) |] 
       ==> K \<in> keysFor (parts (G \<union> H)) | Key (invKey K) \<in> parts H" 
@@ -284,16 +284,16 @@
            analz_subset_parts [THEN keysFor_mono, THEN [2] rev_subsetD]
     intro: analz_subset_parts [THEN subsetD] parts_mono [THEN [2] rev_subsetD])
 
-method_setup analz_mono_contra = {*
-    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (analz_mono_contra_tac ctxt))) *}
+method_setup analz_mono_contra = \<open>
+    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (analz_mono_contra_tac ctxt)))\<close>
     "for proving theorems of the form X \<notin> analz (knows Spy evs) --> P"
 
-subsubsection{*Useful for case analysis on whether a hash is a spoof or not*}
+subsubsection\<open>Useful for case analysis on whether a hash is a spoof or not\<close>
 
 lemmas syan_impI = impI [where P = "Y \<notin> synth (analz (knows Spy evs))"] for Y evs
 
 ML
-{*
+\<open>
 val knows_Cons = @{thm knows_Cons};
 val used_Nil = @{thm used_Nil};
 val used_Cons = @{thm used_Cons};
@@ -339,16 +339,16 @@
       @{thm knows_Spy_subset_knows_Spy_Gets} RS @{thm synth_analz_mono} RS @{thm contra_subsetD}])
   THEN'
   mp_tac ctxt
-*}
+\<close>
 
-method_setup synth_analz_mono_contra = {*
-    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (synth_analz_mono_contra_tac ctxt))) *}
+method_setup synth_analz_mono_contra = \<open>
+    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (synth_analz_mono_contra_tac ctxt)))\<close>
     "for proving theorems of the form X \<notin> synth (analz (knows Spy evs)) --> P"
 (*>*)
 
-section{* Event Traces \label{sec:events} *}
+section\<open>Event Traces \label{sec:events}\<close>
 
-text {*
+text \<open>
 The system's behaviour is formalized as a set of traces of
 \emph{events}.  The most important event, @{text "Says A B X"}, expresses
 $A\to B : X$, which is the attempt by~$A$ to send~$B$ the message~$X$.
@@ -379,7 +379,7 @@
 \item @{term "synth (analz (knows Spy evs))"} is everything that the spy
 could generate
 \end{itemize}
-*}
+\<close>
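 
 text \<open>
 For instance (an illustrative addition, not from the original theory),
 here is a well-typed two-event trace; note that new events are added at
 the front of a trace:\<close>
 
 term "[Notes Spy (Agent A), Says A B (Agent A)] :: event list"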
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Protocol/Message.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Message.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -5,7 +5,7 @@
 Inductive relations "parts", "analz" and "synth"
 *)(*<*)
 
-section{*Theory of Agents and Messages for Security Protocols*}
+section\<open>Theory of Agents and Messages for Security Protocols\<close>
 
 theory Message imports Main begin
 ML_file "../../antiquote_setup.ML"
@@ -15,27 +15,27 @@
 by blast
 (*>*)
 
-section{* Agents and Messages *}
+section\<open>Agents and Messages\<close>
 
-text {*
+text \<open>
 All protocol specifications refer to a syntactic theory of messages. 
 Datatype
 @{text agent} introduces the constant @{text Server} (a trusted central
 machine, needed for some protocols), an infinite population of
 friendly agents, and the~@{text Spy}:
-*}
+\<close>
 
 datatype agent = Server | Friend nat | Spy
 
-text {*
+text \<open>
 Keys are just natural numbers.  Function @{text invKey} maps a public key to
 the matching private key, and vice versa:
-*}
+\<close>
 
 type_synonym key = nat
 consts invKey :: "key \<Rightarrow> key"
 (*<*)
-consts all_symmetric :: bool        --{*true if all keys are symmetric*}
+consts all_symmetric :: bool        \<comment>\<open>true if all keys are symmetric\<close>
 
 specification (invKey)
   invKey [simp]: "invKey (invKey K) = K"
@@ -43,18 +43,18 @@
     by (rule exI [of _ id], auto)
 
 
-text{*The inverse of a symmetric key is itself; that of a public key
-      is the private key and vice versa*}
+text\<open>The inverse of a symmetric key is itself; that of a public key
+      is the private key and vice versa\<close>
 
 definition symKeys :: "key set" where
   "symKeys == {K. invKey K = K}"
 (*>*)
 
-text {*
+text \<open>
 Datatype
 @{text msg} introduces the message forms, which include agent names, nonces,
 keys, compound messages, and encryptions.  
-*}
+\<close>
 
 datatype
      msg = Agent  agent
@@ -63,7 +63,7 @@
          | MPair  msg msg
          | Crypt  key msg
 
-text {*
+text \<open>
 \noindent
 The notation $\comp{X\sb 1,\ldots,X\sb{n-1},X\sb n}$
 abbreviates
@@ -76,10 +76,10 @@
 wrong key succeeds but yields garbage.  Our model of encryption is
 realistic if encryption adds some redundancy to the plaintext, such as a
 checksum, so that garbage can be detected.
-*}
+\<close>
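 
 text \<open>
 For instance (an illustrative addition, not from the original theory),
 the following term pairs an agent name with a nonce and encrypts the
 result:\<close>
 
 term "Crypt K (MPair (Agent A) (Nonce N))"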
 
 (*<*)
-text{*Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...*}
+text\<open>Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...\<close>
 syntax
   "_MTuple"      :: "['a, args] => 'a * 'b"       ("(2\<lbrace>_,/ _\<rbrace>)")
 translations
@@ -88,11 +88,11 @@
 
 
 definition keysFor :: "msg set => key set" where
-    --{*Keys useful to decrypt elements of a message set*}
+    \<comment>\<open>Keys useful to decrypt elements of a message set\<close>
   "keysFor H == invKey ` {K. \<exists>X. Crypt K X \<in> H}"
 
 
-subsubsection{*Inductive Definition of All Parts" of a Message*}
+subsubsection\<open>Inductive Definition of All Parts of a Message\<close>
 
 inductive_set
   parts :: "msg set => msg set"
@@ -104,7 +104,7 @@
   | Body:        "Crypt K X \<in> parts H ==> X \<in> parts H"
 
 
-text{*Monotonicity*}
+text\<open>Monotonicity\<close>
 lemma parts_mono: "G \<subseteq> H ==> parts(G) \<subseteq> parts(H)"
 apply auto
 apply (erule parts.induct) 
@@ -112,7 +112,7 @@
 done
 
 
-text{*Equations hold because constructors are injective.*}
+text\<open>Equations hold because constructors are injective.\<close>
 lemma Friend_image_eq [simp]: "(Friend x \<in> Friend`A) = (x:A)"
 by auto
 
@@ -123,7 +123,7 @@
 by auto
 
 
-subsubsection{*Inverse of keys *}
+subsubsection\<open>Inverse of keys\<close>
 
 lemma invKey_eq [simp]: "(invKey K = invKey K') = (K=K')"
 apply safe
@@ -131,7 +131,7 @@
 done
 
 
-subsection{*keysFor operator*}
+subsection\<open>keysFor operator\<close>
 
 lemma keysFor_empty [simp]: "keysFor {} = {}"
 by (unfold keysFor_def, blast)
@@ -142,7 +142,7 @@
 lemma keysFor_UN [simp]: "keysFor (\<Union>i\<in>A. H i) = (\<Union>i\<in>A. keysFor (H i))"
 by (unfold keysFor_def, blast)
 
-text{*Monotonicity*}
+text\<open>Monotonicity\<close>
 lemma keysFor_mono: "G \<subseteq> H ==> keysFor(G) \<subseteq> keysFor(H)"
 by (unfold keysFor_def, blast)
 
@@ -169,7 +169,7 @@
 by (unfold keysFor_def, blast)
 
 
-subsection{*Inductive relation "parts"*}
+subsection\<open>Inductive relation "parts"\<close>
 
 lemma MPair_parts:
      "[| \<lbrace>X,Y\<rbrace> \<in> parts H;        
@@ -177,10 +177,10 @@
 by (blast dest: parts.Fst parts.Snd) 
 
 declare MPair_parts [elim!]  parts.Body [dest!]
-text{*NB These two rules are UNSAFE in the formal sense, as they discard the
+text\<open>NB These two rules are UNSAFE in the formal sense, as they discard the
      compound message.  They work well on THIS FILE.  
   @{text MPair_parts} is left as SAFE because it speeds up proofs.
-  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.*}
+  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.\<close>
 
 lemma parts_increasing: "H \<subseteq> parts(H)"
 by blast
@@ -195,12 +195,12 @@
 lemma parts_emptyE [elim!]: "X\<in> parts{} ==> P"
 by simp
 
-text{*WARNING: loops if H = {Y}, therefore must not be repeated!*}
+text\<open>WARNING: loops if H = {Y}, therefore must not be repeated!\<close>
 lemma parts_singleton: "X\<in> parts H ==> \<exists>Y\<in>H. X\<in> parts {Y}"
 by (erule parts.induct, fast+)
 
 
-subsubsection{*Unions *}
+subsubsection\<open>Unions\<close>
 
 lemma parts_Un_subset1: "parts(G) \<union> parts(H) \<subseteq> parts(G \<union> H)"
 by (intro Un_least parts_mono Un_upper1 Un_upper2)
@@ -218,8 +218,8 @@
 apply (simp only: parts_Un)
 done
 
-text{*TWO inserts to avoid looping.  This rewrite is better than nothing.
-  Not suitable for Addsimps: its behaviour can be strange.*}
+text\<open>TWO inserts to avoid looping.  This rewrite is better than nothing.
+  Not suitable for Addsimps: its behaviour can be strange.\<close>
 lemma parts_insert2:
      "parts (insert X (insert Y H)) = parts {X} \<union> parts {Y} \<union> parts H"
 apply (simp add: Un_assoc)
@@ -237,12 +237,12 @@
 lemma parts_UN [simp]: "parts(\<Union>x\<in>A. H x) = (\<Union>x\<in>A. parts(H x))"
 by (intro equalityI parts_UN_subset1 parts_UN_subset2)
 
-text{*Added to simplify arguments to parts, analz and synth.
-  NOTE: the UN versions are no longer used!*}
+text\<open>Added to simplify arguments to parts, analz and synth.
+  NOTE: the UN versions are no longer used!\<close>
 
 
-text{*This allows @{text blast} to simplify occurrences of 
-  @{term "parts(G\<union>H)"} in the assumption.*}
+text\<open>This allows @{text blast} to simplify occurrences of 
+  @{term "parts(G\<union>H)"} in the assumption.\<close>
 lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE] 
 declare in_parts_UnE [elim!]
 
@@ -250,7 +250,7 @@
 lemma parts_insert_subset: "insert X (parts H) \<subseteq> parts(insert X H)"
 by (blast intro: parts_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma parts_partsD [dest!]: "X\<in> parts (parts H) ==> X\<in> parts H"
 by (erule parts.induct, blast+)
@@ -267,7 +267,7 @@
 lemma parts_trans: "[| X\<in> parts G;  G \<subseteq> parts H |] ==> X\<in> parts H"
 by (drule parts_mono, blast)
 
-text{*Cut*}
+text\<open>Cut\<close>
 lemma parts_cut:
      "[| Y\<in> parts (insert X G);  X\<in> parts H |] ==> Y\<in> parts (G \<union> H)" 
 by (blast intro: parts_trans) 
@@ -277,7 +277,7 @@
 by (force dest!: parts_cut intro: parts_insertI)
 
 
-subsubsection{*Rewrite rules for pulling out atomic messages *}
+subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
 
 lemmas parts_insert_eq_I = equalityI [OF subsetI parts_insert_subset]
 
@@ -323,21 +323,21 @@
 done
 
 
-text{*In any message, there is an upper bound N on its greatest nonce.*}
+text\<open>In any message, there is an upper bound N on its greatest nonce.\<close>
 lemma msg_Nonce_supply: "\<exists>N. \<forall>n. N\<le>n --> Nonce n \<notin> parts {msg}"
 apply (induct_tac "msg")
 apply (simp_all (no_asm_simp) add: exI parts_insert2)
- txt{*MPair case: blast works out the necessary sum itself!*}
+ txt\<open>MPair case: blast works out the necessary sum itself!\<close>
  prefer 2 apply auto apply (blast elim!: add_leE)
-txt{*Nonce case*}
+txt\<open>Nonce case\<close>
 apply (rename_tac nat)
 apply (rule_tac x = "N + Suc nat" in exI, auto) 
 done
 (*>*)
 
-section{* Modelling the Adversary *}
+section\<open>Modelling the Adversary\<close>
 
-text {*
+text \<open>
 The spy is part of the system and must be built into the model.  He is
 a malicious user who does not have to follow the protocol.  He
 watches the network and uses any keys he knows to decrypt messages.
@@ -349,7 +349,7 @@
 messages. The set @{text "analz H"} formalizes what the adversary can learn
 from the set of messages~$H$.  The closure properties of this set are
 defined inductively.
-*}
+\<close>
 
 inductive_set
   analz :: "msg set \<Rightarrow> msg set"
@@ -362,14 +362,14 @@
              "\<lbrakk>Crypt K X \<in> analz H; Key(invKey K) \<in> analz H\<rbrakk>
               \<Longrightarrow> X \<in> analz H"
 (*<*)
-text{*Monotonicity; Lemma 1 of Lowe's paper*}
+text\<open>Monotonicity; Lemma 1 of Lowe's paper\<close>
 lemma analz_mono: "G\<subseteq>H ==> analz(G) \<subseteq> analz(H)"
 apply auto
 apply (erule analz.induct) 
 apply (auto dest: analz.Fst analz.Snd) 
 done
 
-text{*Making it safe speeds up proofs*}
+text\<open>Making it safe speeds up proofs\<close>
 lemma MPair_analz [elim!]:
      "[| \<lbrace>X,Y\<rbrace> \<in> analz H;        
              [| X \<in> analz H; Y \<in> analz H |] ==> P   
@@ -402,22 +402,22 @@
 
 lemmas analz_insertI = subset_insertI [THEN analz_mono, THEN [2] rev_subsetD]
 
-subsubsection{*General equational properties *}
+subsubsection\<open>General equational properties\<close>
 
 lemma analz_empty [simp]: "analz{} = {}"
 apply safe
 apply (erule analz.induct, blast+)
 done
 
-text{*Converse fails: we can analz more from the union than from the 
-  separate parts, as a key in one might decrypt a message in the other*}
+text\<open>Converse fails: we can analz more from the union than from the 
+  separate parts, as a key in one might decrypt a message in the other\<close>
 lemma analz_Un: "analz(G) \<union> analz(H) \<subseteq> analz(G \<union> H)"
 by (intro Un_least analz_mono Un_upper1 Un_upper2)
 
 lemma analz_insert: "insert X (analz H) \<subseteq> analz(insert X H)"
 by (blast intro: analz_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Rewrite rules for pulling out atomic messages *}
+subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
 
 lemmas analz_insert_eq_I = equalityI [OF subsetI analz_insert]
 
@@ -433,7 +433,7 @@
 apply (erule analz.induct, auto) 
 done
 
-text{*Can only pull out Keys if they are not needed to decrypt the rest*}
+text\<open>Can only pull out Keys if they are not needed to decrypt the rest\<close>
 lemma analz_insert_Key [simp]: 
     "K \<notin> keysFor (analz H) ==>   
           analz (insert (Key K) H) = insert (Key K) (analz H)"
@@ -452,7 +452,7 @@
 apply (blast intro: analz.Fst analz.Snd)+
 done
 
-text{*Can pull out enCrypted message if the Key is not known*}
+text\<open>Can pull out enCrypted message if the Key is not known\<close>
 lemma analz_insert_Crypt:
      "Key (invKey K) \<notin> analz H 
       ==> analz (insert (Crypt K X) H) = insert (Crypt K X) (analz H)"
@@ -482,10 +482,10 @@
                insert (Crypt K X) (analz (insert X H))"
 by (intro equalityI lemma1 lemma2)
 
-text{*Case analysis: either the message is secure, or it is not! Effective,
+text\<open>Case analysis: either the message is secure, or it is not! Effective,
 but can cause subgoals to blow up! Use with @{text "if_split"}; apparently
 @{text "split_tac"} does not cope with patterns such as @{term"analz (insert
-(Crypt K X) H)"} *} 
+(Crypt K X) H)"}\<close> 
 lemma analz_Crypt_if [simp]:
      "analz (insert (Crypt K X) H) =                 
           (if (Key (invKey K) \<in> analz H)                 
@@ -494,7 +494,7 @@
 by (simp add: analz_insert_Crypt analz_insert_Decrypt)
 
 
-text{*This rule supposes "for the sake of argument" that we have the key.*}
+text\<open>This rule supposes "for the sake of argument" that we have the key.\<close>
 lemma analz_insert_Crypt_subset:
      "analz (insert (Crypt K X) H) \<subseteq>   
            insert (Crypt K X) (analz (insert X H))"
@@ -509,7 +509,7 @@
 done
 
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma analz_analzD [dest!]: "X\<in> analz (analz H) ==> X\<in> analz H"
 by (erule analz.induct, blast+)
@@ -526,7 +526,7 @@
 lemma analz_trans: "[| X\<in> analz G;  G \<subseteq> analz H |] ==> X\<in> analz H"
 by (drule analz_mono, blast)
 
-text{*Cut; Lemma 2 of Lowe*}
+text\<open>Cut; Lemma 2 of Lowe\<close>
 lemma analz_cut: "[| Y\<in> analz (insert X H);  X\<in> analz H |] ==> Y\<in> analz H"
 by (erule analz_trans, blast)
 
@@ -534,14 +534,14 @@
    "Y: analz (insert X H) ==> X: analz H --> Y: analz H"
 *)
 
-text{*This rewrite rule helps in the simplification of messages that involve
+text\<open>This rewrite rule helps in the simplification of messages that involve
   the forwarding of unknown components (X).  Without it, removing occurrences
-  of X can be very complicated. *}
+  of X can be very complicated.\<close>
 lemma analz_insert_eq: "X\<in> analz H ==> analz (insert X H) = analz H"
 by (blast intro: analz_cut analz_insertI)
 
 
-text{*A congruence rule for "analz" *}
+text\<open>A congruence rule for "analz"\<close>
 
 lemma analz_subset_cong:
      "[| analz G \<subseteq> analz G'; analz H \<subseteq> analz H' |] 
@@ -559,14 +559,14 @@
      "analz H = analz H' ==> analz(insert X H) = analz(insert X H')"
 by (force simp only: insert_def intro!: analz_cong)
 
-text{*If there are no pairs or encryptions then analz does nothing*}
+text\<open>If there are no pairs or encryptions then analz does nothing\<close>
 lemma analz_trivial:
      "[| \<forall>X Y. \<lbrace>X,Y\<rbrace> \<notin> H;  \<forall>X K. Crypt K X \<notin> H |] ==> analz H = H"
 apply safe
 apply (erule analz.induct, blast+)
 done
 
-text{*These two are obsolete (with a single Spy) but cost little to prove...*}
+text\<open>These two are obsolete (with a single Spy) but cost little to prove...\<close>
 lemma analz_UN_analz_lemma:
      "X\<in> analz (\<Union>i\<in>A. analz (H i)) ==> X\<in> analz (\<Union>i\<in>A. H i)"
 apply (erule analz.induct)
@@ -576,7 +576,7 @@
 lemma analz_UN_analz [simp]: "analz (\<Union>i\<in>A. analz (H i)) = analz (\<Union>i\<in>A. H i)"
 by (blast intro: analz_UN_analz_lemma analz_mono [THEN [2] rev_subsetD])
 (*>*)
-text {*
+text \<open>
 Note the @{text Decrypt} rule: the spy can decrypt a
 message encrypted with key~$K$ if he has the matching key,~$K^{-1}$. 
 Properties proved by rule induction include the following:
@@ -585,7 +585,7 @@
 The set of fake messages that an intruder could invent
 starting from~@{text H} is @{text "synth(analz H)"}, where @{text "synth H"}
 formalizes what the adversary can build from the set of messages~$H$.  
-*}
+\<close>
 
 inductive_set
   synth :: "msg set \<Rightarrow> msg set"
@@ -618,7 +618,7 @@
 apply (simp (no_asm_use))
 done
 (*>*)
-text {*
+text \<open>
 The set includes all agent names.  Nonces and keys are assumed to be
 unguessable, so none are included beyond those already in~$H$.   Two
 elements of @{term "synth H"} can be combined, and an element can be encrypted
@@ -629,11 +629,11 @@
 @{named_thms [display,indent=0] analz_synth [no_vars] (analz_synth)}
 Rule inversion plays a major role in reasoning about @{text synth}, through
 declarations such as this one:
-*}
+\<close>
 
 inductive_cases Nonce_synth [elim!]: "Nonce n \<in> synth H"
 
-text {*
+text \<open>
 \noindent
 The resulting elimination rule replaces every assumption of the form
 @{term "Nonce n \<in> synth H"} by @{term "Nonce n \<in> H"},
@@ -651,22 +651,22 @@
 use @{text parts} to express general well-formedness properties of a protocol,
 for example, that an uncompromised agent's private key will never be
 included as a component of any message.
-*}
+\<close>
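 
 text \<open>
 Analogous rule-inversion declarations work for the other atomic message
 forms, for instance (an illustrative sketch, not from the original
 theory):\<close>
 
 inductive_cases Key_synth [elim!]: "Key K \<in> synth H"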
 (*<*)
 lemma synth_increasing: "H \<subseteq> synth(H)"
 by blast
 
-subsubsection{*Unions *}
+subsubsection\<open>Unions\<close>
 
-text{*Converse fails: we can synth more from the union than from the 
-  separate parts, building a compound message using elements of each.*}
+text\<open>Converse fails: we can synth more from the union than from the 
+  separate parts, building a compound message using elements of each.\<close>
 lemma synth_Un: "synth(G) \<union> synth(H) \<subseteq> synth(G \<union> H)"
 by (intro Un_least synth_mono Un_upper1 Un_upper2)
 
 lemma synth_insert: "insert X (synth H) \<subseteq> synth(insert X H)"
 by (blast intro: synth_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma synth_synthD [dest!]: "X\<in> synth (synth H) ==> X\<in> synth H"
 by (erule synth.induct, blast+)
@@ -683,7 +683,7 @@
 lemma synth_trans: "[| X\<in> synth G;  G \<subseteq> synth H |] ==> X\<in> synth H"
 by (drule synth_mono, blast)
 
-text{*Cut; Lemma 2 of Lowe*}
+text\<open>Cut; Lemma 2 of Lowe\<close>
 lemma synth_cut: "[| Y\<in> synth (insert X H);  X\<in> synth H |] ==> Y\<in> synth H"
 by (erule synth_trans, blast)
 
@@ -706,7 +706,7 @@
 by (unfold keysFor_def, blast)
 
 
-subsubsection{*Combinations of parts, analz and synth *}
+subsubsection\<open>Combinations of parts, analz and synth\<close>
 
 lemma parts_synth [simp]: "parts (synth H) = parts H \<union> synth H"
 apply (rule equalityI)
@@ -722,13 +722,13 @@
 done
 
 
-subsubsection{*For reasoning about the Fake rule in traces *}
+subsubsection\<open>For reasoning about the Fake rule in traces\<close>
 
 lemma parts_insert_subset_Un: "X\<in> G ==> parts(insert X H) \<subseteq> parts G \<union> parts H"
 by (rule subset_trans [OF parts_mono parts_Un_subset2], blast)
 
-text{*More specifically for Fake.  Very occasionally we could do with a version
-  of the form  @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"} *}
+text\<open>More specifically for Fake.  Very occasionally we could do with a version
+  of the form  @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"}\<close>
 lemma Fake_parts_insert:
      "X \<in> synth (analz H) ==>  
       parts (insert X H) \<subseteq> synth (analz H) \<union> parts H"
@@ -742,8 +742,8 @@
       ==> Z \<in>  synth (analz H) \<union> parts H"
 by (blast dest: Fake_parts_insert  [THEN subsetD, dest])
 
-text{*@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put 
-  @{term "G=H"}.*}
+text\<open>@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put 
+  @{term "G=H"}.\<close>
 lemma Fake_analz_insert:
      "X\<in> synth (analz G) ==>  
       analz (insert X H) \<subseteq> synth (analz G) \<union> analz (G \<union> H)"
@@ -762,8 +762,8 @@
      "(X \<in> analz H | X \<in> parts H) = (X \<in> parts H)"
 by (blast intro: analz_subset_parts [THEN subsetD])
 
-text{*Without this equation, other rules for synth and analz would yield
-  redundant cases*}
+text\<open>Without this equation, other rules for synth and analz would yield
+  redundant cases\<close>
 lemma MPair_synth_analz [iff]:
      "(\<lbrace>X,Y\<rbrace> \<in> synth (analz H)) =  
       (X \<in> synth (analz H) & Y \<in> synth (analz H))"
@@ -775,12 +775,12 @@
 by blast
 
 
-text{*We do NOT want Crypt... messages broken up in protocols!!*}
+text\<open>We do NOT want Crypt messages broken up in protocols!\<close>
 declare parts.Body [rule del]
 
 
-text{*Rewrites to push in Key and Crypt messages, so that other messages can
-    be pulled out using the @{text analz_insert} rules*}
+text\<open>Rewrites to push in Key and Crypt messages, so that other messages can
+    be pulled out using the @{text analz_insert} rules\<close>
 
 lemmas pushKeys =
   insert_commute [of "Key K" "Agent C"]
@@ -800,14 +800,14 @@
   insert_commute [of "Crypt X K" "MPair X' Y"]
   for X K C N X' Y
 
-text{*Cannot be added with @{text "[simp]"} -- messages should not always be
-  re-ordered. *}
+text\<open>Cannot be added with @{text "[simp]"} -- messages should not always be
+  re-ordered.\<close>
 lemmas pushes = pushKeys pushCrypts
 
 
-subsection{*Tactics useful for many protocol proofs*}
+subsection\<open>Tactics useful for many protocol proofs\<close>
 ML
-{*
+\<open>
 val invKey = @{thm invKey};
 val keysFor_def = @{thm keysFor_def};
 val symKeys_def = @{thm symKeys_def};
@@ -858,11 +858,11 @@
        simp_tac ctxt 1,
        REPEAT (FIRSTGOAL (resolve_tac ctxt [allI,impI,notI,conjI,iffI])),
        DEPTH_SOLVE (atomic_spy_analz_tac ctxt 1)]) i);
-*}
+\<close>
 
-text{*By default only @{text o_apply} is built-in.  But in the presence of
+text\<open>By default only @{text o_apply} is built-in.  But in the presence of
 eta-expansion this means that some terms displayed as @{term "f o g"} will be
-rewritten, and others will not!*}
+rewritten, and others will not!\<close>
 declare o_def [simp]
 
 
@@ -883,7 +883,7 @@
 apply (rule synth_analz_mono, blast)   
 done
 
-text{*Two generalizations of @{text analz_insert_eq}*}
+text\<open>Two generalizations of @{text analz_insert_eq}\<close>
 lemma gen_analz_insert_eq [rule_format]:
      "X \<in> analz H ==> ALL G. H \<subseteq> G --> analz (insert X G) = analz G"
 by (blast intro: analz_cut analz_insertI analz_mono [THEN [2] rev_subsetD])
@@ -904,16 +904,16 @@
 
 lemmas Fake_parts_sing_imp_Un = Fake_parts_sing [THEN [2] rev_subsetD]
 
-method_setup spy_analz = {*
-    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac) *}
+method_setup spy_analz = \<open>
+    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac)\<close>
     "for proving the Fake case when analz is involved"
 
-method_setup atomic_spy_analz = {*
-    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac) *}
+method_setup atomic_spy_analz = \<open>
+    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac)\<close>
     "for debugging spy_analz"
 
-method_setup Fake_insert_simp = {*
-    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac) *}
+method_setup Fake_insert_simp = \<open>
+    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac)\<close>
     "for debugging spy_analz"
 
 
--- a/src/Doc/Tutorial/Protocol/NS_Public.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/NS_Public.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,12 +6,12 @@
 *)(*<*)
 theory NS_Public imports Public begin(*>*)
 
-section{* Modelling the Protocol \label{sec:modelling} *}
+section\<open>Modelling the Protocol \label{sec:modelling}\<close>
 
-text_raw {*
+text_raw \<open>
 \begin{figure}
 \begin{isabelle}
-*}
+\<close>
 
 inductive_set ns_public :: "event list set"
   where
@@ -40,13 +40,13 @@
               \<in> set evs3\<rbrakk>
           \<Longrightarrow> Says A B (Crypt (pubK B) (Nonce NB)) # evs3 \<in> ns_public"
 
-text_raw {*
+text_raw \<open>
 \end{isabelle}
 \caption{An Inductive Protocol Definition}\label{fig:ns_public}
 \end{figure}
-*}
+\<close>
 
-text {*
+text \<open>
 Let us formalize the Needham-Schroeder public-key protocol, as corrected by
 Lowe:
 \begin{alignat*%
@@ -84,9 +84,9 @@
 Benefits of this approach are simplicity and clarity.  The semantic model
 is set theory, proofs are by induction and the translation from the informal
 notation to the inductive rules is straightforward. 
-*}
+\<close>
 
-section{* Proving Elementary Properties \label{sec:regularity} *}
+section\<open>Proving Elementary Properties \label{sec:regularity}\<close>
 
 (*<*)
 declare knows_Spy_partsEs [elim]
@@ -109,7 +109,7 @@
 (*Spy never sees another agent's private key! (unless it's bad at start)*)
 (*>*)
 
-text {*
+text \<open>
 Secrecy properties can be hard to prove.  The conclusion of a typical
 secrecy theorem is 
 @{term "X \<notin> analz (knows Spy evs)"}.  The difficulty arises from
@@ -124,13 +124,13 @@
 @{text A}'s private key in a message, whether protected by encryption or
 not, is enough to confirm that @{text A} is compromised.  The proof, like
 nearly all protocol proofs, is by induction over traces.
-*}
+\<close>
 
 lemma Spy_see_priK [simp]:
       "evs \<in> ns_public
        \<Longrightarrow> (Key (priK A) \<in> parts (knows Spy evs)) = (A \<in> bad)"
 apply (erule ns_public.induct, simp_all)
-txt {*
+txt \<open>
 The induction yields five subgoals, one for each rule in the definition of
 @{text ns_public}.  The idea is to prove that the protocol property holds initially
 (rule @{text Nil}), is preserved by each of the legitimate protocol steps (rules
@@ -141,7 +141,7 @@
 at all, so only @{text Fake} is relevant. Indeed, simplification leaves
 only the @{text Fake} case, as indicated by the variable name @{text evsf}:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 by blast
 (*<*)
 lemma Spy_analz_priK [simp]:
@@ -149,7 +149,7 @@
 by auto
 (*>*)
 
-text {*
+text \<open>
 The @{text Fake} case is proved automatically.  If
 @{term "priK A"} is in the extended trace then either (1) it was already in the
 original trace or (2) it was
@@ -165,7 +165,7 @@
 induction, simplification, @{text blast}.  The first line uses the rule
 @{text rev_mp} to prepare the induction by moving two assumptions into the 
 induction formula.
-*}
+\<close>
 
 lemma no_nonce_NS1_NS2:
     "\<lbrakk>Crypt (pubK C) \<lbrace>NA', Nonce NA, Agent D\<rbrace> \<in> parts (knows Spy evs);
@@ -177,11 +177,11 @@
 apply (blast intro: analz_insertI)+
 done
 
-text {*
+text \<open>
 The following unicity lemma states that, if \isa{NA} is secret, then its
 appearance in any instance of message~1 determines the other components. 
 The proof is similar to the previous one.
-*}
+\<close>
 
 lemma unique_NA:
      "\<lbrakk>Crypt(pubK B)  \<lbrace>Nonce NA, Agent A \<rbrace> \<in> parts(knows Spy evs);
@@ -196,7 +196,7 @@
 done
 (*>*)
 
-section{* Proving Secrecy Theorems \label{sec:secrecy} *}
+section\<open>Proving Secrecy Theorems \label{sec:secrecy}\<close>
 
 (*<*)
 (*Secrecy: Spy does not see the nonce sent in msg NS1 if A and B are secure
@@ -264,21 +264,21 @@
 done
 (*>*)
 
-text {*
+text \<open>
 The secrecy theorems for Bob (the second participant) are especially
 important because they fail for the original protocol.  The following
 theorem states that if Bob sends message~2 to Alice, and both agents are
 uncompromised, then Bob's nonce will never reach the spy.
-*}
+\<close>
 
 theorem Spy_not_see_NB [dest]:
  "\<lbrakk>Says B A (Crypt (pubK A) \<lbrace>Nonce NA, Nonce NB, Agent B\<rbrace>) \<in> set evs;
    A \<notin> bad;  B \<notin> bad;  evs \<in> ns_public\<rbrakk>
   \<Longrightarrow> Nonce NB \<notin> analz (knows Spy evs)"
-txt {*
+txt \<open>
 To prove it, we must formulate the induction properly (one of the
 assumptions mentions~@{text evs}), apply induction, and simplify:
-*}
+\<close>
 
 apply (erule rev_mp, erule ns_public.induct, simp_all)
 (*<*)
@@ -288,7 +288,7 @@
 apply (blast intro: no_nonce_NS1_NS2)
 (*>*)
 
-txt {*
+txt \<open>
 The proof states are too complicated to present in full.  
 Let's examine the simplest subgoal, that for message~1.  The following
 event has just occurred:
@@ -335,7 +335,7 @@
 @{text B} has sent an instance of message~2 to~@{text A} and has received the
 expected reply, then that reply really originated with~@{text A}.  The
 proof is a simple induction.
-*}
+\<close>
 
 (*<*)
 by (blast intro: no_nonce_NS1_NS2)
@@ -368,7 +368,7 @@
 by (erule ns_public.induct, auto)
 (*>*)
 
-text {*
+text \<open>
 From similar assumptions, we can prove that @{text A} started the protocol
 run by sending an instance of message~1 involving the nonce~@{text NA}\@. 
 For this theorem, the conclusion is 
@@ -395,6 +395,6 @@
 the strategy illustrated above, but the subgoals can
 be much bigger and there are more of them.
 \index{protocols!security|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Protocol/Public.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Public.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -10,13 +10,13 @@
 begin
 (*>*)
 
-text {*
+text \<open>
 The function
 @{text pubK} maps agents to their public keys.  The function
 @{text priK} maps agents to their private keys.  It is merely
 an abbreviation (cf.\ \S\ref{sec:abbreviations}) defined in terms of
 @{text invKey} and @{text pubK}.
-*}
+\<close>
 
 consts pubK :: "agent \<Rightarrow> key"
 abbreviation priK :: "agent \<Rightarrow> key"
@@ -37,7 +37,7 @@
 end
 (*>*)
 
-text {*
+text \<open>
 \noindent
 The set @{text bad} consists of those agents whose private keys are known to
 the spy.
@@ -45,7 +45,7 @@
 Two axioms are asserted about the public-key cryptosystem. 
 No two agents have the same public key, and no private key equals
 any public key.
-*}
+\<close>
 
 axiomatization where
   inj_pubK:        "inj pubK" and
@@ -156,16 +156,16 @@
 (*Specialized methods*)
 
 (*Tactic for possibility theorems*)
-ML {*
+ML \<open>
 fun possibility_tac ctxt =
     REPEAT (*omit used_Says so that Nonces start from different traces!*)
     (ALLGOALS (simp_tac (ctxt delsimps [used_Says]))
      THEN
      REPEAT_FIRST (eq_assume_tac ORELSE' 
                    resolve_tac ctxt [refl, conjI, @{thm Nonce_supply}]));
-*}
+\<close>
 
-method_setup possibility = {* Scan.succeed (SIMPLE_METHOD o possibility_tac) *}
+method_setup possibility = \<open>Scan.succeed (SIMPLE_METHOD o possibility_tac)\<close>
     "for proving possibility theorems"
 
 end
--- a/src/Doc/Tutorial/Recdef/Induction.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Induction.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory Induction imports examples simplification begin
 (*>*)
 
-text{*
+text\<open>
 Assuming we have defined our function such that Isabelle could prove
 termination and that the recursion equations (or some suitable derived
 equations) are simplification rules, we might like to prove something about
@@ -17,29 +17,29 @@
 you are trying to establish holds for the left-hand side provided it holds
 for all recursive calls on the right-hand side. Here is a simple example
 involving the predefined @{term"map"} functional on lists:
-*}
+\<close>
 
 lemma "map f (sep(x,xs)) = sep(f x, map f xs)"
 
-txt{*\noindent
+txt\<open>\noindent
 Note that @{term"map f xs"}
 is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
 this lemma by recursion induction over @{term"sep"}:
-*}
+\<close>
 
 apply(induct_tac x xs rule: sep.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 The resulting proof state has three subgoals corresponding to the three
 clauses for @{term"sep"}:
 @{subgoals[display,indent=0]}
 The rest is pure simplification:
-*}
+\<close>
 
 apply simp_all
 done
 
-text{*
+text\<open>
 Try proving the above lemma by structural induction, and you find that you
 need an additional case distinction. What is worse, the names of variables
 are invented by Isabelle and have nothing to do with the names in the
@@ -64,7 +64,7 @@
 empty list, the singleton list, and the list with at least two elements.
 The final case has an induction hypothesis:  you may assume that @{term"P"}
 holds for the tail of that list.
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Recdef/Nested0.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested0.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,14 +2,14 @@
 theory Nested0 imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \index{datatypes!nested}%
 In \S\ref{sec:nested-datatype} we defined the datatype of terms
-*}
+\<close>
 
 datatype ('a,'b)"term" = Var 'a | App 'b "('a,'b)term list"
 
-text{*\noindent
+text\<open>\noindent
 and closed with the observation that the associated schema for the definition
 of primitive recursive functions leads to overly verbose definitions. Moreover,
 if you have worked exercise~\ref{ex:trev-trev} you will have noticed that
@@ -18,7 +18,7 @@
 We will now show you how \isacommand{recdef} can simplify
 definitions and proofs about nested recursive datatypes. As an example we
 choose exercise~\ref{ex:trev-trev}:
-*}
+\<close>
 
 consts trev  :: "('a,'b)term \<Rightarrow> ('a,'b)term"
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Recdef/Nested1.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested1.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory Nested1 imports Nested0 begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Although the definition of @{term trev} below is quite natural, we will have
 to overcome a minor difficulty in convincing Isabelle of its termination.
 It is precisely this difficulty that is the \textit{raison d'\^etre} of
@@ -11,13 +11,13 @@
 Defining @{term trev} by \isacommand{recdef} rather than \isacommand{primrec}
 simplifies matters because we are now free to use the recursion equation
 suggested at the end of \S\ref{sec:nested-datatype}:
-*}
+\<close>
 
 recdef (*<*)(permissive)(*>*)trev "measure size"
  "trev (Var x)    = Var x"
  "trev (App f ts) = App f (rev(map trev ts))"
 
-text{*\noindent
+text\<open>\noindent
 Remember that function @{term size} is defined for each \isacommand{datatype}.
 However, the definition does not succeed. Isabelle complains about an
 unproved termination condition
@@ -36,7 +36,7 @@
 \isacommand{recdef} knows about @{term map}.
 
 The termination condition is easily proved by induction:
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Recdef/Nested2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -9,25 +9,25 @@
  "trev (Var x) = Var x"
  "trev (App f ts) = App f (rev(map trev ts))"
 (*>*)
-text{*\noindent
+text\<open>\noindent
 By making this theorem a simplification rule, \isacommand{recdef}
 applies it automatically and the definition of @{term"trev"}
 succeeds now. As a reward for our effort, we can now prove the desired
 lemma directly.  We no longer need the verbose
 induction schema for type @{text"term"} and can use the simpler one arising from
 @{term"trev"}:
-*}
+\<close>
 
 lemma "trev(trev t) = t"
 apply(induct_tac t rule: trev.induct)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 Both the base case and the induction step fall to simplification:
-*}
+\<close>
 
 by(simp_all add: rev_map sym[OF map_compose] cong: map_cong)
 
-text{*\noindent
+text\<open>\noindent
 If the proof of the induction step mystifies you, we recommend that you go through
 the chain of simplification steps in detail; you will probably need the help of
 @{text"simp_trace"}. Theorem @{thm[source]map_cong} is discussed below.
@@ -65,7 +65,7 @@
 into a situation where you need to supply \isacommand{recdef} with new
 congruence rules, you can append a hint after the end of
 the recursion equations:\cmmdx{hints}
-*}
+\<close>
 (*<*)
 consts dummy :: "nat => nat"
 recdef dummy "{}"
@@ -73,19 +73,19 @@
 (*>*)
 (hints recdef_cong: map_cong)
 
-text{*\noindent
+text\<open>\noindent
 Or you can declare them globally
 by giving them the \attrdx{recdef_cong} attribute:
-*}
+\<close>
 
 declare map_cong[recdef_cong]
 
-text{*
+text\<open>
 The @{text cong} and @{text recdef_cong} attributes are
 intentionally kept apart because they control different activities, namely
 simplification and making recursive definitions.
 %The simplifier's congruence rules cannot be used by recdef.
 %For example the weak congruence rules for if and case would prevent
 %recdef from generating sensible termination conditions.
-*}
+\<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Recdef/examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory examples imports Main begin
 (*>*)
 
-text{*
+text\<open>
 Here is a simple example, the \rmindex{Fibonacci function}:
-*}
+\<close>
 
 consts fib :: "nat \<Rightarrow> nat"
 recdef fib "measure(\<lambda>n. n)"
@@ -12,7 +12,7 @@
   "fib (Suc 0) = 1"
   "fib (Suc(Suc x)) = fib x + fib (Suc x)"
 
-text{*\noindent
+text\<open>\noindent
 \index{measure functions}%
 The definition of @{term"fib"} is accompanied by a \textbf{measure function}
 @{term"%n. n"} which maps the argument of @{term"fib"} to a
@@ -25,7 +25,7 @@
 
 Slightly more interesting is the insertion of a fixed element
 between any two elements of a list:
-*}
+\<close>
 
 consts sep :: "'a \<times> 'a list \<Rightarrow> 'a list"
 recdef sep "measure (\<lambda>(a,xs). length xs)"
@@ -33,7 +33,7 @@
   "sep(a, [x])    = [x]"
   "sep(a, x#y#zs) = x # a # sep(a,y#zs)"
 
-text{*\noindent
+text\<open>\noindent
 This time the measure is the length of the list, which decreases with the
 recursive call; the first component of the argument tuple is irrelevant.
 The details of tupled $\lambda$-abstractions @{text"\<lambda>(x\<^sub>1,\<dots>,x\<^sub>n)"} are
@@ -41,24 +41,24 @@
 
 Pattern matching\index{pattern matching!and \isacommand{recdef}}
 need not be exhaustive:
-*}
+\<close>
 
 consts last :: "'a list \<Rightarrow> 'a"
 recdef last "measure (\<lambda>xs. length xs)"
   "last [x]      = x"
   "last (x#y#zs) = last (y#zs)"
 
-text{*
+text\<open>
 Overlapping patterns are disambiguated by taking the order of equations into
 account, just as in functional programming:
-*}
+\<close>
 
 consts sep1 :: "'a \<times> 'a list \<Rightarrow> 'a list"
 recdef sep1 "measure (\<lambda>(a,xs). length xs)"
   "sep1(a, x#y#zs) = x # a # sep1(a,y#zs)"
   "sep1(a, xs)     = xs"
 
-text{*\noindent
+text\<open>\noindent
 To guarantee that the second equation can only be applied if the first
 one does not match, Isabelle internally replaces the second equation
 by the two possibilities that are left: @{prop"sep1(a,[]) = []"} and
@@ -73,17 +73,17 @@
   argument is relevant for termination, you can also rearrange the order of
   arguments as in the following definition:
 \end{warn}
-*}
+\<close>
 consts sep2 :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list"
 recdef sep2 "measure length"
   "sep2 (x#y#zs) = (\<lambda>a. x # a # sep2 (y#zs) a)"
   "sep2 xs       = (\<lambda>a. xs)"
 
-text{*
+text\<open>
 Because of its pattern-matching syntax, \isacommand{recdef} is also useful
 for the definition of non-recursive functions, where the termination measure
 degenerates to the empty set @{term"{}"}:
-*}
+\<close>
 
 consts swap12 :: "'a list \<Rightarrow> 'a list"
 recdef swap12 "{}"
--- a/src/Doc/Tutorial/Recdef/simplification.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/simplification.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory simplification imports Main begin
 (*>*)
 
-text{*
+text\<open>
 Once we have proved all the termination conditions, the \isacommand{recdef} 
 recursion equations become simplification rules, just as with
 \isacommand{primrec}. In most cases this works fine, but there is a subtle
@@ -10,13 +10,13 @@
 terminate because of automatic splitting of @{text "if"}.
 \index{*if expressions!splitting of}
 Let us look at an example:
-*}
+\<close>
 
 consts gcd :: "nat\<times>nat \<Rightarrow> nat"
 recdef gcd "measure (\<lambda>(m,n).n)"
   "gcd (m, n) = (if n=0 then m else gcd(n, m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 According to the measure function, the second argument should decrease with
 each recursive call. The resulting termination condition
 @{term[display]"n ~= (0::nat) ==> m mod n < n"}
@@ -48,7 +48,7 @@
 If possible, the definition should be given by pattern matching on the left
 rather than @{text "if"} on the right. In the case of @{term gcd} the
 following alternative definition suggests itself:
-*}
+\<close>
 
 consts gcd1 :: "nat\<times>nat \<Rightarrow> nat"
 recdef gcd1 "measure (\<lambda>(m,n).n)"
@@ -56,27 +56,27 @@
   "gcd1 (m, n) = gcd1(n, m mod n)"
 
 
-text{*\noindent
+text\<open>\noindent
 The order of equations is important: it hides the side condition
 @{prop"n ~= (0::nat)"}.  Unfortunately, in general the case distinction
 may not be expressible by pattern matching.
 
 A simple alternative is to replace @{text "if"} by @{text case}, 
 which is also available for @{typ bool} and is not split automatically:
-*}
+\<close>
 
 consts gcd2 :: "nat\<times>nat \<Rightarrow> nat"
 recdef gcd2 "measure (\<lambda>(m,n).n)"
   "gcd2(m,n) = (case n=0 of True \<Rightarrow> m | False \<Rightarrow> gcd2(n,m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 This is probably the neatest solution next to pattern matching, and it is
 always available.
 
 A final alternative is to replace the offending simplification rules by
 derived conditional ones. For @{term gcd} it means we have to prove
 these lemmas:
-*}
+\<close>
 
 lemma [simp]: "gcd (m, 0) = m"
 apply(simp)
@@ -86,11 +86,11 @@
 apply(simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 Simplification terminates for these proofs because the condition of the @{text
 "if"} simplifies to @{term True} or @{term False}.
 Now we can disable the original simplification rule:
-*}
+\<close>
 
 declare gcd.simps [simp del]
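 
 text\<open>\noindent
 With the conditional rules installed instead (an illustrative sketch,
 not from the original theory, and assuming the elided companion rule
 @{prop"n \<noteq> 0 \<Longrightarrow> gcd(m,n) = gcd(n, m mod n)"} is declared
 @{text"[simp]"} as described), simplification still evaluates concrete
 instances without looping:\<close>
 
 lemma "gcd (4, 2) = 2"
 by simp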
 
--- a/src/Doc/Tutorial/Recdef/termination.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/termination.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory "termination" imports examples begin
 (*>*)
 
-text{*
+text\<open>
 When a function~$f$ is defined via \isacommand{recdef}, Isabelle tries to prove
 its termination with the help of the user-supplied measure.  Each of the examples
 above is simple enough that Isabelle can automatically prove that the
@@ -14,14 +14,14 @@
 simplification rules.
 
 Isabelle may fail to prove the termination condition for some
-recursive call.  Let us try to define Quicksort:*}
+recursive call.  Let us try to define Quicksort:\<close>
 
 consts qs :: "nat list \<Rightarrow> nat list"
 recdef(*<*)(permissive)(*>*) qs "measure length"
  "qs [] = []"
  "qs(x#xs) = qs(filter (\<lambda>y. y\<le>x) xs) @ [x] @ qs(filter (\<lambda>y. x<y) xs)"
 
-text{*\noindent where @{term filter} is predefined and @{term"filter P xs"}
+text\<open>\noindent where @{term filter} is predefined and @{term"filter P xs"}
 is the list of elements of @{term xs} satisfying @{term P}.
 This definition of @{term qs} fails, and Isabelle prints an error message
 showing you what it was unable to prove:
@@ -38,7 +38,7 @@
 proved). Because \isacommand{recdef}'s termination prover involves
 simplification, we include in our second attempt a hint: the
 \attrdx{recdef_simp} attribute says to use @{thm[source]less_Suc_eq_le} as a
-simplification rule.\cmmdx{hints}  *}
+simplification rule.\cmmdx{hints}\<close>
 
 (*<*)global consts qs :: "nat list \<Rightarrow> nat list" (*>*)
 recdef qs "measure length"
@@ -46,25 +46,25 @@
  "qs(x#xs) = qs(filter (\<lambda>y. y\<le>x) xs) @ [x] @ qs(filter (\<lambda>y. x<y) xs)"
 (hints recdef_simp: less_Suc_eq_le)
 (*<*)local(*>*)
-text{*\noindent
+text\<open>\noindent
 This time everything works fine. Now @{thm[source]qs.simps} contains precisely
 the stated recursion equations for @{text qs} and they have become
 simplification rules.
 Thus we can automatically prove results such as this one:
-*}
+\<close>
 
 theorem "qs[2,3,0] = qs[3,0,2]"
 apply(simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 More exciting theorems require induction, which is discussed below.
 
 If the termination proof requires a lemma that is of general use, you can
 turn it permanently into a simplification rule, in which case the above
 \isacommand{hint} is not necessary. But in the case of
 @{thm[source]less_Suc_eq_le} this would be of dubious value.
-*}
+\<close>
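
text\<open>For example, the induction rule @{thm[source]qs.induct} that
\isacommand{recdef} generates follows the recursion structure of @{term qs};
a sketch of one such induction (an extra example; the final \<open>auto\<close> relies on
the totality of \<open>\<le>\<close> on @{typ nat} for the filter conditions):\<close>

lemma "set (qs xs) = set xs"
apply(induct_tac xs rule: qs.induct)
apply(auto)
done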
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Rules/Basic.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Basic.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -35,9 +35,9 @@
  apply assumption
 done
 
-text {*
+text \<open>
 "by" eliminates uses of "assumption" and "done"
-*}
+\<close>
 
 lemma imp_uncurry': "P \<longrightarrow> Q \<longrightarrow> R \<Longrightarrow> P \<and> Q \<longrightarrow> R"
 apply (rule impI)
@@ -47,21 +47,21 @@
 by (drule mp)
 
 
-text {*
+text \<open>
 substitution
 
 @{thm[display] ssubst}
 \rulename{ssubst}
-*}
+\<close>
 
 lemma "\<lbrakk> x = f x; P(f x) \<rbrakk> \<Longrightarrow> P x"
 by (erule ssubst)
 
-text {*
+text \<open>
 also provable by simp (re-orients)
-*}
+\<close>
 
-text {*
+text \<open>
 the subst method
 
 @{thm[display] mult.commute}
@@ -69,17 +69,17 @@
 
 this would fail:
 apply (simp add: mult.commute) 
-*}
+\<close>
 
 
 lemma "\<lbrakk>P x y z; Suc x < y\<rbrakk> \<Longrightarrow> f z = x*y"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (subst mult.commute) 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
 (*exercise involving THEN*)
@@ -90,11 +90,11 @@
 
 lemma "\<lbrakk>x = f x; triple (f x) (f x) x\<rbrakk> \<Longrightarrow> triple x x x"
 apply (erule ssubst) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption
 done
 
@@ -102,9 +102,9 @@
 apply (erule ssubst, assumption)
 done
 
-text{*
+text\<open>
 or better still 
-*}
+\<close>
 
 lemma "\<lbrakk> x = f x; triple (f x) (f x) x \<rbrakk> \<Longrightarrow> triple x x x"
 by (erule ssubst)
@@ -120,7 +120,7 @@
 by (erule_tac P="\<lambda>u. triple u u x" in ssubst)
 
 
-text {*
+text \<open>
 negation
 
 @{thm[display] notI}
@@ -143,41 +143,41 @@
 
 @{thm[display] contrapos_nn}
 \rulename{contrapos_nn}
-*}
+\<close>
 
 
 lemma "\<lbrakk>\<not>(P\<longrightarrow>Q); \<not>(R\<longrightarrow>Q)\<rbrakk> \<Longrightarrow> R"
 apply (erule_tac Q="R\<longrightarrow>Q" in contrapos_np)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (intro impI)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (erule notE)
 
-text {*
+text \<open>
 @{thm[display] disjCI}
 \rulename{disjCI}
-*}
+\<close>
 
 lemma "(P \<or> Q) \<and> R \<Longrightarrow> P \<or> Q \<and> R"
 apply (intro disjCI conjI)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 
 apply (elim conjE disjE)
  apply assumption
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 
 by (erule contrapos_np, rule conjI)
-text{*
+text\<open>
 proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{6}}\isanewline
 \isanewline
 goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
 {\isacharparenleft}P\ {\isasymor}\ Q{\isacharparenright}\ {\isasymand}\ R\ {\isasymLongrightarrow}\ P\ {\isasymor}\ Q\ {\isasymand}\ R\isanewline
 \ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}R{\isacharsemicolon}\ Q{\isacharsemicolon}\ {\isasymnot}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ Q\isanewline
 \ {\isadigit{2}}{\isachardot}\ {\isasymlbrakk}R{\isacharsemicolon}\ Q{\isacharsemicolon}\ {\isasymnot}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ R
-*}
+\<close>
 
 
-text{*rule_tac, etc.*}
+text\<open>rule_tac, etc.\<close>
 
 
 lemma "P&Q"
@@ -185,23 +185,23 @@
 oops
 
 
-text{*unification failure trace *}
+text\<open>unification failure trace\<close>
 
 declare [[unify_trace_failure = true]]
 
 lemma "P(a, f(b, g(e,a), b), a) \<Longrightarrow> P(a, f(b, g(c,a), b), a)"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 apply assumption
 Clash: e =/= c
 
 Clash: == =/= Trueprop
-*}
+\<close>
 oops
 
 lemma "\<forall>x y. P(x,y) --> P(y,x)"
 apply auto
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 apply assumption
 
@@ -209,15 +209,15 @@
 
 Clash: == =/= Trueprop
 Clash: == =/= Trueprop
-*}
+\<close>
 oops
 
 declare [[unify_trace_failure = false]]
 
 
-text{*Quantifiers*}
+text\<open>Quantifiers\<close>
 
-text {*
+text \<open>
 @{thm[display] allI}
 \rulename{allI}
 
@@ -226,7 +226,7 @@
 
 @{thm[display] spec}
 \rulename{spec}
-*}
+\<close>
 
 lemma "\<forall>x. P x \<longrightarrow> P x"
 apply (rule allI)
@@ -237,74 +237,74 @@
 apply (drule spec)
 by (drule mp)
 
-text{*rename_tac*}
+text\<open>rename_tac\<close>
 lemma "x < y \<Longrightarrow> \<forall>x y. P x (f y)"
 apply (intro allI)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rename_tac v w)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 
 lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (h x); P a\<rbrakk> \<Longrightarrow> P(h (h a))"
 apply (frule spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule mp, assumption)
 apply (drule spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (drule mp)
 
 lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (f x); P a\<rbrakk> \<Longrightarrow> P(f (f a))"
 by blast
 
 
-text{*
-the existential quantifier*}
+text\<open>
+the existential quantifier\<close>
 
-text {*
+text \<open>
 @{thm[display]"exI"}
 \rulename{exI}
 
 @{thm[display]"exE"}
 \rulename{exE}
-*}
+\<close>
 
 
-text{*
-instantiating quantifiers explicitly by rule_tac and erule_tac*}
+text\<open>
+instantiating quantifiers explicitly by rule_tac and erule_tac\<close>
 
 lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (h x); P a\<rbrakk> \<Longrightarrow> P(h (h a))"
 apply (frule spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule mp, assumption)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule_tac x = "h a" in spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (drule mp)
 
-text {*
+text \<open>
 @{thm[display]"dvd_def"}
 \rulename{dvd_def}
-*}
+\<close>
 
 lemma mult_dvd_mono: "\<lbrakk>i dvd m; j dvd n\<rbrakk> \<Longrightarrow> i*j dvd (m*n :: nat)"
 apply (simp add: dvd_def)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE) 
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE) 
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rename_tac l)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule_tac x="k*l" in exI) 
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply simp
 done
 
-text{*
-Hilbert-epsilon theorems*}
+text\<open>
+Hilbert-epsilon theorems\<close>
 
-text{*
+text\<open>
 @{thm[display] the_equality[no_vars]}
 \rulename{the_equality}
 
@@ -330,29 +330,29 @@
 
 @{thm[display] order_antisym[no_vars]}
 \rulename{order_antisym}
-*}
+\<close>
 
 
 lemma "inv Suc (Suc n) = n"
 by (simp add: inv_def)
 
-text{*but we know nothing about inv Suc 0*}
+text\<open>but we know nothing about inv Suc 0\<close>
 
 theorem Least_equality:
      "\<lbrakk> P (k::nat);  \<forall>x. P x \<longrightarrow> k \<le> x \<rbrakk> \<Longrightarrow> (LEAST x. P(x)) = k"
 apply (simp add: Least_def)
  
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
    
 apply (rule the_equality)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 first subgoal is existence; second is uniqueness
-*}
+\<close>
 by (auto intro: order_antisym)
 
 
@@ -360,19 +360,19 @@
      "(\<forall>x. \<exists>y. P x y) \<Longrightarrow> \<exists>f. \<forall>x. P x (f x)"
 apply (rule exI, rule allI)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 state after intro rules
-*}
+\<close>
 apply (drule spec, erule exE)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 applying @{text someI} automatically instantiates
 @{term f} to @{term "\<lambda>x. SOME y. P x y"}
-*}
+\<close>
 
 by (rule someI)
 
@@ -385,7 +385,7 @@
 apply (rule exI [of _  "\<lambda>x. SOME y. P x y"])
 by (blast intro: someI)
 
-text{*end of Epsilon section*}
+text\<open>end of Epsilon section\<close>
 
 
 lemma "(\<exists>x. P x) \<or> (\<exists>x. Q x) \<Longrightarrow> \<exists>x. P x \<or> Q x"
@@ -433,11 +433,11 @@
 
 lemma "\<forall>y. R y y \<Longrightarrow> \<exists>x. \<forall>y. R x y"
 apply (rule exI) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule spec) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 lemma "\<forall>x. \<exists>y. x=y"
--- a/src/Doc/Tutorial/Rules/Blast.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Blast.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,9 +4,9 @@
        ((\<exists>x. \<forall>y. q(x)=q(y)) = ((\<exists>x. p(x))=(\<forall>y. q(y))))"
 by blast
 
-text{*\noindent Until now, we have proved everything using only induction and
+text\<open>\noindent Until now, we have proved everything using only induction and
 simplification.  Substantial proofs require more elaborate types of
-inference.*}
+inference.\<close>
 
 lemma "(\<forall>x. honest(x) \<and> industrious(x) \<longrightarrow> healthy(x)) \<and>  
        \<not> (\<exists>x. grocer(x) \<and> healthy(x)) \<and> 
@@ -20,13 +20,13 @@
         (\<Union>i\<in>I. \<Union>j\<in>J. A(i) \<inter> B(j))"
 by blast
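
text\<open>Another pure quantifier identity of the same flavour that \<open>blast\<close>
disposes of at once (an extra example, not from the theory):\<close>

lemma "((\<exists>x. P x) \<longrightarrow> Q) = (\<forall>x. P x \<longrightarrow> Q)"
by blast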
 
-text {*
+text \<open>
 @{thm[display] mult_is_0}
  \rulename{mult_is_0}
 
 @{thm[display] finite_Un}
  \rulename{finite_Un}
-*}
+\<close>
 
 
 lemma [iff]: "(xs@ys = []) = (xs=[] & ys=[])"
--- a/src/Doc/Tutorial/Rules/Force.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Force.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -15,20 +15,20 @@
 apply clarify
 oops
 
-text {*
+text \<open>
 proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{1}}\isanewline
 \isanewline
 goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
 {\isacharparenleft}{\isasymforall}x{\isachardot}\ P\ x{\isacharparenright}\ {\isasymand}\ {\isacharparenleft}{\isasymexists}x{\isachardot}\ Q\ x{\isacharparenright}\ {\isasymlongrightarrow}\ {\isacharparenleft}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\isanewline
 \ {\isadigit{1}}{\isachardot}\ {\isasymAnd}x\ xa{\isachardot}\ {\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x{\isacharsemicolon}\ Q\ xa{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ {\isasymand}\ Q\ x
-*}
+\<close>
 
-text {*
+text \<open>
 couldn't find a good example of clarsimp
 
 @{thm[display]"someI"}
 \rulename{someI}
-*}
+\<close>
 
 lemma "\<lbrakk>Q a; P a\<rbrakk> \<Longrightarrow> P (SOME x. P x \<and> Q x) \<and> Q (SOME x. P x \<and> Q x)"
 apply (rule someI)
@@ -38,13 +38,13 @@
 apply (fast intro!: someI)
 done
 
-text{*
+text\<open>
 proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
 \isanewline
 goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
 {\isasymlbrakk}Q\ a{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}SOME\ x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\ {\isasymand}\ Q\ {\isacharparenleft}SOME\ x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\isanewline
 \ \isadigit{1}{\isachardot}\ {\isasymlbrakk}Q\ a{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharquery}x\ {\isasymand}\ Q\ {\isacharquery}x
-*}
+\<close>
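
text\<open>A weakened variant of the goal above is provable: \<open>clarify\<close> performs
the safe steps and \<open>blast\<close> finishes (an extra example):\<close>

lemma "(\<forall>x. P x) \<and> (\<exists>x. Q x) \<longrightarrow> (\<exists>x. P x \<and> Q x)"
by (clarify, blast)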
 
 end
 
--- a/src/Doc/Tutorial/Rules/Forward.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Forward.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 theory Forward imports TPrimes begin
 
-text{*\noindent
+text\<open>\noindent
 Forward proof material: of, OF, THEN, simplify, rule_format.
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 SKIP most developments...
-*}
+\<close>
 
 (** Commutativity **)
 
@@ -29,13 +29,13 @@
 apply (simp add: gcd_commute [of "Suc 0"])
 done
 
-text{*\noindent
+text\<open>\noindent
 as far as HERE.
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 SKIP THIS PROOF
-*}
+\<close>
 
 lemma gcd_mult_distrib2: "k * gcd m n = gcd (k*m) (k*n)"
 apply (induct_tac m n rule: gcd.induct)
@@ -45,14 +45,14 @@
 apply simp_all
 done
 
-text {*
+text \<open>
 @{thm[display] gcd_mult_distrib2}
 \rulename{gcd_mult_distrib2}
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 of, simplified
-*}
+\<close>
 
 
 lemmas gcd_mult_0 = gcd_mult_distrib2 [of k 1]
@@ -64,7 +64,7 @@
 
 lemmas where3 = gcd_mult_distrib2 [where m=1 and k="j+k"]
 
-text {*
+text \<open>
 example using ``of'':
 @{thm[display] gcd_mult_distrib2 [of _ 1]}
 
@@ -82,7 +82,7 @@
 
 @{thm[display] sym}
 \rulename{sym}
-*}
+\<close>
 
 lemmas gcd_mult0 = gcd_mult_1 [THEN sym]
       (*not quite right: we need ?k but this gives k*)
@@ -90,9 +90,9 @@
 lemmas gcd_mult0' = gcd_mult_distrib2 [of k 1, simplified, THEN sym]
       (*better in one step!*)
 
-text {*
+text \<open>
 more legible, and variables properly generalized
-*}
+\<close>
 
 lemma gcd_mult [simp]: "gcd k (k*n) = k"
 by (rule gcd_mult_distrib2 [of k 1, simplified, THEN sym])
@@ -101,15 +101,15 @@
 lemmas gcd_self0 = gcd_mult [of k 1, simplified]
 
 
-text {*
+text \<open>
 @{thm[display] gcd_mult}
 \rulename{gcd_mult}
 
 @{thm[display] gcd_self0}
 \rulename{gcd_self0}
-*}
+\<close>
 
-text {*
+text \<open>
 Rules handy with THEN
 
 @{thm[display] iffD1}
@@ -117,18 +117,18 @@
 
 @{thm[display] iffD2}
 \rulename{iffD2}
-*}
+\<close>
 
 
-text {*
+text \<open>
 again: more legible, and variables properly generalized
-*}
+\<close>
 
 lemma gcd_self [simp]: "gcd k k = k"
 by (rule gcd_mult [of k 1, simplified])
 
 
-text{*
+text\<open>
 NEXT SECTION: Methods for Forward Proof
 
 NEW
@@ -136,48 +136,48 @@
 theorem arg_cong, useful in forward steps
 @{thm[display] arg_cong[no_vars]}
 \rulename{arg_cong}
-*}
+\<close>
 
 lemma "2 \<le> u \<Longrightarrow> u*m \<noteq> Suc(u*n)"
 apply (intro notI)
-txt{*
+txt\<open>
 before using arg_cong
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (drule_tac f="\<lambda>x. x mod u" in arg_cong)
-txt{*
+txt\<open>
 after using arg_cong
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp add: mod_Suc)
 done
 
-text{*
+text\<open>
 have just used this rule:
 @{thm[display] mod_Suc[no_vars]}
 \rulename{mod_Suc}
 
 @{thm[display] mult_le_mono1[no_vars]}
 \rulename{mult_le_mono1}
-*}
+\<close>
 
 
-text{*
+text\<open>
 example of "insert"
-*}
+\<close>
 
 lemma relprime_dvd_mult:
       "\<lbrakk> gcd k n = 1; k dvd m*n \<rbrakk> \<Longrightarrow> k dvd m"
 apply (insert gcd_mult_distrib2 [of m k n])
-txt{*@{subgoals[display,indent=0,margin=65]}*}
+txt\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply simp
-txt{*@{subgoals[display,indent=0,margin=65]}*}
+txt\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule_tac t="m" in ssubst)
 apply simp
 done
 
 
-text {*
+text \<open>
 @{thm[display] relprime_dvd_mult}
 \rulename{relprime_dvd_mult}
 
@@ -185,7 +185,7 @@
 
 @{thm[display] div_mult_mod_eq}
 \rulename{div_mult_mod_eq}
-*}
+\<close>
 
 (*MOVED to Force.thy, which now depends only on Divides.thy
 lemma div_mult_self_is_m: "0<n \<Longrightarrow> (m*n) div n = (m::nat)"
@@ -197,7 +197,7 @@
 lemma relprime_20_81: "gcd 20 81 = 1"
 by (simp add: gcd.simps)
 
-text {*
+text \<open>
 Examples of 'OF'
 
 @{thm[display] relprime_dvd_mult}
@@ -214,20 +214,20 @@
 @{thm[display] dvd_add [OF dvd_refl dvd_refl]}
 
 @{thm[display] dvd_add [OF _ dvd_refl]}
-*}
+\<close>
 
 lemma "\<lbrakk>(z::int) < 37; 66 < 2*z; z*z \<noteq> 1225; Q(34); Q(36)\<rbrakk> \<Longrightarrow> Q(z)"
 apply (subgoal_tac "z = 34 \<or> z = 36")
-txt{*
+txt\<open>
 the tactic leaves two subgoals:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply blast
 apply (subgoal_tac "z \<noteq> 35")
-txt{*
+txt\<open>
 the tactic leaves two subgoals:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply arith
 apply force
 done
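
text\<open>The same \<open>subgoal_tac\<close> pattern in miniature: one inserted fact turns
the rest of the goal over to automation (an extra toy example):\<close>

lemma "(z::nat) < 2 \<Longrightarrow> z * z = z"
apply (subgoal_tac "z = 0 \<or> z = 1")
apply force
apply arith
done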
--- a/src/Doc/Tutorial/Rules/TPrimes.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/TPrimes.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -8,10 +8,10 @@
   "gcd m n = (if n=0 then m else gcd n (m mod n))"
 
 
-text {*Now in Basic.thy!
+text \<open>Now in Basic.thy!
 @{thm[display]"dvd_def"}
 \rulename{dvd_def}
-*}
+\<close>
 
 
 (*** Euclid's Algorithm ***)
@@ -29,30 +29,30 @@
 (*gcd(m,n) divides m and n.  The conjuncts don't seem provable separately*)
 lemma gcd_dvd_both: "(gcd m n dvd m) \<and> (gcd m n dvd n)"
 apply (induct_tac m n rule: gcd.induct)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (case_tac "n=0")
-txt{*subgoals after the case tac
+txt\<open>subgoals after the case tac
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp_all) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (blast dest: dvd_mod_imp_dvd)
 
 
 
-text {*
+text \<open>
 @{thm[display] dvd_mod_imp_dvd}
 \rulename{dvd_mod_imp_dvd}
 
 @{thm[display] dvd_trans}
 \rulename{dvd_trans}
-*}
+\<close>
 
 lemmas gcd_dvd1 [iff] = gcd_dvd_both [THEN conjunct1]
 lemmas gcd_dvd2 [iff] = gcd_dvd_both [THEN conjunct2]
 
 
-text {*
+text \<open>
 \begin{quote}
 @{thm[display] gcd_dvd1}
 \rulename{gcd_dvd1}
@@ -60,7 +60,7 @@
 @{thm[display] gcd_dvd2}
 \rulename{gcd_dvd2}
 \end{quote}
-*}
+\<close>
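
text\<open>These facts combine smoothly with the \<open>dvd\<close> closure rules; for
instance (an extra example, using @{thm[source]dvd_mult2}):\<close>

lemma "gcd m n dvd m * k"
by (blast intro: dvd_mult2)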
 
 (*Maximality: for all m,n,k naturals, 
                 if k divides m and k divides n then k divides gcd(m,n)*)
@@ -68,16 +68,16 @@
       "k dvd m \<longrightarrow> k dvd n \<longrightarrow> k dvd gcd m n"
 apply (induct_tac m n rule: gcd.induct)
 apply (case_tac "n=0")
-txt{*subgoals after the case tac
+txt\<open>subgoals after the case tac
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp_all add: dvd_mod)
 done
 
-text {*
+text \<open>
 @{thm[display] dvd_mod}
 \rulename{dvd_mod}
-*}
+\<close>
 
 (*just checking the claim that case_tac "n" works too*)
 lemma "k dvd m \<longrightarrow> k dvd n \<longrightarrow> k dvd gcd m n"
@@ -110,7 +110,7 @@
 done
 
 
-text {*
+text \<open>
 @{thm[display] dvd_antisym}
 \rulename{dvd_antisym}
 
@@ -123,7 +123,7 @@
 \ \ \ \ \ \ \ n\ dvd\ a\ \isasymand \ n\ dvd\ b\ \isasymand \ (\isasymforall d.\ d\ dvd\ a\ \isasymand \ d\ dvd\ b\ \isasymlongrightarrow \ d\ dvd\ n)\isasymrbrakk \isanewline
 \ \ \ \ \isasymLongrightarrow \ m\ =\ n
 \end{isabelle}
-*}
+\<close>
 
 lemma gcd_assoc: "gcd (gcd k m) n = gcd k (gcd m n)"
   apply (rule is_gcd_unique)
@@ -132,7 +132,7 @@
   apply (blast intro: dvd_trans)
   done
 
-text{*
+text\<open>
 \begin{isabelle}
 proof\ (prove):\ step\ 3\isanewline
 \isanewline
@@ -141,7 +141,7 @@
 \ 1.\ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ k\ \isasymand \isanewline
 \ \ \ \ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ m\ \isasymand \ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ n
 \end{isabelle}
-*}
+\<close>
 
 
 lemma gcd_dvd_gcd_mult: "gcd m n dvd gcd (k*m) n"
--- a/src/Doc/Tutorial/Rules/Tacticals.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Tacticals.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 theory Tacticals imports Main begin
 
-text{*REPEAT*}
+text\<open>REPEAT\<close>
 lemma "\<lbrakk>P\<longrightarrow>Q; Q\<longrightarrow>R; R\<longrightarrow>S; P\<rbrakk> \<Longrightarrow> S"
 apply (drule mp, assumption)
 apply (drule mp, assumption)
@@ -11,32 +11,32 @@
 lemma "\<lbrakk>P\<longrightarrow>Q; Q\<longrightarrow>R; R\<longrightarrow>S; P\<rbrakk> \<Longrightarrow> S"
 by (drule mp, assumption)+
 
-text{*ORELSE with REPEAT*}
+text\<open>ORELSE with REPEAT\<close>
 lemma "\<lbrakk>Q\<longrightarrow>R; P\<longrightarrow>Q; x<5\<longrightarrow>P;  Suc x < 5\<rbrakk> \<Longrightarrow> R" 
 by (drule mp, (assumption|arith))+
 
-text{*exercise: what's going on here?*}
+text\<open>exercise: what's going on here?\<close>
 lemma "\<lbrakk>P\<and>Q\<longrightarrow>R; P\<longrightarrow>Q; P\<rbrakk> \<Longrightarrow> R"
 by (drule mp, (intro conjI)?, assumption+)+
 
-text{*defer and prefer*}
+text\<open>defer and prefer\<close>
 
 lemma "hard \<and> (P \<or> ~P) \<and> (Q\<longrightarrow>Q)"
-apply (intro conjI)   --{* @{subgoals[display,indent=0,margin=65]} *}
-defer 1   --{* @{subgoals[display,indent=0,margin=65]} *}
-apply blast+   --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (intro conjI)   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+defer 1   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+apply blast+   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 lemma "ok1 \<and> ok2 \<and> doubtful"
-apply (intro conjI)   --{* @{subgoals[display,indent=0,margin=65]} *}
-prefer 3   --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (intro conjI)   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+prefer 3   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 lemma "bigsubgoal1 \<and> bigsubgoal2 \<and> bigsubgoal3 \<and> bigsubgoal4 \<and> bigsubgoal5 \<and> bigsubgoal6"
-apply (intro conjI)   --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{* @{subgoals[display,indent=0,margin=65]} 
+apply (intro conjI)   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+txt\<open>@{subgoals[display,indent=0,margin=65]} 
 A total of 6 subgoals...
-*}
+\<close>
 oops
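
text\<open>The same \<open>defer\<close> pattern on a goal that does close (an extra toy
example):\<close>

lemma "(Q \<longrightarrow> Q) \<and> (P \<or> \<not> P)"
apply (intro conjI)
defer 1
apply blast+
done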
 
 
--- a/src/Doc/Tutorial/Rules/find2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/find2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 lemma "A \<and> B"
 (*>*)
 
-txt{*\index{finding theorems}\index{searching theorems} In
+txt\<open>\index{finding theorems}\index{searching theorems} In
 \S\ref{sec:find}, we introduced Proof General's \pgmenu{Find} button
 for finding theorems in the database via pattern matching. If we are
 inside a proof, we can be more specific; we can search for introduction,
@@ -16,12 +16,12 @@
 \texttt{intro}. You will be shown a few rules ending in @{text"\<Longrightarrow> ?P \<and> ?Q"},
 among them @{thm[source]conjI}\@. You may even discover that
 the very theorem you are trying to prove is already in the
-database.  Given the goal *}
+database.  Given the goal\<close>
 (*<*)
 oops
 lemma "A \<longrightarrow> A"
 (*>*)
-txt{*\vspace{-\bigskipamount}
+txt\<open>\vspace{-\bigskipamount}
 @{subgoals[display,indent=0,margin=65]}
 the search for \texttt{intro} finds not just @{thm[source] impI}
 but also @{thm[source] imp_refl}: @{thm imp_refl}.
@@ -36,7 +36,7 @@
 Searching for elimination and destruction rules via \texttt{elim} and
 \texttt{dest} is analogous to \texttt{intro} but takes the assumptions
 into account, too.
-*}
+\<close>
 (*<*)
 oops
 end
--- a/src/Doc/Tutorial/Sets/Examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,14 +2,14 @@
 
 declare [[eta_contract = false]]
 
-text{*membership, intersection *}
-text{*difference and empty set*}
-text{*complement, union and universal set*}
+text\<open>membership, intersection\<close>
+text\<open>difference and empty set\<close>
+text\<open>complement, union and universal set\<close>
 
 lemma "(x \<in> A \<inter> B) = (x \<in> A \<and> x \<in> B)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] IntI[no_vars]}
 \rulename{IntI}
 
@@ -18,60 +18,60 @@
 
 @{thm[display] IntD2[no_vars]}
 \rulename{IntD2}
-*}
+\<close>
 
 lemma "(x \<in> -A) = (x \<notin> A)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Compl_iff[no_vars]}
 \rulename{Compl_iff}
-*}
+\<close>
 
 lemma "- (A \<union> B) = -A \<inter> -B"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Compl_Un[no_vars]}
 \rulename{Compl_Un}
-*}
+\<close>
 
 lemma "A-A = {}"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Diff_disjoint[no_vars]}
 \rulename{Diff_disjoint}
-*}
+\<close>
 
 
 
 lemma "A \<union> -A = UNIV"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Compl_partition[no_vars]}
 \rulename{Compl_partition}
-*}
+\<close>
 
-text{*subset relation*}
+text\<open>subset relation\<close>
 
 
-text{*
+text\<open>
 @{thm[display] subsetI[no_vars]}
 \rulename{subsetI}
 
 @{thm[display] subsetD[no_vars]}
 \rulename{subsetD}
-*}
+\<close>
 
 lemma "((A \<union> B) \<subseteq> C) = (A \<subseteq> C \<and> B \<subseteq> C)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Un_subset_iff[no_vars]}
 \rulename{Un_subset_iff}
-*}
+\<close>
 
 lemma "(A \<subseteq> -B) = (B \<subseteq> -A)"
 by blast
@@ -79,19 +79,19 @@
 lemma "(A <= -B) = (B <= -A)"
   oops
 
-text{*ASCII version: blast fails because of overloading because
- it doesn't have to be sets*}
+text\<open>ASCII version: blast fails because of overloading:
+ the statement need not be about sets\<close>
 
 lemma "((A:: 'a set) <= -B) = (B <= -A)"
 by blast
 
-text{*A type constraint lets it work*}
+text\<open>A type constraint lets it work\<close>
 
-text{*An issue here: how do we discuss the distinction between ASCII and
-symbol notation?  Here the latter disambiguates.*}
+text\<open>An issue here: how do we discuss the distinction between ASCII and
+symbol notation?  Here the latter disambiguates.\<close>
 
 
-text{*
+text\<open>
 set extensionality
 
 @{thm[display] set_eqI[no_vars]}
@@ -102,19 +102,19 @@
 
 @{thm[display] equalityE[no_vars]}
 \rulename{equalityE}
-*}
+\<close>
 
 
-text{*finite sets: insertion and membership relation*}
-text{*finite set notation*}
+text\<open>finite sets: insertion and membership relation\<close>
+text\<open>finite set notation\<close>
 
 lemma "insert x A = {x} \<union> A"
 by blast
 
-text{*
+text\<open>
 @{thm[display] insert_is_Un[no_vars]}
 \rulename{insert_is_Un}
-*}
+\<close>
 
 lemma "{a,b} \<union> {c,d} = {a,b,c,d}"
 by blast
@@ -122,31 +122,31 @@
 lemma "{a,b} \<inter> {b,c} = {b}"
 apply auto
 oops
-text{*fails because it isn't valid*}
+text\<open>fails because it isn't valid\<close>
 
 lemma "{a,b} \<inter> {b,c} = (if a=c then {a,b} else {b})"
 apply simp
 by blast
 
-text{*or just force or auto.  blast alone can't handle the if-then-else*}
+text\<open>or just force or auto.  blast alone can't handle the if-then-else\<close>
 
-text{*next: some comprehension examples*}
+text\<open>next: some comprehension examples\<close>
 
 lemma "(a \<in> {z. P z}) = P a"
 by blast
 
-text{*
+text\<open>
 @{thm[display] mem_Collect_eq[no_vars]}
 \rulename{mem_Collect_eq}
-*}
+\<close>
 
 lemma "{x. x \<in> A} = A"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Collect_mem_eq[no_vars]}
 \rulename{Collect_mem_eq}
-*}
+\<close>
 
 lemma "{x. P x \<or> x \<in> A} = {x. P x} \<union> A"
 by blast
@@ -161,50 +161,50 @@
        {z. \<exists>p q. z = p*q \<and> p\<in>prime \<and> q\<in>prime}"
 by (rule refl)
 
-text{*binders*}
+text\<open>binders\<close>
 
-text{*bounded quantifiers*}
+text\<open>bounded quantifiers\<close>
 
 lemma "(\<exists>x\<in>A. P x) = (\<exists>x. x\<in>A \<and> P x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] bexI[no_vars]}
 \rulename{bexI}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] bexE[no_vars]}
 \rulename{bexE}
-*}
+\<close>
 
 lemma "(\<forall>x\<in>A. P x) = (\<forall>x. x\<in>A \<longrightarrow> P x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] ballI[no_vars]}
 \rulename{ballI}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] bspec[no_vars]}
 \rulename{bspec}
-*}
+\<close>
 
-text{*indexed unions and variations*}
+text\<open>indexed unions and variations\<close>
 
 lemma "(\<Union>x. B x) = (\<Union>x\<in>UNIV. B x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] UN_iff[no_vars]}
 \rulename{UN_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Union_iff[no_vars]}
 \rulename{Union_iff}
-*}
+\<close>
 
 lemma "(\<Union>x\<in>A. B x) = {y. \<exists>x\<in>A. y \<in> B x}"
 by blast
@@ -212,35 +212,35 @@
 lemma "\<Union>S = (\<Union>x\<in>S. x)"
 by blast
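
text\<open>Indexed unions also interact with the subset relation as expected;
one more identity that \<open>blast\<close> proves outright (an extra example):\<close>

lemma "((\<Union>x\<in>A. B x) \<subseteq> C) = (\<forall>x\<in>A. B x \<subseteq> C)"
by blast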
 
-text{*
+text\<open>
 @{thm[display] UN_I[no_vars]}
 \rulename{UN_I}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] UN_E[no_vars]}
 \rulename{UN_E}
-*}
+\<close>
 
-text{*indexed intersections*}
+text\<open>indexed intersections\<close>
 
 lemma "(\<Inter>x. B x) = {y. \<forall>x. y \<in> B x}"
 by blast
 
-text{*
+text\<open>
 @{thm[display] INT_iff[no_vars]}
 \rulename{INT_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Inter_iff[no_vars]}
 \rulename{Inter_iff}
-*}
+\<close>
 
-text{*mention also card, Pow, etc.*}
+text\<open>mention also card, Pow, etc.\<close>
 
 
-text{*
+text\<open>
 @{thm[display] card_Un_Int[no_vars]}
 \rulename{card_Un_Int}
 
@@ -249,6 +249,6 @@
 
 @{thm[display] n_subsets[no_vars]}
 \rulename{n_subsets}
-*}
+\<close>
 
 end
--- a/src/Doc/Tutorial/Sets/Functions.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Functions.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,7 +1,7 @@
 theory Functions imports Main begin
 
 
-text{*
+text\<open>
 @{thm[display] id_def[no_vars]}
 \rulename{id_def}
 
@@ -10,18 +10,18 @@
 
 @{thm[display] o_assoc[no_vars]}
 \rulename{o_assoc}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] fun_upd_apply[no_vars]}
 \rulename{fun_upd_apply}
 
 @{thm[display] fun_upd_upd[no_vars]}
 \rulename{fun_upd_upd}
-*}
+\<close>
 
 
-text{*
+text\<open>
 definitions of injective, surjective, bijective
 
 @{thm[display] inj_on_def[no_vars]}
@@ -32,15 +32,15 @@
 
 @{thm[display] bij_def[no_vars]}
 \rulename{bij_def}
-*}
+\<close>
 
 
 
-text{*
+text\<open>
 possibly interesting theorems about inv
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] inv_f_f[no_vars]}
 \rulename{inv_f_f}
 
@@ -61,9 +61,9 @@
 
 @{thm[display] o_inv_distrib[no_vars]}
 \rulename{o_inv_distrib}
-*}
+\<close>
 
-text{*
+text\<open>
 small sample proof
 
 @{thm[display] ext[no_vars]}
@@ -71,35 +71,35 @@
 
 @{thm[display] fun_eq_iff[no_vars]}
 \rulename{fun_eq_iff}
-*}
+\<close>
 
 lemma "inj f \<Longrightarrow> (f o g = f o h) = (g = h)"
   apply (simp add: fun_eq_iff inj_on_def)
   apply (auto)
   done
 
-text{*
+text\<open>
 \begin{isabelle}
 inj\ f\ \isasymLongrightarrow \ (f\ \isasymcirc \ g\ =\ f\ \isasymcirc \ h)\ =\ (g\ =\ h)\isanewline
 \ 1.\ \isasymforall x\ y.\ f\ x\ =\ f\ y\ \isasymlongrightarrow \ x\ =\ y\ \isasymLongrightarrow \isanewline
 \ \ \ \ (\isasymforall x.\ f\ (g\ x)\ =\ f\ (h\ x))\ =\ (\isasymforall x.\ g\ x\ =\ h\ x)
 \end{isabelle}
-*}
+\<close>
  
 
-text{*image, inverse image*}
+text\<open>image, inverse image\<close>
 
-text{*
+text\<open>
 @{thm[display] image_def[no_vars]}
 \rulename{image_def}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] image_Un[no_vars]}
 \rulename{image_Un}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] image_comp[no_vars]}
 \rulename{image_comp}
 
@@ -108,12 +108,12 @@
 
 @{thm[display] bij_image_Compl_eq[no_vars]}
 \rulename{bij_image_Compl_eq}
-*}
+\<close>
 
 
-text{*
+text\<open>
 illustrates Union as well as image
-*}
+\<close>
 
 lemma "f`A \<union> g`A = (\<Union>x\<in>A. {f x, g x})"
 by blast
@@ -121,23 +121,23 @@
 lemma "f ` {(x,y). P x y} = {f(x,y) | x y. P x y}"
 by blast
 
-text{*actually a macro!*}
+text\<open>actually a macro!\<close>
 
 lemma "range f = f`UNIV"
 by blast
 
 
-text{*
+text\<open>
 inverse image
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] vimage_def[no_vars]}
 \rulename{vimage_def}
 
 @{thm[display] vimage_Compl[no_vars]}
 \rulename{vimage_Compl}
-*}
+\<close>
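
text\<open>Injectivity lets membership travel back across an image; a small
sketch using the destruction rule @{thm[source]injD} (an extra example):\<close>

lemma "\<lbrakk>inj f; f x \<in> f ` A\<rbrakk> \<Longrightarrow> x \<in> A"
by (auto dest: injD)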
 
 
 end
--- a/src/Doc/Tutorial/Sets/Recur.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Recur.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,7 +1,7 @@
 theory Recur imports Main begin
 
 
-text{*
+text\<open>
 @{thm[display] mono_def[no_vars]}
 \rulename{mono_def}
 
@@ -22,9 +22,9 @@
 
 @{thm[display] coinduct[no_vars]}
 \rulename{coinduct}
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 A relation $<$ is
 \bfindex{wellfounded} if it has no infinite descending chain $\cdots <
 a@2 < a@1 < a@0$. Clearly, a function definition is total iff the set
@@ -43,9 +43,9 @@
 @{thm[display]wf_induct[no_vars]}
 where @{term"wf r"} means that the relation @{term r} is wellfounded
 
-*}
+\<close>
 
-text{*
+text\<open>
 
 @{thm[display] wf_induct[no_vars]}
 \rulename{wf_induct}
@@ -74,7 +74,7 @@
 @{thm[display] wf_lex_prod[no_vars]}
 \rulename{wf_lex_prod}
 
-*}
+\<close>
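
text\<open>Concrete wellfoundedness facts follow directly from these rules; for
instance (an extra example; @{thm[source]wf_less_than} is the
wellfoundedness of \<open><\<close> on @{typ nat}):\<close>

lemma "wf (less_than <*lex*> less_than)"
by (blast intro: wf_lex_prod wf_less_than)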
 
 end
 
--- a/src/Doc/Tutorial/Sets/Relations.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Relations.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,57 +3,57 @@
 (*Id is only used in UNITY*)
 (*refl, antisym,trans,univalent,\<dots> ho hum*)
 
-text{*
+text\<open>
 @{thm[display] Id_def[no_vars]}
 \rulename{Id_def}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] relcomp_unfold[no_vars]}
 \rulename{relcomp_unfold}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] R_O_Id[no_vars]}
 \rulename{R_O_Id}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] relcomp_mono[no_vars]}
 \rulename{relcomp_mono}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] converse_iff[no_vars]}
 \rulename{converse_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] converse_relcomp[no_vars]}
 \rulename{converse_relcomp}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Image_iff[no_vars]}
 \rulename{Image_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Image_UN[no_vars]}
 \rulename{Image_UN}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Domain_iff[no_vars]}
 \rulename{Domain_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Range_iff[no_vars]}
 \rulename{Range_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] relpow.simps[no_vars]}
 \rulename{relpow.simps}
 
@@ -83,15 +83,15 @@
 
 @{thm[display] trancl_converse[no_vars]}
 \rulename{trancl_converse}
-*}
+\<close>
 
-text{*Relations.  transitive closure*}
+text\<open>Relations.  transitive closure\<close>
 
 lemma rtrancl_converseD: "(x,y) \<in> (r\<inverse>)\<^sup>* \<Longrightarrow> (y,x) \<in> r\<^sup>*"
 apply (erule rtrancl_induct)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
  apply (rule rtrancl_refl)
 apply (blast intro: rtrancl_trans)
 done
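
text\<open>The dual direction goes through by the very same induction pattern
(a sketch along the same lines; the theory may well contain its own version):\<close>

lemma "(y, x) \<in> r\<^sup>* \<Longrightarrow> (x, y) \<in> (r\<inverse>)\<^sup>*"
apply (erule rtrancl_induct)
 apply (rule rtrancl_refl)
apply (blast intro: rtrancl_trans)
done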
@@ -108,44 +108,44 @@
 
 lemma rtrancl_converse: "(r\<inverse>)\<^sup>* = (r\<^sup>*)\<inverse>"
 apply (intro equalityI subsetI)
-txt{*
+txt\<open>
 after intro rules
 
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply clarify
-txt{*
+txt\<open>
 after splitting
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
 
 lemma "(\<forall>u v. (u,v) \<in> A \<longrightarrow> u=v) \<Longrightarrow> A \<subseteq> Id"
 apply (rule subsetI)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 after subsetI
-*}
+\<close>
 apply clarify
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 subgoals after clarify
-*}
+\<close>
 by blast
 
 
 
 
-text{*rejects*}
+text\<open>rejects\<close>
 
 lemma "(a \<in> {z. P z} \<union> {y. Q y}) = P a \<or> Q a"
 apply (blast)
 done
 
-text{*Pow, Inter too little used*}
+text\<open>Pow, Inter too little used\<close>
 
 lemma "(A \<subset> B) = (A \<subseteq> B \<and> A \<noteq> B)"
 apply (simp add: psubset_eq)
--- a/src/Doc/Tutorial/ToyList/ToyList.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,12 +2,12 @@
 imports Main
 begin
 
-text{*\noindent
+text\<open>\noindent
 HOL already has a predefined theory of lists called @{text List} ---
 @{text ToyList} is merely a small fragment of it chosen as an example.
 To avoid some ambiguities caused by defining lists twice, we manipulate
 the concrete syntax and name space of theory @{theory Main} as follows.
-*}
+\<close>
 
 no_notation Nil ("[]") and Cons (infixr "#" 65) and append (infixr "@" 65)
 hide_type list
@@ -16,7 +16,7 @@
 datatype 'a list = Nil                          ("[]")
                  | Cons 'a "'a list"            (infixr "#" 65)
 
-text{*\noindent
+text\<open>\noindent
 The datatype\index{datatype@\isacommand {datatype} (command)}
 \tydx{list} introduces two
 constructors \cdx{Nil} and \cdx{Cons}, the
@@ -45,7 +45,7 @@
 \end{warn}
 Next, two functions @{text"app"} and \cdx{rev} are defined recursively,
 in this order, because Isabelle insists on definition before use:
-*}
+\<close>
 
 primrec app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixr "@" 65) where
 "[] @ ys       = ys" |
@@ -55,7 +55,7 @@
 "rev []        = []" |
 "rev (x # xs)  = (rev xs) @ (x # [])"
 
-text{*\noindent
+text\<open>\noindent
 Each function definition is of the form
 \begin{center}
 \isacommand{primrec} \textit{name} @{text"::"} \textit{type} \textit{(optional syntax)} \isakeyword{where} \textit{equations}
@@ -116,17 +116,17 @@
 \texttt{ToyList} presented so far, you may want to test your
 functions by running them. For example, what is the value of
 @{term"rev(True#False#[])"}? Command
-*}
+\<close>
 
 value "rev (True # False # [])"
 
-text{* \noindent yields the correct result @{term"False # True # []"}.
+text\<open>\noindent yields the correct result @{term"False # True # []"}.
 But we can go beyond mere functional programming and evaluate terms with
-variables in them, executing functions symbolically: *}
+variables in them, executing functions symbolically:\<close>
 
 value "rev (a # b # c # [])"
 
-text{*\noindent yields @{term"c # b # a # []"}.
+text\<open>\noindent yields @{term"c # b # a # []"}.
 
 \section{An Introductory Proof}
 \label{sec:intro-proof}
@@ -140,11 +140,11 @@
 
 Our goal is to show that reversing a list twice produces the original
 list.
-*}
+\<close>
 
 theorem rev_rev [simp]: "rev(rev xs) = xs"
 
-txt{*\index{theorem@\isacommand {theorem} (command)|bold}%
+txt\<open>\index{theorem@\isacommand {theorem} (command)|bold}%
 \noindent
 This \isacommand{theorem} command does several things:
 \begin{itemize}
@@ -179,11 +179,11 @@
 Let us now get back to @{prop"rev(rev xs) = xs"}. Properties of recursively
 defined functions are best established by induction. In this case there is
 nothing obvious except induction on @{term"xs"}:
-*}
+\<close>
 
 apply(induct_tac xs)
 
-txt{*\noindent\index{*induct_tac (method)}%
+txt\<open>\noindent\index{*induct_tac (method)}%
 This tells Isabelle to perform induction on variable @{term"xs"}. The suffix
 @{term"tac"} stands for \textbf{tactic},\index{tactics}
 a synonym for ``theorem proving function''.
@@ -211,11 +211,11 @@
 \indexboldpos{\isasymrbrakk}{$Isabrr} and separated by semicolons.
 
 Let us try to solve both goals automatically:
-*}
+\<close>
 
 apply(auto)
 
-txt{*\noindent
+txt\<open>\noindent
 This command tells Isabelle to apply a proof strategy called
 @{text"auto"} to all subgoals. Essentially, @{text"auto"} tries to
 simplify the subgoals.  In our case, subgoal~1 is solved completely (thanks
@@ -223,22 +223,22 @@
 of subgoal~2 becomes the new subgoal~1:
 @{subgoals[display,indent=0,margin=70]}
 In order to simplify this subgoal further, a lemma suggests itself.
-*}
+\<close>
 (*<*)
 oops
 (*>*)
 
-subsubsection{*First Lemma*}
+subsubsection\<open>First Lemma\<close>
 
-text{*
+text\<open>
 \indexbold{abandoning a proof}\indexbold{proofs!abandoning}
 After abandoning the above proof attempt (at the shell level type
 \commdx{oops}) we start a new proof:
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(xs @ ys) = (rev ys) @ (rev xs)"
 
-txt{*\noindent The keywords \commdx{theorem} and
+txt\<open>\noindent The keywords \commdx{theorem} and
 \commdx{lemma} are interchangeable and merely indicate
 the importance we attach to a proposition.  Therefore we use the words
 \emph{theorem} and \emph{lemma} pretty much interchangeably, too.
@@ -246,46 +246,46 @@
 There are two variables that we could induct on: @{term"xs"} and
 @{term"ys"}. Because @{text"@"} is defined by recursion on
 the first argument, @{term"xs"} is the correct one:
-*}
+\<close>
 
 apply(induct_tac xs)
 
-txt{*\noindent
+txt\<open>\noindent
 This time not even the base case is solved automatically:
-*}
+\<close>
 
 apply(auto)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
 Again, we need to abandon this proof attempt and prove another simple lemma
 first. In the future the step of abandoning an incomplete proof before
 embarking on the proof of a lemma usually remains implicit.
-*}
+\<close>
 (*<*)
 oops
 (*>*)
 
-subsubsection{*Second Lemma*}
+subsubsection\<open>Second Lemma\<close>
 
-text{*
+text\<open>
 We again try the canonical proof procedure:
-*}
+\<close>
 
 lemma app_Nil2 [simp]: "xs @ [] = xs"
 apply(induct_tac xs)
 apply(auto)
 
-txt{*
+txt\<open>
 \noindent
 It works, yielding the desired message @{text"No subgoals!"}:
 @{goals[display,indent=0]}
 We still need to confirm that the proof is now finished:
-*}
+\<close>
 
 done
 
-text{*\noindent
+text\<open>\noindent
 As a result of that final \commdx{done}, Isabelle associates the lemma just proved
 with its name. In this tutorial, we sometimes omit to show that final \isacommand{done}
 if it is obvious from the context that the proof is finished.
@@ -298,13 +298,13 @@
 \S\ref{sec:variables}.
 
 Going back to the proof of the first lemma
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(xs @ ys) = (rev ys) @ (rev xs)"
 apply(induct_tac xs)
 apply(auto)
 
-txt{*
+txt\<open>
 \noindent
 we find that this time @{text"auto"} solves the base case, but the
 induction step merely simplifies to
@@ -316,44 +316,44 @@
 ~~~~~(rev~ys~@~rev~list)~@~(a~\#~[])~=~rev~ys~@~(rev~list~@~(a~\#~[]))
 \end{isabelle}
 and the missing lemma is associativity of @{text"@"}.
-*}
+\<close>
 (*<*)oops(*>*)
 
-subsubsection{*Third Lemma*}
+subsubsection\<open>Third Lemma\<close>
 
-text{*
+text\<open>
 Abandoning the previous attempt, the canonical proof procedure
 succeeds without further ado.
-*}
+\<close>
 
 lemma app_assoc [simp]: "(xs @ ys) @ zs = xs @ (ys @ zs)"
 apply(induct_tac xs)
 apply(auto)
 done
 
-text{*
+text\<open>
 \noindent
 Now we can prove the first lemma:
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(xs @ ys) = (rev ys) @ (rev xs)"
 apply(induct_tac xs)
 apply(auto)
 done
 
-text{*\noindent
+text\<open>\noindent
 Finally, we prove our main theorem:
-*}
+\<close>
 
 theorem rev_rev [simp]: "rev(rev xs) = xs"
 apply(induct_tac xs)
 apply(auto)
 done
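
text\<open>With @{thm[source]rev_app}, @{thm[source]app_Nil2} and
@{thm[source]app_assoc} installed as simplification rules, many
consequences need no induction at all; for instance (an extra example):\<close>

lemma "rev (xs @ (a # [])) = a # rev xs"
apply(auto)
done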
 
-text{*\noindent
+text\<open>\noindent
 The final \commdx{end} tells Isabelle to close the current theory because
 we are finished with its development:%
 \index{*rev (constant)|)}\index{append function|)}
-*}
+\<close>
 
 end
--- a/src/Doc/Tutorial/ToyList/ToyList_Test.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList_Test.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,12 +2,12 @@
 imports Main
 begin
 
-ML {*
+ML \<open>
   let val text =
     map (File.read o Path.append (Resources.master_directory @{theory}) o Path.explode)
       ["ToyList1.txt", "ToyList2.txt"]
     |> implode
   in Thy_Info.script_thy Position.start text @{theory} end
-*}
+\<close>
 
 end
--- a/src/Doc/Tutorial/Trie/Trie.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Trie/Trie.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,45 +1,45 @@
 (*<*)
 theory Trie imports Main begin
 (*>*)
-text{*
+text\<open>
 To minimize running time, each node of a trie should contain an array that maps
 letters to subtries. We have chosen a
 representation where the subtries are held in an association list, i.e.\ a
 list of (letter,trie) pairs.  Abstracting over the alphabet @{typ"'a"} and the
 values @{typ"'v"} we define a trie as follows:
-*}
+\<close>
 
 datatype ('a,'v)trie = Trie  "'v option"  "('a * ('a,'v)trie)list"
 
-text{*\noindent
+text\<open>\noindent
 \index{datatypes!and nested recursion}%
 The first component is the optional value, the second component the
 association list of subtries.  This is an example of nested recursion involving products,
 which is fine because products are datatypes as well.
 We define two selector functions:
-*}
+\<close>
 
 primrec "value" :: "('a,'v)trie \<Rightarrow> 'v option" where
 "value(Trie ov al) = ov"
 primrec alist :: "('a,'v)trie \<Rightarrow> ('a * ('a,'v)trie)list" where
 "alist(Trie ov al) = al"
 
-text{*\noindent
+text\<open>\noindent
 Association lists come with a generic lookup function.  Its result
 involves type @{text option} because a lookup can fail:
-*}
+\<close>
 
 primrec assoc :: "('key * 'val)list \<Rightarrow> 'key \<Rightarrow> 'val option" where
 "assoc [] x = None" |
 "assoc (p#ps) x =
    (let (a,b) = p in if a=x then Some b else assoc ps x)"
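
text\<open>A quick check of @{term assoc} on a concrete association list (an
extra example; @{thm[source]Let_def} must be added because \<open>let\<close> is not
yet a simplification rule at this point):\<close>

lemma "assoc [(1::nat, ''one''), (2, ''two'')] 2 = Some ''two''"
by (simp add: Let_def)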
 
-text{*
+text\<open>
 Now we can define the lookup function for tries. It descends into the trie
 examining the letters of the search string one by one. As
 recursion on lists is simpler than on tries, let us express this as primitive
 recursion on the search string argument:
-*}
+\<close>
 
 primrec lookup :: "('a,'v)trie \<Rightarrow> 'a list \<Rightarrow> 'v option" where
 "lookup t [] = value t" |
@@ -47,21 +47,21 @@
                       None \<Rightarrow> None
                     | Some at \<Rightarrow> lookup at as)"
 
-text{*
+text\<open>
 As a first simple property we prove that looking up a string in the empty
 trie @{term"Trie None []"} always returns @{const None}. The proof merely
 distinguishes the two cases whether the search string is empty or not:
-*}
+\<close>
 
 lemma [simp]: "lookup (Trie None []) as = None"
 apply(case_tac as, simp_all)
 done
 
-text{*
+text\<open>
 Things begin to get interesting with the definition of an update function
 that adds a new (string, value) pair to a trie, overwriting the old value
 associated with that string:
-*}
+\<close>
 
 primrec update:: "('a,'v)trie \<Rightarrow> 'a list \<Rightarrow> 'v \<Rightarrow> ('a,'v)trie" where
 "update t []     v = Trie (Some v) (alist t)" |
@@ -70,7 +70,7 @@
                 None \<Rightarrow> Trie None [] | Some at \<Rightarrow> at)
     in Trie (value t) ((a,update tt as v) # alist t))"
 
-text{*\noindent
+text\<open>\noindent
 The base case is obvious. In the recursive case the subtrie
 @{term tt} associated with the first letter @{term a} is extracted,
 recursively updated, and then placed in front of the association list.
@@ -81,23 +81,23 @@
 Before we start on any proofs about @{const update} we tell the simplifier to
 expand all @{text let}s and to split all @{text case}-constructs over
 options:
-*}
+\<close>
 
 declare Let_def[simp] option.split[split]
 
-text{*\noindent
+text\<open>\noindent
 The reason becomes clear when looking (probably after a failed proof
 attempt) at the body of @{const update}: it contains both
 @{text let} and a case distinction over type @{text option}.
 
 Our main goal is to prove the correct interaction of @{const update} and
 @{const lookup}:
-*}
+\<close>
 
 theorem "\<forall>t v bs. lookup (update t as v) bs =
                     (if as=bs then Some v else lookup t bs)"
 
-txt{*\noindent
+txt\<open>\noindent
 Our plan is to induct on @{term as}; hence the remaining variables are
 quantified. From the definitions it is clear that induction on either
 @{term as} or @{term bs} is required. The choice of @{term as} is 
@@ -105,10 +105,10 @@
 if @{const update} has already been simplified, which can only happen if
 @{term as} is instantiated.
 The start of the proof is conventional:
-*}
+\<close>
 apply(induct_tac as, auto)
 
-txt{*\noindent
+txt\<open>\noindent
 Unfortunately, this time we are left with three intimidating looking subgoals:
 \begin{isabelle}
 ~1.~\dots~{\isasymLongrightarrow}~lookup~\dots~bs~=~lookup~t~bs\isanewline
@@ -118,11 +118,11 @@
 Clearly, if we want to make headway we have to instantiate @{term bs} as
 well now. It turns out that instead of induction, case distinction
 suffices:
-*}
+\<close>
 apply(case_tac[!] bs, auto)
 done
 
-text{*\noindent
+text\<open>\noindent
 \index{subgoal numbering}%
 All methods ending in @{text tac} take an optional first argument that
 specifies the range of subgoals they are applied to, where @{text"[!]"} means
@@ -158,7 +158,7 @@
   with @{typ"'a \<Rightarrow> ('a,'v)trie option"}.
 \end{exercise}
 
-*}
+\<close>
 
 (*<*)
 
--- a/src/Doc/Tutorial/Types/Axioms.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Types/Axioms.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,36 +1,36 @@
 (*<*)theory Axioms imports Overloading Setup begin(*>*)
 
-subsection {* Axioms *}
+subsection \<open>Axioms\<close>
 
-text {* Attaching axioms to our classes lets us reason on the level of
+text \<open>Attaching axioms to our classes lets us reason on the level of
 classes.  The results will be applicable to all types in a class, just
 as in axiomatic mathematics.
 
 \begin{warn}
 Proofs in this section use structured \emph{Isar} proofs, which are not
 covered in this tutorial; but see @{cite "Nipkow-TYPES02"}.%
-\end{warn} *}
+\end{warn}\<close>
 
-subsubsection {* Semigroups *}
+subsubsection \<open>Semigroups\<close>
 
-text{* We specify \emph{semigroups} as subclass of @{class plus}: *}
+text\<open>We specify \emph{semigroups} as subclass of @{class plus}:\<close>
 
 class semigroup = plus +
   assumes assoc: "(x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"
 
-text {* \noindent This @{command class} specification requires that
+text \<open>\noindent This @{command class} specification requires that
 all instances of @{class semigroup} obey @{fact "assoc:"}~@{prop
 [source] "\<And>x y z :: 'a::semigroup. (x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"}.
 
 We can use this class axiom to derive further abstract theorems
-relative to class @{class semigroup}: *}
+relative to class @{class semigroup}:\<close>
 
 lemma assoc_left:
   fixes x y z :: "'a::semigroup"
   shows "x \<oplus> (y \<oplus> z) = (x \<oplus> y) \<oplus> z"
   using assoc by (rule sym)
 
-text {* \noindent The @{class semigroup} constraint on type @{typ
+text \<open>\noindent The @{class semigroup} constraint on type @{typ
 "'a"} restricts instantiations of @{typ "'a"} to types of class
 @{class semigroup} and during the proof enables us to use the fact
 @{fact assoc} whose type parameter is itself constrained to class
@@ -38,15 +38,15 @@
 can be proved in the abstract and freely reused for each instance.
 
 On instantiation, we have to give a proof that the given operations
-obey the class axioms: *}
+obey the class axioms:\<close>
 
 instantiation nat :: semigroup
 begin
 
 instance proof
 
-txt {* \noindent The proof opens with a default proof step, which for
-instance judgements invokes method @{method intro_classes}. *}
+txt \<open>\noindent The proof opens with a default proof step, which for
+instance judgements invokes method @{method intro_classes}.\<close>
 
 
   fix m n q :: nat
@@ -56,8 +56,8 @@
 
 end
 
-text {* \noindent Again, the interesting things enter the stage with
-parametric types: *}
+text \<open>\noindent Again, the interesting things enter the stage with
+parametric types:\<close>
 
 instantiation prod :: (semigroup, semigroup) semigroup
 begin
@@ -67,30 +67,30 @@
   show "p\<^sub>1 \<oplus> p\<^sub>2 \<oplus> p\<^sub>3 = p\<^sub>1 \<oplus> (p\<^sub>2 \<oplus> p\<^sub>3)"
     by (cases p\<^sub>1, cases p\<^sub>2, cases p\<^sub>3) (simp add: assoc)
 
-txt {* \noindent Associativity of product semigroups is established
+txt \<open>\noindent Associativity of product semigroups is established
 using the hypothetical associativity @{fact assoc} of the type
 components, which holds due to the @{class semigroup} constraints
 imposed on the type components by the @{command instance} proposition.
 Indeed, this pattern often occurs with parametric types and type
-classes. *}
+classes.\<close>
 
 qed
 
 end
 
-subsubsection {* Monoids *}
+subsubsection \<open>Monoids\<close>
 
-text {* We define a subclass @{text monoidl} (a semigroup with a
+text \<open>We define a subclass @{text monoidl} (a semigroup with a
 left-hand neutral) by extending @{class semigroup} with one additional
-parameter @{text neutral} together with its property: *}
+parameter @{text neutral} together with its property:\<close>
 
 class monoidl = semigroup +
   fixes neutral :: "'a" ("\<zero>")
   assumes neutl: "\<zero> \<oplus> x = x"
 
-text {* \noindent Again, we prove some instances, by providing
+text \<open>\noindent Again, we prove some instances, by providing
 suitable parameter definitions and proofs for the additional
-specifications. *}
+specifications.\<close>
 
 instantiation nat :: monoidl
 begin
@@ -106,11 +106,11 @@
 
 end
 
-text {* \noindent In contrast to the examples above, we here have both
+text \<open>\noindent In contrast to the examples above, we here have both
 specification of class operations and a non-trivial instance proof.
 
 This covers products as well:
-*}
+\<close>
 
 instantiation prod :: (monoidl, monoidl) monoidl
 begin
@@ -126,27 +126,27 @@
 
 end
 
-text {* \noindent Fully-fledged monoids are modelled by another
+text \<open>\noindent Fully-fledged monoids are modelled by another
 subclass which does not add new parameters but tightens the
-specification: *}
+specification:\<close>
 
 class monoid = monoidl +
   assumes neutr: "x \<oplus> \<zero> = x"
 
-text {* \noindent Corresponding instances for @{typ nat} and products
-are left as an exercise to the reader. *}
+text \<open>\noindent Corresponding instances for @{typ nat} and products
+are left as an exercise to the reader.\<close>
 
-subsubsection {* Groups *}
+subsubsection \<open>Groups\<close>
 
-text {* \noindent To finish our small algebra example, we add a @{text
-group} class: *}
+text \<open>\noindent To finish our small algebra example, we add a @{text
+group} class:\<close>
 
 class group = monoidl +
   fixes inv :: "'a \<Rightarrow> 'a" ("\<div> _" [81] 80)
   assumes invl: "\<div> x \<oplus> x = \<zero>"
 
-text {* \noindent We continue with a further example for abstract
-proofs relative to type classes: *}
+text \<open>\noindent We continue with a further example for abstract
+proofs relative to type classes:\<close>
 
 lemma left_cancel:
   fixes x y z :: "'a::group"
@@ -161,9 +161,9 @@
   then show "x \<oplus> y = x \<oplus> z" by simp
 qed
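
text\<open>Abstract reasoning of this kind also yields uniqueness of the neutral
element: any left unit of a @{text monoid} collapses to @{text \<zero>} (an
extra sketch in the structured style used above):\<close>

lemma
  fixes e :: "'a::monoid"
  assumes unit: "\<And>x. e \<oplus> x = x"
  shows "e = \<zero>"
proof -
  have "e = e \<oplus> \<zero>" by (simp add: neutr)
  also have "\<dots> = \<zero>" by (rule unit)
  finally show "e = \<zero>" .
qed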
 
-text {* \noindent Any @{text "group"} is also a @{text "monoid"}; this
+text \<open>\noindent Any @{text "group"} is also a @{text "monoid"}; this
 can be made explicit by claiming an additional subclass relation,
-together with a proof of the logical difference: *}
+together with a proof of the logical difference:\<close>
 
 instance group \<subseteq> monoid
 proof
@@ -174,7 +174,7 @@
   then show "x \<oplus> \<zero> = x" by (simp add: left_cancel)
 qed
 
-text {* \noindent The proof result is propagated to the type system,
+text \<open>\noindent The proof result is propagated to the type system,
 making @{text group} an instance of @{text monoid} by adding an
 additional edge to the graph of the subclass relation; see also
 Figure~\ref{fig:subclass}.
@@ -208,11 +208,11 @@
    \label{fig:subclass}
  \end{center}
 \end{figure}
-*}
+\<close>
 
-subsubsection {* Inconsistencies *}
+subsubsection \<open>Inconsistencies\<close>
 
-text {* The reader may be wondering what happens if we attach an
+text \<open>The reader may be wondering what happens if we attach an
 inconsistent set of axioms to a class. So far we have always avoided
 adding new axioms to HOL for fear of inconsistencies, and suddenly it
 seems that we are throwing all caution to the wind. So why is there no
@@ -230,12 +230,12 @@
 
 Even if each individual class is consistent, intersections of
 (unrelated) classes readily become inconsistent in practice. Now we
-know this need not worry us. *}
+know this need not worry us.\<close>
 
 
-subsubsection{* Syntactic Classes and Predefined Overloading *}
+subsubsection\<open>Syntactic Classes and Predefined Overloading\<close>
 
-text {* In our algebra example, we have started with a \emph{syntactic
+text \<open>In our algebra example, we have started with a \emph{syntactic
 class} @{class plus} which only specifies operations but no axioms; it
 would also have been possible to start immediately with class @{class
 semigroup}, specifying the @{text "\<oplus>"} operation and associativity at
@@ -256,6 +256,6 @@
 Further note that classes may contain axioms but \emph{no} operations.
 An example is class @{class finite} from theory @{theory Finite_Set}
 which specifies a type to be finite: @{lemma [source] "finite (UNIV :: 'a::finite
-set)" by (fact finite_UNIV)}. *}
+set)" by (fact finite_UNIV)}.\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Types/Numbers.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Types/Numbers.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,26 +2,26 @@
 imports Complex_Main
 begin
 
-text{*
+text\<open>
 
 numeric literals; default simprules; can re-orient
-*}
+\<close>
 
 lemma "2 * m = m + m"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
 fun h :: "nat \<Rightarrow> nat" where
 "h i = (if i = 3 then 2 else i)"
 
-text{*
+text\<open>
 @{term"h 3 = 2"}
 @{term"h i  = i"}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] numeral_One[no_vars]}
 \rulename{numeral_One}
 
@@ -41,19 +41,19 @@
 \rulename{add.left_commute}
 
 these form the simp set ac_simps
-*}
+\<close>
 
 lemma "Suc(i + j*l*k + m*n) = f (n*m + i + k*j*l)"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp add: ac_simps)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
-text{*
+text\<open>
 
 @{thm[display] div_le_mono[no_vars]}
 \rulename{div_le_mono}
@@ -66,23 +66,23 @@
 
 @{thm[display] nat_diff_split[no_vars]}
 \rulename{nat_diff_split}
-*}
+\<close>
 
 
 lemma "(n - 1) * (n + 1) = n * n - (1::nat)"
 apply (clarsimp split: nat_diff_split iff del: less_Suc0)
- --{* @{subgoals[display,indent=0,margin=65]} *}
+ \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (subgoal_tac "n=0", force, arith)
 done
 
 
 lemma "(n - 2) * (n + 2) = n * n - (4::nat)"
 apply (simp split: nat_diff_split, clarify)
- --{* @{subgoals[display,indent=0,margin=65]} *}
+ \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (subgoal_tac "n=0 | n=1", force, arith)
 done
 
-text{*
+text\<open>
 @{thm[display] mod_if[no_vars]}
 \rulename{mod_if}
 
@@ -118,10 +118,10 @@
 \rulename{dvd_add}
 
 For the integers, I'd list a few theorems that somehow involve negative 
-numbers.*}
+numbers.\<close>
 
 
-text{*
+text\<open>
 Division, remainder of negatives
 
 
@@ -154,7 +154,7 @@
 
 @{thm[display] zmod_zmult2_eq[no_vars]}
 \rulename{zmod_zmult2_eq}
-*}  
+\<close>  
 
 lemma "abs (x+y) \<le> abs x + abs (y :: int)"
 by arith
@@ -163,7 +163,7 @@
 by (simp add: abs_if) 
 
 
-text {*Induction rules for the Integers
+text \<open>Induction rules for the Integers
 
 @{thm[display] int_ge_induct[no_vars]}
 \rulename{int_ge_induct}
@@ -176,9 +176,9 @@
 
 @{thm[display] int_less_induct[no_vars]}
 \rulename{int_less_induct}
-*}  
+\<close>  
 
-text {*FIELDS
+text \<open>FIELDS
 
 @{thm[display] dense[no_vars]}
 \rulename{dense}
@@ -205,32 +205,32 @@
 
 @{thm[display] add_divide_distrib[no_vars]}
 \rulename{add_divide_distrib}
-*}
+\<close>
 
 lemma "3/4 < (7/8 :: real)"
 by simp 
 
 lemma "P ((3/4) * (8/15 :: real))"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply simp 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
 lemma "(3/4) * (8/15) < (x :: real)"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply simp 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
-text{*
+text\<open>
 Ring and Field
 
 Requires a field, or else an ordered ring
@@ -243,16 +243,16 @@
 
 @{thm[display] mult_cancel_left[no_vars]}
 \rulename{mult_cancel_left}
-*}
+\<close>
 
-text{*
+text\<open>
 effect of show_sorts on the above
 
 @{thm[display,show_sorts] mult_cancel_left[no_vars]}
 \rulename{mult_cancel_left}
-*}
+\<close>
 
-text{*
+text\<open>
 absolute value
 
 @{thm[display] abs_mult[no_vars]}
@@ -274,7 +274,7 @@
 \rulename{power_abs}
 
 
-*}
+\<close>
 
 
 end
--- a/src/Doc/Tutorial/Types/Overloading.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Types/Overloading.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,76 +2,76 @@
 
 hide_class (open) plus (*>*)
 
-text {* Type classes allow \emph{overloading}; thus a constant may
-have multiple definitions at non-overlapping types. *}
+text \<open>Type classes allow \emph{overloading}; thus a constant may
+have multiple definitions at non-overlapping types.\<close>
 
-subsubsection {* Overloading *}
+subsubsection \<open>Overloading\<close>
 
-text {* We can introduce a binary infix addition operator @{text "\<oplus>"}
-for arbitrary types by means of a type class: *}
+text \<open>We can introduce a binary infix addition operator @{text "\<oplus>"}
+for arbitrary types by means of a type class:\<close>
 
 class plus =
   fixes plus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<oplus>" 70)
 
-text {* \noindent This introduces a new class @{class [source] plus},
+text \<open>\noindent This introduces a new class @{class [source] plus},
 along with a constant @{const [source] plus} with nice infix syntax.
 @{const [source] plus} is also called a \emph{class operation}.  The type
 of @{const [source] plus} carries a class constraint @{typ [source] "'a
 :: plus"} on its type variable, meaning that only types of class
 @{class [source] plus} can be instantiated for @{typ [source] "'a"}.
 To breathe life into @{class [source] plus} we need to declare a type
-to be an \bfindex{instance} of @{class [source] plus}: *}
+to be an \bfindex{instance} of @{class [source] plus}:\<close>
 
 instantiation nat :: plus
 begin
 
-text {* \noindent Command \isacommand{instantiation} opens a local
+text \<open>\noindent Command \isacommand{instantiation} opens a local
 theory context.  Here we can now instantiate @{const [source] plus} on
-@{typ nat}: *}
+@{typ nat}:\<close>
 
 primrec plus_nat :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
     "(0::nat) \<oplus> n = n"
   | "Suc m \<oplus> n = Suc (m \<oplus> n)"
 
-text {* \noindent Note that the name @{const [source] plus} carries a
+text \<open>\noindent Note that the name @{const [source] plus} carries a
 suffix @{text "_nat"}; by default, the local name of a class operation
 @{text f} to be instantiated on type constructor @{text \<kappa>} is mangled
 as @{text f_\<kappa>}.  In case of uncertainty, these names may be inspected
 using the @{command "print_context"} command.
 
 Although class @{class [source] plus} has no axioms, the instantiation must be
-formally concluded by a (trivial) instantiation proof ``..'': *}
+formally concluded by a (trivial) instantiation proof ``..'':\<close>
 
 instance ..
 
-text {* \noindent More interesting \isacommand{instance} proofs will
+text \<open>\noindent More interesting \isacommand{instance} proofs will
 arise below.
 
-The instantiation is finished by an explicit *}
+The instantiation is finished by an explicit\<close>
 
 end
 
-text {* \noindent From now on, terms like @{term "Suc (m \<oplus> 2)"} are
-legal. *}
+text \<open>\noindent From now on, terms like @{term "Suc (m \<oplus> 2)"} are
+legal.\<close>
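+
+text \<open>\noindent For example, the new simp rules already prove simple
+facts about @{text "\<oplus>"} on @{typ nat} (a small sanity check, given
+here as a sketch):\<close>
+
+lemma "Suc 0 \<oplus> n = Suc n"
+  by simp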
 
 instantiation prod :: (plus, plus) plus
 begin
 
-text {* \noindent Here we instantiate the product type @{type prod} to
+text \<open>\noindent Here we instantiate the product type @{type prod} to
 class @{class [source] plus}, given that its type arguments are of
-class @{class [source] plus}: *}
+class @{class [source] plus}:\<close>
 
 fun plus_prod :: "'a \<times> 'b \<Rightarrow> 'a \<times> 'b \<Rightarrow> 'a \<times> 'b" where
   "(x, y) \<oplus> (w, z) = (x \<oplus> w, y \<oplus> z)"
 
-text {* \noindent Obviously, overloaded specifications may include
-recursion over the syntactic structure of types. *}
+text \<open>\noindent Obviously, overloaded specifications may include
+recursion over the syntactic structure of types.\<close>
 
 instance ..
 
 end
 
-text {* \noindent This way we have encoded the canonical lifting of
-binary operations to products by means of type classes. *}
+text \<open>\noindent This way we have encoded the canonical lifting of
+binary operations to products by means of type classes.\<close>
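+
+text \<open>\noindent A quick sanity check of the lifted operation on a
+concrete pair; a sketch that relies only on the simp rules generated
+by the definitions above:\<close>
+
+lemma "(Suc 0, Suc (Suc 0)) \<oplus> (Suc 0, Suc 0) =
+       (Suc (Suc 0), Suc (Suc (Suc 0)))"
+  by simp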
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Types/Pairs.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Types/Pairs.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,19 +1,19 @@
 (*<*)theory Pairs imports Main begin(*>*)
 
-section{*Pairs and Tuples*}
+section\<open>Pairs and Tuples\<close>
 
-text{*\label{sec:products}
+text\<open>\label{sec:products}
 Ordered pairs were already introduced in \S\ref{sec:pairs}, but only with a minimal
 repertoire of operations: pairing and the two projections @{term fst} and
 @{term snd}. In any non-trivial application of pairs you will find that this
 quickly leads to unreadable nests of projections. This
 section introduces syntactic sugar to overcome this
 problem: pattern matching with tuples.
-*}
+\<close>
 
-subsection{*Pattern Matching with Tuples*}
+subsection\<open>Pattern Matching with Tuples\<close>
 
-text{*
+text\<open>
 Tuples may be used as patterns in $\lambda$-abstractions,
 for example @{text"\<lambda>(x,y,z).x+y+z"} and @{text"\<lambda>((x,y),z).x+y+z"}. In fact,
 tuple patterns can be used in most variable binding constructs,
@@ -46,18 +46,18 @@
 Pattern matching in
 other variable binding constructs is translated similarly. Thus we need to
 understand how to reason about such constructs.
-*}
+\<close>
 
-subsection{*Theorem Proving*}
+subsection\<open>Theorem Proving\<close>
 
-text{*
+text\<open>
 The most obvious approach is the brute force expansion of @{term split}:
-*}
+\<close>
 
 lemma "(\<lambda>(x,y).x) p = fst p"
 by(simp add: split_def)
 
-text{* \noindent
+text\<open>\noindent
 This works well if rewriting with @{thm[source]split_def} finishes the
 proof, as it does above.  But if it does not, you end up with exactly what
 we are trying to avoid: nests of @{term fst} and @{term snd}. Thus this
@@ -74,76 +74,76 @@
 In case of a subterm of the form @{term"case_prod f p"} this is easy: the split
 rule @{thm[source]prod.split} replaces @{term p} by a pair:%
 \index{*split (method)}
-*}
+\<close>
 
 lemma "(\<lambda>(x,y).y) p = snd p"
 apply(split prod.split)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 This subgoal is easily proved by simplification. Thus we could have combined
 simplification and splitting in one command that proves the goal outright:
-*}
+\<close>
 (*<*)
 by simp
 lemma "(\<lambda>(x,y).y) p = snd p"(*>*)
 by(simp split: prod.split)
 
-text{*
+text\<open>
 Let us look at a second example:
-*}
+\<close>
 
 lemma "let (x,y) = p in fst p = x"
 apply(simp only: Let_def)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 A paired @{text let} reduces to a paired $\lambda$-abstraction, which
 can be split as above. The same is true for paired set comprehension:
-*}
+\<close>
 
 (*<*)by(simp split: prod.split)(*>*)
 lemma "p \<in> {(x,y). x=y} \<longrightarrow> fst p = snd p"
 apply simp
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 Again, simplification produces a term suitable for @{thm[source]prod.split}
 as above. If you are worried about the strange form of the premise:
 @{text"case_prod (=)"} is short for @{term"\<lambda>(x,y). x=y"}.
 The same proof procedure works for
-*}
+\<close>
 
 (*<*)by(simp split: prod.split)(*>*)
 lemma "p \<in> {(x,y). x=y} \<Longrightarrow> fst p = snd p"
 
-txt{*\noindent
+txt\<open>\noindent
 except that we now have to use @{thm[source]prod.split_asm}, because
 @{term split} occurs in the assumptions.
 
 However, splitting @{term split} is not always a solution, as no @{term split}
 may be present in the goal. Consider the following function:
-*}
+\<close>
 
 (*<*)by(simp split: prod.split_asm)(*>*)
 primrec swap :: "'a \<times> 'b \<Rightarrow> 'b \<times> 'a" where "swap (x,y) = (y,x)"
 
-text{*\noindent
+text\<open>\noindent
 Note that the above \isacommand{primrec} definition is admissible
 because @{text"\<times>"} is a datatype. When we now try to prove
-*}
+\<close>
 
 lemma "swap(swap p) = p"
 
-txt{*\noindent
+txt\<open>\noindent
 simplification will do nothing, because the defining equation for
 @{const[source] swap} expects a pair. Again, we need to turn @{term p}
 into a pair first, but this time there is no @{term split} in sight.
 The only thing we can do is to split the term by hand:
-*}
+\<close>
 apply(case_tac p)
 
-txt{*\noindent
+txt\<open>\noindent
 @{subgoals[display,indent=0]}
 Again, \methdx{case_tac} is applicable because @{text"\<times>"} is a datatype.
 The subgoal is easily proved by @{text simp}.
@@ -154,20 +154,20 @@
 
 Alternatively, you can split \emph{all} @{text"\<And>"}-quantified variables
 in a goal with the rewrite rule @{thm[source]split_paired_all}:
-*}
+\<close>
 
 (*<*)by simp(*>*)
 lemma "\<And>p q. swap(swap p) = q \<longrightarrow> p = q"
 apply(simp only: split_paired_all)
 
-txt{*\noindent
+txt\<open>\noindent
 @{subgoals[display,indent=0,margin=70]}
-*}
+\<close>
 
 apply simp
 done
 
-text{*\noindent
+text\<open>\noindent
 Note that we have intentionally included only @{thm[source]split_paired_all}
 in the first simplification step, and then we simplify again. 
 This time the reason was not merely
@@ -176,22 +176,22 @@
 of the simplifier.
 The following combined command could fail (here it does not)
 in situations where two separate \isa{simp} applications succeed.
-*}
+\<close>
 
 (*<*)
 lemma "\<And>p q. swap(swap p) = q \<longrightarrow> p = q"
 (*>*)
 apply(simp add: split_paired_all)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Finally, the simplifier automatically splits all @{text"\<forall>"} and
 @{text"\<exists>"}-quantified variables:
-*}
+\<close>
 
 lemma "\<forall>p. \<exists>q. swap p = swap q"
 by simp
 
-text{*\noindent
+text\<open>\noindent
 To turn off this automatic splitting, disable the
 responsible simplification rules:
 \begin{center}
@@ -202,7 +202,7 @@
 \hfill
 (@{thm[source]split_paired_Ex})
 \end{center}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Types/Records.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Types/Records.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,11 +1,11 @@
 
-section {* Records \label{sec:records} *}
+section \<open>Records \label{sec:records}\<close>
 
 (*<*)
 theory Records imports Main begin
 (*>*)
 
-text {*
+text \<open>
   \index{records|(}%
   Records are familiar from programming languages.  A record of $n$
   fields is essentially an $n$-tuple, but the record's components have
@@ -26,97 +26,97 @@
   work on all possible extensions of a given type scheme; polymorphism
   takes care of structural sub-typing behind the scenes.  There are
   also explicit coercion functions between fixed record types.
-*}
+\<close>
 
 
-subsection {* Record Basics *}
+subsection \<open>Record Basics\<close>
 
-text {*
+text \<open>
   Record types are not primitive in Isabelle and have a delicate
   internal representation @{cite "NaraschewskiW-TPHOLs98"}, based on
   nested copies of the primitive product type.  A \commdx{record}
   declaration introduces a new record type scheme by specifying its
   fields, which are packaged internally to hold up the perception of
   the record as a distinguished entity.  Here is a simple example:
-*}
+\<close>
 
 record point =
   Xcoord :: int
   Ycoord :: int
 
-text {*\noindent
+text \<open>\noindent
   Records of type @{typ point} have two fields named @{const Xcoord}
   and @{const Ycoord}, both of type~@{typ int}.  We now define a
   constant of type @{typ point}:
-*}
+\<close>
 
 definition pt1 :: point where
 "pt1 \<equiv> (| Xcoord = 999, Ycoord = 23 |)"
 
-text {*\noindent
+text \<open>\noindent
   We see above the ASCII notation for record brackets.  You can also
   use the symbolic brackets @{text \<lparr>} and @{text \<rparr>}.  Record type
   expressions can be also written directly with individual fields.
   The type name above is merely an abbreviation.
-*}
+\<close>
 
 definition pt2 :: "\<lparr>Xcoord :: int, Ycoord :: int\<rparr>" where
 "pt2 \<equiv> \<lparr>Xcoord = -45, Ycoord = 97\<rparr>"
 
-text {*
+text \<open>
   For each field, there is a \emph{selector}\index{selector!record}
   function of the same name.  For example, if @{text p} has type @{typ
   point} then @{text "Xcoord p"} denotes the value of the @{text
   Xcoord} field of~@{text p}.  Expressions involving field selection
   of explicit records are simplified automatically:
-*}
+\<close>
 
 lemma "Xcoord \<lparr>Xcoord = a, Ycoord = b\<rparr> = a"
   by simp
 
-text {*
+text \<open>
   The \emph{update}\index{update!record} operation is functional.  For
   example, @{term "p\<lparr>Xcoord := 0\<rparr>"} is a record whose @{const Xcoord}
   value is zero and whose @{const Ycoord} value is copied from~@{text
   p}.  Updates of explicit records are also simplified automatically:
-*}
+\<close>
 
 lemma "\<lparr>Xcoord = a, Ycoord = b\<rparr>\<lparr>Xcoord := 0\<rparr> =
          \<lparr>Xcoord = 0, Ycoord = b\<rparr>"
   by simp
 
-text {*
+text \<open>
   \begin{warn}
   Field names are declared as constants and can no longer be used as
   variables.  It would be unwise, for example, to call the fields of
   type @{typ point} simply @{text x} and~@{text y}.
   \end{warn}
-*}
+\<close>
 
 
-subsection {* Extensible Records and Generic Operations *}
+subsection \<open>Extensible Records and Generic Operations\<close>
 
-text {*
+text \<open>
   \index{records!extensible|(}%
 
   Now, let us define coloured points (type @{text cpoint}) to be
   points extended with a field @{text col} of type @{text colour}:
-*}
+\<close>
 
 datatype colour = Red | Green | Blue
 
 record cpoint = point +
   col :: colour
 
-text {*\noindent
+text \<open>\noindent
   The fields of this new type are @{const Xcoord}, @{text Ycoord} and
   @{text col}, in that order.
-*}
+\<close>
 
 definition cpt1 :: cpoint where
 "cpt1 \<equiv> \<lparr>Xcoord = 999, Ycoord = 23, col = Green\<rparr>"
 
-text {*
+text \<open>
   We can define generic operations that work on arbitrary
   instances of a record scheme, e.g.\ covering @{typ point}, @{typ
   cpoint}, and any further extensions.  Every record structure has an
@@ -127,12 +127,12 @@
   implicitly set to @{text "()"}, the empty tuple, which has type
   @{typ unit}.  Within the record brackets, you can refer to the
   @{text more} field by writing ``@{text "\<dots>"}'' (three dots):
-*}
+\<close>
 
 lemma "Xcoord \<lparr>Xcoord = a, Ycoord = b, \<dots> = p\<rparr> = a"
   by simp
 
-text {*
+text \<open>
   This lemma applies to any record whose first two fields are @{text
   Xcoord} and~@{const Ycoord}.  Note that @{text "\<lparr>Xcoord = a, Ycoord
   = b, \<dots> = ()\<rparr>"} is exactly the same as @{text "\<lparr>Xcoord = a, Ycoord
@@ -142,12 +142,12 @@
 
   The @{text more} pseudo-field may be manipulated directly as well,
   but the identifier needs to be qualified:
-*}
+\<close>
 
 lemma "point.more cpt1 = \<lparr>col = Green\<rparr>"
   by (simp add: cpt1_def)
 
-text {*\noindent
+text \<open>\noindent
   We see that the colour part attached to this @{typ point} is a
   rudimentary record in its own right, namely @{text "\<lparr>col =
   Green\<rparr>"}.  In order to select or update @{text col}, this fragment
@@ -176,33 +176,33 @@
   In the following example we define two operations --- methods, if we
   regard records as objects --- to get and set any point's @{text
   Xcoord} field.
-*}
+\<close>
 
 definition getX :: "'a point_scheme \<Rightarrow> int" where
 "getX r \<equiv> Xcoord r"
 definition setX :: "'a point_scheme \<Rightarrow> int \<Rightarrow> 'a point_scheme" where
 "setX r a \<equiv> r\<lparr>Xcoord := a\<rparr>"
 
-text {*
+text \<open>
   Here is a generic method that modifies a point, incrementing its
   @{const Xcoord} field.  The @{text Ycoord} and @{text more} fields
   are copied across.  It works for any record type scheme derived from
   @{typ point} (including @{typ cpoint} etc.):
-*}
+\<close>
 
 definition incX :: "'a point_scheme \<Rightarrow> 'a point_scheme" where
 "incX r \<equiv>
   \<lparr>Xcoord = Xcoord r + 1, Ycoord = Ycoord r, \<dots> = point.more r\<rparr>"
 
-text {*
+text \<open>
   Generic theorems can be proved about generic methods.  This trivial
   lemma relates @{const incX} to @{text getX} and @{text setX}:
-*}
+\<close>
 
 lemma "incX r = setX r (getX r + 1)"
   by (simp add: getX_def setX_def incX_def)
 
-text {*
+text \<open>
   \begin{warn}
   If you use the symbolic record brackets @{text \<lparr>} and @{text \<rparr>},
   then you must also use the symbolic ellipsis, ``@{text \<dots>}'', rather
@@ -211,30 +211,30 @@
   more distinct on screen than they are on paper.)
   \end{warn}%
   \index{records!extensible|)}
-*}
+\<close>
 
 
-subsection {* Record Equality *}
+subsection \<open>Record Equality\<close>
 
-text {*
+text \<open>
   Two records are equal\index{equality!of records} if all pairs of
   corresponding fields are equal.  Concrete record equalities are
   simplified automatically:
-*}
+\<close>
 
 lemma "(\<lparr>Xcoord = a, Ycoord = b\<rparr> = \<lparr>Xcoord = a', Ycoord = b'\<rparr>) =
     (a = a' \<and> b = b')"
   by simp
 
-text {*
+text \<open>
   The following equality is similar, but generic, in that @{text r}
   can be any instance of @{typ "'a point_scheme"}:
-*}
+\<close>
 
 lemma "r\<lparr>Xcoord := a, Ycoord := b\<rparr> = r\<lparr>Ycoord := b, Xcoord := a\<rparr>"
   by simp
 
-text {*\noindent
+text \<open>\noindent
   We see above the syntax for iterated updates.  We could equivalently
   have written the left-hand side as @{text "r\<lparr>Xcoord := a\<rparr>\<lparr>Ycoord :=
   b\<rparr>"}.
@@ -242,80 +242,80 @@
   Record equality is \emph{extensional}:
   \index{extensionality!for records} a record is determined entirely
   by the values of its fields.
-*}
+\<close>
 
 lemma "r = \<lparr>Xcoord = Xcoord r, Ycoord = Ycoord r\<rparr>"
   by simp
 
-text {*\noindent
+text \<open>\noindent
   The generic version of this equality includes the pseudo-field
   @{text more}:
-*}
+\<close>
 
 lemma "r = \<lparr>Xcoord = Xcoord r, Ycoord = Ycoord r, \<dots> = point.more r\<rparr>"
   by simp
 
-text {*
+text \<open>
   The simplifier can prove many record equalities
   automatically, but general equality reasoning can be tricky.
   Consider proving this obvious fact:
-*}
+\<close>
 
 lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
   apply simp?
   oops
 
-text {*\noindent
+text \<open>\noindent
   Here the simplifier can do nothing, since general record equality is
   not eliminated automatically.  One way to proceed is by an explicit
   forward step that applies the selector @{const Xcoord} to both sides
   of the assumed record equality:
-*}
+\<close>
 
 lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
   apply (drule_tac f = Xcoord in arg_cong)
-  txt {* @{subgoals [display, indent = 0, margin = 65]}
+  txt \<open>@{subgoals [display, indent = 0, margin = 65]}
     Now, @{text simp} will reduce the assumption to the desired
-    conclusion. *}
+    conclusion.\<close>
   apply simp
   done
 
-text {*
+text \<open>
   The @{text cases} method is preferable to such a forward proof.  We
   state the desired lemma again:
-*}
+\<close>
 
 lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
 
-  txt {* The \methdx{cases} method adds an equality to replace the
+  txt \<open>The \methdx{cases} method adds an equality to replace the
   named record term by an explicit record expression, listing all
   fields.  It even includes the pseudo-field @{text more}, since the
-  record equality stated here is generic for all extensions. *}
+  record equality stated here is generic for all extensions.\<close>
 
   apply (cases r)
 
-  txt {* @{subgoals [display, indent = 0, margin = 65]} Again, @{text
+  txt \<open>@{subgoals [display, indent = 0, margin = 65]} Again, @{text
   simp} finishes the proof.  Because @{text r} is now represented as
   an explicit record construction, the updates can be applied and the
   record equality can be replaced by equality of the corresponding
-  fields (due to injectivity). *}
+  fields (due to injectivity).\<close>
 
   apply simp
   done
 
-text {*
+text \<open>
   The generic cases method does not admit references to locally bound
   parameters of a goal.  In longer proof scripts one might have to
   fall back on the primitive @{text rule_tac} used together with the
   internal field representation rules of records.  The above use of
   @{text "(cases r)"} would become @{text "(rule_tac r = r in
   point.cases_scheme)"}.
-*}
+\<close>
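+
+text \<open>Spelled out, the @{text rule_tac} variant mentioned above might
+  read as follows (a sketch using the generated rule
+  @{text point.cases_scheme}):\<close>
+
+lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
+  apply (rule_tac r = r in point.cases_scheme)
+  apply simp
+  done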
 
 
-subsection {* Extending and Truncating Records *}
+subsection \<open>Extending and Truncating Records\<close>
 
-text {*
+text \<open>
   Each record declaration introduces a number of derived operations to
   refer collectively to a record's fields and to convert between fixed
   record types.  They can, for instance, convert between types @{typ
@@ -361,33 +361,33 @@
   extending an ordinary point.  Function @{text point.extend} augments
   @{text pt1} with a colour value, which is converted into an
   appropriate record fragment by @{text cpoint.fields}.
-*}
+\<close>
 
 definition cpt2 :: cpoint where
 "cpt2 \<equiv> point.extend pt1 (cpoint.fields Green)"
 
-text {*
+text \<open>
   The coloured points @{const cpt1} and @{text cpt2} are equal.  The
   proof is trivial, by unfolding all the definitions.  We deliberately
   omit the definition of~@{text pt1} in order to reveal the underlying
   comparison on type @{typ point}.
-*}
+\<close>
 
 lemma "cpt1 = cpt2"
   apply (simp add: cpt1_def cpt2_def point.defs cpoint.defs)
-  txt {* @{subgoals [display, indent = 0, margin = 65]} *}
+  txt \<open>@{subgoals [display, indent = 0, margin = 65]}\<close>
   apply (simp add: pt1_def)
   done
 
-text {*
+text \<open>
   In the example below, a coloured point is truncated to leave a
   point.  We use the @{text truncate} function of the target record.
-*}
+\<close>
 
 lemma "point.truncate cpt2 = pt1"
   by (simp add: pt1_def cpt2_def point.defs)
 
-text {*
+text \<open>
   \begin{exercise}
   Extend record @{typ cpoint} to have a further field, @{text
   intensity}, of type~@{typ nat}.  Experiment with generic operations
@@ -401,7 +401,7 @@
   Model a small class hierarchy using records.
   \end{exercise}
   \index{records|)}
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Types/Typedefs.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Types/Typedefs.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory Typedefs imports Main begin(*>*)
 
-section{*Introducing New Types*}
+section\<open>Introducing New Types\<close>
 
-text{*\label{sec:adv-typedef}
+text\<open>\label{sec:adv-typedef}
 For most applications, a combination of predefined types like @{typ bool} and
 @{text"\<Rightarrow>"} with recursive datatypes and records is quite sufficient. Very
 occasionally you may feel the need for a more advanced type.  If you
@@ -12,19 +12,19 @@
   Types in HOL must be non-empty; otherwise the quantifier rules would be
   unsound, because $\exists x.\ x=x$ is a theorem.
 \end{warn}
-*}
+\<close>
 
-subsection{*Declaring New Types*}
+subsection\<open>Declaring New Types\<close>
 
-text{*\label{sec:typedecl}
+text\<open>\label{sec:typedecl}
 \index{types!declaring|(}%
 \index{typedecl@\isacommand {typedecl} (command)}%
 The most trivial way of introducing a new type is by a \textbf{type
-declaration}: *}
+declaration}:\<close>
 
 typedecl my_new_type
 
-text{*\noindent
+text\<open>\noindent
 This does not define @{typ my_new_type} at all but merely introduces its
 name. Thus we know nothing about this type, except that it is
 non-empty. Such declarations without definitions are
@@ -39,23 +39,23 @@
 If you are looking for a quick and dirty way of introducing a new type
 together with its properties: declare the type and state its properties as
 axioms. Example:
-*}
+\<close>
 
 axiomatization where
 just_one: "\<exists>x::my_new_type. \<forall>y. x = y"
 
-text{*\noindent
+text\<open>\noindent
 However, we strongly discourage this approach, except in the exploratory stages
 of your development. It is extremely easy to write down contradictory sets of
 axioms, in which case you will be able to prove everything but it will mean
 nothing.  In the example above, the axiomatic approach is
 unnecessary: a one-element type called @{typ unit} is already defined in HOL.
 \index{types!declaring|)}
-*}
+\<close>
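+
+text\<open>For illustration, the axiom indeed collapses @{typ my_new_type}
+to a single element (a sketch; @{text metis} is just one prover that
+closes the goal):\<close>
+
+lemma "(x::my_new_type) = y"
+  using just_one by metis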
 
-subsection{*Defining New Types*}
+subsection\<open>Defining New Types\<close>
 
-text{*\label{sec:typedef}
+text\<open>\label{sec:typedef}
 \index{types!defining|(}%
 \index{typedecl@\isacommand {typedef} (command)|(}%
 Now we come to the most general means of safely introducing a new type, the
@@ -67,22 +67,22 @@
 
 Let us work a simple example, the definition of a three-element type.
 It is easily represented by the first three natural numbers:
-*}
+\<close>
 
 typedef three = "{0::nat, 1, 2}"
 
-txt{*\noindent
+txt\<open>\noindent
 In order to enforce that the representing set on the right-hand side is
 non-empty, this definition actually starts a proof to that effect:
 @{subgoals[display,indent=0]}
 Fortunately, this is easy enough to show; even \isa{auto} could do it.
 In general, one has to provide a witness, in our case 0:
-*}
+\<close>
 
 apply(rule_tac x = 0 in exI)
 by simp
 
-text{*
+text\<open>
 This type definition introduces the new type @{typ three} and asserts
 that it is a copy of the set @{term"{0::nat,1,2}"}. This assertion
 is expressed via a bijection between the \emph{type} @{typ three} and the
@@ -125,13 +125,13 @@
 \end{itemize}
 In our example it suffices to give the three elements of type @{typ three}
 names:
-*}
+\<close>
 
 definition A :: three where "A \<equiv> Abs_three 0"
 definition B :: three where "B \<equiv> Abs_three 1"
 definition C :: three where "C \<equiv> Abs_three 2"
 
-text{*
+text\<open>
 So far, everything was easy. But it is clear that reasoning about @{typ
 three} will be hell if we have to go back to @{typ nat} every time. Thus our
 aim must be to raise our level of abstraction by deriving enough theorems
@@ -169,45 +169,45 @@
 Distinctness of @{term A}, @{term B} and @{term C} follows immediately
 if we expand their definitions and rewrite with the injectivity
 of @{term Abs_three}:
-*}
+\<close>
 
 lemma "A \<noteq> B \<and> B \<noteq> A \<and> A \<noteq> C \<and> C \<noteq> A \<and> B \<noteq> C \<and> C \<noteq> B"
 by(simp add: Abs_three_inject A_def B_def C_def)
 
-text{*\noindent
+text\<open>\noindent
 Of course we rely on the simplifier to solve goals like @{prop"(0::nat) \<noteq> 1"}.
 
 The fact that @{term A}, @{term B} and @{term C} exhaust type @{typ three} is
 best phrased as a case distinction theorem: if you want to prove @{prop"P x"}
 (where @{term x} is of type @{typ three}) it suffices to prove @{prop"P A"},
-@{prop"P B"} and @{prop"P C"}: *}
+@{prop"P B"} and @{prop"P C"}:\<close>
 
 lemma three_cases: "\<lbrakk> P A; P B; P C \<rbrakk> \<Longrightarrow> P x"
 
-txt{*\noindent Again this follows easily using the induction principle stemming from the type definition:*}
+txt\<open>\noindent Again this follows easily using the induction principle stemming from the type definition:\<close>
 
 apply(induct_tac x)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 Simplification leads to the disjunction @{prop"y
 = 0 \<or> y = 1 \<or> y = (2::nat)"} which \isa{auto} separates into three
-subgoals, each of which is easily solved by simplification: *}
+subgoals, each of which is easily solved by simplification:\<close>
 
 apply(auto simp add: A_def B_def C_def)
 done
 
-text{*\noindent
+text\<open>\noindent
 This concludes the derivation of the characteristic theorems for
 type @{typ three}.
 
 The attentive reader has realized long ago that the
 above lengthy definition can be collapsed into one line:
-*}
+\<close>
 
 datatype better_three = A | B | C
 
-text{*\noindent
+text\<open>\noindent
 In fact, the \isacommand{datatype} command performs internally more or less
 the same derivations as we did, which gives you some idea what life would be
 like without \isacommand{datatype}.
@@ -232,6 +232,6 @@
 abstract functions $F$ and properties $P$.%
 \index{typedecl@\isacommand {typedef} (command)|)}%
 \index{types!defining|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/HOL/Data_Structures/AA_Map.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/AA_Map.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -141,7 +141,7 @@
           by (auto simp add: skew_invar split_invar)
       next
         case (Incr)
-        thus ?thesis using invar_NodeR2[OF `invar ?t` Incr(2) 1 iir] 1 \<open>x < a\<close>
+        thus ?thesis using invar_NodeR2[OF \<open>invar ?t\<close> Incr(2) 1 iir] 1 \<open>x < a\<close>
           by (auto simp add: skew_invar split_invar split: if_splits)
       qed
     qed
@@ -193,7 +193,7 @@
 qed (simp add: post_del_def)
 
 
-subsection {* Functional Correctness Proofs *}
+subsection \<open>Functional Correctness Proofs\<close>
 
 theorem inorder_update:
   "sorted1(inorder t) \<Longrightarrow> inorder(update x y t) = upd_list x y (inorder t)"
--- a/src/HOL/Data_Structures/AA_Set.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/AA_Set.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -69,13 +69,13 @@
                \<Rightarrow> Node (lva+1) (Node (lv-1) l x t2) a
                     (split (Node (if sngl t1 then lva else lva+1) t3 b t4)))))"
 
-text{* In the paper, the last case of @{const adjust} is expressed with the help of an
+text\<open>In the paper, the last case of @{const adjust} is expressed with the help of an
 incorrect auxiliary function \texttt{nlvl}.
 
 Function @{text del_max} below is called \texttt{dellrg} in the paper.
 The latter is incorrect for two reasons: \texttt{dellrg} is meant to delete the largest
 element but recurses on the left instead of the right subtree, and the invariant
-is not restored.*}
+is not restored.\<close>
 
 fun del_max :: "'a aa_tree \<Rightarrow> 'a aa_tree * 'a" where
 "del_max (Node lv l a Leaf) = (l,a)" |
@@ -269,7 +269,7 @@
           by (auto simp add: skew_invar split_invar)
       next
         case (Incr)
-        thus ?thesis using invar_NodeR2[OF `invar ?t` Incr(2) 1 iir] 1 \<open>x < a\<close>
+        thus ?thesis using invar_NodeR2[OF \<open>invar ?t\<close> Incr(2) 1 iir] 1 \<open>x < a\<close>
           by (auto simp add: skew_invar split_invar split: if_splits)
       qed
     qed
--- a/src/HOL/Data_Structures/AList_Upd_Del.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/AList_Upd_Del.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow *)
 
-section {* Association List Update and Deletion *}
+section \<open>Association List Update and Deletion\<close>
 
 theory AList_Upd_Del
 imports Sorted_Less
@@ -8,8 +8,8 @@
 
 abbreviation "sorted1 ps \<equiv> sorted(map fst ps)"
 
-text{* Define own @{text map_of} function to avoid pulling in an unknown
-amount of lemmas implicitly (via the simpset). *}
+text\<open>We define our own @{text map_of} function to avoid pulling in an unknown
+number of lemmas implicitly (via the simpset).\<close>
 
 hide_const (open) map_of
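+
+text\<open>The local definition then takes the usual shape, roughly as
+follows (a sketch; the actual definition is elided from this hunk):\<close>
+
+fun map_of :: "('a * 'b) list \<Rightarrow> 'a \<Rightarrow> 'b option" where
+"map_of [] = (\<lambda>x. None)" |
+"map_of ((a,b) # ps) = (\<lambda>x. if x = a then Some b else map_of ps x)"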
 
--- a/src/HOL/Data_Structures/AVL_Map.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/AVL_Map.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -23,7 +23,7 @@
    GT \<Rightarrow> balL l (a,b) (delete x r))"
 
 
-subsection {* Functional Correctness Proofs *}
+subsection \<open>Functional Correctness Proofs\<close>
 
 theorem inorder_update:
   "sorted1(inorder t) \<Longrightarrow> inorder(update x y t) = upd_list x y (inorder t)"
--- a/src/HOL/Data_Structures/AVL_Set.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/AVL_Set.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -14,7 +14,7 @@
 
 type_synonym 'a avl_tree = "('a,nat) tree"
 
-text {* Invariant: *}
+text \<open>Invariant:\<close>
 
 fun avl :: "'a avl_tree \<Rightarrow> bool" where
 "avl Leaf = True" |
@@ -80,9 +80,9 @@
      GT \<Rightarrow> balL l a (delete x r))"
 
 
-subsection {* Functional Correctness Proofs *}
+subsection \<open>Functional Correctness Proofs\<close>
 
-text{* Very different from the AFP/AVL proofs *}
+text\<open>Very different from the AFP/AVL proofs\<close>
 
 
 subsubsection "Proofs for insert"
@@ -137,12 +137,12 @@
 qed (rule TrueI)+
 
 
-subsection {* AVL invariants *}
+subsection \<open>AVL invariants\<close>
 
-text{* Essentially the AFP/AVL proofs *}
+text\<open>Essentially the AFP/AVL proofs\<close>
 
 
-subsubsection {* Insertion maintains AVL balance *}
+subsubsection \<open>Insertion maintains AVL balance\<close>
 
 declare Let_def [simp]
 
@@ -222,7 +222,7 @@
 
 (* It appears that these two properties need to be proved simultaneously: *)
 
-text{* Insertion maintains the AVL property: *}
+text\<open>Insertion maintains the AVL property:\<close>
 
 theorem avl_insert_aux:
   assumes "avl t"
@@ -244,7 +244,7 @@
       with Node 1 show ?thesis by (auto simp add:avl_balL)
     next
       case False
-      with Node 1 `x\<noteq>a` show ?thesis by (auto simp add:avl_balR)
+      with Node 1 \<open>x\<noteq>a\<close> show ?thesis by (auto simp add:avl_balR)
     qed
   qed
   case 2
@@ -259,7 +259,7 @@
       case True
       with Node 2 show ?thesis
       proof(cases "height (insert x l) = height r + 2")
-        case False with Node 2 `x < a` show ?thesis by (auto simp: height_balL2)
+        case False with Node 2 \<open>x < a\<close> show ?thesis by (auto simp: height_balL2)
       next
         case True 
         hence "(height (balL (insert x l) a r) = height r + 2) \<or>
@@ -268,10 +268,10 @@
         thus ?thesis
         proof
           assume ?A
-          with 2 `x < a` show ?thesis by (auto)
+          with 2 \<open>x < a\<close> show ?thesis by (auto)
         next
           assume ?B
-          with True 1 Node(2) `x < a` show ?thesis by (simp) arith
+          with True 1 Node(2) \<open>x < a\<close> show ?thesis by (simp) arith
         qed
       qed
     next
@@ -279,7 +279,7 @@
       with Node 2 show ?thesis 
       proof(cases "height (insert x r) = height l + 2")
         case False
-        with Node 2 `\<not>x < a` show ?thesis by (auto simp: height_balR2)
+        with Node 2 \<open>\<not>x < a\<close> show ?thesis by (auto simp: height_balR2)
       next
         case True 
         hence "(height (balR l a (insert x r)) = height l + 2) \<or>
@@ -288,10 +288,10 @@
         thus ?thesis 
         proof
           assume ?A
-          with 2 `\<not>x < a` show ?thesis by (auto)
+          with 2 \<open>\<not>x < a\<close> show ?thesis by (auto)
         next
           assume ?B
-          with True 1 Node(4) `\<not>x < a` show ?thesis by (simp) arith
+          with True 1 Node(4) \<open>\<not>x < a\<close> show ?thesis by (simp) arith
         qed
       qed
     qed
@@ -299,7 +299,7 @@
 qed simp_all
 
 
-subsubsection {* Deletion maintains AVL balance *}
+subsubsection \<open>Deletion maintains AVL balance\<close>
 
 lemma avl_del_max:
   assumes "avl x" and "x \<noteq> Leaf"
@@ -317,7 +317,7 @@
   case (Node h l a r)
   case 2
   let ?r' = "fst (del_max r)"
-  from `avl x` Node 2 have "avl l" and "avl r" by simp_all
+  from \<open>avl x\<close> Node 2 have "avl l" and "avl r" by simp_all
   thus ?case using Node 2 height_balL[of l ?r' a] height_balL2[of l ?r' a]
     apply (auto split:prod.splits simp del:avl.simps) by arith+
 qed auto
@@ -331,13 +331,13 @@
   let ?l = "Node lh ll ln lr"
   let ?r = "Node rh rl rn rr"
   let ?l' = "fst (del_max ?l)"
-  from `avl t` and Node_Node have "avl ?r" by simp
-  from `avl t` and Node_Node have "avl ?l" by simp
+  from \<open>avl t\<close> and Node_Node have "avl ?r" by simp
+  from \<open>avl t\<close> and Node_Node have "avl ?l" by simp
   hence "avl(?l')" "height ?l = height(?l') \<or>
          height ?l = height(?l') + 1" by (rule avl_del_max,simp)+
-  with `avl t` Node_Node have "height ?l' = height ?r \<or> height ?l' = height ?r + 1
+  with \<open>avl t\<close> Node_Node have "height ?l' = height ?r \<or> height ?l' = height ?r + 1
             \<or> height ?r = height ?l' + 1 \<or> height ?r = height ?l' + 2" by fastforce
-  with `avl ?l'` `avl ?r` have "avl(balR ?l' (snd(del_max ?l)) ?r)"
+  with \<open>avl ?l'\<close> \<open>avl ?r\<close> have "avl(balR ?l' (snd(del_max ?l)) ?r)"
     by (rule avl_balR)
   with Node_Node show ?thesis by (auto split:prod.splits)
 qed simp_all
@@ -352,19 +352,19 @@
   let ?r = "Node rh rl rn rr"
   let ?l' = "fst (del_max ?l)"
   let ?t' = "balR ?l' (snd(del_max ?l)) ?r"
-  from `avl t` and Node_Node have "avl ?r" by simp
-  from `avl t` and Node_Node have "avl ?l" by simp
+  from \<open>avl t\<close> and Node_Node have "avl ?r" by simp
+  from \<open>avl t\<close> and Node_Node have "avl ?l" by simp
   hence "avl(?l')"  by (rule avl_del_max,simp)
-  have l'_height: "height ?l = height ?l' \<or> height ?l = height ?l' + 1" using `avl ?l` by (intro avl_del_max) auto
-  have t_height: "height t = 1 + max (height ?l) (height ?r)" using `avl t` Node_Node by simp
-  have "height t = height ?t' \<or> height t = height ?t' + 1" using  `avl t` Node_Node
+  have l'_height: "height ?l = height ?l' \<or> height ?l = height ?l' + 1" using \<open>avl ?l\<close> by (intro avl_del_max) auto
+  have t_height: "height t = 1 + max (height ?l) (height ?r)" using \<open>avl t\<close> Node_Node by simp
+  have "height t = height ?t' \<or> height t = height ?t' + 1" using  \<open>avl t\<close> Node_Node
   proof(cases "height ?r = height ?l' + 2")
     case False
-    show ?thesis using l'_height t_height False by (subst  height_balR2[OF `avl ?l'` `avl ?r` False])+ arith
+    show ?thesis using l'_height t_height False by (subst  height_balR2[OF \<open>avl ?l'\<close> \<open>avl ?r\<close> False])+ arith
   next
     case True
     show ?thesis
-    proof(cases rule: disjE[OF height_balR[OF True `avl ?l'` `avl ?r`, of "snd (del_max ?l)"]])
+    proof(cases rule: disjE[OF height_balR[OF True \<open>avl ?l'\<close> \<open>avl ?r\<close>, of "snd (del_max ?l)"]])
       case 1
       thus ?thesis using l'_height t_height True by arith
     next
@@ -375,7 +375,7 @@
   thus ?thesis using Node_Node by (auto split:prod.splits)
 qed simp_all
 
-text{* Deletion maintains the AVL property: *}
+text\<open>Deletion maintains the AVL property:\<close>
 
 theorem avl_delete_aux:
   assumes "avl t" 
@@ -396,7 +396,7 @@
       with Node 1 show ?thesis by (auto simp add:avl_balR)
     next
       case False
-      with Node 1 `x\<noteq>n` show ?thesis by (auto simp add:avl_balL)
+      with Node 1 \<open>x\<noteq>n\<close> show ?thesis by (auto simp add:avl_balL)
     qed
   qed
   case 2
@@ -414,7 +414,7 @@
       case True
       show ?thesis
       proof(cases "height r = height (delete x l) + 2")
-        case False with Node 1 `x < n` show ?thesis by(auto simp: balR_def)
+        case False with Node 1 \<open>x < n\<close> show ?thesis by(auto simp: balR_def)
       next
         case True 
         hence "(height (balR (delete x l) n r) = height (delete x l) + 2) \<or>
@@ -423,17 +423,17 @@
         thus ?thesis 
         proof
           assume ?A
-          with `x < n` Node 2 show ?thesis by(auto simp: balR_def)
+          with \<open>x < n\<close> Node 2 show ?thesis by(auto simp: balR_def)
         next
           assume ?B
-          with `x < n` Node 2 show ?thesis by(auto simp: balR_def)
+          with \<open>x < n\<close> Node 2 show ?thesis by(auto simp: balR_def)
         qed
       qed
     next
       case False
       show ?thesis
       proof(cases "height l = height (delete x r) + 2")
-        case False with Node 1 `\<not>x < n` `x \<noteq> n` show ?thesis by(auto simp: balL_def)
+        case False with Node 1 \<open>\<not>x < n\<close> \<open>x \<noteq> n\<close> show ?thesis by(auto simp: balL_def)
       next
         case True 
         hence "(height (balL l n (delete x r)) = height (delete x r) + 2) \<or>
@@ -442,10 +442,10 @@
         thus ?thesis 
         proof
           assume ?A
-          with `\<not>x < n` `x \<noteq> n` Node 2 show ?thesis by(auto simp: balL_def)
+          with \<open>\<not>x < n\<close> \<open>x \<noteq> n\<close> Node 2 show ?thesis by(auto simp: balL_def)
         next
           assume ?B
-          with `\<not>x < n` `x \<noteq> n` Node 2 show ?thesis by(auto simp: balL_def)
+          with \<open>\<not>x < n\<close> \<open>x \<noteq> n\<close> Node 2 show ?thesis by(auto simp: balL_def)
         qed
       qed
     qed
--- a/src/HOL/Data_Structures/Brother12_Map.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Brother12_Map.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -130,24 +130,24 @@
     proof cases
       assume "l \<in> B h"
       from n2_type3[OF Suc.IH(1)[OF this] lr(2)]
-      show ?thesis using `x<a` by(simp)
+      show ?thesis using \<open>x<a\<close> by(simp)
     next
       assume "l \<notin> B h"
       hence "l \<in> U h" "r \<in> B h" using lr by auto
       from n2_type1[OF Suc.IH(2)[OF this(1)] this(2)]
-      show ?thesis using `x<a` by(simp)
+      show ?thesis using \<open>x<a\<close> by(simp)
     qed
     moreover
     have ?case if "x > a"
     proof cases
       assume "r \<in> B h"
       from n2_type3[OF lr(1) Suc.IH(1)[OF this]]
-      show ?thesis using `x>a` by(simp)
+      show ?thesis using \<open>x>a\<close> by(simp)
     next
       assume "r \<notin> B h"
       hence "l \<in> B h" "r \<in> U h" using lr by auto
       from n2_type2[OF this(1) Suc.IH(2)[OF this(2)]]
-      show ?thesis using `x>a` by(simp)
+      show ?thesis using \<open>x>a\<close> by(simp)
     qed
     moreover
     have ?case if [simp]: "x=a"
--- a/src/HOL/Data_Structures/Brother12_Set.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Brother12_Set.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -234,7 +234,7 @@
 lemma Bp_if_B: "t \<in> B h \<Longrightarrow> t \<in> Bp h"
 by (cases h rule: Bp.cases) simp_all
 
-text{* An automatic proof: *}
+text\<open>An automatic proof:\<close>
 
 lemma
   "(t \<in> B h \<longrightarrow> ins x t \<in> Bp h) \<and> (t \<in> U h \<longrightarrow> ins x t \<in> T h)"
@@ -243,7 +243,7 @@
 apply (fastforce simp: Bp_if_B n2_type dest: n1_type)
 done
 
-text{* A detailed proof: *}
+text\<open>A detailed proof:\<close>
 
 lemma ins_type:
 shows "t \<in> B h \<Longrightarrow> ins x t \<in> Bp h" and "t \<in> U h \<Longrightarrow> ins x t \<in> T h"
@@ -269,7 +269,7 @@
         hence 1: "t1 \<in> U h" and 2: "t2 \<in> B h" using t1 t12 by auto
         show ?thesis by (metis Suc.IH(2)[OF 1] Bp_if_B[OF 2] n2_type)
       qed
-      with `x < a` show ?case by simp
+      with \<open>x < a\<close> show ?case by simp
     qed
     moreover
     have ?case if "a < x"
@@ -283,13 +283,13 @@
         hence 1: "t1 \<in> B h" and 2: "t2 \<in> U h" using t2 t12 by auto
         show ?thesis by (metis Bp_if_B[OF 1] Suc.IH(2)[OF 2] n2_type)
       qed
-      with `a < x` show ?case by simp
+      with \<open>a < x\<close> show ?case by simp
     qed
     moreover
     have ?case if "x = a"
     proof -
       from 1 have "t \<in> Bp (Suc h)" by(rule Bp_if_B)
-      thus "?case" using `x = a` by simp
+      thus "?case" using \<open>x = a\<close> by simp
     qed
     ultimately show ?case by auto
   next
@@ -404,24 +404,24 @@
     proof cases
       assume "l \<in> B h"
       from n2_type3[OF Suc.IH(1)[OF this] lr(2)]
-      show ?thesis using `x<a` by(simp)
+      show ?thesis using \<open>x<a\<close> by(simp)
     next
       assume "l \<notin> B h"
       hence "l \<in> U h" "r \<in> B h" using lr by auto
       from n2_type1[OF Suc.IH(2)[OF this(1)] this(2)]
-      show ?thesis using `x<a` by(simp)
+      show ?thesis using \<open>x<a\<close> by(simp)
     qed
     moreover
     have ?case if "x > a"
     proof cases
       assume "r \<in> B h"
       from n2_type3[OF lr(1) Suc.IH(1)[OF this]]
-      show ?thesis using `x>a` by(simp)
+      show ?thesis using \<open>x>a\<close> by(simp)
     next
       assume "r \<notin> B h"
       hence "l \<in> B h" "r \<in> U h" using lr by auto
       from n2_type2[OF this(1) Suc.IH(2)[OF this(2)]]
-      show ?thesis using `x>a` by(simp)
+      show ?thesis using \<open>x>a\<close> by(simp)
     qed
     moreover
     have ?case if [simp]: "x=a"
--- a/src/HOL/Data_Structures/Cmp.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Cmp.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow, Daniel Stüwe *)
 
-section {* Three-Way Comparison *}
+section \<open>Three-Way Comparison\<close>
 
 theory Cmp
 imports Main
--- a/src/HOL/Data_Structures/Leftist_Heap.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Leftist_Heap.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -24,7 +24,7 @@
 "rk Leaf = 0" |
 "rk (Node n _ _ _) = n"
 
-text{* The invariants: *}
+text\<open>The invariants:\<close>
 
 fun (in linorder) heap :: "('a,'b) tree \<Rightarrow> bool" where
 "heap Leaf = True" |
@@ -111,7 +111,7 @@
     hence "ltree (merge ?t1 ?t2) = ltree (node l1 a1 (merge r1 ?t2))" by simp
     also have "\<dots> = (ltree l1 \<and> ltree(merge r1 ?t2))"
       by(simp add: ltree_node)
-    also have "..." using "3.prems" "3.IH"(1)[OF `a1 \<le> a2`] by (simp)
+    also have "..." using "3.prems" "3.IH"(1)[OF \<open>a1 \<le> a2\<close>] by (simp)
     finally show ?thesis .
   next (* analogous but automatic *)
     assume "\<not> a1 \<le> a2"
--- a/src/HOL/Data_Structures/Less_False.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Less_False.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 (* Author: Tobias Nipkow *)
 
-section {* Improved Simproc for $<$ *}
+section \<open>Improved Simproc for $<$\<close>
 
 theory Less_False
 imports Main
 begin
 
-simproc_setup less_False ("(x::'a::order) < y") = {* fn _ => fn ctxt => fn ct =>
+simproc_setup less_False ("(x::'a::order) < y") = \<open>fn _ => fn ctxt => fn ct =>
   let
     fun prp t thm = Thm.full_prop_of thm aconv t;
 
@@ -26,6 +26,6 @@
          | SOME thm => NONE
       end;
   in prove_less_False (Thm.term_of ct) end
-*}
+\<close>
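+
+text\<open>The simproc's apparent purpose: rewrite @{prop "x < y"} to
+@{const False} whenever the simplifier's context refutes it. A small
+check (a sketch; this particular goal is provable by plain simp even
+without the simproc):\<close>
+
+lemma "(y::nat) \<le> x \<Longrightarrow> (x < y) = False"
+  by simp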
 
 end
--- a/src/HOL/Data_Structures/List_Ins_Del.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/List_Ins_Del.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow *)
 
-section {* List Insertion and Deletion *}
+section \<open>List Insertion and Deletion\<close>
 
 theory List_Ins_Del
 imports Sorted_Less
@@ -26,10 +26,10 @@
   "sorted(xs @ [x]) = (sorted xs \<and> (\<forall>y \<in> elems xs. y < x))"
 by(simp add: elems_eq_set sorted_wrt_append)
 
-text{* The above two rules introduce quantifiers. It turns out
+text\<open>The above two rules introduce quantifiers. It turns out
 that in practice this is not a problem because of the simplicity of
 the "isin" functions that implement @{const elems}. Nevertheless
-it is possible to avoid the quantifiers with the help of some rewrite rules: *}
+it is possible to avoid the quantifiers with the help of some rewrite rules:\<close>
 
 lemma sorted_ConsD: "sorted (y # xs) \<Longrightarrow> x \<le> y \<Longrightarrow> x \<notin> elems xs"
 by (auto simp: sorted_Cons_iff)
--- a/src/HOL/Data_Structures/Map_by_Ordered.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Map_by_Ordered.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow *)
 
-section {* Implementing Ordered Maps *}
+section \<open>Implementing Ordered Maps\<close>
 
 theory Map_by_Ordered
 imports AList_Upd_Del
--- a/src/HOL/Data_Structures/Set_by_Ordered.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Set_by_Ordered.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow *)
 
-section {* Implementing Ordered Sets *}
+section \<open>Implementing Ordered Sets\<close>
 
 theory Set_by_Ordered
 imports List_Ins_Del
--- a/src/HOL/Data_Structures/Sorted_Less.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Sorted_Less.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow *)
 
-section {* Lists Sorted wrt $<$ *}
+section \<open>Lists Sorted wrt $<$\<close>
 
 theory Sorted_Less
 imports Less_False
--- a/src/HOL/Data_Structures/Tree234.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Tree234.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (* Author: Tobias Nipkow *)
 
-section {* 2-3-4 Trees *}
+section \<open>2-3-4 Trees\<close>
 
 theory Tree234
 imports Main
@@ -36,7 +36,7 @@
 
 end
 
-text{* Balanced: *}
+text\<open>Balanced:\<close>
 fun bal :: "'a tree234 \<Rightarrow> bool" where
 "bal Leaf = True" |
 "bal (Node2 l _ r) = (bal l & bal r & height l = height r)" |
--- a/src/HOL/Data_Structures/Tree234_Set.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Tree234_Set.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -283,7 +283,7 @@
 
 subsubsection "Proofs for insert"
 
-text{* First a standard proof that @{const ins} preserves @{const bal}. *}
+text\<open>First a standard proof that @{const ins} preserves @{const bal}.\<close>
 
 instantiation up\<^sub>i :: (type)height
 begin
@@ -300,8 +300,8 @@
 by (induct t) (auto split!: if_split up\<^sub>i.split)
 
 
-text{* Now an alternative proof (by Brian Huffman) that runs faster because
-two properties (balance and height) are combined in one predicate. *}
+text\<open>Now an alternative proof (by Brian Huffman) that runs faster because
+two properties (balance and height) are combined in one predicate.\<close>
 
 inductive full :: "nat \<Rightarrow> 'a tree234 \<Rightarrow> bool" where
 "full 0 Leaf" |
@@ -348,11 +348,11 @@
 lemma bal_iff_full: "bal t \<longleftrightarrow> (\<exists>n. full n t)"
   by (auto elim!: bal_imp_full full_imp_bal)
 
-text {* The @{const "insert"} function either preserves the height of the
+text \<open>The @{const "insert"} function either preserves the height of the
 tree, or increases it by one. The constructor returned by the @{term
 "insert"} function determines which: A return value of the form @{term
 "T\<^sub>i t"} indicates that the height will be the same. A value of the
-form @{term "Up\<^sub>i l p r"} indicates an increase in height. *}
+form @{term "Up\<^sub>i l p r"} indicates an increase in height.\<close>
 
 primrec full\<^sub>i :: "nat \<Rightarrow> 'a up\<^sub>i \<Rightarrow> bool" where
 "full\<^sub>i n (T\<^sub>i t) \<longleftrightarrow> full n t" |
@@ -361,7 +361,7 @@
 lemma full\<^sub>i_ins: "full n t \<Longrightarrow> full\<^sub>i n (ins a t)"
 by (induct rule: full.induct) (auto, auto split: up\<^sub>i.split)
 
-text {* The @{const insert} operation preserves balance. *}
+text \<open>The @{const insert} operation preserves balance.\<close>
 
 lemma bal_insert: "bal t \<Longrightarrow> bal (insert a t)"
 unfolding bal_iff_full insert_def
--- a/src/HOL/Data_Structures/Tree23_Set.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/Data_Structures/Tree23_Set.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -206,7 +206,7 @@
 
 subsubsection "Proofs for insert"
 
-text{* First a standard proof that @{const ins} preserves @{const bal}. *}
+text\<open>First a standard proof that @{const ins} preserves @{const bal}.\<close>
 
 instantiation up\<^sub>i :: (type)height
 begin
@@ -222,8 +222,8 @@
 lemma bal_ins: "bal t \<Longrightarrow> bal (tree\<^sub>i(ins a t)) \<and> height(ins a t) = height t"
 by (induct t) (auto split!: if_split up\<^sub>i.split) (* 15 secs in 2015 *)
 
-text{* Now an alternative proof (by Brian Huffman) that runs faster because
-two properties (balance and height) are combined in one predicate. *}
+text\<open>Now an alternative proof (by Brian Huffman) that runs faster because
+two properties (balance and height) are combined in one predicate.\<close>
 
 inductive full :: "nat \<Rightarrow> 'a tree23 \<Rightarrow> bool" where
 "full 0 Leaf" |
@@ -264,11 +264,11 @@
 lemma bal_iff_full: "bal t \<longleftrightarrow> (\<exists>n. full n t)"
   by (auto elim!: bal_imp_full full_imp_bal)
 
-text {* The @{const "insert"} function either preserves the height of the
+text \<open>The @{const "insert"} function either preserves the height of the
 tree, or increases it by one. The constructor returned by the @{term
 "insert"} function determines which: A return value of the form @{term
 "T\<^sub>i t"} indicates that the height will be the same. A value of the
-form @{term "Up\<^sub>i l p r"} indicates an increase in height. *}
+form @{term "Up\<^sub>i l p r"} indicates an increase in height.\<close>
 
 fun full\<^sub>i :: "nat \<Rightarrow> 'a up\<^sub>i \<Rightarrow> bool" where
 "full\<^sub>i n (T\<^sub>i t) \<longleftrightarrow> full n t" |
@@ -277,7 +277,7 @@
 lemma full\<^sub>i_ins: "full n t \<Longrightarrow> full\<^sub>i n (ins a t)"
 by (induct rule: full.induct) (auto split: up\<^sub>i.split)
 
-text {* The @{const insert} operation preserves balance. *}
+text \<open>The @{const insert} operation preserves balance.\<close>
 
 lemma bal_insert: "bal t \<Longrightarrow> bal (insert a t)"
 unfolding bal_iff_full insert_def
--- a/src/HOL/IMP/ACom.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/ACom.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -17,7 +17,7 @@
 
 notation com.SKIP ("SKIP")
 
-text_raw{*\snip{stripdef}{1}{1}{% *}
+text_raw\<open>\snip{stripdef}{1}{1}{%\<close>
 fun strip :: "'a acom \<Rightarrow> com" where
 "strip (SKIP {P}) = SKIP" |
 "strip (x ::= e {P}) = x ::= e" |
@@ -25,18 +25,18 @@
 "strip (IF b THEN {P\<^sub>1} C\<^sub>1 ELSE {P\<^sub>2} C\<^sub>2 {P}) =
   IF b THEN strip C\<^sub>1 ELSE strip C\<^sub>2" |
 "strip ({I} WHILE b DO {P} C {Q}) = WHILE b DO strip C"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{asizedef}{1}{1}{% *}
+text_raw\<open>\snip{asizedef}{1}{1}{%\<close>
 fun asize :: "com \<Rightarrow> nat" where
 "asize SKIP = 1" |
 "asize (x ::= e) = 1" |
 "asize (C\<^sub>1;;C\<^sub>2) = asize C\<^sub>1 + asize C\<^sub>2" |
 "asize (IF b THEN C\<^sub>1 ELSE C\<^sub>2) = asize C\<^sub>1 + asize C\<^sub>2 + 3" |
 "asize (WHILE b DO C) = asize C + 3"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{annotatedef}{1}{1}{% *}
+text_raw\<open>\snip{annotatedef}{1}{1}{%\<close>
 definition shift :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a" where
 "shift f n = (\<lambda>p. f(p+n))"
 
@@ -50,9 +50,9 @@
   {f(asize c\<^sub>1 + asize c\<^sub>2 + 2)}" |
 "annotate f (WHILE b DO c) =
   {f 0} WHILE b DO {f 1} annotate (shift f 2) c {f(asize c + 2)}"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{annosdef}{1}{1}{% *}
+text_raw\<open>\snip{annosdef}{1}{1}{%\<close>
 fun annos :: "'a acom \<Rightarrow> 'a list" where
 "annos (SKIP {P}) = [P]" |
 "annos (x ::= e {P}) = [P]" |
@@ -60,7 +60,7 @@
 "annos (IF b THEN {P\<^sub>1} C\<^sub>1 ELSE {P\<^sub>2} C\<^sub>2 {Q}) =
   P\<^sub>1 # annos C\<^sub>1 @  P\<^sub>2 # annos C\<^sub>2 @ [Q]" |
 "annos ({I} WHILE b DO {P} C {Q}) = I # P # annos C @ [Q]"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 definition anno :: "'a acom \<Rightarrow> nat \<Rightarrow> 'a" where
 "anno C p = annos C ! p"
@@ -68,7 +68,7 @@
 definition post :: "'a acom \<Rightarrow>'a" where
 "post C = last(annos C)"
 
-text_raw{*\snip{mapacomdef}{1}{2}{% *}
+text_raw\<open>\snip{mapacomdef}{1}{2}{%\<close>
 fun map_acom :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a acom \<Rightarrow> 'b acom" where
 "map_acom f (SKIP {P}) = SKIP {f P}" |
 "map_acom f (x ::= e {P}) = x ::= e {f P}" |
@@ -78,7 +78,7 @@
   {f Q}" |
 "map_acom f ({I} WHILE b DO {P} C {Q}) =
   {f I} WHILE b DO {f P} map_acom f C {f Q}"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 
 lemma annos_ne: "annos C \<noteq> []"
--- a/src/HOL/IMP/AExp.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/AExp.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -8,24 +8,24 @@
 type_synonym val = int
 type_synonym state = "vname \<Rightarrow> val"
 
-text_raw{*\snip{AExpaexpdef}{2}{1}{% *}
+text_raw\<open>\snip{AExpaexpdef}{2}{1}{%\<close>
 datatype aexp = N int | V vname | Plus aexp aexp
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{AExpavaldef}{1}{2}{% *}
+text_raw\<open>\snip{AExpavaldef}{1}{2}{%\<close>
 fun aval :: "aexp \<Rightarrow> state \<Rightarrow> val" where
 "aval (N n) s = n" |
 "aval (V x) s = s x" |
 "aval (Plus a\<^sub>1 a\<^sub>2) s = aval a\<^sub>1 s + aval a\<^sub>2 s"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 
 value "aval (Plus (V ''x'') (N 5)) (\<lambda>x. if x = ''x'' then 7 else 0)"
 
-text {* The same state more concisely: *}
+text \<open>The same state more concisely:\<close>
 value "aval (Plus (V ''x'') (N 5)) ((\<lambda>x. 0) (''x'':= 7))"
 
-text {* A little syntax magic to write larger states compactly: *}
+text \<open>A little syntax magic to write larger states compactly:\<close>
 
 definition null_state ("<>") where
   "null_state \<equiv> \<lambda>x. 0"
@@ -35,28 +35,28 @@
   "_State ms" == "_Update <> ms"
   "_State (_updbinds b bs)" <= "_Update (_State b) bs"
 
-text {* \noindent
+text \<open>\noindent
   We can now write a series of updates to the function @{text "\<lambda>x. 0"} compactly:
-*}
+\<close>
 lemma "<a := 1, b := 2> = (<> (a := 1)) (b := (2::int))"
   by (rule refl)
 
 value "aval (Plus (V ''x'') (N 5)) <''x'' := 7>"
 
 
-text {* In the @{term[source] "<a := b>"} syntax, variables that are not mentioned are 0 by default:
-*}
+text \<open>In the @{term[source] "<a := b>"} syntax, variables that are not mentioned are 0 by default:
+\<close>
 value "aval (Plus (V ''x'') (N 5)) <''y'' := 7>"
 
-text{* Note that this @{text"<\<dots>>"} syntax works for any function space
-@{text"\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2"} where @{text "\<tau>\<^sub>2"} has a @{text 0}. *}
+text\<open>Note that this @{text"<\<dots>>"} syntax works for any function space
+@{text"\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2"} where @{text "\<tau>\<^sub>2"} has a @{text 0}.\<close>
 
 
 subsection "Constant Folding"
 
-text{* Evaluate constant subexpressions: *}
+text\<open>Evaluate constant subexpressions:\<close>
 
-text_raw{*\snip{AExpasimpconstdef}{0}{2}{% *}
+text_raw\<open>\snip{AExpasimpconstdef}{0}{2}{%\<close>
 fun asimp_const :: "aexp \<Rightarrow> aexp" where
 "asimp_const (N n) = N n" |
 "asimp_const (V x) = V x" |
@@ -64,7 +64,7 @@
   (case (asimp_const a\<^sub>1, asimp_const a\<^sub>2) of
     (N n\<^sub>1, N n\<^sub>2) \<Rightarrow> N(n\<^sub>1+n\<^sub>2) |
     (b\<^sub>1,b\<^sub>2) \<Rightarrow> Plus b\<^sub>1 b\<^sub>2)"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 theorem aval_asimp_const:
   "aval (asimp_const a) s = aval a s"
@@ -72,16 +72,16 @@
 apply (auto split: aexp.split)
 done
 
-text{* Now we also eliminate all occurrences of 0 in additions. The standard
-method: optimized versions of the constructors: *}
+text\<open>Now we also eliminate all occurrences of 0 in additions. The standard
+method: optimized versions of the constructors:\<close>
 
-text_raw{*\snip{AExpplusdef}{0}{2}{% *}
+text_raw\<open>\snip{AExpplusdef}{0}{2}{%\<close>
 fun plus :: "aexp \<Rightarrow> aexp \<Rightarrow> aexp" where
 "plus (N i\<^sub>1) (N i\<^sub>2) = N(i\<^sub>1+i\<^sub>2)" |
 "plus (N i) a = (if i=0 then a else Plus (N i) a)" |
 "plus a (N i) = (if i=0 then a else Plus a (N i))" |
 "plus a\<^sub>1 a\<^sub>2 = Plus a\<^sub>1 a\<^sub>2"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 lemma aval_plus[simp]:
   "aval (plus a1 a2) s = aval a1 s + aval a2 s"
@@ -89,16 +89,16 @@
 apply simp_all (* just for a change from auto *)
 done
 
-text_raw{*\snip{AExpasimpdef}{2}{0}{% *}
+text_raw\<open>\snip{AExpasimpdef}{2}{0}{%\<close>
 fun asimp :: "aexp \<Rightarrow> aexp" where
 "asimp (N n) = N n" |
 "asimp (V x) = V x" |
 "asimp (Plus a\<^sub>1 a\<^sub>2) = plus (asimp a\<^sub>1) (asimp a\<^sub>2)"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text{* Note that in @{const asimp_const} the optimized constructor was
+text\<open>Note that in @{const asimp_const} the optimized constructor was
 inlined. Making it a separate function @{const plus} improves modularity of
-the code and the proofs. *}
+the code and the proofs.\<close>
 
 value "asimp (Plus (Plus (N 0) (N 0)) (Plus (V ''x'') (N 0)))"
 
--- a/src/HOL/IMP/ASM.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/ASM.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,30 +4,30 @@
 
 subsection "Stack Machine"
 
-text_raw{*\snip{ASMinstrdef}{0}{1}{% *}
+text_raw\<open>\snip{ASMinstrdef}{0}{1}{%\<close>
 datatype instr = LOADI val | LOAD vname | ADD
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{ASMstackdef}{1}{2}{% *}
+text_raw\<open>\snip{ASMstackdef}{1}{2}{%\<close>
 type_synonym stack = "val list"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text{* \noindent Abbreviations are transparent: they are unfolded after
+text\<open>\noindent Abbreviations are transparent: they are unfolded after
 parsing and folded back again before printing. Internally, they do not
-exist.*}
+exist.\<close>
 
-text_raw{*\snip{ASMexeconedef}{0}{1}{% *}
+text_raw\<open>\snip{ASMexeconedef}{0}{1}{%\<close>
 fun exec1 :: "instr \<Rightarrow> state \<Rightarrow> stack \<Rightarrow> stack" where
 "exec1 (LOADI n) _ stk  =  n # stk" |
 "exec1 (LOAD x) s stk  =  s(x) # stk" |
 "exec1  ADD _ (j # i # stk)  =  (i + j) # stk"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{ASMexecdef}{1}{2}{% *}
+text_raw\<open>\snip{ASMexecdef}{1}{2}{%\<close>
 fun exec :: "instr list \<Rightarrow> state \<Rightarrow> stack \<Rightarrow> stack" where
 "exec [] _ stk = stk" |
 "exec (i#is) s stk = exec is s (exec1 i s stk)"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 value "exec [LOADI 5, LOAD ''y'', ADD] <''x'' := 42, ''y'' := 43> [50]"
 
@@ -40,12 +40,12 @@
 
 subsection "Compilation"
 
-text_raw{*\snip{ASMcompdef}{0}{2}{% *}
+text_raw\<open>\snip{ASMcompdef}{0}{2}{%\<close>
 fun comp :: "aexp \<Rightarrow> instr list" where
 "comp (N n) = [LOADI n]" |
 "comp (V x) = [LOAD x]" |
 "comp (Plus e\<^sub>1 e\<^sub>2) = comp e\<^sub>1 @ comp e\<^sub>2 @ [ADD]"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 value "comp (Plus (Plus (V ''x'') (N 1)) (V ''z''))"
 
--- a/src/HOL/IMP/Abs_Int0.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int0.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,9 +6,9 @@
 
 subsection "Orderings"
 
-text{* The basic type classes @{class order}, @{class semilattice_sup} and @{class order_top} are
+text\<open>The basic type classes @{class order}, @{class semilattice_sup} and @{class order_top} are
 defined in @{theory Main}, more precisely in theories @{theory Orderings} and @{theory Lattices}.
-If you view this theory with jedit, just click on the names to get there. *}
+If you view this theory with jedit, just click on the names to get there.\<close>
 
 class semilattice_sup_top = semilattice_sup + order_top
 
@@ -148,7 +148,7 @@
 "\<gamma>_option \<gamma> None = {}" |
 "\<gamma>_option \<gamma> (Some a) = \<gamma> a"
 
-text{* The interface for abstract values: *}
+text\<open>The interface for abstract values:\<close>
 
 locale Val_semilattice =
 fixes \<gamma> :: "'av::semilattice_sup_top \<Rightarrow> val set"
@@ -161,9 +161,9 @@
 
 type_synonym 'av st = "(vname \<Rightarrow> 'av)"
 
-text{* The for-clause (here and elsewhere) only serves the purpose of fixing
+text\<open>The for-clause (here and elsewhere) only serves the purpose of fixing
 the name of the type parameter @{typ 'av} which would otherwise be renamed to
-@{typ 'a}. *}
+@{typ 'a}.\<close>
 
 locale Abs_Int_fun = Val_semilattice where \<gamma>=\<gamma>
   for \<gamma> :: "'av::semilattice_sup_top \<Rightarrow> val set"
@@ -210,7 +210,7 @@
 lemma mono_gamma_c: "C1 \<le> C2 \<Longrightarrow> \<gamma>\<^sub>c C1 \<le> \<gamma>\<^sub>c C2"
 by (simp add: less_eq_acom_def mono_gamma_o size_annos anno_map_acom size_annos_same[of C1 C2])
 
-text{* Correctness: *}
+text\<open>Correctness:\<close>
 
 lemma aval'_correct: "s : \<gamma>\<^sub>s S \<Longrightarrow> aval a s : \<gamma>(aval' a S)"
 by (induct a) (auto simp: gamma_num' gamma_plus' \<gamma>_fun_def)
@@ -232,7 +232,7 @@
 proof(simp add: CS_def AI_def)
   assume 1: "pfp (step' \<top>) (bot c) = Some C"
   have pfp': "step' \<top> C \<le> C" by(rule pfp_pfp[OF 1])
-  have 2: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"  --"transfer the pfp'"
+  have 2: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"  \<comment>"transfer the pfp'"
   proof(rule order_trans)
     show "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c (step' \<top> C)" by(rule step_step')
     show "... \<le> \<gamma>\<^sub>c C" by (metis mono_gamma_c[OF pfp'])
@@ -299,7 +299,7 @@
   show "wf {(y,x). ((I x \<and> x \<le> f x) \<and> \<not> f x \<le> x) \<and> y = f x}"
     by(rule wf_subset[OF wf_measure[of m]]) (auto simp: m I)
 next
-  show "I x0 \<and> x0 \<le> f x0" using `I x0` `x0 \<le> f x0` by blast
+  show "I x0 \<and> x0 \<le> f x0" using \<open>I x0\<close> \<open>x0 \<le> f x0\<close> by blast
 next
   fix x assume "I x \<and> x \<le> f x" thus "I(f x) \<and> f x \<le> f(f x)"
     by (blast intro: I mono)
@@ -331,7 +331,7 @@
 definition m_c :: "'av st option acom \<Rightarrow> nat" ("m\<^sub>c") where
 "m_c C = sum_list (map (\<lambda>a. m_o a (vars C)) (annos C))"
 
-text{* Upper complexity bound: *}
+text\<open>Upper complexity bound:\<close>
 lemma m_c_h: "m_c C \<le> size(annos C) * (h * card(vars C) + 1)"
 proof-
   let ?X = "vars C" let ?n = "card ?X" let ?a = "size(annos C)"
@@ -351,11 +351,11 @@
 assumes m2: "x < y \<Longrightarrow> m x > m y"
 begin
 
-text{* The predicates @{text "top_on_ty a X"} that follow describe that any abstract
+text\<open>The predicates @{text "top_on_ty a X"} that follow describe that any abstract
 state in @{text a} maps all variables in @{text X} to @{term \<top>}.
 This is an important invariant for the termination proof where we argue that only
 the finitely many variables in the program change. That the others do not change
-follows because they remain @{term \<top>}. *}
+follows because they remain @{term \<top>}.\<close>
 
 fun top_on_st :: "'av st \<Rightarrow> vname set \<Rightarrow> bool" ("top'_on\<^sub>s") where
 "top_on_st S X = (\<forall>x\<in>X. S x = \<top>)"
@@ -409,7 +409,7 @@
   from assms(2,3,4) have "EX x:X. S1 x < S2 x"
     by(simp add: fun_eq_iff) (metis Compl_iff le_neq_trans)
   hence 2: "\<exists>x\<in>X. m(S1 x) > m(S2 x)" by (metis m2)
-  from sum_strict_mono_ex1[OF `finite X` 1 2]
+  from sum_strict_mono_ex1[OF \<open>finite X\<close> 1 2]
   show "(\<Sum>x\<in>X. m (S2 x)) < (\<Sum>x\<in>X. m (S1 x))" .
 qed
 
@@ -450,7 +450,7 @@
     using i(1) top(2) by(simp add: top_on_acom_def size_annos_same[OF strip_eq])
   from i have "m_o (annos C1 ! i) ?X > m_o (annos C2 ! i) ?X" (is "?P i")
     by (metis 0 less_option_def m_o2[OF finite_cvars topo1] topo2)
-  hence 2: "\<exists>i < size(annos C2). ?P i" using `i < size(annos C2)` by blast
+  hence 2: "\<exists>i < size(annos C2). ?P i" using \<open>i < size(annos C2)\<close> by blast
   have "(\<Sum>i<size(annos C2). m_o (annos C2 ! i) ?X)
          < (\<Sum>i<size(annos C2). m_o (annos C1 ! i) ?X)"
     apply(rule sum_strict_mono_ex1) using 1 2 by (auto)
@@ -480,7 +480,7 @@
 
 end
 
-text{* Problem: not executable because of the comparison of abstract states,
-i.e. functions, in the pre-fixpoint computation. *}
+text\<open>Problem: not executable because of the comparison of abstract states,
+i.e. functions, in the pre-fixpoint computation.\<close>
 
 end
--- a/src/HOL/IMP/Abs_Int1.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int1.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,7 +6,7 @@
 
 subsection "Computable Abstract Interpretation"
 
-text{* Abstract interpretation over type @{text st} instead of functions. *}
+text\<open>Abstract interpretation over type @{text st} instead of functions.\<close>
 
 context Gamma_semilattice
 begin
@@ -48,7 +48,7 @@
 by(simp add: step'_def)
 
 
-text{* Correctness: *}
+text\<open>Correctness:\<close>
 
 lemma step_step': "step (\<gamma>\<^sub>o S) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c (step' S C)"
 unfolding step_def step'_def
@@ -59,7 +59,7 @@
 proof(simp add: CS_def AI_def)
   assume 1: "pfp (step' \<top>) (bot c) = Some C"
   have pfp': "step' \<top> C \<le> C" by(rule pfp_pfp[OF 1])
-  have 2: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"  --"transfer the pfp'"
+  have 2: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"  \<comment>"transfer the pfp'"
   proof(rule order_trans)
     show "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c (step' \<top> C)" by(rule step_step')
     show "... \<le> \<gamma>\<^sub>c C" by (metis mono_gamma_c[OF pfp'])
@@ -120,7 +120,7 @@
 definition m_c :: "'av st option acom \<Rightarrow> nat" ("m\<^sub>c") where
 "m_c C = sum_list (map (\<lambda>a. m_o a (vars C)) (annos C))"
 
-text{* Upper complexity bound: *}
+text\<open>Upper complexity bound:\<close>
 lemma m_c_h: "m_c C \<le> size(annos C) * (h * card(vars C) + 1)"
 proof-
   let ?X = "vars C" let ?n = "card ?X" let ?a = "size(annos C)"
@@ -191,7 +191,7 @@
   from assms(2,3,4) have "EX x:X. S1 x < S2 x"
     by(simp add: fun_eq_iff) (metis Compl_iff le_neq_trans)
   hence 2: "\<exists>x\<in>X. m(S1 x) > m(S2 x)" by (metis m2)
-  from sum_strict_mono_ex1[OF `finite X` 1 2]
+  from sum_strict_mono_ex1[OF \<open>finite X\<close> 1 2]
   show "(\<Sum>x\<in>X. m (S2 x)) < (\<Sum>x\<in>X. m (S1 x))" .
 qed
 
@@ -234,7 +234,7 @@
     using i(1) top(2) by(simp add: top_on_acom_def size_annos_same[OF strip_eq])
   from i have "m_o (annos C1 ! i) ?X > m_o (annos C2 ! i) ?X" (is "?P i")
     by (metis 0 less_option_def m_o2[OF finite_cvars topo1] topo2)
-  hence 2: "\<exists>i < size(annos C2). ?P i" using `i < size(annos C2)` by blast
+  hence 2: "\<exists>i < size(annos C2). ?P i" using \<open>i < size(annos C2)\<close> by blast
   have "(\<Sum>i<size(annos C2). m_o (annos C2 ! i) ?X)
          < (\<Sum>i<size(annos C2). m_o (annos C1 ! i) ?X)"
     apply(rule sum_strict_mono_ex1) using 1 2 by (auto)
--- a/src/HOL/IMP/Abs_Int1_const.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int1_const.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -118,7 +118,7 @@
 value "show_acom (the(AI_const test6_const))"
 
 
-text{* Monotonicity: *}
+text\<open>Monotonicity:\<close>
 
 global_interpretation Abs_Int_mono
 where \<gamma> = \<gamma>_const and num' = Const and plus' = plus_const
@@ -126,7 +126,7 @@
   case 1 thus ?case by(auto simp: plus_const_cases split: const.split)
 qed
 
-text{* Termination: *}
+text\<open>Termination:\<close>
 
 definition m_const :: "const \<Rightarrow> nat" where
 "m_const x = (if x = Any then 0 else 1)"
--- a/src/HOL/IMP/Abs_Int1_parity.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int1_parity.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -8,29 +8,29 @@
 
 datatype parity = Even | Odd | Either
 
-text{* Instantiation of class @{class order} with type @{typ parity}: *}
+text\<open>Instantiation of class @{class order} with type @{typ parity}:\<close>
 
 instantiation parity :: order
 begin
 
-text{* First the definition of the interface function @{text"\<le>"}. Note that
+text\<open>First the definition of the interface function @{text"\<le>"}. Note that
 the header of the definition must refer to the ascii name @{const less_eq} of the
 constants as @{text less_eq_parity} and the definition is named @{text
-less_eq_parity_def}.  Inside the definition the symbolic names can be used. *}
+less_eq_parity_def}.  Inside the definition the symbolic names can be used.\<close>
 
 definition less_eq_parity where
 "x \<le> y = (y = Either \<or> x=y)"
 
-text{* We also need @{text"<"}, which is defined canonically: *}
+text\<open>We also need @{text"<"}, which is defined canonically:\<close>
 
 definition less_parity where
 "x < y = (x \<le> y \<and> \<not> y \<le> (x::parity))"
 
-text{*\noindent(The type annotation is necessary to fix the type of the polymorphic predicates.)
+text\<open>\noindent(The type annotation is necessary to fix the type of the polymorphic predicates.)
 
 Now the instance proof, i.e.\ the proof that the definition fulfills
 the axioms (assumptions) of the class. The initial proof-step generates the
-necessary proof obligations. *}
+necessary proof obligations.\<close>
 
 instance
 proof
@@ -47,7 +47,7 @@
 
 end
 
-text{* Instantiation of class @{class semilattice_sup_top} with type @{typ parity}: *}
+text\<open>Instantiation of class @{class semilattice_sup_top} with type @{typ parity}:\<close>
 
 instantiation parity :: semilattice_sup_top
 begin
@@ -58,11 +58,11 @@
 definition top_parity where
 "\<top> = Either"
 
-text{* Now the instance proof. This time we take a shortcut with the help of
+text\<open>Now the instance proof. This time we take a shortcut with the help of
 proof method @{text goal_cases}: it creates cases 1 ... n for the subgoals
 1 ... n; in case i, i is also the name of the assumptions of subgoal i and
 @{text "case?"} refers to the conclusion of subgoal i.
-The class axioms are presented in the same order as in the class definition. *}
+The class axioms are presented in the same order as in the class definition.\<close>
 
 instance
 proof (standard, goal_cases)
@@ -78,10 +78,10 @@
 end
 
 
-text{* Now we define the functions used for instantiating the abstract
+text\<open>Now we define the functions used for instantiating the abstract
 interpretation locales. Note that the Isabelle terminology is
 \emph{interpretation}, not \emph{instantiation} of locales, but we use
-instantiation to avoid confusion with abstract interpretation.  *}
+instantiation to avoid confusion with abstract interpretation.\<close>
 
 fun \<gamma>_parity :: "parity \<Rightarrow> val set" where
 "\<gamma>_parity Even = {i. i mod 2 = 0}" |
@@ -99,12 +99,12 @@
 "plus_parity Either y  = Either" |
 "plus_parity x Either  = Either"
 
-text{* First we instantiate the abstract value interface and prove that the
-functions on type @{typ parity} have all the necessary properties: *}
+text\<open>First we instantiate the abstract value interface and prove that the
+functions on type @{typ parity} have all the necessary properties:\<close>
 
 global_interpretation Val_semilattice
 where \<gamma> = \<gamma>_parity and num' = num_parity and plus' = plus_parity
-proof (standard, goal_cases) txt{* subgoals are the locale axioms *}
+proof (standard, goal_cases) txt\<open>subgoals are the locale axioms\<close>
   case 1 thus ?case by(auto simp: less_eq_parity_def)
 next
   case 2 show ?case by(auto simp: top_parity_def)
@@ -116,14 +116,14 @@
       (auto simp add: mod_add_eq [symmetric])
 qed
 
-text{* In case 4 we needed to refer to particular variables.
+text\<open>In case 4 we needed to refer to particular variables.
 Writing (i x y z) fixes the names of the variables in case i to be x, y and z
 in the left-to-right order in which the variables occur in the subgoal.
-Underscores are anonymous placeholders for variable names we don't care to fix. *}
+Underscores are anonymous placeholders for variable names we don't care to fix.\<close>
 
-text{* Instantiating the abstract interpretation locale requires no more
+text\<open>Instantiating the abstract interpretation locale requires no more
 proofs (they happened in the instatiation above) but delivers the
-instantiated abstract interpreter which we call @{text AI_parity}: *}
+instantiated abstract interpreter which we call @{text AI_parity}:\<close>
 
 global_interpretation Abs_Int
 where \<gamma> = \<gamma>_parity and num' = num_parity and plus' = plus_parity
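For readers reconstructing the locale axioms discharged above, a hypothetical Haskell rendering of the parity domain may help (Either' stands in for the theory's Either; every name here is invented for illustration):

    data Parity = Even | Odd | Either' deriving (Eq, Show)

    -- Concretisation: is the integer i covered by abstract value p?
    gammaP :: Parity -> Int -> Bool
    gammaP Even    i = i `mod` 2 == 0
    gammaP Odd     i = i `mod` 2 /= 0
    gammaP Either' _ = True

    numP :: Int -> Parity
    numP i = if even i then Even else Odd

    plusP :: Parity -> Parity -> Parity
    plusP Even Even = Even
    plusP Odd  Odd  = Even
    plusP Even Odd  = Odd
    plusP Odd  Even = Odd
    plusP _    _    = Either'

    -- The key soundness fact (case 4 of the interpretation above):
    --   gammaP a i && gammaP b j  implies  gammaP (plusP a b) (i + j)

Checking the Odd/Odd clause: if i mod 2 /= 0 and j mod 2 /= 0 then (i+j) mod 2 == 0, matching plusP Odd Odd = Even.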
--- a/src/HOL/IMP/Abs_Int2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -83,14 +83,14 @@
  (let (a1,a2) = inv_plus' a (aval'' e1 S) (aval'' e2 S)
   in inv_aval' e1 a1 (inv_aval' e2 a2 S))"
 
-text{* The test for @{const bot} in the @{const V}-case is important: @{const
+text\<open>The test for @{const bot} in the @{const V}-case is important: @{const
 bot} indicates that a variable has no possible values, i.e.\ that the current
 program point is unreachable. But then the abstract state should collapse to
 @{const None}. Put differently, we maintain the invariant that in an abstract
 state of the form @{term"Some s"}, all variables are mapped to non-@{const
 bot} values. Otherwise the (pointwise) sup of two abstract states, one of
 which contains @{const bot} values, may produce too large a result, thus
-making the analysis less precise. *}
+making the analysis less precise.\<close>
 
 
 fun inv_bval' :: "bexp \<Rightarrow> bool \<Rightarrow> 'av st option \<Rightarrow> 'av st option" where
@@ -108,7 +108,7 @@
   case N thus ?case by simp (metis test_num')
 next
   case (V x)
-  obtain S' where "S = Some S'" and "s : \<gamma>\<^sub>s S'" using `s : \<gamma>\<^sub>o S`
+  obtain S' where "S = Some S'" and "s : \<gamma>\<^sub>s S'" using \<open>s : \<gamma>\<^sub>o S\<close>
     by(auto simp: in_gamma_option_iff)
   moreover hence "s x : \<gamma> (fun S' x)"
     by(simp add: \<gamma>_st_def)
@@ -170,7 +170,7 @@
 proof(simp add: CS_def AI_def)
   assume 1: "pfp (step' \<top>) (bot c) = Some C"
   have pfp': "step' \<top> C \<le> C" by(rule pfp_pfp[OF 1])
-  have 2: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"  --"transfer the pfp'"
+  have 2: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"  \<comment>"transfer the pfp'"
   proof(rule order_trans)
     show "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c (step' \<top> C)" by(rule step_step')
     show "... \<le> \<gamma>\<^sub>c C" by (metis mono_gamma_c[OF pfp'])
--- a/src/HOL/IMP/Abs_Int2_ivl.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int2_ivl.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -124,7 +124,7 @@
 
 end
 
-text{* Implement (naive) executable equality: *}
+text\<open>Implement (naive) executable equality:\<close>
 instantiation ivl :: equal
 begin
 
@@ -362,7 +362,7 @@
 ..
 
 
-text{* Monotonicity: *}
+text\<open>Monotonicity:\<close>
 
 lemma mono_plus_ivl: "iv1 \<le> iv2 \<Longrightarrow> iv3 \<le> iv4 \<Longrightarrow> iv1+iv3 \<le> iv2+(iv4::ivl)"
 apply transfer
@@ -405,7 +405,7 @@
 
 value "show_acom_opt (AI_ivl test1_ivl)"
 
-text{* Better than @{text AI_const}: *}
+text\<open>Better than @{text AI_const}:\<close>
 value "show_acom_opt (AI_ivl test3_const)"
 value "show_acom_opt (AI_ivl test4_const)"
 value "show_acom_opt (AI_ivl test6_const)"
@@ -418,8 +418,8 @@
 value "show_acom (steps test2_ivl 2)"
 value "show_acom (steps test2_ivl 3)"
 
-text{* Fixed point reached in 2 steps.
- Not so if the start value of x is known: *}
+text\<open>Fixed point reached in 2 steps.
+ Not so if the start value of x is known:\<close>
 
 value "show_acom_opt (AI_ivl test3_ivl)"
 value "show_acom (steps test3_ivl 0)"
@@ -429,17 +429,17 @@
 value "show_acom (steps test3_ivl 4)"
 value "show_acom (steps test3_ivl 5)"
 
-text{* Takes as many iterations as the actual execution. Would diverge if
+text\<open>Takes as many iterations as the actual execution. Would diverge if
 the loop did not terminate. Worse still, as the following example shows: even if
 the actual execution terminates, the analysis may not. The value of y keeps
-decreasing as the analysis is iterated, no matter how long: *}
+decreasing as the analysis is iterated, no matter how long:\<close>
 
 value "show_acom (steps test4_ivl 50)"
 
-text{* Relationships between variables are NOT captured: *}
+text\<open>Relationships between variables are NOT captured:\<close>
 value "show_acom_opt (AI_ivl test5_ivl)"
 
-text{* Again, the analysis would not terminate: *}
+text\<open>Again, the analysis would not terminate:\<close>
 value "show_acom (steps test6_ivl 50)"
 
 end
--- a/src/HOL/IMP/Abs_Int3.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int3.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -193,7 +193,7 @@
     note 1 = conjunct1[OF 12] and 2 = conjunct2[OF 12]
     let ?p' = "p \<triangle> f p"
     show "P ?p'" by (blast intro: P Pinv)
-    have "f ?p' \<le> f p" by(rule mono[OF `P (p \<triangle> f p)`  P narrow2_acom[OF 1]])
+    have "f ?p' \<le> f p" by(rule mono[OF \<open>P (p \<triangle> f p)\<close>  P narrow2_acom[OF 1]])
     also have "\<dots> \<le> ?p'" by(rule narrow1_acom[OF 1])
     finally show "f ?p' \<le> ?p'" .
     have "?p' \<le> p" by (rule narrow2_acom[OF 1])
@@ -267,9 +267,9 @@
 definition "step_up_ivl n = ((\<lambda>C. C \<nabla> step_ivl \<top> C)^^n)"
 definition "step_down_ivl n = ((\<lambda>C. C \<triangle> step_ivl \<top> C)^^n)"
 
-text{* For @{const test3_ivl}, @{const AI_ivl} needed as many iterations as
+text\<open>For @{const test3_ivl}, @{const AI_ivl} needed as many iterations as
 the loop took to execute. In contrast, @{const AI_wn_ivl} converges in a
-constant number of steps: *}
+constant number of steps:\<close>
 
 value "show_acom (step_up_ivl 1 (bot test3_ivl))"
 value "show_acom (step_up_ivl 2 (bot test3_ivl))"
@@ -286,7 +286,7 @@
 value "show_acom_opt (AI_wn_ivl test3_ivl)"
 
 
-text{* Now all the analyses terminate: *}
+text\<open>Now all the analyses terminate:\<close>
 
 value "show_acom_opt (AI_wn_ivl test4_ivl)"
 value "show_acom_opt (AI_wn_ivl test5_ivl)"
@@ -322,11 +322,11 @@
   \<Longrightarrow> top_on_acom (C1 \<triangle> C2 :: _ st option acom) X"
 by(auto simp add: narrow_acom_def top_on_acom_def)(metis top_on_opt_narrow in_set_zipE)
 
-text{* The assumptions for widening and narrowing differ because during
+text\<open>The assumptions for widening and narrowing differ because during
 narrowing we have the invariant @{prop"y \<le> x"} (where @{text y} is the next
 iterate), but during widening there is no such invariant; there we only know
 that @{prop"y \<le> x"} does not hold yet. This complicates the termination proof for
-widening. *}
+widening.\<close>
 
 locale Measure_wn = Measure1 where m=m
   for m :: "'av::{order_top,wn} \<Rightarrow> nat" +
@@ -359,7 +359,7 @@
     by(auto simp add: Ball_def)
   hence 2: "\<exists>x\<in>X. m(S1 x) > m(S1 x \<nabla> S2 x)"
     using assms(3) m_widen by blast
-  from sum_strict_mono_ex1[OF `finite X` 1 2]
+  from sum_strict_mono_ex1[OF \<open>finite X\<close> 1 2]
   show ?thesis .
 qed
 
@@ -413,7 +413,7 @@
   hence 2: "\<exists>x\<in>X. n(S1 x \<triangle> S2 x) < n(S1 x)"
     by (metis assms(3-5) eq_iff less_le_not_le n_narrow)
   show ?thesis
-    apply(rule sum_strict_mono_ex1[OF `finite X`]) using 1 2 by blast+
+    apply(rule sum_strict_mono_ex1[OF \<open>finite X\<close>]) using 1 2 by blast+
 qed
 
 lemma n_s_narrow: "finite X \<Longrightarrow> fun S1 = fun S2 on -X \<Longrightarrow> S2 \<le> S1 \<Longrightarrow> S1 \<triangle> S2 < S1
@@ -469,7 +469,7 @@
 and "P C" shows "EX C'. iter_widen f C = Some C'"
 proof(simp add: iter_widen_def,
       rule measure_while_option_Some[where P = P and f=m])
-  show "P C" by(rule `P C`)
+  show "P C" by(rule \<open>P C\<close>)
 next
   fix C assume "P C" "\<not> f C \<le> C" thus "P (C \<nabla> f C) \<and> m (C \<nabla> f C) < m C"
     by(simp add: P_f P_widen m_widen)
@@ -552,7 +552,7 @@
   case 3 thus ?case by(rule m_ivl_widen)
 next
   case 4 from 4(2) show ?case by(rule n_ivl_narrow)
-  -- "note that the first assms is unnecessary for intervals"
+  \<comment> "note that the first assms is unnecessary for intervals"
 qed
 
 lemma iter_winden_step_ivl_termination:
@@ -585,8 +585,8 @@
 
 subsubsection "Counterexamples"
 
-text{* Widening is increasing by assumption, but @{prop"x \<le> f x"} is not an invariant of widening.
-It can already be lost after the first step: *}
+text\<open>Widening is increasing by assumption, but @{prop"x \<le> f x"} is not an invariant of widening.
+It can already be lost after the first step:\<close>
 
 lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y"
 and "x \<le> f x" and "\<not> f x \<le> x" shows "x \<nabla> f x \<le> f(x \<nabla> f x)"
@@ -601,9 +601,9 @@
 *)
 oops
 
-text{* Widening terminates but may converge more slowly than Kleene iteration.
+text\<open>Widening terminates but may converge more slowly than Kleene iteration.
 In the following model, Kleene iteration goes from 0 to the least pfp
-in one step but widening takes 2 steps to reach a strictly larger pfp: *}
+in one step but widening takes 2 steps to reach a strictly larger pfp:\<close>
 lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y"
 and "x \<le> f x" and "\<not> f x \<le> x" and "f(f x) \<le> f x"
 shows "f(x \<nabla> f x) \<le> x \<nabla> f x"
--- a/src/HOL/IMP/Abs_Int_Tests.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int_Tests.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,37 +4,37 @@
 
 subsection "Test Programs"
 
-text{* For constant propagation: *}
+text\<open>For constant propagation:\<close>
 
-text{* Straight line code: *}
+text\<open>Straight line code:\<close>
 definition "test1_const =
  ''y'' ::= N 7;;
  ''z'' ::= Plus (V ''y'') (N 2);;
  ''y'' ::= Plus (V ''x'') (N 0)"
 
-text{* Conditional: *}
+text\<open>Conditional:\<close>
 definition "test2_const =
  IF Less (N 41) (V ''x'') THEN ''x'' ::= N 5 ELSE ''x'' ::= N 5"
 
-text{* Conditional, test is relevant: *}
+text\<open>Conditional, test is relevant:\<close>
 definition "test3_const =
  ''x'' ::= N 42;;
  IF Less (N 41) (V ''x'') THEN ''x'' ::= N 5 ELSE ''x'' ::= N 6"
 
-text{* While: *}
+text\<open>While:\<close>
 definition "test4_const =
  ''x'' ::= N 0;; WHILE Bc True DO ''x'' ::= N 0"
 
-text{* While, test is relevant: *}
+text\<open>While, test is relevant:\<close>
 definition "test5_const =
  ''x'' ::= N 0;; WHILE Less (V ''x'') (N 1) DO ''x'' ::= N 1"
 
-text{* Iteration is needed: *}
+text\<open>Iteration is needed:\<close>
 definition "test6_const =
   ''x'' ::= N 0;; ''y'' ::= N 0;; ''z'' ::= N 2;;
   WHILE Less (V ''x'') (N 1) DO (''x'' ::= V ''y'';; ''y'' ::= V ''z'')"
 
-text{* For intervals: *}
+text\<open>For intervals:\<close>
 
 definition "test1_ivl =
  ''y'' ::= N 7;;
--- a/src/HOL/IMP/Abs_Int_init.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_Int_init.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,6 +4,6 @@
         Vars Collecting Abs_Int_Tests
 begin
 
-hide_const (open) top bot dom  --"to avoid qualified names"
+hide_const (open) top bot dom  \<comment>"to avoid qualified names"
 
 end
--- a/src/HOL/IMP/Abs_State.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Abs_State.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -10,15 +10,15 @@
 "fun_rep [] = (\<lambda>x. \<top>)" |
 "fun_rep ((x,a)#ps) = (fun_rep ps) (x := a)"
 
-lemma fun_rep_map_of[code]: --"original def is too slow"
+lemma fun_rep_map_of[code]: \<comment>"original def is too slow"
   "fun_rep ps = (%x. case map_of ps x of None \<Rightarrow> \<top> | Some a \<Rightarrow> a)"
 by(induction ps rule: fun_rep.induct) auto
 
 definition eq_st :: "('a::top) st_rep \<Rightarrow> 'a st_rep \<Rightarrow> bool" where
 "eq_st S1 S2 = (fun_rep S1 = fun_rep S2)"
 
-hide_type st  --"hide previous def to avoid long names"
-declare [[typedef_overloaded]] --"allow quotient types to depend on classes"
+hide_type st  \<comment>"hide previous def to avoid long names"
+declare [[typedef_overloaded]] \<comment>"allow quotient types to depend on classes"
 
 quotient_type 'a st = "('a::top) st_rep" / eq_st
 morphisms rep_st St
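The [code] lemma in this hunk swaps the nest of function updates for a single map_of lookup. A hypothetical Haskell analogue of the two readings (first matching entry wins, as with map_of):

    import qualified Data.Map as M

    -- Direct reading of fun_rep: a cascade of pointwise updates.
    funRep :: Eq k => a -> [(k, a)] -> (k -> a)   -- first argument plays the role of top
    funRep top []         = const top
    funRep top ((x,a):ps) = \y -> if y == x then a else funRep top ps y

    -- Reading of the [code] lemma: one finite-map lookup instead;
    -- reverse makes earlier entries win, matching map_of.
    funRep' :: Ord k => a -> [(k, a)] -> (k -> a)
    funRep' top ps = \x -> M.findWithDefault top x (M.fromList (reverse ps))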
--- a/src/HOL/IMP/BExp.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/BExp.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,13 +4,13 @@
 
 datatype bexp = Bc bool | Not bexp | And bexp bexp | Less aexp aexp
 
-text_raw{*\snip{BExpbvaldef}{1}{2}{% *}
+text_raw\<open>\snip{BExpbvaldef}{1}{2}{%\<close>
 fun bval :: "bexp \<Rightarrow> state \<Rightarrow> bool" where
 "bval (Bc v) s = v" |
 "bval (Not b) s = (\<not> bval b s)" |
 "bval (And b\<^sub>1 b\<^sub>2) s = (bval b\<^sub>1 s \<and> bval b\<^sub>2 s)" |
 "bval (Less a\<^sub>1 a\<^sub>2) s = (aval a\<^sub>1 s < aval a\<^sub>2 s)"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 value "bval (Less (V ''x'') (Plus (N 3) (V ''y'')))
             <''x'' := 3, ''y'' := 1>"
@@ -18,54 +18,54 @@
 
 subsection "Constant Folding"
 
-text{* Optimizing constructors: *}
+text\<open>Optimizing constructors:\<close>
 
-text_raw{*\snip{BExplessdef}{0}{2}{% *}
+text_raw\<open>\snip{BExplessdef}{0}{2}{%\<close>
 fun less :: "aexp \<Rightarrow> aexp \<Rightarrow> bexp" where
 "less (N n\<^sub>1) (N n\<^sub>2) = Bc(n\<^sub>1 < n\<^sub>2)" |
 "less a\<^sub>1 a\<^sub>2 = Less a\<^sub>1 a\<^sub>2"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 lemma [simp]: "bval (less a1 a2) s = (aval a1 s < aval a2 s)"
 apply(induction a1 a2 rule: less.induct)
 apply simp_all
 done
 
-text_raw{*\snip{BExpanddef}{2}{2}{% *}
+text_raw\<open>\snip{BExpanddef}{2}{2}{%\<close>
 fun "and" :: "bexp \<Rightarrow> bexp \<Rightarrow> bexp" where
 "and (Bc True) b = b" |
 "and b (Bc True) = b" |
 "and (Bc False) b = Bc False" |
 "and b (Bc False) = Bc False" |
 "and b\<^sub>1 b\<^sub>2 = And b\<^sub>1 b\<^sub>2"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 lemma bval_and[simp]: "bval (and b1 b2) s = (bval b1 s \<and> bval b2 s)"
 apply(induction b1 b2 rule: and.induct)
 apply simp_all
 done
 
-text_raw{*\snip{BExpnotdef}{2}{2}{% *}
+text_raw\<open>\snip{BExpnotdef}{2}{2}{%\<close>
 fun not :: "bexp \<Rightarrow> bexp" where
 "not (Bc True) = Bc False" |
 "not (Bc False) = Bc True" |
 "not b = Not b"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 lemma bval_not[simp]: "bval (not b) s = (\<not> bval b s)"
 apply(induction b rule: not.induct)
 apply simp_all
 done
 
-text{* Now the overall optimizer: *}
+text\<open>Now the overall optimizer:\<close>
 
-text_raw{*\snip{BExpbsimpdef}{0}{2}{% *}
+text_raw\<open>\snip{BExpbsimpdef}{0}{2}{%\<close>
 fun bsimp :: "bexp \<Rightarrow> bexp" where
 "bsimp (Bc v) = Bc v" |
 "bsimp (Not b) = not(bsimp b)" |
 "bsimp (And b\<^sub>1 b\<^sub>2) = and (bsimp b\<^sub>1) (bsimp b\<^sub>2)" |
 "bsimp (Less a\<^sub>1 a\<^sub>2) = less (asimp a\<^sub>1) (asimp a\<^sub>2)"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 value "bsimp (And (Less (N 0) (N 1)) b)"
 
--- a/src/HOL/IMP/Big_Step.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Big_Step.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,13 +4,13 @@
 
 subsection "Big-Step Semantics of Commands"
 
-text {*
+text \<open>
 The big-step semantics is a straight-forward inductive definition
 with concrete syntax. Note that the first parameter is a tuple,
 so the syntax becomes @{text "(c,s) \<Rightarrow> s'"}.
-*}
+\<close>
 
-text_raw{*\snip{BigStepdef}{0}{1}{% *}
+text_raw\<open>\snip{BigStepdef}{0}{1}{%\<close>
 inductive
   big_step :: "com \<times> state \<Rightarrow> state \<Rightarrow> bool" (infix "\<Rightarrow>" 55)
 where
@@ -23,30 +23,30 @@
 WhileTrue:
 "\<lbrakk> bval b s\<^sub>1;  (c,s\<^sub>1) \<Rightarrow> s\<^sub>2;  (WHILE b DO c, s\<^sub>2) \<Rightarrow> s\<^sub>3 \<rbrakk> 
 \<Longrightarrow> (WHILE b DO c, s\<^sub>1) \<Rightarrow> s\<^sub>3"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text_raw{*\snip{BigStepEx}{1}{2}{% *}
+text_raw\<open>\snip{BigStepEx}{1}{2}{%\<close>
 schematic_goal ex: "(''x'' ::= N 5;; ''y'' ::= V ''x'', s) \<Rightarrow> ?t"
 apply(rule Seq)
 apply(rule Assign)
 apply simp
 apply(rule Assign)
 done
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 thm ex[simplified]
 
-text{* We want to execute the big-step rules: *}
+text\<open>We want to execute the big-step rules:\<close>
 
 code_pred big_step .
 
-text{* For inductive definitions we need command
-       \texttt{values} instead of \texttt{value}. *}
+text\<open>For inductive definitions we need command
+       \texttt{values} instead of \texttt{value}.\<close>
 
 values "{t. (SKIP, \<lambda>_. 0) \<Rightarrow> t}"
 
-text{* We need to translate the result state into a list
-to display it. *}
+text\<open>We need to translate the result state into a list
+to display it.\<close>
 
 values "{map t [''x''] |t. (SKIP, <''x'' := 42>) \<Rightarrow> t}"
 
@@ -57,46 +57,46 @@
    <''x'' := 0, ''y'' := 13>) \<Rightarrow> t}"
 
 
-text{* Proof automation: *}
+text\<open>Proof automation:\<close>
 
-text {* The introduction rules are good for automatically
+text \<open>The introduction rules are good for automatically
 constructing small program executions. The recursive cases
 may require backtracking, so we declare the set as unsafe
-intro rules. *}
+intro rules.\<close>
 declare big_step.intros [intro]
 
-text{* The standard induction rule 
-@{thm [display] big_step.induct [no_vars]} *}
+text\<open>The standard induction rule 
+@{thm [display] big_step.induct [no_vars]}\<close>
 
 thm big_step.induct
 
-text{*
+text\<open>
 This induction schema is almost perfect for our purposes, but
 our trick for reusing the tuple syntax means that the induction
 schema has two parameters instead of the @{text c}, @{text s},
 and @{text s'} that we are likely to encounter. Splitting
 the tuple parameter fixes this:
-*}
+\<close>
 lemmas big_step_induct = big_step.induct[split_format(complete)]
 thm big_step_induct
-text {*
+text \<open>
 @{thm [display] big_step_induct [no_vars]}
-*}
+\<close>
 
 
 subsection "Rule inversion"
 
-text{* What can we deduce from @{prop "(SKIP,s) \<Rightarrow> t"} ?
-That @{prop "s = t"}. This is how we can automatically prove it: *}
+text\<open>What can we deduce from @{prop "(SKIP,s) \<Rightarrow> t"} ?
+That @{prop "s = t"}. This is how we can automatically prove it:\<close>
 
 inductive_cases SkipE[elim!]: "(SKIP,s) \<Rightarrow> t"
 thm SkipE
 
-text{* This is an \emph{elimination rule}. The [elim] attribute tells auto,
+text\<open>This is an \emph{elimination rule}. The [elim] attribute tells auto,
 blast and friends (but not simp!) to use it automatically; [elim!] means that
 it is applied eagerly.
 
-Similarly for the other commands: *}
+Similarly for the other commands:\<close>
 
 inductive_cases AssignE[elim!]: "(x ::= a,s) \<Rightarrow> t"
 thm AssignE
@@ -107,20 +107,20 @@
 
 inductive_cases WhileE[elim]: "(WHILE b DO c,s) \<Rightarrow> t"
 thm WhileE
-text{* Only [elim]: [elim!] would not terminate. *}
+text\<open>Only [elim]: [elim!] would not terminate.\<close>
 
-text{* An automatic example: *}
+text\<open>An automatic example:\<close>
 
 lemma "(IF b THEN SKIP ELSE SKIP, s) \<Rightarrow> t \<Longrightarrow> t = s"
 by blast
 
-text{* Rule inversion by hand via the ``cases'' method: *}
+text\<open>Rule inversion by hand via the ``cases'' method:\<close>
 
 lemma assumes "(IF b THEN SKIP ELSE SKIP, s) \<Rightarrow> t"
 shows "t = s"
 proof-
   from assms show ?thesis
-  proof cases  --"inverting assms"
+  proof cases  \<comment>"inverting assms"
     case IfTrue thm IfTrue
     thus ?thesis by blast
   next
@@ -133,7 +133,7 @@
   "(x ::= a,s) \<Rightarrow> s' \<longleftrightarrow> (s' = s(x := aval a s))"
   by auto
 
-text {* An example combining rule inversion and derivations *}
+text \<open>An example combining rule inversion and derivations\<close>
 lemma Seq_assoc:
   "(c1;; c2;; c3, s) \<Rightarrow> s' \<longleftrightarrow> (c1;; (c2;; c3), s) \<Rightarrow> s'"
 proof
@@ -147,7 +147,7 @@
   with c1
   show "(c1;; (c2;; c3), s) \<Rightarrow> s'" by (rule Seq)
 next
-  -- "The other direction is analogous"
+  \<comment> "The other direction is analogous"
   assume "(c1;; (c2;; c3), s) \<Rightarrow> s'"
   thus "(c1;; c2;; c3, s) \<Rightarrow> s'" by auto
 qed
@@ -155,70 +155,70 @@
 
 subsection "Command Equivalence"
 
-text {*
+text \<open>
   We call two statements @{text c} and @{text c'} equivalent wrt.\ the
   big-step semantics when \emph{@{text c} started in @{text s} terminates
   in @{text s'} iff @{text c'} started in the same @{text s} also terminates
   in the same @{text s'}}. Formally:
-*}
-text_raw{*\snip{BigStepEquiv}{0}{1}{% *}
+\<close>
+text_raw\<open>\snip{BigStepEquiv}{0}{1}{%\<close>
 abbreviation
   equiv_c :: "com \<Rightarrow> com \<Rightarrow> bool" (infix "\<sim>" 50) where
   "c \<sim> c' \<equiv> (\<forall>s t. (c,s) \<Rightarrow> t  =  (c',s) \<Rightarrow> t)"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text {*
+text \<open>
 Warning: @{text"\<sim>"} is the symbol written \verb!\ < s i m >! (without spaces).
 
   As an example, we show that loop unfolding is an equivalence
   transformation on programs:
-*}
+\<close>
 lemma unfold_while:
   "(WHILE b DO c) \<sim> (IF b THEN c;; WHILE b DO c ELSE SKIP)" (is "?w \<sim> ?iw")
 proof -
-  -- "to show the equivalence, we look at the derivation tree for"
-  -- "each side and from that construct a derivation tree for the other side"
+  \<comment> "to show the equivalence, we look at the derivation tree for"
+  \<comment> "each side and from that construct a derivation tree for the other side"
   have "(?iw, s) \<Rightarrow> t" if assm: "(?w, s) \<Rightarrow> t" for s t
   proof -
     from assm show ?thesis
-    proof cases --"rule inversion on \<open>(?w, s) \<Rightarrow> t\<close>"
+    proof cases \<comment>"rule inversion on \<open>(?w, s) \<Rightarrow> t\<close>"
       case WhileFalse
       thus ?thesis by blast
     next
       case WhileTrue
-      from `bval b s` `(?w, s) \<Rightarrow> t` obtain s' where
+      from \<open>bval b s\<close> \<open>(?w, s) \<Rightarrow> t\<close> obtain s' where
         "(c, s) \<Rightarrow> s'" and "(?w, s') \<Rightarrow> t" by auto
-      -- "now we can build a derivation tree for the @{text IF}"
-      -- "first, the body of the True-branch:"
+      \<comment> "now we can build a derivation tree for the @{text IF}"
+      \<comment> "first, the body of the True-branch:"
       hence "(c;; ?w, s) \<Rightarrow> t" by (rule Seq)
-      -- "then the whole @{text IF}"
-      with `bval b s` show ?thesis by (rule IfTrue)
+      \<comment> "then the whole @{text IF}"
+      with \<open>bval b s\<close> show ?thesis by (rule IfTrue)
     qed
   qed
   moreover
-  -- "now the other direction:"
+  \<comment> "now the other direction:"
   have "(?w, s) \<Rightarrow> t" if assm: "(?iw, s) \<Rightarrow> t" for s t
   proof -
     from assm show ?thesis
-    proof cases --"rule inversion on \<open>(?iw, s) \<Rightarrow> t\<close>"
+    proof cases \<comment>"rule inversion on \<open>(?iw, s) \<Rightarrow> t\<close>"
       case IfFalse
-      hence "s = t" using `(?iw, s) \<Rightarrow> t` by blast
-      thus ?thesis using `\<not>bval b s` by blast
+      hence "s = t" using \<open>(?iw, s) \<Rightarrow> t\<close> by blast
+      thus ?thesis using \<open>\<not>bval b s\<close> by blast
     next
       case IfTrue
-      -- "and for this, only the Seq-rule is applicable:"
-      from `(c;; ?w, s) \<Rightarrow> t` obtain s' where
+      \<comment> "and for this, only the Seq-rule is applicable:"
+      from \<open>(c;; ?w, s) \<Rightarrow> t\<close> obtain s' where
         "(c, s) \<Rightarrow> s'" and "(?w, s') \<Rightarrow> t" by auto
-      -- "with this information, we can build a derivation tree for @{text WHILE}"
-      with `bval b s` show ?thesis by (rule WhileTrue)
+      \<comment> "with this information, we can build a derivation tree for @{text WHILE}"
+      with \<open>bval b s\<close> show ?thesis by (rule WhileTrue)
     qed
   qed
   ultimately
   show ?thesis by blast
 qed
 
-text {* Luckily, such lengthy proofs are seldom necessary.  Isabelle can
-prove many such facts automatically.  *}
+text \<open>Luckily, such lengthy proofs are seldom necessary.  Isabelle can
+prove many such facts automatically.\<close>
 
 lemma while_unfold:
   "(WHILE b DO c) \<sim> (IF b THEN c;; WHILE b DO c ELSE SKIP)"
@@ -244,9 +244,9 @@
 lemma sim_while_cong: "c \<sim> c' \<Longrightarrow> WHILE b DO c \<sim> WHILE b DO c'"
 by (metis sim_while_cong_aux)
 
-text {* Command equivalence is an equivalence relation, i.e.\ it is
+text \<open>Command equivalence is an equivalence relation, i.e.\ it is
 reflexive, symmetric, and transitive. Because we used an abbreviation
-above, Isabelle derives this automatically. *}
+above, Isabelle derives this automatically.\<close>
 
 lemma sim_refl:  "c \<sim> c" by simp
 lemma sim_sym:   "(c \<sim> c') = (c' \<sim> c)" by auto
@@ -254,35 +254,35 @@
 
 subsection "Execution is deterministic"
 
-text {* This proof is automatic. *}
+text \<open>This proof is automatic.\<close>
 
 theorem big_step_determ: "\<lbrakk> (c,s) \<Rightarrow> t; (c,s) \<Rightarrow> u \<rbrakk> \<Longrightarrow> u = t"
   by (induction arbitrary: u rule: big_step.induct) blast+
 
-text {*
+text \<open>
   This is the proof as you might present it in a lecture. The remaining
   cases are simple enough to be proved automatically:
-*}
-text_raw{*\snip{BigStepDetLong}{0}{2}{% *}
+\<close>
+text_raw\<open>\snip{BigStepDetLong}{0}{2}{%\<close>
 theorem
   "(c,s) \<Rightarrow> t  \<Longrightarrow>  (c,s) \<Rightarrow> t'  \<Longrightarrow>  t' = t"
 proof (induction arbitrary: t' rule: big_step.induct)
-  -- "the only interesting case, @{text WhileTrue}:"
+  \<comment> "the only interesting case, @{text WhileTrue}:"
   fix b c s s\<^sub>1 t t'
-  -- "The assumptions of the rule:"
+  \<comment> "The assumptions of the rule:"
   assume "bval b s" and "(c,s) \<Rightarrow> s\<^sub>1" and "(WHILE b DO c,s\<^sub>1) \<Rightarrow> t"
-  -- {* Ind.Hyp; note the @{text"\<And>"} because of arbitrary: *}
+  \<comment> \<open>Ind.Hyp; note the @{text"\<And>"} because of arbitrary:\<close>
   assume IHc: "\<And>t'. (c,s) \<Rightarrow> t' \<Longrightarrow> t' = s\<^sub>1"
   assume IHw: "\<And>t'. (WHILE b DO c,s\<^sub>1) \<Rightarrow> t' \<Longrightarrow> t' = t"
-  -- "Premise of implication:"
+  \<comment> "Premise of implication:"
   assume "(WHILE b DO c,s) \<Rightarrow> t'"
-  with `bval b s` obtain s\<^sub>1' where
+  with \<open>bval b s\<close> obtain s\<^sub>1' where
       c: "(c,s) \<Rightarrow> s\<^sub>1'" and
       w: "(WHILE b DO c,s\<^sub>1') \<Rightarrow> t'"
     by auto
   from c IHc have "s\<^sub>1' = s\<^sub>1" by blast
   with w IHw show "t' = t" by blast
-qed blast+ -- "prove the rest automatically"
-text_raw{*}%endsnip*}
+qed blast+ \<comment> "prove the rest automatically"
+text_raw\<open>}%endsnip\<close>
 
 end
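Having just proved execution deterministic, the relation (c,s) => t can also be read as a (possibly diverging) function. A hypothetical, self-contained Haskell reading of the big-step rules, one clause per rule (names invented, not part of the changeset):

    type State = String -> Int

    data AExp = N Int | V String | Plus AExp AExp
    data BExp = Bc Bool | Not BExp | And BExp BExp | Less AExp AExp
    data Com  = SKIP | Assign String AExp | Seq Com Com
              | If BExp Com Com | While BExp Com

    aval :: AExp -> State -> Int
    aval (N n)      _ = n
    aval (V x)      s = s x
    aval (Plus a b) s = aval a s + aval b s

    bval :: BExp -> State -> Bool
    bval (Bc v)     _ = v
    bval (Not b)    s = not (bval b s)
    bval (And a b)  s = bval a s && bval b s
    bval (Less a b) s = aval a s < aval b s

    bigStep :: Com -> State -> State
    bigStep SKIP         s = s                                        -- Skip
    bigStep (Assign x a) s = \y -> if y == x then aval a s else s y   -- Assign
    bigStep (Seq c1 c2)  s = bigStep c2 (bigStep c1 s)                -- Seq
    bigStep (If b c1 c2) s = bigStep (if bval b s then c1 else c2) s  -- IfTrue/IfFalse
    bigStep (While b c)  s                                            -- WhileTrue/WhileFalse
      | bval b s  = bigStep (While b c) (bigStep c s)
      | otherwise = s

bigStep (While (Bc True) SKIP) diverges, mirroring the one behaviour the inductive relation simply does not contain.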
--- a/src/HOL/IMP/C_like.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/C_like.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -51,7 +51,7 @@
 
 declare [[values_timeout = 3600]]
 
-text{* Examples: *}
+text\<open>Examples:\<close>
 
 definition
 "array_sum =
@@ -59,7 +59,7 @@
  DO ( N 2 ::= Plus (!(N 2)) (!(!(N 0)));
       N 0 ::= Plus (!(N 0)) (N 1) )"
 
-text {* To show the first n variables in a @{typ "nat \<Rightarrow> nat"} state: *}
+text \<open>To show the first n variables in a @{typ "nat \<Rightarrow> nat"} state:\<close>
 definition 
   "list t n = map t [0 ..< n]"
 
--- a/src/HOL/IMP/Collecting.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Collecting.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -190,14 +190,14 @@
   case (WhileTrue b s1 c' s2 s3)
   from WhileTrue.prems(1) obtain I P C' Q where "C = {I} WHILE b DO {P} C' {Q}" "strip C' = c'"
     by(auto simp: strip_eq_While)
-  from WhileTrue.prems(3) `C = _`
+  from WhileTrue.prems(3) \<open>C = _\<close>
   have "step P C' \<le> C'"  "{s \<in> I. bval b s} \<le> P"  "S \<le> I"  "step (post C') C \<le> C"
     by (auto simp: step_def post_def)
   have "step {s \<in> I. bval b s} C' \<le> C'"
-    by (rule order_trans[OF mono2_step[OF order_refl `{s \<in> I. bval b s} \<le> P`] `step P C' \<le> C'`])
-  have "s1: {s:I. bval b s}" using `s1 \<in> S` `S \<subseteq> I` `bval b s1` by auto
-  note s2_in_post_C' = WhileTrue.IH(1)[OF `strip C' = c'` this `step {s \<in> I. bval b s} C' \<le> C'`]
-  from WhileTrue.IH(2)[OF WhileTrue.prems(1) s2_in_post_C' `step (post C') C \<le> C`]
+    by (rule order_trans[OF mono2_step[OF order_refl \<open>{s \<in> I. bval b s} \<le> P\<close>] \<open>step P C' \<le> C'\<close>])
+  have "s1: {s:I. bval b s}" using \<open>s1 \<in> S\<close> \<open>S \<subseteq> I\<close> \<open>bval b s1\<close> by auto
+  note s2_in_post_C' = WhileTrue.IH(1)[OF \<open>strip C' = c'\<close> this \<open>step {s \<in> I. bval b s} C' \<le> C'\<close>]
+  from WhileTrue.IH(2)[OF WhileTrue.prems(1) s2_in_post_C' \<open>step (post C') C \<le> C\<close>]
   show ?case .
 next
   case (WhileFalse b s1 c') thus ?case
--- a/src/HOL/IMP/Collecting1.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Collecting1.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,9 +4,9 @@
 
 subsection "A small step semantics on annotated commands"
 
-text{* The idea: the state is propagated through the annotated command as an
+text\<open>The idea: the state is propagated through the annotated command as an
 annotation @{term "{s}"}, while all other annotations are @{term "{}"}. It is easy
-to show that this semantics approximates the collecting semantics. *}
+to show that this semantics approximates the collecting semantics.\<close>
 
 lemma step_preserves_le:
   "\<lbrakk> step S cs = cs; S' \<subseteq> S; cs' \<le> cs \<rbrakk> \<Longrightarrow>
@@ -32,7 +32,7 @@
 proof-
   let ?bot = "annotate (\<lambda>p. {}) (strip cs)"
   have "?bot \<le> cs" by(induction cs) auto
-  from step_preserves_le[OF assms(1)_ this, of "{s}"] `s:S`
+  from step_preserves_le[OF assms(1)_ this, of "{s}"] \<open>s:S\<close>
   have 1: "step {s} ?bot \<le> cs" by simp
   from steps_empty_preserves_le[OF assms(1) 1]
   show ?thesis by(simp add: steps_def)
--- a/src/HOL/IMP/Collecting_Examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Collecting_Examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,12 +4,12 @@
 
 subsection "Pretty printing state sets"
 
-text{* Tweak code generation to work with sets of non-equality types: *}
+text\<open>Tweak code generation to work with sets of non-equality types:\<close>
 declare insert_code[code del] union_coset_filter[code del]
 lemma insert_code [code]:  "insert x (set xs) = set (x#xs)"
 by simp
 
-text{* Compensate for the fact that sets may now have duplicates: *}
+text\<open>Compensate for the fact that sets may now have duplicates:\<close>
 definition compact :: "'a set \<Rightarrow> 'a set" where
 "compact X = X"
 
@@ -18,9 +18,9 @@
 
 definition "vars_acom = compact o vars o strip"
 
-text{* In order to display commands annotated with state sets, states must be
+text\<open>In order to display commands annotated with state sets, states must be
 translated into a printable format as sets of variable-state pairs, for the
-variables in the command: *}
+variables in the command:\<close>
 
 definition show_acom :: "state set acom \<Rightarrow> (vname*val)set set acom" where
 "show_acom C =
@@ -33,7 +33,7 @@
                 DO ''x'' ::= Plus (V ''x'') (N 2)"
 definition C0 :: "state set acom" where "C0 = annotate (%p. {}) c0"
 
-text{* Collecting semantics: *}
+text\<open>Collecting semantics:\<close>
 
 value "show_acom (((step {<>}) ^^ 1) C0)"
 value "show_acom (((step {<>}) ^^ 2) C0)"
@@ -44,7 +44,7 @@
 value "show_acom (((step {<>}) ^^ 7) C0)"
 value "show_acom (((step {<>}) ^^ 8) C0)"
 
-text{* Small-step semantics: *}
+text\<open>Small-step semantics:\<close>
 value "show_acom (((step {}) ^^ 0) (step {<>} C0))"
 value "show_acom (((step {}) ^^ 1) (step {<>} C0))"
 value "show_acom (((step {}) ^^ 2) (step {<>} C0))"
--- a/src/HOL/IMP/Compiler.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Compiler.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,32 +7,32 @@
 
 subsection "List setup"
 
-text {* 
+text \<open>
   In the following, we use the length of lists as integers 
   instead of natural numbers. Instead of converting @{typ nat}
   to @{typ int} explicitly, we tell Isabelle to coerce @{typ nat}
   automatically when necessary.
-*}
+\<close>
 declare [[coercion_enabled]] 
 declare [[coercion "int :: nat \<Rightarrow> int"]]
 
-text {* 
+text \<open>
   Similarly, we will want to access the ith element of a list, 
   where @{term i} is an @{typ int}.
-*}
+\<close>
 fun inth :: "'a list \<Rightarrow> int \<Rightarrow> 'a" (infixl "!!" 100) where
 "(x # xs) !! i = (if i = 0 then x else xs !! (i - 1))"
 
-text {*
+text \<open>
   The only additional lemma we need about this function 
   is indexing over append:
-*}
+\<close>
 lemma inth_append [simp]:
   "0 \<le> i \<Longrightarrow>
   (xs @ ys) !! i = (if i < size xs then xs !! i else ys !! (i - size xs))"
 by (induction xs arbitrary: i) (auto simp: algebra_simps)
 
-text{* We hide coercion @{const int} applied to @{const length}: *}
+text\<open>We hide coercion @{const int} applied to @{const length}:\<close>
 
 abbreviation (output)
   "isize xs == int (length xs)"
@@ -42,11 +42,11 @@
 
 subsection "Instructions and Stack Machine"
 
-text_raw{*\snip{instrdef}{0}{1}{% *}
+text_raw\<open>\snip{instrdef}{0}{1}{%\<close>
 datatype instr = 
   LOADI int | LOAD vname | ADD | STORE vname |
   JMP int | JMPLESS int | JMPGE int
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 type_synonym stack = "val list"
 type_synonym config = "int \<times> state \<times> stack"
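
For orientation, a hedged Haskell sketch of this machine. The step function
iexec is not part of this hunk, so the clauses below are our reconstruction of
the usual IMP semantics: jumps are relative to the next pc, conditional jumps
pop both compared values, and stack underflow is left unspecified, as in the
Isabelle model (a Data.Map stands in for IMP's function-valued state):

    import qualified Data.Map as M

    type Vname  = String
    type MState = M.Map Vname Integer
    type Stack  = [Integer]
    type Config = (Integer, MState, Stack)   -- (pc, store, stack)

    data Instr = LOADI Integer | LOAD Vname | ADD | STORE Vname
               | JMP Integer | JMPLESS Integer | JMPGE Integer

    iexec :: Instr -> Config -> Config
    iexec ins (i, s, stk) = case ins of
      LOADI n   -> (i + 1, s, n : stk)
      LOAD x    -> (i + 1, s, M.findWithDefault 0 x s : stk)
      ADD       -> case stk of a : b : r -> (i + 1, s, (b + a) : r)
      STORE x   -> case stk of a : r -> (i + 1, M.insert x a s, r)
      JMP n     -> (i + 1 + n, s, stk)
      JMPLESS n -> case stk of
        a : b : r -> (if b < a then i + 1 + n else i + 1, s, r)
      JMPGE n   -> case stk of
        a : b : r -> (if b >= a then i + 1 + n else i + 1, s, r)
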
@@ -91,11 +91,11 @@
     (0, <''x'' := 3, ''y'' := 4>, []) \<rightarrow>* (i,t,stk)}"
 
 
-subsection{* Verification infrastructure *}
+subsection\<open>Verification infrastructure\<close>
 
-text{* Below we need to argue about the execution of code that is embedded in
+text\<open>Below we need to argue about the execution of code that is embedded in
 larger programs. For this purpose we show that execution is preserved by
-appending code to the left or right of a program. *}
+appending code to the left or right of a program.\<close>
 
 lemma iexec_shift [simp]: 
   "((n+i',s',stk') = iexec x (n+i,s,stk)) = ((i',s',stk') = iexec x (i,s,stk))"
@@ -122,13 +122,13 @@
   P' @ P \<turnstile> (size(P')+i,s,stk) \<rightarrow>* (size(P')+i',s',stk')"
   by (induction rule: exec_induct) (blast intro: star.step exec1_appendL)+
 
-text{* Now we specialise the above lemmas to enable automatic proofs of
+text\<open>Now we specialise the above lemmas to enable automatic proofs of
 @{prop "P \<turnstile> c \<rightarrow>* c'"} where @{text P} is a mixture of concrete instructions and
 pieces of code whose execution we already understand (by induction), combined
 by @{text "@"} and @{text "#"}. Backward jumps are not supported.
 The details should be skipped on a first reading.
 
-If we have just executed the first instruction of the program, drop it: *}
+If we have just executed the first instruction of the program, drop it:\<close>
 
 lemma exec_Cons_1 [intro]:
   "P \<turnstile> (0,s,stk) \<rightarrow>* (j,t,stk') \<Longrightarrow>
@@ -144,8 +144,8 @@
    \<Longrightarrow> P' @ P \<turnstile> (i,s,stk) \<rightarrow>* (i',s',stk')"
 by (drule exec_appendL[where P'=P']) simp
 
-text{* Split the execution of a compound program up into the execution of its
-parts: *}
+text\<open>Split the execution of a compound program up into the execution of its
+parts:\<close>
 
 lemma exec_append_trans[intro]:
   fixes i' i'' j'' :: int
@@ -246,7 +246,7 @@
   let ?cb = "bcomp b False (size ?cc + 1)"
   let ?cw = "ccomp(WHILE b DO c)"
   have "?cw \<turnstile> (0,s1,stk) \<rightarrow>* (size ?cb,s1,stk)"
-    using `bval b s1` by fastforce
+    using \<open>bval b s1\<close> by fastforce
   moreover
   have "?cw \<turnstile> (size ?cb,s1,stk) \<rightarrow>* (size ?cb + size ?cc,s2,stk)"
     using WhileTrue.IH(1) by fastforce
--- a/src/HOL/IMP/Compiler2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Compiler2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,16 +4,16 @@
 imports Compiler
 begin
 
-text {*
+text \<open>
 The preservation of the source code semantics is already shown in the 
 parent theory @{theory Compiler}. The present theory shows the reverse direction.
-*}
+\<close>
 
-section {* Compiler Correctness, Reverse Direction *}
+section \<open>Compiler Correctness, Reverse Direction\<close>
 
-subsection {* Definitions *}
+subsection \<open>Definitions\<close>
 
-text {* Execution in @{term n} steps for simpler induction *}
+text \<open>Execution in @{term n} steps for simpler induction\<close>
 primrec 
   exec_n :: "instr list \<Rightarrow> config \<Rightarrow> nat \<Rightarrow> config \<Rightarrow> bool" 
   ("_/ \<turnstile> (_ \<rightarrow>^_/ _)" [65,0,1000,55] 55)
@@ -21,26 +21,26 @@
   "P \<turnstile> c \<rightarrow>^0 c' = (c'=c)" |
   "P \<turnstile> c \<rightarrow>^(Suc n) c'' = (\<exists>c'. (P \<turnstile> c \<rightarrow> c') \<and> P \<turnstile> c' \<rightarrow>^n c'')"
 
-text {* The possible successor PCs of an instruction at position @{term n} *}
-text_raw{*\snip{isuccsdef}{0}{1}{% *}
+text \<open>The possible successor PCs of an instruction at position @{term n}\<close>
+text_raw\<open>\snip{isuccsdef}{0}{1}{%\<close>
 definition isuccs :: "instr \<Rightarrow> int \<Rightarrow> int set" where
 "isuccs i n = (case i of
   JMP j \<Rightarrow> {n + 1 + j} |
   JMPLESS j \<Rightarrow> {n + 1 + j, n + 1} |
   JMPGE j \<Rightarrow> {n + 1 + j, n + 1} |
   _ \<Rightarrow> {n +1})"
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
-text {* The possible successors PCs of an instruction list *}
+text \<open>The possible successor PCs of an instruction list\<close>
 definition succs :: "instr list \<Rightarrow> int \<Rightarrow> int set" where
 "succs P n = {s. \<exists>i::int. 0 \<le> i \<and> i < size P \<and> s \<in> isuccs (P!!i) (n+i)}" 
 
-text {* Possible exit PCs of a program *}
+text \<open>Possible exit PCs of a program\<close>
 definition exits :: "instr list \<Rightarrow> int set" where
 "exits P = succs P 0 - {0..< size P}"
 
   
-subsection {* Basic properties of @{term exec_n} *}
+subsection \<open>Basic properties of @{term exec_n}\<close>
 
 lemma exec_n_exec:
   "P \<turnstile> c \<rightarrow>^n c' \<Longrightarrow> P \<turnstile> c \<rightarrow>* c'"
@@ -69,7 +69,7 @@
   by (cases c') simp
 
 
-subsection {* Concrete symbolic execution steps *}
+subsection \<open>Concrete symbolic execution steps\<close>
 
 lemma exec_n_step:
   "n \<noteq> n' \<Longrightarrow> 
@@ -89,7 +89,7 @@
 lemmas exec_n_simps = exec_n_step exec_n_end
 
 
-subsection {* Basic properties of @{term succs} *}
+subsection \<open>Basic properties of @{term succs}\<close>
 
 lemma succs_simps [simp]: 
   "succs [ADD] n = {n + 1}"
@@ -261,7 +261,7 @@
   using ccomp_exits by auto
 
 
-subsection {* Splitting up machine executions *}
+subsection \<open>Splitting up machine executions\<close>
 
 lemma exec1_split:
   fixes i j :: int
@@ -333,9 +333,9 @@
      (auto dest: exec_n_split [where P="[]", simplified])
   
 
-text {*
+text \<open>
   Dropping the left context of a potentially incomplete execution of @{term c}.
-*}
+\<close>
 
 lemma exec1_drop_left:
   fixes i n :: int
@@ -366,14 +366,14 @@
     step: "P @ P' \<turnstile> (i, s, stk) \<rightarrow> (i', s'', stk'')" and
     rest: "P @ P' \<turnstile> (i', s'', stk'') \<rightarrow>^k (n, s', stk')"
     by auto
-  from step `size P \<le> i`
+  from step \<open>size P \<le> i\<close>
   have *: "P' \<turnstile> (i - size P, s, stk) \<rightarrow> (i' - size P, s'', stk'')" 
     by (rule exec1_drop_left)
   then have "i' - size P \<in> succs P' 0"
     by (fastforce dest!: succs_iexec1 simp: exec1_def simp del: iexec.simps)
-  with `exits P' \<subseteq> {0..}`
+  with \<open>exits P' \<subseteq> {0..}\<close>
   have "size P \<le> i'" by (auto simp: exits_def)
-  from rest this `exits P' \<subseteq> {0..}`     
+  from rest this \<open>exits P' \<subseteq> {0..}\<close>     
   have "P' \<turnstile> (i' - size P, s'', stk'') \<rightarrow>^k (n - size P, s', stk')"
     by (rule Suc.IH)
   with * show ?case by auto
@@ -420,7 +420,7 @@
 qed
 
 
-subsection {* Correctness theorem *}
+subsection \<open>Correctness theorem\<close>
 
 lemma acomp_neq_Nil [simp]:
   "acomp a \<noteq> []"
@@ -522,7 +522,7 @@
   let ?cs = "ccomp ?if"
   let ?bcomp = "bcomp b False (size (ccomp c1) + 1)"
   
-  from `?cs \<turnstile> (0,s,stk) \<rightarrow>^n (size ?cs,t,stk')`
+  from \<open>?cs \<turnstile> (0,s,stk) \<rightarrow>^n (size ?cs,t,stk')\<close>
   obtain i' :: int and k m s'' stk'' where
     cs: "?cs \<turnstile> (i',s'',stk'') \<rightarrow>^m (size ?cs,t,stk')" and
         "?bcomp \<turnstile> (0,s,stk) \<rightarrow>^k (i', s'', stk'')" 
--- a/src/HOL/IMP/Def_Init_Big.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Def_Init_Big.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -32,8 +32,8 @@
 
 subsection "Soundness wrt Big Steps"
 
-text{* Note the special form of the induction because one of the arguments
-of the inductive predicate is not a variable but the term @{term"Some s"}: *}
+text\<open>Note the special form of the induction because one of the arguments
+of the inductive predicate is not a variable but the term @{term"Some s"}:\<close>
 
 theorem Sound:
   "\<lbrakk> (c,Some s) \<Rightarrow> s';  D A c A';  A \<subseteq> dom s \<rbrakk>
@@ -55,7 +55,7 @@
     by auto (metis bval_Some option.simps(3) order_trans)
 next
   case (WhileTrue b s c s' s'')
-  from `D A (WHILE b DO c) A'` obtain A' where "D A c A'" by blast
+  from \<open>D A (WHILE b DO c) A'\<close> obtain A' where "D A c A'" by blast
   then obtain t' where "s' = Some t'" "A \<subseteq> dom t'"
     by (metis D_incr WhileTrue(3,7) subset_trans)
   from WhileTrue(5)[OF this(1) WhileTrue(6) this(2)] show ?case .
--- a/src/HOL/IMP/Def_Init_Small.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Def_Init_Small.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -45,10 +45,10 @@
   case (If b c1 c2)
   then obtain M1 M2 where "vars b \<subseteq> A" "D A c1 M1" "D A c2 M2" "M = M1 \<inter> M2"
     by auto
-  with If.IH `A \<subseteq> A'` obtain M1' M2'
+  with If.IH \<open>A \<subseteq> A'\<close> obtain M1' M2'
     where "D A' c1 M1'" "D A' c2 M2'" and "M1 \<subseteq> M1'" "M2 \<subseteq> M2'" by metis
   hence "D A' (IF b THEN c1 ELSE c2) (M1' \<inter> M2')" and "M \<subseteq> M1' \<inter> M2'"
-    using `vars b \<subseteq> A` `A \<subseteq> A'` `M = M1 \<inter> M2` by(fastforce intro: D.intros)+
+    using \<open>vars b \<subseteq> A\<close> \<open>A \<subseteq> A'\<close> \<open>M = M1 \<inter> M2\<close> by(fastforce intro: D.intros)+
   thus ?case by metis
 next
   case While thus ?case by auto (metis D.intros(5) subset_trans)
@@ -61,8 +61,8 @@
   then obtain A' where A': "vars b \<subseteq> dom s" "A = dom s" "D (dom s) c A'" by blast
   then obtain A'' where "D A' c A''" by (metis D_incr D_mono)
   with A' have "D (dom s) (IF b THEN c;; WHILE b DO c ELSE SKIP) (dom s)"
-    by (metis D.If[OF `vars b \<subseteq> dom s` D.Seq[OF `D (dom s) c A'` D.While[OF _ `D A' c A''`]] D.Skip] D_incr Int_absorb1 subset_trans)
-  thus ?case by (metis D_incr `A = dom s`)
+    by (metis D.If[OF \<open>vars b \<subseteq> dom s\<close> D.Seq[OF \<open>D (dom s) c A'\<close> D.While[OF _ \<open>D A' c A''\<close>]] D.Skip] D_incr Int_absorb1 subset_trans)
+  thus ?case by (metis D_incr \<open>A = dom s\<close>)
 next
   case Seq2 thus ?case by auto (metis D_mono D.intros(3))
 qed (auto intro: D.intros)
--- a/src/HOL/IMP/Denotational.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Denotational.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -30,7 +30,7 @@
   finally show ?thesis .
 qed
 
-text{* Equivalence of denotational and big-step semantics: *}
+text\<open>Equivalence of denotational and big-step semantics:\<close>
 
 lemma D_if_big_step:  "(c,s) \<Rightarrow> t \<Longrightarrow> (s,t) \<in> D(c)"
 proof (induction rule: big_step_induct)
@@ -79,10 +79,10 @@
 proof
   fix a b :: "'a set" assume "a \<subseteq> b"
   let ?S = "\<lambda>n::nat. if n=0 then a else b"
-  have "chain ?S" using `a \<subseteq> b` by(auto simp: chain_def)
+  have "chain ?S" using \<open>a \<subseteq> b\<close> by(auto simp: chain_def)
   hence "f(UN n. ?S n) = (UN n. f(?S n))"
     using assms by(simp add: cont_def)
-  moreover have "(UN n. ?S n) = b" using `a \<subseteq> b` by (auto split: if_splits)
+  moreover have "(UN n. ?S n) = b" using \<open>a \<subseteq> b\<close> by (auto split: if_splits)
   moreover have "(UN n. f(?S n)) = f a \<union> f b" by (auto split: if_splits)
   ultimately show "f a \<subseteq> f b" by (metis Un_upper1)
 qed
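
In conventional notation, the argument just carried out: given a ⊆ b, the
chain S with S 0 = a and S n = b for n > 0 satisfies (⋃n. S n) = b, so
continuity gives f b = (⋃n. f (S n)) = f a ∪ f b, and therefore f a ⊆ f b.
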
@@ -123,7 +123,7 @@
       case 0 show ?case by simp
     next
       case Suc
-      from monoD[OF mono_if_cont[OF assms] Suc] `f p \<subseteq> p`
+      from monoD[OF mono_if_cont[OF assms] Suc] \<open>f p \<subseteq> p\<close>
       show ?case by simp
     qed
   qed
@@ -134,7 +134,7 @@
 by(auto simp: cont_def W_def)
 
 
-subsection{*The denotational semantics is deterministic*}
+subsection\<open>The denotational semantics is deterministic\<close>
 
 lemma single_valued_UN_chain:
   assumes "chain S" "(\<And>n. single_valued (S n))"
--- a/src/HOL/IMP/Finite_Reachable.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Finite_Reachable.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,15 +4,15 @@
 
 subsection "Finite number of reachable commands"
 
-text{* This theory shows that in the small-step semantics one can only reach
+text\<open>This theory shows that in the small-step semantics one can only reach
 a finite number of commands from any given command. Hence one can see the
 command component of a small-step configuration as a combination of the
-program to be executed and a pc. *}
+program to be executed and a pc.\<close>
 
 definition reachable :: "com \<Rightarrow> com set" where
 "reachable c = {c'. \<exists>s t. (c,s) \<rightarrow>* (c',t)}"
 
-text{* Proofs need induction on the length of a small-step reduction sequence. *}
+text\<open>Proofs need induction on the length of a small-step reduction sequence.\<close>
 
 fun small_stepsn :: "com * state \<Rightarrow> nat \<Rightarrow> com * state \<Rightarrow> bool"
     ("_ \<rightarrow>'(_') _" [55,0,55] 55) where
@@ -132,8 +132,8 @@
           assume "\<exists>s2 m1 m2. (c, s) \<rightarrow>(m1) (SKIP, s2) \<and>
             (WHILE b DO c, s2) \<rightarrow>(m2) (c2, t) \<and> m1 + m2 < n3" (is "\<exists>x y z. ?P x y z")
           then obtain s2 m1 m2 where "?P s2 m1 m2" by blast
-          with `n2 = Suc n3` `n1 = Suc n2`have "m2 < n1" by arith
-          from less.IH[OF this] `?P s2 m1 m2` show ?thesis by blast
+          with \<open>n2 = Suc n3\<close> \<open>n1 = Suc n2\<close> have "m2 < n1" by arith
+          from less.IH[OF this] \<open>?P s2 m1 m2\<close> show ?thesis by blast
         qed
       next
         assume "(iw', s') = (SKIP, s)"
--- a/src/HOL/IMP/Hoare.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Hoare.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -47,10 +47,10 @@
   "\<lbrakk> \<turnstile> {P} c {Q};  \<forall>s. Q s \<longrightarrow> Q' s \<rbrakk> \<Longrightarrow>  \<turnstile> {P} c {Q'}"
 by (blast intro: conseq)
 
-text{* The assignment and While rule are awkward to use in actual proofs
+text\<open>The assignment and While rules are awkward to use in actual proofs
 because their pre- and postconditions are of a very special form and the actual
 goal would have to match this form exactly. Therefore we derive two variants
-with arbitrary pre and postconditions. *}
+with arbitrary pre- and postconditions.\<close>
 
 lemma Assign': "\<forall>s. P s \<longrightarrow> Q(s[a/x]) \<Longrightarrow> \<turnstile> {P} x ::= a {Q}"
 by (simp add: strengthen_pre[OF _ Assign])
--- a/src/HOL/IMP/Hoare_Examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Hoare_Examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,7 +4,7 @@
 
 hide_const (open) sum
 
-text{* Summing up the first @{text x} natural numbers in variable @{text y}. *}
+text\<open>Summing up the first @{text x} natural numbers in variable @{text y}.\<close>
 
 fun sum :: "int \<Rightarrow> int" where
 "sum i = (if i \<le> 0 then 0 else sum (i - 1) + i)"
@@ -22,9 +22,9 @@
       ''x'' ::= Plus (V ''x'') (N (- 1)))"
 
 
-subsubsection{* Proof by Operational Semantics *}
+subsubsection\<open>Proof by Operational Semantics\<close>
 
-text{* The behaviour of the loop is proved by induction: *}
+text\<open>The behaviour of the loop is proved by induction:\<close>
 
 lemma while_sum:
   "(wsum, s) \<Rightarrow> t \<Longrightarrow> t ''y'' = s ''y'' + sum(s ''x'')"
@@ -32,12 +32,12 @@
 apply(auto)
 done
 
-text{* We were lucky that the proof was automatic, except for the
+text\<open>We were lucky that the proof was automatic, except for the
 induction. In general, such proofs will not be so easy. The automation is
 partly due to the right inversion rules, which we set up as automatic
 elimination rules that decompose big-step premises.
 
-Now we prefix the loop with the necessary initialization: *}
+Now we prefix the loop with the necessary initialization:\<close>
 
 lemma sum_via_bigstep:
   assumes "(''y'' ::= N 0;; wsum, s) \<Rightarrow> t"
@@ -48,10 +48,10 @@
 qed
 
 
-subsubsection{* Proof by Hoare Logic *}
+subsubsection\<open>Proof by Hoare Logic\<close>
 
-text{* Note that we deal with sequences of commands from right to left,
-pulling back the postcondition towards the precondition. *}
+text\<open>Note that we deal with sequences of commands from right to left,
+pulling back the postcondition towards the precondition.\<close>
 
 lemma "\<turnstile> {\<lambda>s. s ''x'' = n} ''y'' ::= N 0;; wsum {\<lambda>s. s ''y'' = sum n}"
 apply(rule Seq)
@@ -67,11 +67,11 @@
 apply simp
 done
 
-text{* The proof is intentionally an apply script because it merely composes
+text\<open>The proof is intentionally an apply script because it merely composes
 the rules of Hoare logic. Of course, in a few places side conditions have to
 be proved. But since those proofs are one-liners, a structured proof is
 overkill. In fact, we shall learn later that the application of the Hoare
 rules can be automated completely and all that is left for the user is to
+provide the loop invariants and prove the side conditions.\<close>
+provide the loop invariants and prove the side-conditions.\<close>
 
 end
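
As a sanity check on what the triples above say, here is the program in
Haskell together with the invariant that drives the While rule (the invariant
reading is our gloss):

    sumTo :: Integer -> Integer
    sumTo i = if i <= 0 then 0 else sumTo (i - 1) + i

    -- the wsum loop on the two relevant variables (x, y);
    -- invariant: y + sumTo x is constant, so from (n, 0) it ends with y = sumTo n
    wsumLoop :: (Integer, Integer) -> (Integer, Integer)
    wsumLoop (x, y)
      | 0 < x     = wsumLoop (x - 1, y + x)
      | otherwise = (x, y)
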
--- a/src/HOL/IMP/Hoare_Total.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Hoare_Total.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -8,18 +8,18 @@
 
 subsubsection "Hoare Logic for Total Correctness --- Separate Termination Relation"
 
-text{* Note that this definition of total validity @{text"\<Turnstile>\<^sub>t"} only
-works if execution is deterministic (which it is in our case). *}
+text\<open>Note that this definition of total validity @{text"\<Turnstile>\<^sub>t"} only
+works if execution is deterministic (which it is in our case).\<close>
 
 definition hoare_tvalid :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool"
   ("\<Turnstile>\<^sub>t {(1_)}/ (_)/ {(1_)}" 50) where
 "\<Turnstile>\<^sub>t {P}c{Q}  \<longleftrightarrow>  (\<forall>s. P s \<longrightarrow> (\<exists>t. (c,s) \<Rightarrow> t \<and> Q t))"
 
-text{* Provability of Hoare triples in the proof system for total
+text\<open>Provability of Hoare triples in the proof system for total
 correctness is written @{text"\<turnstile>\<^sub>t {P}c{Q}"} and defined
 inductively. The rules for @{text"\<turnstile>\<^sub>t"} differ from those for
 @{text"\<turnstile>"} only in the one place where nontermination can arise: the
-@{term While}-rule. *}
+@{term While}-rule.\<close>
 
 inductive
   hoaret :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool" ("\<turnstile>\<^sub>t ({(1_)}/ (_)/ {(1_)})" 50)
@@ -42,17 +42,17 @@
 conseq: "\<lbrakk> \<forall>s. P' s \<longrightarrow> P s; \<turnstile>\<^sub>t {P}c{Q}; \<forall>s. Q s \<longrightarrow> Q' s  \<rbrakk> \<Longrightarrow>
            \<turnstile>\<^sub>t {P'}c{Q'}"
 
-text{* The @{term While}-rule is like the one for partial correctness but it
+text\<open>The @{term While}-rule is like the one for partial correctness but it
 requires additionally that with every execution of the loop body some measure
 relation @{term[source]"T :: state \<Rightarrow> nat \<Rightarrow> bool"} decreases.
-The following functional version is more intuitive: *}
+The following functional version is more intuitive:\<close>
 
 lemma While_fun:
   "\<lbrakk> \<And>n::nat. \<turnstile>\<^sub>t {\<lambda>s. P s \<and> bval b s \<and> n = f s} c {\<lambda>s. P s \<and> f s < n}\<rbrakk>
    \<Longrightarrow> \<turnstile>\<^sub>t {P} WHILE b DO c {\<lambda>s. P s \<and> \<not>bval b s}"
   by (rule While [where T="\<lambda>s n. n = f s", simplified])
 
-text{* Building in the consequence rule: *}
+text\<open>Building in the consequence rule:\<close>
 
 lemma strengthen_pre:
   "\<lbrakk> \<forall>s. P' s \<longrightarrow> P s;  \<turnstile>\<^sub>t {P} c {Q} \<rbrakk> \<Longrightarrow> \<turnstile>\<^sub>t {P'} c {Q}"
@@ -72,7 +72,7 @@
 by(blast intro: assms(1) weaken_post[OF While_fun assms(2)])
 
 
-text{* Our standard example: *}
+text\<open>Our standard example:\<close>
 
 lemma "\<turnstile>\<^sub>t {\<lambda>s. s ''x'' = i} ''y'' ::= N 0;; wsum {\<lambda>s. s ''y'' = sum i}"
 apply(rule Seq)
@@ -90,7 +90,7 @@
 done
 
 
-text{* The soundness theorem: *}
+text\<open>The soundness theorem:\<close>
 
 theorem hoaret_sound: "\<turnstile>\<^sub>t {P}c{Q}  \<Longrightarrow>  \<Turnstile>\<^sub>t {P}c{Q}"
 proof(unfold hoare_tvalid_def, induction rule: hoaret.induct)
@@ -105,10 +105,10 @@
 qed fastforce+
 
 
-text{*
+text\<open>
 The completeness proof proceeds along the same lines as the one for partial
 correctness. First we have to strengthen our notion of weakest precondition
-to take termination into account: *}
+to take termination into account:\<close>
 
 definition wpt :: "com \<Rightarrow> assn \<Rightarrow> assn" ("wp\<^sub>t") where
 "wp\<^sub>t c Q  =  (\<lambda>s. \<exists>t. (c,s) \<Rightarrow> t \<and> Q t)"
@@ -133,15 +133,15 @@
 done
 
 
-text{* Now we define the number of iterations @{term "WHILE b DO c"} needs to
+text\<open>Now we define the number of iterations @{term "WHILE b DO c"} needs to
 terminate when started in state @{text s}. Because this is a truly partial
-function, we define it as an (inductive) relation first: *}
+function, we define it as an (inductive) relation first:\<close>
 
 inductive Its :: "bexp \<Rightarrow> com \<Rightarrow> state \<Rightarrow> nat \<Rightarrow> bool" where
 Its_0: "\<not> bval b s \<Longrightarrow> Its b c s 0" |
 Its_Suc: "\<lbrakk> bval b s;  (c,s) \<Rightarrow> s';  Its b c s' n \<rbrakk> \<Longrightarrow> Its b c s (Suc n)"
 
-text{* The relation is in fact a function: *}
+text\<open>The relation is in fact a function:\<close>
 
 lemma Its_fun: "Its b c s n \<Longrightarrow> Its b c s n' \<Longrightarrow> n=n'"
 proof(induction arbitrary: n' rule:Its.induct)
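
Read functionally, Its is just an iteration counter; this is what the
function-likeness lemma is about. A Haskell sketch, with the loop body as a
state function (an assumption that big-step determinism justifies):

    -- iterations WHILE b DO c needs from state s; undefined on divergence,
    -- matching the partiality of the inductive relation
    its :: (st -> Bool) -> (st -> st) -> st -> Int
    its b c s = if b s then 1 + its b c (c s) else 0
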
@@ -150,7 +150,7 @@
   case Its_Suc thus ?case by(metis Its.cases big_step_determ)
 qed
 
-text{* For all terminating loops, @{const Its} yields a result: *}
+text\<open>For all terminating loops, @{const Its} yields a result:\<close>
 
 lemma WHILE_Its: "(WHILE b DO c,s) \<Rightarrow> t \<Longrightarrow> \<exists>n. Its b c s n"
 proof(induction "WHILE b DO c" s t rule: big_step_induct)
@@ -179,13 +179,13 @@
   proof -
     have "wp\<^sub>t c (?R n) s" if "bval b s" and "?T s n" and "(?w, s) \<Rightarrow> t" and "Q t" for s t
     proof -
-      from `bval b s` and `(?w, s) \<Rightarrow> t` obtain s' where
+      from \<open>bval b s\<close> and \<open>(?w, s) \<Rightarrow> t\<close> obtain s' where
         "(c,s) \<Rightarrow> s'" "(?w,s') \<Rightarrow> t" by auto
-      from `(?w, s') \<Rightarrow> t` obtain n' where "?T s' n'"
+      from \<open>(?w, s') \<Rightarrow> t\<close> obtain n' where "?T s' n'"
         by (blast dest: WHILE_Its)
-      with `bval b s` and `(c, s) \<Rightarrow> s'` have "?T s (Suc n')" by (rule Its_Suc)
-      with `?T s n` have "n = Suc n'" by (rule Its_fun)
-      with `(c,s) \<Rightarrow> s'` and `(?w,s') \<Rightarrow> t` and `Q t` and `?T s' n'`
+      with \<open>bval b s\<close> and \<open>(c, s) \<Rightarrow> s'\<close> have "?T s (Suc n')" by (rule Its_Suc)
+      with \<open>?T s n\<close> have "n = Suc n'" by (rule Its_fun)
+      with \<open>(c,s) \<Rightarrow> s'\<close> and \<open>(?w,s') \<Rightarrow> t\<close> and \<open>Q t\<close> and \<open>?T s' n'\<close>
       show ?thesis by (auto simp: wpt_def)
     qed
     thus ?thesis
@@ -199,11 +199,11 @@
 qed
 
 
-text{*\noindent In the @{term While}-case, @{const Its} provides the obvious
+text\<open>\noindent In the @{term While}-case, @{const Its} provides the obvious
 termination argument.
 
 The actual completeness theorem follows directly, in the same manner
-as for partial correctness: *}
+as for partial correctness:\<close>
 
 theorem hoaret_complete: "\<Turnstile>\<^sub>t {P}c{Q} \<Longrightarrow> \<turnstile>\<^sub>t {P}c{Q}"
 apply(rule strengthen_pre[OF _ wpt_is_pre])
--- a/src/HOL/IMP/Hoare_Total_EX.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Hoare_Total_EX.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,11 +6,11 @@
 
 subsubsection "Hoare Logic for Total Correctness --- \<open>nat\<close>-Indexed Invariant"
 
-text{* This is the standard set of rules that you find in many publications.
+text\<open>This is the standard set of rules that you find in many publications.
 The While-rule is different from the one in Concrete Semantics in that the
 invariant is indexed by natural numbers and goes down by 1 with
 every iteration. The completeness proof is easier but the rule is harder
-to apply in program proofs. *}
+to apply in program proofs.\<close>
 
 definition hoare_tvalid :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool"
   ("\<Turnstile>\<^sub>t {(1_)}/ (_)/ {(1_)}" 50) where
@@ -37,7 +37,7 @@
 conseq: "\<lbrakk> \<forall>s. P' s \<longrightarrow> P s; \<turnstile>\<^sub>t {P}c{Q}; \<forall>s. Q s \<longrightarrow> Q' s  \<rbrakk> \<Longrightarrow>
            \<turnstile>\<^sub>t {P'}c{Q'}"
 
-text{* Building in the consequence rule: *}
+text\<open>Building in the consequence rule:\<close>
 
 lemma strengthen_pre:
   "\<lbrakk> \<forall>s. P' s \<longrightarrow> P s;  \<turnstile>\<^sub>t {P} c {Q} \<rbrakk> \<Longrightarrow> \<turnstile>\<^sub>t {P'} c {Q}"
@@ -50,7 +50,7 @@
 lemma Assign': "\<forall>s. P s \<longrightarrow> Q(s[a/x]) \<Longrightarrow> \<turnstile>\<^sub>t {P} x ::= a {Q}"
 by (simp add: strengthen_pre[OF _ Assign])
 
-text{* The soundness theorem: *}
+text\<open>The soundness theorem:\<close>
 
 theorem hoaret_sound: "\<turnstile>\<^sub>t {P}c{Q}  \<Longrightarrow>  \<Turnstile>\<^sub>t {P}c{Q}"
 proof(unfold hoare_tvalid_def, induction rule: hoaret.induct)
@@ -91,8 +91,8 @@
 done
 
 
-text{* Function @{text wpw} computes the weakest precondition of a While-loop
-that is unfolded a fixed number of times. *}
+text\<open>Function @{text wpw} computes the weakest precondition of a While-loop
+that is unfolded a fixed number of times.\<close>
 
 fun wpw :: "bexp \<Rightarrow> com \<Rightarrow> nat \<Rightarrow> assn \<Rightarrow> assn" where
 "wpw b c 0 Q s = (\<not> bval b s \<and> Q s)" |
--- a/src/HOL/IMP/Hoare_Total_EX2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Hoare_Total_EX2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,12 +6,12 @@
 
 subsubsection "Hoare Logic for Total Correctness --- With Logical Variables"
 
-text{* This is the standard set of rules that you find in many publications.
+text\<open>This is the standard set of rules that you find in many publications.
 In the While-rule, a logical variable is needed to remember the pre-value
 of the variant (an expression that decreases by one with each iteration).
 In this theory, logical variables are modeled explicitly.
 A simpler (but not quite as flexible) approach is found in theory \<open>Hoare_Total_EX\<close>:
-pre and post-condition are connected via a universally quantified HOL variable. *}
+pre- and postcondition are connected via a universally quantified HOL variable.\<close>
 
 type_synonym lvname = string
 type_synonym assn2 = "(lvname \<Rightarrow> nat) \<Rightarrow> state \<Rightarrow> bool"
@@ -42,7 +42,7 @@
 conseq: "\<lbrakk> \<forall>l s. P' l s \<longrightarrow> P l s; \<turnstile>\<^sub>t {P}c{Q}; \<forall>l s. Q l s \<longrightarrow> Q' l s  \<rbrakk> \<Longrightarrow>
            \<turnstile>\<^sub>t {P'}c{Q'}"
 
-text{* Building in the consequence rule: *}
+text\<open>Building in the consequence rule:\<close>
 
 lemma strengthen_pre:
   "\<lbrakk> \<forall>l s. P' l s \<longrightarrow> P l s;  \<turnstile>\<^sub>t {P} c {Q} \<rbrakk> \<Longrightarrow> \<turnstile>\<^sub>t {P'} c {Q}"
@@ -55,7 +55,7 @@
 lemma Assign': "\<forall>l s. P l s \<longrightarrow> Q l (s[a/x]) \<Longrightarrow> \<turnstile>\<^sub>t {P} x ::= a {Q}"
 by (simp add: strengthen_pre[OF _ Assign])
 
-text{* The soundness theorem: *}
+text\<open>The soundness theorem:\<close>
 
 theorem hoaret_sound: "\<turnstile>\<^sub>t {P}c{Q}  \<Longrightarrow>  \<Turnstile>\<^sub>t {P}c{Q}"
 proof(unfold hoare_tvalid_def, induction rule: hoaret.induct)
@@ -92,8 +92,8 @@
 by (auto simp: wpt_def fun_eq_iff)
 
 
-text{* Function @{text wpw} computes the weakest precondition of a While-loop
-that is unfolded a fixed number of times. *}
+text\<open>Function @{text wpw} computes the weakest precondition of a While-loop
+that is unfolded a fixed number of times.\<close>
 
 fun wpw :: "bexp \<Rightarrow> com \<Rightarrow> nat \<Rightarrow> assn2 \<Rightarrow> assn2" where
 "wpw b c 0 Q l s = (\<not> bval b s \<and> Q l s)" |
--- a/src/HOL/IMP/Live.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Live.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -48,7 +48,7 @@
 lemma L_While_X: "X \<subseteq> L (WHILE b DO c) X"
 by auto
 
-text{* Disable L WHILE equation and reason only with L WHILE constraints *}
+text\<open>Disable L WHILE equation and reason only with L WHILE constraints\<close>
 declare L.simps(5)[simp del]
 
 subsection "Correctness"
@@ -74,16 +74,16 @@
   case (IfTrue b s c1 s' c2)
   hence "s = t on vars b" "s = t on L c1 X" by auto
   from  bval_eq_if_eq_on_vars[OF this(1)] IfTrue(1) have "bval b t" by simp
-  from IfTrue.IH[OF `s = t on L c1 X`] obtain t' where
+  from IfTrue.IH[OF \<open>s = t on L c1 X\<close>] obtain t' where
     "(c1, t) \<Rightarrow> t'" "s' = t' on X" by auto
-  thus ?case using `bval b t` by auto
+  thus ?case using \<open>bval b t\<close> by auto
 next
   case (IfFalse b s c2 s' c1)
   hence "s = t on vars b" "s = t on L c2 X" by auto
   from  bval_eq_if_eq_on_vars[OF this(1)] IfFalse(1) have "~bval b t" by simp
-  from IfFalse.IH[OF `s = t on L c2 X`] obtain t' where
+  from IfFalse.IH[OF \<open>s = t on L c2 X\<close>] obtain t' where
     "(c2, t) \<Rightarrow> t'" "s' = t' on X" by auto
-  thus ?case using `~bval b t` by auto
+  thus ?case using \<open>~bval b t\<close> by auto
 next
   case (WhileFalse b s c)
   hence "~ bval b t"
@@ -92,7 +92,7 @@
 next
   case (WhileTrue b s1 c s2 s3 X t1)
   let ?w = "WHILE b DO c"
-  from `bval b s1` WhileTrue.prems have "bval b t1"
+  from \<open>bval b s1\<close> WhileTrue.prems have "bval b t1"
     by (metis L_While_vars bval_eq_if_eq_on_vars set_mp)
   have "s1 = t1 on L c (L ?w X)" using L_While_pfp WhileTrue.prems
     by (blast)
@@ -100,13 +100,13 @@
     "(c, t1) \<Rightarrow> t2" "s2 = t2 on L ?w X" by auto
   from WhileTrue.IH(2)[OF this(2)] obtain t3 where "(?w,t2) \<Rightarrow> t3" "s3 = t3 on X"
     by auto
-  with `bval b t1` `(c, t1) \<Rightarrow> t2` show ?case by auto
+  with \<open>bval b t1\<close> \<open>(c, t1) \<Rightarrow> t2\<close> show ?case by auto
 qed
 
 
 subsection "Program Optimization"
 
-text{* Burying assignments to dead variables: *}
+text\<open>Burying assignments to dead variables:\<close>
 fun bury :: "com \<Rightarrow> vname set \<Rightarrow> com" where
 "bury SKIP X = SKIP" |
 "bury (x ::= a) X = (if x \<in> X then x ::= a else SKIP)" |
@@ -114,9 +114,9 @@
 "bury (IF b THEN c\<^sub>1 ELSE c\<^sub>2) X = IF b THEN bury c\<^sub>1 X ELSE bury c\<^sub>2 X" |
 "bury (WHILE b DO c) X = WHILE b DO bury c (L (WHILE b DO c) X)"
 
-text{* We could prove the analogous lemma to @{thm[source]L_correct}, and the
+text\<open>We could prove the analogous lemma to @{thm[source]L_correct}, and the
 proof would be very similar. However, we phrase it as a semantics
-preservation property: *}
+preservation property:\<close>
 
 theorem bury_correct:
   "(c,s) \<Rightarrow> s'  \<Longrightarrow> s = t on L c X \<Longrightarrow>
@@ -139,16 +139,16 @@
   case (IfTrue b s c1 s' c2)
   hence "s = t on vars b" "s = t on L c1 X" by auto
   from  bval_eq_if_eq_on_vars[OF this(1)] IfTrue(1) have "bval b t" by simp
-  from IfTrue.IH[OF `s = t on L c1 X`] obtain t' where
+  from IfTrue.IH[OF \<open>s = t on L c1 X\<close>] obtain t' where
     "(bury c1 X, t) \<Rightarrow> t'" "s' =t' on X" by auto
-  thus ?case using `bval b t` by auto
+  thus ?case using \<open>bval b t\<close> by auto
 next
   case (IfFalse b s c2 s' c1)
   hence "s = t on vars b" "s = t on L c2 X" by auto
   from  bval_eq_if_eq_on_vars[OF this(1)] IfFalse(1) have "~bval b t" by simp
-  from IfFalse.IH[OF `s = t on L c2 X`] obtain t' where
+  from IfFalse.IH[OF \<open>s = t on L c2 X\<close>] obtain t' where
     "(bury c2 X, t) \<Rightarrow> t'" "s' = t' on X" by auto
-  thus ?case using `~bval b t` by auto
+  thus ?case using \<open>~bval b t\<close> by auto
 next
   case (WhileFalse b s c)
   hence "~ bval b t" by (metis L_While_vars bval_eq_if_eq_on_vars set_mp)
@@ -157,7 +157,7 @@
 next
   case (WhileTrue b s1 c s2 s3 X t1)
   let ?w = "WHILE b DO c"
-  from `bval b s1` WhileTrue.prems have "bval b t1"
+  from \<open>bval b s1\<close> WhileTrue.prems have "bval b t1"
     by (metis L_While_vars bval_eq_if_eq_on_vars set_mp)
   have "s1 = t1 on L c (L ?w X)"
     using L_While_pfp WhileTrue.prems by blast
@@ -166,14 +166,14 @@
   from WhileTrue.IH(2)[OF this(2)] obtain t3
     where "(bury ?w X,t2) \<Rightarrow> t3" "s3 = t3 on X"
     by auto
-  with `bval b t1` `(bury c (L ?w X), t1) \<Rightarrow> t2` show ?case by auto
+  with \<open>bval b t1\<close> \<open>(bury c (L ?w X), t1) \<Rightarrow> t2\<close> show ?case by auto
 qed
 
 corollary final_bury_correct: "(c,s) \<Rightarrow> s' \<Longrightarrow> (bury c UNIV,s) \<Rightarrow> s'"
 using bury_correct[of c s s' UNIV]
 by (auto simp: fun_eq_iff[symmetric])
 
-text{* Now the opposite direction. *}
+text\<open>Now the opposite direction.\<close>
 
 lemma SKIP_bury[simp]:
   "SKIP = bury c X \<longleftrightarrow> c = SKIP | (EX x a. c = x::=a & x \<notin> X)"
@@ -221,9 +221,9 @@
   have "s = t on vars b" "s = t on L c1 X" using IfTrue.prems c by auto
   from bval_eq_if_eq_on_vars[OF this(1)] IfTrue(1) have "bval b t" by simp
   note IH = IfTrue.hyps(3)
-  from IH[OF bc1 `s = t on L c1 X`] obtain t' where
+  from IH[OF bc1 \<open>s = t on L c1 X\<close>] obtain t' where
     "(c1, t) \<Rightarrow> t'" "s' =t' on X" by auto
-  thus ?case using c `bval b t` by auto
+  thus ?case using c \<open>bval b t\<close> by auto
 next
   case (IfFalse b s bc2 s' bc1)
   then obtain c1 c2 where c: "c = IF b THEN c1 ELSE c2"
@@ -231,9 +231,9 @@
   have "s = t on vars b" "s = t on L c2 X" using IfFalse.prems c by auto
   from bval_eq_if_eq_on_vars[OF this(1)] IfFalse(1) have "~bval b t" by simp
   note IH = IfFalse.hyps(3)
-  from IH[OF bc2 `s = t on L c2 X`] obtain t' where
+  from IH[OF bc2 \<open>s = t on L c2 X\<close>] obtain t' where
     "(c2, t) \<Rightarrow> t'" "s' =t' on X" by auto
-  thus ?case using c `~bval b t` by auto
+  thus ?case using c \<open>~bval b t\<close> by auto
 next
   case (WhileFalse b s c)
   hence "~ bval b t"
@@ -244,7 +244,7 @@
   case (WhileTrue b s1 bc' s2 s3 w X t1)
   then obtain c' where w: "w = WHILE b DO c'"
     and bc': "bc' = bury c' (L (WHILE b DO c') X)" by auto
-  from `bval b s1` WhileTrue.prems w have "bval b t1"
+  from \<open>bval b s1\<close> WhileTrue.prems w have "bval b t1"
     by auto (metis L_While_vars bval_eq_if_eq_on_vars set_mp)
   note IH = WhileTrue.hyps(3,5)
   have "s1 = t1 on L c' (L w X)"
@@ -254,7 +254,7 @@
   from IH(2)[OF WhileTrue.hyps(6), of t2] w this(2) obtain t3
     where "(w,t2) \<Rightarrow> t3" "s3 = t3 on X"
     by auto
-  with `bval b t1` `(c', t1) \<Rightarrow> t2` w show ?case by auto
+  with \<open>bval b t1\<close> \<open>(c', t1) \<Rightarrow> t2\<close> w show ?case by auto
 qed
 
 corollary final_bury_correct2: "(bury c UNIV,s) \<Rightarrow> s' \<Longrightarrow> (c,s) \<Rightarrow> s'"
--- a/src/HOL/IMP/Live_True.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Live_True.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -46,7 +46,7 @@
 lemma L_While_X: "X \<subseteq> L (WHILE b DO c) X"
 using L_While_unfold by blast
 
-text{* Disable @{text "L WHILE"} equation and reason only with @{text "L WHILE"} constraints: *}
+text\<open>Disable @{text "L WHILE"} equation and reason only with @{text "L WHILE"} constraints:\<close>
 declare L.simps(5)[simp del]
 
 
@@ -73,16 +73,16 @@
   case (IfTrue b s c1 s' c2)
   hence "s = t on vars b" and "s = t on L c1 X" by auto
   from  bval_eq_if_eq_on_vars[OF this(1)] IfTrue(1) have "bval b t" by simp
-  from IfTrue.IH[OF `s = t on L c1 X`] obtain t' where
+  from IfTrue.IH[OF \<open>s = t on L c1 X\<close>] obtain t' where
     "(c1, t) \<Rightarrow> t'" "s' = t' on X" by auto
-  thus ?case using `bval b t` by auto
+  thus ?case using \<open>bval b t\<close> by auto
 next
   case (IfFalse b s c2 s' c1)
   hence "s = t on vars b" "s = t on L c2 X" by auto
   from  bval_eq_if_eq_on_vars[OF this(1)] IfFalse(1) have "~bval b t" by simp
-  from IfFalse.IH[OF `s = t on L c2 X`] obtain t' where
+  from IfFalse.IH[OF \<open>s = t on L c2 X\<close>] obtain t' where
     "(c2, t) \<Rightarrow> t'" "s' = t' on X" by auto
-  thus ?case using `~bval b t` by auto
+  thus ?case using \<open>~bval b t\<close> by auto
 next
   case (WhileFalse b s c)
   hence "~ bval b t"
@@ -91,7 +91,7 @@
 next
   case (WhileTrue b s1 c s2 s3 X t1)
   let ?w = "WHILE b DO c"
-  from `bval b s1` WhileTrue.prems have "bval b t1"
+  from \<open>bval b s1\<close> WhileTrue.prems have "bval b t1"
     by (metis L_While_vars bval_eq_if_eq_on_vars set_mp)
   have "s1 = t1 on L c (L ?w X)" using  L_While_pfp WhileTrue.prems
     by (blast)
@@ -99,7 +99,7 @@
     "(c, t1) \<Rightarrow> t2" "s2 = t2 on L ?w X" by auto
   from WhileTrue.IH(2)[OF this(2)] obtain t3 where "(?w,t2) \<Rightarrow> t3" "s3 = t3 on X"
     by auto
-  with `bval b t1` `(c, t1) \<Rightarrow> t2` show ?case by auto
+  with \<open>bval b t1\<close> \<open>(c, t1) \<Rightarrow> t2\<close> show ?case by auto
 qed
 
 
@@ -114,11 +114,11 @@
   thus ?case by (simp add: L.simps(5))
 qed auto
 
-text{* Make @{const L} executable by replacing @{const lfp} with the @{const
+text\<open>Make @{const L} executable by replacing @{const lfp} with the @{const
 while} combinator from theory @{theory While_Combinator}. The @{const while}
 combinator obeys the recursion equation
 @{thm[display] While_Combinator.while_unfold[no_vars]}
-and is thus executable. *}
+and is thus executable.\<close>
 
 lemma L_While: fixes b c X
 assumes "finite X" defines "f == \<lambda>Y. vars b \<union> X \<union> L c Y"
@@ -132,7 +132,7 @@
     fix Y show "Y \<subseteq> ?V \<Longrightarrow> f Y \<subseteq> ?V"
       unfolding f_def using L_subset_vars[of c] by blast
   next
-    show "finite ?V" using `finite X` by simp
+    show "finite ?V" using \<open>finite X\<close> by simp
   qed
   thus ?thesis by (simp add: f_def L.simps(5))
 qed
@@ -147,11 +147,11 @@
    in while (\<lambda>Y. f Y \<noteq> Y) f {})"
 by(rule L_While_let, simp)
 
-text{* Replace the equation for @{text "L (WHILE \<dots>)"} by the executable @{thm[source] L_While_set}: *}
+text\<open>Replace the equation for @{text "L (WHILE \<dots>)"} by the executable @{thm[source] L_While_set}:\<close>
 lemmas [code] = L.simps(1-4) L_While_set
-text{* Sorry, this syntax is odd. *}
+text\<open>Sorry, this syntax is odd.\<close>
 
-text{* A test: *}
+text\<open>A test:\<close>
 lemma "(let b = Less (N 0) (V ''y''); c = ''y'' ::= V ''x'';; ''x'' ::= V ''z''
   in L (WHILE b DO c) {''y''}) = {''x'', ''y'', ''z''}"
 by eval
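
Spelled out in Haskell, the while-combinator computation is plain Kleene
iteration from the empty set; it terminates because the iterated function is
monotone and bounded by the finitely many variables of the command (a sketch
with sets modelled by Data.Set):

    import qualified Data.Set as S

    -- mirrors Isabelle's  while (λY. f Y ≠ Y) f {}
    lfpIter :: Ord a => (S.Set a -> S.Set a) -> S.Set a
    lfpIter f = go S.empty
      where go y = let y' = f y in if y' == y then y else go y'

    -- the function iterated for L (WHILE b DO c) X, given vars b, X,
    -- and the liveness function of the loop body
    fWhile :: S.Set String -> S.Set String
           -> (S.Set String -> S.Set String) -> S.Set String -> S.Set String
    fWhile varsB x lc y = varsB `S.union` x `S.union` lc y

so the test above amounts to evaluating lfpIter (fWhile varsB x lc).
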
@@ -159,14 +159,14 @@
 
 subsection "Limiting the number of iterations"
 
-text{* The final parameter is the default value: *}
+text\<open>The final parameter is the default value:\<close>
 
 fun iter :: "('a \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" where
 "iter f 0 p d = d" |
 "iter f (Suc n) p d = (if f p = p then p else iter f n (f p) d)"
 
-text{* A version of @{const L} with a bounded number of iterations (here: 2)
-in the WHILE case: *}
+text\<open>A version of @{const L} with a bounded number of iterations (here: 2)
+in the WHILE case:\<close>
 
 fun Lb :: "com \<Rightarrow> vname set \<Rightarrow> vname set" where
 "Lb SKIP X = X" |
@@ -175,7 +175,7 @@
 "Lb (IF b THEN c\<^sub>1 ELSE c\<^sub>2) X = vars b \<union> Lb c\<^sub>1 X \<union> Lb c\<^sub>2 X" |
 "Lb (WHILE b DO c) X = iter (\<lambda>A. vars b \<union> X \<union> Lb c A) 2 {} (vars b \<union> rvars c \<union> X)"
 
-text{* @{const Lb} (and @{const iter}) is not monotone! *}
+text\<open>@{const Lb} (and @{const iter}) is not monotone!\<close>
 lemma "let w = WHILE Bc False DO (''x'' ::= V ''y'';; ''z'' ::= V ''x'')
   in \<not> (Lb w {''z''} \<subseteq> Lb w {''y'',''z''})"
 by eval
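
iter carries over to Haskell verbatim; the last argument is the fallback
returned when the bound is exhausted, which is how Lb can over-approximate
and, as the eval example just showed, lose monotonicity:

    iter :: Eq a => (a -> a) -> Int -> a -> a -> a
    iter _ 0 _ d = d
    iter f n p d = if f p == p then p else iter f (n - 1) (f p) d
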
--- a/src/HOL/IMP/OO.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/OO.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -79,11 +79,11 @@
 
 code_pred (modes: i => i => o => bool) big_step .
 
-text{* Example: natural numbers encoded as objects with a predecessor
+text\<open>Example: natural numbers encoded as objects with a predecessor
 field. Null is zero. Method succ adds an object in front, method add
 adds as many objects in front as the parameter specifies.
 
-First, the method bodies: *}
+First, the method bodies:\<close>
 
 definition
 "m_succ  =  (''s'' ::= New)\<bullet>''pred'' ::= V ''this''; V ''s''"
@@ -93,19 +93,19 @@
   THEN V ''this''
   ELSE V ''this''\<bullet>''succ''<Null>\<bullet>''add''<V ''param''\<bullet>''pred''>"
 
-text{* The method environment: *}
+text\<open>The method environment:\<close>
 definition
 "menv = (\<lambda>m. Null)(''succ'' := m_succ, ''add'' := m_add)"
 
-text{* The main code, adding 1 and 2: *}
+text\<open>The main code, adding 1 and 2:\<close>
 definition "main =
   ''1'' ::= Null\<bullet>''succ''<Null>;
   ''2'' ::= V ''1''\<bullet>''succ''<Null>;
   V ''2'' \<bullet> ''add'' <V ''1''>"
 
-text{* Execution of semantics. The final variable environment and store are
+text\<open>Execution of semantics. The final variable environment and store are
 converted into lists of references based on given lists of variable and field
-names to extract. *}
+names to extract.\<close>
 
 values
  "{(r, map ve' [''1'',''2''], map (\<lambda>n. map (s' n)[''pred'']) [0..<n])|
--- a/src/HOL/IMP/Poly_Types.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Poly_Types.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,7 +4,7 @@
 
 datatype ty = Ity | Rty | TV nat
 
-text{* Everything else remains the same. *}
+text\<open>Everything else remains the same.\<close>
 
 type_synonym tyenv = "vname \<Rightarrow> ty"
 
@@ -42,7 +42,7 @@
 "tsubst S t = t"
 
 
-subsection{* Typing is Preserved by Substitution *}
+subsection\<open>Typing is Preserved by Substitution\<close>
 
 lemma subst_atyping: "E \<turnstile>p a : t \<Longrightarrow> tsubst S \<circ> E \<turnstile>p a : tsubst S t"
 apply(induction rule: atyping.induct)
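
The hunk shows only the catch-all clause of tsubst; a Haskell sketch with the
elided type-variable clause filled in the one way the substitution lemma can
work (our reconstruction):

    data Ty = Ity | Rty | TV Int deriving (Eq, Show)

    tsubst :: (Int -> Ty) -> Ty -> Ty
    tsubst s (TV n) = s n   -- the clause elided by the hunk
    tsubst _ t      = t
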
--- a/src/HOL/IMP/Sec_Type_Expr.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Sec_Type_Expr.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -10,9 +10,9 @@
 class sec =
 fixes sec :: "'a \<Rightarrow> nat"
 
-text{* The security/confidentiality level of each variable is globally fixed
+text\<open>The security/confidentiality level of each variable is globally fixed
 for simplicity. For the sake of examples --- the general theory does not rely
+on it! --- a variable whose name has length @{text n} has security level @{text n}:\<close>
+on it! --- a variable of length @{text n} has security level @{text n}:\<close>
 
 instantiation list :: (type)sec
 begin
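
Concretely, the instantiation begun here makes the level of a variable the
length of its name (the defining equation falls outside the hunk); in Haskell
terms, a one-line sketch:

    -- example-only levels: sec "x" == 1 < sec "xx" == 2
    sec :: String -> Int
    sec = length
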
--- a/src/HOL/IMP/Sec_Typing.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Sec_Typing.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -27,7 +27,7 @@
   "l \<turnstile> x ::= a"  "l \<turnstile> c\<^sub>1;;c\<^sub>2"  "l \<turnstile> IF b THEN c\<^sub>1 ELSE c\<^sub>2"  "l \<turnstile> WHILE b DO c"
 
 
-text{* An important property: anti-monotonicity. *}
+text\<open>An important property: anti-monotonicity.\<close>
 
 lemma anti_mono: "\<lbrakk> l \<turnstile> c;  l' \<le> l \<rbrakk> \<Longrightarrow> l' \<turnstile> c"
 apply(induction arbitrary: l' rule: sec_type.induct)
@@ -73,60 +73,60 @@
 next
   case (Assign x a s)
   have [simp]: "t' = t(x := aval a t)" using Assign by auto
-  have "sec x >= sec a" using `0 \<turnstile> x ::= a` by auto
+  have "sec x >= sec a" using \<open>0 \<turnstile> x ::= a\<close> by auto
   show ?case
   proof auto
     assume "sec x \<le> l"
-    with `sec x >= sec a` have "sec a \<le> l" by arith
+    with \<open>sec x >= sec a\<close> have "sec a \<le> l" by arith
     thus "aval a s = aval a t"
-      by (rule aval_eq_if_eq_le[OF `s = t (\<le> l)`])
+      by (rule aval_eq_if_eq_le[OF \<open>s = t (\<le> l)\<close>])
   next
     fix y assume "y \<noteq> x" "sec y \<le> l"
-    thus "s y = t y" using `s = t (\<le> l)` by simp
+    thus "s y = t y" using \<open>s = t (\<le> l)\<close> by simp
   qed
 next
   case Seq thus ?case by blast
 next
   case (IfTrue b s c1 s' c2)
-  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using `0 \<turnstile> IF b THEN c1 ELSE c2` by auto
+  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using \<open>0 \<turnstile> IF b THEN c1 ELSE c2\<close> by auto
   show ?case
   proof cases
     assume "sec b \<le> l"
-    hence "s = t (\<le> sec b)" using `s = t (\<le> l)` by auto
-    hence "bval b t" using `bval b s` by(simp add: bval_eq_if_eq_le)
-    with IfTrue.IH IfTrue.prems(1,3) `sec b \<turnstile> c1`  anti_mono
+    hence "s = t (\<le> sec b)" using \<open>s = t (\<le> l)\<close> by auto
+    hence "bval b t" using \<open>bval b s\<close> by(simp add: bval_eq_if_eq_le)
+    with IfTrue.IH IfTrue.prems(1,3) \<open>sec b \<turnstile> c1\<close>  anti_mono
     show ?thesis by auto
   next
     assume "\<not> sec b \<le> l"
     have 1: "sec b \<turnstile> IF b THEN c1 ELSE c2"
-      by(rule sec_type.intros)(simp_all add: `sec b \<turnstile> c1` `sec b \<turnstile> c2`)
-    from confinement[OF `(c1, s) \<Rightarrow> s'` `sec b \<turnstile> c1`] `\<not> sec b \<le> l`
+      by(rule sec_type.intros)(simp_all add: \<open>sec b \<turnstile> c1\<close> \<open>sec b \<turnstile> c2\<close>)
+    from confinement[OF \<open>(c1, s) \<Rightarrow> s'\<close> \<open>sec b \<turnstile> c1\<close>] \<open>\<not> sec b \<le> l\<close>
     have "s = s' (\<le> l)" by auto
     moreover
-    from confinement[OF `(IF b THEN c1 ELSE c2, t) \<Rightarrow> t'` 1] `\<not> sec b \<le> l`
+    from confinement[OF \<open>(IF b THEN c1 ELSE c2, t) \<Rightarrow> t'\<close> 1] \<open>\<not> sec b \<le> l\<close>
     have "t = t' (\<le> l)" by auto
-    ultimately show "s' = t' (\<le> l)" using `s = t (\<le> l)` by auto
+    ultimately show "s' = t' (\<le> l)" using \<open>s = t (\<le> l)\<close> by auto
   qed
 next
   case (IfFalse b s c2 s' c1)
-  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using `0 \<turnstile> IF b THEN c1 ELSE c2` by auto
+  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using \<open>0 \<turnstile> IF b THEN c1 ELSE c2\<close> by auto
   show ?case
   proof cases
     assume "sec b \<le> l"
-    hence "s = t (\<le> sec b)" using `s = t (\<le> l)` by auto
-    hence "\<not> bval b t" using `\<not> bval b s` by(simp add: bval_eq_if_eq_le)
-    with IfFalse.IH IfFalse.prems(1,3) `sec b \<turnstile> c2` anti_mono
+    hence "s = t (\<le> sec b)" using \<open>s = t (\<le> l)\<close> by auto
+    hence "\<not> bval b t" using \<open>\<not> bval b s\<close> by(simp add: bval_eq_if_eq_le)
+    with IfFalse.IH IfFalse.prems(1,3) \<open>sec b \<turnstile> c2\<close> anti_mono
     show ?thesis by auto
   next
     assume "\<not> sec b \<le> l"
     have 1: "sec b \<turnstile> IF b THEN c1 ELSE c2"
-      by(rule sec_type.intros)(simp_all add: `sec b \<turnstile> c1` `sec b \<turnstile> c2`)
-    from confinement[OF big_step.IfFalse[OF IfFalse(1,2)] 1] `\<not> sec b \<le> l`
+      by(rule sec_type.intros)(simp_all add: \<open>sec b \<turnstile> c1\<close> \<open>sec b \<turnstile> c2\<close>)
+    from confinement[OF big_step.IfFalse[OF IfFalse(1,2)] 1] \<open>\<not> sec b \<le> l\<close>
     have "s = s' (\<le> l)" by auto
     moreover
-    from confinement[OF `(IF b THEN c1 ELSE c2, t) \<Rightarrow> t'` 1] `\<not> sec b \<le> l`
+    from confinement[OF \<open>(IF b THEN c1 ELSE c2, t) \<Rightarrow> t'\<close> 1] \<open>\<not> sec b \<le> l\<close>
     have "t = t' (\<le> l)" by auto
-    ultimately show "s' = t' (\<le> l)" using `s = t (\<le> l)` by auto
+    ultimately show "s' = t' (\<le> l)" using \<open>s = t (\<le> l)\<close> by auto
   qed
 next
   case (WhileFalse b s c)
@@ -134,52 +134,52 @@
   show ?case
   proof cases
     assume "sec b \<le> l"
-    hence "s = t (\<le> sec b)" using `s = t (\<le> l)` by auto
-    hence "\<not> bval b t" using `\<not> bval b s` by(simp add: bval_eq_if_eq_le)
+    hence "s = t (\<le> sec b)" using \<open>s = t (\<le> l)\<close> by auto
+    hence "\<not> bval b t" using \<open>\<not> bval b s\<close> by(simp add: bval_eq_if_eq_le)
     with WhileFalse.prems(1,3) show ?thesis by auto
   next
     assume "\<not> sec b \<le> l"
     have 1: "sec b \<turnstile> WHILE b DO c"
-      by(rule sec_type.intros)(simp_all add: `sec b \<turnstile> c`)
-    from confinement[OF `(WHILE b DO c, t) \<Rightarrow> t'` 1] `\<not> sec b \<le> l`
+      by(rule sec_type.intros)(simp_all add: \<open>sec b \<turnstile> c\<close>)
+    from confinement[OF \<open>(WHILE b DO c, t) \<Rightarrow> t'\<close> 1] \<open>\<not> sec b \<le> l\<close>
     have "t = t' (\<le> l)" by auto
-    thus "s = t' (\<le> l)" using `s = t (\<le> l)` by auto
+    thus "s = t' (\<le> l)" using \<open>s = t (\<le> l)\<close> by auto
   qed
 next
   case (WhileTrue b s1 c s2 s3 t1 t3)
   let ?w = "WHILE b DO c"
-  have "sec b \<turnstile> c" using `0 \<turnstile> WHILE b DO c` by auto
+  have "sec b \<turnstile> c" using \<open>0 \<turnstile> WHILE b DO c\<close> by auto
   show ?case
   proof cases
     assume "sec b \<le> l"
-    hence "s1 = t1 (\<le> sec b)" using `s1 = t1 (\<le> l)` by auto
+    hence "s1 = t1 (\<le> sec b)" using \<open>s1 = t1 (\<le> l)\<close> by auto
     hence "bval b t1"
-      using `bval b s1` by(simp add: bval_eq_if_eq_le)
+      using \<open>bval b s1\<close> by(simp add: bval_eq_if_eq_le)
     then obtain t2 where "(c,t1) \<Rightarrow> t2" "(?w,t2) \<Rightarrow> t3"
-      using `(?w,t1) \<Rightarrow> t3` by auto
-    from WhileTrue.IH(2)[OF `(?w,t2) \<Rightarrow> t3` `0 \<turnstile> ?w`
-      WhileTrue.IH(1)[OF `(c,t1) \<Rightarrow> t2` anti_mono[OF `sec b \<turnstile> c`]
-        `s1 = t1 (\<le> l)`]]
+      using \<open>(?w,t1) \<Rightarrow> t3\<close> by auto
+    from WhileTrue.IH(2)[OF \<open>(?w,t2) \<Rightarrow> t3\<close> \<open>0 \<turnstile> ?w\<close>
+      WhileTrue.IH(1)[OF \<open>(c,t1) \<Rightarrow> t2\<close> anti_mono[OF \<open>sec b \<turnstile> c\<close>]
+        \<open>s1 = t1 (\<le> l)\<close>]]
     show ?thesis by simp
   next
     assume "\<not> sec b \<le> l"
-    have 1: "sec b \<turnstile> ?w" by(rule sec_type.intros)(simp_all add: `sec b \<turnstile> c`)
-    from confinement[OF big_step.WhileTrue[OF WhileTrue.hyps] 1] `\<not> sec b \<le> l`
+    have 1: "sec b \<turnstile> ?w" by(rule sec_type.intros)(simp_all add: \<open>sec b \<turnstile> c\<close>)
+    from confinement[OF big_step.WhileTrue[OF WhileTrue.hyps] 1] \<open>\<not> sec b \<le> l\<close>
     have "s1 = s3 (\<le> l)" by auto
     moreover
-    from confinement[OF `(WHILE b DO c, t1) \<Rightarrow> t3` 1] `\<not> sec b \<le> l`
+    from confinement[OF \<open>(WHILE b DO c, t1) \<Rightarrow> t3\<close> 1] \<open>\<not> sec b \<le> l\<close>
     have "t1 = t3 (\<le> l)" by auto
-    ultimately show "s3 = t3 (\<le> l)" using `s1 = t1 (\<le> l)` by auto
+    ultimately show "s3 = t3 (\<le> l)" using \<open>s1 = t1 (\<le> l)\<close> by auto
   qed
 qed
 
 
 subsection "The Standard Typing System"
 
-text{* The predicate @{prop"l \<turnstile> c"} is nicely intuitive and executable. The
+text\<open>The predicate @{prop"l \<turnstile> c"} is nicely intuitive and executable. The
 standard formulation, however, is slightly different, replacing the maximum
 computation by an antimonotonicity rule. We introduce the standard system now
-and show the equivalence with our formulation. *}
+and show the equivalence with our formulation.\<close>
 
 inductive sec_type' :: "nat \<Rightarrow> com \<Rightarrow> bool" ("(_/ \<turnstile>'' _)" [0,0] 50) where
 Skip':
--- a/src/HOL/IMP/Sec_TypingT.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Sec_TypingT.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -71,104 +71,104 @@
   case Skip thus ?case by auto
 next
   case (Assign x a s)
-  have "sec x >= sec a" using `0 \<turnstile> x ::= a` by auto
+  have "sec x >= sec a" using \<open>0 \<turnstile> x ::= a\<close> by auto
   have "(x ::= a,t) \<Rightarrow> t(x := aval a t)" by auto
   moreover
   have "s(x := aval a s) = t(x := aval a t) (\<le> l)"
   proof auto
     assume "sec x \<le> l"
-    with `sec x \<ge> sec a` have "sec a \<le> l" by arith
+    with \<open>sec x \<ge> sec a\<close> have "sec a \<le> l" by arith
     thus "aval a s = aval a t"
-      by (rule aval_eq_if_eq_le[OF `s = t (\<le> l)`])
+      by (rule aval_eq_if_eq_le[OF \<open>s = t (\<le> l)\<close>])
   next
     fix y assume "y \<noteq> x" "sec y \<le> l"
-    thus "s y = t y" using `s = t (\<le> l)` by simp
+    thus "s y = t y" using \<open>s = t (\<le> l)\<close> by simp
   qed
   ultimately show ?case by blast
 next
   case Seq thus ?case by blast
 next
   case (IfTrue b s c1 s' c2)
-  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using `0 \<turnstile> IF b THEN c1 ELSE c2` by auto
+  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using \<open>0 \<turnstile> IF b THEN c1 ELSE c2\<close> by auto
   obtain t' where t': "(c1, t) \<Rightarrow> t'" "s' = t' (\<le> l)"
-    using IfTrue.IH[OF anti_mono[OF `sec b \<turnstile> c1`] `s = t (\<le> l)`] by blast
+    using IfTrue.IH[OF anti_mono[OF \<open>sec b \<turnstile> c1\<close>] \<open>s = t (\<le> l)\<close>] by blast
   show ?case
   proof cases
     assume "sec b \<le> l"
-    hence "s = t (\<le> sec b)" using `s = t (\<le> l)` by auto
-    hence "bval b t" using `bval b s` by(simp add: bval_eq_if_eq_le)
+    hence "s = t (\<le> sec b)" using \<open>s = t (\<le> l)\<close> by auto
+    hence "bval b t" using \<open>bval b s\<close> by(simp add: bval_eq_if_eq_le)
     thus ?thesis by (metis t' big_step.IfTrue)
   next
     assume "\<not> sec b \<le> l"
     hence 0: "sec b \<noteq> 0" by arith
     have 1: "sec b \<turnstile> IF b THEN c1 ELSE c2"
-      by(rule sec_type.intros)(simp_all add: `sec b \<turnstile> c1` `sec b \<turnstile> c2`)
-    from confinement[OF big_step.IfTrue[OF IfTrue(1,2)] 1] `\<not> sec b \<le> l`
+      by(rule sec_type.intros)(simp_all add: \<open>sec b \<turnstile> c1\<close> \<open>sec b \<turnstile> c2\<close>)
+    from confinement[OF big_step.IfTrue[OF IfTrue(1,2)] 1] \<open>\<not> sec b \<le> l\<close>
     have "s = s' (\<le> l)" by auto
     moreover
     from termi_if_non0[OF 1 0, of t] obtain t' where
       t': "(IF b THEN c1 ELSE c2,t) \<Rightarrow> t'" ..
     moreover
-    from confinement[OF t' 1] `\<not> sec b \<le> l`
+    from confinement[OF t' 1] \<open>\<not> sec b \<le> l\<close>
     have "t = t' (\<le> l)" by auto
     ultimately
-    show ?case using `s = t (\<le> l)` by auto
+    show ?case using \<open>s = t (\<le> l)\<close> by auto
   qed
 next
   case (IfFalse b s c2 s' c1)
-  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using `0 \<turnstile> IF b THEN c1 ELSE c2` by auto
+  have "sec b \<turnstile> c1" "sec b \<turnstile> c2" using \<open>0 \<turnstile> IF b THEN c1 ELSE c2\<close> by auto
   obtain t' where t': "(c2, t) \<Rightarrow> t'" "s' = t' (\<le> l)"
-    using IfFalse.IH[OF anti_mono[OF `sec b \<turnstile> c2`] `s = t (\<le> l)`] by blast
+    using IfFalse.IH[OF anti_mono[OF \<open>sec b \<turnstile> c2\<close>] \<open>s = t (\<le> l)\<close>] by blast
   show ?case
   proof cases
     assume "sec b \<le> l"
-    hence "s = t (\<le> sec b)" using `s = t (\<le> l)` by auto
-    hence "\<not> bval b t" using `\<not> bval b s` by(simp add: bval_eq_if_eq_le)
+    hence "s = t (\<le> sec b)" using \<open>s = t (\<le> l)\<close> by auto
+    hence "\<not> bval b t" using \<open>\<not> bval b s\<close> by(simp add: bval_eq_if_eq_le)
     thus ?thesis by (metis t' big_step.IfFalse)
   next
     assume "\<not> sec b \<le> l"
     hence 0: "sec b \<noteq> 0" by arith
     have 1: "sec b \<turnstile> IF b THEN c1 ELSE c2"
-      by(rule sec_type.intros)(simp_all add: `sec b \<turnstile> c1` `sec b \<turnstile> c2`)
-    from confinement[OF big_step.IfFalse[OF IfFalse(1,2)] 1] `\<not> sec b \<le> l`
+      by(rule sec_type.intros)(simp_all add: \<open>sec b \<turnstile> c1\<close> \<open>sec b \<turnstile> c2\<close>)
+    from confinement[OF big_step.IfFalse[OF IfFalse(1,2)] 1] \<open>\<not> sec b \<le> l\<close>
     have "s = s' (\<le> l)" by auto
     moreover
     from termi_if_non0[OF 1 0, of t] obtain t' where
       t': "(IF b THEN c1 ELSE c2,t) \<Rightarrow> t'" ..
     moreover
-    from confinement[OF t' 1] `\<not> sec b \<le> l`
+    from confinement[OF t' 1] \<open>\<not> sec b \<le> l\<close>
     have "t = t' (\<le> l)" by auto
     ultimately
-    show ?case using `s = t (\<le> l)` by auto
+    show ?case using \<open>s = t (\<le> l)\<close> by auto
   qed
 next
   case (WhileFalse b s c)
   hence [simp]: "sec b = 0" by auto
-  have "s = t (\<le> sec b)" using `s = t (\<le> l)` by auto
-  hence "\<not> bval b t" using `\<not> bval b s` by (metis bval_eq_if_eq_le le_refl)
+  have "s = t (\<le> sec b)" using \<open>s = t (\<le> l)\<close> by auto
+  hence "\<not> bval b t" using \<open>\<not> bval b s\<close> by (metis bval_eq_if_eq_le le_refl)
   with WhileFalse.prems(2) show ?case by auto
 next
   case (WhileTrue b s c s'' s')
   let ?w = "WHILE b DO c"
-  from `0 \<turnstile> ?w` have [simp]: "sec b = 0" by auto
-  have "0 \<turnstile> c" using `0 \<turnstile> WHILE b DO c` by auto
-  from WhileTrue.IH(1)[OF this `s = t (\<le> l)`]
+  from \<open>0 \<turnstile> ?w\<close> have [simp]: "sec b = 0" by auto
+  have "0 \<turnstile> c" using \<open>0 \<turnstile> WHILE b DO c\<close> by auto
+  from WhileTrue.IH(1)[OF this \<open>s = t (\<le> l)\<close>]
   obtain t'' where "(c,t) \<Rightarrow> t''" and "s'' = t'' (\<le>l)" by blast
-  from WhileTrue.IH(2)[OF `0 \<turnstile> ?w` this(2)]
+  from WhileTrue.IH(2)[OF \<open>0 \<turnstile> ?w\<close> this(2)]
   obtain t' where "(?w,t'') \<Rightarrow> t'" and "s' = t' (\<le>l)" by blast
-  from `bval b s` have "bval b t"
-    using bval_eq_if_eq_le[OF `s = t (\<le>l)`] by auto
+  from \<open>bval b s\<close> have "bval b t"
+    using bval_eq_if_eq_le[OF \<open>s = t (\<le>l)\<close>] by auto
   show ?case
-    using big_step.WhileTrue[OF `bval b t` `(c,t) \<Rightarrow> t''` `(?w,t'') \<Rightarrow> t'`]
-    by (metis `s' = t' (\<le> l)`)
+    using big_step.WhileTrue[OF \<open>bval b t\<close> \<open>(c,t) \<Rightarrow> t''\<close> \<open>(?w,t'') \<Rightarrow> t'\<close>]
+    by (metis \<open>s' = t' (\<le> l)\<close>)
 qed
 
 subsection "The Standard Termination-Sensitive System"
 
-text{* The predicate @{prop"l \<turnstile> c"} is nicely intuitive and executable. The
+text\<open>The predicate @{prop"l \<turnstile> c"} is nicely intuitive and executable. The
 standard formulation, however, is slightly different, replacing the maximum
 computation by an antimonotonicity rule. We introduce the standard system now
-and show the equivalence with our formulation. *}
+and show the equivalence with our formulation.\<close>
 
 inductive sec_type' :: "nat \<Rightarrow> com \<Rightarrow> bool" ("(_/ \<turnstile>'' _)" [0,0] 50) where
 Skip':
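For orientation: the distinguishing feature of the standard system is an explicit antimonotonicity rule in place of the max computation. A plausible sketch of such a rule (our hypothetical rendering; the full set of primed rules continues in the file beyond this excerpt):

(* sketch only: anti_mono' lets a command typed at a high level be
   re-typed at any lower level, which the executable system achieves
   implicitly via max *)
anti_mono':
  "\<lbrakk> l \<turnstile>' c;  l' \<le> l \<rbrakk> \<Longrightarrow> l' \<turnstile>' c"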
--- a/src/HOL/IMP/Sem_Equiv.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Sem_Equiv.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -88,17 +88,17 @@
   hence IH: "P s2 \<Longrightarrow> (WHILE b' DO c', s2) \<Rightarrow> s3" by auto
   from WhileTrue.prems
   have "P \<Turnstile> b <\<sim>> b'" by simp
-  with `bval b s1` `P s1`
+  with \<open>bval b s1\<close> \<open>P s1\<close>
   have "bval b' s1" by (simp add: bequiv_up_to_def)
   moreover
   from WhileTrue.prems
   have "P \<Turnstile> c \<sim> c'" by simp
-  with `bval b s1` `P s1` `(c, s1) \<Rightarrow> s2`
+  with \<open>bval b s1\<close> \<open>P s1\<close> \<open>(c, s1) \<Rightarrow> s2\<close>
   have "(c', s1) \<Rightarrow> s2" by (simp add: equiv_up_to_def)
   moreover
   from WhileTrue.prems
   have "\<And>s s'. (c,s) \<Rightarrow> s' \<Longrightarrow> P s \<Longrightarrow> bval b s \<Longrightarrow> P s'" by simp
-  with `P s1` `bval b s1` `(c, s1) \<Rightarrow> s2`
+  with \<open>P s1\<close> \<open>bval b s1\<close> \<open>(c, s1) \<Rightarrow> s2\<close>
   have "P s2" by simp
   hence "(WHILE b' DO c', s2) \<Rightarrow> s3" by (rule IH)
   ultimately
--- a/src/HOL/IMP/Small_Step.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Small_Step.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -23,7 +23,7 @@
   small_steps :: "com * state \<Rightarrow> com * state \<Rightarrow> bool" (infix "\<rightarrow>*" 55)
 where "x \<rightarrow>* y == star small_step x y"
 
-subsection{* Executability *}
+subsection\<open>Executability\<close>
 
 code_pred small_step .
 
@@ -32,23 +32,23 @@
     <''x'' := 3, ''y'' := 7, ''z'' := 5>) \<rightarrow>* (c',t)}"
 
 
-subsection{* Proof infrastructure *}
+subsection\<open>Proof infrastructure\<close>
 
-subsubsection{* Induction rules *}
+subsubsection\<open>Induction rules\<close>
 
-text{* The default induction rule @{thm[source] small_step.induct} only works
+text\<open>The default induction rule @{thm[source] small_step.induct} only works
 for lemmas of the form @{text"a \<rightarrow> b \<Longrightarrow> \<dots>"} where @{text a} and @{text b} are
 not already pairs @{text"(DUMMY,DUMMY)"}. We can generate a suitable variant
of @{thm[source] small_step.induct} for pairs by ``splitting'' the arguments of
-@{text"\<rightarrow>"} into pairs: *}
+@{text"\<rightarrow>"} into pairs:\<close>
 lemmas small_step_induct = small_step.induct[split_format(complete)]
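A hedged illustration of what the split rule buys (hypothetical lemma, not part of the file; it also leans on the automation declared further down): a statement already phrased over the pair components can be proved by induction directly:

(* hypothetical: determinism restated over components; small_step_induct
   yields one case per rule, each dischargeable once the intro/elim
   rules below are declared *)
lemma "(c,s) \<rightarrow> (c',s') \<Longrightarrow> (c,s) \<rightarrow> (c'',s'') \<Longrightarrow> c'' = c' \<and> s'' = s'"
  by (induction arbitrary: c'' s'' rule: small_step_induct) blast+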
 
 
-subsubsection{* Proof automation *}
+subsubsection\<open>Proof automation\<close>
 
 declare small_step.intros[simp,intro]
 
-text{* Rule inversion: *}
+text\<open>Rule inversion:\<close>
 
 inductive_cases SkipE[elim!]: "(SKIP,s) \<rightarrow> ct"
 thm SkipE
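Since no rule lets @{text SKIP} make a step, @{thm[source] SkipE} has no cases and refutes any such premise outright. A small sanity check (our example, not from the file):

(* SkipE, declared [elim!], fires automatically *)
lemma "\<not> (SKIP, s) \<rightarrow> ct"
  by blast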
@@ -60,7 +60,7 @@
 inductive_cases WhileE[elim]: "(WHILE b DO c, s) \<rightarrow> ct"
 
 
-text{* A simple property: *}
+text\<open>A simple property:\<close>
 lemma deterministic:
   "cs \<rightarrow> cs' \<Longrightarrow> cs \<rightarrow> cs'' \<Longrightarrow> cs'' = cs'"
 apply(induction arbitrary: cs'' rule: small_step.induct)
@@ -83,8 +83,8 @@
    \<Longrightarrow> (c1;;c2, s1) \<rightarrow>* (SKIP,s3)"
 by(blast intro: star.step star_seq2 star_trans)
 
-text{* The following proof corresponds to one on the board where one would
-show chains of @{text "\<rightarrow>"} and @{text "\<rightarrow>*"} steps. *}
+text\<open>The following proof corresponds to one on the board where one would
+show chains of @{text "\<rightarrow>"} and @{text "\<rightarrow>*"} steps.\<close>
 
 lemma big_to_small:
   "cs \<Rightarrow> t \<Longrightarrow> cs \<rightarrow>* (SKIP,t)"
@@ -130,7 +130,7 @@
   ultimately show "(WHILE b DO c,s) \<rightarrow>* (SKIP,t)" by (metis star.simps)
 qed
 
-text{* Each case of the induction can be proved automatically: *}
+text\<open>Each case of the induction can be proved automatically:\<close>
 lemma  "cs \<Rightarrow> t \<Longrightarrow> cs \<rightarrow>* (SKIP,t)"
 proof (induction rule: big_step.induct)
   case Skip show ?case by blast
@@ -163,9 +163,9 @@
 apply (auto intro: small1_big_continue)
 done
 
-text {*
+text \<open>
   Finally, the equivalence theorem:
-*}
+\<close>
 theorem big_iff_small:
   "cs \<Rightarrow> t = cs \<rightarrow>* (SKIP,t)"
 by(metis big_to_small small_to_big)
@@ -184,16 +184,16 @@
 lemma final_iff_SKIP: "final (c,s) = (c = SKIP)"
 by (metis SkipE finalD final_def)
 
-text{* Now we can show that @{text"\<Rightarrow>"} yields a final state iff @{text"\<rightarrow>"}
-terminates: *}
+text\<open>Now we can show that @{text"\<Rightarrow>"} yields a final state iff @{text"\<rightarrow>"}
+terminates:\<close>
 
 lemma big_iff_small_termination:
   "(EX t. cs \<Rightarrow> t) \<longleftrightarrow> (EX cs'. cs \<rightarrow>* cs' \<and> final cs')"
 by(simp add: big_iff_small final_iff_SKIP)
 
-text{* This is the same as saying that the absence of a big step result is
+text\<open>This is the same as saying that the absence of a big step result is
equivalent to the absence of a terminating small step sequence, i.e.\ with
nontermination.  Since @{text"\<rightarrow>"} is deterministic, there is no difference
-between may and must terminate. *}
+between may and must terminate.\<close>
 
 end
--- a/src/HOL/IMP/Star.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Star.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,7 +7,7 @@
 refl:  "star r x x" |
 step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
 
-hide_fact (open) refl step  --"names too generic"
+hide_fact (open) refl step  \<comment>"names too generic"
 
 lemma star_trans:
   "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
--- a/src/HOL/IMP/Types.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Types.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,8 +2,8 @@
 
 theory Types imports Star Complex_Main begin
 
-text {* We build on @{theory Complex_Main} instead of @{theory Main} to access
-the real numbers. *}
+text \<open>We build on @{theory Complex_Main} instead of @{theory Main} to access
+the real numbers.\<close>
 
 subsection "Arithmetic Expressions"
 
@@ -12,9 +12,9 @@
 type_synonym vname = string
 type_synonym state = "vname \<Rightarrow> val"
 
-text_raw{*\snip{aexptDef}{0}{2}{% *}
+text_raw\<open>\snip{aexptDef}{0}{2}{%\<close>
 datatype aexp =  Ic int | Rc real | V vname | Plus aexp aexp
-text_raw{*}%endsnip*}
+text_raw\<open>}%endsnip\<close>
 
 inductive taval :: "aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool" where
 "taval (Ic i) s (Iv i)" |
@@ -87,10 +87,10 @@
 inductive_cases [elim!]:
   "\<Gamma> \<turnstile> V x : \<tau>" "\<Gamma> \<turnstile> Ic i : \<tau>" "\<Gamma> \<turnstile> Rc r : \<tau>" "\<Gamma> \<turnstile> Plus a1 a2 : \<tau>"
 
-text{* Warning: the ``:'' notation leads to syntactic ambiguities,
+text\<open>Warning: the ``:'' notation leads to syntactic ambiguities,
 i.e. multiple parse trees, because ``:'' also stands for set membership.
 In most situations Isabelle's type system will reject all but one parse tree,
-but will still inform you of the potential ambiguity. *}
+but will still inform you of the potential ambiguity.\<close>
 
 inductive btyping :: "tyenv \<Rightarrow> bexp \<Rightarrow> bool" (infix "\<turnstile>" 50)
 where
@@ -184,10 +184,10 @@
   show ?case
   proof(cases bv)
     assume "bv"
-    with `tbval b s bv` show ?case by simp (metis IfTrue)
+    with \<open>tbval b s bv\<close> show ?case by simp (metis IfTrue)
   next
     assume "\<not>bv"
-    with `tbval b s bv` show ?case by simp (metis IfFalse)
+    with \<open>tbval b s bv\<close> show ?case by simp (metis IfFalse)
   qed
 next
   case While_ty show ?case by (metis While)
--- a/src/HOL/IMP/VCG.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/VCG.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,8 +4,8 @@
 
 subsection "Verification Conditions"
 
-text{* Annotated commands: commands where loops are annotated with
-invariants. *}
+text\<open>Annotated commands: commands where loops are annotated with
+invariants.\<close>
 
 datatype acom =
   Askip                  ("SKIP") |
@@ -16,7 +16,7 @@
 
 notation com.SKIP ("SKIP")
 
-text{* Strip annotations: *}
+text\<open>Strip annotations:\<close>
 
 fun strip :: "acom \<Rightarrow> com" where
 "strip SKIP = SKIP" |
@@ -25,7 +25,7 @@
 "strip (IF b THEN C\<^sub>1 ELSE C\<^sub>2) = (IF b THEN strip C\<^sub>1 ELSE strip C\<^sub>2)" |
 "strip ({_} WHILE b DO C) = (WHILE b DO strip C)"
 
-text{* Weakest precondition from annotated commands: *}
+text\<open>Weakest precondition from annotated commands:\<close>
 
 fun pre :: "acom \<Rightarrow> assn \<Rightarrow> assn" where
 "pre SKIP Q = Q" |
@@ -35,7 +35,7 @@
   (\<lambda>s. if bval b s then pre C\<^sub>1 Q s else pre C\<^sub>2 Q s)" |
 "pre ({I} WHILE b DO C) Q = I"
 
-text{* Verification condition: *}
+text\<open>Verification condition:\<close>
 
 fun vc :: "acom \<Rightarrow> assn \<Rightarrow> bool" where
 "vc SKIP Q = True" |
@@ -48,14 +48,14 @@
     vc C I)"
 
 
-text {* Soundness: *}
+text \<open>Soundness:\<close>
 
 lemma vc_sound: "vc C Q \<Longrightarrow> \<turnstile> {pre C Q} strip C {Q}"
 proof(induction C arbitrary: Q)
   case (Awhile I b C)
   show ?case
   proof(simp, rule While')
-    from `vc (Awhile I b C) Q`
+    from \<open>vc (Awhile I b C) Q\<close>
     have vc: "vc C I" and IQ: "\<forall>s. I s \<and> \<not> bval b s \<longrightarrow> Q s" and
          pre: "\<forall>s. I s \<and> bval b s \<longrightarrow> pre C I s" by simp_all
     have "\<turnstile> {pre C I} strip C {I}" by(rule Awhile.IH[OF vc])
@@ -70,7 +70,7 @@
 by (metis strengthen_pre vc_sound)
 
 
-text{* Completeness: *}
+text\<open>Completeness:\<close>
 
 lemma pre_mono:
   "\<forall>s. P s \<longrightarrow> P' s \<Longrightarrow> pre C P s \<Longrightarrow> pre C P' s"
--- a/src/HOL/IMP/VCG_Total_EX.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/VCG_Total_EX.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,8 +6,8 @@
 
 subsection "Verification Conditions for Total Correctness"
 
-text{* Annotated commands: commands where loops are annotated with
-invariants. *}
+text\<open>Annotated commands: commands where loops are annotated with
+invariants.\<close>
 
 datatype acom =
   Askip                  ("SKIP") |
@@ -19,7 +19,7 @@
 
 notation com.SKIP ("SKIP")
 
-text{* Strip annotations: *}
+text\<open>Strip annotations:\<close>
 
 fun strip :: "acom \<Rightarrow> com" where
 "strip SKIP = SKIP" |
@@ -28,7 +28,7 @@
 "strip (IF b THEN C\<^sub>1 ELSE C\<^sub>2) = (IF b THEN strip C\<^sub>1 ELSE strip C\<^sub>2)" |
 "strip ({_} WHILE b DO C) = (WHILE b DO strip C)"
 
-text{* Weakest precondition from annotated commands: *}
+text\<open>Weakest precondition from annotated commands:\<close>
 
 fun pre :: "acom \<Rightarrow> assn \<Rightarrow> assn" where
 "pre SKIP Q = Q" |
@@ -38,7 +38,7 @@
   (\<lambda>s. if bval b s then pre C\<^sub>1 Q s else pre C\<^sub>2 Q s)" |
 "pre ({I} WHILE b DO C) Q = (\<lambda>s. EX n. I n s)"
 
-text{* Verification condition: *}
+text\<open>Verification condition:\<close>
 
 fun vc :: "acom \<Rightarrow> assn \<Rightarrow> bool" where
 "vc SKIP Q = True" |
--- a/src/HOL/IMP/VCG_Total_EX2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/VCG_Total_EX2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -12,8 +12,8 @@
 with logical variables and proves soundness and completeness.
 \<close>
 
-text{* Annotated commands: commands where loops are annotated with
-invariants. *}
+text\<open>Annotated commands: commands where loops are annotated with
+invariants.\<close>
 
 datatype acom =
   Askip                  ("SKIP") |
@@ -25,7 +25,7 @@
 
 notation com.SKIP ("SKIP")
 
-text{* Strip annotations: *}
+text\<open>Strip annotations:\<close>
 
 fun strip :: "acom \<Rightarrow> com" where
 "strip SKIP = SKIP" |
@@ -34,7 +34,7 @@
 "strip (IF b THEN C\<^sub>1 ELSE C\<^sub>2) = (IF b THEN strip C\<^sub>1 ELSE strip C\<^sub>2)" |
 "strip ({_/_} WHILE b DO C) = (WHILE b DO strip C)"
 
-text{* Weakest precondition from annotated commands: *}
+text\<open>Weakest precondition from annotated commands:\<close>
 
 fun pre :: "acom \<Rightarrow> assn2 \<Rightarrow> assn2" where
 "pre SKIP Q = Q" |
@@ -44,7 +44,7 @@
   (\<lambda>l s. if bval b s then pre C\<^sub>1 Q l s else pre C\<^sub>2 Q l s)" |
 "pre ({I/x} WHILE b DO C) Q = (\<lambda>l s. EX n. I (l(x:=n)) s)"
 
-text{* Verification condition: *}
+text\<open>Verification condition:\<close>
 
 fun vc :: "acom \<Rightarrow> assn2 \<Rightarrow> bool" where
 "vc SKIP Q = True" |
@@ -71,7 +71,7 @@
 qed (auto intro: conseq Seq If simp: Skip Assign)
 
 
-text{* Completeness: *}
+text\<open>Completeness:\<close>
 
 lemma pre_mono:
   "\<forall>l s. P l s \<longrightarrow> P' l s \<Longrightarrow> pre C P l s \<Longrightarrow> pre C P' l s"
--- a/src/HOL/IMP/Vars.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/HOL/IMP/Vars.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,17 +7,17 @@
 
 subsection "The Variables in an Expression"
 
-text{* We need to collect the variables in both arithmetic and boolean
+text\<open>We need to collect the variables in both arithmetic and boolean
 expressions. For a change we do not introduce two functions, e.g.\ @{text
 avars} and @{text bvars}, but we overload the name @{text vars}
-via a \emph{type class}, a device that originated with Haskell: *}
+via a \emph{type class}, a device that originated with Haskell:\<close>
  
 class vars =
 fixes vars :: "'a \<Rightarrow> vname set"
 
-text{* This defines a type class ``vars'' with a single
+text\<open>This defines a type class ``vars'' with a single
function of (coincidentally) the same name. Then we define two separate
-instances of the class, one for @{typ aexp} and one for @{typ bexp}: *}
+instances of the class, one for @{typ aexp} and one for @{typ bexp}:\<close>
 
 instantiation aexp :: vars
 begin