isabelle update_cartouches -c;

author:     wenzelm
date:       Fri, 12 Jan 2018 14:08:53 +0100
changeset:  67406:23307fd33906
parent:     67405:e9ab4ad7bd15
child:      67407:dbaa38bd223a

files:
src/Doc/Classes/Classes.thy
src/Doc/Functions/Functions.thy
src/Doc/How_to_Prove_it/How_to_Prove_it.thy
src/Doc/Logics_ZF/FOL_examples.thy
src/Doc/Logics_ZF/IFOL_examples.thy
src/Doc/Logics_ZF/If.thy
src/Doc/Logics_ZF/ZF_Isar.thy
src/Doc/Logics_ZF/ZF_examples.thy
src/Doc/Prog_Prove/Basics.thy
src/Doc/Prog_Prove/Bool_nat_list.thy
src/Doc/Prog_Prove/Isar.thy
src/Doc/Prog_Prove/LaTeXsugar.thy
src/Doc/Prog_Prove/Logic.thy
src/Doc/Prog_Prove/Types_and_funs.thy
src/Doc/Sugar/Sugar.thy
src/Doc/Tutorial/Advanced/Partial.thy
src/Doc/Tutorial/Advanced/WFrec.thy
src/Doc/Tutorial/Advanced/simp2.thy
src/Doc/Tutorial/CTL/Base.thy
src/Doc/Tutorial/CTL/CTL.thy
src/Doc/Tutorial/CTL/CTLind.thy
src/Doc/Tutorial/CTL/PDL.thy
src/Doc/Tutorial/CodeGen/CodeGen.thy
src/Doc/Tutorial/Datatype/ABexpr.thy
src/Doc/Tutorial/Datatype/Fundata.thy
src/Doc/Tutorial/Datatype/Nested.thy
src/Doc/Tutorial/Documents/Documents.thy
src/Doc/Tutorial/Fun/fun0.thy
src/Doc/Tutorial/Ifexpr/Ifexpr.thy
src/Doc/Tutorial/Inductive/AB.thy
src/Doc/Tutorial/Inductive/Advanced.thy
src/Doc/Tutorial/Inductive/Even.thy
src/Doc/Tutorial/Inductive/Mutual.thy
src/Doc/Tutorial/Inductive/Star.thy
src/Doc/Tutorial/Misc/AdvancedInd.thy
src/Doc/Tutorial/Misc/Itrev.thy
src/Doc/Tutorial/Misc/Option2.thy
src/Doc/Tutorial/Misc/Plus.thy
src/Doc/Tutorial/Misc/Tree.thy
src/Doc/Tutorial/Misc/Tree2.thy
src/Doc/Tutorial/Misc/appendix.thy
src/Doc/Tutorial/Misc/case_exprs.thy
src/Doc/Tutorial/Misc/fakenat.thy
src/Doc/Tutorial/Misc/natsum.thy
src/Doc/Tutorial/Misc/pairs2.thy
src/Doc/Tutorial/Misc/prime_def.thy
src/Doc/Tutorial/Misc/simp.thy
src/Doc/Tutorial/Misc/types.thy
src/Doc/Tutorial/Protocol/Event.thy
src/Doc/Tutorial/Protocol/Message.thy
src/Doc/Tutorial/Protocol/NS_Public.thy
src/Doc/Tutorial/Protocol/Public.thy
src/Doc/Tutorial/Recdef/Induction.thy
src/Doc/Tutorial/Recdef/Nested0.thy
src/Doc/Tutorial/Recdef/Nested1.thy
src/Doc/Tutorial/Recdef/Nested2.thy
src/Doc/Tutorial/Recdef/examples.thy
src/Doc/Tutorial/Recdef/simplification.thy
src/Doc/Tutorial/Recdef/termination.thy
src/Doc/Tutorial/Rules/Basic.thy
src/Doc/Tutorial/Rules/Blast.thy
src/Doc/Tutorial/Rules/Force.thy
src/Doc/Tutorial/Rules/Forward.thy
src/Doc/Tutorial/Rules/TPrimes.thy
src/Doc/Tutorial/Rules/Tacticals.thy
src/Doc/Tutorial/Rules/find2.thy
src/Doc/Tutorial/Sets/Examples.thy
src/Doc/Tutorial/Sets/Functions.thy
src/Doc/Tutorial/Sets/Recur.thy
src/Doc/Tutorial/Sets/Relations.thy
src/Doc/Tutorial/ToyList/ToyList.thy
src/Doc/Tutorial/ToyList/ToyList_Test.thy
src/Doc/Tutorial/Trie/Trie.thy
src/Doc/Tutorial/Types/Axioms.thy
src/Doc/Tutorial/Types/Numbers.thy
src/Doc/Tutorial/Types/Overloading.thy
src/Doc/Tutorial/Types/Pairs.thy
src/Doc/Tutorial/Types/Records.thy
src/Doc/Tutorial/Types/Typedefs.thy
src/HOL/Data_Structures/AA_Map.thy
src/HOL/Data_Structures/AA_Set.thy
src/HOL/Data_Structures/AList_Upd_Del.thy
src/HOL/Data_Structures/AVL_Map.thy
src/HOL/Data_Structures/AVL_Set.thy
src/HOL/Data_Structures/Brother12_Map.thy
src/HOL/Data_Structures/Brother12_Set.thy
src/HOL/Data_Structures/Cmp.thy
src/HOL/Data_Structures/Leftist_Heap.thy
src/HOL/Data_Structures/Less_False.thy
src/HOL/Data_Structures/List_Ins_Del.thy
src/HOL/Data_Structures/Map_by_Ordered.thy
src/HOL/Data_Structures/Set_by_Ordered.thy
src/HOL/Data_Structures/Sorted_Less.thy
src/HOL/Data_Structures/Tree234.thy
src/HOL/Data_Structures/Tree234_Set.thy
src/HOL/Data_Structures/Tree23_Set.thy
src/HOL/IMP/ACom.thy
src/HOL/IMP/AExp.thy
src/HOL/IMP/ASM.thy
src/HOL/IMP/Abs_Int0.thy
src/HOL/IMP/Abs_Int1.thy
src/HOL/IMP/Abs_Int1_const.thy
src/HOL/IMP/Abs_Int1_parity.thy
src/HOL/IMP/Abs_Int2.thy
src/HOL/IMP/Abs_Int2_ivl.thy
src/HOL/IMP/Abs_Int3.thy
src/HOL/IMP/Abs_Int_Tests.thy
src/HOL/IMP/Abs_Int_init.thy
src/HOL/IMP/Abs_State.thy
src/HOL/IMP/BExp.thy
src/HOL/IMP/Big_Step.thy
src/HOL/IMP/C_like.thy
src/HOL/IMP/Collecting.thy
src/HOL/IMP/Collecting1.thy
src/HOL/IMP/Collecting_Examples.thy
src/HOL/IMP/Compiler.thy
src/HOL/IMP/Compiler2.thy
src/HOL/IMP/Def_Init_Big.thy
src/HOL/IMP/Def_Init_Small.thy
src/HOL/IMP/Denotational.thy
src/HOL/IMP/Finite_Reachable.thy
src/HOL/IMP/Hoare.thy
src/HOL/IMP/Hoare_Examples.thy
src/HOL/IMP/Hoare_Total.thy
src/HOL/IMP/Hoare_Total_EX.thy
src/HOL/IMP/Hoare_Total_EX2.thy
src/HOL/IMP/Live.thy
src/HOL/IMP/Live_True.thy
src/HOL/IMP/OO.thy
src/HOL/IMP/Poly_Types.thy
src/HOL/IMP/Sec_Type_Expr.thy
src/HOL/IMP/Sec_Typing.thy
src/HOL/IMP/Sec_TypingT.thy
src/HOL/IMP/Sem_Equiv.thy
src/HOL/IMP/Small_Step.thy
src/HOL/IMP/Star.thy
src/HOL/IMP/Types.thy
src/HOL/IMP/VCG.thy
src/HOL/IMP/VCG_Total_EX.thy
src/HOL/IMP/VCG_Total_EX2.thy
src/HOL/IMP/Vars.thy
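
The change below is purely syntactic, produced by the "isabelle update_cartouches -c" tool: legacy {* ... *} delimiters around document text (text, txt, section, chapter, text_raw, tactic, ...) become cartouches \<open> ... \<close>, the old marginal-comment marker "--" becomes the \<comment> symbol (that is what the -c option adds), and back-quoted facts such as `evn m` likewise become \<open>evn m\<close>. As an illustrative sketch of the before/after shape — a made-up fragment for orientation, not taken from any of the files above:

  (* before the update *)
  text{* A short explanation. *}
  lemma "P \<longrightarrow> P"
    --{* a marginal comment on the goal *}
  apply (rule impI)
  apply assumption
  done

  (* after "isabelle update_cartouches -c" *)
  text\<open>A short explanation.\<close>
  lemma "P \<longrightarrow> P"
    \<comment>\<open>a marginal comment on the goal\<close>
  apply (rule impI)
  apply assumption
  done

The diff itself follows and is reproduced verbatim.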
--- a/src/Doc/Classes/Classes.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Classes/Classes.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -558,12 +558,12 @@
 context %quote semigroup
 begin
 
-term %quote "x \<otimes> y" -- \<open>example 1\<close>
-term %quote "(x::nat) \<otimes> y" -- \<open>example 2\<close>
+term %quote "x \<otimes> y" \<comment> \<open>example 1\<close>
+term %quote "(x::nat) \<otimes> y" \<comment> \<open>example 2\<close>
 
 end  %quote
 
-term %quote "x \<otimes> y" -- \<open>example 3\<close>
+term %quote "x \<otimes> y" \<comment> \<open>example 3\<close>
 
 text \<open>
   \<^noindent> Here in example 1, the term refers to the local class
--- a/src/Doc/Functions/Functions.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Functions/Functions.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1095,11 +1095,11 @@
   let ?R = "measure (\<lambda>x. 101 - x)"
   show "wf ?R" ..
 
-  fix n :: nat assume "\<not> 100 < n" -- "Assumptions for both calls"
+  fix n :: nat assume "\<not> 100 < n" \<comment> "Assumptions for both calls"
 
-  thus "(n + 11, n) \<in> ?R" by simp -- "Inner call"
+  thus "(n + 11, n) \<in> ?R" by simp \<comment> "Inner call"
 
-  assume inner_trm: "f91_dom (n + 11)" -- "Outer call"
+  assume inner_trm: "f91_dom (n + 11)" \<comment> "Outer call"
   with f91_estimate have "n + 11 < f91 (n + 11) + 11" .
   with \<open>\<not> 100 < n\<close> show "(f91 (n + 11), n) \<in> ?R" by simp
 qed
--- a/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 imports Complex_Main
 begin
 (*>*)
-text{*
+text\<open>
 \chapter{@{theory Main}}
 
 \section{Natural numbers}
@@ -34,12 +34,12 @@
 
 \noindent
 Example:
-*}
+\<close>
 
 lemma fixes x :: int shows "x ^ 3 = x * x * x"
 by (simp add: numeral_eq_Suc)
 
-text{* This is a typical situation: function ``@{text"^"}'' is defined
+text\<open>This is a typical situation: function ``@{text"^"}'' is defined
 by pattern matching on @{const Suc} but is applied to a numeral.
 
 Note: simplification with @{thm[source] numeral_eq_Suc} will convert all numerals.
@@ -80,7 +80,7 @@
 But what to do when proper multiplication is involved?
 At this point it can be helpful to simplify with the lemma list
 @{thm [source] algebra_simps}. Examples:
-*}
+\<close>
 
 lemma fixes x :: int
   shows "(x + y) * (y - z) = (y - z) * x + y * (y-z)"
@@ -90,7 +90,7 @@
   shows "(x + y) * (y - z) = (y - z) * x + y * (y-z)"
 by(simp add: algebra_simps)
 
-text{*
+text\<open>
 Rewriting with @{thm[source] algebra_simps} has the following effect:
 terms are rewritten into a normal form by multiplying out,
 rearranging sums and products into some canonical order.
@@ -101,33 +101,33 @@
 and @{class comm_ring}) this yields a decision procedure for equality.
 
 Additional function and predicate symbols are not a problem either:
-*}
+\<close>
 
 lemma fixes f :: "int \<Rightarrow> int" shows "2 * f(x*y) - f(y*x) < f(y*x) + 1"
 by(simp add: algebra_simps)
 
-text{* Here @{thm[source]algebra_simps} merely has the effect of rewriting
+text\<open>Here @{thm[source]algebra_simps} merely has the effect of rewriting
 @{term"y*x"} to @{term"x*y"} (or the other way around). This yields
 a problem of the form @{prop"2*t - t < t + (1::int)"} and we are back in the
 realm of linear arithmetic.
 
 Because @{thm[source]algebra_simps} multiplies out, terms can explode.
 If one merely wants to bring sums or products into a canonical order
-it suffices to rewrite with @{thm [source] ac_simps}: *}
+it suffices to rewrite with @{thm [source] ac_simps}:\<close>
 
 lemma fixes f :: "int \<Rightarrow> int" shows "f(x*y*z) - f(z*x*y) = 0"
 by(simp add: ac_simps)
 
-text{* The lemmas @{thm[source]algebra_simps} take care of addition, subtraction
+text\<open>The lemmas @{thm[source]algebra_simps} take care of addition, subtraction
 and multiplication (algebraic structures up to rings) but ignore division (fields).
 The lemmas @{thm[source]field_simps} also deal with division:
-*}
+\<close>
 
 lemma fixes x :: real shows "x+z \<noteq> 0 \<Longrightarrow> 1 + y/(x+z) = (x+y+z)/(x+z)"
 by(simp add: field_simps)
 
-text{* Warning: @{thm[source]field_simps} can blow up your terms
-beyond recognition. *}
+text\<open>Warning: @{thm[source]field_simps} can blow up your terms
+beyond recognition.\<close>
 
 (*<*)
 end
--- a/src/Doc/Logics_ZF/FOL_examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/FOL_examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,30 +1,30 @@
-section{*Examples of Classical Reasoning*}
+section\<open>Examples of Classical Reasoning\<close>
 
 theory FOL_examples imports FOL begin
 
 lemma "EX y. ALL x. P(y)-->P(x)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule exCI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{*see below for @{text allI} combined with @{text swap}*}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+txt\<open>see below for @{text allI} combined with @{text swap}\<close>
 apply (erule allI [THEN [2] swap])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule notE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption
 done
 
-text {*
+text \<open>
 @{thm[display] allI [THEN [2] swap]}
-*}
+\<close>
 
 lemma "EX y. ALL x. P(y)-->P(x)"
 by blast
--- a/src/Doc/Logics_ZF/IFOL_examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/IFOL_examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,56 +1,56 @@
-section{*Examples of Intuitionistic Reasoning*}
+section\<open>Examples of Intuitionistic Reasoning\<close>
 
 theory IFOL_examples imports IFOL begin
 
-text{*Quantifier example from the book Logic and Computation*}
+text\<open>Quantifier example from the book Logic and Computation\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule exI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{*Now @{text "apply assumption"} fails*}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+txt\<open>Now @{text "apply assumption"} fails\<close>
 oops
 
-text{*Trying again, with the same first two steps*}
+text\<open>Trying again, with the same first two steps\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule exI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule allE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 done
 
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-by (tactic {*IntPr.fast_tac @{context} 1*})
+by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*Example of Dyckhoff's method*}
+text\<open>Example of Dyckhoff's method\<close>
 lemma "~ ~ ((P-->Q) | (Q-->P))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (unfold not_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule impI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule disj_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule imp_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
  apply (erule imp_impE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
 apply (erule FalseE)+
 done
--- a/src/Doc/Logics_ZF/If.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/If.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -12,35 +12,35 @@
 
 lemma ifI:
     "[| P ==> Q; ~P ==> R |] ==> if(P,Q,R)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply blast
 done
 
 lemma ifE:
    "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~P; R |] ==> S |] ==> S"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply blast
 done
 
 lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule iffI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule ifE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule ifE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule ifI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule ifI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
-text{*Trying again from the beginning in order to use @{text blast}*}
+text\<open>Trying again from the beginning in order to use @{text blast}\<close>
 declare ifI [intro!]
 declare ifE [elim!]
 
@@ -49,34 +49,34 @@
 
 
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by blast
 
-text{*Trying again from the beginning in order to prove from the definitions*}
+text\<open>Trying again from the beginning in order to prove from the definitions\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply blast
 done
 
 
-text{*An invalid formula.  High-level rules permit a simpler diagnosis*}
+text\<open>An invalid formula.  High-level rules permit a simpler diagnosis\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply auto
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 (*The next step will fail unless subgoals remain*)
 apply (tactic all_tac)
 oops
 
-text{*Trying again from the beginning in order to prove from the definitions*}
+text\<open>Trying again from the beginning in order to prove from the definitions\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (simp add: if_def)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (auto) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 (*The next step will fail unless subgoals remain*)
 apply (tactic all_tac)
 oops
--- a/src/Doc/Logics_ZF/ZF_Isar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/ZF_Isar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,11 +6,11 @@
 ML_file "../antiquote_setup.ML"
 (*>*)
 
-chapter {* Some Isar language elements *}
+chapter \<open>Some Isar language elements\<close>
 
-section {* Type checking *}
+section \<open>Type checking\<close>
 
-text {*
+text \<open>
   The ZF logic is essentially untyped, so the concept of ``type
   checking'' is performed as logical reasoning about set-membership
   statements.  A special method assists users in this task; a version
@@ -39,14 +39,14 @@
   the context.
 
   \end{description}
-*}
+\<close>
 
 
-section {* (Co)Inductive sets and datatypes *}
+section \<open>(Co)Inductive sets and datatypes\<close>
 
-subsection {* Set definitions *}
+subsection \<open>Set definitions\<close>
 
-text {*
+text \<open>
   In ZF everything is a set.  The generic inductive package also
   provides a specific view for ``datatype'' specifications.
   Coinductive definitions are available in both cases, too.
@@ -97,12 +97,12 @@
   See @{cite "isabelle-ZF"} for further information on inductive
   definitions in ZF, but note that this covers the old-style theory
   format.
-*}
+\<close>
 
 
-subsection {* Primitive recursive functions *}
+subsection \<open>Primitive recursive functions\<close>
 
-text {*
+text \<open>
   \begin{matharray}{rcl}
     @{command_def (ZF) "primrec"} & : & @{text "theory \<rightarrow> theory"} \\
   \end{matharray}
@@ -110,12 +110,12 @@
   @{rail \<open>
     @@{command (ZF) primrec} (@{syntax thmdecl}? @{syntax prop} +)
   \<close>}
-*}
+\<close>
 
 
-subsection {* Cases and induction: emulating tactic scripts *}
+subsection \<open>Cases and induction: emulating tactic scripts\<close>
 
-text {*
+text \<open>
   The following important tactical tools of Isabelle/ZF have been
   ported to Isar.  These should not be used in proper proof texts.
 
@@ -133,6 +133,6 @@
     ;
     @@{command (ZF) inductive_cases} (@{syntax thmdecl}? (@{syntax prop} +) + @'and')
   \<close>}
-*}
+\<close>
 
 end
--- a/src/Doc/Logics_ZF/ZF_examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Logics_ZF/ZF_examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
-section{*Examples of Reasoning in ZF Set Theory*}
+section\<open>Examples of Reasoning in ZF Set Theory\<close>
 
 theory ZF_examples imports ZFC begin
 
-subsection {* Binary Trees *}
+subsection \<open>Binary Trees\<close>
 
 consts
   bt :: "i => i"
@@ -12,11 +12,11 @@
 
 declare bt.intros [simp]
 
-text{*Induction via tactic emulation*}
+text\<open>Induction via tactic emulation\<close>
 lemma Br_neq_left [rule_format]: "l \<in> bt(A) ==> \<forall>x r. Br(x, l, r) \<noteq> l"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply (induct_tac l)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply auto
   done
 
@@ -25,26 +25,26 @@
   apply (tactic {*exhaust_tac "l" 1*})
 *)
 
-text{*The new induction method, which I don't understand*}
+text\<open>The new induction method, which I don't understand\<close>
 lemma Br_neq_left': "l \<in> bt(A) ==> (!!x r. Br(x, l, r) \<noteq> l)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply (induct set: bt)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   apply auto
   done
 
 lemma Br_iff: "Br(a,l,r) = Br(a',l',r') <-> a=a' & l=l' & r=r'"
-  -- "Proving a freeness theorem."
+  \<comment> "Proving a freeness theorem."
   by (blast elim!: bt.free_elims)
 
 inductive_cases Br_in_bt: "Br(a,l,r) \<in> bt(A)"
-  -- "An elimination rule, for type-checking."
+  \<comment> "An elimination rule, for type-checking."
 
-text {*
+text \<open>
 @{thm[display] Br_in_bt[no_vars]}
-*}
+\<close>
 
-subsection{*Primitive recursion*}
+subsection\<open>Primitive recursion\<close>
 
 consts  n_nodes :: "i => i"
 primrec
@@ -71,7 +71,7 @@
  by (simp add: n_nodes_tail_def n_nodes_aux_eq) 
 
 
-subsection {*Inductive definitions*}
+subsection \<open>Inductive definitions\<close>
 
 consts  Fin       :: "i=>i"
 inductive
@@ -114,7 +114,7 @@
   type_intros  llist.intros
 
 
-subsection{*Powerset example*}
+subsection\<open>Powerset example\<close>
 
 lemma Pow_mono: "A\<subseteq>B  ==>  Pow(A) \<subseteq> Pow(B)"
 apply (rule subsetI)
@@ -124,78 +124,78 @@
 done
 
 lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule equalityI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_greatest)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_lower1 [THEN Pow_mono])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_lower2 [THEN Pow_mono])
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule subsetI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule IntE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule PowI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule PowD)+
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Int_greatest)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (assumption+)
 done
 
-text{*Trying again from the beginning in order to use @{text blast}*}
+text\<open>Trying again from the beginning in order to use @{text blast}\<close>
 lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
 by blast
 
 
 lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule subsetI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule UnionE)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule UnionI)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule subsetD)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
 done
 
-text{*A more abstract version of the same proof*}
+text\<open>A more abstract version of the same proof\<close>
 
 lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Union_least)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule Union_upper)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule subsetD, assumption)
 done
 
 
 lemma "[| a \<in> A;  f \<in> A->B;  g \<in> C->D;  A \<inter> C = 0 |] ==> (f \<union> g)`a = f`a"
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule apply_equality)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule UnI1)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule apply_Pair)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule fun_disjoint_Un)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption 
 done
 
--- a/src/Doc/Prog_Prove/Basics.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Basics.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 imports Main
 begin
 (*>*)
-text{*
+text\<open>
 This chapter introduces HOL as a functional programming language and shows
 how to prove properties of functional programs by induction.
 
@@ -149,7 +149,7 @@
 to see the proof state in the output window.
 \end{warn}
 \fi
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Bool_nat_list.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Bool_nat_list.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,7 +4,7 @@
 begin
 (*>*)
 
-text{*
+text\<open>
 \vspace{-4ex}
 \section{\texorpdfstring{Types @{typ bool}, @{typ nat} and @{text list}}{Types bool, nat and list}}
 
@@ -19,13 +19,13 @@
 with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
 with many predefined functions:  @{text "\<not>"}, @{text "\<and>"}, @{text "\<or>"}, @{text
 "\<longrightarrow>"}, etc. Here is how conjunction could be defined by pattern matching:
-*}
+\<close>
 
 fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
 "conj True True = True" |
 "conj _ _ = False"
 
-text{* Both the datatype and function definitions roughly follow the syntax
+text\<open>Both the datatype and function definitions roughly follow the syntax
 of functional programming languages.
 
 \subsection{Type \indexed{@{typ nat}}{nat}}
@@ -37,13 +37,13 @@
 @{text 0}, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
 There are many predefined functions: @{text "+"}, @{text "*"}, @{text
 "\<le>"}, etc. Here is how you could define your own addition:
-*}
+\<close>
 
 fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "add 0 n = n" |
 "add (Suc m) n = Suc(add m n)"
 
-text{* And here is a proof of the fact that @{prop"add m 0 = m"}: *}
+text\<open>And here is a proof of the fact that @{prop"add m 0 = m"}:\<close>
 
 lemma add_02: "add m 0 = m"
 apply(induction m)
@@ -53,7 +53,7 @@
 lemma "add m 0 = m"
 apply(induction m)
 (*>*)
-txt{* The \isacom{lemma} command starts the proof and gives the lemma
+txt\<open>The \isacom{lemma} command starts the proof and gives the lemma
 a name, @{text add_02}. Properties of recursively defined functions
 need to be established by induction in most cases.
 Command \isacom{apply}@{text"(induction m)"} instructs Isabelle to
@@ -75,11 +75,11 @@
 the induction hypothesis.
 As a result of that final \isacom{done}, Isabelle associates the lemma
 just proved with its name. You can now inspect the lemma with the command
-*}
+\<close>
 
 thm add_02
 
-txt{* which displays @{thm[show_question_marks,display] add_02} The free
+txt\<open>which displays @{thm[show_question_marks,display] add_02} The free
 variable @{text m} has been replaced by the \concept{unknown}
 @{text"?m"}. There is no logical difference between the two but there is an
 operational one: unknowns can be instantiated, which is what you want after
@@ -153,7 +153,7 @@
 
 Although lists are already predefined, we define our own copy for
 demonstration purposes:
-*}
+\<close>
 (*<*)
 apply(auto)
 done 
@@ -164,7 +164,7 @@
 for map: map
 (*>*)
 
-text{*
+text\<open>
 \begin{itemize}
 \item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
 \item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
@@ -175,7 +175,7 @@
 types of a constructor needs to be enclosed in quotation marks, unless
 it is just an identifier (e.g., @{typ nat} or @{typ 'a}).
 \end{itemize}
-We also define two standard functions, append and reverse: *}
+We also define two standard functions, append and reverse:\<close>
 
 fun app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "app Nil ys = ys" |
@@ -185,18 +185,18 @@
 "rev Nil = Nil" |
 "rev (Cons x xs) = app (rev xs) (Cons x Nil)"
 
-text{* By default, variables @{text xs}, @{text ys} and @{text zs} are of
+text\<open>By default, variables @{text xs}, @{text ys} and @{text zs} are of
 @{text list} type.
 
-Command \indexed{\isacommand{value}}{value} evaluates a term. For example, *}
+Command \indexed{\isacommand{value}}{value} evaluates a term. For example,\<close>
 
 value "rev(Cons True (Cons False Nil))"
 
-text{* yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too: *}
+text\<open>yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too:\<close>
 
 value "rev(Cons a (Cons b Nil))"
 
-text{* yields @{value "rev(Cons a (Cons b Nil))"}.
+text\<open>yields @{value "rev(Cons a (Cons b Nil))"}.
 \medskip
 
 Figure~\ref{fig:MyList} shows the theory created so far.
@@ -238,28 +238,28 @@
 We will now demonstrate the typical proof process, which involves
 the formulation and proof of auxiliary lemmas.
 Our goal is to show that reversing a list twice produces the original
-list. *}
+list.\<close>
 
 theorem rev_rev [simp]: "rev(rev xs) = xs"
 
-txt{* Commands \isacom{theorem} and \isacom{lemma} are
+txt\<open>Commands \isacom{theorem} and \isacom{lemma} are
 interchangeable and merely indicate the importance we attach to a
 proposition. Via the bracketed attribute @{text simp} we also tell Isabelle
 to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
 involving simplification will replace occurrences of @{term"rev(rev xs)"} by
-@{term"xs"}. The proof is by induction: *}
+@{term"xs"}. The proof is by induction:\<close>
 
 apply(induction xs)
 
-txt{*
+txt\<open>
 As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
 @{subgoals[display,indent=0,margin=65]}
 Let us try to solve both goals automatically:
-*}
+\<close>
 
 apply(auto)
 
-txt{*Subgoal~1 is proved, and disappears; the simplified version
+txt\<open>Subgoal~1 is proved, and disappears; the simplified version
 of subgoal~2 becomes the new subgoal~1:
 @{subgoals[display,indent=0,margin=70]}
 In order to simplify this subgoal further, a lemma suggests itself.
@@ -267,22 +267,22 @@
 \subsubsection{A First Lemma}
 
 We insert the following lemma in front of the main theorem:
-*}
+\<close>
 (*<*)
 oops
 (*>*)
 lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
 
-txt{* There are two variables that we could induct on: @{text xs} and
+txt\<open>There are two variables that we could induct on: @{text xs} and
 @{text ys}. Because @{const app} is defined by recursion on
 the first argument, @{text xs} is the correct one:
-*}
+\<close>
 
 apply(induction xs)
 
-txt{* This time not even the base case is solved automatically: *}
+txt\<open>This time not even the base case is solved automatically:\<close>
 apply(auto)
-txt{*
+txt\<open>
 \vspace{-5ex}
 @{subgoals[display,goals_limit=1]}
 Again, we need to abandon this proof attempt and prove another simple lemma
@@ -291,7 +291,7 @@
 \subsubsection{A Second Lemma}
 
 We again try the canonical proof procedure:
-*}
+\<close>
 (*<*)
 oops
 (*>*)
@@ -300,16 +300,16 @@
 apply(auto)
 done
 
-text{*
+text\<open>
 Thankfully, this worked.
 Now we can continue with our stuck proof attempt of the first lemma:
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
 apply(induction xs)
 apply(auto)
 
-txt{*
+txt\<open>
 We find that this time @{text"auto"} solves the base case, but the
 induction step merely simplifies to
 @{subgoals[display,indent=0,goals_limit=1]}
@@ -319,7 +319,7 @@
 \subsubsection{Associativity of @{const app}}
 
 The canonical proof procedure succeeds without further ado:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"
 apply(induction xs)
@@ -336,7 +336,7 @@
 apply(auto)
 done
 (*>*)
-text{*
+text\<open>
 Finally the proofs of @{thm[source] rev_app} and @{thm[source] rev_rev}
 succeed, too.
 
@@ -457,7 +457,7 @@
 \mbox{@{text"sum_upto n"}} @{text"="} @{text"0 + ... + n"} and prove
 @{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
 \end{exercise}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Isar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Isar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,7 +4,7 @@
 begin
 declare [[quick_and_dirty]]
 (*>*)
-text{*
+text\<open>
 Apply-scripts are unreadable and hard to maintain. The language of choice
 for larger proofs is \concept{Isar}. The two key features of Isar are:
 \begin{itemize}
@@ -14,7 +14,7 @@
 \end{itemize}
 Whereas apply-scripts are like assembly language programs, Isar proofs
 are like structured programs with comments. A typical Isar proof looks like this:
-*}text{*
+\<close>text\<open>
 \begin{tabular}{@ {}l}
 \isacom{proof}\\
 \quad\isacom{assume} @{text"\""}$\mathit{formula}_0$@{text"\""}\\
@@ -24,7 +24,7 @@
 \quad\isacom{show} @{text"\""}$\mathit{formula}_{n+1}$@{text"\""} \quad\isacom{by} @{text \<dots>}\\
 \isacom{qed}
 \end{tabular}
-*}text{*
+\<close>text\<open>
 It proves $\mathit{formula}_0 \Longrightarrow \mathit{formula}_{n+1}$
 (provided each proof step succeeds).
 The intermediate \isacom{have} statements are merely stepping stones
@@ -89,7 +89,7 @@
 We show a number of proofs of Cantor's theorem that a function from a set to
 its powerset cannot be surjective, illustrating various features of Isar. The
 constant @{const surj} is predefined.
-*}
+\<close>
 
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 proof
@@ -99,7 +99,7 @@
   from 2 show "False" by blast
 qed
 
-text{*
+text\<open>
 The \isacom{proof} command lacks an explicit method by which to perform
 the proof. In such cases Isabelle tries to use some standard introduction
 rule, in the above case for @{text"\<not>"}:
@@ -125,7 +125,7 @@
 in a UNIX pipe. In such cases the predefined name @{text this} can be used
 to refer to the proposition proved in the previous step. This allows us to
 eliminate all labels from our proof (we suppress the \isacom{lemma} statement):
-*}
+\<close>
 (*<*)
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 (*>*)
@@ -135,7 +135,7 @@
   from this show "False" by blast
 qed
 
-text{* We have also taken the opportunity to compress the two \isacom{have}
+text\<open>We have also taken the opportunity to compress the two \isacom{have}
 steps into one.
 
 To compact the text further, Isar has a few convenient abbreviations:
@@ -150,7 +150,7 @@
 
 \noindent
 With the help of these abbreviations the proof becomes
-*}
+\<close>
 (*<*)
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 (*>*)
@@ -159,7 +159,7 @@
   hence "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
   thus "False" by blast
 qed
-text{*
+text\<open>
 
 There are two further linguistic variations:
 \medskip
@@ -180,14 +180,14 @@
 Lemmas can also be stated in a more structured fashion. To demonstrate this
 feature with Cantor's theorem, we rephrase \noquotes{@{prop[source]"\<not> surj f"}}
 a little:
-*}
+\<close>
 
 lemma
   fixes f :: "'a \<Rightarrow> 'a set"
   assumes s: "surj f"
   shows "False"
 
-txt{* The optional \isacom{fixes} part allows you to state the types of
+txt\<open>The optional \isacom{fixes} part allows you to state the types of
 variables up front rather than by decorating one of their occurrences in the
 formula with a type constraint. The key advantage of the structured format is
 the \isacom{assumes} part that allows you to name each assumption; multiple
@@ -195,7 +195,7 @@
 \isacom{shows} part gives the goal. The actual theorem that will come out of
 the proof is \noquotes{@{prop[source]"surj f \<Longrightarrow> False"}}, but during the proof the assumption
 \noquotes{@{prop[source]"surj f"}} is available under the name @{text s} like any other fact.
-*}
+\<close>
 
 proof -
   have "\<exists> a. {x. x \<notin> f x} = f a" using s
@@ -203,7 +203,7 @@
   thus "False" by blast
 qed
 
-text{*
+text\<open>
 \begin{warn}
 Note the hyphen after the \isacom{proof} command.
 It is the null method that does nothing to the goal. Leaving it out would be asking
@@ -235,42 +235,42 @@
 starting from a formula @{text P} we have the two cases @{text P} and
 @{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
 we have the two cases @{text P} and @{text Q}:
-*}text_raw{*
+\<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "R" proof-(*>*)
 show "R"
 proof cases
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "\<not> P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
-text_raw {* }
+text_raw \<open>}
 \end{minipage}\index{cases@@{text cases}}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "R" proof-(*>*)
-have "P \<or> Q" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+have "P \<or> Q" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 then show "R"
 proof
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "Q"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \medskip
@@ -278,19 +278,19 @@
 How to prove a logical equivalence:
 \end{isamarkuptext}%
 \isa{%
-*}
+\<close>
 (*<*)lemma "P\<longleftrightarrow>Q" proof-(*>*)
 show "P \<longleftrightarrow> Q"
 proof
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "Q" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "Q" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "Q"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "P" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
-text_raw {* }
+text_raw \<open>}
 \medskip
 \begin{isamarkuptext}%
 Proofs by contradiction (@{thm[source] ccontr} stands for ``classical contradiction''):
@@ -298,30 +298,30 @@
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "\<not> P" proof-(*>*)
 show "\<not> P"
 proof
   assume "P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "False" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "False" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P" proof-(*>*)
 show "P"
 proof (rule ccontr)
   assume "\<not>P"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "False" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "False" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \medskip
@@ -331,30 +331,30 @@
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "ALL x. P x" proof-(*>*)
 show "\<forall>x. P(x)"
 proof
   fix x
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P(x)" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "P(x)" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "EX x. P(x)" proof-(*>*)
 show "\<exists>x. P(x)"
 proof
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "P(witness)" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "P(witness)" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed
 (*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \medskip
@@ -369,12 +369,12 @@
 
 How to reason forward from \noquotes{@{prop[source] "\<exists>x. P(x)"}}:
 \end{isamarkuptext}%
-*}
+\<close>
 (*<*)lemma True proof- assume 1: "EX x. P x"(*>*)
-have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)text_raw{*\ \isasymproof\\*}
+have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)text_raw\<open>\ \isasymproof\\\<close>
 then obtain x where p: "P(x)" by blast
 (*<*)oops(*>*)
-text{*
+text\<open>
 After the \indexed{\isacom{obtain}}{obtain} step, @{text x} (we could have chosen any name)
 is a fixed local
 variable, and @{text p} is the name of the fact
@@ -382,7 +382,7 @@
 This pattern works for one or more @{text x}.
 As an example of the \isacom{obtain} command, here is the proof of
 Cantor's theorem in more detail:
-*}
+\<close>
 
 lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
 proof
@@ -393,7 +393,7 @@
   thus "False" by blast
 qed
 
-text_raw{*
+text_raw\<open>
 \begin{isamarkuptext}%
 
 Finally, how to prove set equality and subset relationship:
@@ -401,31 +401,31 @@
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "A = (B::'a set)" proof-(*>*)
 show "A = B"
 proof
-  show "A \<subseteq> B" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  show "A \<subseteq> B" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
-  show "B \<subseteq> A" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  show "B \<subseteq> A" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "A <= (B::'a set)" proof-(*>*)
 show "A \<subseteq> B"
 proof
   fix x
   assume "x \<in> A"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "x \<in> B" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "x \<in> B" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \begin{isamarkuptext}%
@@ -522,34 +522,34 @@
 the pattern for later use. As an example, consider the proof pattern for
 @{text"\<longleftrightarrow>"}:
 \end{isamarkuptext}%
-*}
+\<close>
 (*<*)lemma "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" proof-(*>*)
 show "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" (is "?L \<longleftrightarrow> ?R")
 proof
   assume "?L"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "?R" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "?R" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 next
   assume "?R"
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show "?L" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show "?L" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text{* Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
+text\<open>Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
 the two abbreviations @{text"?L"} and @{text"?R"} by pattern matching.
 Pattern matching works wherever a formula is stated, in particular
 with \isacom{have} and \isacom{lemma}.
 
 The unknown \indexed{@{text"?thesis"}}{thesis} is implicitly matched against any goal stated by
-\isacom{lemma} or \isacom{show}. Here is a typical example: *}
+\isacom{lemma} or \isacom{show}. Here is a typical example:\<close>
 
 lemma "formula"
 proof -
-  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
-  show ?thesis (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
+  show ?thesis (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed
 
-text{* 
+text\<open>
 Unknowns can also be instantiated with \indexed{\isacom{let}}{let} commands
 \begin{quote}
 \isacom{let} @{text"?t"} = @{text"\""}\textit{some-big-term}@{text"\""}
@@ -588,37 +588,37 @@
 Sometimes one needs a number of facts to enable some deduction. Of course
 one can name these facts individually, as shown on the right,
 but one can also combine them with \isacom{moreover}, as shown on the left:
-*}text_raw{*
+\<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P" proof-(*>*)
-have "P\<^sub>1" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-moreover have "P\<^sub>2" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+have "P\<^sub>1" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+moreover have "P\<^sub>2" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 moreover
-text_raw{*\\$\vdots$\\\hspace{-1.4ex}*}(*<*)have "True" ..(*>*)
-moreover have "P\<^sub>n" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-ultimately have "P"  (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+text_raw\<open>\\$\vdots$\\\hspace{-1.4ex}\<close>(*<*)have "True" ..(*>*)
+moreover have "P\<^sub>n" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+ultimately have "P"  (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 (*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \qquad
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P" proof-(*>*)
-have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)text_raw{*\ \isasymproof*}
-text_raw{*\\$\vdots$\\\hspace{-1.4ex}*}
-have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
-from lab\<^sub>1 lab\<^sub>2 text_raw{*\ $\dots$\\*}
-have "P"  (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\<close>
+text_raw\<open>\\$\vdots$\\\hspace{-1.4ex}\<close>
+have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
+from lab\<^sub>1 lab\<^sub>2 text_raw\<open>\ $\dots$\\\<close>
+have "P"  (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 (*<*)oops(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 \end{tabular}
 \begin{isamarkuptext}%
@@ -643,7 +643,7 @@
 As an example we prove a simple fact about divisibility on integers.
 The definition of @{text "dvd"} is @{thm dvd_def}.
 \end{isamarkuptext}%
-*}
+\<close>
 
 lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
 proof -
@@ -654,28 +654,28 @@
   then show ?thesis using assms by(auto simp add: dvd_def)
 qed
 
-text{*
+text\<open>
 
 \subsection*{Exercises}
 
 \exercise
 Give a readable, structured proof of the following lemma:
-*}
+\<close>
 lemma assumes T: "\<forall>x y. T x y \<or> T y x"
   and A: "\<forall>x y. A x y \<and> A y x \<longrightarrow> x = y"
   and TA: "\<forall>x y. T x y \<longrightarrow> A x y" and "A x y"
   shows "T x y"
 (*<*)oops(*>*)
-text{*
+text\<open>
 \endexercise
 
 \exercise
 Give a readable, structured proof of the following lemma:
-*}
+\<close>
 lemma "\<exists>ys zs. xs = ys @ zs \<and>
             (length ys = length zs \<or> length ys = length zs + 1)"
 (*<*)oops(*>*)
-text{*
+text\<open>
 Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
 such that @{text"take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]"} and
 @{text"drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]"}. Let sledgehammer find and apply
@@ -692,7 +692,7 @@
 which form some term takes: is it @{text 0} or of the form @{term"Suc n"},
 is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
 proof by case analysis on the form of @{text xs}:
-*}
+\<close>
 
 lemma "length(tl xs) = length xs - 1"
 proof (cases xs)
@@ -703,7 +703,7 @@
   thus ?thesis by simp
 qed
 
-text{*\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
+text\<open>\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
 @{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
 and @{prop"0 - 1 = (0::nat)"}.
 
@@ -721,7 +721,7 @@
 but also gives the assumption @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""} a name: @{text C},
 like the constructor.
 Here is the \isacom{case} version of the proof above:
-*}
+\<close>
 (*<*)lemma "length(tl xs) = length xs - 1"(*>*)
 proof (cases xs)
   case Nil
@@ -731,7 +731,7 @@
   thus ?thesis by simp
 qed
 
-text{* Remember that @{text Nil} and @{text Cons} are the alphanumeric names
+text\<open>Remember that @{text Nil} and @{text Cons} are the alphanumeric names
 for @{text"[]"} and @{text"#"}. The names of the assumptions
 are not used because they are directly piped (via \isacom{thus})
 into the proof of the claim.
@@ -745,7 +745,7 @@
 the sum (@{text"\<Sum>"}) of the first @{text n} natural numbers
 (@{text"{0..n::nat}"}) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
 Never mind the details, just focus on the pattern:
-*}
+\<close>
 
 lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"
 proof (induction n)
@@ -755,10 +755,10 @@
   thus "\<Sum>{0..Suc n} = Suc n*(Suc n+1) div 2" by simp
 qed
 
-text{* Except for the rewrite steps, everything is explicitly given. This
+text\<open>Except for the rewrite steps, everything is explicitly given. This
 makes the proof easily readable, but the duplication means it is tedious to
 write and maintain. Here is how pattern
-matching can completely avoid any duplication: *}
+matching can completely avoid any duplication:\<close>
 
 lemma "\<Sum>{0..n::nat} = n*(n+1) div 2" (is "?P n")
 proof (induction n)
@@ -768,7 +768,7 @@
   thus "?P(Suc n)" by simp
 qed
 
-text{* The first line introduces an abbreviation @{text"?P n"} for the goal.
+text\<open>The first line introduces an abbreviation @{text"?P n"} for the goal.
 Pattern matching @{text"?P n"} with the goal instantiates @{text"?P"} to the
 function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}.  Now the proposition to
 be proved in the base case can be written as @{text"?P 0"}, the induction
@@ -777,7 +777,7 @@
 
 Induction also provides the \isacom{case} idiom that abbreviates
 the \isacom{fix}-\isacom{assume} step. The above proof becomes
-*}
+\<close>
 (*<*)lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"(*>*)
 proof (induction n)
   case 0
@@ -787,29 +787,29 @@
   thus ?case by simp
 qed
 
-text{*
+text\<open>
 The unknown @{text"?case"}\index{case?@@{text"?case"}|(} is set in each case to the required
 claim, i.e., @{text"?P 0"} and \mbox{@{text"?P(Suc n)"}} in the above proof,
 without requiring the user to define a @{text "?P"}. The general
 pattern for induction over @{typ nat} is shown on the left-hand side:
-*}text_raw{*
+\<close>text_raw\<open>
 \begin{tabular}{@ {}ll@ {}}
 \begin{minipage}[t]{.4\textwidth}
 \isa{%
-*}
+\<close>
 (*<*)lemma "P(n::nat)" proof -(*>*)
 show "P(n)"
 proof (induction n)
   case 0
-  text_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 next
   case (Suc n)
-  text_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.4\textwidth}
@@ -824,8 +824,8 @@
 \end{minipage}
 \end{tabular}
 \medskip
-*}
-text{*
+\<close>
+text\<open>
 On the right side you can see what the \isacom{case} command
 on the left stands for.
 
@@ -910,7 +910,7 @@
 
 Recall the inductive and recursive definitions of even numbers in
 \autoref{sec:inductive-defs}:
-*}
+\<close>
 
 inductive ev :: "nat \<Rightarrow> bool" where
 ev0: "ev 0" |
@@ -921,13 +921,13 @@
 "evn (Suc 0) = False" |
 "evn (Suc(Suc n)) = evn n"
 
-text{* We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
+text\<open>We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
 left column shows the actual proof text, the right column shows
-the implicit effect of the two \isacom{case} commands:*}text_raw{*
+the implicit effect of the two \isacom{case} commands:\<close>text_raw\<open>
 \begin{tabular}{@ {}l@ {\qquad}l@ {}}
 \begin{minipage}[t]{.5\textwidth}
 \isa{%
-*}
+\<close>
 
 lemma "ev n \<Longrightarrow> evn n"
 proof(induction rule: ev.induct)
@@ -941,7 +941,7 @@
   thus ?case by simp
 qed
 
-text_raw {* }
+text_raw \<open>}
 \end{minipage}
 &
 \begin{minipage}[t]{.5\textwidth}
@@ -957,8 +957,8 @@
 \end{minipage}
 \end{tabular}
 \medskip
-*}
-text{*
+\<close>
+text\<open>
 The proof resembles structural induction, but the induction rule is given
 explicitly and the names of the cases are the names of the rules in the
 inductive definition.
@@ -986,7 +986,7 @@
 case @{thm[source] evSS} is derived from a renamed version of
 rule @{thm[source] evSS}: @{text"ev m \<Longrightarrow> ev(Suc(Suc m))"}.
 Here is an example with a (contrived) intermediate step that refers to @{text m}:
-*}
+\<close>
 
 lemma "ev n \<Longrightarrow> evn n"
 proof(induction rule: ev.induct)
@@ -994,16 +994,16 @@
 next
   case (evSS m)
   have "evn(Suc(Suc m)) = evn m" by simp
-  thus ?case using `evn m` by blast
+  thus ?case using \<open>evn m\<close> by blast
 qed
 
-text{*
+text\<open>
 \indent
 In general, let @{text I} be a (for simplicity unary) inductively defined
 predicate and let the rules in the definition of @{text I}
 be called @{text "rule\<^sub>1"}, \dots, @{text "rule\<^sub>n"}. A proof by rule
 induction follows this pattern:\index{inductionrule@@{text"induction ... rule:"}}
-*}
+\<close>
 
 (*<*)
 inductive I where rule\<^sub>1: "I()" |  rule\<^sub>2: "I()" |  rule\<^sub>n: "I()"
@@ -1011,21 +1011,21 @@
 show "I x \<Longrightarrow> P x"
 proof(induction rule: I.induct)
   case rule\<^sub>1
-  text_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 next
-  text_raw{*\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
+  text_raw\<open>\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
 (*<*)
   case rule\<^sub>2
   show ?case sorry
 (*>*)
 next
   case rule\<^sub>n
-  text_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
-  show ?case (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
+  text_raw\<open>\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
+  show ?case (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
 qed(*<*)qed(*>*)
 
-text{*
+text\<open>
 One can provide explicit variable names by writing
 \isacom{case}~@{text"(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)"}, thus renaming the first @{text k}
 free variables in rule @{text i} to @{text"x\<^sub>1 \<dots> x\<^sub>k"},
@@ -1071,7 +1071,7 @@
 A simple example is the proof that @{prop"ev n \<Longrightarrow> ev (n - 2)"}. We
 already went through the details informally in \autoref{sec:Logic:even}. This
 is the Isar proof:
-*}
+\<close>
 (*<*)
 notepad
 begin fix n
@@ -1087,7 +1087,7 @@
 end
 (*>*)
 
-text{* The key point here is that a case analysis over some inductively
+text\<open>The key point here is that a case analysis over some inductively
 defined predicate is triggered by piping the given fact
 (here: \isacom{from}~@{text this}) into a proof by @{text cases}.
 Let us examine the assumptions available in each case. In case @{text ev0}
@@ -1101,7 +1101,7 @@
 rule @{text evSS} can yield @{prop"ev(Suc 0)"} because @{text"Suc 0"} unifies
 neither with @{text 0} nor with @{term"Suc(Suc n)"}. Impossible cases do not
 have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
-*}
+\<close>
 (*<*)
 notepad begin fix P
 (*>*)
@@ -1110,14 +1110,14 @@
 end
 (*>*)
 
-text{* That is, @{prop"ev(Suc 0)"} is simply not provable: *}
+text\<open>That is, @{prop"ev(Suc 0)"} is simply not provable:\<close>
 
 lemma "\<not> ev(Suc 0)"
 proof
   assume "ev(Suc 0)" then show False by cases
 qed
 
-text{* Normally not all cases will be impossible. As a simple exercise,
+text\<open>Normally not all cases will be impossible. As a simple exercise,
 prove that \mbox{@{prop"\<not> ev(Suc(Suc(Suc 0)))"}.}
 
 \subsection{Advanced Rule Induction}
@@ -1147,23 +1147,23 @@
 \isacom{proof}@{text"(induction \"r\" \"s\" \"t\" arbitrary: \<dots> rule: I.induct)"}\index{inductionrule@@{text"induction ... rule:"}}\index{arbitrary@@{text"arbitrary:"}}
 \end{isabelle}
 Like for rule inversion, cases that are impossible because of constructor clashes
-will not show up at all. Here is a concrete example: *}
+will not show up at all. Here is a concrete example:\<close>
 
 lemma "ev (Suc m) \<Longrightarrow> \<not> ev m"
 proof(induction "Suc m" arbitrary: m rule: ev.induct)
   fix n assume IH: "\<And>m. n = Suc m \<Longrightarrow> \<not> ev m"
   show "\<not> ev (Suc n)"
-  proof --"contradiction"
+  proof \<comment>"contradiction"
     assume "ev(Suc n)"
     thus False
-    proof cases --"rule inversion"
+    proof cases \<comment>"rule inversion"
       fix k assume "n = Suc k" "ev k"
       thus False using IH by auto
     qed
   qed
 qed
 
-text{*
+text\<open>
 Remarks:
 \begin{itemize}
 \item 
@@ -1200,12 +1200,12 @@
 
 \exercise
 Give a structured proof by rule inversion:
-*}
+\<close>
 
 lemma assumes a: "ev(Suc(Suc n))" shows "ev n"
 (*<*)oops(*>*)
 
-text{*
+text\<open>
 \endexercise
 
 \begin{exercise}
@@ -1236,7 +1236,7 @@
 @{const replicate} @{text"::"} @{typ"nat \<Rightarrow> 'a \<Rightarrow> 'a list"} is predefined
 and @{term"replicate n x"} yields the list @{text"[x, \<dots>, x]"} of length @{text n}.
 \end{exercise}
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Prog_Prove/LaTeXsugar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/LaTeXsugar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -43,7 +43,7 @@
   "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("_ /\<^latex>\<open>{\\normalsize \\,\<close>and\<^latex>\<open>\\,}\<close>/ _")
   "_asm" :: "prop \<Rightarrow> asms" ("_")
 
-setup{*
+setup\<open>
   let
     fun pretty ctxt c =
       let val tc = Proof_Context.read_const {proper = true, strict = false} ctxt c
@@ -57,7 +57,7 @@
           Thy_Output.output ctxt
             (Thy_Output.maybe_pretty_source pretty ctxt src [arg]))
   end;
-*}
+\<close>
 
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Logic.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Logic.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 imports LaTeXsugar
 begin
 (*>*)
-text{*
+text\<open>
 \vspace{-5ex}
 \section{Formulas}
 
@@ -147,11 +147,11 @@
 
 \exercise
 Start from the data type of binary trees defined earlier:
-*}
+\<close>
 
 datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
 
-text{*
+text\<open>
 Define a function @{text "set ::"} @{typ "'a tree \<Rightarrow> 'a set"}
 that returns the elements in a tree and a function
 @{text "ord ::"} @{typ "int tree \<Rightarrow> bool"}
@@ -169,7 +169,7 @@
 So far we have only seen @{text simp} and \indexed{@{text auto}}{auto}: Both perform
 rewriting, both can also prove linear arithmetic facts (no multiplication),
 and @{text auto} is also able to prove simple logical or set-theoretic goals:
-*}
+\<close>
 
 lemma "\<forall>x. \<exists>y. x = y"
 by auto
@@ -177,7 +177,7 @@
 lemma "A \<subseteq> B \<inter> C \<Longrightarrow> A \<subseteq> B \<union> C"
 by auto
 
-text{* where
+text\<open>where
 \begin{quote}
 \isacom{by} \textit{proof-method}
 \end{quote}
@@ -200,13 +200,13 @@
 subgoal only, and it can be modified like @{text auto}, e.g.,
 with @{text "simp add"}. Here is a typical example of what @{text fastforce}
 can do:
-*}
+\<close>
 
 lemma "\<lbrakk> \<forall>xs \<in> A. \<exists>ys. xs = ys @ ys;  us \<in> A \<rbrakk>
    \<Longrightarrow> \<exists>n. length us = n+n"
 by fastforce
 
-text{* This lemma is out of reach for @{text auto} because of the
+text\<open>This lemma is out of reach for @{text auto} because of the
 quantifiers.  Even @{text fastforce} fails when the quantifier structure
 becomes more complicated. In a few cases, its slow version @{text force}
 succeeds where @{text fastforce} fails.
@@ -215,7 +215,7 @@
 following example, @{text T} and @{text A} are two binary predicates. It
 is shown that if @{text T} is total, @{text A} is antisymmetric and @{text T} is
 a subset of @{text A}, then @{text A} is a subset of @{text T}:
-*}
+\<close>
 
 lemma
   "\<lbrakk> \<forall>x y. T x y \<or> T y x;
@@ -224,7 +224,7 @@
    \<Longrightarrow> \<forall>x y. A x y \<longrightarrow> T x y"
 by blast
 
-text{*
+text\<open>
 We leave it to the reader to figure out why this lemma is true.
 Method @{text blast}
 \begin{itemize}
@@ -245,16 +245,16 @@
 queried over the internet. If successful, a proof command is generated and can
 be inserted into your proof.  The biggest win of \isacom{sledgehammer} is
 that it will take into account the whole lemma library and you do not need to
-feed in any lemma explicitly. For example,*}
+feed in any lemma explicitly. For example,\<close>
 
 lemma "\<lbrakk> xs @ ys = ys @ xs;  length xs = length ys \<rbrakk> \<Longrightarrow> xs = ys"
 
-txt{* cannot be solved by any of the standard proof methods, but
-\isacom{sledgehammer} finds the following proof: *}
+txt\<open>cannot be solved by any of the standard proof methods, but
+\isacom{sledgehammer} finds the following proof:\<close>
 
 by (metis append_eq_conv_conj)
 
-text{* We do not explain how the proof was found but what this command
+text\<open>We do not explain how the proof was found but what this command
 means. For a start, Isabelle does not trust external tools (and in particular
 not the translations from Isabelle's logic to those tools!)
 and insists on a proof that it can check. This is what \indexed{@{text metis}}{metis} does.
@@ -286,12 +286,12 @@
 because it does not involve multiplication, although multiplication with
 numbers, e.g., @{text"2*n"}, is allowed. Such formulas can be proved by
 \indexed{@{text arith}}{arith}:
-*}
+\<close>
 
 lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
 by arith
 
-text{* In fact, @{text auto} and @{text simp} can prove many linear
+text\<open>In fact, @{text auto} and @{text simp} can prove many linear
 arithmetic formulas already, like the one above, by calling a weak but fast
 version of @{text arith}. Hence it is usually not necessary to invoke
 @{text arith} explicitly.
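
As a sketch of that built-in strength, the same goal as above should also
succumb to @{text simp} alone:

lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
by simp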
@@ -425,12 +425,12 @@
 @{thm[source] le_trans}, transitivity of @{text"\<le>"} on type @{typ nat},
 is not an introduction rule by default because of the disastrous effect
 on the search space, but can be useful in specific situations:
-*}
+\<close>
 
 lemma "\<lbrakk> (a::nat) \<le> b; b \<le> c; c \<le> d; d \<le> e \<rbrakk> \<Longrightarrow> a \<le> e"
 by(blast intro: le_trans)
 
-text{*
+text\<open>
 Of course this is just an example and could be proved by @{text arith}, too.
 
 \subsection{Forward Proof}
@@ -459,11 +459,11 @@
 by unifying and thus proving @{text "A\<^sub>i"} with @{text "r\<^sub>i"}, @{text"i = 1\<dots>m"}.
 Here is an example, where @{thm[source]refl} is the theorem
 @{thm[show_question_marks] refl}:
-*}
+\<close>
 
 thm conjI[OF refl[of "a"] refl[of "b"]]
 
-text{* yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
+text\<open>yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
 The command \isacom{thm} merely displays the result.
 
 Forward reasoning also makes sense in connection with proof states.
@@ -474,12 +474,12 @@
 allows proof search to reason forward with @{text r}, i.e.,
 to replace an assumption @{text A'}, where @{text A'} unifies with @{text A},
 with the correspondingly instantiated @{text B}. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
-*}
+\<close>
 
 lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
 by(blast dest: Suc_leD)
 
-text{* In this particular example we could have backchained with
+text\<open>In this particular example we could have backchained with
 @{thm[source] Suc_leD}, too, but because the premise is more complicated than the conclusion this can easily lead to nontermination.
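
The backchaining variant just mentioned would supply the rule via
@{text intro} instead of @{text dest}; on this small goal the bounded search
still terminates, reaching the assumption after three steps (a sketch):

lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
by(blast intro: Suc_leD)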
 
 %\subsection{Finding Theorems}
@@ -516,14 +516,14 @@
 The operative word ``inductive'' means that these are the only even numbers.
 In Isabelle we give the two rules the names @{text ev0} and @{text evSS}
 and write
-*}
+\<close>
 
 inductive ev :: "nat \<Rightarrow> bool" where
 ev0:    "ev 0" |
 evSS:  (*<*)"ev n \<Longrightarrow> ev (Suc(Suc n))"(*>*)
-text_raw{* @{prop[source]"ev n \<Longrightarrow> ev (n + 2)"} *}
+text_raw\<open>@{prop[source]"ev n \<Longrightarrow> ev (n + 2)"}\<close>
 
-text{* To get used to inductive definitions, we will first prove a few
+text\<open>To get used to inductive definitions, we will first prove a few
 properties of @{const ev} informally before we descend to the Isabelle level.
 
 How do we prove that some number is even, e.g., @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
@@ -535,14 +535,14 @@
 
 Showing that all even numbers have some property is more complicated.  For
 example, let us prove that the inductive definition of even numbers agrees
-with the following recursive one:*}
+with the following recursive one:\<close>
 
 fun evn :: "nat \<Rightarrow> bool" where
 "evn 0 = True" |
 "evn (Suc 0) = False" |
 "evn (Suc(Suc n)) = evn n"
 
-text{* We prove @{prop"ev m \<Longrightarrow> evn m"}.  That is, we
+text\<open>We prove @{prop"ev m \<Longrightarrow> evn m"}.  That is, we
 assume @{prop"ev m"} and by induction on the form of its derivation
 prove @{prop"evn m"}. There are two cases corresponding to the two rules
 for @{const ev}:
@@ -606,60 +606,60 @@
 direction: @{text "evSS[OF evSS[OF ev0]]"} yields the theorem @{thm evSS[OF
 evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
 fashion. Although this is more verbose, it allows us to demonstrate how each
-rule application changes the proof state: *}
+rule application changes the proof state:\<close>
 
 lemma "ev(Suc(Suc(Suc(Suc 0))))"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
-*}
+\<close>
 apply(rule evSS)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
-*}
+\<close>
 apply(rule evSS)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
-*}
+\<close>
 apply(rule ev0)
 done
 
-text{* \indent
+text\<open>\indent
 Rule induction is applied by giving the induction rule explicitly via the
-@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}*}
+@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}\<close>
 
 lemma "ev m \<Longrightarrow> evn m"
 apply(induction rule: ev.induct)
 by(simp_all)
 
-text{* Both cases are automatic. Note that if there are multiple assumptions
+text\<open>Both cases are automatic. Note that if there are multiple assumptions
 of the form @{prop"ev t"}, method @{text induction} will induct on the leftmost
 one.
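
To see this in a (contrived) sketch: the induction below is on @{prop"ev n"}
rather than @{prop"ev m"}, simply because the former is the leftmost
assumption of that form:

lemma "\<lbrakk> ev n; ev m \<rbrakk> \<Longrightarrow> evn n"
apply(induction rule: ev.induct)
by(simp_all)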
 
 As a bonus, we also prove the remaining direction of the equivalence of
 @{const ev} and @{const evn}:
-*}
+\<close>
 
 lemma "evn n \<Longrightarrow> ev n"
 apply(induction n rule: evn.induct)
 
-txt{* This is a proof by computation induction on @{text n} (see
+txt\<open>This is a proof by computation induction on @{text n} (see
 \autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
 the three equations for @{const evn}:
 @{subgoals[display,indent=0]}
 The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because @{prop"evn(Suc 0)"} is @{const False}:
-*}
+\<close>
 
 by (simp_all add: ev0 evSS)
 
-text{* The rules for @{const ev} make perfect simplification and introduction
+text\<open>The rules for @{const ev} make perfect simplification and introduction
 rules because their premises are always smaller than the conclusion. It
 makes sense to turn them into simplification and introduction rules
 permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
-\index{intros@@{text".intros"}} by Isabelle: *}
+\index{intros@@{text".intros"}} by Isabelle:\<close>
 
 declare ev.intros[simp,intro]
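
As a sketch of the effect, goals of this shape should now be provable by
@{text simp} alone, the condition @{prop"ev 0"} being discharged on the fly:

lemma "ev(Suc(Suc 0))"
by simp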
 
-text{* The rules of an inductive definition are not simplification rules by
+text\<open>The rules of an inductive definition are not simplification rules by
 default because, in contrast to recursive functions, there is no termination
 requirement for inductive definitions.
 
@@ -707,13 +707,13 @@
 r"}, because @{text"star r"} is meant to be the reflexive transitive closure.
 That is, @{prop"star r x y"} is meant to be true if from @{text x} we can
 reach @{text y} in finitely many @{text r} steps. This concept is naturally
-defined inductively: *}
+defined inductively:\<close>
 
 inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"  for r where
 refl:  "star r x x" |
 step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
 
-text{* The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
+text\<open>The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
 step case @{thm[source]step} combines an @{text r} step (from @{text x} to
 @{text y}) and a @{term"star r"} step (from @{text y} to @{text z}) into a
 @{term"star r"} step (from @{text x} to @{text z}).
@@ -723,7 +723,7 @@
 generates a simpler induction rule.
 
 By definition @{term"star r"} is reflexive. It is also transitive, but we
-need rule induction to prove that: *}
+need rule induction to prove that:\<close>
 
 lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
 apply(induction rule: star.induct)
@@ -732,15 +732,15 @@
 apply(rename_tac u x y)
 defer
 (*>*)
-txt{* The induction is over @{prop"star r x y"} (the first matching assumption)
+txt\<open>The induction is over @{prop"star r x y"} (the first matching assumption)
 and we try to prove \mbox{@{prop"star r y z \<Longrightarrow> star r x z"}},
 which we abbreviate by @{prop"P x y"}. These are our two subgoals:
 @{subgoals[display,indent=0]}
 The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
 and it is trivial:\index{assumption@@{text assumption}}
-*}
+\<close>
 apply(assumption)
-txt{* Let us examine subgoal @{text 2}, case @{thm[source] step}.
+txt\<open>Let us examine subgoal @{text 2}, case @{thm[source] step}.
 Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
 are the premises of rule @{thm[source]step}.
 Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
@@ -749,11 +749,11 @@
 The proof itself is straightforward: from \mbox{@{prop"star r y z"}} the IH
 leads to @{prop"star r x z"} which, together with @{prop"r u x"},
 leads to \mbox{@{prop"star r u z"}} via rule @{thm[source]step}:
-*}
+\<close>
 apply(metis step)
 done
 
-text{*\index{rule induction|)}
+text\<open>\index{rule induction|)}
 
 \subsection{The General Case}
 
@@ -804,13 +804,13 @@
 
 \exercise
 We could also have defined @{const star} as follows:
-*}
+\<close>
 
 inductive star' :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" for r where
 refl': "star' r x x" |
 step': "star' r x y \<Longrightarrow> r y z \<Longrightarrow> star' r x z"
 
-text{*
+text\<open>
 The single @{text r} step is performed after rather than before the @{text star'}
 steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
 @{prop "star r x y \<Longrightarrow> star' r x y"}. You may need lemmas.
@@ -877,7 +877,7 @@
 some suitable value of @{text "?"}.
 \end{exercise}
 \fi
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Prog_Prove/Types_and_funs.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Prog_Prove/Types_and_funs.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,16 +3,16 @@
 imports Main
 begin
 (*>*)
-text{*
+text\<open>
 \vspace{-5ex}
 \section{Type and Function Definitions}
 
 Type synonyms are abbreviations for existing types, for example
-\index{string@@{text string}}*}
+\index{string@@{text string}}\<close>
 
 type_synonym string = "char list"
 
-text{*
+text\<open>
 Type synonyms are expanded after parsing and are not present in internal representation and output. They are mere conveniences for the reader.
 
 \subsection{Datatypes}
@@ -54,22 +54,22 @@
 Case expressions must be enclosed in parentheses.
 
 As an example of a datatype beyond @{typ nat} and @{text list}, consider binary trees:
-*}
+\<close>
 
 datatype 'a tree = Tip | Node  "'a tree"  'a  "'a tree"
 
-text{* with a mirror function: *}
+text\<open>with a mirror function:\<close>
 
 fun mirror :: "'a tree \<Rightarrow> 'a tree" where
 "mirror Tip = Tip" |
 "mirror (Node l a r) = Node (mirror r) a (mirror l)"
 
-text{* The following lemma illustrates induction: *}
+text\<open>The following lemma illustrates induction:\<close>
 
 lemma "mirror(mirror t) = t"
 apply(induction t)
 
-txt{* yields
+txt\<open>yields
 @{subgoals[display]}
 The induction step contains two induction hypotheses, one for each subtree.
 An application of @{text auto} finishes the proof.
@@ -81,7 +81,7 @@
 elements of @{typ 'a}, you wrap them up in @{const Some} and call
 the new type @{typ"'a option"}. A typical application is a lookup function
 on a list of key-value pairs, often called an association list:
-*}
+\<close>
 (*<*)
 apply auto
 done
@@ -90,7 +90,7 @@
 "lookup [] x = None" |
 "lookup ((a,b) # ps) x = (if a = x then Some b else lookup ps x)"
 
-text{*
+text\<open>
 Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
 Pairs can be taken apart either by pattern matching (as above) or with the
 projection functions @{const fst} and @{const snd}: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
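
A one-line sketch of the projections:

lemma "(fst (x, y), snd (x, y)) = (x, y)"
by simp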
@@ -101,23 +101,23 @@
 \subsection{Definitions}
 
 Non-recursive functions can be defined as in the following example:
-\index{definition@\isacom{definition}}*}
+\index{definition@\isacom{definition}}\<close>
 
 definition sq :: "nat \<Rightarrow> nat" where
 "sq n = n * n"
 
-text{* Such definitions do not allow pattern matching but only
+text\<open>Such definitions do not allow pattern matching but only
 @{text"f x\<^sub>1 \<dots> x\<^sub>n = t"}, where @{text f} does not occur in @{text t}.
 
 \subsection{Abbreviations}
 
 Abbreviations are similar to definitions:
-\index{abbreviation@\isacom{abbreviation}}*}
+\index{abbreviation@\isacom{abbreviation}}\<close>
 
 abbreviation sq' :: "nat \<Rightarrow> nat" where
 "sq' n \<equiv> n * n"
 
-text{* The key difference is that @{const sq'} is only syntactic sugar:
+text\<open>The key difference is that @{const sq'} is only syntactic sugar:
 after parsing, @{term"sq' t"} is replaced by \mbox{@{term"t*t"}};
 before printing, every occurrence of @{term"u*u"} is replaced by
 \mbox{@{term"sq' u"}}.  Internally, @{const sq'} does not exist.
@@ -153,14 +153,14 @@
 Functions defined with \isacom{fun} come with their own induction schema
 that mirrors the recursion schema and is derived from the termination
 order. For example,
-*}
+\<close>
 
 fun div2 :: "nat \<Rightarrow> nat" where
 "div2 0 = 0" |
 "div2 (Suc 0) = 0" |
 "div2 (Suc(Suc n)) = Suc(div2 n)"
 
-text{* does not just define @{const div2} but also proves a
+text\<open>does not just define @{const div2} but also proves a
 customized induction rule:
 \[
 \inferrule{
@@ -170,12 +170,12 @@
 {\mbox{@{thm (concl) div2.induct[of _ "m"]}}}
 \]
 This customized induction rule can simplify inductive proofs. For example,
-*}
+\<close>
 
 lemma "div2(n) = n div 2"
 apply(induction n rule: div2.induct)
 
-txt{* (where the infix @{text div} is the predefined division operation)
+txt\<open>(where the infix @{text div} is the predefined division operation)
 yields the subgoals
 @{subgoals[display,margin=65]}
 An application of @{text auto} finishes the proof.
@@ -260,7 +260,7 @@
 append is linear in its first argument.  A linear time version of
 @{const rev} requires an extra argument where the result is accumulated
 gradually, using only~@{text"#"}:
-*}
+\<close>
 (*<*)
 apply auto
 done
@@ -269,7 +269,7 @@
 "itrev []        ys = ys" |
 "itrev (x#xs) ys = itrev xs (x#ys)"
 
-text{* The behaviour of @{const itrev} is simple: it reverses
+text\<open>The behaviour of @{const itrev} is simple: it reverses
 its first argument by stacking its elements onto the second argument,
 and it returns that second argument when the first one becomes
 empty. Note that @{const itrev} is tail-recursive: it can be
@@ -277,17 +277,17 @@
 
 Naturally, we would like to show that @{const itrev} does indeed reverse
 its first argument provided the second one is empty:
-*}
+\<close>
 
 lemma "itrev xs [] = rev xs"
 
-txt{* There is no choice as to the induction variable:
-*}
+txt\<open>There is no choice as to the induction variable:
+\<close>
 
 apply(induction xs)
 apply(auto)
 
-txt{*
+txt\<open>
 Unfortunately, this attempt does not prove
 the induction step:
 @{subgoals[display,margin=70]}
@@ -299,11 +299,11 @@
 \end{quote}
 Of course one cannot do this naively: @{prop"itrev xs ys = rev xs"} is
 just not true.  The correct generalization is
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "itrev xs ys = rev xs @ ys"
 (*<*)apply(induction xs, auto)(*>*)
-txt{*
+txt\<open>
 If @{text ys} is replaced by @{term"[]"}, the right-hand side simplifies to
 @{term"rev xs"}, as required.
 In this instance it was easy to guess the right generalization.
@@ -320,21 +320,21 @@
 @{term"a # ys"} instead of @{text ys}. Hence we prove the theorem
 for all @{text ys} instead of a fixed one. We can instruct induction
 to perform this generalization for us by adding @{text "arbitrary: ys"}\index{arbitrary@@{text"arbitrary:"}}.
-*}
+\<close>
 (*<*)oops
 lemma "itrev xs ys = rev xs @ ys"
 (*>*)
 apply(induction xs arbitrary: ys)
 
-txt{* The induction hypothesis in the induction step is now universally quantified over @{text ys}:
+txt\<open>The induction hypothesis in the induction step is now universally quantified over @{text ys}:
 @{subgoals[display,margin=65]}
 Thus the proof succeeds:
-*}
+\<close>
 
 apply auto
 done
 
-text{*
+text\<open>
 This leads to another heuristic for generalization:
 \begin{quote}
 \emph{Generalize induction by generalizing all free
@@ -547,7 +547,7 @@
 Define a function @{text "nodes :: tree0 \<Rightarrow> nat"} that counts the number of
 all nodes (inner nodes and leaves) in such a tree.
 Consider the following recursive function:
-*}
+\<close>
 (*<*)
 datatype tree0 = Tip | Node tree0 tree0
 (*>*)
@@ -555,7 +555,7 @@
 "explode 0 t = t" |
 "explode (Suc n) t = explode n (Node t t)"
 
-text {*
+text \<open>
 Find an equation expressing the size of a tree after exploding it
 (\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
 of @{term "nodes t"} and @{text n}. Prove your equation.
@@ -569,11 +569,11 @@
 \exercise
 Define arithmetic expressions in one variable over integers (type @{typ int})
 as a data type:
-*}
+\<close>
 
 datatype exp = Var | Const int | Add exp exp | Mult exp exp
 
-text{*
+text\<open>
 Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
 such that @{term"eval e x"} evaluates @{text e} at the value
 @{text x}.
@@ -589,7 +589,7 @@
 \mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
 Hint: consider the hint in Exercise~\ref{exe:tree0}.
 \endexercise
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Sugar/Sugar.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Sugar/Sugar.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,7 +7,7 @@
 no_translations
   ("prop") "P \<and> Q \<Longrightarrow> R" <= ("prop") "P \<Longrightarrow> Q \<Longrightarrow> R"
 (*>*)
-text{*
+text\<open>
 \section{Introduction}
 
 This document is for those Isabelle users who have mastered
@@ -142,13 +142,13 @@
 \end{quote}
 into the relevant \texttt{ROOT} file, just before the \texttt{theories} for that session.
 The rest of this document is produced with this flag set to \texttt{false}.
-*}
+\<close>
 
 (*<*)declare [[show_question_marks = false]](*>*)
 
-subsection {*Qualified names*}
+subsection \<open>Qualified names\<close>
 
-text{* If there are multiple declarations of the same name, Isabelle prints
+text\<open>If there are multiple declarations of the same name, Isabelle prints
 the qualified name, for example @{text "T.length"}, where @{text T} is the
 theory it is defined in, to distinguish it from the predefined @{const[source]
 "List.length"}. In case there is no danger of confusion, you can insist on
@@ -201,10 +201,10 @@
 \end{quote}
 
 Sometimes Isabelle $\eta$-contracts terms, for example in the following definition:
-*}
+\<close>
 fun eta where
 "eta (x \<cdot> xs) = (\<forall>y \<in> set xs. x < y)"
-text{*
+text\<open>
 \noindent
 If you now print the defining equation, the result is not what you hoped for:
 \begin{quote}
@@ -287,12 +287,12 @@
 When displaying theorems with the \texttt{display} option, for example as in
 \verb!@!\verb!{thm[display] refl}! @{thm[display] refl} the theorem is
 set in small font. It uses the \LaTeX-macro \verb!\isastyle!,
-which is also the style that regular theory text is set in, e.g. *}
+which is also the style that regular theory text is set in, e.g.\<close>
 
 lemma "t = t"
 (*<*)oops(*>*)
 
-text{* \noindent Otherwise \verb!\isastyleminor! is used,
+text\<open>\noindent Otherwise \verb!\isastyleminor! is used,
 which does not modify the font size (assuming you stick to the default
 \verb!\isabellestyle{it}! in \texttt{root.tex}). If you prefer
 normal font size throughout your text, include
@@ -447,23 +447,23 @@
 papers, but some key lemmas might be of interest.
 It is usually easiest to put them in figures like the one in Fig.\
 \ref{fig:proof}. This was achieved with the \isakeyword{text\_raw} command:
-*}
-text_raw {*
+\<close>
+text_raw \<open>
   \begin{figure}
   \begin{center}\begin{minipage}{0.6\textwidth}  
   \isastyleminor\isamarkuptrue
-*}
+\<close>
 lemma True
 proof -
-  -- "pretty trivial"
+  \<comment> "pretty trivial"
   show True by force
 qed
-text_raw {*    
+text_raw \<open>
   \end{minipage}\end{center}
   \caption{Example proof in a figure.}\label{fig:proof}
   \end{figure}
-*}
-text {*
+\<close>
+text \<open>
 
 \begin{quote}
 \small
@@ -574,7 +574,7 @@
 \texttt{const\_typ} defined in \texttt{LaTeXsugar}. For example,
 \verb!@!\verb!{const_typ length}! produces @{const_typ length}.
 
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Advanced/Partial.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/Partial.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 (*<*)theory Partial imports While_Combinator begin(*>*)
 
-text{*\noindent Throughout this tutorial, we have emphasized
+text\<open>\noindent Throughout this tutorial, we have emphasized
 that all functions in HOL are total.  We cannot hope to define
 truly partial functions, but must make them total.  A straightforward
 method is to lift the result type of the function from $\tau$ to
@@ -23,29 +23,29 @@
 We have already seen an instance of underdefinedness by means of
 non-exhaustive pattern matching: the definition of @{term last} in
 \S\ref{sec:fun}. The same is allowed for \isacommand{primrec}
-*}
+\<close>
 
 consts hd :: "'a list \<Rightarrow> 'a"
 primrec "hd (x#xs) = x"
 
-text{*\noindent
+text\<open>\noindent
 although it generates a warning.
 Even ordinary definitions allow underdefinedness, this time by means of
 preconditions:
-*}
+\<close>
 
 definition subtract :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "n \<le> m \<Longrightarrow> subtract m n \<equiv> m - n"
 
-text{*
+text\<open>
 The rest of this section is devoted to the question of how to define
 partial recursive functions by other means than non-exhaustive pattern
 matching.
-*}
+\<close>
 
-subsubsection{*Guarded Recursion*}
+subsubsection\<open>Guarded Recursion\<close>
 
-text{* 
+text\<open>
 \index{recursion!guarded}%
 Neither \isacommand{primrec} nor \isacommand{recdef} allow to
 prefix an equation with a condition in the way ordinary definitions do
@@ -59,14 +59,14 @@
 which is ideal for specifying underdefined functions on top of it.
 
 As a simple example we define division on @{typ nat}:
-*}
+\<close>
 
 consts divi :: "nat \<times> nat \<Rightarrow> nat"
 recdef divi "measure(\<lambda>(m,n). m)"
   "divi(m,0) = arbitrary"
   "divi(m,n) = (if m < n then 0 else divi(m-n,n)+1)"
 
-text{*\noindent Of course we could also have defined
+text\<open>\noindent Of course we could also have defined
 @{term"divi(m,0)"} to be some specific number, for example 0. The
 latter option is chosen for the predefined @{text div} function, which
 simplifies proofs at the expense of deviating from the
@@ -83,14 +83,14 @@
 known \emph{Union-Find} algorithm.
 The snag is that it may not terminate if @{term f} has non-trivial cycles.
 Phrased differently, the relation
-*}
+\<close>
 
 definition step1 :: "('a \<Rightarrow> 'a) \<Rightarrow> ('a \<times> 'a)set" where
   "step1 f \<equiv> {(y,x). y = f x \<and> y \<noteq> x}"
 
-text{*\noindent
+text\<open>\noindent
 must be well-founded. Thus we make the following definition:
-*}
+\<close>
 
 consts find :: "('a \<Rightarrow> 'a) \<times> 'a \<Rightarrow> 'a"
 recdef find "same_fst (\<lambda>f. wf(step1 f)) step1"
@@ -99,7 +99,7 @@
                 else arbitrary)"
 (hints recdef_simp: step1_def)
 
-text{*\noindent
+text\<open>\noindent
 The recursion equation itself should be clear enough: it is our aborted
 first attempt augmented with a check that there are no non-trivial loops.
 To express the required well-founded relation we employ the
@@ -122,29 +122,29 @@
 
 Normally you will then derive the following conditional variant from
 the recursion equation:
-*}
+\<close>
 
 lemma [simp]:
   "wf(step1 f) \<Longrightarrow> find(f,x) = (if f x = x then x else find(f, f x))"
 by simp
 
-text{*\noindent Then you should disable the original recursion equation:*}
+text\<open>\noindent Then you should disable the original recursion equation:\<close>
 
 declare find.simps[simp del]
 
-text{*
+text\<open>
 Reasoning about such underdefined functions is like that for other
 recursive functions.  Here is a simple example of recursion induction:
-*}
+\<close>
 
 lemma "wf(step1 f) \<longrightarrow> f(find(f,x)) = find(f,x)"
 apply(induct_tac f x rule: find.induct)
 apply simp
 done
 
-subsubsection{*The {\tt\slshape while} Combinator*}
+subsubsection\<open>The {\tt\slshape while} Combinator\<close>
 
-text{*If the recursive function happens to be tail recursive, its
+text\<open>If the recursive function happens to be tail recursive, its
 definition becomes a triviality if based on the predefined \cdx{while}
 combinator.  The latter lives in the Library theory \thydx{While_Combinator}.
 % which is not part of {text Main} but needs to
@@ -158,13 +158,13 @@
 \end{verbatim}
 In general, @{term s} will be a tuple or record.  As an example
 consider the following definition of function @{const find}:
-*}
+\<close>
 
 definition find2 :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a" where
   "find2 f x \<equiv>
    fst(while (\<lambda>(x,x'). x' \<noteq> x) (\<lambda>(x,x'). (x',f x')) (x,f x))"
 
-text{*\noindent
+text\<open>\noindent
 The loop operates on two ``local variables'' @{term x} and @{term x'}
 containing the ``current'' and the ``next'' value of function @{term f}.
 They are initialized with the global @{term x} and @{term"f x"}. At the
@@ -185,7 +185,7 @@
 of induction we apply the above while rule, suitably instantiated.
 Only the final premise of @{thm[source]while_rule} is left unproved
 by @{text auto} but falls to @{text simp}:
-*}
+\<close>
 
 lemma lem: "wf(step1 f) \<Longrightarrow>
   \<exists>y. while (\<lambda>(x,x'). x' \<noteq> x) (\<lambda>(x,x'). (x',f x')) (x,f x) = (y,y) \<and>
@@ -196,16 +196,16 @@
 apply(simp add: inv_image_def step1_def)
 done
 
-text{*
+text\<open>
 The theorem itself is a simple consequence of this lemma:
-*}
+\<close>
 
 theorem "wf(step1 f) \<Longrightarrow> f(find2 f x) = find2 f x"
 apply(drule_tac x = x in lem)
 apply(auto simp add: find2_def)
 done
 
-text{* Let us conclude this section on partial functions by a
+text\<open>Let us conclude this section on partial functions by a
 discussion of the merits of the @{term while} combinator. We have
 already seen that the advantage of not having to
 provide a termination argument when defining a function via @{term
@@ -219,6 +219,6 @@
 definition that is impossible to execute or prohibitively slow.
 Thus, if you are aiming for an efficiently executable definition
 of a partial function, you are likely to need @{term while}.
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Advanced/WFrec.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/WFrec.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 (*<*)theory WFrec imports Main begin(*>*)
 
-text{*\noindent
+text\<open>\noindent
 So far, all recursive definitions were shown to terminate via measure
 functions. Sometimes this can be inconvenient or
 impossible. Fortunately, \isacommand{recdef} supports much more
 general definitions. For example, termination of Ackermann's function
 can be shown by means of the \rmindex{lexicographic product} @{text"<*lex*>"}:
-*}
+\<close>
 
 consts ack :: "nat\<times>nat \<Rightarrow> nat"
 recdef ack "measure(\<lambda>m. m) <*lex*> measure(\<lambda>n. n)"
@@ -14,7 +14,7 @@
   "ack(Suc m,0)     = ack(m, 1)"
   "ack(Suc m,Suc n) = ack(m,ack(Suc m,n))"
 
-text{*\noindent
+text\<open>\noindent
 The lexicographic product decreases if either its first component
 decreases (as in the second equation and in the outer call in the
 third equation) or its first component stays the same and the second
@@ -39,7 +39,7 @@
 product of two well-founded relations is again well-founded, which we relied
 on when defining Ackermann's function above.
 Of course the lexicographic product can also be iterated:
-*}
+\<close>
 
 consts contrived :: "nat \<times> nat \<times> nat \<Rightarrow> nat"
 recdef contrived
@@ -49,7 +49,7 @@
 "contrived(Suc i,0,0) = contrived(i,i,i)"
 "contrived(0,0,0)     = 0"
 
-text{*
+text\<open>
 Lexicographic products of measure functions already go a long
 way. Furthermore, you may embed a type in an
 existing well-founded relation via the inverse image construction @{term
@@ -64,42 +64,42 @@
 \isacommand{recdef}.  For example, the greater-than relation can be made
 well-founded by cutting it off at a certain point.  Here is an example
 of a recursive function that calls itself with increasing values up to ten:
-*}
+\<close>
 
 consts f :: "nat \<Rightarrow> nat"
 recdef (*<*)(permissive)(*>*)f "{(i,j). j<i \<and> i \<le> (10::nat)}"
 "f i = (if 10 \<le> i then 0 else i * f(Suc i))"
 
-text{*\noindent
+text\<open>\noindent
 Since \isacommand{recdef} is not prepared for the relation supplied above,
 Isabelle rejects the definition.  We should first have proved that
 our relation was well-founded:
-*}
+\<close>
 
 lemma wf_greater: "wf {(i,j). j<i \<and> i \<le> (N::nat)}"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by showing that our relation is a subset of another well-founded
 relation: one given by a measure function.\index{*wf_subset (theorem)}
-*}
+\<close>
 
 apply (rule wf_subset [of "measure (\<lambda>k::nat. N-k)"], blast)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 \noindent
 The inclusion remains to be proved. After unfolding some definitions, 
 we are left with simple arithmetic that is dispatched automatically.
-*}
+\<close>
 
 by (clarify, simp add: measure_def inv_image_def)
 
-text{*\noindent
+text\<open>\noindent
 
 Armed with this lemma, we use the \attrdx{recdef_wf} attribute to attach a
 crucial hint\cmmdx{hints} to our definition:
-*}
+\<close>
 (*<*)
 consts g :: "nat \<Rightarrow> nat"
 recdef g "{(i,j). j<i \<and> i \<le> (10::nat)}"
@@ -107,13 +107,13 @@
 (*>*)
 (hints recdef_wf: wf_greater)
 
-text{*\noindent
+text\<open>\noindent
 Alternatively, we could have given @{text "measure (\<lambda>k::nat. 10-k)"} for the
 well-founded relation in our \isacommand{recdef}.  However, the arithmetic
 goal in the lemma above would have arisen instead in the \isacommand{recdef}
 termination proof, where we have less control.  A tailor-made termination
 relation makes even more sense when it can be used in several function
 declarations.
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Advanced/simp2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Advanced/simp2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,19 +2,19 @@
 theory simp2 imports Main begin
 (*>*)
 
-section{*Simplification*}
+section\<open>Simplification\<close>
 
-text{*\label{sec:simplification-II}\index{simplification|(}
+text\<open>\label{sec:simplification-II}\index{simplification|(}
 This section describes features not covered until now.  It also
 outlines the simplification process itself, which can be helpful
 when the simplifier does not do what you expect of it.
-*}
+\<close>
 
-subsection{*Advanced Features*}
+subsection\<open>Advanced Features\<close>
 
-subsubsection{*Congruence Rules*}
+subsubsection\<open>Congruence Rules\<close>
 
-text{*\label{sec:simp-cong}
+text\<open>\label{sec:simp-cong}
 While simplifying the conclusion $Q$
 of $P \Imp Q$, it is legal to use the assumption $P$.
 For $\Imp$ this policy is hardwired, but 
@@ -62,11 +62,11 @@
 \par\noindent
 is occasionally useful but is not a default rule; you have to declare it explicitly.
 \end{warn}
-*}
+\<close>
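
Assuming the rule meant here is @{thm[source] conj_cong}, the explicit
declaration is a one-liner:

declare conj_cong[cong]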
 
-subsubsection{*Permutative Rewrite Rules*}
+subsubsection\<open>Permutative Rewrite Rules\<close>
 
-text{*
+text\<open>
 \index{rewrite rules!permutative|bold}%
 An equation is a \textbf{permutative rewrite rule} if the left-hand
 side and right-hand side are the same up to renaming of variables.  The most
@@ -105,20 +105,20 @@
 Note that ordered rewriting for @{text"+"} and @{text"*"} on numbers is rarely
 necessary because the built-in arithmetic prover often succeeds without
 such tricks.
-*}
+\<close>
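
A sketch of ordered rewriting in action, using the AC rules for @{text"+"}
as collected in @{thm[source] ac_simps}:

lemma "(b::nat) + a + c = a + b + c"
by(simp add: ac_simps)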
 
-subsection{*How the Simplifier Works*}
+subsection\<open>How the Simplifier Works\<close>
 
-text{*\label{sec:SimpHow}
+text\<open>\label{sec:SimpHow}
 Roughly speaking, the simplifier proceeds bottom-up: subterms are simplified
 first.  A conditional equation is only applied if its condition can be
 proved, again by simplification.  Below we explain some special features of
 the rewriting process. 
-*}
+\<close>
 
-subsubsection{*Higher-Order Patterns*}
+subsubsection\<open>Higher-Order Patterns\<close>
 
-text{*\index{simplification rule|(}
+text\<open>\index{simplification rule|(}
 So far we have pretended the simplifier can deal with arbitrary
 rewrite rules. This is not quite true.  For reasons of feasibility,
 the simplifier expects the
@@ -153,11 +153,11 @@
   
 There is no restriction on the form of the right-hand
 sides.  They may not contain extraneous term or type variables, though.
-*}
+\<close>
 
-subsubsection{*The Preprocessor*}
+subsubsection\<open>The Preprocessor\<close>
 
-text{*\label{sec:simp-preprocessor}
+text\<open>\label{sec:simp-preprocessor}
 When a theorem is declared a simplification rule, it need not be a
 conditional equation already.  The simplifier will turn it into a set of
 conditional equations automatically.  For example, @{prop"f x =
@@ -183,7 +183,7 @@
 \end{center}
 \index{simplification rule|)}
 \index{simplification|)}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/CTL/Base.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/Base.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory Base imports Main begin(*>*)
 
-section{*Case Study: Verified Model Checking*}
+section\<open>Case Study: Verified Model Checking\<close>
 
-text{*\label{sec:VMC}
+text\<open>\label{sec:VMC}
 This chapter ends with a case study concerning model checking for 
 Computation Tree Logic (CTL), a temporal logic.
 Model checking is a popular technique for the verification of finite
@@ -54,11 +54,11 @@
 
 Abstracting from this concrete example, we assume there is a type of
 states:
-*}
+\<close>
 
 typedecl state
 
-text{*\noindent
+text\<open>\noindent
 Command \commdx{typedecl} merely declares a new type but without
 defining it (see \S\ref{sec:typedecl}). Thus we know nothing
 about the type other than its existence. That is exactly what we need
@@ -67,25 +67,25 @@
 parameter of everything but declaring @{typ state} globally as above
 reduces clutter.  Similarly we declare an arbitrary but fixed
 transition system, i.e.\ a relation between states:
-*}
+\<close>
 
 consts M :: "(state \<times> state)set"
 
-text{*\noindent
+text\<open>\noindent
 This is Isabelle's way of declaring a constant without defining it.
 Finally we introduce a type of atomic propositions
-*}
+\<close>
 
 typedecl "atom"
 
-text{*\noindent
+text\<open>\noindent
 and a \emph{labelling function}
-*}
+\<close>
 
 consts L :: "state \<Rightarrow> atom set"
 
-text{*\noindent
+text\<open>\noindent
 telling us which atomic propositions are true in each state.
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/CTL/CTL.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/CTL.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,14 +1,14 @@
 (*<*)theory CTL imports Base begin(*>*)
 
-subsection{*Computation Tree Logic --- CTL*}
+subsection\<open>Computation Tree Logic --- CTL\<close>
 
-text{*\label{sec:CTL}
+text\<open>\label{sec:CTL}
 \index{CTL|(}%
 The semantics of PDL only needs reflexive transitive closure.
 Let us be adventurous and introduce a more expressive temporal operator.
 We extend the datatype
 @{text formula} by a new constructor
-*}
+\<close>
 (*<*)
 datatype formula = Atom "atom"
                   | Neg formula
@@ -17,23 +17,23 @@
                   | EF formula(*>*)
                   | AF formula
 
-text{*\noindent
+text\<open>\noindent
 which stands for ``\emph{A}lways in the \emph{F}uture'':
 on all infinite paths, at some point the formula holds.
 Formalizing the notion of an infinite path is easy
 in HOL: it is simply a function from @{typ nat} to @{typ state}.
-*}
+\<close>
 
 definition Paths :: "state \<Rightarrow> (nat \<Rightarrow> state)set" where
 "Paths s \<equiv> {p. s = p 0 \<and> (\<forall>i. (p i, p(i+1)) \<in> M)}"
 
-text{*\noindent
+text\<open>\noindent
 This definition allows a succinct statement of the semantics of @{const AF}:
 \footnote{Do not be misled: neither datatypes nor recursive functions can be
 extended by new constructors or equations. This is just a trick of the
 presentation (see \S\ref{sec:doc-prep-suppress}). In reality one has to define
 a new datatype and a new function.}
-*}
+\<close>
 (*<*)
 primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool" ("(_ \<Turnstile> _)" [80,80] 80) where
 "s \<Turnstile> Atom a  =  (a \<in> L s)" |
@@ -44,18 +44,18 @@
 (*>*)
 "s \<Turnstile> AF f    = (\<forall>p \<in> Paths s. \<exists>i. p i \<Turnstile> f)"
 
-text{*\noindent
+text\<open>\noindent
 Model checking @{const AF} involves a function which
 is just complicated enough to warrant a separate definition:
-*}
+\<close>
 
 definition af :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
 "af A T \<equiv> A \<union> {s. \<forall>t. (s, t) \<in> M \<longrightarrow> t \<in> T}"
 
-text{*\noindent
+text\<open>\noindent
 Now we define @{term "mc(AF f)"} as the least set @{term T} that includes
 @{term"mc f"} and all states all of whose direct successors are in @{term T}:
-*}
+\<close>
 (*<*)
 primrec mc :: "formula \<Rightarrow> state set" where
 "mc(Atom a)  = {s. a \<in> L s}" |
@@ -65,10 +65,10 @@
 "mc(EF f)    = lfp(\<lambda>T. mc f \<union> M\<inverse> `` T)"|(*>*)
 "mc(AF f)    = lfp(af(mc f))"
 
-text{*\noindent
+text\<open>\noindent
 Because @{const af} is monotone in its second argument (and also its first, but
 that is irrelevant), @{term"af A"} has a least fixed point:
-*}
+\<close>
 
 lemma mono_af: "mono(af A)"
 apply(simp add: mono_def af_def)
@@ -96,16 +96,16 @@
 apply(subst lfp_unfold[OF mono_ef])
 by(blast)
 (*>*)
-text{*
+text\<open>
 All we need to prove now is  @{prop"mc(AF f) = {s. s \<Turnstile> AF f}"}, which states
 that @{term mc} and @{text"\<Turnstile>"} agree for @{const AF}\@.
 This time we prove the two inclusions separately, starting
 with the easy one:
-*}
+\<close>
 
 theorem AF_lemma1: "lfp(af A) \<subseteq> {s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A}"
 
-txt{*\noindent
+txt\<open>\noindent
 In contrast to the analogous proof for @{const EF}, and just
 for a change, we do not use fixed point induction.  Park-induction,
 named after David Park, is weaker but sufficient for this proof:
@@ -114,24 +114,24 @@
 \end{center}
 The instance of the premise @{prop"f S \<subseteq> S"} is proved pointwise,
 a decision that \isa{auto} takes for us:
-*}
+\<close>
 apply(rule lfp_lowerbound)
 apply(auto simp add: af_def Paths_def)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=70,goals_limit=1]}
 In this remaining case, we set @{term t} to @{term"p(1::nat)"}.
 The rest is automatic, which is surprising because it involves
 finding the instantiation @{term"\<lambda>i::nat. p(i+1)"}
 for @{text"\<forall>p"}.
-*}
+\<close>
 
 apply(erule_tac x = "p 1" in allE)
 apply(auto)
 done
 
 
-text{*
+text\<open>
 The opposite inclusion is proved by contradiction: if some state
 @{term s} is not in @{term"lfp(af A)"}, then we can construct an
 infinite @{term A}-avoiding path starting from~@{term s}. The reason is
@@ -143,7 +143,7 @@
 
 The one-step argument in the sketch above
 is proved by a variant of contraposition:
-*}
+\<close>
 
 lemma not_in_lfp_afD:
  "s \<notin> lfp(af A) \<Longrightarrow> s \<notin> A \<and> (\<exists> t. (s,t) \<in> M \<and> t \<notin> lfp(af A))"
@@ -152,20 +152,20 @@
 apply(simp add: af_def)
 done
 
-text{*\noindent
+text\<open>\noindent
 We assume the negation of the conclusion and prove @{term"s : lfp(af A)"}.
 Unfolding @{const lfp} once and
 simplifying with the definition of @{const af} finishes the proof.
 
 Now we iterate this process. The following construction of the desired
 path is parameterized by a predicate @{term Q} that should hold along the path:
-*}
+\<close>
 
 primrec path :: "state \<Rightarrow> (state \<Rightarrow> bool) \<Rightarrow> (nat \<Rightarrow> state)" where
 "path s Q 0 = s" |
 "path s Q (Suc n) = (SOME t. (path s Q n,t) \<in> M \<and> Q t)"
 
-text{*\noindent
+text\<open>\noindent
 Element @{term"n+1::nat"} on this path is some arbitrary successor
 @{term t} of element @{term n} such that @{term"Q t"} holds.  Remember that @{text"SOME t. R t"}
 is some arbitrary but fixed @{term t} such that @{prop"R t"} holds (see \S\ref{sec:SOME}). Of
@@ -175,43 +175,43 @@
 
 Let us show that if each state @{term s} that satisfies @{term Q}
 has a successor that again satisfies @{term Q}, then there exists an infinite @{term Q}-path:
-*}
+\<close>
 
 lemma infinity_lemma:
   "\<lbrakk> Q s; \<forall>s. Q s \<longrightarrow> (\<exists> t. (s,t) \<in> M \<and> Q t) \<rbrakk> \<Longrightarrow>
    \<exists>p\<in>Paths s. \<forall>i. Q(p i)"
 
-txt{*\noindent
+txt\<open>\noindent
 First we rephrase the conclusion slightly because we need to prove simultaneously
 both the path property and the fact that @{term Q} holds:
-*}
+\<close>
 
 apply(subgoal_tac
   "\<exists>p. s = p 0 \<and> (\<forall>i::nat. (p i, p(i+1)) \<in> M \<and> Q(p i))")
 
-txt{*\noindent
+txt\<open>\noindent
 From this proposition the original goal follows easily:
-*}
+\<close>
 
  apply(simp add: Paths_def, blast)
 
-txt{*\noindent
+txt\<open>\noindent
 The new subgoal is proved by providing the witness @{term "path s Q"} for @{term p}:
-*}
+\<close>
 
 apply(rule_tac x = "path s Q" in exI)
 apply(clarsimp)
 
-txt{*\noindent
+txt\<open>\noindent
 After simplification and clarification, the subgoal has the following form:
 @{subgoals[display,indent=0,margin=70,goals_limit=1]}
 It invites a proof by induction on @{term i}:
-*}
+\<close>
 
 apply(induct_tac i)
  apply(simp)
 
-txt{*\noindent
+txt\<open>\noindent
 After simplification, the base case boils down to
 @{subgoals[display,indent=0,margin=70,goals_limit=1]}
 The conclusion looks exceedingly trivial: after all, @{term t} is chosen such that @{prop"(s,t):M"}
@@ -223,11 +223,11 @@
 two subgoals: @{prop"EX a. (s, a) : M & Q a"}, which follows from the assumptions, and
 @{prop"(s, x) : M & Q x ==> (s,x) : M"}, which is trivial. Thus it is not surprising that
 @{text fast} can prove the base case quickly:
-*}
+\<close>
 
  apply(fast intro: someI2_ex)
 
-txt{*\noindent
+txt\<open>\noindent
 What is worth noting here is that we have used \methdx{fast} rather than
 @{text blast}.  The reason is that @{text blast} would fail because it cannot
 cope with @{thm[source]someI2_ex}: unifying its conclusion with the current
@@ -242,7 +242,7 @@
 occurrences of @{text SOME}. As a result, @{text fast} is no longer able to
 solve the subgoal and we apply @{thm[source]someI2_ex} by hand.  We merely
 show the proof commands but do not describe the details:
-*}
+\<close>
 
 apply(simp)
 apply(rule someI2_ex)
@@ -252,7 +252,7 @@
 apply(blast)
 done
 
-text{*
+text\<open>
 Function @{const path} has fulfilled its purpose now and can be forgotten.
 It was merely defined to provide the witness in the proof of the
 @{thm[source]infinity_lemma}. Aficionados of minimal proofs might like to know
@@ -261,7 +261,7 @@
 @{term[display]"rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u)"}
 is extensionally equal to @{term"path s Q"},
 where @{term rec_nat} is the predefined primitive recursor on @{typ nat}.
-*}
+\<close>
 (*<*)
 lemma
 "\<lbrakk> Q s; \<forall> s. Q s \<longrightarrow> (\<exists> t. (s,t)\<in>M \<and> Q t) \<rbrakk> \<Longrightarrow>
@@ -284,37 +284,37 @@
 by(blast)
 (*>*)
 
-text{*
+text\<open>
 At last we can prove the opposite direction of @{thm[source]AF_lemma1}:
-*}
+\<close>
 
 theorem AF_lemma2: "{s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A} \<subseteq> lfp(af A)"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is again pointwise and then by contraposition:
-*}
+\<close>
 
 apply(rule subsetI)
 apply(erule contrapos_pp)
 apply simp
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
 Applying the @{thm[source]infinity_lemma} as a destruction rule leaves two subgoals, the second
 premise of @{thm[source]infinity_lemma} and the original subgoal:
-*}
+\<close>
 
 apply(drule infinity_lemma)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 Both are solved automatically:
-*}
+\<close>
 
  apply(auto dest: not_in_lfp_afD)
 done
 
-text{*
+text\<open>
 If you find these proofs too complicated, we recommend that you read
 \S\ref{sec:CTL-revisited}, where we show how inductive definitions lead to
 simpler arguments.
@@ -322,20 +322,20 @@
 The main theorem is proved as for PDL, except that we also derive the
 necessary equality @{text"lfp(af A) = ..."} by combining
 @{thm[source]AF_lemma1} and @{thm[source]AF_lemma2} on the spot:
-*}
+\<close>
 
 theorem "mc f = {s. s \<Turnstile> f}"
 apply(induct_tac f)
 apply(auto simp add: EF_lemma equalityI[OF AF_lemma1 AF_lemma2])
 done
 
-text{*
+text\<open>
 
 The language defined above is not quite CTL\@. The latter also includes an
 until-operator @{term"EU f g"} with semantics ``there \emph{E}xists a path
 where @{term f} is true \emph{U}ntil @{term g} becomes true''.  We need
 an auxiliary function:
-*}
+\<close>
 
 primrec
 until:: "state set \<Rightarrow> state set \<Rightarrow> state \<Rightarrow> state list \<Rightarrow> bool" where
@@ -345,7 +345,7 @@
  eusem :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
 "eusem A B \<equiv> {s. \<exists>p. until A B s p}"(*>*)
 
-text{*\noindent
+text\<open>\noindent
 Expressing the semantics of @{term EU} is now straightforward:
 @{prop[display]"s \<Turnstile> EU f g = (\<exists>p. until {t. t \<Turnstile> f} {t. t \<Turnstile> g} s p)"}
 Note that @{term EU} is not definable in terms of the other operators!
@@ -362,7 +362,7 @@
 %which enables you to read and write {text"E[f U g]"} instead of {term"EU f g"}.
 \end{exercise}
 For more CTL exercises see, for example, Huth and Ryan @{cite "Huth-Ryan-book"}.
-*}
+\<close>
 
 (*<*)
 definition eufix :: "state set \<Rightarrow> state set \<Rightarrow> state set \<Rightarrow> state set" where
@@ -435,7 +435,7 @@
 *)
 (*>*)
 
-text{* Let us close this section with a few words about the executability of
+text\<open>Let us close this section with a few words about the executability of
 our model checkers.  It is clear that if all sets are finite, they can be
 represented as lists and the usual set operations are easily
 implemented. Only @{const lfp} requires a little thought.  Fortunately, theory
@@ -445,5 +445,5 @@
 iterated application of @{term F} to~@{term"{}"} until a fixed point is
 reached. It is actually possible to generate executable functional programs
 from HOL definitions, but that is beyond the scope of the tutorial.%
-\index{CTL|)} *}
+\index{CTL|)}\<close>
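 
 text\<open>A minimal sketch of that iteration, with an explicit step bound instead
 of a fixed point test; the function @{text iter} below is our own device and
 not part of the library:\<close>
 
 (* iter n F S applies F to S exactly n times *)
 primrec iter :: "nat \<Rightarrow> ('a set \<Rightarrow> 'a set) \<Rightarrow> 'a set \<Rightarrow> 'a set" where
 "iter 0 F S = S" |
 "iter (Suc n) F S = iter n F (F S)"
 
 lemma "iter (Suc (Suc 0)) F {} = F (F {})"
 by simp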
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/CTL/CTLind.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/CTLind.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory CTLind imports CTL begin(*>*)
 
-subsection{*CTL Revisited*}
+subsection\<open>CTL Revisited\<close>
 
-text{*\label{sec:CTL-revisited}
+text\<open>\label{sec:CTL-revisited}
 \index{CTL|(}%
 The purpose of this section is twofold: to demonstrate
 some of the induction principles and heuristics discussed above and to
@@ -22,7 +22,7 @@
 A}-avoiding path:
 % Second proof of opposite direction, directly by well-founded induction
 % on the initial segment of M that avoids A.
-*}
+\<close>
 
 inductive_set
   Avoid :: "state \<Rightarrow> state set \<Rightarrow> state set"
@@ -31,7 +31,7 @@
     "s \<in> Avoid s A"
   | "\<lbrakk> t \<in> Avoid s A; t \<notin> A; (t,u) \<in> M \<rbrakk> \<Longrightarrow> u \<in> Avoid s A"
 
-text{*
+text\<open>
 It is easy to see that for any infinite @{term A}-avoiding path @{term f}
 with @{prop"f(0::nat) \<in> Avoid s A"} there is an infinite @{term A}-avoiding path
 starting with @{term s} because (by definition of @{const Avoid}) there is a
@@ -40,7 +40,7 @@
 this requires the following
 reformulation, as explained in \S\ref{sec:ind-var-in-prems} above;
 the @{text rule_format} directive undoes the reformulation after the proof.
-*}
+\<close>
 
 lemma ex_infinite_path[rule_format]:
   "t \<in> Avoid s A  \<Longrightarrow>
@@ -52,7 +52,7 @@
 apply(simp_all add: Paths_def split: nat.split)
 done
 
-text{*\noindent
+text\<open>\noindent
 The base case (@{prop"t = s"}) is trivial and proved by @{text blast}.
 In the induction step, we have an infinite @{term A}-avoiding path @{term f}
 starting from @{term u}, a successor of @{term t}. Now we simply instantiate
@@ -66,12 +66,12 @@
 inductive proof this must be generalized to the statement that every point @{term t}
 ``between'' @{term s} and @{term A}, in other words all of @{term"Avoid s A"},
 is contained in @{term"lfp(af A)"}:
-*}
+\<close>
 
 lemma Avoid_in_lfp[rule_format(no_asm)]:
   "\<forall>p\<in>Paths s. \<exists>i. p i \<in> A \<Longrightarrow> t \<in> Avoid s A \<longrightarrow> t \<in> lfp(af A)"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by induction on the ``distance'' between @{term t} and @{term
 A}. Remember that @{prop"lfp(af A) = A \<union> M\<inverse> `` lfp(af A)"}.
 If @{term t} is already in @{term A}, then @{prop"t \<in> lfp(af A)"} is
@@ -85,14 +85,14 @@
 As we shall see presently, the absence of infinite @{term A}-avoiding paths
 starting from @{term s} implies well-foundedness of this relation. For the
 moment we assume this and proceed with the induction:
-*}
+\<close>
 
 apply(subgoal_tac "wf{(y,x). (x,y) \<in> M \<and> x \<in> Avoid s A \<and> x \<notin> A}")
  apply(erule_tac a = t in wf_induct)
  apply(clarsimp)
 (*<*)apply(rename_tac t)(*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 @{subgoals[display,indent=0,margin=65]}
 Now the induction hypothesis states that if @{prop"t \<notin> A"}
 then all successors of @{term t} that are in @{term"Avoid s A"} are in
@@ -104,13 +104,13 @@
 @{term"Avoid s A"}, because we also assume @{prop"t \<in> Avoid s A"}.
 Hence, by the induction hypothesis, all successors of @{term t} are indeed in
 @{term"lfp(af A)"}. Mechanically:
-*}
+\<close>
 
  apply(subst lfp_unfold[OF mono_af])
  apply(simp (no_asm) add: af_def)
  apply(blast intro: Avoid.intros)
 
-txt{*
+txt\<open>
 Having proved the main goal, we return to the proof obligation that the 
 relation used above is indeed well-founded. This is proved by contradiction: if
 the relation is not well-founded then there exists an infinite @{term
@@ -119,7 +119,7 @@
 @{thm[display]wf_iff_no_infinite_down_chain[no_vars]}
 From lemma @{thm[source]ex_infinite_path} the existence of an infinite
 @{term A}-avoiding path starting in @{term s} follows, contradiction.
-*}
+\<close>
 
 apply(erule contrapos_pp)
 apply(simp add: wf_iff_no_infinite_down_chain)
@@ -128,7 +128,7 @@
 apply(auto simp add: Paths_def)
 done
 
-text{*
+text\<open>
 The @{text"(no_asm)"} modifier of the @{text"rule_format"} directive in the
 statement of the lemma means
 that the assumption is left unchanged; otherwise the @{text"\<forall>p"} 
@@ -139,7 +139,7 @@
 The main theorem is simply the corollary where @{prop"t = s"},
 when the assumption @{prop"t \<in> Avoid s A"} is trivially true
 by the first @{const Avoid}-rule. Isabelle confirms this:%
-\index{CTL|)}*}
+\index{CTL|)}\<close>
 
 theorem AF_lemma2:  "{s. \<forall>p \<in> Paths s. \<exists> i. p i \<in> A} \<subseteq> lfp(af A)"
 by(auto elim: Avoid_in_lfp intro: Avoid.intros)
--- a/src/Doc/Tutorial/CTL/PDL.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CTL/PDL.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory PDL imports Base begin(*>*)
 
-subsection{*Propositional Dynamic Logic --- PDL*}
+subsection\<open>Propositional Dynamic Logic --- PDL\<close>
 
-text{*\index{PDL|(}
+text\<open>\index{PDL|(}
 The formulae of PDL are built up from atomic propositions via
 negation and conjunction and the two temporal
 connectives @{text AX} and @{text EF}\@. Since formulae are essentially
@@ -10,7 +10,7 @@
 \footnote{The customary definition of PDL
 @{cite "HarelKT-DL"} looks quite different from ours, but the two are easily
 shown to be equivalent.}
-*}
+\<close>
 
 datatype formula = Atom "atom"
                   | Neg formula
@@ -18,13 +18,13 @@
                   | AX formula
                   | EF formula
 
-text{*\noindent
+text\<open>\noindent
 This resembles the boolean expression case study in
 \S\ref{sec:boolex}.
 A validity relation between states and formulae specifies the semantics.
 The syntax annotation allows us to write @{text"s \<Turnstile> f"} instead of
 \hbox{@{text"valid s f"}}. The definition is by recursion over the syntax:
-*}
+\<close>
 
 primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool"   ("(_ \<Turnstile> _)" [80,80] 80)
 where
@@ -34,7 +34,7 @@
 "s \<Turnstile> AX f    = (\<forall>t. (s,t) \<in> M \<longrightarrow> t \<Turnstile> f)" |
 "s \<Turnstile> EF f    = (\<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<Turnstile> f)"
 
-text{*\noindent
+text\<open>\noindent
 The first three equations should be self-explanatory. The temporal formula
 @{term"AX f"} means that @{term f} is true in \emph{A}ll ne\emph{X}t states whereas
 @{term"EF f"} means that there \emph{E}xists some \emph{F}uture state in which @{term f} is
@@ -43,7 +43,7 @@
 
 Now we come to the model checker itself. It maps a formula into the
 set of states where the formula is true.  It too is defined by
-recursion over the syntax: *}
+recursion over the syntax:\<close>
 
 primrec mc :: "formula \<Rightarrow> state set" where
 "mc(Atom a)  = {s. a \<in> L s}" |
@@ -52,7 +52,7 @@
 "mc(AX f)    = {s. \<forall>t. (s,t) \<in> M  \<longrightarrow> t \<in> mc f}" |
 "mc(EF f)    = lfp(\<lambda>T. mc f \<union> (M\<inverse> `` T))"
 
-text{*\noindent
+text\<open>\noindent
 Only the equation for @{term EF} deserves some comments. Remember that the
 postfix @{text"\<inverse>"} and the infix @{text"``"} are predefined and denote the
 converse of a relation and the image of a set under a relation.  Thus
@@ -65,40 +65,40 @@
 
 First we prove monotonicity of the function inside @{term lfp}
 in order to make sure it really has a least fixed point.
-*}
+\<close>
 
 lemma mono_ef: "mono(\<lambda>T. A \<union> (M\<inverse> `` T))"
 apply(rule monoI)
 apply blast
 done
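 
 text\<open>\noindent
 As a side remark (this derived fact is not needed below), monotonicity
 already allows the fixed point to be unfolded once:\<close>
 
 lemma "lfp(\<lambda>T. A \<union> (M\<inverse> `` T)) = A \<union> (M\<inverse> `` lfp(\<lambda>T. A \<union> (M\<inverse> `` T)))"
 by (rule lfp_unfold[OF mono_ef])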
 
-text{*\noindent
+text\<open>\noindent
 Now we can relate model checking and semantics. For the @{text EF} case we need
 a separate lemma:
-*}
+\<close>
 
 lemma EF_lemma:
   "lfp(\<lambda>T. A \<union> (M\<inverse> `` T)) = {s. \<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<in> A}"
 
-txt{*\noindent
+txt\<open>\noindent
 The equality is proved in the canonical fashion by proving that each set
 includes the other; the inclusion is shown pointwise:
-*}
+\<close>
 
 apply(rule equalityI)
  apply(rule subsetI)
  apply(simp)(*<*)apply(rename_tac s)(*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 Simplification leaves us with the following first subgoal
 @{subgoals[display,indent=0,goals_limit=1]}
 which is proved by @{term lfp}-induction:
-*}
+\<close>
 
  apply(erule lfp_induct_set)
   apply(rule mono_ef)
  apply(simp)
-txt{*\noindent
+txt\<open>\noindent
 Having disposed of the monotonicity subgoal,
 simplification leaves us with the following goal:
 \begin{isabelle}
@@ -108,19 +108,19 @@
 \end{isabelle}
 It is proved by @{text blast}, using the transitivity of 
 \isa{M\isactrlsup {\isacharasterisk}}.
-*}
+\<close>
 
  apply(blast intro: rtrancl_trans)
 
-txt{*
+txt\<open>
 We now return to the second set inclusion subgoal, which is again proved
 pointwise:
-*}
+\<close>
 
 apply(rule subsetI)
 apply(simp, clarify)
 
-txt{*\noindent
+txt\<open>\noindent
 After simplification and clarification we are left with
 @{subgoals[display,indent=0,goals_limit=1]}
 This goal is proved by induction on @{term"(s,t)\<in>M\<^sup>*"}. But since the model
@@ -132,44 +132,44 @@
 It says that if @{prop"(a,b):r\<^sup>*"} and we know @{prop"P b"} then we can infer
 @{prop"P a"} provided each step backwards from a predecessor @{term z} of
 @{term b} preserves @{term P}.
-*}
+\<close>
 
 apply(erule converse_rtrancl_induct)
 
-txt{*\noindent
+txt\<open>\noindent
 The base case
 @{subgoals[display,indent=0,goals_limit=1]}
 is solved by unrolling @{term lfp} once
-*}
+\<close>
 
  apply(subst lfp_unfold[OF mono_ef])
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
 and disposing of the resulting trivial subgoal automatically:
-*}
+\<close>
 
  apply(blast)
 
-txt{*\noindent
+txt\<open>\noindent
 The proof of the induction step is identical to the one for the base case:
-*}
+\<close>
 
 apply(subst lfp_unfold[OF mono_ef])
 apply(blast)
 done
 
-text{*
+text\<open>
 The main theorem is proved in the familiar manner: induction followed by
 @{text auto} augmented with the lemma as a simplification rule.
-*}
+\<close>
 
 theorem "mc f = {s. s \<Turnstile> f}"
 apply(induct_tac f)
 apply(auto simp add: EF_lemma)
 done
 
-text{*
+text\<open>
 \begin{exercise}
 @{term AX} has a dual operator @{term EN} 
 (``there exists a next state such that'')%
@@ -183,7 +183,7 @@
 @{prop[display]"(s \<Turnstile> EF f) = (s \<Turnstile> f | s \<Turnstile> EN(EF f))"}
 \end{exercise}
 \index{PDL|)}
-*}
+\<close>
 (*<*)
 theorem main: "mc f = {s. s \<Turnstile> f}"
 apply(induct_tac f)
--- a/src/Doc/Tutorial/CodeGen/CodeGen.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/CodeGen/CodeGen.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory CodeGen imports Main begin
 (*>*)
 
-section{*Case Study: Compiling Expressions*}
+section\<open>Case Study: Compiling Expressions\<close>
 
-text{*\label{sec:ExprCompiler}
+text\<open>\label{sec:ExprCompiler}
 \index{compiling expressions example|(}%
 The task is to develop a compiler from a generic type of expressions (built
 from variables, constants and binary operations) to a stack machine.  This
@@ -13,45 +13,45 @@
 type of variables or values but make them type parameters.  Neither is there
 a fixed set of binary operations: instead the expression contains the
 appropriate function itself.
-*}
+\<close>
 
 type_synonym 'v binop = "'v \<Rightarrow> 'v \<Rightarrow> 'v"
 datatype (dead 'a, 'v) expr = Cex 'v
                       | Vex 'a
                       | Bex "'v binop"  "('a,'v)expr"  "('a,'v)expr"
 
-text{*\noindent
+text\<open>\noindent
 The three constructors represent constants, variables and the application of
 a binary operation to two subexpressions.
 
 The value of an expression with respect to an environment that maps variables to
 values is easily defined:
-*}
+\<close>
 
 primrec "value" :: "('a,'v)expr \<Rightarrow> ('a \<Rightarrow> 'v) \<Rightarrow> 'v" where
 "value (Cex v) env = v" |
 "value (Vex a) env = env a" |
 "value (Bex f e1 e2) env = f (value e1 env) (value e2 env)"
 
-text{*
+text\<open>
 The stack machine has three instructions: load a constant value onto the
 stack, load the contents of an address onto the stack, and apply a
 binary operation to the two topmost elements of the stack, replacing them by
 the result. As for @{text"expr"}, addresses and values are type parameters:
-*}
+\<close>
 
 datatype (dead 'a, 'v) instr = Const 'v
                        | Load 'a
                        | Apply "'v binop"
 
-text{*
+text\<open>
 The execution of the stack machine is modelled by a function
 @{text"exec"} that takes a list of instructions, a store (modelled as a
 function from addresses to values, just like the environment for
 evaluating expressions), and a stack (modelled as a list) of values,
 and returns the stack at the end of the execution --- the store remains
 unchanged:
-*}
+\<close>
 
 primrec exec :: "('a,'v)instr list \<Rightarrow> ('a\<Rightarrow>'v) \<Rightarrow> 'v list \<Rightarrow> 'v list"
 where
@@ -61,7 +61,7 @@
   | Load a   \<Rightarrow> exec is s ((s a)#vs)
   | Apply f  \<Rightarrow> exec is s ((f (hd vs) (hd(tl vs)))#(tl(tl vs))))"
 
-text{*\noindent
+text\<open>\noindent
 Recall that @{term"hd"} and @{term"tl"}
 return the first element and the remainder of a list.
 Because all functions are total, \cdx{hd} is defined even for the empty
@@ -72,54 +72,54 @@
 
 The compiler is a function from expressions to a list of instructions. Its
 definition is obvious:
-*}
+\<close>
 
 primrec compile :: "('a,'v)expr \<Rightarrow> ('a,'v)instr list" where
 "compile (Cex v)       = [Const v]" |
 "compile (Vex a)       = [Load a]" |
 "compile (Bex f e1 e2) = (compile e2) @ (compile e1) @ [Apply f]"
 
-text{*
+text\<open>
 Now we have to prove the correctness of the compiler, i.e.\ that the
 execution of a compiled expression results in the value of the expression:
-*}
+\<close>
 theorem "exec (compile e) s [] = [value e s]"
 (*<*)oops(*>*)
-text{*\noindent
+text\<open>\noindent
 This theorem needs to be generalized:
-*}
+\<close>
 
 theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
 
-txt{*\noindent
+txt\<open>\noindent
 It will be proved by induction on @{term"e"} followed by simplification.  
 First, we must prove a lemma about executing the concatenation of two
 instruction sequences:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma exec_app[simp]:
   "\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)" 
 
-txt{*\noindent
+txt\<open>\noindent
 This requires induction on @{term"xs"} and ordinary simplification for the
 base cases. In the induction step, simplification leaves us with a formula
 that contains two @{text"case"}-expressions over instructions. Thus we add
 automatic case splitting, which finishes the proof:
-*}
+\<close>
 apply(induct_tac xs, simp, simp split: instr.split)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Note that because both \methdx{simp_all} and \methdx{auto} perform simplification, they can
 be modified in the same way as @{text simp}.  Thus the proof can be
 rewritten as
-*}
+\<close>
 (*<*)
 declare exec_app[simp del]
 lemma [simp]: "\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)" 
 (*>*)
 apply(induct_tac xs, simp_all split: instr.split)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Although this is more compact, it is less clear for the reader of the proof.
 
 We could now go back and prove @{prop"exec (compile e) s [] = [value e s]"}
@@ -127,7 +127,7 @@
 However, this is unnecessary because the generalized version fully subsumes
 its instance.%
 \index{compiling expressions example|)}
-*}
+\<close>
 (*<*)
 theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
 by(induct_tac e, auto)
--- a/src/Doc/Tutorial/Datatype/ABexpr.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/ABexpr.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory ABexpr imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \index{datatypes!mutually recursive}%
 Sometimes it is necessary to define two datatypes that depend on each
 other. This is called \textbf{mutual recursion}. As an example consider a
@@ -15,7 +15,7 @@
   comparisons like ``$m<n$''.
 \end{itemize}
 In Isabelle this becomes
-*}
+\<close>
 
 datatype 'a aexp = IF   "'a bexp" "'a aexp" "'a aexp"
                  | Sum  "'a aexp" "'a aexp"
@@ -26,14 +26,14 @@
                  | And  "'a bexp" "'a bexp"
                  | Neg  "'a bexp"
 
-text{*\noindent
+text\<open>\noindent
 Type @{text"aexp"} is similar to @{text"expr"} in \S\ref{sec:ExprCompiler},
 except that we have added an @{text IF} constructor,
 fixed the values to be of type @{typ"nat"} and declared the two binary
 operations @{text Sum} and @{term"Diff"}.  Boolean
 expressions can be arithmetic comparisons, conjunctions and negations.
 The semantics is given by two evaluation functions:
-*}
+\<close>
 
 primrec evala :: "'a aexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> nat" and
          evalb :: "'a bexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> bool" where
@@ -48,7 +48,7 @@
 "evalb (And b1 b2) env = (evalb b1 env \<and> evalb b2 env)" |
 "evalb (Neg b) env = (\<not> evalb b env)"
 
-text{*\noindent
+text\<open>\noindent
 
 Both take an expression and an environment (a mapping from variables
 @{typ"'a"} to values @{typ"nat"}) and return its arithmetic/boolean
@@ -60,7 +60,7 @@
 the empty line is purely for readability.
 
 In the same fashion we also define two functions that perform substitution:
-*}
+\<close>
 
 primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp" and
          substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp" where
@@ -75,7 +75,7 @@
 "substb s (And b1 b2) = And (substb s b1) (substb s b2)" |
 "substb s (Neg b) = Neg (substb s b)"
 
-text{*\noindent
+text\<open>\noindent
 Their first argument is a function mapping variables to expressions, the
 substitution. It is applied to all variables in the second argument. As a
 result, the type of variables in the expression may change from @{typ"'a"}
@@ -89,19 +89,19 @@
 boolean expressions (by induction), you find that you always need the other
 theorem in the induction step. Therefore you need to state and prove both
 theorems simultaneously:
-*}
+\<close>
 
 lemma "evala (substa s a) env = evala a (\<lambda>x. evala (s x) env) \<and>
         evalb (substb s b) env = evalb b (\<lambda>x. evala (s x) env)"
 apply(induct_tac a and b)
 
-txt{*\noindent The resulting 8 goals (one for each constructor) are proved in one fell swoop:
-*}
+txt\<open>\noindent The resulting 8 goals (one for each constructor) are proved in one fell swoop:
+\<close>
 
 apply simp_all
 (*<*)done(*>*)
 
-text{*
+text\<open>
 In general, given $n$ mutually recursive datatypes $\tau@1$, \dots, $\tau@n$,
 an inductive proof expects a goal of the form
 \[ P@1(x@1)\ \land \dots \land P@n(x@n) \]
@@ -121,7 +121,7 @@
   it.  ({\em Hint:} proceed as in \S\ref{sec:boolex} and read the discussion
   of type annotations following lemma @{text subst_id} below).
 \end{exercise}
-*}
+\<close>
 (*<*)
 primrec norma :: "'a aexp \<Rightarrow> 'a aexp" and
         normb :: "'a bexp \<Rightarrow> 'a aexp \<Rightarrow> 'a aexp \<Rightarrow> 'a aexp" where
--- a/src/Doc/Tutorial/Datatype/Fundata.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/Fundata.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 (*>*)
 datatype (dead 'a,'i) bigtree = Tip | Br 'a "'i \<Rightarrow> ('a,'i)bigtree"
 
-text{*\noindent
+text\<open>\noindent
 Parameter @{typ"'a"} is the type of values stored in
 the @{term Br}anches of the tree, whereas @{typ"'i"} is the index
 type over which the tree branches. If @{typ"'i"} is instantiated to
@@ -17,14 +17,14 @@
 has merely @{term"Tip"}s as further subtrees.
 
 Function @{term"map_bt"} applies a function to all labels in a @{text"bigtree"}:
-*}
+\<close>
 
 primrec map_bt :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a,'i)bigtree \<Rightarrow> ('b,'i)bigtree"
 where
 "map_bt f Tip      = Tip" |
 "map_bt f (Br a F) = Br (f a) (\<lambda>i. map_bt f (F i))"
 
-text{*\noindent This is a valid \isacommand{primrec} definition because the
+text\<open>\noindent This is a valid \isacommand{primrec} definition because the
 recursive calls of @{term"map_bt"} involve only subtrees of
 @{term"F"}, which is itself a subterm of the left-hand side. Thus termination
 is assured.  The seasoned functional programmer might try expressing
@@ -32,18 +32,18 @@
 however will reject.  Applying @{term"map_bt"} to only one of its arguments
 makes the termination proof less obvious.
 
-The following lemma has a simple proof by induction:  *}
+The following lemma has a simple proof by induction:\<close>
 
 lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
 apply(induct_tac T, simp_all)
 done
 (*<*)lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
 apply(induct_tac T, rename_tac[2] F)(*>*)
-txt{*\noindent
+txt\<open>\noindent
 Because of the function type, the proof state after induction looks unusual.
 Notice the quantified induction hypothesis:
 @{subgoals[display,indent=0]}
-*}
+\<close>
 (*<*)
 oops
 end
--- a/src/Doc/Tutorial/Datatype/Nested.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Datatype/Nested.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory Nested imports ABexpr begin
 (*>*)
 
-text{*
+text\<open>
 \index{datatypes!and nested recursion}%
 So far, all datatypes had the property that on the right-hand side of their
 definition they occurred only at the top-level: directly below a
@@ -10,11 +10,11 @@
 datatype occurs nested in some other datatype (but not inside itself!).
 Consider the following model of terms
 where function symbols can be applied to a list of arguments:
-*}
+\<close>
 (*<*)hide_const Var(*>*)
 datatype ('v,'f)"term" = Var 'v | App 'f "('v,'f)term list"
 
-text{*\noindent
+text\<open>\noindent
 Note that we need to quote @{text term} on the left to avoid confusion with
 the Isabelle command \isacommand{term}.
 Parameter @{typ"'v"} is the type of variables and @{typ"'f"} the type of
@@ -41,7 +41,7 @@
 
 Let us define a substitution function on terms. Because terms involve term
 lists, we need to define two substitution functions simultaneously:
-*}
+\<close>
 
 primrec
 subst :: "('v\<Rightarrow>('v,'f)term) \<Rightarrow> ('v,'f)term      \<Rightarrow> ('v,'f)term" and
@@ -54,7 +54,7 @@
 "substs s [] = []" |
 "substs s (t # ts) = subst s t # substs s ts"
 
-text{*\noindent
+text\<open>\noindent
 Individual equations in a \commdx{primrec} definition may be
 named as shown for @{thm[source]subst_App}.
 The significance of this device will become apparent below.
@@ -63,14 +63,14 @@
 to prove a related statement about term lists simultaneously. For example,
 the fact that the identity substitution does not change a term needs to be
 strengthened and proved as follows:
-*}
+\<close>
 
 lemma subst_id(*<*)(*referred to from ABexpr*)(*>*): "subst  Var t  = (t ::('v,'f)term)  \<and>
                   substs Var ts = (ts::('v,'f)term list)"
 apply(induct_tac t and ts rule: subst.induct substs.induct, simp_all)
 done
 
-text{*\noindent
+text\<open>\noindent
 Note that @{term Var} is the identity substitution because by definition it
 leaves variables unchanged: @{prop"subst Var (Var x) = Var x"}. Note also
 that the type annotations are necessary because otherwise there is nothing in
@@ -100,7 +100,7 @@
 @{text"map f [x1,...,xn] = [f x1,...,f xn]"}. This is true, but Isabelle
 insists on the conjunctive format. Fortunately, we can easily \emph{prove}
 that the suggested equation holds:
-*}
+\<close>
 (*<*)
 (* Exercise 1: *)
 lemma "subst  ((subst f) \<circ> g) t  = subst  f (subst g t) \<and>
@@ -133,14 +133,14 @@
 apply(induct_tac ts, simp_all)
 done
 
-text{*\noindent
+text\<open>\noindent
 What is more, we can now disable the old defining equation as a
 simplification rule:
-*}
+\<close>
 
 declare subst_App [simp del]
 
-text{*\noindent The advantage is that now we have replaced @{const
+text\<open>\noindent The advantage is that now we have replaced @{const
 substs} by @{const map}, we can profit from the large number of
 pre-proved lemmas about @{const map}.  Unfortunately, inductive proofs
 about type @{text term} are still awkward because they expect a
@@ -155,5 +155,5 @@
 Of course, you may also combine mutual and nested recursion of datatypes. For example,
 constructor @{text Sum} in \S\ref{sec:datatype-mut-rec} could take a list of
 expressions as its argument: @{text Sum}~@{typ[quotes]"'a aexp list"}.
-*}
+\<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Documents/Documents.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Documents/Documents.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory Documents imports Main begin
 (*>*)
 
-section {* Concrete Syntax \label{sec:concrete-syntax} *}
+section \<open>Concrete Syntax \label{sec:concrete-syntax}\<close>
 
-text {*
+text \<open>
   The core concept of Isabelle's framework for concrete syntax is that
   of \bfindex{mixfix annotations}.  Associated with any kind of
   constant declaration, mixfixes affect both the grammar productions
@@ -19,12 +19,12 @@
 
   Below we introduce a few simple syntax declaration
   forms that already cover many common situations fairly well.
-*}
+\<close>
 
 
-subsection {* Infix Annotations *}
+subsection \<open>Infix Annotations\<close>
 
-text {*
+text \<open>
   Syntax annotations may be included wherever constants are declared,
   such as \isacommand{definition} and \isacommand{primrec} --- and also
   \isacommand{datatype}, which declares constructor operations.
@@ -35,12 +35,12 @@
   Infix declarations\index{infix annotations} provide a useful special
   case of mixfixes.  The following example of the exclusive-or
   operation on boolean values illustrates typical infix declarations.
-*}
+\<close>
 
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]" 60)
 where "A [+] B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 
-text {*
+text \<open>
   \noindent Now @{text "xor A B"} and @{text "A [+] B"} refer to the
   same expression internally.  Any curried function with at least two
   arguments may be given infix syntax.  For partial applications with
@@ -75,12 +75,12 @@
   below 50; algebraic ones (like @{text "+"} and @{text "*"}) are
   above 50.  User syntax should strive to coexist with common HOL
   forms, or use the mostly unused range 100--900.
-*}
+\<close>
 
 
-subsection {* Mathematical Symbols \label{sec:syntax-symbols} *}
+subsection \<open>Mathematical Symbols \label{sec:syntax-symbols}\<close>
 
-text {*
+text \<open>
   Concrete syntax based on ASCII characters has inherent limitations.
   Mathematical notation demands a larger repertoire of glyphs.
   Several standards of extended character sets have been proposed over
@@ -133,39 +133,39 @@
 
   Replacing our previous definition of @{text xor} by the
   following specifies an Isabelle symbol for the new operator:
-*}
+\<close>
 
 (*<*)
 hide_const xor
-setup {* Sign.add_path "version1" *}
+setup \<open>Sign.add_path "version1"\<close>
 (*>*)
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "\<oplus>" 60)
 where "A \<oplus> B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 (*<*)
-setup {* Sign.local_path *}
+setup \<open>Sign.local_path\<close>
 (*>*)
 
-text {*
+text \<open>
   It is possible to provide alternative syntax forms
   through the \bfindex{print mode} concept~@{cite "isabelle-isar-ref"}.  By
   convention, the mode of ``$xsymbols$'' is enabled whenever
   Proof~General's X-Symbol mode or {\LaTeX} output is active.  Now
   consider the following hybrid declaration of @{text xor}:
-*}
+\<close>
 
 (*<*)
 hide_const xor
-setup {* Sign.add_path "version2" *}
+setup \<open>Sign.add_path "version2"\<close>
 (*>*)
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]\<ignore>" 60)
 where "A [+]\<ignore> B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
 
 notation (xsymbols) xor (infixl "\<oplus>\<ignore>" 60)
 (*<*)
-setup {* Sign.local_path *}
+setup \<open>Sign.local_path\<close>
 (*>*)
 
-text {*\noindent
+text \<open>\noindent
 The \commdx{notation} command associates a mixfix
 annotation with a known constant.  The print mode specification,
 here @{text "(xsymbols)"}, is optional.
@@ -174,17 +174,17 @@
 output uses the nicer syntax of $xsymbols$ whenever that print mode is
 active.  Such an arrangement is particularly useful for interactive
 development, where users may type ASCII text and see mathematical
-symbols displayed during proofs.  *}
+symbols displayed during proofs.\<close>
 
 
-subsection {* Prefix Annotations *}
+subsection \<open>Prefix Annotations\<close>
 
-text {*
+text \<open>
   Prefix syntax annotations\index{prefix annotation} are another form
   of mixfixes @{cite "isabelle-isar-ref"}, without any template arguments or
   priorities --- just some literal syntax.  The following example
   associates common symbols with the constructors of a datatype.
-*}
+\<close>
 
 datatype currency =
     Euro nat    ("\<euro>")
@@ -192,7 +192,7 @@
   | Yen nat     ("\<yen>")
   | Dollar nat  ("$")
 
-text {*
+text \<open>
   \noindent Here the mixfix annotations in the rightmost column happen
   to consist of a single Isabelle symbol each: \verb,\,\verb,<euro>,,
   \verb,\,\verb,<pounds>,, \verb,\,\verb,<yen>,, and \verb,$,.  Recall
@@ -204,12 +204,12 @@
   Commission.
 
   Prefix syntax works the same way for other commands that introduce new constants, e.g. \isakeyword{primrec}.
-*}
+\<close>
 
 
-subsection {* Abbreviations \label{sec:abbreviations} *}
+subsection \<open>Abbreviations \label{sec:abbreviations}\<close>
 
-text{* Mixfix syntax annotations merely decorate particular constant
+text\<open>Mixfix syntax annotations merely decorate particular constant
 application forms with concrete syntax, for instance replacing
 @{text "xor A B"} by @{text "A \<oplus> B"}.  Occasionally, the relationship
 between some piece of notation and its internal form is more
@@ -223,12 +223,12 @@
 A typical use of abbreviations is to introduce relational notation for
 membership in a set of pairs, replacing @{text "(x, y) \<in> sim"} by
 @{text "x \<approx> y"}. We assume that a constant @{text sim } of type
-@{typ"('a \<times> 'a) set"} has been introduced at this point. *}
+@{typ"('a \<times> 'a) set"} has been introduced at this point.\<close>
 (*<*)consts sim :: "('a \<times> 'a) set"(*>*)
 abbreviation sim2 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"   (infix "\<approx>" 50)
 where "x \<approx> y  \<equiv>  (x, y) \<in> sim"
 
-text {* \noindent The given meta-equality is used as a rewrite rule
+text \<open>\noindent The given meta-equality is used as a rewrite rule
 after parsing (replacing \mbox{@{prop"x \<approx> y"}} by @{text"(x,y) \<in>
 sim"}) and before printing (turning @{text"(x,y) \<in> sim"} back into
 \mbox{@{prop"x \<approx> y"}}). The name of the dummy constant @{text "sim2"}
@@ -238,14 +238,14 @@
 provide variant versions of fundamental relational expressions, such
 as @{text \<noteq>} for negated equalities.  The following declaration
 stems from Isabelle/HOL itself:
-*}
+\<close>
 
 abbreviation not_equal :: "'a \<Rightarrow> 'a \<Rightarrow> bool"    (infixl "~=\<ignore>" 50)
 where "x ~=\<ignore> y  \<equiv>  \<not> (x = y)"
 
 notation (xsymbols) not_equal (infix "\<noteq>\<ignore>" 50)
 
-text {* \noindent The notation @{text \<noteq>} is introduced separately to restrict it
+text \<open>\noindent The notation @{text \<noteq>} is introduced separately to restrict it
 to the \emph{xsymbols} mode.
 
 Abbreviations are appropriate when the defined concept is a
@@ -257,12 +257,12 @@
 Abbreviations are a simplified form of the general concept of
 \emph{syntax translations}; even heavier transformations may be
 written in ML @{cite "isabelle-isar-ref"}.
-*}
+\<close>
 
 
-section {* Document Preparation \label{sec:document-preparation} *}
+section \<open>Document Preparation \label{sec:document-preparation}\<close>
 
-text {*
+text \<open>
   Isabelle/Isar is centered around the concept of \bfindex{formal
   proof documents}\index{documents|bold}.  The outcome of a formal
   development effort is meant to be a human-readable record, presented
@@ -279,27 +279,27 @@
 
   Here is an example to illustrate the idea of Isabelle document
   preparation.
-*}
+\<close>
 
-text_raw {* \begin{quotation} *}
+text_raw \<open>\begin{quotation}\<close>
 
-text {*
+text \<open>
   The following datatype definition of @{text "'a bintree"} models
   binary trees with nodes being decorated by elements of type @{typ
   'a}.
-*}
+\<close>
 
 datatype 'a bintree =
      Leaf | Branch 'a  "'a bintree"  "'a bintree"
 
-text {*
+text \<open>
   \noindent The datatype induction rule generated here is of the form
   @{thm [indent = 1, display] bintree.induct [no_vars]}
-*}
+\<close>
 
-text_raw {* \end{quotation} *}
+text_raw \<open>\end{quotation}\<close>
 
-text {*
+text \<open>
   \noindent The above document output has been produced as follows:
 
   \begin{ttbox}
@@ -324,12 +324,12 @@
   to formal entities by means of ``antiquotations'' (such as
   \texttt{\at}\verb,{text "'a bintree"}, or
   \texttt{\at}\verb,{typ 'a},), see also \S\ref{sec:doc-prep-text}.
-*}
+\<close>
 
 
-subsection {* Isabelle Sessions *}
+subsection \<open>Isabelle Sessions\<close>
 
-text {*
+text \<open>
   In contrast to the highly interactive mode of Isabelle/Isar theory
   development, the document preparation stage essentially works in
   batch-mode.  An Isabelle \bfindex{session} consists of a collection
@@ -412,12 +412,12 @@
   Isabelle batch session leaves the generated sources in their target
   location, identified by the accompanying error message.  This lets
   you trace {\LaTeX} problems with the generated files at hand.
-*}
+\<close>
 
 
-subsection {* Structure Markup *}
+subsection \<open>Structure Markup\<close>
 
-text {*
+text \<open>
   The large-scale structure of Isabelle documents follows existing
   {\LaTeX} conventions, with chapters, sections, subsubsections etc.
   The Isar language includes separate \bfindex{markup commands}, which
@@ -460,12 +460,12 @@
 
   end
   \end{ttbox}
-*}
+\<close>
 
 
-subsection {* Formal Comments and Antiquotations \label{sec:doc-prep-text} *}
+subsection \<open>Formal Comments and Antiquotations \label{sec:doc-prep-text}\<close>
 
-text {*
+text \<open>
   Isabelle \bfindex{source comments}, which are of the form
   \verb,(,\verb,*,~@{text \<dots>}~\verb,*,\verb,),, essentially act like
   white space and do not really contribute to the content.  They
@@ -481,14 +481,14 @@
   \verb,{,\verb,*,~@{text \<dots>}~\verb,*,\verb,}, as before.  Multiple
   marginal comments may be given at the same time.  Here is a simple
   example:
-*}
+\<close>
 
 lemma "A --> A"
-  -- "a triviality of propositional logic"
-  -- "(should not really bother)"
-  by (rule impI) -- "implicit assumption step involved here"
+  \<comment> "a triviality of propositional logic"
+  \<comment> "(should not really bother)"
+  by (rule impI) \<comment> "implicit assumption step involved here"
 
-text {*
+text \<open>
   \noindent The above output has been produced as follows:
 
 \begin{verbatim}
@@ -593,12 +593,12 @@
   document very easily, independently of the term language of
   Isabelle.  Manual {\LaTeX} code would leave more control over the
   typesetting, but is also slightly more tedious.
-*}
+\<close>
 
 
-subsection {* Interpretation of Symbols \label{sec:doc-prep-symbols} *}
+subsection \<open>Interpretation of Symbols \label{sec:doc-prep-symbols}\<close>
 
-text {*
+text \<open>
   As has been pointed out before (\S\ref{sec:syntax-symbols}),
   Isabelle symbols are the smallest syntactic entities --- a
   straightforward generalization of ASCII characters.  While Isabelle
@@ -640,12 +640,12 @@
   quotes are not printed at all.  The resulting quality of typesetting
   is quite good, so this should be the default style for work that
   gets distributed to a broader audience.
-*}
+\<close>
 
 
-subsection {* Suppressing Output \label{sec:doc-prep-suppress} *}
+subsection \<open>Suppressing Output \label{sec:doc-prep-suppress}\<close>
 
-text {*
+text \<open>
   By default, Isabelle's document system generates a {\LaTeX} file for
   each theory that gets loaded while running the session.  The
   generated \texttt{session.tex} will include all of these in order of
@@ -683,11 +683,11 @@
   commands involving ML code).  Users may add their own tags using the
   \verb,%,\emph{tag} notation right after a command name.  In the
   subsequent example we hide a particularly irrelevant proof:
-*}
+\<close>
 
 lemma "x = x" by %invisible (simp)
 
-text {*
+text \<open>
   The original source has been ``\verb,lemma "x = x" by %invisible (simp),''.
   Tags observe the structure of proofs; adjacent commands with the
   same tag are joined into a single region.  The Isabelle document
@@ -705,12 +705,12 @@
   of the theory, of course.  For example, we may hide parts of a proof
   that seem unfit for general public inspection.  The following
   ``fully automatic'' proof is actually a fake:
-*}
+\<close>
 
 lemma "x \<noteq> (0::int) \<Longrightarrow> 0 < x * x"
   by (auto(*<*)simp add: zero_less_mult_iff(*>*))
 
-text {*
+text \<open>
   \noindent The real source of the proof has been as follows:
 
 \begin{verbatim}
@@ -722,7 +722,7 @@
   should not misrepresent the underlying theory development.  It is
   easy to invalidate the visible text by hiding references to
   questionable axioms, for example.
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Fun/fun0.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Fun/fun0.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,19 +2,19 @@
 theory fun0 imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \subsection{Definition}
 \label{sec:fun-examples}
 
 Here is a simple example, the \rmindex{Fibonacci function}:
-*}
+\<close>
 
 fun fib :: "nat \<Rightarrow> nat" where
 "fib 0 = 0" |
 "fib (Suc 0) = 1" |
 "fib (Suc(Suc x)) = fib x + fib (Suc x)"
 
-text{*\noindent
+text\<open>\noindent
 This resembles ordinary functional programming languages. Note the obligatory
 \isacommand{where} and \isa{|}. Command \isacommand{fun} declares and
 defines the function in one go. Isabelle establishes termination automatically
@@ -22,35 +22,35 @@
 
 Slightly more interesting is the insertion of a fixed element
 between any two elements of a list:
-*}
+\<close>
 
 fun sep :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "sep a []     = []" |
 "sep a [x]    = [x]" |
 "sep a (x#y#zs) = x # a # sep a (y#zs)"
 
-text{*\noindent
+text\<open>\noindent
 This time the length of the list decreases with the
 recursive call; the first argument is irrelevant for termination.
 
 Pattern matching\index{pattern matching!and \isacommand{fun}}
 need not be exhaustive and may employ wildcards:
-*}
+\<close>
 
 fun last :: "'a list \<Rightarrow> 'a" where
 "last [x]      = x" |
 "last (_#y#zs) = last (y#zs)"
 
-text{*
+text\<open>
 Overlapping patterns are disambiguated by taking the order of equations into
 account, just as in functional programming:
-*}
+\<close>
 
 fun sep1 :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "sep1 a (x#y#zs) = x # a # sep1 a (y#zs)" |
 "sep1 _ xs       = xs"
 
-text{*\noindent
+text\<open>\noindent
 To guarantee that the second equation can only be applied if the first
 one does not match, Isabelle internally replaces the second equation
 by the two possibilities that are left: @{prop"sep1 a [] = []"} and
@@ -59,13 +59,13 @@
 
 Because of its pattern matching syntax, \isacommand{fun} is also useful
 for the definition of non-recursive functions:
-*}
+\<close>
 
 fun swap12 :: "'a list \<Rightarrow> 'a list" where
 "swap12 (x#y#zs) = y#x#zs" |
 "swap12 zs       = zs"
 
-text{*
+text\<open>
 After a function~$f$ has been defined via \isacommand{fun},
 its defining equations (or variants derived from them) are available
 under the name $f$@{text".simps"} as theorems.
@@ -87,14 +87,14 @@
 More generally, \isacommand{fun} allows any \emph{lexicographic
 combination} of size measures in case there are multiple
 arguments. For example, the following version of \rmindex{Ackermann's
-function} is accepted: *}
+function} is accepted:\<close>
 
 fun ack2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "ack2 n 0 = Suc n" |
 "ack2 0 (Suc m) = ack2 (Suc 0) m" |
 "ack2 (Suc n) (Suc m) = ack2 (ack2 n (Suc m)) m"
 
-text{* The order of arguments has no influence on whether
+text\<open>The order of arguments has no influence on whether
 \isacommand{fun} can prove termination of a function. For more details
 see elsewhere~@{cite bulwahnKN07}.
 
@@ -108,12 +108,12 @@
 terminate because of automatic splitting of @{text "if"}.
 \index{*if expressions!splitting of}
 Let us look at an example:
-*}
+\<close>
 
 fun gcd :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "gcd m n = (if n=0 then m else gcd n (m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 The second argument decreases with each recursive call.
 The termination condition
 @{prop[display]"n ~= (0::nat) ==> m mod n < n"}
@@ -145,32 +145,32 @@
 If possible, the definition should be given by pattern matching on the left
 rather than @{text "if"} on the right. In the case of @{term gcd} the
 following alternative definition suggests itself:
-*}
+\<close>
 
 fun gcd1 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "gcd1 m 0 = m" |
 "gcd1 m n = gcd1 n (m mod n)"
 
-text{*\noindent
+text\<open>\noindent
 The order of equations is important: it hides the side condition
 @{prop"n ~= (0::nat)"}.  Unfortunately, not all conditionals can be
 expressed by pattern matching.
 
 A simple alternative is to replace @{text "if"} by @{text case}, 
 which is also available for @{typ bool} and is not split automatically:
-*}
+\<close>
 
 fun gcd2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "gcd2 m n = (case n=0 of True \<Rightarrow> m | False \<Rightarrow> gcd2 n (m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 This is probably the neatest solution next to pattern matching, and it is
 always available.
 
 A final alternative is to replace the offending simplification rules by
 derived conditional ones. For @{term gcd} it means we have to prove
 these lemmas:
-*}
+\<close>
 
 lemma [simp]: "gcd m 0 = m"
 apply(simp)
@@ -180,15 +180,15 @@
 apply(simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 Simplification terminates for these proofs because the condition of the @{text
 "if"} simplifies to @{term True} or @{term False}.
 Now we can disable the original simplification rule:
-*}
+\<close>
 
 declare gcd.simps [simp del]
 
-text{*
+text\<open>
 \index{induction!recursion|(}
 \index{recursion induction|(}
 
@@ -207,29 +207,29 @@
 you are trying to establish holds for the left-hand side provided it holds
 for all recursive calls on the right-hand side. Here is a simple example
 involving the predefined @{term"map"} functional on lists:
-*}
+\<close>
 
 lemma "map f (sep x xs) = sep (f x) (map f xs)"
 
-txt{*\noindent
+txt\<open>\noindent
 Note that @{term"map f xs"}
 is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
 this lemma by recursion induction over @{term"sep"}:
-*}
+\<close>
 
 apply(induct_tac x xs rule: sep.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 The resulting proof state has three subgoals corresponding to the three
 clauses for @{term"sep"}:
 @{subgoals[display,indent=0]}
 The rest is pure simplification:
-*}
+\<close>
 
 apply simp_all
 done
 
-text{*\noindent The proof goes smoothly because the induction rule
+text\<open>\noindent The proof goes smoothly because the induction rule
 follows the recursion of @{const sep}.  Try proving the above lemma by
 structural induction, and you find that you need an additional case
 distinction.
@@ -255,7 +255,7 @@
 holds for the tail of that list.
 \index{induction!recursion|)}
 \index{recursion induction|)}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,26 +2,26 @@
 theory Ifexpr imports Main begin
 (*>*)
 
-subsection{*Case Study: Boolean Expressions*}
+subsection\<open>Case Study: Boolean Expressions\<close>
 
-text{*\label{sec:boolex}\index{boolean expressions example|(}
+text\<open>\label{sec:boolex}\index{boolean expressions example|(}
 The aim of this case study is twofold: it shows how to model boolean
 expressions and some algorithms for manipulating them, and it demonstrates
 the constructs introduced above.
-*}
+\<close>
 
-subsubsection{*Modelling Boolean Expressions*}
+subsubsection\<open>Modelling Boolean Expressions\<close>
 
-text{*
+text\<open>
 We want to represent boolean expressions built up from variables and
 constants by negation and conjunction. The following datatype serves exactly
 that purpose:
-*}
+\<close>
 
 datatype boolex = Const bool | Var nat | Neg boolex
                 | And boolex boolex
 
-text{*\noindent
+text\<open>\noindent
 The two constants are represented by @{term"Const True"} and
 @{term"Const False"}. Variables are represented by terms of the form
 @{term"Var n"}, where @{term"n"} is a natural number (type @{typ"nat"}).
@@ -34,7 +34,7 @@
 Hence the function @{text"value"} takes an additional parameter, an
 \emph{environment} of type @{typ"nat => bool"}, which maps variables to their
 values:
-*}
+\<close>
 
 primrec "value" :: "boolex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
 "value (Const b) env = b" |
@@ -42,20 +42,20 @@
 "value (Neg b)   env = (\<not> value b env)" |
 "value (And b c) env = (value b env \<and> value c env)"
 
-text{*\noindent
+text\<open>\noindent
 \subsubsection{If-Expressions}
 
 An alternative and often more efficient (because in a certain sense
 canonical) representation is given by so-called \emph{If-expressions} built up
 from constants (@{term"CIF"}), variables (@{term"VIF"}) and conditionals
 (@{term"IF"}):
-*}
+\<close>
 
 datatype ifex = CIF bool | VIF nat | IF ifex ifex ifex
 
-text{*\noindent
+text\<open>\noindent
 The evaluation of If-expressions proceeds as for @{typ"boolex"}:
-*}
+\<close>
 
 primrec valif :: "ifex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
 "valif (CIF b)    env = b" |
@@ -63,13 +63,13 @@
 "valif (IF b t e) env = (if valif b env then valif t env
                                         else valif e env)"
 
-text{*
+text\<open>
 \subsubsection{Converting Boolean and If-Expressions}
 
 The type @{typ"boolex"} is close to the customary representation of logical
 formulae, whereas @{typ"ifex"} is designed for efficiency. It is easy to
 translate from @{typ"boolex"} into @{typ"ifex"}:
-*}
+\<close>
 
 primrec bool2if :: "boolex \<Rightarrow> ifex" where
 "bool2if (Const b) = CIF b" |
@@ -77,22 +77,22 @@
 "bool2if (Neg b)   = IF (bool2if b) (CIF False) (CIF True)" |
 "bool2if (And b c) = IF (bool2if b) (bool2if c) (CIF False)"
 
-text{*\noindent
+text\<open>\noindent
 At last, we have something we can verify: that @{term"bool2if"} preserves the
 value of its argument:
-*}
+\<close>
 
 lemma "valif (bool2if b) env = value b env"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is canonical:
-*}
+\<close>
 
 apply(induct_tac b)
 apply(auto)
 done
 
-text{*\noindent
+text\<open>\noindent
 In fact, all proofs in this case study look exactly like this. Hence we do
 not show them below.
 
@@ -102,7 +102,7 @@
 repeatedly replacing a subterm of the form @{term"IF (IF b x y) z u"} by
 @{term"IF b (IF x z u) (IF y z u)"}, which has the same value. The following
 primitive recursive functions perform this task:
-*}
+\<close>
 
 primrec normif :: "ifex \<Rightarrow> ifex \<Rightarrow> ifex \<Rightarrow> ifex" where
 "normif (CIF b)    t e = IF (CIF b) t e" |
@@ -114,18 +114,18 @@
 "norm (VIF x)    = VIF x" |
 "norm (IF b t e) = normif b (norm t) (norm e)"
 
-text{*\noindent
+text\<open>\noindent
 Their interplay is tricky; we leave it to you to develop an
 intuitive understanding. Fortunately, Isabelle can help us to verify that the
 transformation preserves the value of the expression:
-*}
+\<close>
 
 theorem "valif (norm b) env = valif b env"(*<*)oops(*>*)
 
-text{*\noindent
+text\<open>\noindent
 The proof is canonical, provided we first show the following simplification
 lemma, which also helps to understand what @{term"normif"} does:
-*}
+\<close>
 
 lemma [simp]:
   "\<forall>t e. valif (normif b t e) env = valif (IF b t e) env"
@@ -137,13 +137,13 @@
 apply(induct_tac b)
 by(auto)
 (*>*)
-text{*\noindent
+text\<open>\noindent
 Note that the lemma does not have a name, but is implicitly used in the proof
 of the theorem shown above because of the @{text"[simp]"} attribute.
 
 But how can we be sure that @{term"norm"} really produces a normal form in
 the above sense? We define a function that tests If-expressions for normality:
-*}
+\<close>
 
 primrec normal :: "ifex \<Rightarrow> bool" where
 "normal(CIF b) = True" |
@@ -151,10 +151,10 @@
 "normal(IF b t e) = (normal t \<and> normal e \<and>
      (case b of CIF b \<Rightarrow> True | VIF x \<Rightarrow> True | IF x y z \<Rightarrow> False))"
 
-text{*\noindent
+text\<open>\noindent
 Now we prove @{term"normal(norm b)"}. Of course, this requires a lemma about
 normality of @{term"normif"}:
-*}
+\<close>
 
 lemma [simp]: "\<forall>t e. normal(normif b t e) = (normal t \<and> normal e)"
 (*<*)
@@ -166,7 +166,7 @@
 by(auto)
 (*>*)
 
-text{*\medskip
+text\<open>\medskip
 How do we come up with the required lemmas? Try to prove the main theorems
 without them and study carefully what @{text auto} leaves unproved. This 
 can provide the clue.  The necessity of universal quantification
@@ -181,7 +181,7 @@
   equalities (@{text"="}).)
 \end{exercise}
 \index{boolean expressions example|)}
-*}
+\<close>
 (*<*)
 
 primrec normif2 :: "ifex => ifex => ifex => ifex" where
--- a/src/Doc/Tutorial/Inductive/AB.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/AB.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory AB imports Main begin(*>*)
 
-section{*Case Study: A Context Free Grammar*}
+section\<open>Case Study: A Context Free Grammar\<close>
 
-text{*\label{sec:CFG}
+text\<open>\label{sec:CFG}
 \index{grammars!defining inductively|(}%
 Grammars are nothing but shorthands for inductive definitions of nonterminals
 which represent sets of strings. For example, the production
@@ -21,24 +21,24 @@
 
 We start by fixing the alphabet, which consists only of @{term a}'s
 and~@{term b}'s:
-*}
+\<close>
 
 datatype alfa = a | b
 
-text{*\noindent
+text\<open>\noindent
 For convenience we include the following easy lemmas as simplification rules:
-*}
+\<close>
 
 lemma [simp]: "(x \<noteq> a) = (x = b) \<and> (x \<noteq> b) = (x = a)"
 by (case_tac x, auto)
 
-text{*\noindent
+text\<open>\noindent
 Words over this alphabet are of type @{typ"alfa list"}, and
 the three nonterminals are declared as sets of such words.
 The productions above are recast as a \emph{mutual} inductive
 definition\index{inductive definition!simultaneous}
 of @{term S}, @{term A} and~@{term B}:
-*}
+\<close>
 
 inductive_set
   S :: "alfa list set" and
@@ -55,31 +55,31 @@
 | "w \<in> S            \<Longrightarrow> b#w   \<in> B"
 | "\<lbrakk> v \<in> B; w \<in> B \<rbrakk> \<Longrightarrow> a#v@w \<in> B"
 
-text{*\noindent
+text\<open>\noindent
 First we show that all words in @{term S} contain the same number of @{term
 a}'s and @{term b}'s. Since the definition of @{term S} is by mutual
 induction, so is the proof: we show at the same time that all words in
 @{term A} contain one more @{term a} than @{term b} and all words in @{term
 B} contain one more @{term b} than @{term a}.
-*}
+\<close>
 
 lemma correctness:
   "(w \<in> S \<longrightarrow> size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b])     \<and>
    (w \<in> A \<longrightarrow> size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b] + 1) \<and>
    (w \<in> B \<longrightarrow> size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1)"
 
-txt{*\noindent
+txt\<open>\noindent
 These propositions are expressed with the help of the predefined @{term
 filter} function on lists, which has the convenient syntax @{text"[x\<leftarrow>xs. P
 x]"}, the list of all elements @{term x} in @{term xs} such that @{prop"P x"}
 holds. Remember that on lists @{text size} and @{text length} are synonymous.
 
 The proof itself is by rule induction and afterwards automatic:
-*}
+\<close>
 
 by (rule S_A_B.induct, auto)
 
-text{*\noindent
+text\<open>\noindent
 This may seem surprising at first, and is indeed an indication of the power
 of inductive definitions. But it is also quite straightforward. For example,
 consider the production $A \to b A A$: if $v,w \in A$ and the elements of $A$
@@ -109,13 +109,13 @@
 and @{term b}'s to an arbitrary property @{term P}. Otherwise we would have
 to prove the desired lemma twice, once as stated above and once with the
 roles of @{term a}'s and @{term b}'s interchanged.
-*}
+\<close>
 
 lemma step1: "\<forall>i < size w.
   \<bar>(int(size[x\<leftarrow>take (i+1) w. P x])-int(size[x\<leftarrow>take (i+1) w. \<not>P x]))
    - (int(size[x\<leftarrow>take i w. P x])-int(size[x\<leftarrow>take i w. \<not>P x]))\<bar> \<le> 1"
 
-txt{*\noindent
+txt\<open>\noindent
 The lemma is a bit hard to read because of the coercion function
 @{text"int :: nat \<Rightarrow> int"}. It is required because @{term size} returns
 a natural number, but subtraction on type~@{typ nat} will do the wrong thing.
@@ -126,34 +126,34 @@
 The proof is by induction on @{term w}, with a trivial base case, and a not
 so trivial induction step. Since it is essentially just arithmetic, we do not
 discuss it.
-*}
+\<close>
 
 apply(induct_tac w)
 apply(auto simp add: abs_if take_Cons split: nat.split)
 done
 
-text{*
+text\<open>
 Finally we come to the above-mentioned lemma about cutting in half a word with two more elements of one sort than of the other sort:
-*}
+\<close>
 
 lemma part1:
  "size[x\<leftarrow>w. P x] = size[x\<leftarrow>w. \<not>P x]+2 \<Longrightarrow>
   \<exists>i\<le>size w. size[x\<leftarrow>take i w. P x] = size[x\<leftarrow>take i w. \<not>P x]+1"
 
-txt{*\noindent
+txt\<open>\noindent
 This is proved by @{text force} with the help of the intermediate value theorem,
 instantiated appropriately and with its first premise disposed of by lemma
 @{thm[source]step1}:
-*}
+\<close>
 
 apply(insert nat0_intermed_int_val[OF step1, of "P" "w" "1"])
 by force
 
-text{*\noindent
+text\<open>\noindent
 
 Lemma @{thm[source]part1} tells us only about the prefix @{term"take i w"}.
 An easy lemma deals with the suffix @{term"drop i w"}:
-*}
+\<close>
 
 
 lemma part2:
@@ -163,7 +163,7 @@
    \<Longrightarrow> size[x\<leftarrow>drop i w. P x] = size[x\<leftarrow>drop i w. \<not>P x]+1"
 by(simp del: append_take_drop_id)
 
-text{*\noindent
+text\<open>\noindent
 In the proof we have disabled the normally useful lemma
 \begin{isabelle}
 @{thm append_take_drop_id[no_vars]}
@@ -174,34 +174,34 @@
 
 To dispose of trivial cases automatically, the rules of the inductive
 definition are declared simplification rules:
-*}
+\<close>
 
 declare S_A_B.intros[simp]
 
-text{*\noindent
+text\<open>\noindent
 This could have been done earlier but was not necessary so far.
 
 The completeness theorem tells us that if a word has the same number of
 @{term a}'s and @{term b}'s, then it is in @{term S}, and similarly 
 for @{term A} and @{term B}:
-*}
+\<close>
 
 theorem completeness:
   "(size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b]     \<longrightarrow> w \<in> S) \<and>
    (size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b] + 1 \<longrightarrow> w \<in> A) \<and>
    (size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1 \<longrightarrow> w \<in> B)"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by induction on @{term w}. Structural induction would fail here
 because, as we can see from the grammar, we need to make bigger steps than
 merely appending a single letter at the front. Hence we induct on the length
 of @{term w}, using the induction rule @{thm[source]length_induct}:
-*}
+\<close>
 
 apply(induct_tac w rule: length_induct)
 apply(rename_tac w)
 
-txt{*\noindent
+txt\<open>\noindent
 The @{text rule} parameter tells @{text induct_tac} explicitly which induction
 rule to use. For details see \S\ref{sec:complete-ind} below.
 In this case the result is that we may assume the lemma already
@@ -210,13 +210,13 @@
 
 The proof continues with a case distinction on @{term w}:
 whether @{term w} is empty or not.
-*}
+\<close>
 
 apply(case_tac w)
  apply(simp_all)
 (*<*)apply(rename_tac x v)(*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 Simplification disposes of the base case and leaves only a conjunction
 of two step cases to be proved:
 if @{prop"w = a#v"} and @{prop[display]"size[x\<in>v. x=a] = size[x\<in>v. x=b]+2"} then
@@ -226,49 +226,49 @@
 After breaking the conjunction up into two cases, we can apply
 @{thm[source]part1} to the assumption that @{term w} contains two more @{term
 a}'s than @{term b}'s.
-*}
+\<close>
 
 apply(rule conjI)
  apply(clarify)
  apply(frule part1[of "\<lambda>x. x=a", simplified])
  apply(clarify)
-txt{*\noindent
+txt\<open>\noindent
 This yields an index @{prop"i \<le> length v"} such that
 @{prop[display]"length [x\<leftarrow>take i v . x = a] = length [x\<leftarrow>take i v . x = b] + 1"}
 With the help of @{thm[source]part2} it follows that
 @{prop[display]"length [x\<leftarrow>drop i v . x = a] = length [x\<leftarrow>drop i v . x = b] + 1"}
-*}
+\<close>
 
  apply(drule part2[of "\<lambda>x. x=a", simplified])
   apply(assumption)
 
-txt{*\noindent
+txt\<open>\noindent
 Now it is time to decompose @{term v} in the conclusion @{prop"b#v \<in> A"}
 into @{term"take i v @ drop i v"},
-*}
+\<close>
 
  apply(rule_tac n1=i and t=v in subst[OF append_take_drop_id])
 
-txt{*\noindent
+txt\<open>\noindent
 (the variables @{term n1} and @{term t} are the result of composing the
 theorems @{thm[source]subst} and @{thm[source]append_take_drop_id})
 after which the appropriate rule of the grammar reduces the goal
 to the two subgoals @{prop"take i v \<in> A"} and @{prop"drop i v \<in> A"}:
-*}
+\<close>
 
  apply(rule S_A_B.intros)
 
-txt{*
+txt\<open>
 Both subgoals follow from the induction hypothesis because both @{term"take i
 v"} and @{term"drop i v"} are shorter than @{term w}:
-*}
+\<close>
 
   apply(force simp add: min_less_iff_disj)
  apply(force split: nat_diff_split)
 
-txt{*
+txt\<open>
 The case @{prop"w = b#v"} is proved analogously:
-*}
+\<close>
 
 apply(clarify)
 apply(frule part1[of "\<lambda>x. x=b", simplified])
@@ -280,7 +280,7 @@
  apply(force simp add: min_less_iff_disj)
 by(force simp add: min_less_iff_disj split: nat_diff_split)
 
-text{*
+text\<open>
 We conclude this section with a comparison of our proof with 
 Hopcroft\index{Hopcroft, J. E.} and Ullman's\index{Ullman, J. D.}
 @{cite \<open>p.\ts81\<close> HopcroftUllman}.
@@ -304,6 +304,6 @@
 cases.  Such errors are found in many pen-and-paper proofs when they
 are scrutinized formally.%
 \index{grammars!defining inductively|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Inductive/Advanced.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Advanced.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 ML_file "../../antiquote_setup.ML"
 (*>*)
 
-text {*
+text \<open>
 The premises of introduction rules may contain universal quantifiers and
 monotone functions.  A universal quantifier lets the rule 
 refer to any number of instances of 
@@ -10,11 +10,11 @@
 to existing constructions (such as ``list of'') over the inductively defined
 set.  The examples below show how to use the additional expressiveness
 and how to reason from the resulting definitions.
-*}
+\<close>
 
-subsection{* Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype} *}
+subsection\<open>Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype}\<close>
 
-text {*
+text \<open>
 \index{ground terms example|(}%
 \index{quantifiers!and inductive definitions|(}%
 As a running example, this section develops the theory of \textbf{ground
@@ -23,19 +23,19 @@
 constant as a function applied to the null argument  list.  Let us declare a
 datatype @{text gterm} for the type of ground  terms. It is a type constructor
 whose argument is a type of  function symbols. 
-*}
+\<close>
 
 datatype 'f gterm = Apply 'f "'f gterm list"
 
-text {*
+text \<open>
 To try it out, we declare a datatype of some integer operations: 
 integer constants, the unary minus operator and the addition 
 operator.
-*}
+\<close>
 
 datatype integer_op = Number int | UnaryMinus | Plus
 
-text {*
+text \<open>
 Now the type @{typ "integer_op gterm"} denotes the ground 
 terms built over those symbols.
 
@@ -56,7 +56,7 @@
 to our inductively defined set, i.e.\ is a ground term
 over~@{text F}.  The function @{term set} denotes the set of elements in a given 
 list. 
-*}
+\<close>
 
 inductive_set
   gterms :: "'f set \<Rightarrow> 'f gterm set"
@@ -65,11 +65,11 @@
 step[intro!]: "\<lbrakk>\<forall>t \<in> set args. t \<in> gterms F;  f \<in> F\<rbrakk>
                \<Longrightarrow> (Apply f args) \<in> gterms F"
 
-text {*
+text \<open>
 To demonstrate a proof from this definition, let us 
 show that the function @{term gterms}
 is \textbf{monotone}.  We shall need this concept shortly.
-*}
+\<close>
 
 lemma gterms_mono: "F\<subseteq>G \<Longrightarrow> gterms F \<subseteq> gterms G"
 apply clarify
@@ -81,7 +81,7 @@
 apply clarify
 apply (erule gterms.induct)
 (*>*)
-txt{*
+txt\<open>
 Intuitively, this theorem says that
 enlarging the set of function symbols enlarges the set of ground 
 terms. The proof is a trivial rule induction.
@@ -92,9 +92,9 @@
 The assumptions state that @{text f} belongs 
 to~@{text F}, which is included in~@{text G}, and that every element of the list @{text args} is
 a ground term over~@{text G}.  The @{text blast} method finds this chain of reasoning easily.  
-*}
+\<close>
 (*<*)oops(*>*)
-text {*
+text \<open>
 \begin{warn}
 Why do we call this function @{text gterms} instead 
 of @{text gterm}?  A constant may have the same name as a type.  However,
@@ -113,7 +113,7 @@
 terms and a function  symbol~@{text f}. If the length of the list matches the
 function's arity  then applying @{text f} to @{text args} yields a well-formed
 term.
-*}
+\<close>
 
 inductive_set
   well_formed_gterm :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
@@ -123,16 +123,16 @@
                 length args = arity f\<rbrakk>
                \<Longrightarrow> (Apply f args) \<in> well_formed_gterm arity"
 
-text {*
+text \<open>
 The inductive definition neatly captures the reasoning above.
 The universal quantification over the
 @{text set} of arguments expresses that all of them are well-formed.%
 \index{quantifiers!and inductive definitions|)}
-*}
+\<close>
 
-subsection{* Alternative Definition Using a Monotone Function *}
+subsection\<open>Alternative Definition Using a Monotone Function\<close>
 
-text {*
+text \<open>
 \index{monotone functions!and inductive definitions|(}% 
 An inductive definition may refer to the
 inductively defined  set through an arbitrary monotone function.  To
@@ -148,7 +148,7 @@
 introduction rule.  The first premise states that @{text args} belongs to
 the @{text lists} of well-formed terms.  This formulation is more
 direct, if more obscure, than using a universal quantifier.
-*}
+\<close>
 
 inductive_set
   well_formed_gterm' :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
@@ -159,7 +159,7 @@
                \<Longrightarrow> (Apply f args) \<in> well_formed_gterm' arity"
 monos lists_mono
 
-text {*
+text \<open>
 We cite the theorem @{text lists_mono} to justify 
 using the function @{term lists}.%
 \footnote{This particular theorem is installed by default already, but we
@@ -194,15 +194,15 @@
 Further lists of well-formed
 terms become available and none are taken away.%
 \index{monotone functions!and inductive definitions|)} 
-*}
+\<close>
 
-subsection{* A Proof of Equivalence *}
+subsection\<open>A Proof of Equivalence\<close>
 
-text {*
+text \<open>
 We naturally hope that these two inductive definitions of ``well-formed'' 
 coincide.  The equality can be proved by separate inclusions in 
 each direction.  Each is a trivial rule induction. 
-*}
+\<close>
 
 lemma "well_formed_gterm arity \<subseteq> well_formed_gterm' arity"
 apply clarify
@@ -214,7 +214,7 @@
 apply clarify
 apply (erule well_formed_gterm.induct)
 (*>*)
-txt {*
+txt \<open>
 The @{text clarify} method gives
 us an element of @{term "well_formed_gterm arity"} on which to perform 
 induction.  The resulting subgoal can be proved automatically:
@@ -222,7 +222,7 @@
 This proof resembles the one given in
 {\S}\ref{sec:gterm-datatype} above, especially in the form of the
 induction hypothesis.  Next, we consider the opposite inclusion:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "well_formed_gterm' arity \<subseteq> well_formed_gterm arity"
 apply clarify
@@ -234,7 +234,7 @@
 apply clarify
 apply (erule well_formed_gterm'.induct)
 (*>*)
-txt {*
+txt \<open>
 The proof script is virtually identical,
 but the subgoal after applying induction may be surprising:
 @{subgoals[display,indent=0,margin=65]}
@@ -257,13 +257,13 @@
 distribute over intersection.  Monotonicity implies one direction of
 this set equality; we have this theorem:
 @{named_thms [display,indent=0] mono_Int [no_vars] (mono_Int)}
-*}
+\<close>
 (*<*)oops(*>*)
 
 
-subsection{* Another Example of Rule Inversion *}
+subsection\<open>Another Example of Rule Inversion\<close>
 
-text {*
+text \<open>
 \index{rule inversion|(}%
 Does @{term gterms} distribute over intersection?  We have proved that this
 function is monotone, so @{text mono_Int} gives one of the inclusions.  The
@@ -271,20 +271,20 @@
 sets
 @{term F} and~@{term G} then it is also a ground term over their intersection,
 @{term "F \<inter> G"}.
-*}
+\<close>
 
 lemma gterms_IntI:
      "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
 (*<*)oops(*>*)
-text {*
+text \<open>
 Attempting this proof, we get the assumption 
 @{term "Apply f args \<in> gterms G"}, which cannot be broken down. 
 It looks like a job for rule inversion:\cmmdx{inductive\protect\_cases}
-*}
+\<close>
 
 inductive_cases gterm_Apply_elim [elim!]: "Apply f args \<in> gterms F"
 
-text {*
+text \<open>
 Here is the result.
 @{named_thms [display,indent=0,margin=50] gterm_Apply_elim [no_vars] (gterm_Apply_elim)}
 This rule replaces an assumption about @{term "Apply f args"} by 
@@ -295,7 +295,7 @@
 have given the @{text "elim!"} attribute. 
 
 Now we can prove the other half of that distributive law.
-*}
+\<close>
 
 lemma gterms_IntI [rule_format, intro!]:
      "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
@@ -306,7 +306,7 @@
 lemma "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
 apply (erule gterms.induct)
 (*>*)
-txt {*
+txt \<open>
 The proof begins with rule induction over the definition of
 @{term gterms}, which leaves a single subgoal:  
 @{subgoals[display,indent=0,margin=65]}
@@ -320,13 +320,13 @@
 
 \smallskip
 Our distributive law is a trivial consequence of previously-proved results:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma gterms_Int_eq [simp]:
      "gterms (F \<inter> G) = gterms F \<inter> gterms G"
 by (blast intro!: mono_Int monoI gterms_mono)
 
-text_raw {*
+text_raw \<open>
 \index{rule inversion|)}%
 \index{ground terms example|)}
 
@@ -339,7 +339,7 @@
 list of argument types paired with the result type. 
 Complete this inductive definition:
 \begin{isabelle}
-*}
+\<close>
 
 inductive_set
   well_typed_gterm :: "('f \<Rightarrow> 't list * 't) \<Rightarrow> ('f gterm * 't)set"
@@ -352,15 +352,15 @@
      \<Longrightarrow> (Apply f (map fst args), rtype) 
          \<in> well_typed_gterm sig"
 (*>*)
-text_raw {*
+text_raw \<open>
 \end{isabelle}
 \end{exercise}
 \end{isamarkuptext}
-*}
+\<close>
 
 (*<*)
 
-text{*the following declaration isn't actually used*}
+text\<open>the following declaration isn't actually used\<close>
 primrec
   integer_arity :: "integer_op \<Rightarrow> nat"
 where
@@ -368,7 +368,7 @@
 | "integer_arity UnaryMinus        = 1"
 | "integer_arity Plus              = 2"
 
-text{* the rest isn't used: too complicated.  OK for an exercise though.*}
+text\<open>the rest isn't used: too complicated.  OK for an exercise though.\<close>
 
 inductive_set
   integer_signature :: "(integer_op * (unit list * unit)) set"
--- a/src/Doc/Tutorial/Inductive/Even.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Even.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 ML_file "../../antiquote_setup.ML" 
 (*>*)
 
-section{* The Set of Even Numbers *}
+section\<open>The Set of Even Numbers\<close>
 
-text {*
+text \<open>
 \index{even numbers!defining inductively|(}%
 The set of even numbers can be inductively defined as the least set
 containing 0 and closed under the operation $+2$.  Obviously,
@@ -12,20 +12,20 @@
 We shall prove below that the two formulations coincide.  On the way we
 shall examine the primary means of reasoning about inductively defined
 sets: rule induction.
-*}
+\<close>
 
-subsection{* Making an Inductive Definition *}
+subsection\<open>Making an Inductive Definition\<close>
 
-text {*
+text \<open>
 Using \commdx{inductive\protect\_set}, we declare the constant @{text even} to be
 a set of natural numbers with the desired properties.
-*}
+\<close>
 
 inductive_set even :: "nat set" where
 zero[intro!]: "0 \<in> even" |
 step[intro!]: "n \<in> even \<Longrightarrow> (Suc (Suc n)) \<in> even"
 
-text {*
+text \<open>
 An inductive definition consists of introduction rules.  The first one
 above states that 0 is even; the second states that if $n$ is even, then so
 is~$n+2$.  Given this declaration, Isabelle generates a fixed point
@@ -44,16 +44,16 @@
 apply them aggressively. Obviously, regarding 0 as even is safe.  The
 @{text step} rule is also safe because $n+2$ is even if and only if $n$ is
 even.  We prove this equivalence later.
-*}
+\<close>
 
-subsection{*Using Introduction Rules*}
+subsection\<open>Using Introduction Rules\<close>
 
-text {*
+text \<open>
 Our first lemma states that numbers of the form $2\times k$ are even.
 Introduction rules are used to show that specific values belong to the
 inductive set.  Such proofs typically involve 
 induction, perhaps over some other inductive set.
-*}
+\<close>
 
 lemma two_times_even[intro!]: "2*k \<in> even"
 apply (induct_tac k)
@@ -63,7 +63,7 @@
 lemma "2*k \<in> even"
 apply (induct_tac k)
 (*>*)
-txt {*
+txt \<open>
 \noindent
 The first step is induction on the natural number @{text k}, which leaves
 two subgoals:
@@ -75,14 +75,14 @@
 definition of @{text even} (using the divides relation) and our inductive
 definition.  One direction of this equivalence is immediate by the lemma
 just proved, whose @{text "intro!"} attribute ensures it is applied automatically.
-*}
+\<close>
 (*<*)oops(*>*)
 lemma dvd_imp_even: "2 dvd n \<Longrightarrow> n \<in> even"
 by (auto simp add: dvd_def)
 
-subsection{* Rule Induction \label{sec:rule-induction} *}
+subsection\<open>Rule Induction \label{sec:rule-induction}\<close>
 
-text {*
+text \<open>
 \index{rule induction|(}%
 From the definition of the set
 @{term even}, Isabelle has
@@ -102,56 +102,56 @@
 Induction is the usual way of proving a property of the elements of an
 inductively defined set.  Let us prove that all members of the set
 @{term even} are multiples of two.
-*}
+\<close>
 
 lemma even_imp_dvd: "n \<in> even \<Longrightarrow> 2 dvd n"
-txt {*
+txt \<open>
 We begin by applying induction.  Note that @{text even.induct} has the form
 of an elimination rule, so we use the method @{text erule}.  We get two
 subgoals:
-*}
+\<close>
 apply (erule even.induct)
-txt {*
+txt \<open>
 @{subgoals[display,indent=0]}
 We unfold the definition of @{text dvd} in both subgoals, proving the first
 one and simplifying the second:
-*}
+\<close>
 apply (simp_all add: dvd_def)
-txt {*
+txt \<open>
 @{subgoals[display,indent=0]}
 The next command eliminates the existential quantifier from the assumption
 and replaces @{text n} by @{text "2 * k"}.
-*}
+\<close>
 apply clarify
-txt {*
+txt \<open>
 @{subgoals[display,indent=0]}
 To conclude, we tell Isabelle that the desired value is
 @{term "Suc k"}.  With this hint, the subgoal falls to @{text simp}.
-*}
+\<close>
 apply (rule_tac x = "Suc k" in exI, simp)
 (*<*)done(*>*)
 
-text {*
+text \<open>
 Combining the previous two results yields our objective, the
 equivalence relating @{term even} and @{text dvd}. 
 %
 %we don't want [iff]: discuss?
-*}
+\<close>
 
 theorem even_iff_dvd: "(n \<in> even) = (2 dvd n)"
 by (blast intro: dvd_imp_even even_imp_dvd)
 
 
-subsection{* Generalization and Rule Induction \label{sec:gen-rule-induction} *}
+subsection\<open>Generalization and Rule Induction \label{sec:gen-rule-induction}\<close>
 
-text {*
+text \<open>
 \index{generalizing for induction}%
 Before applying induction, we typically must generalize
 the induction formula.  With rule induction, the required generalization
 can be hard to find and sometimes requires a complete reformulation of the
 problem.  In this  example, our first attempt uses the obvious statement of
 the result.  It fails:
-*}
+\<close>
 
 lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
 apply (erule even.induct)
@@ -160,7 +160,7 @@
 lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
 apply (erule even.induct)
 (*>*)
-txt {*
+txt \<open>
 Rule induction finds no occurrences of @{term "Suc(Suc n)"} in the
 conclusion, which it therefore leaves unchanged.  (Look at
 @{text even.induct} to see why this happens.)  We have these subgoals:
@@ -171,7 +171,7 @@
 in general is described in {\S}\ref{sec:ind-var-in-prems} below.
 In the current case the solution is easy because
 we have the necessary inverse, subtraction:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma even_imp_even_minus_2: "n \<in> even \<Longrightarrow> n - 2 \<in> even"
 apply (erule even.induct)
@@ -181,7 +181,7 @@
 lemma "n \<in>  even \<Longrightarrow> n - 2 \<in> even"
 apply (erule even.induct)
 (*>*)
-txt {*
+txt \<open>
 This lemma is trivially inductive.  Here are the subgoals:
 @{subgoals[display,indent=0]}
 The first is trivial because @{text "0 - 2"} simplifies to @{text 0}, which is
@@ -191,24 +191,24 @@
 
 \medskip
 Using our lemma, we can easily prove the result we originally wanted:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma Suc_Suc_even_imp_even: "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
 by (drule even_imp_even_minus_2, simp)
 
-text {*
+text \<open>
 We have just proved the converse of the introduction rule @{text even.step}.
 This suggests proving the following equivalence.  We give it the
 \attrdx{iff} attribute because of its obvious value for simplification.
-*}
+\<close>
 
 lemma [iff]: "((Suc (Suc n)) \<in> even) = (n \<in> even)"
 by (blast dest: Suc_Suc_even_imp_even)
 
 
-subsection{* Rule Inversion \label{sec:rule-inversion} *}
+subsection\<open>Rule Inversion \label{sec:rule-inversion}\<close>
 
-text {*
+text \<open>
 \index{rule inversion|(}%
 Case analysis on an inductive definition is called \textbf{rule
 inversion}.  It is frequently used in proofs about operational
@@ -232,11 +232,11 @@
 @{term "Suc(Suc n)"} then the first case becomes irrelevant, while the second
 case tells us that @{term n} belongs to @{term even}.  Isabelle will generate
 this instance for us:
-*}
+\<close>
 
 inductive_cases Suc_Suc_cases [elim!]: "Suc(Suc n) \<in> even"
 
-text {*
+text \<open>
 The \commdx{inductive\protect\_cases} command generates an instance of
 the @{text cases} rule for the supplied pattern and gives it the supplied name:
 @{named_thms [display,indent=0] Suc_Suc_cases [no_vars] (Suc_Suc_cases)}
@@ -265,13 +265,13 @@
 
 For one-off applications of rule inversion, use the \methdx{ind_cases} method. 
 Here is an example:
-*}
+\<close>
 
 (*<*)lemma "Suc(Suc n) \<in> even \<Longrightarrow> P"(*>*)
 apply (ind_cases "Suc(Suc n) \<in> even")
 (*<*)oops(*>*)
 
-text {*
+text \<open>
 The specified instance of the @{text cases} rule is generated, then applied
 as an elimination rule.
 
@@ -285,6 +285,6 @@
 used.  Later examples will show that they are actually worth using.%
 \index{rule inversion|)}%
 \index{even numbers!defining inductively|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Inductive/Mutual.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Mutual.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 (*<*)theory Mutual imports Main begin(*>*)
 
-subsection{*Mutually Inductive Definitions*}
+subsection\<open>Mutually Inductive Definitions\<close>
 
-text{*
+text\<open>
 Just as there are datatypes defined by mutual recursion, there are sets defined
 by mutual induction. As a trivial example we consider the even and odd
 natural numbers:
-*}
+\<close>
 
 inductive_set
   Even :: "nat set" and
@@ -16,7 +16,7 @@
 | EvenI: "n \<in> Odd \<Longrightarrow> Suc n \<in> Even"
 | OddI:  "n \<in> Even \<Longrightarrow> Suc n \<in> Odd"
 
-text{*\noindent
+text\<open>\noindent
 The mutually inductive definition of multiple sets is no different from
 that of a single set, except for induction: just as for mutually recursive
 datatypes, induction needs to involve all the simultaneously defined sets. In
@@ -26,25 +26,25 @@
 
 If we want to prove that all even numbers are divisible by two, we have to
 generalize the statement as follows:
-*}
+\<close>
 
 lemma "(m \<in> Even \<longrightarrow> 2 dvd m) \<and> (n \<in> Odd \<longrightarrow> 2 dvd (Suc n))"
 
-txt{*\noindent
+txt\<open>\noindent
 The proof is by rule induction. Because of the form of the induction theorem,
 it is applied by @{text rule} rather than @{text erule} as for ordinary
 inductive definitions:
-*}
+\<close>
 
 apply(rule Even_Odd.induct)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 The first two subgoals are proved by simplification and the final one can be
 proved in the same manner as in \S\ref{sec:rule-induction}
 where the same subgoal was encountered before.
 We do not show the proof script.
-*}
+\<close>
 (*<*)
   apply simp
  apply simp
@@ -55,17 +55,17 @@
 done
 (*>*)
 
-subsection{*Inductively Defined Predicates\label{sec:ind-predicates}*}
+subsection\<open>Inductively Defined Predicates\label{sec:ind-predicates}\<close>
 
-text{*\index{inductive predicates|(}
+text\<open>\index{inductive predicates|(}
 Instead of a set of even numbers one can also define a predicate on @{typ nat}:
-*}
+\<close>
 
 inductive evn :: "nat \<Rightarrow> bool" where
 zero: "evn 0" |
 step: "evn n \<Longrightarrow> evn(Suc(Suc n))"
 
-text{*\noindent Everything works as before, except that
+text\<open>\noindent Everything works as before, except that
 you write \commdx{inductive} instead of \isacommand{inductive\_set} and
 @{prop"evn n"} instead of @{prop"n : Even"}.
 When defining an n-ary relation as a predicate, it is recommended to curry
@@ -75,6 +75,6 @@
 
 When should you choose sets and when predicates? If you intend to combine your notion with set theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the @{text"\<in>"} notation. But note that predicates of more than one argument cannot be combined with the usual set theoretic operators: @{term"P \<union> Q"} is not well-typed if @{text"P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool"}; you have to write @{term"%x y. P x y & Q x y"} instead, as the sketch below illustrates.
 \index{inductive predicates|)}
-*}
+\<close>
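 
 (* Editor's sketch, not part of this changeset: the pointwise combination
    described above, wrapped in a definition; the name pred_conj is
    hypothetical. *)
 definition pred_conj :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> bool" where
 "pred_conj P Q = (\<lambda>x y. P x y \<and> Q x y)"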
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Inductive/Star.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Inductive/Star.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,8 +1,8 @@
 (*<*)theory Star imports Main begin(*>*)
 
-section{*The Reflexive Transitive Closure*}
+section\<open>The Reflexive Transitive Closure\<close>
 
-text{*\label{sec:rtc}
+text\<open>\label{sec:rtc}
 \index{reflexive transitive closure!defining inductively|(}%
 An inductive definition may accept parameters, so it can express 
 functions that yield sets.
@@ -12,7 +12,7 @@
 introduced in \S\ref{sec:Relations}, where the operator @{text"\<^sup>*"} was
 defined as a least fixed point because inductive definitions were not yet
 available. But now they are:
-*}
+\<close>
 
 inductive_set
   rtc :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"   ("_*" [1000] 999)
@@ -21,7 +21,7 @@
   rtc_refl[iff]:  "(x,x) \<in> r*"
 | rtc_step:       "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
 
-text{*\noindent
+text\<open>\noindent
 The function @{term rtc} is annotated with concrete syntax: instead of
 @{text"rtc r"} we can write @{term"r*"}. The actual definition
 consists of two rules. Reflexivity is obvious and is immediately given the
@@ -36,12 +36,12 @@
 for a start, it does not even mention transitivity.
 The rest of this section is devoted to proving that it is equivalent to
 the standard definition. We start with a simple lemma:
-*}
+\<close>
 
 lemma [intro]: "(x,y) \<in> r \<Longrightarrow> (x,y) \<in> r*"
 by(blast intro: rtc_step)
 
-text{*\noindent
+text\<open>\noindent
 Although the lemma itself is an unremarkable consequence of the basic rules,
 it has the advantage that it can be declared an introduction rule without the
 danger of killing the automatic tactics because @{term"r*"} occurs only in
@@ -61,12 +61,12 @@
 expects a premise of the form $(x@1,\dots,x@n) \in R$.
 
 Now we turn to the inductive proof of transitivity:
-*}
+\<close>
 
 lemma rtc_trans: "\<lbrakk> (x,y) \<in> r*; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
 apply(erule rtc.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 Unfortunately, even the base case is a problem:
 @{subgoals[display,indent=0,goals_limit=1]}
 We have to abandon this proof attempt.
@@ -85,12 +85,12 @@
 goal, of the pair @{term"(x,y)"} only @{term x} appears also in the
 conclusion, but not @{term y}. Thus our induction statement is too
 general. Fortunately, it can easily be specialized:
-transfer the additional premise @{prop"(y,z):r*"} into the conclusion:*}
+transfer the additional premise @{prop"(y,z):r*"} into the conclusion:\<close>
 (*<*)oops(*>*)
 lemma rtc_trans[rule_format]:
   "(x,y) \<in> r* \<Longrightarrow> (y,z) \<in> r* \<longrightarrow> (x,z) \<in> r*"
 
-txt{*\noindent
+txt\<open>\noindent
 This is not an obscure trick but a generally applicable heuristic:
 \begin{quote}\em
 When proving a statement by rule induction on $(x@1,\dots,x@n) \in R$,
@@ -101,24 +101,24 @@
 \S\ref{sec:ind-var-in-prems}. The @{text rule_format} directive turns
 @{text"\<longrightarrow>"} back into @{text"\<Longrightarrow>"}: in the end we obtain the original
 statement of our lemma.
-*}
+\<close>
 
 apply(erule rtc.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 Now induction produces two subgoals which are both proved automatically:
 @{subgoals[display,indent=0]}
-*}
+\<close>
 
  apply(blast)
 apply(blast intro: rtc_step)
 done
 
-text{*
+text\<open>
 Let us now prove that @{term"r*"} is really the reflexive transitive closure
 of @{term r}, i.e.\ the least reflexive and transitive
 relation containing @{term r}. The latter is easily formalized
-*}
+\<close>
 
 inductive_set
   rtc2 :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"
@@ -128,10 +128,10 @@
 | "(x,x) \<in> rtc2 r"
 | "\<lbrakk> (x,y) \<in> rtc2 r; (y,z) \<in> rtc2 r \<rbrakk> \<Longrightarrow> (x,z) \<in> rtc2 r"
 
-text{*\noindent
+text\<open>\noindent
 and the equivalence of the two definitions is easily shown by the obvious rule
 inductions:
-*}
+\<close>
 
 lemma "(x,y) \<in> rtc2 r \<Longrightarrow> (x,y) \<in> r*"
 apply(erule rtc2.induct)
@@ -146,7 +146,7 @@
 apply(blast intro: rtc2.intros)
 done
 
-text{*
+text\<open>
 So why did we start with the first definition? Because it is simpler. It
 contains only two rules, and the single step rule is simpler than
 transitivity.  As a consequence, @{thm[source]rtc.induct} is simpler than
@@ -164,7 +164,7 @@
 @{term rtc} where @{thm[source]rtc_step} is replaced by its converse as shown
 in exercise~\ref{ex:converse-rtc-step}.
 \end{exercise}
-*}
+\<close>
 (*<*)
 lemma rtc_step2[rule_format]: "(x,y) : r* \<Longrightarrow> (y,z) : r --> (x,z) : r*"
 apply(erule rtc.induct)
--- a/src/Doc/Tutorial/Misc/AdvancedInd.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/AdvancedInd.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,29 +2,29 @@
 theory AdvancedInd imports Main begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Now that we have learned about rules and logic, we take another look at the
 finer points of induction.  We consider two questions: what to do if the
 proposition to be proved is not directly amenable to induction
 (\S\ref{sec:ind-var-in-prems}), and how to utilize (\S\ref{sec:complete-ind})
 and even derive (\S\ref{sec:derive-ind}) new induction schemas. We conclude
 with an extended example of induction (\S\ref{sec:CTL-revisited}).
-*}
+\<close>
 
-subsection{*Massaging the Proposition*}
+subsection\<open>Massaging the Proposition\<close>
 
-text{*\label{sec:ind-var-in-prems}
+text\<open>\label{sec:ind-var-in-prems}
 Often we have assumed that the theorem to be proved is already in a form
 that is amenable to induction, but sometimes it isn't.
 Here is an example.
 Since @{term"hd"} and @{term"last"} return the first and last element of a
 non-empty list, this lemma looks easy to prove:
-*}
+\<close>
 
 lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) = last xs"
 apply(induct_tac xs)
 
-txt{*\noindent
+txt\<open>\noindent
 But induction produces the warning
 \begin{quote}\tt
 Induction variable occurs also among premises!
@@ -51,14 +51,14 @@
 implication~(@{text"\<longrightarrow>"}), letting
 \attrdx{rule_format} (\S\ref{sec:forward}) convert the
 result to the usual @{text"\<Longrightarrow>"} form:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma hd_rev [rule_format]: "xs \<noteq> [] \<longrightarrow> hd(rev xs) = last xs"
 (*<*)
 apply(induct_tac xs)
 (*>*)
 
-txt{*\noindent
+txt\<open>\noindent
 This time, induction leaves us with a trivial base case:
 @{subgoals[display,indent=0,goals_limit=1]}
 And @{text"auto"} completes the proof.
@@ -109,12 +109,12 @@
 Unfortunately, this induction schema cannot be expressed as a
 single theorem because it depends on the number of free variables in $t$ ---
 the notation $\overline{y}$ is merely an informal device.
-*}
+\<close>
 (*<*)by auto(*>*)
 
-subsection{*Beyond Structural and Recursion Induction*}
+subsection\<open>Beyond Structural and Recursion Induction\<close>
 
-text{*\label{sec:complete-ind}
+text\<open>\label{sec:complete-ind}
 So far, inductive proofs were by structural induction for
 primitive recursive functions and recursion induction for total recursive
 functions. But sometimes structural induction is awkward and there is no
@@ -130,12 +130,12 @@
 @{thm[display]"nat_less_induct"[no_vars]}
 As an application, we prove a property of the following
 function:
-*}
+\<close>
 
 axiomatization f :: "nat \<Rightarrow> nat"
   where f_ax: "f(f(n)) < f(Suc(n))" for n :: nat
 
-text{*
+text\<open>
 \begin{warn}
 We discourage the use of axioms because of the danger of
 inconsistencies.  Axiom @{text f_ax} does
@@ -148,35 +148,35 @@
 The axiom for @{term"f"} implies @{prop"n <= f n"}, which can
 be proved by induction on \mbox{@{term"f n"}}. Following the recipe outlined
 above, we have to phrase the proposition as follows to allow induction:
-*}
+\<close>
 
 lemma f_incr_lem: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
 
-txt{*\noindent
+txt\<open>\noindent
 To perform induction on @{term k} using @{thm[source]nat_less_induct}, we use
 the same general induction method as for recursion induction (see
 \S\ref{sec:fun-induction}):
-*}
+\<close>
 
 apply(induct_tac k rule: nat_less_induct)
 
-txt{*\noindent
+txt\<open>\noindent
 We get the following proof state:
 @{subgoals[display,indent=0,margin=65]}
 After stripping the @{text"\<forall>i"}, the proof continues with a case
 distinction on @{term"i"}. The case @{prop"i = (0::nat)"} is trivial and we focus on
 the other case:
-*}
+\<close>
 
 apply(rule allI)
 apply(case_tac i)
  apply(simp)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
-*}
+\<close>
 by(blast intro!: f_ax Suc_leI intro: le_less_trans)
 
-text{*\noindent
+text\<open>\noindent
 If you find the last step puzzling, here are the two lemmas it employs:
 \begin{isabelle}
 @{thm Suc_leI[no_vars]}
@@ -203,19 +203,19 @@
 proofs are easy to write but hard to read and understand.
 
 The desired result, @{prop"i <= f i"}, follows from @{thm[source]f_incr_lem}:
-*}
+\<close>
 
 lemmas f_incr = f_incr_lem[rule_format, OF refl]
 
-text{*\noindent
+text\<open>\noindent
 The final @{thm[source]refl} gets rid of the premise @{text"?k = f ?i"}. 
 We could have included this derivation in the original statement of the lemma:
-*}
+\<close>
 
 lemma f_incr[rule_format, OF refl]: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
 (*<*)oops(*>*)
 
-text{*
+text\<open>
 \begin{exercise}
 From the axiom and lemma for @{term"f"}, show that @{term"f"} is the
 identity function.
@@ -235,32 +235,32 @@
 which is a special case of @{thm[source]measure_induct}
 @{thm[display]measure_induct[no_vars]}
 where @{term f} may be any function into type @{typ nat}.
-*}
+\<close>
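 
 (* Editor's sketch, not part of this changeset: instantiating
    measure_induct, assuming its function variable is named f as displayed
    above, recovers a length-based induction rule for lists; the name
    length_induct_sketch is hypothetical. *)
 lemmas length_induct_sketch = measure_induct[where f=length]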
 
-subsection{*Derivation of New Induction Schemas*}
+subsection\<open>Derivation of New Induction Schemas\<close>
 
-text{*\label{sec:derive-ind}
+text\<open>\label{sec:derive-ind}
 \index{induction!deriving new schemas}%
 Induction schemas are ordinary theorems and you can derive new ones
 whenever you wish.  This section shows you how, using the example
 of @{thm[source]nat_less_induct}. Assume we only have structural induction
 available for @{typ"nat"} and want to derive complete induction.  We
 must generalize the statement as shown:
-*}
+\<close>
 
 lemma induct_lem: "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> \<forall>m<n. P m"
 apply(induct_tac n)
 
-txt{*\noindent
+txt\<open>\noindent
 The base case is vacuously true. For the induction step (@{prop"m <
 Suc n"}) we distinguish two cases: case @{prop"m < n"} is true by induction
 hypothesis and case @{prop"m = n"} follows from the assumption, again using
 the induction hypothesis:
-*}
+\<close>
  apply(blast)
 by(blast elim: less_SucE)
 
-text{*\noindent
+text\<open>\noindent
 The elimination rule @{thm[source]less_SucE} expresses the case distinction:
 @{thm[display]"less_SucE"[no_vars]}
 
@@ -270,16 +270,16 @@
 and remove the trivial condition @{prop"n < Suc n"}. Fortunately, this
 happens automatically when we add the lemma as a new premise to the
 desired goal:
-*}
+\<close>
 
 theorem nat_less_induct: "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> P n"
 by(insert induct_lem, blast)
 
-text{*
+text\<open>
 HOL already provides the mother of
 all inductions, well-founded induction (see \S\ref{sec:Well-founded}).  For
 example theorem @{thm[source]nat_less_induct} is
 a special case of @{thm[source]wf_induct} where @{term r} is @{text"<"} on
 @{typ nat}. The details can be found in theory \isa{Wellfounded_Recursion}.
-*}
+\<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Misc/Itrev.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Itrev.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -5,9 +5,9 @@
 declare [[names_unique = false]]
 (*>*)
 
-section{*Induction Heuristics*}
+section\<open>Induction Heuristics\<close>
 
-text{*\label{sec:InductionHeuristics}
+text\<open>\label{sec:InductionHeuristics}
 \index{induction heuristics|(}%
 The purpose of this section is to illustrate some simple heuristics for
 inductive proofs. The first one we have already mentioned in our initial
@@ -43,13 +43,13 @@
 @{text"@"} is linear in its first argument.  A linear time version of
 @{term"rev"} requires an extra argument where the result is accumulated
 gradually, using only~@{text"#"}:
-*}
+\<close>
 
 primrec itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
 "itrev []     ys = ys" |
 "itrev (x#xs) ys = itrev xs (x#ys)"
 
-text{*\noindent
+text\<open>\noindent
 The behaviour of \cdx{itrev} is simple: it reverses
 its first argument by stacking its elements onto the second argument,
 and returning that second argument when the first one becomes
@@ -58,17 +58,17 @@
 
 Naturally, we would like to show that @{term"itrev"} does indeed reverse
 its first argument provided the second one is empty:
-*}
+\<close>
 
 lemma "itrev xs [] = rev xs"
 
-txt{*\noindent
+txt\<open>\noindent
 There is no choice as to the induction variable, and we immediately simplify:
-*}
+\<close>
 
 apply(induct_tac xs, simp_all)
 
-txt{*\noindent
+txt\<open>\noindent
 Unfortunately, this attempt does not prove
 the induction step:
 @{subgoals[display,indent=0,margin=70]}
@@ -80,11 +80,11 @@
 \end{quote}
 Of course one cannot do this na\"{\i}vely: @{term"itrev xs ys = rev xs"} is
 just not true.  The correct generalization is
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "itrev xs ys = rev xs @ ys"
 (*<*)apply(induct_tac xs, simp_all)(*>*)
-txt{*\noindent
+txt\<open>\noindent
 If @{term"ys"} is replaced by @{term"[]"}, the right-hand side simplifies to
 @{term"rev xs"}, as required.
 
@@ -100,14 +100,14 @@
 the subgoal, but the induction hypothesis needs to be applied with
 @{term"a # ys"} instead of @{term"ys"}. Hence we prove the theorem
 for all @{term"ys"} instead of a fixed one:
-*}
+\<close>
 (*<*)oops(*>*)
 lemma "\<forall>ys. itrev xs ys = rev xs @ ys"
 (*<*)
 by(induct_tac xs, simp_all)
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 This time induction on @{term"xs"} followed by simplification succeeds. This
 leads to another heuristic for generalization:
 \begin{quote}
@@ -139,7 +139,7 @@
 Additionally, you can read \S\ref{sec:advanced-ind}
 to learn about some advanced techniques for inductive proofs.%
 \index{induction heuristics|)}
-*}
+\<close>
 (*<*)
 declare [[names_unique = true]]
 end
--- a/src/Doc/Tutorial/Misc/Option2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Option2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,14 +4,14 @@
 hide_type option
 (*>*)
 
-text{*\indexbold{*option (type)}\indexbold{*None (constant)}%
+text\<open>\indexbold{*option (type)}\indexbold{*None (constant)}%
 \indexbold{*Some (constant)}
 Our final datatype is very simple but still eminently useful:
-*}
+\<close>
 
 datatype 'a option = None | Some 'a
 
-text{*\noindent
+text\<open>\noindent
 Frequently one needs to add a distinguished element to some existing type.
 For example, type @{text"t option"} can model the result of a computation that
 may either terminate with an error (represented by @{const None}) or return
@@ -21,7 +21,7 @@
 customized constructors like @{term Error} and @{term Infinity},
 but it is often simpler to use @{text option}. For an application see
 \S\ref{sec:Trie}.
-*}
+\<close>
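 
 (* Editor's sketch, not part of this changeset: a lookup function whose
    result type uses the distinguished element None to signal absence;
    the name alookup is hypothetical. *)
 primrec alookup :: "('a \<times> 'b) list \<Rightarrow> 'a \<Rightarrow> 'b option" where
 "alookup [] k = None" |
 "alookup (p#ps) k = (if fst p = k then Some (snd p) else alookup ps k)"
 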
 (*<*)
 (*
 definition infplus :: "nat option \<Rightarrow> nat option \<Rightarrow> nat option" where
--- a/src/Doc/Tutorial/Misc/Plus.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Plus.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,13 +2,13 @@
 theory Plus imports Main begin
 (*>*)
 
-text{*\noindent Define the following addition function *}
+text\<open>\noindent Define the following addition function\<close>
 
 primrec add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
 "add m 0 = m" |
 "add m (Suc n) = add (Suc m) n"
 
-text{*\noindent and prove*}
+text\<open>\noindent and prove\<close>
 (*<*)
 lemma [simp]: "!m. add m n = m+n"
 apply(induct_tac n)
--- a/src/Doc/Tutorial/Misc/Tree.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Tree.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory Tree imports Main begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Define the datatype of \rmindex{binary trees}:
-*}
+\<close>
 
 datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"(*<*)
 
@@ -12,10 +12,10 @@
 "mirror Tip = Tip" |
 "mirror (Node l x r) = Node (mirror r) x (mirror l)"(*>*)
 
-text{*\noindent
+text\<open>\noindent
 Define a function @{term"mirror"} that mirrors a binary tree
 by swapping subtrees recursively. Prove
-*}
+\<close>
 
 lemma mirror_mirror: "mirror(mirror t) = t"
 (*<*)
@@ -27,10 +27,10 @@
 "flatten (Node l x r) = flatten l @ [x] @ flatten r"
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Define a function @{term"flatten"} that flattens a tree into a list
 by traversing it in infix order. Prove
-*}
+\<close>
 
 lemma "flatten(mirror t) = rev(flatten t)"
 (*<*)
--- a/src/Doc/Tutorial/Misc/Tree2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/Tree2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,17 +2,17 @@
 theory Tree2 imports Tree begin
 (*>*)
 
-text{*\noindent In Exercise~\ref{ex:Tree} we defined a function
+text\<open>\noindent In Exercise~\ref{ex:Tree} we defined a function
 @{term"flatten"} from trees to lists. The straightforward version of
 @{term"flatten"} is based on @{text"@"} and is thus, like @{term"rev"},
 quadratic. A linear time version of @{term"flatten"} again requires an extra
-argument, the accumulator. Define *}
+argument, the accumulator. Define\<close>
 (*<*)primrec(*>*)flatten2 :: "'a tree \<Rightarrow> 'a list \<Rightarrow> 'a list"(*<*)where
 "flatten2 Tip xs = xs" |
 "flatten2 (Node l x r) xs = flatten2 l (x#(flatten2 r xs))"
 (*>*)
 
-text{*\noindent and prove*}
+text\<open>\noindent and prove\<close>
 (*<*)
 lemma [simp]: "!xs. flatten2 t xs = flatten t @ xs"
 apply(induct_tac t)
--- a/src/Doc/Tutorial/Misc/appendix.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/appendix.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 imports Main
 begin(*>*)
 
-text{*
+text\<open>
 \begin{table}[htbp]
 \begin{center}
 \begin{tabular}{lll}
@@ -28,6 +28,6 @@
 \label{tab:overloading}
 \end{center}
 \end{table}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Misc/case_exprs.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/case_exprs.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory case_exprs imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \subsection{Case Expressions}
 \label{sec:case-expressions}\index{*case expressions}%
 HOL also features \isa{case}-expressions for analyzing
@@ -50,20 +50,20 @@
 it works for any datatype.  In some cases, induction is overkill and a case
 distinction over all constructors of the datatype suffices.  This is performed
 by \methdx{case_tac}.  Here is a trivial example:
-*}
+\<close>
 
 lemma "(case xs of [] \<Rightarrow> [] | y#ys \<Rightarrow> xs) = xs"
 apply(case_tac xs)
 
-txt{*\noindent
+txt\<open>\noindent
 results in the proof state
 @{subgoals[display,indent=0,margin=65]}
 which is solved automatically:
-*}
+\<close>
 
 apply(auto)
 (*<*)done(*>*)
-text{*
+text\<open>
 Note that we do not need to give a lemma a name if we do not intend to refer
 to it explicitly in the future.
 Other basic laws about a datatype are applied automatically during
@@ -81,7 +81,7 @@
   the @{term xs} as a new free variable distinct from the bound
   @{term xs} in the goal.
 \end{warn}
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Misc/fakenat.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/fakenat.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,11 +2,11 @@
 theory fakenat imports Main begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 The type \tydx{nat} of natural
 numbers is predefined to have the constructors \cdx{0} and~\cdx{Suc}.
 It behaves approximately as if it were declared like this:
-*}
+\<close>
 
 datatype nat = zero ("0") | Suc nat
 (*<*)
--- a/src/Doc/Tutorial/Misc/natsum.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/natsum.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,26 +1,26 @@
 (*<*)
 theory natsum imports Main begin
 (*>*)
-text{*\noindent
+text\<open>\noindent
 In particular, there are @{text"case"}-expressions, for example
 @{term[display]"case n of 0 => 0 | Suc m => m"}
 primitive recursion, for example
-*}
+\<close>
 
 primrec sum :: "nat \<Rightarrow> nat" where
 "sum 0 = 0" |
 "sum (Suc n) = Suc n + sum n"
 
-text{*\noindent
+text\<open>\noindent
 and induction, for example
-*}
+\<close>
 
 lemma "sum n + sum n = n*(Suc n)"
 apply(induct_tac n)
 apply(auto)
 done
 
-text{*\newcommand{\mystar}{*%
+text\<open>\newcommand{\mystar}{*%
 }
 \index{arithmetic operations!for \protect\isa{nat}}%
 The arithmetic operations \isadxboldpos{+}{$HOL2arithfun},
@@ -73,40 +73,40 @@
 Both @{text auto} and @{text simp}
 (a method introduced below, \S\ref{sec:Simplification}) prove 
 simple arithmetic goals automatically:
-*}
+\<close>
 
 lemma "\<lbrakk> \<not> m < n; m < n + (1::nat) \<rbrakk> \<Longrightarrow> m = n"
 (*<*)by(auto)(*>*)
 
-text{*\noindent
+text\<open>\noindent
 For efficiency's sake, this built-in prover ignores quantified formulae,
 many logical connectives, and all arithmetic operations apart from addition.
 In consequence, @{text auto} and @{text simp} cannot prove this slightly more complex goal:
-*}
+\<close>
 
 lemma "m \<noteq> (n::nat) \<Longrightarrow> m < n \<or> n < m"
 (*<*)by(arith)(*>*)
 
-text{*\noindent The method \methdx{arith} is more general.  It attempts to
+text\<open>\noindent The method \methdx{arith} is more general.  It attempts to
 prove the first subgoal provided it is a \textbf{linear arithmetic} formula.
 Such formulas may involve the usual logical connectives (@{text"\<not>"},
 @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}, @{text"="},
 @{text"\<forall>"}, @{text"\<exists>"}), the relations @{text"="},
 @{text"\<le>"} and @{text"<"}, and the operations @{text"+"}, @{text"-"},
-@{term min} and @{term max}.  For example, *}
+@{term min} and @{term max}.  For example,\<close>
 
 lemma "min i (max j (k*k)) = max (min (k*k) i) (min i (j::nat))"
 apply(arith)
 (*<*)done(*>*)
 
-text{*\noindent
+text\<open>\noindent
 succeeds because @{term"k*k"} can be treated as atomic. In contrast,
-*}
+\<close>
 
 lemma "n*n = n+1 \<Longrightarrow> n=0"
 (*<*)oops(*>*)
 
-text{*\noindent
+text\<open>\noindent
 is not proved by @{text arith} because the proof relies 
 on properties of multiplication. Only multiplication by numerals (which is
 the same as iterated addition) is taken into account.
@@ -122,7 +122,7 @@
 If the formula involves quantifiers, @{text arith} may take
 super-exponential time and space.
 \end{warn}
-*}
+\<close>
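 
 (* Editor's sketch, not part of this changeset: since multiplication by a
    numeral is iterated addition, a goal like this should stay within the
    linear fragment that arith handles. *)
 lemma "3 * n = n + n + (n::nat)"
 by arith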
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Misc/pairs2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/pairs2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,7 +1,7 @@
 (*<*)
 theory pairs2 imports Main begin
 (*>*)
-text{*\label{sec:pairs}\index{pairs and tuples}
+text\<open>\label{sec:pairs}\index{pairs and tuples}
 HOL also has ordered pairs: \isa{($a@1$,$a@2$)} is of type $\tau@1$
 \indexboldpos{\isasymtimes}{$Isatype} $\tau@2$ provided each $a@i$ is of type
 $\tau@i$. The functions \cdx{fst} and
@@ -29,7 +29,7 @@
 records are preferable.
 \end{itemize}
 For more information on pairs and records see Chapter~\ref{ch:more-types}.
-*}
+\<close>
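 
 (* Editor's sketch, not part of this changeset: the projections described
    above, checked by simplification. *)
 lemma "fst (x, y) = x \<and> snd (x, y) = y"
 by simp
 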
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Misc/prime_def.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/prime_def.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory prime_def imports Main begin
 consts prime :: "nat \<Rightarrow> bool"
 (*>*)
-text{*
+text\<open>
 \begin{warn}
 A common mistake when writing definitions is to introduce extra free
 variables on the right-hand side.  Consider the following, flawed definition
@@ -14,7 +14,7 @@
 The correct version is
 @{term[display,quotes]"prime(p) == 1 < p & (!m. m dvd p --> (m=1 | m=p))"}
 \end{warn}
-*}
+\<close>
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Misc/simp.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/simp.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory simp imports Main begin
 (*>*)
 
-subsection{*Simplification Rules*}
+subsection\<open>Simplification Rules\<close>
 
-text{*\index{simplification rules}
+text\<open>\index{simplification rules}
 To facilitate simplification,  
 the attribute @{text"[simp]"}\index{*simp (attribute)}
 declares theorems to be simplification rules, which the simplifier
@@ -49,11 +49,11 @@
   different path) on $A$, it is not defined what the simplification attribute
   of that theorem will be in $C$: it could be either.
 \end{warn}
-*} 
+\<close> 
 
-subsection{*The {\tt\slshape simp}  Method*}
+subsection\<open>The {\tt\slshape simp}  Method\<close>
 
-text{*\index{*simp (method)|bold}
+text\<open>\index{*simp (method)|bold}
 The general format of the simplification method is
 \begin{quote}
 @{text simp} \textit{list of modifiers}
@@ -65,11 +65,11 @@
 only the first subgoal and may thus need to be repeated --- use
 \methdx{simp_all} to simplify all subgoals.
 If nothing changes, @{text simp} fails.
-*}
+\<close>
 
-subsection{*Adding and Deleting Simplification Rules*}
+subsection\<open>Adding and Deleting Simplification Rules\<close>
 
-text{*
+text\<open>
 \index{simplification rules!adding and deleting}%
 If a certain theorem is merely needed in a few proofs by simplification,
 we do not need to make it a global simplification rule. Instead we can modify
@@ -88,41 +88,41 @@
 \begin{quote}
 \isacommand{apply}@{text"(simp add: mod_mult_distrib add_mult_distrib)"}
 \end{quote}
-*}
+\<close>
 
-subsection{*Assumptions*}
+subsection\<open>Assumptions\<close>
 
-text{*\index{simplification!with/of assumptions}
+text\<open>\index{simplification!with/of assumptions}
 By default, assumptions are part of the simplification process: they are used
 as simplification rules and are simplified themselves. For example:
-*}
+\<close>
 
 lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs"
 apply simp
 done
 
-text{*\noindent
+text\<open>\noindent
 The second assumption simplifies to @{term"xs = []"}, which in turn
 simplifies the first assumption to @{term"zs = ys"}, thus reducing the
 conclusion to @{term"ys = ys"} and hence to @{term"True"}.
 
 In some cases, using the assumptions can lead to nontermination:
-*}
+\<close>
 
 lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
 
-txt{*\noindent
+txt\<open>\noindent
 An unmodified application of @{text"simp"} loops.  The culprit is the
 simplification rule @{term"f x = g (f (g x))"}, which is extracted from
 the assumption.  (Isabelle notices certain simple forms of
 nontermination but not this one.)  The problem can be circumvented by
 telling the simplifier to ignore the assumptions:
-*}
+\<close>
 
 apply(simp (no_asm))
 done
 
-text{*\noindent
+text\<open>\noindent
 Three modifiers influence the treatment of assumptions:
 \begin{description}
 \item[@{text"(no_asm)"}]\index{*no_asm (modifier)}
@@ -145,11 +145,11 @@
 %positive, and from left to right, if $n$ is negative.
 %Beware that such rotations make proofs quite brittle.
 %\end{warn}
-*}
+\<close>
 
-subsection{*Rewriting with Definitions*}
+subsection\<open>Rewriting with Definitions\<close>
 
-text{*\label{sec:Simp-with-Defs}\index{simplification!with definitions}
+text\<open>\label{sec:Simp-with-Defs}\index{simplification!with definitions}
 Constant definitions (\S\ref{sec:ConstDefinitions}) can be used as
 simplification rules, but by default they are not: the simplifier does not
 expand them automatically.  Definitions are intended for introducing abstract
@@ -159,32 +159,32 @@
 proofs more robust: if the definition has to be changed,
 only the proofs of the abstract properties will be affected.
 
-For example, given *}
+For example, given\<close>
 
 definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
 "xor A B \<equiv> (A \<and> \<not>B) \<or> (\<not>A \<and> B)"
 
-text{*\noindent
+text\<open>\noindent
 we may want to prove
-*}
+\<close>
 
 lemma "xor A (\<not>A)"
 
-txt{*\noindent
+txt\<open>\noindent
 Typically, we begin by unfolding some definitions:
 \indexbold{definitions!unfolding}
-*}
+\<close>
 
 apply(simp only: xor_def)
 
-txt{*\noindent
+txt\<open>\noindent
 In this particular case, the resulting goal
 @{subgoals[display,indent=0]}
 can be proved by simplification. Thus we could have proved the lemma outright by
-*}(*<*)oops lemma "xor A (\<not>A)"(*>*)
+\<close>(*<*)oops lemma "xor A (\<not>A)"(*>*)
 apply(simp add: xor_def)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 Of course we can also unfold definitions in the middle of a proof.
 
 \begin{warn}
@@ -199,78 +199,78 @@
 one or several definitions, as in \isacommand{apply}\isa{(unfold xor_def)}.
 This can be useful in situations where \isa{simp} does too much.
 Warning: \isa{unfold} acts on all subgoals!
-*}
+\<close>
 
-subsection{*Simplifying {\tt\slshape let}-Expressions*}
+subsection\<open>Simplifying {\tt\slshape let}-Expressions\<close>
 
-text{*\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
+text\<open>\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
 Proving a goal containing \isa{let}-expressions almost invariably requires the
 @{text"let"}-con\-structs to be expanded at some point. Since
 @{text"let"}\ldots\isa{=}\ldots@{text"in"}{\ldots} is just syntactic sugar for
 the predefined constant @{term"Let"}, expanding @{text"let"}-constructs
-means rewriting with \tdx{Let_def}: *}
+means rewriting with \tdx{Let_def}:\<close>
 
 lemma "(let xs = [] in xs@ys@xs) = ys"
 apply(simp add: Let_def)
 done
 
-text{*
+text\<open>
 If, in a particular context, there is no danger of a combinatorial explosion
 of nested @{text"let"}s, you could even simplify with @{thm[source]Let_def} by
 default:
-*}
+\<close>
 declare Let_def [simp]
 
-subsection{*Conditional Simplification Rules*}
+subsection\<open>Conditional Simplification Rules\<close>
 
-text{*
+text\<open>
 \index{conditional simplification rules}%
 So far all examples of rewrite rules were equations. The simplifier also
 accepts \emph{conditional} equations, for example
-*}
+\<close>
 
 lemma hd_Cons_tl[simp]: "xs \<noteq> []  \<Longrightarrow>  hd xs # tl xs = xs"
 apply(case_tac xs, simp, simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 Note the use of ``\ttindexboldpos{,}{$Isar}'' to string together a
 sequence of methods. Assuming that the simplification rule
 @{term"(rev xs = []) = (xs = [])"}
 is present as well,
 the lemma below is proved by plain simplification:
-*}
+\<close>
 
 lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) # tl(rev xs) = rev xs"
 (*<*)
 by(simp)
 (*>*)
-text{*\noindent
+text\<open>\noindent
 The conditional equation @{thm[source]hd_Cons_tl} above
 can simplify @{term"hd(rev xs) # tl(rev xs)"} to @{term"rev xs"}
 because the corresponding precondition @{term"rev xs ~= []"}
 simplifies to @{term"xs ~= []"}, which is exactly the local
 assumption of the subgoal.
-*}
+\<close>
 
 
-subsection{*Automatic Case Splits*}
+subsection\<open>Automatic Case Splits\<close>
 
-text{*\label{sec:AutoCaseSplits}\indexbold{case splits}%
+text\<open>\label{sec:AutoCaseSplits}\indexbold{case splits}%
 Goals containing @{text"if"}-expressions\index{*if expressions!splitting of}
 are usually proved by case
 distinction on the boolean condition.  Here is an example:
-*}
+\<close>
 
 lemma "\<forall>xs. if xs = [] then rev xs = [] else rev xs \<noteq> []"
 
-txt{*\noindent
+txt\<open>\noindent
 The goal can be split by a special method, \methdx{split}:
-*}
+\<close>
 
 apply(split if_split)
 
-txt{*\noindent
+txt\<open>\noindent
 @{subgoals[display,indent=0]}
 where \tdx{if_split} is a theorem that expresses splitting of
 @{text"if"}s. Because
@@ -280,11 +280,11 @@
 
 This splitting idea generalizes from @{text"if"} to \sdx{case}.
 Let us simplify a case analysis over lists:\index{*list.split (theorem)}
-*}(*<*)by simp(*>*)
+\<close>(*<*)by simp(*>*)
 lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
 apply(split list.split)
  
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 The simplifier does not split
 @{text"case"}-expressions, as it does @{text"if"}-expressions, 
@@ -293,26 +293,26 @@
 @{text split}\index{*split (modifier)} 
 for adding splitting rules explicitly.  The
 lemma above can be proved in one step by
-*}
+\<close>
 (*<*)oops
 lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
 (*>*)
 apply(simp split: list.split)
 (*<*)done(*>*)
-text{*\noindent
+text\<open>\noindent
 whereas \isacommand{apply}@{text"(simp)"} alone will not succeed.
 
 Every datatype $t$ comes with a theorem
 $t$@{text".split"} which can be declared to be a \bfindex{split rule} either
 locally as above, or by giving it the \attrdx{split} attribute globally:
-*}
+\<close>
 
 declare list.split [split]
 
-text{*\noindent
+text\<open>\noindent
 The @{text"split"} attribute can be removed with the @{text"del"} modifier,
 either locally
-*}
+\<close>
 (*<*)
 lemma "dummy=dummy"
 (*>*)
@@ -320,12 +320,12 @@
 (*<*)
 oops
 (*>*)
-text{*\noindent
+text\<open>\noindent
 or globally:
-*}
+\<close>
 declare list.split [split del]
 
-text{*
+text\<open>
 Polished proofs typically perform splitting within @{text simp} rather than 
 invoking the @{text split} method.  However, if a goal contains
 several @{text "if"} and @{text case} expressions, 
@@ -335,12 +335,12 @@
 The split rules shown above are intended to affect only the subgoal's
 conclusion.  If you want to split an @{text"if"} or @{text"case"}-expression
 in the assumptions, you have to apply \tdx{if_split_asm} or
-$t$@{text".split_asm"}: *}
+$t$@{text".split_asm"}:\<close>
 
 lemma "if xs = [] then ys \<noteq> [] else ys = [] \<Longrightarrow> xs @ ys \<noteq> []"
 apply(split if_split_asm)
 
-txt{*\noindent
+txt\<open>\noindent
 Unlike splitting the conclusion, this step creates two
 separate subgoals, which here can be solved by @{text"simp_all"}:
 @{subgoals[display,indent=0]}
@@ -357,22 +357,22 @@
   simplified at first, until either the expression reduces to one of the
   cases or it is split.
 \end{warn}
-*}
+\<close>
 (*<*)
 by(simp_all)
 (*>*)
 
-subsection{*Tracing*}
-text{*\indexbold{tracing the simplifier}
+subsection\<open>Tracing\<close>
+text\<open>\indexbold{tracing the simplifier}
 Using the simplifier effectively may take a bit of experimentation.  Set the
 Proof General flag \pgmenu{Isabelle} $>$ \pgmenu{Settings} $>$ \pgmenu{Trace Simplifier} to get a better idea of what is going on:
-*}
+\<close>
 
 lemma "rev [a] = []"
 apply(simp)
 (*<*)oops(*>*)
 
-text{*\noindent
+text\<open>\noindent
 produces the following trace in Proof General's \pgmenu{Trace} buffer:
 
 \begin{ttbox}\makeatother
@@ -418,7 +418,7 @@
 obtained the desired trace.
 Since this is easily forgotten (and may have the unpleasant effect of
 swamping the interface with trace information), here is how you can switch
-the trace on locally in a proof: *}
+the trace on locally in a proof:\<close>
 
 (*<*)lemma "x=x"
 (*>*)
@@ -426,14 +426,14 @@
 apply simp
 (*<*)oops(*>*)
 
-text{* \noindent
+text\<open>\noindent
 Within the current proof, all simplifications in subsequent proof steps
 will be traced, but the text reminds you to remove the \isa{using} clause
-after it has done its job. *}
+after it has done its job.\<close>
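 
 text\<open>\noindent
 For instance, the following illustrative proof traces only its own
 simplifier invocation:\<close>
 
 lemma "rev(rev [a]) = [a]"
 using [[simp_trace]]  (* remove once the trace has been inspected *)
 by simp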
 
-subsection{*Finding Theorems\label{sec:find}*}
+subsection\<open>Finding Theorems\label{sec:find}\<close>
 
-text{*\indexbold{finding theorems}\indexbold{searching theorems}
+text\<open>\indexbold{finding theorems}\indexbold{searching theorems}
 Isabelle's large database of proved theorems 
 offers a powerful search engine. Its chief limitation is
 its restriction to the theories currently loaded.
@@ -512,7 +512,7 @@
 through previous searches and just modify them. This saves you having
 to type in lengthy expressions again and again.
 \end{pgnote}
-*}
+\<close>
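 
 text\<open>For instance, the following illustrative query finds lemmas whose
 statement matches a nested-append pattern and whose name mentions
 @{text assoc}:\<close>
 
 (* illustrative search; the exact hits depend on the loaded theories *)
 find_theorems "(_ @ _) @ _" name: "assoc"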
 (*<*)
 end
 (*>*)
--- a/src/Doc/Tutorial/Misc/types.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Misc/types.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,30 +3,30 @@
 type_synonym gate = "bool \<Rightarrow> bool \<Rightarrow> bool"
 type_synonym ('a, 'b) alist = "('a \<times> 'b) list"
 
-text{*\noindent
+text\<open>\noindent
 Internally all synonyms are fully expanded.  As a consequence Isabelle's
 output never contains synonyms.  Their main purpose is to improve the
 readability of theories.  Synonyms can be used just like any other
 type.
-*}
+\<close>
 
-subsection{*Constant Definitions*}
+subsection\<open>Constant Definitions\<close>
 
-text{*\label{sec:ConstDefinitions}\indexbold{definitions}%
+text\<open>\label{sec:ConstDefinitions}\indexbold{definitions}%
 Nonrecursive definitions can be made with the \commdx{definition}
 command, for example @{text nand} and @{text xor} gates
 (based on type @{typ gate} above):
-*}
+\<close>
 
 definition nand :: gate where "nand A B \<equiv> \<not>(A \<and> B)"
 definition xor  :: gate where "xor  A B \<equiv> A \<and> \<not>B \<or> \<not>A \<and> B"
 
-text{*\noindent%
+text\<open>\noindent%
 The symbol \indexboldpos{\isasymequiv}{$IsaEq} is a special form of equality
 that must be used in constant definitions.
 Pattern-matching is not allowed: each definition must be of
 the form $f\,x@1\,\dots\,x@n~\isasymequiv~t$.
 Section~\ref{sec:Simp-with-Defs} explains how definitions are used
 in proofs. The default name of each definition is $f$@{text"_def"}, where
-$f$ is the name of the defined constant.*}
+$f$ is the name of the defined constant.\<close>
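 
 text\<open>\noindent
 An illustrative consequence, proved by unfolding @{thm[source] xor_def}:\<close>
 
 (* illustrative lemma about the definition above *)
 lemma "xor A A = False"
 by (simp add: xor_def)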
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Protocol/Event.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Event.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -7,7 +7,7 @@
     stores are visible to him
 *)(*<*)
 
-section{*Theory of Events for Security Protocols*}
+section\<open>Theory of Events for Security Protocols\<close>
 
 theory Event imports Message begin
 
@@ -20,10 +20,10 @@
         | Notes agent       msg
        
 consts 
-  bad    :: "agent set"                         -- {* compromised agents *}
+  bad    :: "agent set"                         \<comment> \<open>compromised agents\<close>
 
 
-text{*The constant "spies" is retained for compatibility's sake*}
+text\<open>The constant "spies" is retained for compatibility's sake\<close>
 
 primrec
   knows :: "agent => event list => msg set"
@@ -50,7 +50,7 @@
   spies  :: "event list => msg set" where
   "spies == knows Spy"
 
-text{*Spy has access to his own key for spoof messages, but Server is secure*}
+text\<open>Spy has access to his own key for spoof messages, but Server is secure\<close>
 specification (bad)
   Spy_in_bad     [iff]: "Spy \<in> bad"
   Server_not_bad [iff]: "Server \<notin> bad"
@@ -73,9 +73,9 @@
                         Says A B X => parts {X} \<union> used evs
                       | Gets A X   => used evs
                       | Notes A X  => parts {X} \<union> used evs)"
-    --{*The case for @{term Gets} seems anomalous, but @{term Gets} always
+    \<comment>\<open>The case for @{term Gets} seems anomalous, but @{term Gets} always
         follows @{term Says} in real protocols.  Seems difficult to change.
-        See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}. *}
+        See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}.\<close>
 
 lemma Notes_imp_used [rule_format]: "Notes A X \<in> set evs --> X \<in> used evs"
 apply (induct_tac evs)
@@ -88,7 +88,7 @@
 done
 
 
-subsection{*Function @{term knows}*}
+subsection\<open>Function @{term knows}\<close>
 
 (*Simplifying   
  parts(insert X (knows Spy evs)) = parts{X} \<union> parts(knows Spy evs).
@@ -99,8 +99,8 @@
      "knows Spy (Says A B X # evs) = insert X (knows Spy evs)"
 by simp
 
-text{*Letting the Spy see "bad" agents' notes avoids redundant case-splits
-      on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}*}
+text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
+      on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
 lemma knows_Spy_Notes [simp]:
      "knows Spy (Notes A X # evs) =  
           (if A:bad then insert X (knows Spy evs) else knows Spy evs)"
@@ -121,7 +121,7 @@
      "knows Spy evs \<subseteq> knows Spy (Gets A X # evs)"
 by (simp add: subset_insertI)
 
-text{*Spy sees what is sent on the traffic*}
+text\<open>Spy sees what is sent on the traffic\<close>
 lemma Says_imp_knows_Spy [rule_format]:
      "Says A B X \<in> set evs --> X \<in> knows Spy evs"
 apply (induct_tac "evs")
@@ -135,21 +135,21 @@
 done
 
 
-text{*Elimination rules: derive contradictions from old Says events containing
-  items known to be fresh*}
+text\<open>Elimination rules: derive contradictions from old Says events containing
+  items known to be fresh\<close>
 lemmas knows_Spy_partsEs =
      Says_imp_knows_Spy [THEN parts.Inj, elim_format] 
      parts.Body [elim_format]
 
 lemmas Says_imp_analz_Spy = Says_imp_knows_Spy [THEN analz.Inj]
 
-text{*Compatibility for the old "spies" function*}
+text\<open>Compatibility for the old "spies" function\<close>
 lemmas spies_partsEs = knows_Spy_partsEs
 lemmas Says_imp_spies = Says_imp_knows_Spy
 lemmas parts_insert_spies = parts_insert_knows_A [of _ Spy]
 
 
-subsection{*Knowledge of Agents*}
+subsection\<open>Knowledge of Agents\<close>
 
 lemma knows_Says: "knows A (Says A B X # evs) = insert X (knows A evs)"
 by simp
@@ -171,21 +171,21 @@
 lemma knows_subset_knows_Gets: "knows A evs \<subseteq> knows A (Gets A' X # evs)"
 by (simp add: subset_insertI)
 
-text{*Agents know what they say*}
+text\<open>Agents know what they say\<close>
 lemma Says_imp_knows [rule_format]: "Says A B X \<in> set evs --> X \<in> knows A evs"
 apply (induct_tac "evs")
 apply (simp_all (no_asm_simp) split: event.split)
 apply blast
 done
 
-text{*Agents know what they note*}
+text\<open>Agents know what they note\<close>
 lemma Notes_imp_knows [rule_format]: "Notes A X \<in> set evs --> X \<in> knows A evs"
 apply (induct_tac "evs")
 apply (simp_all (no_asm_simp) split: event.split)
 apply blast
 done
 
-text{*Agents know what they receive*}
+text\<open>Agents know what they receive\<close>
 lemma Gets_imp_knows_agents [rule_format]:
      "A \<noteq> Spy --> Gets A X \<in> set evs --> X \<in> knows A evs"
 apply (induct_tac "evs")
@@ -193,8 +193,8 @@
 done
 
 
-text{*What agents DIFFERENT FROM Spy know 
-  was either said, or noted, or got, or known initially*}
+text\<open>What agents DIFFERENT FROM Spy know 
+  was either said, or noted, or got, or known initially\<close>
 lemma knows_imp_Says_Gets_Notes_initState [rule_format]:
      "[| X \<in> knows A evs; A \<noteq> Spy |] ==> EX B.  
   Says A B X \<in> set evs | Gets A X \<in> set evs | Notes A X \<in> set evs | X \<in> initState A"
@@ -204,8 +204,8 @@
 apply blast
 done
 
-text{*What the Spy knows -- for the time being --
-  was either said or noted, or known initially*}
+text\<open>What the Spy knows -- for the time being --
+  was either said or noted, or known initially\<close>
 lemma knows_Spy_imp_Says_Notes_initState [rule_format]:
      "[| X \<in> knows Spy evs |] ==> EX A B.  
   Says A B X \<in> set evs | Notes A X \<in> set evs | X \<in> initState Spy"
@@ -241,15 +241,15 @@
 apply (blast intro: initState_into_used)
 done
 
-text{*NOTE REMOVAL--laws above are cleaner, as they don't involve "case"*}
+text\<open>NOTE REMOVAL--laws above are cleaner, as they don't involve "case"\<close>
 declare knows_Cons [simp del]
         used_Nil [simp del] used_Cons [simp del]
 
 
-text{*For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) --> P"}
+text\<open>For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) --> P"}
   New events added by induction to "evs" are discarded.  Provided 
   this information isn't needed, the proof will be much shorter, since
-  it will omit complicated reasoning about @{term analz}.*}
+  it will omit complicated reasoning about @{term analz}.\<close>
 
 lemmas analz_mono_contra =
        knows_Spy_subset_knows_Spy_Says [THEN analz_mono, THEN contra_subsetD]
@@ -259,12 +259,12 @@
 lemmas analz_impI = impI [where P = "Y \<notin> analz (knows Spy evs)"] for Y evs
 
 ML
-{*
+\<open>
 fun analz_mono_contra_tac ctxt =
   resolve_tac ctxt @{thms analz_impI} THEN' 
   REPEAT1 o (dresolve_tac ctxt @{thms analz_mono_contra})
   THEN' mp_tac ctxt
-*}
+\<close>
 
 lemma knows_subset_knows_Cons: "knows A evs \<subseteq> knows A (e # evs)"
 by (induct e, auto simp: knows_Cons)
@@ -275,7 +275,7 @@
 done
 
 
-text{*For proving @{text new_keys_not_used}*}
+text\<open>For proving @{text new_keys_not_used}\<close>
 lemma keysFor_parts_insert:
      "[| K \<in> keysFor (parts (insert X G));  X \<in> synth (analz H) |] 
       ==> K \<in> keysFor (parts (G \<union> H)) | Key (invKey K) \<in> parts H" 
@@ -284,16 +284,16 @@
            analz_subset_parts [THEN keysFor_mono, THEN [2] rev_subsetD]
     intro: analz_subset_parts [THEN subsetD] parts_mono [THEN [2] rev_subsetD])
 
-method_setup analz_mono_contra = {*
-    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (analz_mono_contra_tac ctxt))) *}
+method_setup analz_mono_contra = \<open>
+    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (analz_mono_contra_tac ctxt)))\<close>
     "for proving theorems of the form X \<notin> analz (knows Spy evs) --> P"
 
-subsubsection{*Useful for case analysis on whether a hash is a spoof or not*}
+subsubsection\<open>Useful for case analysis on whether a hash is a spoof or not\<close>
 
 lemmas syan_impI = impI [where P = "Y \<notin> synth (analz (knows Spy evs))"] for Y evs
 
 ML
-{*
+\<open>
 val knows_Cons = @{thm knows_Cons};
 val used_Nil = @{thm used_Nil};
 val used_Cons = @{thm used_Cons};
@@ -339,16 +339,16 @@
       @{thm knows_Spy_subset_knows_Spy_Gets} RS @{thm synth_analz_mono} RS @{thm contra_subsetD}])
   THEN'
   mp_tac ctxt
-*}
+\<close>
 
-method_setup synth_analz_mono_contra = {*
-    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (synth_analz_mono_contra_tac ctxt))) *}
+method_setup synth_analz_mono_contra = \<open>
+    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (synth_analz_mono_contra_tac ctxt)))\<close>
     "for proving theorems of the form X \<notin> synth (analz (knows Spy evs)) --> P"
 (*>*)
 
-section{* Event Traces \label{sec:events} *}
+section\<open>Event Traces \label{sec:events}\<close>
 
-text {*
+text \<open>
 The system's behaviour is formalized as a set of traces of
 \emph{events}.  The most important event, @{text "Says A B X"}, expresses
 $A\to B : X$, which is the attempt by~$A$ to send~$B$ the message~$X$.
@@ -379,7 +379,7 @@
 \item @{term "synth (analz (knows Spy evs))"} is everything that the spy
 could generate
 \end{itemize}
-*}
+\<close>
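 
 text\<open>A small illustrative instance of these notions: anything an agent
 says on the network ends up in the spy's knowledge.\<close>
 
 (* illustrative lemma; Says_imp_knows_Spy was proved above *)
 lemma "Says A B X \<in> set evs \<Longrightarrow> X \<in> knows Spy evs"
 by (erule Says_imp_knows_Spy)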
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Protocol/Message.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Message.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -5,7 +5,7 @@
 Inductive relations "parts", "analz" and "synth"
 *)(*<*)
 
-section{*Theory of Agents and Messages for Security Protocols*}
+section\<open>Theory of Agents and Messages for Security Protocols\<close>
 
 theory Message imports Main begin
 ML_file "../../antiquote_setup.ML"
@@ -15,27 +15,27 @@
 by blast
 (*>*)
 
-section{* Agents and Messages *}
+section\<open>Agents and Messages\<close>
 
-text {*
+text \<open>
 All protocol specifications refer to a syntactic theory of messages. 
 Datatype
 @{text agent} introduces the constant @{text Server} (a trusted central
 machine, needed for some protocols), an infinite population of
 friendly agents, and the~@{text Spy}:
-*}
+\<close>
 
 datatype agent = Server | Friend nat | Spy
 
-text {*
+text \<open>
 Keys are just natural numbers.  Function @{text invKey} maps a public key to
 the matching private key, and vice versa:
-*}
+\<close>
 
 type_synonym key = nat
 consts invKey :: "key \<Rightarrow> key"
 (*<*)
-consts all_symmetric :: bool        --{*true if all keys are symmetric*}
+consts all_symmetric :: bool        \<comment>\<open>true if all keys are symmetric\<close>
 
 specification (invKey)
   invKey [simp]: "invKey (invKey K) = K"
@@ -43,18 +43,18 @@
     by (rule exI [of _ id], auto)
 
 
-text{*The inverse of a symmetric key is itself; that of a public key
-      is the private key and vice versa*}
+text\<open>The inverse of a symmetric key is itself; that of a public key
+      is the private key and vice versa\<close>
 
 definition symKeys :: "key set" where
   "symKeys == {K. invKey K = K}"
 (*>*)
 
-text {*
+text \<open>
 Datatype
 @{text msg} introduces the message forms, which include agent names, nonces,
 keys, compound messages, and encryptions.  
-*}
+\<close>
 
 datatype
      msg = Agent  agent
@@ -63,7 +63,7 @@
          | MPair  msg msg
          | Crypt  key msg
 
-text {*
+text \<open>
 \noindent
 The notation $\comp{X\sb 1,\ldots X\sb{n-1},X\sb n}$
 abbreviates
@@ -76,10 +76,10 @@
 wrong key succeeds but yields garbage.  Our model of encryption is
 realistic if encryption adds some redundancy to the plaintext, such as a
 checksum, so that garbage can be detected.
-*}
+\<close>
 
 (*<*)
-text{*Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...*}
+text\<open>Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...\<close>
 syntax
   "_MTuple"      :: "['a, args] => 'a * 'b"       ("(2\<lbrace>_,/ _\<rbrace>)")
 translations
@@ -88,11 +88,11 @@
 
 
 definition keysFor :: "msg set => key set" where
-    --{*Keys useful to decrypt elements of a message set*}
+    \<comment>\<open>Keys useful to decrypt elements of a message set\<close>
   "keysFor H == invKey ` {K. \<exists>X. Crypt K X \<in> H}"
 
 
-subsubsection{*Inductive Definition of All Parts" of a Message*}
+subsubsection\<open>Inductive Definition of All Parts of a Message\<close>
 
 inductive_set
   parts :: "msg set => msg set"
@@ -104,7 +104,7 @@
   | Body:        "Crypt K X \<in> parts H ==> X \<in> parts H"
 
 
-text{*Monotonicity*}
+text\<open>Monotonicity\<close>
 lemma parts_mono: "G \<subseteq> H ==> parts(G) \<subseteq> parts(H)"
 apply auto
 apply (erule parts.induct) 
@@ -112,7 +112,7 @@
 done
 
 
-text{*Equations hold because constructors are injective.*}
+text\<open>Equations hold because constructors are injective.\<close>
 lemma Friend_image_eq [simp]: "(Friend x \<in> Friend`A) = (x:A)"
 by auto
 
@@ -123,7 +123,7 @@
 by auto
 
 
-subsubsection{*Inverse of keys *}
+subsubsection\<open>Inverse of keys\<close>
 
 lemma invKey_eq [simp]: "(invKey K = invKey K') = (K=K')"
 apply safe
@@ -131,7 +131,7 @@
 done
 
 
-subsection{*keysFor operator*}
+subsection\<open>keysFor operator\<close>
 
 lemma keysFor_empty [simp]: "keysFor {} = {}"
 by (unfold keysFor_def, blast)
@@ -142,7 +142,7 @@
 lemma keysFor_UN [simp]: "keysFor (\<Union>i\<in>A. H i) = (\<Union>i\<in>A. keysFor (H i))"
 by (unfold keysFor_def, blast)
 
-text{*Monotonicity*}
+text\<open>Monotonicity\<close>
 lemma keysFor_mono: "G \<subseteq> H ==> keysFor(G) \<subseteq> keysFor(H)"
 by (unfold keysFor_def, blast)
 
@@ -169,7 +169,7 @@
 by (unfold keysFor_def, blast)
 
 
-subsection{*Inductive relation "parts"*}
+subsection\<open>Inductive relation "parts"\<close>
 
 lemma MPair_parts:
      "[| \<lbrace>X,Y\<rbrace> \<in> parts H;        
@@ -177,10 +177,10 @@
 by (blast dest: parts.Fst parts.Snd) 
 
 declare MPair_parts [elim!]  parts.Body [dest!]
-text{*NB These two rules are UNSAFE in the formal sense, as they discard the
+text\<open>NB These two rules are UNSAFE in the formal sense, as they discard the
      compound message.  They work well on THIS FILE.  
   @{text MPair_parts} is left as SAFE because it speeds up proofs.
-  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.*}
+  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.\<close>
 
 lemma parts_increasing: "H \<subseteq> parts(H)"
 by blast
@@ -195,12 +195,12 @@
 lemma parts_emptyE [elim!]: "X\<in> parts{} ==> P"
 by simp
 
-text{*WARNING: loops if H = {Y}, therefore must not be repeated!*}
+text\<open>WARNING: loops if H = {Y}, therefore must not be repeated!\<close>
 lemma parts_singleton: "X\<in> parts H ==> \<exists>Y\<in>H. X\<in> parts {Y}"
 by (erule parts.induct, fast+)
 
 
-subsubsection{*Unions *}
+subsubsection\<open>Unions\<close>
 
 lemma parts_Un_subset1: "parts(G) \<union> parts(H) \<subseteq> parts(G \<union> H)"
 by (intro Un_least parts_mono Un_upper1 Un_upper2)
@@ -218,8 +218,8 @@
 apply (simp only: parts_Un)
 done
 
-text{*TWO inserts to avoid looping.  This rewrite is better than nothing.
-  Not suitable for Addsimps: its behaviour can be strange.*}
+text\<open>TWO inserts to avoid looping.  This rewrite is better than nothing.
+  Not suitable for Addsimps: its behaviour can be strange.\<close>
 lemma parts_insert2:
      "parts (insert X (insert Y H)) = parts {X} \<union> parts {Y} \<union> parts H"
 apply (simp add: Un_assoc)
@@ -237,12 +237,12 @@
 lemma parts_UN [simp]: "parts(\<Union>x\<in>A. H x) = (\<Union>x\<in>A. parts(H x))"
 by (intro equalityI parts_UN_subset1 parts_UN_subset2)
 
-text{*Added to simplify arguments to parts, analz and synth.
-  NOTE: the UN versions are no longer used!*}
+text\<open>Added to simplify arguments to parts, analz and synth.
+  NOTE: the UN versions are no longer used!\<close>
 
 
-text{*This allows @{text blast} to simplify occurrences of 
-  @{term "parts(G\<union>H)"} in the assumption.*}
+text\<open>This allows @{text blast} to simplify occurrences of 
+  @{term "parts(G\<union>H)"} in the assumption.\<close>
 lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE] 
 declare in_parts_UnE [elim!]
 
@@ -250,7 +250,7 @@
 lemma parts_insert_subset: "insert X (parts H) \<subseteq> parts(insert X H)"
 by (blast intro: parts_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma parts_partsD [dest!]: "X\<in> parts (parts H) ==> X\<in> parts H"
 by (erule parts.induct, blast+)
@@ -267,7 +267,7 @@
 lemma parts_trans: "[| X\<in> parts G;  G \<subseteq> parts H |] ==> X\<in> parts H"
 by (drule parts_mono, blast)
 
-text{*Cut*}
+text\<open>Cut\<close>
 lemma parts_cut:
      "[| Y\<in> parts (insert X G);  X\<in> parts H |] ==> Y\<in> parts (G \<union> H)" 
 by (blast intro: parts_trans) 
@@ -277,7 +277,7 @@
 by (force dest!: parts_cut intro: parts_insertI)
 
 
-subsubsection{*Rewrite rules for pulling out atomic messages *}
+subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
 
 lemmas parts_insert_eq_I = equalityI [OF subsetI parts_insert_subset]
 
@@ -323,21 +323,21 @@
 done
 
 
-text{*In any message, there is an upper bound N on its greatest nonce.*}
+text\<open>In any message, there is an upper bound N on its greatest nonce.\<close>
 lemma msg_Nonce_supply: "\<exists>N. \<forall>n. N\<le>n --> Nonce n \<notin> parts {msg}"
 apply (induct_tac "msg")
 apply (simp_all (no_asm_simp) add: exI parts_insert2)
- txt{*MPair case: blast works out the necessary sum itself!*}
+ txt\<open>MPair case: blast works out the necessary sum itself!\<close>
  prefer 2 apply auto apply (blast elim!: add_leE)
-txt{*Nonce case*}
+txt\<open>Nonce case\<close>
 apply (rename_tac nat)
 apply (rule_tac x = "N + Suc nat" in exI, auto) 
 done
 (*>*)
 
-section{* Modelling the Adversary *}
+section\<open>Modelling the Adversary\<close>
 
-text {*
+text \<open>
 The spy is part of the system and must be built into the model.  He is
 a malicious user who does not have to follow the protocol.  He
 watches the network and uses any keys he knows to decrypt messages.
@@ -349,7 +349,7 @@
 messages. The set @{text "analz H"} formalizes what the adversary can learn
 from the set of messages~$H$.  The closure properties of this set are
 defined inductively.
-*}
+\<close>
 
 inductive_set
   analz :: "msg set \<Rightarrow> msg set"
@@ -362,14 +362,14 @@
              "\<lbrakk>Crypt K X \<in> analz H; Key(invKey K) \<in> analz H\<rbrakk>
               \<Longrightarrow> X \<in> analz H"
 (*<*)
-text{*Monotonicity; Lemma 1 of Lowe's paper*}
+text\<open>Monotonicity; Lemma 1 of Lowe's paper\<close>
 lemma analz_mono: "G\<subseteq>H ==> analz(G) \<subseteq> analz(H)"
 apply auto
 apply (erule analz.induct) 
 apply (auto dest: analz.Fst analz.Snd) 
 done
 
-text{*Making it safe speeds up proofs*}
+text\<open>Making it safe speeds up proofs\<close>
 lemma MPair_analz [elim!]:
      "[| \<lbrace>X,Y\<rbrace> \<in> analz H;        
              [| X \<in> analz H; Y \<in> analz H |] ==> P   
@@ -402,22 +402,22 @@
 
 lemmas analz_insertI = subset_insertI [THEN analz_mono, THEN [2] rev_subsetD]
 
-subsubsection{*General equational properties *}
+subsubsection\<open>General equational properties\<close>
 
 lemma analz_empty [simp]: "analz{} = {}"
 apply safe
 apply (erule analz.induct, blast+)
 done
 
-text{*Converse fails: we can analz more from the union than from the 
-  separate parts, as a key in one might decrypt a message in the other*}
+text\<open>Converse fails: we can analz more from the union than from the 
+  separate parts, as a key in one might decrypt a message in the other\<close>
 lemma analz_Un: "analz(G) \<union> analz(H) \<subseteq> analz(G \<union> H)"
 by (intro Un_least analz_mono Un_upper1 Un_upper2)
 
 lemma analz_insert: "insert X (analz H) \<subseteq> analz(insert X H)"
 by (blast intro: analz_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Rewrite rules for pulling out atomic messages *}
+subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
 
 lemmas analz_insert_eq_I = equalityI [OF subsetI analz_insert]
 
@@ -433,7 +433,7 @@
 apply (erule analz.induct, auto) 
 done
 
-text{*Can only pull out Keys if they are not needed to decrypt the rest*}
+text\<open>Can only pull out Keys if they are not needed to decrypt the rest\<close>
 lemma analz_insert_Key [simp]: 
     "K \<notin> keysFor (analz H) ==>   
           analz (insert (Key K) H) = insert (Key K) (analz H)"
@@ -452,7 +452,7 @@
 apply (blast intro: analz.Fst analz.Snd)+
 done
 
-text{*Can pull out enCrypted message if the Key is not known*}
+text\<open>Can pull out enCrypted message if the Key is not known\<close>
 lemma analz_insert_Crypt:
      "Key (invKey K) \<notin> analz H 
       ==> analz (insert (Crypt K X) H) = insert (Crypt K X) (analz H)"
@@ -482,10 +482,10 @@
                insert (Crypt K X) (analz (insert X H))"
 by (intro equalityI lemma1 lemma2)
 
-text{*Case analysis: either the message is secure, or it is not! Effective,
+text\<open>Case analysis: either the message is secure, or it is not! Effective,
 but can cause subgoals to blow up! Use with @{text "if_split"}; apparently
 @{text "split_tac"} does not cope with patterns such as @{term"analz (insert
-(Crypt K X) H)"} *} 
+(Crypt K X) H)"}\<close> 
 lemma analz_Crypt_if [simp]:
      "analz (insert (Crypt K X) H) =                 
           (if (Key (invKey K) \<in> analz H)                 
@@ -494,7 +494,7 @@
 by (simp add: analz_insert_Crypt analz_insert_Decrypt)
 
 
-text{*This rule supposes "for the sake of argument" that we have the key.*}
+text\<open>This rule supposes "for the sake of argument" that we have the key.\<close>
 lemma analz_insert_Crypt_subset:
      "analz (insert (Crypt K X) H) \<subseteq>   
            insert (Crypt K X) (analz (insert X H))"
@@ -509,7 +509,7 @@
 done
 
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma analz_analzD [dest!]: "X\<in> analz (analz H) ==> X\<in> analz H"
 by (erule analz.induct, blast+)
@@ -526,7 +526,7 @@
 lemma analz_trans: "[| X\<in> analz G;  G \<subseteq> analz H |] ==> X\<in> analz H"
 by (drule analz_mono, blast)
 
-text{*Cut; Lemma 2 of Lowe*}
+text\<open>Cut; Lemma 2 of Lowe\<close>
 lemma analz_cut: "[| Y\<in> analz (insert X H);  X\<in> analz H |] ==> Y\<in> analz H"
 by (erule analz_trans, blast)
 
@@ -534,14 +534,14 @@
    "Y: analz (insert X H) ==> X: analz H --> Y: analz H"
 *)
 
-text{*This rewrite rule helps in the simplification of messages that involve
+text\<open>This rewrite rule helps in the simplification of messages that involve
   the forwarding of unknown components (X).  Without it, removing occurrences
-  of X can be very complicated. *}
+  of X can be very complicated.\<close>
 lemma analz_insert_eq: "X\<in> analz H ==> analz (insert X H) = analz H"
 by (blast intro: analz_cut analz_insertI)
 
 
-text{*A congruence rule for "analz" *}
+text\<open>A congruence rule for "analz"\<close>
 
 lemma analz_subset_cong:
      "[| analz G \<subseteq> analz G'; analz H \<subseteq> analz H' |] 
@@ -559,14 +559,14 @@
      "analz H = analz H' ==> analz(insert X H) = analz(insert X H')"
 by (force simp only: insert_def intro!: analz_cong)
 
-text{*If there are no pairs or encryptions then analz does nothing*}
+text\<open>If there are no pairs or encryptions then analz does nothing\<close>
 lemma analz_trivial:
      "[| \<forall>X Y. \<lbrace>X,Y\<rbrace> \<notin> H;  \<forall>X K. Crypt K X \<notin> H |] ==> analz H = H"
 apply safe
 apply (erule analz.induct, blast+)
 done
 
-text{*These two are obsolete (with a single Spy) but cost little to prove...*}
+text\<open>These two are obsolete (with a single Spy) but cost little to prove...\<close>
 lemma analz_UN_analz_lemma:
      "X\<in> analz (\<Union>i\<in>A. analz (H i)) ==> X\<in> analz (\<Union>i\<in>A. H i)"
 apply (erule analz.induct)
@@ -576,7 +576,7 @@
 lemma analz_UN_analz [simp]: "analz (\<Union>i\<in>A. analz (H i)) = analz (\<Union>i\<in>A. H i)"
 by (blast intro: analz_UN_analz_lemma analz_mono [THEN [2] rev_subsetD])
 (*>*)
-text {*
+text \<open>
 Note the @{text Decrypt} rule: the spy can decrypt a
 message encrypted with key~$K$ if he has the matching key,~$K^{-1}$. 
 Properties proved by rule induction include the following:
@@ -585,7 +585,7 @@
 The set of fake messages that an intruder could invent
 starting from~@{text H} is @{text "synth(analz H)"}, where @{text "synth H"}
 formalizes what the adversary can build from the set of messages~$H$.  
-*}
+\<close>
 
 inductive_set
   synth :: "msg set \<Rightarrow> msg set"
@@ -618,7 +618,7 @@
 apply (simp (no_asm_use))
 done
 (*>*)
-text {*
+text \<open>
 The set includes all agent names.  Nonces and keys are assumed to be
 unguessable, so none are included beyond those already in~$H$.   Two
 elements of @{term "synth H"} can be combined, and an element can be encrypted
@@ -629,11 +629,11 @@
 @{named_thms [display,indent=0] analz_synth [no_vars] (analz_synth)}
 Rule inversion plays a major role in reasoning about @{text synth}, through
 declarations such as this one:
-*}
+\<close>
 
 inductive_cases Nonce_synth [elim!]: "Nonce n \<in> synth H"
 
-text {*
+text \<open>
 \noindent
 The resulting elimination rule replaces every assumption of the form
 @{term "Nonce n \<in> synth H"} by @{term "Nonce n \<in> H"},
@@ -651,22 +651,22 @@
 use @{text parts} to express general well-formedness properties of a protocol,
 for example, that an uncompromised agent's private key will never be
 included as a component of any message.
-*}
+\<close>
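 
 text\<open>Rule inversion at work, illustratively: the only way a nonce can be
 synthesized is for it to be available already.  Thanks to the
 @{text "[elim!]"} declaration, @{text blast} performs the inversion
 automatically.\<close>
 
 lemma "Nonce n \<in> synth H \<Longrightarrow> Nonce n \<in> H"
 by blast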
 (*<*)
 lemma synth_increasing: "H \<subseteq> synth(H)"
 by blast
 
-subsubsection{*Unions *}
+subsubsection\<open>Unions\<close>
 
-text{*Converse fails: we can synth more from the union than from the 
-  separate parts, building a compound message using elements of each.*}
+text\<open>Converse fails: we can synth more from the union than from the 
+  separate parts, building a compound message using elements of each.\<close>
 lemma synth_Un: "synth(G) \<union> synth(H) \<subseteq> synth(G \<union> H)"
 by (intro Un_least synth_mono Un_upper1 Un_upper2)
 
 lemma synth_insert: "insert X (synth H) \<subseteq> synth(insert X H)"
 by (blast intro: synth_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma synth_synthD [dest!]: "X\<in> synth (synth H) ==> X\<in> synth H"
 by (erule synth.induct, blast+)
@@ -683,7 +683,7 @@
 lemma synth_trans: "[| X\<in> synth G;  G \<subseteq> synth H |] ==> X\<in> synth H"
 by (drule synth_mono, blast)
 
-text{*Cut; Lemma 2 of Lowe*}
+text\<open>Cut; Lemma 2 of Lowe\<close>
 lemma synth_cut: "[| Y\<in> synth (insert X H);  X\<in> synth H |] ==> Y\<in> synth H"
 by (erule synth_trans, blast)
 
@@ -706,7 +706,7 @@
 by (unfold keysFor_def, blast)
 
 
-subsubsection{*Combinations of parts, analz and synth *}
+subsubsection\<open>Combinations of parts, analz and synth\<close>
 
 lemma parts_synth [simp]: "parts (synth H) = parts H \<union> synth H"
 apply (rule equalityI)
@@ -722,13 +722,13 @@
 done
 
 
-subsubsection{*For reasoning about the Fake rule in traces *}
+subsubsection\<open>For reasoning about the Fake rule in traces\<close>
 
 lemma parts_insert_subset_Un: "X\<in> G ==> parts(insert X H) \<subseteq> parts G \<union> parts H"
 by (rule subset_trans [OF parts_mono parts_Un_subset2], blast)
 
-text{*More specifically for Fake.  Very occasionally we could do with a version
-  of the form  @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"} *}
+text\<open>More specifically for Fake.  Very occasionally we could do with a version
+  of the form  @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"}\<close>
 lemma Fake_parts_insert:
      "X \<in> synth (analz H) ==>  
       parts (insert X H) \<subseteq> synth (analz H) \<union> parts H"
@@ -742,8 +742,8 @@
       ==> Z \<in>  synth (analz H) \<union> parts H"
 by (blast dest: Fake_parts_insert  [THEN subsetD, dest])
 
-text{*@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put 
-  @{term "G=H"}.*}
+text\<open>@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put 
+  @{term "G=H"}.\<close>
 lemma Fake_analz_insert:
      "X\<in> synth (analz G) ==>  
       analz (insert X H) \<subseteq> synth (analz G) \<union> analz (G \<union> H)"
@@ -762,8 +762,8 @@
      "(X \<in> analz H | X \<in> parts H) = (X \<in> parts H)"
 by (blast intro: analz_subset_parts [THEN subsetD])
 
-text{*Without this equation, other rules for synth and analz would yield
-  redundant cases*}
+text\<open>Without this equation, other rules for synth and analz would yield
+  redundant cases\<close>
 lemma MPair_synth_analz [iff]:
      "(\<lbrace>X,Y\<rbrace> \<in> synth (analz H)) =  
       (X \<in> synth (analz H) & Y \<in> synth (analz H))"
@@ -775,12 +775,12 @@
 by blast
 
 
-text{*We do NOT want Crypt... messages broken up in protocols!!*}
+text\<open>We do NOT want Crypt... messages broken up in protocols!!\<close>
 declare parts.Body [rule del]
 
 
-text{*Rewrites to push in Key and Crypt messages, so that other messages can
-    be pulled out using the @{text analz_insert} rules*}
+text\<open>Rewrites to push in Key and Crypt messages, so that other messages can
+    be pulled out using the @{text analz_insert} rules\<close>
 
 lemmas pushKeys =
   insert_commute [of "Key K" "Agent C"]
@@ -800,14 +800,14 @@
   insert_commute [of "Crypt X K" "MPair X' Y"]
   for X K C N X' Y
 
-text{*Cannot be added with @{text "[simp]"} -- messages should not always be
-  re-ordered. *}
+text\<open>Cannot be added with @{text "[simp]"} -- messages should not always be
+  re-ordered.\<close>
 lemmas pushes = pushKeys pushCrypts
 
 
-subsection{*Tactics useful for many protocol proofs*}
+subsection\<open>Tactics useful for many protocol proofs\<close>
 ML
-{*
+\<open>
 val invKey = @{thm invKey};
 val keysFor_def = @{thm keysFor_def};
 val symKeys_def = @{thm symKeys_def};
@@ -858,11 +858,11 @@
        simp_tac ctxt 1,
        REPEAT (FIRSTGOAL (resolve_tac ctxt [allI,impI,notI,conjI,iffI])),
        DEPTH_SOLVE (atomic_spy_analz_tac ctxt 1)]) i);
-*}
+\<close>
 
-text{*By default only @{text o_apply} is built-in.  But in the presence of
+text\<open>By default only @{text o_apply} is built-in.  But in the presence of
 eta-expansion this means that some terms displayed as @{term "f o g"} will be
-rewritten, and others will not!*}
+rewritten, and others will not!\<close>
 declare o_def [simp]
 
 
@@ -883,7 +883,7 @@
 apply (rule synth_analz_mono, blast)   
 done
 
-text{*Two generalizations of @{text analz_insert_eq}*}
+text\<open>Two generalizations of @{text analz_insert_eq}\<close>
 lemma gen_analz_insert_eq [rule_format]:
      "X \<in> analz H ==> ALL G. H \<subseteq> G --> analz (insert X G) = analz G"
 by (blast intro: analz_cut analz_insertI analz_mono [THEN [2] rev_subsetD])
@@ -904,16 +904,16 @@
 
 lemmas Fake_parts_sing_imp_Un = Fake_parts_sing [THEN [2] rev_subsetD]
 
-method_setup spy_analz = {*
-    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac) *}
+method_setup spy_analz = \<open>
+    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac)\<close>
     "for proving the Fake case when analz is involved"
 
-method_setup atomic_spy_analz = {*
-    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac) *}
+method_setup atomic_spy_analz = \<open>
+    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac)\<close>
     "for debugging spy_analz"
 
-method_setup Fake_insert_simp = {*
-    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac) *}
+method_setup Fake_insert_simp = \<open>
+    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac)\<close>
     "for debugging spy_analz"
 
 
--- a/src/Doc/Tutorial/Protocol/NS_Public.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/NS_Public.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -6,12 +6,12 @@
 *)(*<*)
 theory NS_Public imports Public begin(*>*)
 
-section{* Modelling the Protocol \label{sec:modelling} *}
+section\<open>Modelling the Protocol \label{sec:modelling}\<close>
 
-text_raw {*
+text_raw \<open>
 \begin{figure}
 \begin{isabelle}
-*}
+\<close>
 
 inductive_set ns_public :: "event list set"
   where
@@ -40,13 +40,13 @@
               \<in> set evs3\<rbrakk>
           \<Longrightarrow> Says A B (Crypt (pubK B) (Nonce NB)) # evs3 \<in> ns_public"
 
-text_raw {*
+text_raw \<open>
 \end{isabelle}
 \caption{An Inductive Protocol Definition}\label{fig:ns_public}
 \end{figure}
-*}
+\<close>
 
-text {*
+text \<open>
 Let us formalize the Needham-Schroeder public-key protocol, as corrected by
 Lowe:
 \begin{alignat*%
@@ -84,9 +84,9 @@
 Benefits of this approach are simplicity and clarity.  The semantic model
 is set theory, proofs are by induction and the translation from the informal
 notation to the inductive rules is straightforward. 
-*}
+\<close>
 
-section{* Proving Elementary Properties \label{sec:regularity} *}
+section\<open>Proving Elementary Properties \label{sec:regularity}\<close>
 
 (*<*)
 declare knows_Spy_partsEs [elim]
@@ -109,7 +109,7 @@
 (*Spy never sees another agent's private key! (unless it's bad at start)*)
 (*>*)
 
-text {*
+text \<open>
 Secrecy properties can be hard to prove.  The conclusion of a typical
 secrecy theorem is 
 @{term "X \<notin> analz (knows Spy evs)"}.  The difficulty arises from
@@ -124,13 +124,13 @@
 @{text A}'s private key in a message, whether protected by encryption or
 not, is enough to confirm that @{text A} is compromised.  The proof, like
 nearly all protocol proofs, is by induction over traces.
-*}
+\<close>
 
 lemma Spy_see_priK [simp]:
       "evs \<in> ns_public
        \<Longrightarrow> (Key (priK A) \<in> parts (knows Spy evs)) = (A \<in> bad)"
 apply (erule ns_public.induct, simp_all)
-txt {*
+txt \<open>
 The induction yields five subgoals, one for each rule in the definition of
 @{text ns_public}.  The idea is to prove that the protocol property holds initially
 (rule @{text Nil}), is preserved by each of the legitimate protocol steps (rules
@@ -141,7 +141,7 @@
 at all, so only @{text Fake} is relevant. Indeed, simplification leaves
 only the @{text Fake} case, as indicated by the variable name @{text evsf}:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 by blast
 (*<*)
 lemma Spy_analz_priK [simp]:
@@ -149,7 +149,7 @@
 by auto
 (*>*)
 
-text {*
+text \<open>
 The @{text Fake} case is proved automatically.  If
 @{term "priK A"} is in the extended trace then either (1) it was already in the
 original trace or (2) it was
@@ -165,7 +165,7 @@
 induction, simplification, @{text blast}.  The first line uses the rule
 @{text rev_mp} to prepare the induction by moving two assumptions into the 
 induction formula.
-*}
+\<close>
 
 lemma no_nonce_NS1_NS2:
     "\<lbrakk>Crypt (pubK C) \<lbrace>NA', Nonce NA, Agent D\<rbrace> \<in> parts (knows Spy evs);
@@ -177,11 +177,11 @@
 apply (blast intro: analz_insertI)+
 done
 
-text {*
+text \<open>
 The following unicity lemma states that, if \isa{NA} is secret, then its
 appearance in any instance of message~1 determines the other components. 
 The proof is similar to the previous one.
-*}
+\<close>
 
 lemma unique_NA:
      "\<lbrakk>Crypt(pubK B)  \<lbrace>Nonce NA, Agent A \<rbrace> \<in> parts(knows Spy evs);
@@ -196,7 +196,7 @@
 done
 (*>*)
 
-section{* Proving Secrecy Theorems \label{sec:secrecy} *}
+section\<open>Proving Secrecy Theorems \label{sec:secrecy}\<close>
 
 (*<*)
 (*Secrecy: Spy does not see the nonce sent in msg NS1 if A and B are secure
@@ -264,21 +264,21 @@
 done
 (*>*)
 
-text {*
+text \<open>
 The secrecy theorems for Bob (the second participant) are especially
 important because they fail for the original protocol.  The following
 theorem states that if Bob sends message~2 to Alice, and both agents are
 uncompromised, then Bob's nonce will never reach the spy.
-*}
+\<close>
 
 theorem Spy_not_see_NB [dest]:
  "\<lbrakk>Says B A (Crypt (pubK A) \<lbrace>Nonce NA, Nonce NB, Agent B\<rbrace>) \<in> set evs;
    A \<notin> bad;  B \<notin> bad;  evs \<in> ns_public\<rbrakk>
   \<Longrightarrow> Nonce NB \<notin> analz (knows Spy evs)"
-txt {*
+txt \<open>
 To prove it, we must formulate the induction properly (one of the
 assumptions mentions~@{text evs}), apply induction, and simplify:
-*}
+\<close>
 
 apply (erule rev_mp, erule ns_public.induct, simp_all)
 (*<*)
@@ -288,7 +288,7 @@
 apply (blast intro: no_nonce_NS1_NS2)
 (*>*)
 
-txt {*
+txt \<open>
 The proof states are too complicated to present in full.  
 Let's examine the simplest subgoal, that for message~1.  The following
 event has just occurred:
@@ -335,7 +335,7 @@
 @{text B} has sent an instance of message~2 to~@{text A} and has received the
 expected reply, then that reply really originated with~@{text A}.  The
 proof is a simple induction.
-*}
+\<close>
 
 (*<*)
 by (blast intro: no_nonce_NS1_NS2)
@@ -368,7 +368,7 @@
 by (erule ns_public.induct, auto)
 (*>*)
 
-text {*
+text \<open>
 From similar assumptions, we can prove that @{text A} started the protocol
 run by sending an instance of message~1 involving the nonce~@{text NA}\@. 
 For this theorem, the conclusion is 
@@ -395,6 +395,6 @@
 the strategy illustrated above, but the subgoals can
 be much bigger and there are more of them.
 \index{protocols!security|)}
-*}
+\<close>
 
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Protocol/Public.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Protocol/Public.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -10,13 +10,13 @@
 begin
 (*>*)
 
-text {*
+text \<open>
 The function
 @{text pubK} maps agents to their public keys.  The function
 @{text priK} maps agents to their private keys.  It is merely
 an abbreviation (cf.\ \S\ref{sec:abbreviations}) defined in terms of
 @{text invKey} and @{text pubK}.
-*}
+\<close>
 
 consts pubK :: "agent \<Rightarrow> key"
 abbreviation priK :: "agent \<Rightarrow> key"
@@ -37,7 +37,7 @@
 end
 (*>*)
 
-text {*
+text \<open>
 \noindent
 The set @{text bad} consists of those agents whose private keys are known to
 the spy.
@@ -45,7 +45,7 @@
 Two axioms are asserted about the public-key cryptosystem. 
 No two agents have the same public key, and no private key equals
 any public key.
-*}
+\<close>
 
 axiomatization where
   inj_pubK:        "inj pubK" and
@@ -156,16 +156,16 @@
 (*Specialized methods*)
 
 (*Tactic for possibility theorems*)
-ML {*
+ML \<open>
 fun possibility_tac ctxt =
     REPEAT (*omit used_Says so that Nonces start from different traces!*)
     (ALLGOALS (simp_tac (ctxt delsimps [used_Says]))
      THEN
      REPEAT_FIRST (eq_assume_tac ORELSE' 
                    resolve_tac ctxt [refl, conjI, @{thm Nonce_supply}]));
-*}
+\<close>
 
-method_setup possibility = {* Scan.succeed (SIMPLE_METHOD o possibility_tac) *}
+method_setup possibility = \<open>Scan.succeed (SIMPLE_METHOD o possibility_tac)\<close>
     "for proving possibility theorems"
 
 end
--- a/src/Doc/Tutorial/Recdef/Induction.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Induction.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory Induction imports examples simplification begin
 (*>*)
 
-text{*
+text\<open>
 Assuming we have defined our function such that Isabelle could prove
 termination and that the recursion equations (or some suitable derived
 equations) are simplification rules, we might like to prove something about
@@ -17,29 +17,29 @@
 you are trying to establish holds for the left-hand side provided it holds
 for all recursive calls on the right-hand side. Here is a simple example
 involving the predefined @{term"map"} functional on lists:
-*}
+\<close>
 
 lemma "map f (sep(x,xs)) = sep(f x, map f xs)"
 
-txt{*\noindent
+txt\<open>\noindent
 Note that @{term"map f xs"}
 is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
 this lemma by recursion induction over @{term"sep"}:
-*}
+\<close>
 
 apply(induct_tac x xs rule: sep.induct)
 
-txt{*\noindent
+txt\<open>\noindent
 The resulting proof state has three subgoals corresponding to the three
 clauses for @{term"sep"}:
 @{subgoals[display,indent=0]}
 The rest is pure simplification:
-*}
+\<close>
 
 apply simp_all
 done
 
-text{*
+text\<open>
 Try proving the above lemma by structural induction, and you find that you
 need an additional case distinction. What is worse, the names of variables
 are invented by Isabelle and have nothing to do with the names in the
@@ -64,7 +64,7 @@
 empty list, the singleton list, and the list with at least two elements.
 The final case has an induction hypothesis:  you may assume that @{term"P"}
 holds for the tail of that list.
-*}
+\<close>
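 
 text\<open>\noindent
 The same schema deals just as directly with other properties of
 @{term sep}, for example this illustrative one:\<close>
 
 lemma "(sep(a, xs) = []) = (xs = [])"
 apply(induct_tac a xs rule: sep.induct)
 apply simp_all
 done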
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Recdef/Nested0.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested0.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,14 +2,14 @@
 theory Nested0 imports Main begin
 (*>*)
 
-text{*
+text\<open>
 \index{datatypes!nested}%
 In \S\ref{sec:nested-datatype} we defined the datatype of terms
-*}
+\<close>
 
 datatype ('a,'b)"term" = Var 'a | App 'b "('a,'b)term list"
 
-text{*\noindent
+text\<open>\noindent
 and closed with the observation that the associated schema for the definition
 of primitive recursive functions leads to overly verbose definitions. Moreover,
 if you have worked exercise~\ref{ex:trev-trev} you will have noticed that
@@ -18,7 +18,7 @@
 We will now show you how \isacommand{recdef} can simplify
 definitions and proofs about nested recursive datatypes. As an example we
 choose exercise~\ref{ex:trev-trev}:
-*}
+\<close>
 
 consts trev  :: "('a,'b)term \<Rightarrow> ('a,'b)term"
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Recdef/Nested1.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested1.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory Nested1 imports Nested0 begin
 (*>*)
 
-text{*\noindent
+text\<open>\noindent
 Although the definition of @{term trev} below is quite natural, we will have
 to overcome a minor difficulty in convincing Isabelle of its termination.
 It is precisely this difficulty that is the \textit{raison d'\^etre} of
@@ -11,13 +11,13 @@
 Defining @{term trev} by \isacommand{recdef} rather than \isacommand{primrec}
 simplifies matters because we are now free to use the recursion equation
 suggested at the end of \S\ref{sec:nested-datatype}:
-*}
+\<close>
 
 recdef (*<*)(permissive)(*>*)trev "measure size"
  "trev (Var x)    = Var x"
  "trev (App f ts) = App f (rev(map trev ts))"
 
-text{*\noindent
+text\<open>\noindent
 Remember that function @{term size} is defined for each \isacommand{datatype}.
 However, the definition does not succeed. Isabelle complains about an
 unproved termination condition
@@ -36,7 +36,7 @@
 \isacommand{recdef} knows about @{term map}.
 
 The termination condition is easily proved by induction:
-*}
+\<close>
 
 (*<*)
 end
--- a/src/Doc/Tutorial/Recdef/Nested2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/Nested2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -9,25 +9,25 @@
  "trev (Var x) = Var x"
  "trev (App f ts) = App f (rev(map trev ts))"
 (*>*)
-text{*\noindent
+text\<open>\noindent
 By making this theorem a simplification rule, \isacommand{recdef}
 applies it automatically and the definition of @{term"trev"}
 succeeds now. As a reward for our effort, we can now prove the desired
 lemma directly.  We no longer need the verbose
 induction schema for type @{text"term"} and can use the simpler one arising from
 @{term"trev"}:
-*}
+\<close>
 
 lemma "trev(trev t) = t"
 apply(induct_tac t rule: trev.induct)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0]}
 Both the base case and the induction step fall to simplification:
-*}
+\<close>
 
 by(simp_all add: rev_map sym[OF map_compose] cong: map_cong)
 
-text{*\noindent
+text\<open>\noindent
 If the proof of the induction step mystifies you, we recommend that you go through
 the chain of simplification steps in detail; you will probably need the help of
 @{text"simp_trace"}. Theorem @{thm[source]map_cong} is discussed below.
@@ -65,7 +65,7 @@
 into a situation where you need to supply \isacommand{recdef} with new
 congruence rules, you can append a hint after the end of
 the recursion equations:\cmmdx{hints}
-*}
+\<close>
 (*<*)
 consts dummy :: "nat => nat"
 recdef dummy "{}"
@@ -73,19 +73,19 @@
 (*>*)
 (hints recdef_cong: map_cong)
 
-text{*\noindent
+text\<open>\noindent
 Or you can declare them globally
 by giving them the \attrdx{recdef_cong} attribute:
-*}
+\<close>
 
 declare map_cong[recdef_cong]
 
-text{*
+text\<open>
 The @{text cong} and @{text recdef_cong} attributes are
 intentionally kept apart because they control different activities, namely
 simplification and making recursive definitions.
 %The simplifier's congruence rules cannot be used by recdef.
 %For example the weak congruence rules for if and case would prevent
 %recdef from generating sensible termination conditions.
-*}
+\<close>
 (*<*)end(*>*)
--- a/src/Doc/Tutorial/Recdef/examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,9 +2,9 @@
 theory examples imports Main begin
 (*>*)
 
-text{*
+text\<open>
 Here is a simple example, the \rmindex{Fibonacci function}:
-*}
+\<close>
 
 consts fib :: "nat \<Rightarrow> nat"
 recdef fib "measure(\<lambda>n. n)"
@@ -12,7 +12,7 @@
   "fib (Suc 0) = 1"
   "fib (Suc(Suc x)) = fib x + fib (Suc x)"
 
-text{*\noindent
+text\<open>\noindent
 \index{measure functions}%
 The definition of @{term"fib"} is accompanied by a \textbf{measure function}
 @{term"%n. n"} which maps the argument of @{term"fib"} to a
@@ -25,7 +25,7 @@
 
 Slightly more interesting is the insertion of a fixed element
 between any two elements of a list:
-*}
+\<close>
 
 consts sep :: "'a \<times> 'a list \<Rightarrow> 'a list"
 recdef sep "measure (\<lambda>(a,xs). length xs)"
@@ -33,7 +33,7 @@
   "sep(a, [x])    = [x]"
   "sep(a, x#y#zs) = x # a # sep(a,y#zs)"
 
-text{*\noindent
+text\<open>\noindent
 This time the measure is the length of the list, which decreases with the
 recursive call; the first component of the argument tuple is irrelevant.
 The details of tupled $\lambda$-abstractions @{text"\<lambda>(x\<^sub>1,\<dots>,x\<^sub>n)"} are
@@ -41,24 +41,24 @@
 
 Pattern matching\index{pattern matching!and \isacommand{recdef}}
 need not be exhaustive:
-*}
+\<close>
 
 consts last :: "'a list \<Rightarrow> 'a"
 recdef last "measure (\<lambda>xs. length xs)"
   "last [x]      = x"
   "last (x#y#zs) = last (y#zs)"
 
-text{*
+text\<open>
 Overlapping patterns are disambiguated by taking the order of equations into
 account, just as in functional programming:
-*}
+\<close>
 
 consts sep1 :: "'a \<times> 'a list \<Rightarrow> 'a list"
 recdef sep1 "measure (\<lambda>(a,xs). length xs)"
   "sep1(a, x#y#zs) = x # a # sep1(a,y#zs)"
   "sep1(a, xs)     = xs"
 
-text{*\noindent
+text\<open>\noindent
 To guarantee that the second equation can only be applied if the first
 one does not match, Isabelle internally replaces the second equation
 by the two possibilities that are left: @{prop"sep1(a,[]) = []"} and
@@ -73,17 +73,17 @@
   argument is relevant for termination, you can also rearrange the order of
   arguments as in the following definition:
 \end{warn}
-*}
+\<close>
 consts sep2 :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list"
 recdef sep2 "measure length"
   "sep2 (x#y#zs) = (\<lambda>a. x # a # sep2 (y#zs) a)"
   "sep2 xs       = (\<lambda>a. xs)"
 
-text{*
+text\<open>
 Because of its pattern-matching syntax, \isacommand{recdef} is also useful
 for the definition of non-recursive functions, where the termination measure
 degenerates to the empty set @{term"{}"}:
-*}
+\<close>
 
 consts swap12 :: "'a list \<Rightarrow> 'a list"
 recdef swap12 "{}"
--- a/src/Doc/Tutorial/Recdef/simplification.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/simplification.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory simplification imports Main begin
 (*>*)
 
-text{*
+text\<open>
 Once we have proved all the termination conditions, the \isacommand{recdef} 
 recursion equations become simplification rules, just as with
 \isacommand{primrec}. In most cases this works fine, but there is a subtle
@@ -10,13 +10,13 @@
 terminate because of automatic splitting of @{text "if"}.
 \index{*if expressions!splitting of}
 Let us look at an example:
-*}
+\<close>
 
 consts gcd :: "nat\<times>nat \<Rightarrow> nat"
 recdef gcd "measure (\<lambda>(m,n).n)"
   "gcd (m, n) = (if n=0 then m else gcd(n, m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 According to the measure function, the second argument should decrease with
 each recursive call. The resulting termination condition
 @{term[display]"n ~= (0::nat) ==> m mod n < n"}
@@ -48,7 +48,7 @@
 If possible, the definition should be given by pattern matching on the left
 rather than @{text "if"} on the right. In the case of @{term gcd} the
 following alternative definition suggests itself:
-*}
+\<close>
 
 consts gcd1 :: "nat\<times>nat \<Rightarrow> nat"
 recdef gcd1 "measure (\<lambda>(m,n).n)"
@@ -56,27 +56,27 @@
   "gcd1 (m, n) = gcd1(n, m mod n)"
 
 
-text{*\noindent
+text\<open>\noindent
 The order of equations is important: it hides the side condition
 @{prop"n ~= (0::nat)"}.  Unfortunately, in general the case distinction
 may not be expressible by pattern matching.
 
 A simple alternative is to replace @{text "if"} by @{text case}, 
 which is also available for @{typ bool} and is not split automatically:
-*}
+\<close>
 
 consts gcd2 :: "nat\<times>nat \<Rightarrow> nat"
 recdef gcd2 "measure (\<lambda>(m,n).n)"
   "gcd2(m,n) = (case n=0 of True \<Rightarrow> m | False \<Rightarrow> gcd2(n,m mod n))"
 
-text{*\noindent
+text\<open>\noindent
 This is probably the neatest solution next to pattern matching, and it is
 always available.
 
 A final alternative is to replace the offending simplification rules by
 derived conditional ones. For @{term gcd} it means we have to prove
 these lemmas:
-*}
+\<close>
 
 lemma [simp]: "gcd (m, 0) = m"
 apply(simp)
@@ -86,11 +86,11 @@
 apply(simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 Simplification terminates for these proofs because the condition of the @{text
 "if"} simplifies to @{term True} or @{term False}.
 Now we can disable the original simplification rule:
-*}
+\<close>
 
 declare gcd.simps [simp del]
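 
 text\<open>\noindent
 Now evaluation by simplification terminates again; an illustrative check
 (assuming the two conditional rules above remain default simplification
 rules):\<close>
 
 lemma "gcd(6, 4) = 2"
 by simp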
 
--- a/src/Doc/Tutorial/Recdef/termination.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Recdef/termination.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,7 +2,7 @@
 theory "termination" imports examples begin
 (*>*)
 
-text{*
+text\<open>
 When a function~$f$ is defined via \isacommand{recdef}, Isabelle tries to prove
 its termination with the help of the user-supplied measure.  Each of the examples
 above is simple enough that Isabelle can automatically prove that the
@@ -14,14 +14,14 @@
 simplification rules.
 
 Isabelle may fail to prove the termination condition for some
-recursive call.  Let us try to define Quicksort:*}
+recursive call.  Let us try to define Quicksort:\<close>
 
 consts qs :: "nat list \<Rightarrow> nat list"
 recdef(*<*)(permissive)(*>*) qs "measure length"
  "qs [] = []"
  "qs(x#xs) = qs(filter (\<lambda>y. y\<le>x) xs) @ [x] @ qs(filter (\<lambda>y. x<y) xs)"
 
-text{*\noindent where @{term filter} is predefined and @{term"filter P xs"}
+text\<open>\noindent where @{term filter} is predefined and @{term"filter P xs"}
 is the list of elements of @{term xs} satisfying @{term P}.
 This definition of @{term qs} fails, and Isabelle prints an error message
 showing you what it was unable to prove:
@@ -38,7 +38,7 @@
 proved). Because \isacommand{recdef}'s termination prover involves
 simplification, we include in our second attempt a hint: the
 \attrdx{recdef_simp} attribute says to use @{thm[source]less_Suc_eq_le} as a
-simplification rule.\cmmdx{hints}  *}
+simplification rule.\cmmdx{hints}\<close>
 
 (*<*)global consts qs :: "nat list \<Rightarrow> nat list" (*>*)
 recdef qs "measure length"
@@ -46,25 +46,25 @@
  "qs(x#xs) = qs(filter (\<lambda>y. y\<le>x) xs) @ [x] @ qs(filter (\<lambda>y. x<y) xs)"
 (hints recdef_simp: less_Suc_eq_le)
 (*<*)local(*>*)
-text{*\noindent
+text\<open>\noindent
 This time everything works fine. Now @{thm[source]qs.simps} contains precisely
 the stated recursion equations for @{text qs} and they have become
 simplification rules.
 Thus we can automatically prove results such as this one:
-*}
+\<close>
 
 theorem "qs[2,3,0] = qs[3,0,2]"
 apply(simp)
 done
 
-text{*\noindent
+text\<open>\noindent
 More exciting theorems require induction, which is discussed below.
 
 If the termination proof requires a lemma that is of general use, you can
 turn it permanently into a simplification rule, in which case the above
 \isacommand{hint} is not necessary. But in the case of
 @{thm[source]less_Suc_eq_le} this would be of dubious value.
-*}
+\<close>
 (*<*)
 end
 (*>*)
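
A further ground instance in the same style, assuming the setup above, shows
that the recursion equations now evaluate closed calls completely rather than
merely relating two of them:

theorem "qs[2,3,0] = [0,2,3]"
apply(simp)
done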
--- a/src/Doc/Tutorial/Rules/Basic.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Basic.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -35,9 +35,9 @@
  apply assumption
 done
 
-text {*
+text \<open>
 @{text by} eliminates uses of @{text assumption} and @{text done}
-*}
+\<close>
 
 lemma imp_uncurry': "P \<longrightarrow> Q \<longrightarrow> R \<Longrightarrow> P \<and> Q \<longrightarrow> R"
 apply (rule impI)
@@ -47,21 +47,21 @@
 by (drule mp)
 
 
-text {*
+text \<open>
 substitution
 
 @{thm[display] ssubst}
 \rulename{ssubst}
-*}
+\<close>
 
 lemma "\<lbrakk> x = f x; P(f x) \<rbrakk> \<Longrightarrow> P x"
 by (erule ssubst)
 
-text {*
+text \<open>
 also provable by simp (re-orients)
-*}
+\<close>
 
-text {*
+text \<open>
 the subst method
 
 @{thm[display] mult.commute}
@@ -69,17 +69,17 @@
 
 this would fail:
 apply (simp add: mult.commute) 
-*}
+\<close>
 
 
 lemma "\<lbrakk>P x y z; Suc x < y\<rbrakk> \<Longrightarrow> f z = x*y"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (subst mult.commute) 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
 (*exercise involving THEN*)
@@ -90,11 +90,11 @@
 
 lemma "\<lbrakk>x = f x; triple (f x) (f x) x\<rbrakk> \<Longrightarrow> triple x x x"
 apply (erule ssubst) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
-back --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+back \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply assumption
 done
 
@@ -102,9 +102,9 @@
 apply (erule ssubst, assumption)
 done
 
-text{*
+text\<open>
 or better still 
-*}
+\<close>
 
 lemma "\<lbrakk> x = f x; triple (f x) (f x) x \<rbrakk> \<Longrightarrow> triple x x x"
 by (erule ssubst)
@@ -120,7 +120,7 @@
 by (erule_tac P="\<lambda>u. triple u u x" in ssubst)
 
 
-text {*
+text \<open>
 negation
 
 @{thm[display] notI}
@@ -143,41 +143,41 @@
 
 @{thm[display] contrapos_nn}
 \rulename{contrapos_nn}
-*}
+\<close>
 
 
 lemma "\<lbrakk>\<not>(P\<longrightarrow>Q); \<not>(R\<longrightarrow>Q)\<rbrakk> \<Longrightarrow> R"
 apply (erule_tac Q="R\<longrightarrow>Q" in contrapos_np)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (intro impI)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (erule notE)
 
-text {*
+text \<open>
 @{thm[display] disjCI}
 \rulename{disjCI}
-*}
+\<close>
 
 lemma "(P \<or> Q) \<and> R \<Longrightarrow> P \<or> Q \<and> R"
 apply (intro disjCI conjI)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 
 apply (elim conjE disjE)
  apply assumption
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 
 by (erule contrapos_np, rule conjI)
-text{*
+text\<open>
 proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{6}}\isanewline
 \isanewline
 goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
 {\isacharparenleft}P\ {\isasymor}\ Q{\isacharparenright}\ {\isasymand}\ R\ {\isasymLongrightarrow}\ P\ {\isasymor}\ Q\ {\isasymand}\ R\isanewline
 \ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}R{\isacharsemicolon}\ Q{\isacharsemicolon}\ {\isasymnot}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ Q\isanewline
 \ {\isadigit{2}}{\isachardot}\ {\isasymlbrakk}R{\isacharsemicolon}\ Q{\isacharsemicolon}\ {\isasymnot}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ R
-*}
+\<close>
 
 
-text{*rule_tac, etc.*}
+text\<open>rule_tac, etc.\<close>
 
 
 lemma "P&Q"
@@ -185,23 +185,23 @@
 oops
 
 
-text{*unification failure trace *}
+text\<open>unification failure trace\<close>
 
 declare [[unify_trace_failure = true]]
 
 lemma "P(a, f(b, g(e,a), b), a) \<Longrightarrow> P(a, f(b, g(c,a), b), a)"
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 apply assumption
 Clash: e =/= c
 
 Clash: == =/= Trueprop
-*}
+\<close>
 oops
 
 lemma "\<forall>x y. P(x,y) --> P(y,x)"
 apply auto
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 apply assumption
 
@@ -209,15 +209,15 @@
 
 Clash: == =/= Trueprop
 Clash: == =/= Trueprop
-*}
+\<close>
 oops
 
 declare [[unify_trace_failure = false]]
 
 
-text{*Quantifiers*}
+text\<open>Quantifiers\<close>
 
-text {*
+text \<open>
 @{thm[display] allI}
 \rulename{allI}
 
@@ -226,7 +226,7 @@
 
 @{thm[display] spec}
 \rulename{spec}
-*}
+\<close>
 
 lemma "\<forall>x. P x \<longrightarrow> P x"
 apply (rule allI)
@@ -237,74 +237,74 @@
 apply (drule spec)
 by (drule mp)
 
-text{*rename_tac*}
+text\<open>rename_tac\<close>
 lemma "x < y \<Longrightarrow> \<forall>x y. P x (f y)"
 apply (intro allI)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rename_tac v w)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 
 lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (h x); P a\<rbrakk> \<Longrightarrow> P(h (h a))"
 apply (frule spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule mp, assumption)
 apply (drule spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (drule mp)
 
 lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (f x); P a\<rbrakk> \<Longrightarrow> P(f (f a))"
 by blast
 
 
-text{*
-the existential quantifier*}
+text\<open>
+the existential quantifier\<close>
 
-text {*
+text \<open>
 @{thm[display]"exI"}
 \rulename{exI}
 
 @{thm[display]"exE"}
 \rulename{exE}
-*}
+\<close>
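
A small illustrative script applying the two rules just displayed (the
conjecture itself is arbitrary):

lemma "\<exists>x. P x \<Longrightarrow> \<exists>x. P x \<or> Q x"
by (erule exE, rule exI, rule disjI1)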
 
 
-text{*
-instantiating quantifiers explicitly by rule_tac and erule_tac*}
+text\<open>
+instantiating quantifiers explicitly by rule_tac and erule_tac\<close>
 
 lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (h x); P a\<rbrakk> \<Longrightarrow> P(h (h a))"
 apply (frule spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule mp, assumption)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule_tac x = "h a" in spec)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (drule mp)
 
-text {*
+text \<open>
 @{thm[display]"dvd_def"}
 \rulename{dvd_def}
-*}
+\<close>
 
 lemma mult_dvd_mono: "\<lbrakk>i dvd m; j dvd n\<rbrakk> \<Longrightarrow> i*j dvd (m*n :: nat)"
 apply (simp add: dvd_def)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE) 
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule exE) 
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rename_tac l)
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule_tac x="k*l" in exI) 
-        --{* @{subgoals[display,indent=0,margin=65]} *}
+        \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply simp
 done
 
-text{*
-Hilbert-epsilon theorems*}
+text\<open>
+Hilbert-epsilon theorems\<close>
 
-text{*
+text\<open>
 @{thm[display] the_equality[no_vars]}
 \rulename{the_equality}
 
@@ -330,29 +330,29 @@
 
 @{thm[display] order_antisym[no_vars]}
 \rulename{order_antisym}
-*}
+\<close>
 
 
 lemma "inv Suc (Suc n) = n"
 by (simp add: inv_def)
 
-text{*but we know nothing about inv Suc 0*}
+text\<open>but we know nothing about inv Suc 0\<close>
 
 theorem Least_equality:
      "\<lbrakk> P (k::nat);  \<forall>x. P x \<longrightarrow> k \<le> x \<rbrakk> \<Longrightarrow> (LEAST x. P(x)) = k"
 apply (simp add: Least_def)
  
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
    
 apply (rule the_equality)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 first subgoal is existence; second is uniqueness
-*}
+\<close>
 by (auto intro: order_antisym)
 
 
@@ -360,19 +360,19 @@
      "(\<forall>x. \<exists>y. P x y) \<Longrightarrow> \<exists>f. \<forall>x. P x (f x)"
 apply (rule exI, rule allI)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 state after intro rules
-*}
+\<close>
 apply (drule spec, erule exE)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 applying @{text someI} automatically instantiates
 @{term f} to @{term "\<lambda>x. SOME y. P x y"}
-*}
+\<close>
 
 by (rule someI)
 
@@ -385,7 +385,7 @@
 apply (rule exI [of _  "\<lambda>x. SOME y. P x y"])
 by (blast intro: someI)
 
-text{*end of Epsilon section*}
+text\<open>end of Epsilon section\<close>
 
 
 lemma "(\<exists>x. P x) \<or> (\<exists>x. Q x) \<Longrightarrow> \<exists>x. P x \<or> Q x"
@@ -433,11 +433,11 @@
 
 lemma "\<forall>y. R y y \<Longrightarrow> \<exists>x. \<forall>y. R x y"
 apply (rule exI) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule allI) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (drule spec) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 lemma "\<forall>x. \<exists>y. x=y"
--- a/src/Doc/Tutorial/Rules/Blast.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Blast.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -4,9 +4,9 @@
        ((\<exists>x. \<forall>y. q(x)=q(y)) = ((\<exists>x. p(x))=(\<forall>y. q(y))))"
 by blast
 
-text{*\noindent Until now, we have proved everything using only induction and
+text\<open>\noindent Until now, we have proved everything using only induction and
 simplification.  Substantial proofs require more elaborate types of
-inference.*}
+inference.\<close>
 
 lemma "(\<forall>x. honest(x) \<and> industrious(x) \<longrightarrow> healthy(x)) \<and>  
        \<not> (\<exists>x. grocer(x) \<and> healthy(x)) \<and> 
@@ -20,13 +20,13 @@
         (\<Union>i\<in>I. \<Union>j\<in>J. A(i) \<inter> B(j))"
 by blast
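
An extra goal in the same spirit, purely illustrative, suggesting how readily
@{text blast} copes with such indexed set constructions:

lemma "(\<Inter>i\<in>I. A(i)) \<subseteq> (\<Inter>i\<in>I. A(i) \<union> B(i))"
by blast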
 
-text {*
+text \<open>
 @{thm[display] mult_is_0}
  \rulename{mult_is_0}
 
 @{thm[display] finite_Un}
  \rulename{finite_Un}
-*}
+\<close>
 
 
 lemma [iff]: "(xs@ys = []) = (xs=[] & ys=[])"
--- a/src/Doc/Tutorial/Rules/Force.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Force.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -15,20 +15,20 @@
 apply clarify
 oops
 
-text {*
+text \<open>
 proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{1}}\isanewline
 \isanewline
 goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
 {\isacharparenleft}{\isasymforall}x{\isachardot}\ P\ x{\isacharparenright}\ {\isasymand}\ {\isacharparenleft}{\isasymexists}x{\isachardot}\ Q\ x{\isacharparenright}\ {\isasymlongrightarrow}\ {\isacharparenleft}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\isanewline
 \ {\isadigit{1}}{\isachardot}\ {\isasymAnd}x\ xa{\isachardot}\ {\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x{\isacharsemicolon}\ Q\ xa{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ {\isasymand}\ Q\ x
-*}
+\<close>
 
-text {*
+text \<open>
 couldn't find a good example of clarsimp
 
 @{thm[display]"someI"}
 \rulename{someI}
-*}
+\<close>
 
 lemma "\<lbrakk>Q a; P a\<rbrakk> \<Longrightarrow> P (SOME x. P x \<and> Q x) \<and> Q (SOME x. P x \<and> Q x)"
 apply (rule someI)
@@ -38,13 +38,13 @@
 apply (fast intro!: someI)
 done
 
-text{*
+text\<open>
 proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
 \isanewline
 goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
 {\isasymlbrakk}Q\ a{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}SOME\ x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\ {\isasymand}\ Q\ {\isacharparenleft}SOME\ x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\isanewline
 \ \isadigit{1}{\isachardot}\ {\isasymlbrakk}Q\ a{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharquery}x\ {\isasymand}\ Q\ {\isacharquery}x
-*}
+\<close>
 
 end
 
--- a/src/Doc/Tutorial/Rules/Forward.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Forward.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,12 +1,12 @@
 theory Forward imports TPrimes begin
 
-text{*\noindent
+text\<open>\noindent
 Forward proof material: of, OF, THEN, simplify, rule_format.
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 SKIP most developments...
-*}
+\<close>
 
 (** Commutativity **)
 
@@ -29,13 +29,13 @@
 apply (simp add: gcd_commute [of "Suc 0"])
 done
 
-text{*\noindent
+text\<open>\noindent
 as far as HERE.
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 SKIP THIS PROOF
-*}
+\<close>
 
 lemma gcd_mult_distrib2: "k * gcd m n = gcd (k*m) (k*n)"
 apply (induct_tac m n rule: gcd.induct)
@@ -45,14 +45,14 @@
 apply simp_all
 done
 
-text {*
+text \<open>
 @{thm[display] gcd_mult_distrib2}
 \rulename{gcd_mult_distrib2}
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 of, simplified
-*}
+\<close>
 
 
 lemmas gcd_mult_0 = gcd_mult_distrib2 [of k 1]
@@ -64,7 +64,7 @@
 
 lemmas where3 = gcd_mult_distrib2 [where m=1 and k="j+k"]
 
-text {*
+text \<open>
 example using ``of'':
 @{thm[display] gcd_mult_distrib2 [of _ 1]}
 
@@ -82,7 +82,7 @@
 
 @{thm[display] sym}
 \rulename{sym}
-*}
+\<close>
 
 lemmas gcd_mult0 = gcd_mult_1 [THEN sym]
       (*not quite right: we need ?k but this gives k*)
@@ -90,9 +90,9 @@
 lemmas gcd_mult0' = gcd_mult_distrib2 [of k 1, simplified, THEN sym]
       (*better in one step!*)
 
-text {*
+text \<open>
 more legible, and variables properly generalized
-*}
+\<close>
 
 lemma gcd_mult [simp]: "gcd k (k*n) = k"
 by (rule gcd_mult_distrib2 [of k 1, simplified, THEN sym])
@@ -101,15 +101,15 @@
 lemmas gcd_self0 = gcd_mult [of k 1, simplified]
 
 
-text {*
+text \<open>
 @{thm[display] gcd_mult}
 \rulename{gcd_mult}
 
 @{thm[display] gcd_self0}
 \rulename{gcd_self0}
-*}
+\<close>
 
-text {*
+text \<open>
 Rules handy with THEN
 
 @{thm[display] iffD1}
@@ -117,18 +117,18 @@
 
 @{thm[display] iffD2}
 \rulename{iffD2}
-*}
+\<close>
 
 
-text {*
+text \<open>
 again: more legible, and variables properly generalized
-*}
+\<close>
 
 lemma gcd_self [simp]: "gcd k k = k"
 by (rule gcd_mult [of k 1, simplified])
 
 
-text{*
+text\<open>
 NEXT SECTION: Methods for Forward Proof
 
 NEW
@@ -136,48 +136,48 @@
 theorem arg_cong, useful in forward steps
 @{thm[display] arg_cong[no_vars]}
 \rulename{arg_cong}
-*}
+\<close>
 
 lemma "2 \<le> u \<Longrightarrow> u*m \<noteq> Suc(u*n)"
 apply (intro notI)
-txt{*
+txt\<open>
 before using arg_cong
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (drule_tac f="\<lambda>x. x mod u" in arg_cong)
-txt{*
+txt\<open>
 after using arg_cong
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp add: mod_Suc)
 done
 
-text{*
+text\<open>
 We have just used this rule:
 @{thm[display] mod_Suc[no_vars]}
 \rulename{mod_Suc}
 
 @{thm[display] mult_le_mono1[no_vars]}
 \rulename{mult_le_mono1}
-*}
+\<close>
 
 
-text{*
+text\<open>
 example of "insert"
-*}
+\<close>
 
 lemma relprime_dvd_mult:
       "\<lbrakk> gcd k n = 1; k dvd m*n \<rbrakk> \<Longrightarrow> k dvd m"
 apply (insert gcd_mult_distrib2 [of m k n])
-txt{*@{subgoals[display,indent=0,margin=65]}*}
+txt\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply simp
-txt{*@{subgoals[display,indent=0,margin=65]}*}
+txt\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (erule_tac t="m" in ssubst)
 apply simp
 done
 
 
-text {*
+text \<open>
 @{thm[display] relprime_dvd_mult}
 \rulename{relprime_dvd_mult}
 
@@ -185,7 +185,7 @@
 
 @{thm[display] div_mult_mod_eq}
 \rulename{div_mult_mod_eq}
-*}
+\<close>
 
 (*MOVED to Force.thy, which now depends only on Divides.thy
 lemma div_mult_self_is_m: "0<n \<Longrightarrow> (m*n) div n = (m::nat)"
@@ -197,7 +197,7 @@
 lemma relprime_20_81: "gcd 20 81 = 1"
 by (simp add: gcd.simps)
 
-text {*
+text \<open>
 Examples of 'OF'
 
 @{thm[display] relprime_dvd_mult}
@@ -214,20 +214,20 @@
 @{thm[display] dvd_add [OF dvd_refl dvd_refl]}
 
 @{thm[display] dvd_add [OF _ dvd_refl]}
-*}
+\<close>
 
 lemma "\<lbrakk>(z::int) < 37; 66 < 2*z; z*z \<noteq> 1225; Q(34); Q(36)\<rbrakk> \<Longrightarrow> Q(z)"
 apply (subgoal_tac "z = 34 \<or> z = 36")
-txt{*
+txt\<open>
 the tactic leaves two subgoals:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply blast
 apply (subgoal_tac "z \<noteq> 35")
-txt{*
+txt\<open>
 the tactic leaves two subgoals:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply arith
 apply force
 done
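
Such @{text OF} combinations can also be stored as named facts for reuse; a
minimal sketch, assuming the lemmas proved above (the name
@{text relprime_dvd_20} is illustrative):

lemmas relprime_dvd_20 = relprime_dvd_mult [OF relprime_20_81]

This yields the specialised fact @{text"20 dvd ?m * 81 \<Longrightarrow> 20 dvd ?m"}.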
--- a/src/Doc/Tutorial/Rules/TPrimes.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/TPrimes.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -8,10 +8,10 @@
   "gcd m n = (if n=0 then m else gcd n (m mod n))"
 
 
-text {*Now in Basic.thy!
+text \<open>Now in Basic.thy!
 @{thm[display]"dvd_def"}
 \rulename{dvd_def}
-*}
+\<close>
 
 
 (*** Euclid's Algorithm ***)
@@ -29,30 +29,30 @@
 (*gcd(m,n) divides m and n.  The conjunctions don't seem provable separately*)
 lemma gcd_dvd_both: "(gcd m n dvd m) \<and> (gcd m n dvd n)"
 apply (induct_tac m n rule: gcd.induct)
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 apply (case_tac "n=0")
-txt{*subgoals after the case tac
+txt\<open>subgoals after the case tac
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp_all) 
-  --{* @{subgoals[display,indent=0,margin=65]} *}
+  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 by (blast dest: dvd_mod_imp_dvd)
 
 
 
-text {*
+text \<open>
 @{thm[display] dvd_mod_imp_dvd}
 \rulename{dvd_mod_imp_dvd}
 
 @{thm[display] dvd_trans}
 \rulename{dvd_trans}
-*}
+\<close>
 
 lemmas gcd_dvd1 [iff] = gcd_dvd_both [THEN conjunct1]
 lemmas gcd_dvd2 [iff] = gcd_dvd_both [THEN conjunct2]
 
 
-text {*
+text \<open>
 \begin{quote}
 @{thm[display] gcd_dvd1}
 \rulename{gcd_dvd1}
@@ -60,7 +60,7 @@
 @{thm[display] gcd_dvd2}
 \rulename{gcd_dvd2}
 \end{quote}
-*}
+\<close>
 
 (*Maximality: for all m,n,k naturals, 
                 if k divides m and k divides n then k divides gcd(m,n)*)
@@ -68,16 +68,16 @@
       "k dvd m \<longrightarrow> k dvd n \<longrightarrow> k dvd gcd m n"
 apply (induct_tac m n rule: gcd.induct)
 apply (case_tac "n=0")
-txt{*subgoals after the case tac
+txt\<open>subgoals after the case tac
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp_all add: dvd_mod)
 done
 
-text {*
+text \<open>
 @{thm[display] dvd_mod}
 \rulename{dvd_mod}
-*}
+\<close>
 
 (*just checking the claim that case_tac "n" works too*)
 lemma "k dvd m \<longrightarrow> k dvd n \<longrightarrow> k dvd gcd m n"
@@ -110,7 +110,7 @@
 done
 
 
-text {*
+text \<open>
 @{thm[display] dvd_antisym}
 \rulename{dvd_antisym}
 
@@ -123,7 +123,7 @@
 \ \ \ \ \ \ \ n\ dvd\ a\ \isasymand \ n\ dvd\ b\ \isasymand \ (\isasymforall d.\ d\ dvd\ a\ \isasymand \ d\ dvd\ b\ \isasymlongrightarrow \ d\ dvd\ n)\isasymrbrakk \isanewline
 \ \ \ \ \isasymLongrightarrow \ m\ =\ n
 \end{isabelle}
-*}
+\<close>
 
 lemma gcd_assoc: "gcd (gcd k m) n = gcd k (gcd m n)"
   apply (rule is_gcd_unique)
@@ -132,7 +132,7 @@
   apply (blast intro: dvd_trans)
   done
 
-text{*
+text\<open>
 \begin{isabelle}
 proof\ (prove):\ step\ 3\isanewline
 \isanewline
@@ -141,7 +141,7 @@
 \ 1.\ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ k\ \isasymand \isanewline
 \ \ \ \ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ m\ \isasymand \ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ n
 \end{isabelle}
-*}
+\<close>
 
 
 lemma gcd_dvd_gcd_mult: "gcd m n dvd gcd (k*m) n"
--- a/src/Doc/Tutorial/Rules/Tacticals.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/Tacticals.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,6 +1,6 @@
 theory Tacticals imports Main begin
 
-text{*REPEAT*}
+text\<open>REPEAT\<close>
 lemma "\<lbrakk>P\<longrightarrow>Q; Q\<longrightarrow>R; R\<longrightarrow>S; P\<rbrakk> \<Longrightarrow> S"
 apply (drule mp, assumption)
 apply (drule mp, assumption)
@@ -11,32 +11,32 @@
 lemma "\<lbrakk>P\<longrightarrow>Q; Q\<longrightarrow>R; R\<longrightarrow>S; P\<rbrakk> \<Longrightarrow> S"
 by (drule mp, assumption)+
 
-text{*ORELSE with REPEAT*}
+text\<open>ORELSE with REPEAT\<close>
 lemma "\<lbrakk>Q\<longrightarrow>R; P\<longrightarrow>Q; x<5\<longrightarrow>P;  Suc x < 5\<rbrakk> \<Longrightarrow> R" 
 by (drule mp, (assumption|arith))+
 
-text{*exercise: what's going on here?*}
+text\<open>exercise: what's going on here?\<close>
 lemma "\<lbrakk>P\<and>Q\<longrightarrow>R; P\<longrightarrow>Q; P\<rbrakk> \<Longrightarrow> R"
 by (drule mp, (intro conjI)?, assumption+)+
 
-text{*defer and prefer*}
+text\<open>defer and prefer\<close>
 
 lemma "hard \<and> (P \<or> ~P) \<and> (Q\<longrightarrow>Q)"
-apply (intro conjI)   --{* @{subgoals[display,indent=0,margin=65]} *}
-defer 1   --{* @{subgoals[display,indent=0,margin=65]} *}
-apply blast+   --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (intro conjI)   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+defer 1   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+apply blast+   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 lemma "ok1 \<and> ok2 \<and> doubtful"
-apply (intro conjI)   --{* @{subgoals[display,indent=0,margin=65]} *}
-prefer 3   --{* @{subgoals[display,indent=0,margin=65]} *}
+apply (intro conjI)   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+prefer 3   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
 oops
 
 lemma "bigsubgoal1 \<and> bigsubgoal2 \<and> bigsubgoal3 \<and> bigsubgoal4 \<and> bigsubgoal5 \<and> bigsubgoal6"
-apply (intro conjI)   --{* @{subgoals[display,indent=0,margin=65]} *}
-txt{* @{subgoals[display,indent=0,margin=65]} 
+apply (intro conjI)   \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
+txt\<open>@{subgoals[display,indent=0,margin=65]} 
 A total of 6 subgoals...
-*}
+\<close>
 oops
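
The combinators nest as well; a small illustrative script in the same spirit
(the conjecture is arbitrary):

lemma "\<lbrakk>P; Q; R\<rbrakk> \<Longrightarrow> P \<and> (Q \<and> R)"
by ((rule conjI)?, assumption)+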
 
 
--- a/src/Doc/Tutorial/Rules/find2.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Rules/find2.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,7 +3,7 @@
 lemma "A \<and> B"
 (*>*)
 
-txt{*\index{finding theorems}\index{searching theorems} In
+txt\<open>\index{finding theorems}\index{searching theorems} In
 \S\ref{sec:find}, we introduced Proof General's \pgmenu{Find} button
 for finding theorems in the database via pattern matching. If we are
 inside a proof, we can be more specific; we can search for introduction,
@@ -16,12 +16,12 @@
 \texttt{intro}. You will be shown a few rules ending in @{text"\<Longrightarrow> ?P \<and> ?Q"},
 among them @{thm[source]conjI}\@. You may even discover that
 the very theorem you are trying to prove is already in the
-database.  Given the goal *}
+database.  Given the goal\<close>
 (*<*)
 oops
 lemma "A \<longrightarrow> A"
 (*>*)
-txt{*\vspace{-\bigskipamount}
+txt\<open>\vspace{-\bigskipamount}
 @{subgoals[display,indent=0,margin=65]}
 the search for \texttt{intro} finds not just @{thm[source] impI}
 but also @{thm[source] imp_refl}: @{thm imp_refl}.
@@ -36,7 +36,7 @@
 Searching for elimination and destruction rules via \texttt{elim} and
 \texttt{dest} is analogous to \texttt{intro} but takes the assumptions
 into account, too.
-*}
+\<close>
 (*<*)
 oops
 end
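
Outside Proof General, the same searches are available via the
@{text find_theorems} command; a sketch (any open goal will do):

lemma "A \<and> B"
find_theorems intro
oops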
--- a/src/Doc/Tutorial/Sets/Examples.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Examples.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,14 +2,14 @@
 
 declare [[eta_contract = false]]
 
-text{*membership, intersection *}
-text{*difference and empty set*}
-text{*complement, union and universal set*}
+text\<open>membership, intersection\<close>
+text\<open>difference and empty set\<close>
+text\<open>complement, union and universal set\<close>
 
 lemma "(x \<in> A \<inter> B) = (x \<in> A \<and> x \<in> B)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] IntI[no_vars]}
 \rulename{IntI}
 
@@ -18,60 +18,60 @@
 
 @{thm[display] IntD2[no_vars]}
 \rulename{IntD2}
-*}
+\<close>
 
 lemma "(x \<in> -A) = (x \<notin> A)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Compl_iff[no_vars]}
 \rulename{Compl_iff}
-*}
+\<close>
 
 lemma "- (A \<union> B) = -A \<inter> -B"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Compl_Un[no_vars]}
 \rulename{Compl_Un}
-*}
+\<close>
 
 lemma "A-A = {}"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Diff_disjoint[no_vars]}
 \rulename{Diff_disjoint}
-*}
+\<close>
 
 
 
 lemma "A \<union> -A = UNIV"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Compl_partition[no_vars]}
 \rulename{Compl_partition}
-*}
+\<close>
 
-text{*subset relation*}
+text\<open>subset relation\<close>
 
 
-text{*
+text\<open>
 @{thm[display] subsetI[no_vars]}
 \rulename{subsetI}
 
 @{thm[display] subsetD[no_vars]}
 \rulename{subsetD}
-*}
+\<close>
 
 lemma "((A \<union> B) \<subseteq> C) = (A \<subseteq> C \<and> B \<subseteq> C)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Un_subset_iff[no_vars]}
 \rulename{Un_subset_iff}
-*}
+\<close>
 
 lemma "(A \<subseteq> -B) = (B \<subseteq> -A)"
 by blast
@@ -79,19 +79,19 @@
 lemma "(A <= -B) = (B <= -A)"
   oops
 
-text{*ASCII version: blast fails because of overloading because
- it doesn't have to be sets*}
+text\<open>ASCII version: blast fails because of overloading, since
+ @{text"<="} need not denote set inclusion here\<close>
 
 lemma "((A:: 'a set) <= -B) = (B <= -A)"
 by blast
 
-text{*A type constraint lets it work*}
+text\<open>A type constraint lets it work\<close>
 
-text{*An issue here: how do we discuss the distinction between ASCII and
-symbol notation?  Here the latter disambiguates.*}
+text\<open>An issue here: how do we discuss the distinction between ASCII and
+symbol notation?  Here the latter disambiguates.\<close>
 
 
-text{*
+text\<open>
 set extensionality
 
 @{thm[display] set_eqI[no_vars]}
@@ -102,19 +102,19 @@
 
 @{thm[display] equalityE[no_vars]}
 \rulename{equalityE}
-*}
+\<close>
 
 
-text{*finite sets: insertion and membership relation*}
-text{*finite set notation*}
+text\<open>finite sets: insertion and membership relation\<close>
+text\<open>finite set notation\<close>
 
 lemma "insert x A = {x} \<union> A"
 by blast
 
-text{*
+text\<open>
 @{thm[display] insert_is_Un[no_vars]}
 \rulename{insert_is_Un}
-*}
+\<close>
 
 lemma "{a,b} \<union> {c,d} = {a,b,c,d}"
 by blast
@@ -122,31 +122,31 @@
 lemma "{a,b} \<inter> {b,c} = {b}"
 apply auto
 oops
-text{*fails because it isn't valid*}
+text\<open>fails because it isn't valid\<close>
 
 lemma "{a,b} \<inter> {b,c} = (if a=c then {a,b} else {b})"
 apply simp
 by blast
 
-text{*or just force or auto.  blast alone can't handle the if-then-else*}
+text\<open>or just force or auto.  blast alone can't handle the if-then-else\<close>
 
-text{*next: some comprehension examples*}
+text\<open>next: some comprehension examples\<close>
 
 lemma "(a \<in> {z. P z}) = P a"
 by blast
 
-text{*
+text\<open>
 @{thm[display] mem_Collect_eq[no_vars]}
 \rulename{mem_Collect_eq}
-*}
+\<close>
 
 lemma "{x. x \<in> A} = A"
 by blast
 
-text{*
+text\<open>
 @{thm[display] Collect_mem_eq[no_vars]}
 \rulename{Collect_mem_eq}
-*}
+\<close>
 
 lemma "{x. P x \<or> x \<in> A} = {x. P x} \<union> A"
 by blast
@@ -161,50 +161,50 @@
        {z. \<exists>p q. z = p*q \<and> p\<in>prime \<and> q\<in>prime}"
 by (rule refl)
 
-text{*binders*}
+text\<open>binders\<close>
 
-text{*bounded quantifiers*}
+text\<open>bounded quantifiers\<close>
 
 lemma "(\<exists>x\<in>A. P x) = (\<exists>x. x\<in>A \<and> P x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] bexI[no_vars]}
 \rulename{bexI}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] bexE[no_vars]}
 \rulename{bexE}
-*}
+\<close>
 
 lemma "(\<forall>x\<in>A. P x) = (\<forall>x. x\<in>A \<longrightarrow> P x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] ballI[no_vars]}
 \rulename{ballI}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] bspec[no_vars]}
 \rulename{bspec}
-*}
+\<close>
 
-text{*indexed unions and variations*}
+text\<open>indexed unions and variations\<close>
 
 lemma "(\<Union>x. B x) = (\<Union>x\<in>UNIV. B x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] UN_iff[no_vars]}
 \rulename{UN_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Union_iff[no_vars]}
 \rulename{Union_iff}
-*}
+\<close>
 
 lemma "(\<Union>x\<in>A. B x) = {y. \<exists>x\<in>A. y \<in> B x}"
 by blast
@@ -212,35 +212,35 @@
 lemma "\<Union>S = (\<Union>x\<in>S. x)"
 by blast
 
-text{*
+text\<open>
 @{thm[display] UN_I[no_vars]}
 \rulename{UN_I}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] UN_E[no_vars]}
 \rulename{UN_E}
-*}
+\<close>
 
-text{*indexed intersections*}
+text\<open>indexed intersections\<close>
 
 lemma "(\<Inter>x. B x) = {y. \<forall>x. y \<in> B x}"
 by blast
 
-text{*
+text\<open>
 @{thm[display] INT_iff[no_vars]}
 \rulename{INT_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Inter_iff[no_vars]}
 \rulename{Inter_iff}
-*}
+\<close>
 
-text{*mention also card, Pow, etc.*}
+text\<open>mention also card, Pow, etc.\<close>
 
 
-text{*
+text\<open>
 @{thm[display] card_Un_Int[no_vars]}
 \rulename{card_Un_Int}
 
@@ -249,6 +249,6 @@
 
 @{thm[display] n_subsets[no_vars]}
 \rulename{n_subsets}
-*}
+\<close>
 
 end
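
One more exercise of the same kind, combining bounded quantifiers with union
(an illustrative extra, not one of the displayed rules):

lemma "(\<forall>x\<in>A \<union> B. P x) = ((\<forall>x\<in>A. P x) \<and> (\<forall>x\<in>B. P x))"
by blast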
--- a/src/Doc/Tutorial/Sets/Functions.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Functions.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,7 +1,7 @@
 theory Functions imports Main begin
 
 
-text{*
+text\<open>
 @{thm[display] id_def[no_vars]}
 \rulename{id_def}
 
@@ -10,18 +10,18 @@
 
 @{thm[display] o_assoc[no_vars]}
 \rulename{o_assoc}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] fun_upd_apply[no_vars]}
 \rulename{fun_upd_apply}
 
 @{thm[display] fun_upd_upd[no_vars]}
 \rulename{fun_upd_upd}
-*}
+\<close>
 
 
-text{*
+text\<open>
 definitions of injective, surjective, bijective
 
 @{thm[display] inj_on_def[no_vars]}
@@ -32,15 +32,15 @@
 
 @{thm[display] bij_def[no_vars]}
 \rulename{bij_def}
-*}
+\<close>
 
 
 
-text{*
+text\<open>
 possibly interesting theorems about inv
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] inv_f_f[no_vars]}
 \rulename{inv_f_f}
 
@@ -61,9 +61,9 @@
 
 @{thm[display] o_inv_distrib[no_vars]}
 \rulename{o_inv_distrib}
-*}
+\<close>
 
-text{*
+text\<open>
 small sample proof
 
 @{thm[display] ext[no_vars]}
@@ -71,35 +71,35 @@
 
 @{thm[display] fun_eq_iff[no_vars]}
 \rulename{fun_eq_iff}
-*}
+\<close>
 
 lemma "inj f \<Longrightarrow> (f o g = f o h) = (g = h)"
   apply (simp add: fun_eq_iff inj_on_def)
   apply (auto)
   done
 
-text{*
+text\<open>
 \begin{isabelle}
 inj\ f\ \isasymLongrightarrow \ (f\ \isasymcirc \ g\ =\ f\ \isasymcirc \ h)\ =\ (g\ =\ h)\isanewline
 \ 1.\ \isasymforall x\ y.\ f\ x\ =\ f\ y\ \isasymlongrightarrow \ x\ =\ y\ \isasymLongrightarrow \isanewline
 \ \ \ \ (\isasymforall x.\ f\ (g\ x)\ =\ f\ (h\ x))\ =\ (\isasymforall x.\ g\ x\ =\ h\ x)
 \end{isabelle}
-*}
+\<close>
  
 
-text{*image, inverse image*}
+text\<open>image, inverse image\<close>
 
-text{*
+text\<open>
 @{thm[display] image_def[no_vars]}
 \rulename{image_def}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] image_Un[no_vars]}
 \rulename{image_Un}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] image_comp[no_vars]}
 \rulename{image_comp}
 
@@ -108,12 +108,12 @@
 
 @{thm[display] bij_image_Compl_eq[no_vars]}
 \rulename{bij_image_Compl_eq}
-*}
+\<close>
 
 
-text{*
+text\<open>
 illustrates Union as well as image
-*}
+\<close>
 
 lemma "f`A \<union> g`A = (\<Union>x\<in>A. {f x, g x})"
 by blast
@@ -121,23 +121,23 @@
 lemma "f ` {(x,y). P x y} = {f(x,y) | x y. P x y}"
 by blast
 
-text{*actually a macro!*}
+text\<open>actually a macro!\<close>
 
 lemma "range f = f`UNIV"
 by blast
 
 
-text{*
+text\<open>
 inverse image
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] vimage_def[no_vars]}
 \rulename{vimage_def}
 
 @{thm[display] vimage_Compl[no_vars]}
 \rulename{vimage_Compl}
-*}
+\<close>
 
 
 end
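
A small sketch exercising one of the displayed image laws directly:

lemma "f ` (A \<union> B) = f ` A \<union> f ` B"
by (rule image_Un)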
--- a/src/Doc/Tutorial/Sets/Recur.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Recur.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,7 +1,7 @@
 theory Recur imports Main begin
 
 
-text{*
+text\<open>
 @{thm[display] mono_def[no_vars]}
 \rulename{mono_def}
 
@@ -22,9 +22,9 @@
 
 @{thm[display] coinduct[no_vars]}
 \rulename{coinduct}
-*}
+\<close>
 
-text{*\noindent
+text\<open>\noindent
 A relation $<$ is
 \bfindex{wellfounded} if it has no infinite descending chain $\cdots <
 a_2 < a_1 < a_0$. Clearly, a function definition is total iff the set
@@ -43,9 +43,9 @@
 @{thm[display]wf_induct[no_vars]}
 where @{term"wf r"} means that the relation @{term r} is wellfounded
 
-*}
+\<close>
 
-text{*
+text\<open>
 
 @{thm[display] wf_induct[no_vars]}
 \rulename{wf_induct}
@@ -74,7 +74,7 @@
 @{thm[display] wf_lex_prod[no_vars]}
 \rulename{wf_lex_prod}
 
-*}
+\<close>
 
 end
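
A minimal sketch of how the displayed rules combine in practice, assuming the
library fact @{thm[source]wf_measure}, @{text"wf (measure f)"}:

lemma "wf (measure f <*lex*> measure g)"
by (intro wf_lex_prod wf_measure)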
 
--- a/src/Doc/Tutorial/Sets/Relations.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Sets/Relations.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -3,57 +3,57 @@
 (*Id is only used in UNITY*)
 (*refl, antisym,trans,univalent,\<dots> ho hum*)
 
-text{*
+text\<open>
 @{thm[display] Id_def[no_vars]}
 \rulename{Id_def}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] relcomp_unfold[no_vars]}
 \rulename{relcomp_unfold}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] R_O_Id[no_vars]}
 \rulename{R_O_Id}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] relcomp_mono[no_vars]}
 \rulename{relcomp_mono}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] converse_iff[no_vars]}
 \rulename{converse_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] converse_relcomp[no_vars]}
 \rulename{converse_relcomp}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Image_iff[no_vars]}
 \rulename{Image_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Image_UN[no_vars]}
 \rulename{Image_UN}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Domain_iff[no_vars]}
 \rulename{Domain_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] Range_iff[no_vars]}
 \rulename{Range_iff}
-*}
+\<close>
 
-text{*
+text\<open>
 @{thm[display] relpow.simps[no_vars]}
 \rulename{relpow.simps}
 
@@ -83,15 +83,15 @@
 
 @{thm[display] trancl_converse[no_vars]}
 \rulename{trancl_converse}
-*}
+\<close>
 
-text{*Relations.  transitive closure*}
+text\<open>Relations.  transitive closure\<close>
 
 lemma rtrancl_converseD: "(x,y) \<in> (r\<inverse>)\<^sup>* \<Longrightarrow> (y,x) \<in> r\<^sup>*"
 apply (erule rtrancl_induct)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
  apply (rule rtrancl_refl)
 apply (blast intro: rtrancl_trans)
 done
@@ -108,44 +108,44 @@
 
 lemma rtrancl_converse: "(r\<inverse>)\<^sup>* = (r\<^sup>*)\<inverse>"
 apply (intro equalityI subsetI)
-txt{*
+txt\<open>
 after intro rules
 
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply clarify
-txt{*
+txt\<open>
 after splitting
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 oops
 
 
 lemma "(\<forall>u v. (u,v) \<in> A \<longrightarrow> u=v) \<Longrightarrow> A \<subseteq> Id"
 apply (rule subsetI)
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 after subsetI
-*}
+\<close>
 apply clarify
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,margin=65]}
 
 subgoals after clarify
-*}
+\<close>
 by blast
 
 
 
 
-text{*rejects*}
+text\<open>rejects\<close>
 
 lemma "(a \<in> {z. P z} \<union> {y. Q y}) = P a \<or> Q a"
 apply (blast)
 done
 
-text{*Pow, Inter too little used*}
+text\<open>Pow, Inter too little used\<close>
 
 lemma "(A \<subset> B) = (A \<subseteq> B \<and> A \<noteq> B)"
 apply (simp add: psubset_eq)
--- a/src/Doc/Tutorial/ToyList/ToyList.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,12 +2,12 @@
 imports Main
 begin
 
-text{*\noindent
+text\<open>\noindent
 HOL already has a predefined theory of lists called @{text List} ---
 @{text ToyList} is merely a small fragment of it chosen as an example.
 To avoid some ambiguities caused by defining lists twice, we manipulate
 the concrete syntax and name space of theory @{theory Main} as follows.
-*}
+\<close>
 
 no_notation Nil ("[]") and Cons (infixr "#" 65) and append (infixr "@" 65)
 hide_type list
@@ -16,7 +16,7 @@
 datatype 'a list = Nil                          ("[]")
                  | Cons 'a "'a list"            (infixr "#" 65)
 
-text{*\noindent
+text\<open>\noindent
 The datatype\index{datatype@\isacommand {datatype} (command)}
 \tydx{list} introduces two
 constructors \cdx{Nil} and \cdx{Cons}, the
@@ -45,7 +45,7 @@
 \end{warn}
 Next, two functions @{text"app"} and \cdx{rev} are defined recursively,
 in this order, because Isabelle insists on definition before use:
-*}
+\<close>
 
 primrec app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixr "@" 65) where
 "[] @ ys       = ys" |
@@ -55,7 +55,7 @@
 "rev []        = []" |
 "rev (x # xs)  = (rev xs) @ (x # [])"
 
-text{*\noindent
+text\<open>\noindent
 Each function definition is of the form
 \begin{center}
 \isacommand{primrec} \textit{name} @{text"::"} \textit{type} \textit{(optional syntax)} \isakeyword{where} \textit{equations}
@@ -116,17 +116,17 @@
 \texttt{ToyList} presented so far, you may want to test your
 functions by running them. For example, what is the value of
 @{term"rev(True#False#[])"}? Command
-*}
+\<close>
 
 value "rev (True # False # [])"
 
-text{* \noindent yields the correct result @{term"False # True # []"}.
+text\<open>\noindent yields the correct result @{term"False # True # []"}.
 But we can go beyond mere functional programming and evaluate terms with
-variables in them, executing functions symbolically: *}
+variables in them, executing functions symbolically:\<close>
 
 value "rev (a # b # c # [])"
 
-text{*\noindent yields @{term"c # b # a # []"}.
+text\<open>\noindent yields @{term"c # b # a # []"}.
 
 \section{An Introductory Proof}
 \label{sec:intro-proof}
@@ -140,11 +140,11 @@
 
 Our goal is to show that reversing a list twice produces the original
 list.
-*}
+\<close>
 
 theorem rev_rev [simp]: "rev(rev xs) = xs"
 
-txt{*\index{theorem@\isacommand {theorem} (command)|bold}%
+txt\<open>\index{theorem@\isacommand {theorem} (command)|bold}%
 \noindent
 This \isacommand{theorem} command does several things:
 \begin{itemize}
@@ -179,11 +179,11 @@
 Let us now get back to @{prop"rev(rev xs) = xs"}. Properties of recursively
 defined functions are best established by induction. In this case there is
 nothing obvious except induction on @{term"xs"}:
-*}
+\<close>
 
 apply(induct_tac xs)
 
-txt{*\noindent\index{*induct_tac (method)}%
+txt\<open>\noindent\index{*induct_tac (method)}%
 This tells Isabelle to perform induction on variable @{term"xs"}. The suffix
 @{term"tac"} stands for \textbf{tactic},\index{tactics}
 a synonym for ``theorem proving function''.
@@ -211,11 +211,11 @@
 \indexboldpos{\isasymrbrakk}{$Isabrr} and separated by semicolons.
 
 Let us try to solve both goals automatically:
-*}
+\<close>
 
 apply(auto)
 
-txt{*\noindent
+txt\<open>\noindent
 This command tells Isabelle to apply a proof strategy called
 @{text"auto"} to all subgoals. Essentially, @{text"auto"} tries to
 simplify the subgoals.  In our case, subgoal~1 is solved completely (thanks
@@ -223,22 +223,22 @@
 of subgoal~2 becomes the new subgoal~1:
 @{subgoals[display,indent=0,margin=70]}
 In order to simplify this subgoal further, a lemma suggests itself.
-*}
+\<close>
 (*<*)
 oops
 (*>*)
 
-subsubsection{*First Lemma*}
+subsubsection\<open>First Lemma\<close>
 
-text{*
+text\<open>
 \indexbold{abandoning a proof}\indexbold{proofs!abandoning}
 After abandoning the above proof attempt (at the shell level type
 \commdx{oops}) we start a new proof:
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(xs @ ys) = (rev ys) @ (rev xs)"
 
-txt{*\noindent The keywords \commdx{theorem} and
+txt\<open>\noindent The keywords \commdx{theorem} and
 \commdx{lemma} are interchangeable and merely indicate
 the importance we attach to a proposition.  Therefore we use the words
 \emph{theorem} and \emph{lemma} pretty much interchangeably, too.
@@ -246,46 +246,46 @@
 There are two variables that we could induct on: @{term"xs"} and
 @{term"ys"}. Because @{text"@"} is defined by recursion on
 the first argument, @{term"xs"} is the correct one:
-*}
+\<close>
 
 apply(induct_tac xs)
 
-txt{*\noindent
+txt\<open>\noindent
 This time not even the base case is solved automatically:
-*}
+\<close>
 
 apply(auto)
 
-txt{*
+txt\<open>
 @{subgoals[display,indent=0,goals_limit=1]}
 Again, we need to abandon this proof attempt and prove another simple lemma
 first. In the future the step of abandoning an incomplete proof before
 embarking on the proof of a lemma usually remains implicit.
-*}
+\<close>
 (*<*)
 oops
 (*>*)
 
-subsubsection{*Second Lemma*}
+subsubsection\<open>Second Lemma\<close>
 
-text{*
+text\<open>
 We again try the canonical proof procedure:
-*}
+\<close>
 
 lemma app_Nil2 [simp]: "xs @ [] = xs"
 apply(induct_tac xs)
 apply(auto)
 
-txt{*
+txt\<open>
 \noindent
 It works, yielding the desired message @{text"No subgoals!"}:
 @{goals[display,indent=0]}
 We still need to confirm that the proof is now finished:
-*}
+\<close>
 
 done
 
-text{*\noindent
+text\<open>\noindent
 As a result of that final \commdx{done}, Isabelle associates the lemma just proved
 with its name. In this tutorial, we sometimes omit to show that final \isacommand{done}
 if it is obvious from the context that the proof is finished.
@@ -298,13 +298,13 @@
 \S\ref{sec:variables}.
 
 Going back to the proof of the first lemma
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(xs @ ys) = (rev ys) @ (rev xs)"
 apply(induct_tac xs)
 apply(auto)
 
-txt{*
+txt\<open>
 \noindent
 we find that this time @{text"auto"} solves the base case, but the
 induction step merely simplifies to
@@ -316,44 +316,44 @@
 ~~~~~(rev~ys~@~rev~list)~@~(a~\#~[])~=~rev~ys~@~(rev~list~@~(a~\#~[]))
 \end{isabelle}
 and the missing lemma is associativity of @{text"@"}.
-*}
+\<close>
 (*<*)oops(*>*)
 
-subsubsection{*Third Lemma*}
+subsubsection\<open>Third Lemma\<close>
 
-text{*
+text\<open>
 Abandoning the previous attempt, the canonical proof procedure
 succeeds without further ado.
-*}
+\<close>
 
 lemma app_assoc [simp]: "(xs @ ys) @ zs = xs @ (ys @ zs)"
 apply(induct_tac xs)
 apply(auto)
 done
 
-text{*
+text\<open>
 \noindent
 Now we can prove the first lemma:
-*}
+\<close>
 
 lemma rev_app [simp]: "rev(xs @ ys) = (rev ys) @ (rev xs)"
 apply(induct_tac xs)
 apply(auto)
 done
 
-text{*\noindent
+text\<open>\noindent
 Finally, we prove our main theorem:
-*}
+\<close>
 
 theorem rev_rev [simp]: "rev(rev xs) = xs"
 apply(induct_tac xs)
 apply(auto)
 done
 
-text{*\noindent
+text\<open>\noindent
 The final \commdx{end} tells Isabelle to close the current theory because
 we are finished with its development:%
 \index{*rev (constant)|)}\index{append function|)}
-*}
+\<close>
 
 end
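
Since @{thm[source]rev_rev} is now a simplification rule, further consequences
follow without a fresh induction; a small illustrative corollary (to be placed
before the final \commdx{end}):

lemma "rev(rev(rev xs)) = rev xs"
apply(auto)
done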
--- a/src/Doc/Tutorial/ToyList/ToyList_Test.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList_Test.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -2,12 +2,12 @@
 imports Main
 begin
 
-ML {*
+ML \<open>
   let val text =
     map (File.read o Path.append (Resources.master_directory @{theory}) o Path.explode)
       ["ToyList1.txt", "ToyList2.txt"]
     |> implode
   in Thy_Info.script_thy Position.start text @{theory} end
-*}
+\<close>
 
 end
--- a/src/Doc/Tutorial/Trie/Trie.thy	Thu Jan 11 13:48:17 2018 +0100
+++ b/src/Doc/Tutorial/Trie/Trie.thy	Fri Jan 12 14:08:53 2018 +0100
@@ -1,45 +1,45 @@
 (*<*)
 theory Trie imports Main begin
 (*>*)
-text{*
+text\<open>
 To minimize running time, each node of a trie should contain an array that maps
 letters to subtries. We have chosen a
 representation where the subtries are held in an association list, i.e.\ a
 list of (letter,trie) pairs.  Abstracting over the alphabet @{typ"'a"} and the
 values @{typ"'v"} we define a trie as follows:
-*}
+\<close>
 
 datatype ('a,'v)trie = Trie  "'v option"  "('a * ('a,'v)trie)list"
 
-text{*\noindent
+text\<open>\noindent
 \index{datatypes!and nested recursion}%
 The first component is the optional value, the second component the
 association list of subtries.  This is an example of nested recursion involving products,
 which is fine because products are datatypes as well.
 We define two selector functions:
-*}
+\<close>
 
 primrec "value" :: "('a,'v)trie \<Rightarrow> 'v option" where
 "value(Trie ov al) = ov"
 primrec alist :: "('a,'v)trie \<Rightarrow> ('a * ('a,'v)trie)list" where
 "alist(Trie ov al) = al"
 
-text{*\noindent
+text\<open>\noindent
 Association lists come with a generic lookup function.  Its result
 involves type @{text option} because a lookup can fail:
-*}
+\<close>
 
 primrec assoc :: "('key * 'val)list \<Rightarrow> 'key \<Rightarrow> 'val option" where
 "assoc [] x = None" |
 "assoc (p#ps) x =
    (let (a,b) = p in if a=x then Some b else assoc ps x)"
 
-text{*
+text\<open>
 Now we can define the lookup function for tries. It descends into the trie
 examining the letters of the search string one by one. As
 recursion on lists is simpler than on tries, let us express this as primitive
 recursion on the search string argument:
-*}
+\<close>
 
 primrec lookup :: "('a,'v)trie \<Rightarrow> 'a list \<Rightarrow> 'v option" where
 "lookup t [] = value t" |
@@ -47,21 +47,21 @@
                       None \<Rightarrow> None
                     | Some at \<Rightarrow> lookup at as)"
 
-text{*
+text\<open>
 As a first simple property we prove that looking up a string in the empty
 trie @{term"Trie None []"} always returns @{const None}. The proof merely
 distinguishes the two cases whether the search string is empty or not:
-*}
+\<close>
 
 lemma [simp]: "lookup (Trie None []) as = None"
 apply(case_tac as, simp_all)
 done
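
Conversely, a successful lookup on a one-entry trie can be checked just as
directly; a sketch (@{text Let_def} is supplied defensively here, anticipating
the simplifier setup introduced below):

lemma "lookup (Trie None [(a, Trie (Some v) [])]) [a] = Some v"
apply(simp add: Let_def)
done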
 
-text{*
+text\<open>
 Things begin to get interesting with the definition of an update function
 that adds a new (string, value) pair to a trie, overwriting the old value
 associated with that string:
-*}
+\<close>
 
 primrec update:: "('a,'v)trie \<Rightarrow> 'a list \<Rightarrow> 'v \<Rightarrow> ('a,'v)trie" where
 "update t []     v = Trie (Some v) (alist t)" |
@@ -70,7 +70,7 @@
                 None \<Rightarrow> Trie None [] | Some at \<Rightarrow> at)
     in Trie (value t) ((a,update tt as v) # alist t))"
 
-text{*\noindent
+text\<open>\noindent
 The base case is obvious. In the recursive case the subtrie
 @{term tt} associated with the first letter @{term a} is extracted,
 recursively updated, and then placed in front of the association list.
@@ -81,23 +81,23 @@
 Before we start on any proofs about @{const update} we tell the simplifier to
 expand all @{text let}s and to split all @{text case}-constructs over
 options:
-*}
+\<close>
 
 declare Let_def[simp] option.split[split]
 
-text{*\noindent
+text\<open>\noindent
 The reason becomes clear when looking (probably after a failed proof
 attempt) at the body of @{const update}: it contains both
 @{text let} and a case distinction over type @{text option}.
 
 Our main goal is to prove the correct interaction of @{const update} and
 @{const lookup}:
-*}