isabelle update_cartouches -c -t;
author wenzelm
date Thu, 26 May 2016 17:51:22 +0200
changeset 63167 0909deb8059b
parent 63166 143f58bb34f9
child 63168 466177e5736c
isabelle update_cartouches -c -t;
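
This changeset was produced by the Isabelle maintenance tool named in the description, which rewrites theory sources to use cartouche delimiters. Judging from the hunks below, -t replaces @{text "..."} antiquotations by \<open>...\<close>, -c replaces the old comment marker -- by \<comment>, and {* ... *} delimiters of section/text commands become cartouches as well. A minimal before/after sketch of the rewrite, assembled from lines that occur in the hunks below:

    before:  section {* Cardinal Arithmetic *}
             text \<open>generalizes @{text subgroup_mult_id}\<close>
             --"variant"

    after:   section \<open>Cardinal Arithmetic\<close>
             text \<open>generalizes \<open>subgroup_mult_id\<close>\<close>
             \<comment>"variant"
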
src/HOL/Algebra/AbelCoset.thy
src/HOL/Algebra/Bij.thy
src/HOL/Algebra/Congruence.thy
src/HOL/Algebra/Coset.thy
src/HOL/Algebra/Divisibility.thy
src/HOL/Algebra/FiniteProduct.thy
src/HOL/Algebra/Group.thy
src/HOL/Algebra/Ideal.thy
src/HOL/Algebra/IntRing.thy
src/HOL/Algebra/Lattice.thy
src/HOL/Algebra/QuotRing.thy
src/HOL/Algebra/Ring.thy
src/HOL/Algebra/RingHom.thy
src/HOL/Algebra/Sylow.thy
src/HOL/Algebra/UnivPoly.thy
src/HOL/Cardinals/Bounded_Set.thy
src/HOL/Cardinals/Cardinal_Arithmetic.thy
src/HOL/Cardinals/Cardinal_Order_Relation.thy
src/HOL/Cardinals/Cardinals.thy
src/HOL/Cardinals/Fun_More.thy
src/HOL/Cardinals/Order_Relation_More.thy
src/HOL/Cardinals/Order_Union.thy
src/HOL/Cardinals/Ordinal_Arithmetic.thy
src/HOL/Cardinals/Wellfounded_More.thy
src/HOL/Cardinals/Wellorder_Constructions.thy
src/HOL/Cardinals/Wellorder_Embedding.thy
src/HOL/Cardinals/Wellorder_Extension.thy
src/HOL/Cardinals/Wellorder_Relation.thy
src/HOL/Codegenerator_Test/Candidates.thy
src/HOL/Codegenerator_Test/Code_Test_GHC.thy
src/HOL/Codegenerator_Test/Code_Test_OCaml.thy
src/HOL/Codegenerator_Test/Code_Test_Scala.thy
src/HOL/Codegenerator_Test/Generate.thy
src/HOL/Codegenerator_Test/Generate_Binary_Nat.thy
src/HOL/Codegenerator_Test/Generate_Efficient_Datastructures.thy
src/HOL/Codegenerator_Test/Generate_Pretty_Char.thy
src/HOL/Codegenerator_Test/Generate_Target_Nat.thy
src/HOL/Corec_Examples/LFilter.thy
src/HOL/Datatype_Examples/Compat.thy
src/HOL/Datatype_Examples/Derivation_Trees/DTree.thy
src/HOL/Datatype_Examples/Derivation_Trees/Gram_Lang.thy
src/HOL/Datatype_Examples/Derivation_Trees/Parallel.thy
src/HOL/Datatype_Examples/Derivation_Trees/Prelim.thy
src/HOL/Datatype_Examples/Koenig.thy
src/HOL/Datatype_Examples/Lambda_Term.thy
src/HOL/Datatype_Examples/Lift_BNF.thy
src/HOL/Datatype_Examples/Misc_Codatatype.thy
src/HOL/Datatype_Examples/Misc_Datatype.thy
src/HOL/Datatype_Examples/Misc_Primcorec.thy
src/HOL/Datatype_Examples/Misc_Primrec.thy
src/HOL/Datatype_Examples/Process.thy
src/HOL/Datatype_Examples/Stream_Processor.thy
src/HOL/Datatype_Examples/TreeFI.thy
src/HOL/Datatype_Examples/TreeFsetI.thy
src/HOL/IMPP/Com.thy
src/HOL/IMPP/EvenOdd.thy
src/HOL/IMPP/Hoare.thy
src/HOL/IMPP/Misc.thy
src/HOL/IMPP/Natural.thy
src/HOL/IOA/Asig.thy
src/HOL/IOA/IOA.thy
src/HOL/IOA/Solve.thy
src/HOL/Imperative_HOL/Array.thy
src/HOL/Imperative_HOL/Heap.thy
src/HOL/Imperative_HOL/Heap_Monad.thy
src/HOL/Imperative_HOL/Imperative_HOL.thy
src/HOL/Imperative_HOL/Imperative_HOL_ex.thy
src/HOL/Imperative_HOL/Overview.thy
src/HOL/Imperative_HOL/Ref.thy
src/HOL/Imperative_HOL/ex/Imperative_Quicksort.thy
src/HOL/Imperative_HOL/ex/Imperative_Reverse.thy
src/HOL/Imperative_HOL/ex/Linked_Lists.thy
src/HOL/Imperative_HOL/ex/List_Sublist.thy
src/HOL/Imperative_HOL/ex/SatChecker.thy
src/HOL/Imperative_HOL/ex/Sorted_List.thy
src/HOL/Imperative_HOL/ex/Subarray.thy
src/HOL/Import/HOL_Light_Import.thy
src/HOL/Import/HOL_Light_Maps.thy
src/HOL/Import/Import_Setup.thy
src/HOL/Induct/ABexp.thy
src/HOL/Induct/Com.thy
src/HOL/Induct/Common_Patterns.thy
src/HOL/Induct/PropLog.thy
src/HOL/Induct/QuoDataType.thy
src/HOL/Induct/QuoNestedDataType.thy
src/HOL/Induct/Sigma_Algebra.thy
src/HOL/Matrix_LP/ComputeFloat.thy
src/HOL/Matrix_LP/ComputeHOL.thy
src/HOL/Matrix_LP/Matrix.thy
src/HOL/Metis_Examples/Abstraction.thy
src/HOL/Metis_Examples/Big_O.thy
src/HOL/Metis_Examples/Binary_Tree.thy
src/HOL/Metis_Examples/Clausification.thy
src/HOL/Metis_Examples/Message.thy
src/HOL/Metis_Examples/Proxies.thy
src/HOL/Metis_Examples/Sets.thy
src/HOL/Metis_Examples/Tarski.thy
src/HOL/Metis_Examples/Trans_Closure.thy
src/HOL/Metis_Examples/Type_Encodings.thy
src/HOL/Mirabelle/Mirabelle.thy
src/HOL/Mirabelle/Mirabelle_Test.thy
src/HOL/Mirabelle/ex/Ex.thy
src/HOL/Mutabelle/MutabelleExtra.thy
src/HOL/NanoJava/AxSem.thy
src/HOL/NanoJava/Decl.thy
src/HOL/NanoJava/Equivalence.thy
src/HOL/NanoJava/Example.thy
src/HOL/NanoJava/State.thy
src/HOL/NanoJava/Term.thy
src/HOL/NanoJava/TypeRel.thy
src/HOL/Nitpick_Examples/Core_Nits.thy
src/HOL/Nitpick_Examples/Datatype_Nits.thy
src/HOL/Nitpick_Examples/Hotel_Nits.thy
src/HOL/Nitpick_Examples/Induct_Nits.thy
src/HOL/Nitpick_Examples/Integer_Nits.thy
src/HOL/Nitpick_Examples/Manual_Nits.thy
src/HOL/Nitpick_Examples/Mini_Nits.thy
src/HOL/Nitpick_Examples/Mono_Nits.thy
src/HOL/Nitpick_Examples/Pattern_Nits.thy
src/HOL/Nitpick_Examples/Record_Nits.thy
src/HOL/Nitpick_Examples/Refute_Nits.thy
src/HOL/Nitpick_Examples/Special_Nits.thy
src/HOL/Nitpick_Examples/Tests_Nits.thy
src/HOL/Nitpick_Examples/Typedef_Nits.thy
src/HOL/Nominal/Examples/CK_Machine.thy
src/HOL/Nominal/Examples/CR.thy
src/HOL/Nominal/Examples/CR_Takahashi.thy
src/HOL/Nominal/Examples/Class1.thy
src/HOL/Nominal/Examples/Class2.thy
src/HOL/Nominal/Examples/Class3.thy
src/HOL/Nominal/Examples/Compile.thy
src/HOL/Nominal/Examples/Contexts.thy
src/HOL/Nominal/Examples/Crary.thy
src/HOL/Nominal/Examples/Fsub.thy
src/HOL/Nominal/Examples/Height.thy
src/HOL/Nominal/Examples/Lam_Funs.thy
src/HOL/Nominal/Examples/Lambda_mu.thy
src/HOL/Nominal/Examples/LocalWeakening.thy
src/HOL/Nominal/Examples/Pattern.thy
src/HOL/Nominal/Examples/SN.thy
src/HOL/Nominal/Examples/SOS.thy
src/HOL/Nominal/Examples/Standardization.thy
src/HOL/Nominal/Examples/Support.thy
src/HOL/Nominal/Examples/Type_Preservation.thy
src/HOL/Nominal/Examples/VC_Condition.thy
src/HOL/Nominal/Examples/W.thy
src/HOL/Nominal/Examples/Weakening.thy
src/HOL/Nominal/Nominal.thy
src/HOL/Number_Theory/Cong.thy
src/HOL/Number_Theory/Euclidean_Algorithm.thy
src/HOL/Number_Theory/Fib.thy
src/HOL/Number_Theory/Residues.thy
src/HOL/Old_Number_Theory/Chinese.thy
src/HOL/Old_Number_Theory/EulerFermat.thy
src/HOL/Old_Number_Theory/Fib.thy
src/HOL/Old_Number_Theory/IntFact.thy
src/HOL/Old_Number_Theory/IntPrimes.thy
src/HOL/Old_Number_Theory/Legacy_GCD.thy
src/HOL/Old_Number_Theory/WilsonBij.thy
src/HOL/Predicate_Compile_Examples/Code_Prolog_Examples.thy
src/HOL/Predicate_Compile_Examples/Context_Free_Grammar_Example.thy
src/HOL/Predicate_Compile_Examples/Examples.thy
src/HOL/Predicate_Compile_Examples/Hotel_Example.thy
src/HOL/Predicate_Compile_Examples/Hotel_Example_Prolog.thy
src/HOL/Predicate_Compile_Examples/Hotel_Example_Small_Generator.thy
src/HOL/Predicate_Compile_Examples/IMP_1.thy
src/HOL/Predicate_Compile_Examples/IMP_2.thy
src/HOL/Predicate_Compile_Examples/IMP_3.thy
src/HOL/Predicate_Compile_Examples/IMP_4.thy
src/HOL/Predicate_Compile_Examples/Lambda_Example.thy
src/HOL/Predicate_Compile_Examples/List_Examples.thy
src/HOL/Predicate_Compile_Examples/Predicate_Compile_Quickcheck_Examples.thy
src/HOL/Predicate_Compile_Examples/Predicate_Compile_Tests.thy
src/HOL/Predicate_Compile_Examples/Reg_Exp_Example.thy
src/HOL/Predicate_Compile_Examples/Specialisation_Examples.thy
src/HOL/Probability/Borel_Space.thy
src/HOL/Probability/Characteristic_Functions.thy
src/HOL/Probability/Distribution_Functions.thy
src/HOL/Probability/Levy.thy
src/HOL/Probability/Nonnegative_Lebesgue_Integration.thy
src/HOL/Probability/Sigma_Algebra.thy
src/HOL/Probability/Sinc_Integral.thy
src/HOL/Prolog/Func.thy
src/HOL/Prolog/HOHH.thy
src/HOL/Prolog/Test.thy
src/HOL/Prolog/Type.thy
src/HOL/Quickcheck_Examples/Completeness.thy
src/HOL/Quickcheck_Examples/Hotel_Example.thy
src/HOL/Quickcheck_Examples/Quickcheck_Examples.thy
src/HOL/Quickcheck_Examples/Quickcheck_Interfaces.thy
src/HOL/Quickcheck_Examples/Quickcheck_Lattice_Examples.thy
src/HOL/Quickcheck_Examples/Quickcheck_Narrowing_Examples.thy
src/HOL/Quotient_Examples/DList.thy
src/HOL/Quotient_Examples/FSet.thy
src/HOL/Quotient_Examples/Int_Pow.thy
src/HOL/Quotient_Examples/Lift_DList.thy
src/HOL/Quotient_Examples/Lift_FSet.thy
src/HOL/Quotient_Examples/Lift_Fun.thy
src/HOL/Quotient_Examples/Lift_Set.thy
src/HOL/Quotient_Examples/Quotient_Int.thy
src/HOL/Quotient_Examples/Quotient_Message.thy
src/HOL/SET_Protocol/Cardholder_Registration.thy
src/HOL/SET_Protocol/Event_SET.thy
src/HOL/SET_Protocol/Merchant_Registration.thy
src/HOL/SET_Protocol/Message_SET.thy
src/HOL/SET_Protocol/Public_SET.thy
src/HOL/SET_Protocol/Purchase.thy
src/HOL/SMT_Examples/Boogie.thy
src/HOL/SMT_Examples/SMT_Examples.thy
src/HOL/SMT_Examples/SMT_Tests.thy
src/HOL/SMT_Examples/SMT_Word_Examples.thy
src/HOL/SPARK/Examples/Gcd/Greatest_Common_Divisor.thy
src/HOL/SPARK/Examples/Liseq/Longest_Increasing_Subsequence.thy
src/HOL/SPARK/Examples/RIPEMD-160/Hash.thy
src/HOL/SPARK/Examples/RIPEMD-160/R_L.thy
src/HOL/SPARK/Examples/RIPEMD-160/R_R.thy
src/HOL/SPARK/Examples/RIPEMD-160/Round.thy
src/HOL/SPARK/Examples/RIPEMD-160/S_L.thy
src/HOL/SPARK/Examples/RIPEMD-160/S_R.thy
src/HOL/SPARK/Examples/Sqrt/Sqrt.thy
src/HOL/SPARK/Manual/Complex_Types.thy
src/HOL/SPARK/Manual/Example_Verification.thy
src/HOL/SPARK/Manual/Reference.thy
src/HOL/SPARK/Manual/Simple_Greatest_Common_Divisor.thy
src/HOL/SPARK/Manual/VC_Principles.thy
src/HOL/SPARK/SPARK.thy
src/HOL/SPARK/SPARK_Setup.thy
src/HOL/Statespace/DistinctTreeProver.thy
src/HOL/Statespace/StateFun.thy
src/HOL/Statespace/StateSpaceEx.thy
src/HOL/Statespace/StateSpaceLocale.thy
src/HOL/Statespace/StateSpaceSyntax.thy
src/HOL/TPTP/ATP_Problem_Import.thy
src/HOL/TPTP/ATP_Theory_Export.thy
src/HOL/TPTP/MaSh_Eval.thy
src/HOL/TPTP/MaSh_Export.thy
src/HOL/TPTP/MaSh_Export_Base.thy
src/HOL/TPTP/TPTP_Interpret_Test.thy
src/HOL/TPTP/TPTP_Parser.thy
src/HOL/TPTP/TPTP_Parser_Example.thy
src/HOL/TPTP/TPTP_Parser_Test.thy
src/HOL/TPTP/TPTP_Proof_Reconstruction.thy
src/HOL/TPTP/TPTP_Proof_Reconstruction_Test.thy
src/HOL/TPTP/TPTP_Proof_Reconstruction_Test_Units.thy
src/HOL/TPTP/TPTP_Test.thy
src/HOL/ZF/HOLZF.thy
--- a/src/HOL/Algebra/AbelCoset.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/AbelCoset.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,7 +10,7 @@
 
 subsubsection \<open>Definitions\<close>
 
-text \<open>Hiding @{text "<+>"} from @{theory Sum_Type} until I come
+text \<open>Hiding \<open><+>\<close> from @{theory Sum_Type} until I come
   up with better syntax here\<close>
 
 no_notation Sum_Type.Plus (infixr "<+>" 65)
@@ -41,12 +41,12 @@
 
 definition
   A_FactGroup :: "[('a,'b) ring_scheme, 'a set] \<Rightarrow> ('a set) monoid" (infixl "A'_Mod" 65)
-    --\<open>Actually defined for groups rather than monoids\<close>
+    \<comment>\<open>Actually defined for groups rather than monoids\<close>
   where "A_FactGroup G H = FactGroup \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> H"
 
 definition
   a_kernel :: "('a, 'm) ring_scheme \<Rightarrow> ('b, 'n) ring_scheme \<Rightarrow>  ('a \<Rightarrow> 'b) \<Rightarrow> 'a set"
-    --\<open>the kernel of a homomorphism (additive)\<close>
+    \<comment>\<open>the kernel of a homomorphism (additive)\<close>
   where "a_kernel G H h =
     kernel \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>
       \<lparr>carrier = carrier H, mult = add H, one = zero H\<rparr> h"
@@ -218,7 +218,7 @@
 
 subsubsection \<open>Additive subgroups are normal\<close>
 
-text \<open>Every subgroup of an @{text "abelian_group"} is normal\<close>
+text \<open>Every subgroup of an \<open>abelian_group\<close> is normal\<close>
 
 locale abelian_subgroup = additive_subgroup + abelian_group G +
   assumes a_normal: "normal H \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
@@ -378,7 +378,7 @@
 
 lemma (in abelian_subgroup) rcosets_add_eq:
   "M \<in> a_rcosets H \<Longrightarrow> H <+> M = M"
-  -- \<open>generalizes @{text subgroup_mult_id}\<close>
+  \<comment> \<open>generalizes \<open>subgroup_mult_id\<close>\<close>
 by (rule normal.rcosets_mult_eq [OF a_normal,
     folded set_add_def A_RCOSETS_def, simplified monoid_record_simps])
 
@@ -687,7 +687,7 @@
 by (rule subgroup.rcos_module [OF a_subgroup a_group,
     folded a_r_coset_def a_inv_def, simplified monoid_record_simps])
 
---"variant"
+\<comment>"variant"
 lemma (in abelian_subgroup) a_rcos_module_minus:
   assumes "ring G"
   assumes carr: "x \<in> carrier G" "x' \<in> carrier G"
--- a/src/HOL/Algebra/Bij.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Bij.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,7 +10,7 @@
 
 definition
   Bij :: "'a set \<Rightarrow> ('a \<Rightarrow> 'a) set"
-    --\<open>Only extensional functions, since otherwise we get too many.\<close>
+    \<comment>\<open>Only extensional functions, since otherwise we get too many.\<close>
    where "Bij S = extensional S \<inter> {f. bij_betw f S S}"
 
 definition
--- a/src/HOL/Algebra/Congruence.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Congruence.thy	Thu May 26 17:51:22 2016 +0200
@@ -15,7 +15,7 @@
   carrier :: "'a set"
 
 
-subsection \<open>Structure with Carrier and Equivalence Relation @{text eq}\<close>
+subsection \<open>Structure with Carrier and Equivalence Relation \<open>eq\<close>\<close>
 
 record 'a eq_object = "'a partial_object" +
   eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl ".=\<index>" 50)
--- a/src/HOL/Algebra/Coset.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Coset.thy	Thu May 26 17:51:22 2016 +0200
@@ -85,7 +85,7 @@
 
 lemma (in group) coset_join2:
      "\<lbrakk>x \<in> carrier G;  subgroup H G;  x\<in>H\<rbrakk> \<Longrightarrow> H #> x = H"
-  --\<open>Alternative proof is to put @{term "x=\<one>"} in @{text repr_independence}.\<close>
+  \<comment>\<open>Alternative proof is to put @{term "x=\<one>"} in \<open>repr_independence\<close>.\<close>
 by (force simp add: subgroup.m_closed r_coset_def solve_equation)
 
 lemma (in monoid) r_coset_subset_G:
@@ -171,7 +171,7 @@
   qed
 qed
 
-text \<open>Step one for lemma @{text "rcos_module"}\<close>
+text \<open>Step one for lemma \<open>rcos_module\<close>\<close>
 lemma (in subgroup) rcos_module_imp:
   assumes "group G"
   assumes xcarr: "x \<in> carrier G"
@@ -211,7 +211,7 @@
       show "x' \<otimes> (inv x) \<in> H" by simp
 qed
 
-text \<open>Step two for lemma @{text "rcos_module"}\<close>
+text \<open>Step two for lemma \<open>rcos_module\<close>\<close>
 lemma (in subgroup) rcos_module_rev:
   assumes "group G"
   assumes carr: "x \<in> carrier G" "x' \<in> carrier G"
@@ -524,7 +524,7 @@
 done
 
 
-subsubsection \<open>Set of Inverses of an @{text r_coset}.\<close>
+subsubsection \<open>Set of Inverses of an \<open>r_coset\<close>.\<close>
 
 lemma (in normal) rcos_inv:
   assumes x:     "x \<in> carrier G"
@@ -549,7 +549,7 @@
 qed
 
 
-subsubsection \<open>Theorems for @{text "<#>"} with @{text "#>"} or @{text "<#"}.\<close>
+subsubsection \<open>Theorems for \<open><#>\<close> with \<open>#>\<close> or \<open><#\<close>.\<close>
 
 lemma (in group) setmult_rcos_assoc:
      "\<lbrakk>H \<subseteq> carrier G; K \<subseteq> carrier G; x \<in> carrier G\<rbrakk>
@@ -584,7 +584,7 @@
 by (simp add: rcos_mult_step1 rcos_mult_step2 rcos_mult_step3)
 
 lemma (in normal) rcosets_mult_eq: "M \<in> rcosets H \<Longrightarrow> H <#> M = M"
-  -- \<open>generalizes @{text subgroup_mult_id}\<close>
+  \<comment> \<open>generalizes \<open>subgroup_mult_id\<close>\<close>
   by (auto simp add: RCOSETS_def subset
         setmult_rcos_assoc subgroup_mult_id normal.axioms normal_axioms)
 
@@ -628,7 +628,7 @@
   qed
 qed
 
-text\<open>Equivalence classes of @{text rcong} correspond to left cosets.
+text\<open>Equivalence classes of \<open>rcong\<close> correspond to left cosets.
   Was there a mistake in the definitions? I'd have expected them to
   correspond to right cosets.\<close>
 
@@ -679,7 +679,7 @@
 qed
 
 
-subsection \<open>Further lemmas for @{text "r_congruent"}\<close>
+subsection \<open>Further lemmas for \<open>r_congruent\<close>\<close>
 
 text \<open>The relation is a congruence\<close>
 
@@ -780,7 +780,7 @@
 apply (simp add: r_coset_subset_G [THEN finite_subset])
 done
 
-text\<open>The next two lemmas support the proof of @{text card_cosets_equal}.\<close>
+text\<open>The next two lemmas support the proof of \<open>card_cosets_equal\<close>.\<close>
 lemma (in group) inj_on_f:
     "\<lbrakk>H \<subseteq> carrier G;  a \<in> carrier G\<rbrakk> \<Longrightarrow> inj_on (\<lambda>y. y \<otimes> inv a) (H #> a)"
 apply (rule inj_onI)
@@ -831,7 +831,7 @@
 
 definition
   FactGroup :: "[('a,'b) monoid_scheme, 'a set] \<Rightarrow> ('a set) monoid" (infixl "Mod" 65)
-    --\<open>Actually defined for groups rather than monoids\<close>
+    \<comment>\<open>Actually defined for groups rather than monoids\<close>
    where "FactGroup G H = \<lparr>carrier = rcosets\<^bsub>G\<^esub> H, mult = set_mult G, one = H\<rparr>"
 
 lemma (in normal) setmult_closed:
@@ -897,7 +897,7 @@
 
 definition
   kernel :: "('a, 'm) monoid_scheme \<Rightarrow> ('b, 'n) monoid_scheme \<Rightarrow>  ('a \<Rightarrow> 'b) \<Rightarrow> 'a set"
-    --\<open>the kernel of a homomorphism\<close>
+    \<comment>\<open>the kernel of a homomorphism\<close>
   where "kernel G H h = {x. x \<in> carrier G & h x = \<one>\<^bsub>H\<^esub>}"
 
 lemma (in group_hom) subgroup_kernel: "subgroup (kernel G H h) G"
--- a/src/HOL/Algebra/Divisibility.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Divisibility.thy	Thu May 26 17:51:22 2016 +0200
@@ -2340,7 +2340,7 @@
 qed
 
 
---"A version using @{const factors}, more complicated"
+\<comment>"A version using @{const factors}, more complicated"
 lemma (in factorial_monoid) factors_irreducible_is_prime:
   assumes pirr: "irreducible G p"
     and pcarr: "p \<in> carrier G"
--- a/src/HOL/Algebra/FiniteProduct.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/FiniteProduct.thy	Thu May 26 17:51:22 2016 +0200
@@ -12,10 +12,10 @@
 
 subsubsection \<open>Inductive Definition of a Relation for Products over Sets\<close>
 
-text \<open>Instantiation of locale @{text LC} of theory @{text Finite_Set} is not
+text \<open>Instantiation of locale \<open>LC\<close> of theory \<open>Finite_Set\<close> is not
   possible, because here we have explicit typing rules like 
-  @{text "x \<in> carrier G"}.  We introduce an explicit argument for the domain
-  @{text D}.\<close>
+  \<open>x \<in> carrier G\<close>.  We introduce an explicit argument for the domain
+  \<open>D\<close>.\<close>
 
 inductive_set
   foldSetD :: "['a set, 'b => 'a => 'a, 'a] => ('b set * 'a) set"
@@ -111,7 +111,7 @@
   apply (erule foldSetD.cases)
    apply blast
   apply clarify
-  txt \<open>force simplification of @{text "card A < card (insert ...)"}.\<close>
+  txt \<open>force simplification of \<open>card A < card (insert ...)\<close>.\<close>
   apply (erule rev_mp)
   apply (simp add: less_Suc_eq_le)
   apply (rule impI)
@@ -221,7 +221,7 @@
     ==> foldD D f e (A Un B) = foldD D f (foldD D f e B) A"
   by (simp add: foldD_nest_Un_Int)
 
--- \<open>Delete rules to do with @{text foldSetD} relation.\<close>
+\<comment> \<open>Delete rules to do with \<open>foldSetD\<close> relation.\<close>
 
 declare foldSetD_imp_finite [simp del]
   empty_foldSetDE [rule del]
@@ -233,8 +233,8 @@
 text \<open>Commutative Monoids\<close>
 
 text \<open>
-  We enter a more restrictive context, with @{text "f :: 'a => 'a => 'a"}
-  instead of @{text "'b => 'a => 'a"}.
+  We enter a more restrictive context, with \<open>f :: 'a => 'a => 'a\<close>
+  instead of \<open>'b => 'a => 'a\<close>.
 \<close>
 
 locale ACeD =
@@ -299,7 +299,7 @@
       ("(3\<Otimes>__\<in>_. _)" [1000, 0, 51, 10] 10)
 translations
   "\<Otimes>\<^bsub>G\<^esub>i\<in>A. b" \<rightleftharpoons> "CONST finprod G (%i. b) A"
-  -- \<open>Beware of argument permutation!\<close>
+  \<comment> \<open>Beware of argument permutation!\<close>
 
 lemma (in comm_monoid) finprod_empty [simp]: 
   "finprod G f {} = \<one>"
@@ -367,7 +367,7 @@
   "[| finite A; finite B; g \<in> A \<rightarrow> carrier G; g \<in> B \<rightarrow> carrier G |] ==>
      finprod G g (A Un B) \<otimes> finprod G g (A Int B) =
      finprod G g A \<otimes> finprod G g B"
--- \<open>The reversed orientation looks more natural, but LOOPS as a simprule!\<close>
+\<comment> \<open>The reversed orientation looks more natural, but LOOPS as a simprule!\<close>
 proof (induct set: finite)
   case empty then show ?case by simp
 next
@@ -451,7 +451,7 @@
   by (rule finprod_cong') (auto simp add: simp_implies_def)
 
 text \<open>Usually, if this rule causes a failed congruence proof error,
-  the reason is that the premise @{text "g \<in> B \<rightarrow> carrier G"} cannot be shown.
+  the reason is that the premise \<open>g \<in> B \<rightarrow> carrier G\<close> cannot be shown.
   Adding @{thm [source] Pi_def} to the simpset is often useful.
   For this reason, @{thm [source] finprod_cong}
   is not added to the simpset by default.
--- a/src/HOL/Algebra/Group.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Group.thy	Thu May 26 17:51:22 2016 +0200
@@ -26,7 +26,7 @@
 
 definition
   Units :: "_ => 'a set"
-  --\<open>The set of invertible elements\<close>
+  \<comment>\<open>The set of invertible elements\<close>
   where "Units G = {y. y \<in> carrier G & (\<exists>x \<in> carrier G. x \<otimes>\<^bsub>G\<^esub> y = \<one>\<^bsub>G\<^esub> & y \<otimes>\<^bsub>G\<^esub> x = \<one>\<^bsub>G\<^esub>)}"
 
 consts
@@ -98,7 +98,7 @@
   moreover from x y xinv yinv have "x \<otimes> (y \<otimes> y') \<otimes> x' = \<one>" by simp
   moreover note x y
   ultimately show ?thesis unfolding Units_def
-    -- "Must avoid premature use of @{text hyp_subst_tac}."
+    \<comment> "Must avoid premature use of \<open>hyp_subst_tac\<close>."
     apply (rule_tac CollectI)
     apply (rule)
     apply (fast)
@@ -486,8 +486,8 @@
 
 text \<open>
   Since @{term H} is nonempty, it contains some element @{term x}.  Since
-  it is closed under inverse, it contains @{text "inv x"}.  Since
-  it is closed under product, it contains @{text "x \<otimes> inv x = \<one>"}.
+  it is closed under inverse, it contains \<open>inv x\<close>.  Since
+  it is closed under product, it contains \<open>x \<otimes> inv x = \<one>\<close>.
 \<close>
 
 lemma (in group) one_in_subset:
--- a/src/HOL/Algebra/Ideal.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Ideal.thy	Thu May 26 17:51:22 2016 +0200
@@ -592,14 +592,14 @@
 
 subsection \<open>Properties of Principal Ideals\<close>
 
-text \<open>@{text "\<zero>"} generates the zero ideal\<close>
+text \<open>\<open>\<zero>\<close> generates the zero ideal\<close>
 lemma (in ring) zero_genideal: "Idl {\<zero>} = {\<zero>}"
   apply rule
   apply (simp add: genideal_minimal zeroideal)
   apply (fast intro!: genideal_self)
   done
 
-text \<open>@{text "\<one>"} generates the unit ideal\<close>
+text \<open>\<open>\<one>\<close> generates the unit ideal\<close>
 lemma (in ring) one_genideal: "Idl {\<one>} = carrier R"
 proof -
   have "\<one> \<in> Idl {\<one>}"
@@ -684,7 +684,7 @@
   then show thesis using primeidealCD [OF R.is_cring notprime] by blast
 qed
 
-text \<open>If @{text "{\<zero>}"} is a prime ideal of a commutative ring, the ring is a domain\<close>
+text \<open>If \<open>{\<zero>}\<close> is a prime ideal of a commutative ring, the ring is a domain\<close>
 lemma (in cring) zeroprimeideal_domainI:
   assumes pi: "primeideal {\<zero>} R"
   shows "domain R"
@@ -828,7 +828,7 @@
 
 subsection \<open>Derived Theorems\<close>
 
---"A non-zero cring that has only the two trivial ideals is a field"
+\<comment>"A non-zero cring that has only the two trivial ideals is a field"
 lemma (in cring) trivialideals_fieldI:
   assumes carrnzero: "carrier R \<noteq> {\<zero>}"
     and haveideals: "{I. ideal I R} = {{\<zero>}, carrier R}"
@@ -921,7 +921,7 @@
   qed
 qed (simp add: zeroideal oneideal)
 
---"Jacobson Theorem 2.2"
+\<comment>"Jacobson Theorem 2.2"
 lemma (in cring) trivialideals_eq_field:
   assumes carrnzero: "carrier R \<noteq> {\<zero>}"
   shows "({I. ideal I R} = {{\<zero>}, carrier R}) = field R"
--- a/src/HOL/Algebra/IntRing.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/IntRing.thy	Thu May 26 17:51:22 2016 +0200
@@ -20,7 +20,7 @@
 done
 
 
-subsection \<open>@{text "\<Z>"}: The Set of Integers as Algebraic Structure\<close>
+subsection \<open>\<open>\<Z>\<close>: The Set of Integers as Algebraic Structure\<close>
 
 abbreviation int_ring :: "int ring" ("\<Z>")
   where "int_ring \<equiv> \<lparr>carrier = UNIV, mult = op *, one = 1, zero = 0, add = op +\<rparr>"
@@ -59,14 +59,14 @@
     and "one \<Z> = 1"
     and "pow \<Z> x n = x^n"
 proof -
-  -- "Specification"
+  \<comment> "Specification"
   show "monoid \<Z>" by standard auto
   then interpret int: monoid \<Z> .
 
-  -- "Carrier"
+  \<comment> "Carrier"
   show "carrier \<Z> = UNIV" by simp
 
-  -- "Operations"
+  \<comment> "Operations"
   { fix x y show "mult \<Z> x y = x * y" by simp }
   show "one \<Z> = 1" by simp
   show "pow \<Z> x n = x^n" by (induct n) simp_all
@@ -75,11 +75,11 @@
 interpretation int: comm_monoid \<Z>
   rewrites "finprod \<Z> f A = setprod f A"
 proof -
-  -- "Specification"
+  \<comment> "Specification"
   show "comm_monoid \<Z>" by standard auto
   then interpret int: comm_monoid \<Z> .
 
-  -- "Operations"
+  \<comment> "Operations"
   { fix x y have "mult \<Z> x y = x * y" by simp }
   note mult = this
   have one: "one \<Z> = 1" by simp
@@ -93,14 +93,14 @@
     and int_add_eq: "add \<Z> x y = x + y"
     and int_finsum_eq: "finsum \<Z> f A = setsum f A"
 proof -
-  -- "Specification"
+  \<comment> "Specification"
   show "abelian_monoid \<Z>" by standard auto
   then interpret int: abelian_monoid \<Z> .
 
-  -- "Carrier"
+  \<comment> "Carrier"
   show "carrier \<Z> = UNIV" by simp
 
-  -- "Operations"
+  \<comment> "Operations"
   { fix x y show "add \<Z> x y = x + y" by simp }
   note add = this
   show zero: "zero \<Z> = 0"
@@ -121,7 +121,7 @@
     and int_a_inv_eq: "a_inv \<Z> x = - x"
     and int_a_minus_eq: "a_minus \<Z> x y = x - y"
 proof -
-  -- "Specification"
+  \<comment> "Specification"
   show "abelian_group \<Z>"
   proof (rule abelian_groupI)
     fix x
@@ -130,7 +130,7 @@
       by simp arith
   qed auto
   then interpret int: abelian_group \<Z> .
-  -- "Operations"
+  \<comment> "Operations"
   { fix x y have "add \<Z> x y = x + y" by simp }
   note add = this
   have zero: "zero \<Z> = 0" by simp
@@ -218,7 +218,7 @@
   by standard clarsimp
 
 
-subsection \<open>Generated Ideals of @{text "\<Z>"}\<close>
+subsection \<open>Generated Ideals of \<open>\<Z>\<close>\<close>
 
 lemma int_Idl: "Idl\<^bsub>\<Z>\<^esub> {a} = {x * a | x. True}"
   apply (subst int.cgenideal_eq_genideal[symmetric]) apply simp
--- a/src/HOL/Algebra/Lattice.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Lattice.thy	Thu May 26 17:51:22 2016 +0200
@@ -507,7 +507,7 @@
   unfolding sup_def
   by (rule someI2) (auto intro: sup_of_singletonI)
 
-text \<open>Condition on @{text A}: supremum exists.\<close>
+text \<open>Condition on \<open>A\<close>: supremum exists.\<close>
 
 lemma (in weak_upper_semilattice) sup_insertI:
   "[| !!s. least L s (Upper L (insert x A)) ==> P s;
@@ -638,7 +638,7 @@
   assumes L: "x \<in> carrier L"  "y \<in> carrier L"  "z \<in> carrier L"
   shows "x \<squnion> (y \<squnion> z) .= \<Squnion>{x, y, z}"
 proof (rule finite_sup_insertI)
-  -- \<open>The textbook argument in Jacobson I, p 457\<close>
+  \<comment> \<open>The textbook argument in Jacobson I, p 457\<close>
   fix s
   assume sup: "least L s (Upper L {x, y, z})"
   show "x \<squnion> (y \<squnion> z) .= s"
@@ -652,7 +652,7 @@
   qed (simp_all add: L least_closed [OF sup])
 qed (simp_all add: L)
 
-text \<open>Commutativity holds for @{text "="}.\<close>
+text \<open>Commutativity holds for \<open>=\<close>.\<close>
 
 lemma join_comm:
   fixes L (structure)
@@ -747,7 +747,7 @@
   unfolding inf_def
   by (rule someI2) (auto intro: inf_of_singletonI)
 
-text \<open>Condition on @{text A}: infimum exists.\<close>
+text \<open>Condition on \<open>A\<close>: infimum exists.\<close>
 
 lemma (in weak_lower_semilattice) inf_insertI:
   "[| !!i. greatest L i (Lower L (insert x A)) ==> P i;
@@ -1090,7 +1090,7 @@
 (* TODO: prove dual version *)
 
 
-subsection \<open>Orders and Lattices where @{text eq} is the Equality\<close>
+subsection \<open>Orders and Lattices where \<open>eq\<close> is the Equality\<close>
 
 locale partial_order = weak_partial_order +
   assumes eq_is_equal: "op .= = op ="
@@ -1172,7 +1172,7 @@
   "x \<in> carrier L ==> \<Sqinter>{x} = x"
   using weak_inf_of_singleton unfolding eq_is_equal .
 
-text \<open>Condition on @{text A}: infimum exists.\<close>
+text \<open>Condition on \<open>A\<close>: infimum exists.\<close>
 
 lemma (in lower_semilattice) meet_assoc_lemma:
   assumes L: "x \<in> carrier L"  "y \<in> carrier L"  "z \<in> carrier L"
--- a/src/HOL/Algebra/QuotRing.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/QuotRing.thy	Thu May 26 17:51:22 2016 +0200
@@ -84,28 +84,28 @@
 text \<open>The quotient is a ring\<close>
 lemma (in ideal) quotient_is_ring: "ring (R Quot I)"
 apply (rule ringI)
-   --\<open>abelian group\<close>
+   \<comment>\<open>abelian group\<close>
    apply (rule comm_group_abelian_groupI)
    apply (simp add: FactRing_def)
    apply (rule a_factorgroup_is_comm_group[unfolded A_FactGroup_def'])
-  --\<open>mult monoid\<close>
+  \<comment>\<open>mult monoid\<close>
   apply (rule monoidI)
       apply (simp_all add: FactRing_def A_RCOSETS_def RCOSETS_def
              a_r_coset_def[symmetric])
-      --\<open>mult closed\<close>
+      \<comment>\<open>mult closed\<close>
       apply (clarify)
       apply (simp add: rcoset_mult_add, fast)
-     --\<open>mult @{text one_closed}\<close>
+     \<comment>\<open>mult \<open>one_closed\<close>\<close>
      apply force
-    --\<open>mult assoc\<close>
+    \<comment>\<open>mult assoc\<close>
     apply clarify
     apply (simp add: rcoset_mult_add m_assoc)
-   --\<open>mult one\<close>
+   \<comment>\<open>mult one\<close>
    apply clarify
    apply (simp add: rcoset_mult_add)
   apply clarify
   apply (simp add: rcoset_mult_add)
- --\<open>distr\<close>
+ \<comment>\<open>distr\<close>
  apply clarify
  apply (simp add: rcoset_mult_add a_rcos_sum l_distr)
 apply clarify
@@ -225,7 +225,7 @@
      apply (simp add: FactRing_def A_RCOSETS_defs a_r_coset_def[symmetric], clarsimp)
      apply (simp add: rcoset_mult_add) defer 1
   proof (rule ccontr, simp)
-    --\<open>Quotient is not empty\<close>
+    \<comment>\<open>Quotient is not empty\<close>
     assume "\<zero>\<^bsub>R Quot I\<^esub> = \<one>\<^bsub>R Quot I\<^esub>"
     then have II1: "I = I +> \<one>" by (simp add: FactRing_def)
     from a_rcos_self[OF one_closed] have "\<one> \<in> I"
@@ -233,11 +233,11 @@
     then have "I = carrier R" by (rule one_imp_carrier)
     with I_notcarr show False by simp
   next
-    --\<open>Existence of Inverse\<close>
+    \<comment>\<open>Existence of Inverse\<close>
     fix a
     assume IanI: "I +> a \<noteq> I" and acarr: "a \<in> carrier R"
 
-    --\<open>Helper ideal @{text "J"}\<close>
+    \<comment>\<open>Helper ideal \<open>J\<close>\<close>
     define J :: "'a set" where "J = (carrier R #> a) <+> I"
     have idealJ: "ideal J R"
       apply (unfold J_def, rule add_ideals)
@@ -245,7 +245,7 @@
       apply (rule is_ideal)
       done
 
-    --\<open>Showing @{term "J"} not smaller than @{term "I"}\<close>
+    \<comment>\<open>Showing @{term "J"} not smaller than @{term "I"}\<close>
     have IinJ: "I \<subseteq> J"
     proof (rule, simp add: J_def r_coset_def set_add_defs)
       fix x
@@ -256,7 +256,7 @@
       with Zcarr and xI show "\<exists>xa\<in>carrier R. \<exists>k\<in>I. x = xa \<otimes> a \<oplus> k" by fast
     qed
 
-    --\<open>Showing @{term "J \<noteq> I"}\<close>
+    \<comment>\<open>Showing @{term "J \<noteq> I"}\<close>
     have anI: "a \<notin> I"
     proof (rule ccontr, simp)
       assume "a \<in> I"
@@ -274,7 +274,7 @@
 
     from aJ and anI have JnI: "J \<noteq> I" by fast
 
-    --\<open>Deducing @{term "J = carrier R"} because @{term "I"} is maximal\<close>
+    \<comment>\<open>Deducing @{term "J = carrier R"} because @{term "I"} is maximal\<close>
     from idealJ and IinJ have "J = I \<or> J = carrier R"
     proof (rule I_maximal, unfold J_def)
       have "carrier R #> a \<subseteq> carrier R"
@@ -285,7 +285,7 @@
 
     with JnI have Jcarr: "J = carrier R" by simp
 
-    --\<open>Calculating an inverse for @{term "a"}\<close>
+    \<comment>\<open>Calculating an inverse for @{term "a"}\<close>
     from one_closed[folded Jcarr]
     have "\<exists>r\<in>carrier R. \<exists>i\<in>I. \<one> = r \<otimes> a \<oplus> i"
       by (simp add: J_def r_coset_def set_add_defs)
@@ -294,7 +294,7 @@
     from one and rcarr and acarr and iI[THEN a_Hcarr]
     have rai1: "a \<otimes> r = \<ominus>i \<oplus> \<one>" by algebra
 
-    --\<open>Lifting to cosets\<close>
+    \<comment>\<open>Lifting to cosets\<close>
     from iI have "\<ominus>i \<oplus> \<one> \<in> I +> \<one>"
       by (intro a_rcosI, simp, intro a_subset, simp)
     with rai1 have "a \<otimes> r \<in> I +> \<one>" by simp
--- a/src/HOL/Algebra/Ring.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Ring.thy	Thu May 26 17:51:22 2016 +0200
@@ -39,7 +39,7 @@
       ("(3\<Oplus>__\<in>_. _)" [1000, 0, 51, 10] 10)
 translations
   "\<Oplus>\<^bsub>G\<^esub>i\<in>A. b" \<rightleftharpoons> "CONST finsum G (%i. b) A"
-  -- \<open>Beware of argument permutation!\<close>
+  \<comment> \<open>Beware of argument permutation!\<close>
 
 
 locale abelian_group = abelian_monoid +
@@ -141,7 +141,7 @@
 
 lemmas finsum_cong = add.finprod_cong
 text \<open>Usually, if this rule causes a failed congruence proof error,
-   the reason is that the premise @{text "g \<in> B \<rightarrow> carrier G"} cannot be shown.
+   the reason is that the premise \<open>g \<in> B \<rightarrow> carrier G\<close> cannot be shown.
    Adding @{thm [source] Pi_def} to the simpset is often useful.\<close>
 
 lemmas finsum_reindex = add.finprod_reindex
@@ -206,7 +206,7 @@
 
 lemmas (in abelian_group) minus_add = add.inv_mult
  
-text \<open>Derive an @{text "abelian_group"} from a @{text "comm_group"}\<close>
+text \<open>Derive an \<open>abelian_group\<close> from a \<open>comm_group\<close>\<close>
 
 lemma comm_group_abelian_groupI:
   fixes G (structure)
@@ -283,7 +283,7 @@
   shows "cring R"
 proof (intro cring.intro ring.intro)
   show "ring_axioms R"
-    -- \<open>Right-distributivity follows from left-distributivity and
+    \<comment> \<open>Right-distributivity follows from left-distributivity and
           commutativity.\<close>
   proof (rule ring_axioms.intro)
     fix x y z
--- a/src/HOL/Algebra/RingHom.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/RingHom.thy	Thu May 26 17:51:22 2016 +0200
@@ -8,7 +8,7 @@
 
 section \<open>Homomorphisms of Non-Commutative Rings\<close>
 
-text \<open>Lifting existing lemmas in a @{text ring_hom_ring} locale\<close>
+text \<open>Lifting existing lemmas in a \<open>ring_hom_ring\<close> locale\<close>
 locale ring_hom_ring = R?: ring R + S?: ring S
     for R (structure) and S (structure) +
   fixes h
@@ -102,7 +102,7 @@
 
 subsection \<open>The Kernel of a Ring Homomorphism\<close>
 
---"the kernel of a ring homomorphism is an ideal"
+\<comment>"the kernel of a ring homomorphism is an ideal"
 lemma (in ring_hom_ring) kernel_is_ideal:
   shows "ideal (a_kernel R S h) R"
 apply (rule idealI)
--- a/src/HOL/Algebra/Sylow.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/Sylow.thy	Thu May 26 17:51:22 2016 +0200
@@ -104,7 +104,7 @@
 
 end
 
-subsection\<open>Discharging the Assumptions of @{text sylow_central}\<close>
+subsection\<open>Discharging the Assumptions of \<open>sylow_central\<close>\<close>
 
 context sylow
 begin
@@ -265,7 +265,7 @@
             intro!: M1_in_M in_quotient_imp_closed [OF RelM_equiv M_in_quot _  M1_RelM_rcosetGM1g])
 done
 
-text\<open>close to a duplicate of @{text inj_M_GmodH}\<close>
+text\<open>close to a duplicate of \<open>inj_M_GmodH\<close>\<close>
 lemma (in sylow_central) inj_GmodH_M:
      "\<exists>g \<in> rcosets H\<rightarrow>M. inj_on g (rcosets H)"
 apply (rule bexI)
--- a/src/HOL/Algebra/UnivPoly.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Algebra/UnivPoly.thy	Thu May 26 17:51:22 2016 +0200
@@ -14,7 +14,7 @@
 text \<open>
   Polynomials are formalised as modules with additional operations for
   extracting coefficients from polynomials and for obtaining monomials
-  from coefficients and exponents (record @{text "up_ring"}).  The
+  from coefficients and exponents (record \<open>up_ring\<close>).  The
   carrier set is a set of bounded functions from Nat to the
   coefficient domain.  Bounded means that these functions return zero
   above a certain bound (the degree).  There is a chapter on the
@@ -754,7 +754,7 @@
   assumes non_zero: "n ~= 0 ==> coeff P p n ~= \<zero>"
     and R: "p \<in> carrier P"
   shows "n <= deg R p"
--- \<open>Logically, this is a slightly stronger version of
+\<comment> \<open>Logically, this is a slightly stronger version of
    @{thm [source] deg_aboveD}\<close>
 proof (cases "n=0")
   case True then show ?thesis by simp
@@ -864,7 +864,7 @@
 qed
 
 text\<open>The following lemma is later \emph{overwritten} by the most
-  specific one for domains, @{text deg_smult}.\<close>
+  specific one for domains, \<open>deg_smult\<close>.\<close>
 
 lemma deg_smult_ring [simp]:
   "[| a \<in> carrier R; p \<in> carrier P |] ==>
@@ -1202,7 +1202,7 @@
   defines Eval_def: "Eval == eval R S h s"
 
 text\<open>JE: I have moved the following lemma from Ring.thy and lifted then to the locale @{term ring_hom_ring} from @{term ring_hom_cring}.\<close>
-text\<open>JE: I was considering using it in @{text eval_ring_hom}, but that property does not hold for non commutative rings, so 
+text\<open>JE: I was considering using it in \<open>eval_ring_hom\<close>, but that property does not hold for non commutative rings, so 
   maybe it is not that necessary.\<close>
 
 lemma (in ring_hom_ring) hom_finsum [simp]:
@@ -1295,8 +1295,8 @@
 qed
 
 text \<open>
-  The following lemma could be proved in @{text UP_cring} with the additional
-  assumption that @{text h} is closed.\<close>
+  The following lemma could be proved in \<open>UP_cring\<close> with the additional
+  assumption that \<open>h\<close> is closed.\<close>
 
 lemma (in UP_pre_univ_prop) eval_const:
   "[| s \<in> carrier S; r \<in> carrier R |] ==> eval R S h s (monom P r 0) = h r"
--- a/src/HOL/Cardinals/Bounded_Set.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Bounded_Set.thy	Thu May 26 17:51:22 2016 +0200
@@ -61,9 +61,9 @@
   hence *: "set_bset R' = ?L'" unfolding R'_def by (intro set_bset_to_set_bset)
   show ?R unfolding Grp_def relcompp.simps conversep.simps
   proof (intro CollectI case_prodI exI[of _ a] exI[of _ b] exI[of _ R'] conjI refl)
-    from * show "a = map_bset fst R'" using conjunct1[OF `?L`]
+    from * show "a = map_bset fst R'" using conjunct1[OF \<open>?L\<close>]
       by (transfer, auto simp add: image_def Int_def split: prod.splits)
-    from * show "b = map_bset snd R'" using conjunct2[OF `?L`]
+    from * show "b = map_bset snd R'" using conjunct2[OF \<open>?L\<close>]
       by (transfer, auto simp add: image_def Int_def split: prod.splits)
   qed (auto simp add: *)
 next
--- a/src/HOL/Cardinals/Cardinal_Arithmetic.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Cardinal_Arithmetic.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,13 +5,13 @@
 Cardinal arithmetic.
 *)
 
-section {* Cardinal Arithmetic *}
+section \<open>Cardinal Arithmetic\<close>
 
 theory Cardinal_Arithmetic
 imports BNF_Cardinal_Arithmetic Cardinal_Order_Relation
 begin
 
-subsection {* Binary sum *}
+subsection \<open>Binary sum\<close>
 
 lemma csum_Cnotzero2:
   "Cnotzero r2 \<Longrightarrow> Cnotzero (r1 +c r2)"
@@ -41,7 +41,7 @@
   by (rule ordIso_transitive[OF ordIso_symmetric[OF card_of_Plus_empty2] card_of_Field_ordIso])
 
 
-subsection {* Product *}
+subsection \<open>Product\<close>
 
 lemma Times_cprod: "|A \<times> B| =o |A| *c |B|"
 by (simp only: cprod_def Field_card_of card_of_refl)
@@ -71,7 +71,7 @@
 unfolding cprod_def by (metis Card_order_Times1 czeroI)
 
 
-subsection {* Exponentiation *}
+subsection \<open>Exponentiation\<close>
 
 lemma cexp_czero: "r ^c czero =o cone"
 unfolding cexp_def czero_def Field_card_of Func_empty by (rule single_cone)
@@ -269,7 +269,7 @@
   unfolding cexp_def cprod_def csum_def Field_card_of by (rule card_of_Func_Plus)
 
 
-subsection {* Powerset *}
+subsection \<open>Powerset\<close>
 
 definition cpow where "cpow r = |Pow (Field r)|"
 
@@ -291,7 +291,7 @@
 lemma cpow_cexp_ctwo: "cpow r =o ctwo ^c r"
 unfolding cpow_def ctwo_def cexp_def Field_card_of by (rule card_of_Pow_Func)
 
-subsection {* Inverse image *}
+subsection \<open>Inverse image\<close>
 
 lemma vimage_ordLeq:
 assumes "|A| \<le>o k" and "\<forall> a \<in> A. |vimage f {a}| \<le>o k" and "Cinfinite k"
@@ -303,7 +303,7 @@
   finally show ?thesis .
 qed
 
-subsection {* Maximum *}
+subsection \<open>Maximum\<close>
 
 definition cmax where
   "cmax r s =
@@ -372,7 +372,7 @@
   case False
   hence "s \<le>o r" by (metis ordLeq_total r s card_order_on_def)
   hence "cmax r s =o r" by (metis cmax1 r s)
-  also have "r =o r +c s" by (metis `s \<le>o r` csum_absorb1 ordIso_symmetric r)
+  also have "r =o r +c s" by (metis \<open>s \<le>o r\<close> csum_absorb1 ordIso_symmetric r)
   finally show ?thesis .
 qed
 
@@ -386,7 +386,7 @@
   case False
   hence "s \<le>o r" by (metis ordLeq_total r s card_order_on_def)
   hence "cmax r s =o r" by (metis cmax1 r s)
-  also have "r =o r *c s" by (metis Cinfinite_Cnotzero `s \<le>o r` cprod_infinite1' ordIso_symmetric r s)
+  also have "r =o r *c s" by (metis Cinfinite_Cnotzero \<open>s \<le>o r\<close> cprod_infinite1' ordIso_symmetric r s)
   finally show ?thesis .
 qed
 
--- a/src/HOL/Cardinals/Cardinal_Order_Relation.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Cardinal_Order_Relation.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Cardinal-order relations.
 *)
 
-section {* Cardinal-Order Relations *}
+section \<open>Cardinal-Order Relations\<close>
 
 theory Cardinal_Order_Relation
 imports BNF_Cardinal_Order_Relation Wellorder_Constructions
@@ -69,7 +69,7 @@
   curr_in[intro, simp]
 
 
-subsection {* Cardinal of a set *}
+subsection \<open>Cardinal of a set\<close>
 
 lemma card_of_inj_rel: assumes INJ: "!! x y y'. \<lbrakk>(x,y) : R; (x,y') : R\<rbrakk> \<Longrightarrow> y = y'"
 shows "|{y. EX x. (x,y) : R}| <=o |{x. EX y. (x,y) : R}|"
@@ -132,7 +132,7 @@
 qed
 
 
-subsection {* Cardinals versus set operations on arbitrary sets *}
+subsection \<open>Cardinals versus set operations on arbitrary sets\<close>
 
 lemma card_of_set_type[simp]: "|UNIV::'a set| <o |UNIV::'a set set|"
 using card_of_Pow[of "UNIV::'a set"] by simp
@@ -659,7 +659,7 @@
 by (metis assms ordIso_iff_ordLeq ordLeq_finite_Field)
 
 
-subsection {* Cardinals versus set operations involving infinite sets *}
+subsection \<open>Cardinals versus set operations involving infinite sets\<close>
 
 lemma finite_iff_cardOf_nat:
 "finite A = ( |A| <o |UNIV :: nat set| )"
@@ -704,10 +704,10 @@
 qed
 
 
-subsection {* Cardinals versus lists *}
+subsection \<open>Cardinals versus lists\<close>
 
-text{* The next is an auxiliary operator, which shall be used for inductive
-proofs of facts concerning the cardinality of @{text "List"} : *}
+text\<open>The next is an auxiliary operator, which shall be used for inductive
+proofs of facts concerning the cardinality of \<open>List\<close> :\<close>
 
 definition nlists :: "'a set \<Rightarrow> nat \<Rightarrow> 'a list set"
 where "nlists A n \<equiv> {l. set l \<le> A \<and> length l = n}"
@@ -886,7 +886,7 @@
 using lists_UNIV by auto
 
 
-subsection {* Cardinals versus the set-of-finite-sets operator *}
+subsection \<open>Cardinals versus the set-of-finite-sets operator\<close>
 
 definition Fpow :: "'a set \<Rightarrow> 'a set set"
 where "Fpow A \<equiv> {X. X \<le> A \<and> finite X}"
@@ -999,9 +999,9 @@
 using assms card_of_Fpow_infinite card_of_ordIso by blast
 
 
-subsection {* The cardinal $\omega$ and the finite cardinals *}
+subsection \<open>The cardinal $\omega$ and the finite cardinals\<close>
 
-subsubsection {* First as well-orders *}
+subsubsection \<open>First as well-orders\<close>
 
 lemma Field_natLess: "Field natLess = (UNIV::nat set)"
 by(unfold Field_def natLess_def, auto)
@@ -1111,7 +1111,7 @@
 qed
 
 
-subsubsection {* Then as cardinals *}
+subsubsection \<open>Then as cardinals\<close>
 
 lemma ordIso_natLeq_infinite1:
 "|A| =o natLeq \<Longrightarrow> \<not>finite A"
@@ -1179,7 +1179,7 @@
 qed
 
 
-subsubsection {* "Backward compatibility" with the numeric cardinal operator for finite sets *}
+subsubsection \<open>"Backward compatibility" with the numeric cardinal operator for finite sets\<close>
 
 lemma finite_card_of_iff_card2:
 assumes FIN: "finite A" and FIN': "finite B"
@@ -1221,7 +1221,7 @@
 using Field_natLeq_on card_atLeastLessThan by auto
 
 
-subsection {* The successor of a cardinal *}
+subsection \<open>The successor of a cardinal\<close>
 
 lemma embed_implies_ordIso_Restr:
 assumes WELL: "Well_order r" and WELL': "Well_order r'" and EMB: "embed r' r f"
@@ -1321,7 +1321,7 @@
 ordLeq_transitive by metis
 
 
-subsection {* Others *}
+subsection \<open>Others\<close>
 
 lemma under_mono[simp]:
 assumes "Well_order r" and "(i,j) \<in> r"
@@ -1575,7 +1575,7 @@
 using ordLeq_Func[OF B] by (metis A card_of_ordLeq_finite)
 
 
-subsection {* Infinite cardinals are limit ordinals *}
+subsection \<open>Infinite cardinals are limit ordinals\<close>
 
 lemma card_order_infinite_isLimOrd:
 assumes c: "Card_order r" and i: "\<not>finite (Field r)"
@@ -1683,7 +1683,7 @@
 qed
 
 
-subsection {* Regular vs. stable cardinals *}
+subsection \<open>Regular vs. stable cardinals\<close>
 
 definition stable :: "'a rel \<Rightarrow> bool"
 where
@@ -1851,9 +1851,9 @@
 lemma stable_nat: "stable |UNIV::nat set|"
 using stable_natLeq card_of_nat stable_ordIso by auto
 
-text{* Below, the type of "A" is not important -- we just had to choose an appropriate
+text\<open>Below, the type of "A" is not important -- we just had to choose an appropriate
    type to make "A" possible. What is important is that arbitrarily large
-   infinite sets of stable cardinality exist. *}
+   infinite sets of stable cardinality exist.\<close>
 
 lemma infinite_stable_exists:
 assumes CARD: "\<forall>r \<in> R. Card_order (r::'a rel)"
--- a/src/HOL/Cardinals/Cardinals.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Cardinals.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,7 +6,7 @@
 Theory of ordinals and cardinals.
 *)
 
-section {* Theory of Ordinals and Cardinals *}
+section \<open>Theory of Ordinals and Cardinals\<close>
 
 theory Cardinals
 imports Ordinal_Arithmetic Cardinal_Arithmetic Wellorder_Extension
--- a/src/HOL/Cardinals/Fun_More.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Fun_More.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,13 +5,13 @@
 More on injections, bijections and inverses.
 *)
 
-section {* More on Injections, Bijections and Inverses *}
+section \<open>More on Injections, Bijections and Inverses\<close>
 
 theory Fun_More
 imports Main
 begin
 
-subsection {* Purely functional properties *}
+subsection \<open>Purely functional properties\<close>
 
 (* unused *)
 (*1*)lemma notIn_Un_bij_betw2:
@@ -56,7 +56,7 @@
 qed
 
 
-subsection {* Properties involving finite and infinite sets *}
+subsection \<open>Properties involving finite and infinite sets\<close>
 
 (*3*)lemma inj_on_image_Pow:
 assumes "inj_on f A"
@@ -128,7 +128,7 @@
 qed
 
 
-subsection {* Properties involving Hilbert choice *}
+subsection \<open>Properties involving Hilbert choice\<close>
 
 (*1*)lemma bij_betw_inv_into_LEFT:
 assumes BIJ: "bij_betw f A A'" and SUB: "B \<le> A"
@@ -142,7 +142,7 @@
 using assms bij_betw_inv_into_LEFT[of f A A' B] by fast
 
 
-subsection {* Other facts *}
+subsection \<open>Other facts\<close>
 
 (*3*)lemma atLeastLessThan_injective:
 assumes "{0 ..< m::nat} = {0 ..< n}"
--- a/src/HOL/Cardinals/Order_Relation_More.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Order_Relation_More.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,13 +5,13 @@
 Basics on order-like relations.
 *)
 
-section {* Basics on Order-Like Relations *}
+section \<open>Basics on Order-Like Relations\<close>
 
 theory Order_Relation_More
 imports Main
 begin
 
-subsection {* The upper and lower bounds operators *}
+subsection \<open>The upper and lower bounds operators\<close>
 
 lemma aboveS_subset_above: "aboveS r a \<le> above r a"
 by(auto simp add: aboveS_def above_def)
@@ -572,7 +572,7 @@
 qed
 
 
-subsection {* Properties depending on more than one relation *}
+subsection \<open>Properties depending on more than one relation\<close>
 
 lemma under_incr2:
 "r \<le> r' \<Longrightarrow> under r a \<le> under r' a"
--- a/src/HOL/Cardinals/Order_Union.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Order_Union.thy	Thu May 26 17:51:22 2016 +0200
@@ -4,7 +4,7 @@
 The ordinal-like sum of two orders with disjoint fields
 *)
 
-section {* Order Union *}
+section \<open>Order Union\<close>
 
 theory Order_Union
 imports Order_Relation
--- a/src/HOL/Cardinals/Ordinal_Arithmetic.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Ordinal_Arithmetic.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Ordinal arithmetic.
 *)
 
-section {* Ordinal Arithmetic *}
+section \<open>Ordinal Arithmetic\<close>
 
 theory Ordinal_Arithmetic
 imports Wellorder_Constructions
@@ -252,7 +252,7 @@
   fix B
   assume *: "B \<subseteq> Field {((x,y1), (x,y2)) . x \<in> A \<and> (y1, y2) \<in> r}" and "B \<noteq> {}"
   from image_mono[OF *, of snd] have "snd ` B \<subseteq> Field r" unfolding Field_def by force
-  with `B \<noteq> {}` obtain x where x: "x \<in> snd ` B" "\<forall>x'\<in>snd ` B. (x', x) \<notin> r"
+  with \<open>B \<noteq> {}\<close> obtain x where x: "x \<in> snd ` B" "\<forall>x'\<in>snd ` B. (x', x) \<notin> r"
     using spec[OF assms[unfolded wf_eq_minimal2], of "snd ` B"] by auto
   then obtain a where "(a, x) \<in> B" by auto
   moreover
@@ -271,7 +271,7 @@
   fix B
   assume *: "B \<subseteq> Field {((x1, y), (x2, y)). (x1, x2) \<in> r \<and> y \<in> A}" and "B \<noteq> {}"
   from image_mono[OF *, of fst] have "fst ` B \<subseteq> Field r" unfolding Field_def by force
-  with `B \<noteq> {}` obtain x where x: "x \<in> fst ` B" "\<forall>x'\<in>fst ` B. (x', x) \<notin> r"
+  with \<open>B \<noteq> {}\<close> obtain x where x: "x \<in> fst ` B" "\<forall>x'\<in>fst ` B. (x', x) \<notin> r"
     using spec[OF assms[unfolded wf_eq_minimal2], of "fst ` B"] by auto
   then obtain a where "(x, a) \<in> B" by auto
   moreover
@@ -364,9 +364,9 @@
   (f o Inl) \<in> fin_support z A \<and> (f o Inr) \<in> fin_support z B" (is "?L \<longleftrightarrow> ?R1 \<and> ?R2")
 proof safe
   assume ?L
-  from `?L` show ?R1 unfolding fin_support_def support_def
+  from \<open>?L\<close> show ?R1 unfolding fin_support_def support_def
     by (fastforce simp: image_iff elim: finite_surj[of _ _ "case_sum id undefined"])
-  from `?L` show ?R2 unfolding fin_support_def support_def
+  from \<open>?L\<close> show ?R2 unfolding fin_support_def support_def
     by (fastforce simp: image_iff elim: finite_surj[of _ _ "case_sum undefined id"])
 next
   assume ?R1 ?R2
@@ -618,7 +618,7 @@
       show ?thesis
       proof (cases "?fg = ?gh \<longrightarrow> f ?fg \<noteq> h ?gh")
         case True
-        show ?thesis using max_fun_diff_max2[of f g h, OF True] * `f \<noteq> h` max_fun_diff_in
+        show ?thesis using max_fun_diff_max2[of f g h, OF True] * \<open>f \<noteq> h\<close> max_fun_diff_in
           r.max2_iff[OF FINFUNCD FINFUNCD] r.max2_equals1[OF FINFUNCD FINFUNCD] max_fun_diff_le_eq
           s.in_notinI[OF disjI1] unfolding oexp_def Let_def s.max2_def mem_Collect_eq by safe metis
       next
@@ -685,10 +685,10 @@
   show "SUPP f \<noteq> {}"
   proof (rule ccontr, unfold not_not)
     assume "SUPP f = {}"
-    moreover from `f \<in> F` assms(1) have "f \<in> FINFUNC" by blast
+    moreover from \<open>f \<in> F\<close> assms(1) have "f \<in> FINFUNC" by blast
     ultimately have "f = const"
       by (auto simp: fun_eq_iff support_def FinFunc_def Func_def const_def)
-    with assms(2) `f \<in> F` show False by blast
+    with assms(2) \<open>f \<in> F\<close> show False by blast
   qed
 qed
 
@@ -787,7 +787,7 @@
                 and SUPPG: "\<forall>g \<in> G. finite (SUPP g) \<and> SUPP g \<noteq> {} \<and> SUPP g \<subseteq> Field s"
                 using maxim_isMaxim_support support_not_const by auto
               define y' where "y' = s.minim {s.maxim (SUPP f) | f. f \<in> G}"
-              from G SUPPG maxG `G \<noteq> {}` have y'min: "s.isMinim {s.maxim (SUPP f) | f. f \<in> G} y'"
+              from G SUPPG maxG \<open>G \<noteq> {}\<close> have y'min: "s.isMinim {s.maxim (SUPP f) | f. f \<in> G} y'"
                 unfolding y'_def by (intro s.minim_isMinim) (auto simp: s.isMaxim_def)
               moreover
               have "\<forall>g \<in> G. z \<notin> SUPP g" unfolding support_def G_def by auto
@@ -804,7 +804,7 @@
                 unfolding s.isMinim_def s.isMaxim_def by auto
               with zy have "y' \<noteq> y" "(y', y) \<in> s" using antisymD[OF s.ANTISYM] transD[OF s.TRANS]
                 by blast+
-              moreover from `G \<noteq> {}` have "\<exists>g \<in> G. y' = wo_rel.maxim s (SUPP g)" using y'min
+              moreover from \<open>G \<noteq> {}\<close> have "\<exists>g \<in> G. y' = wo_rel.maxim s (SUPP g)" using y'min
                 by (auto simp: G_def s.isMinim_def)
               ultimately show ?thesis using mp[OF spec[OF mp[OF spec[OF 1]]], of y' G] G by auto
             qed simp
@@ -1315,8 +1315,8 @@
     fix g h assume gh: "g \<in> FinFunc r s" "h \<in> FinFunc r s" "F g \<noteq> F h"
       "let m = s.max_fun_diff g h in (g m, h m) \<in> r"
     hence "g \<noteq> h" by auto
-    note max_fun_diff_in = rs.max_fun_diff_in[OF `g \<noteq> h` gh(1,2)]
-    and max_fun_diff_max = rs.max_fun_diff_max[OF `g \<noteq> h` gh(1,2)]
+    note max_fun_diff_in = rs.max_fun_diff_in[OF \<open>g \<noteq> h\<close> gh(1,2)]
+    and max_fun_diff_max = rs.max_fun_diff_max[OF \<open>g \<noteq> h\<close> gh(1,2)]
     with *(4) invff *(2) have "t.max_fun_diff (F g) (F h) = f (s.max_fun_diff g h)"
       unfolding t.max_fun_diff_def compat_def
       by (intro t.maxim_equality) (auto simp: t.isMaxim_def F_def dest: injfD)
@@ -1401,14 +1401,14 @@
   next
     case False thus ?thesis
     proof (cases "r = {}")
-      case True thus ?thesis using t `t \<noteq> {}` st.oexp_Well_order ozero_ordLeq[unfolded ozero_def]
+      case True thus ?thesis using t \<open>t \<noteq> {}\<close> st.oexp_Well_order ozero_ordLeq[unfolded ozero_def]
         by auto
     next
       case False
       from assms obtain f where f: "embed r s f" unfolding ordLeq_def by blast
       hence f_underS: "\<forall>a\<in>Field r. f a \<in> Field s \<and> f ` underS r a \<subseteq> underS s (f a)"
         using embed_in_Field[OF rt.rWELL f] embed_underS2[OF rt.rWELL st.rWELL f] by auto
-      from f `t \<noteq> {}` False have *: "Field r \<noteq> {}" "Field s \<noteq> {}" "Field t \<noteq> {}"
+      from f \<open>t \<noteq> {}\<close> False have *: "Field r \<noteq> {}" "Field s \<noteq> {}" "Field t \<noteq> {}"
         unfolding Field_def embed_def under_def bij_betw_def by auto
       with f obtain x where "s.zero = f x" "x \<in> Field r" unfolding embed_def bij_betw_def
         using embed_in_Field[OF r.WELL f] s.zero_under set_mp[OF under_Field[of r]] by blast
@@ -1435,11 +1435,11 @@
             by simp metis
           moreover
           with hg have "t.max_fun_diff (?f h) (?f g) = t.max_fun_diff h g" unfolding rt.oexp_def
-            using rt.max_fun_diff[OF `h \<noteq> g`] rt.max_fun_diff_in[OF `h \<noteq> g`]
+            using rt.max_fun_diff[OF \<open>h \<noteq> g\<close>] rt.max_fun_diff_in[OF \<open>h \<noteq> g\<close>]
             by (subst t.max_fun_diff_def, intro t.maxim_equality)
               (auto simp: t.isMaxim_def intro: inj_onD[OF inj] intro!: rt.max_fun_diff_max)
           with Field_fg Field_fh hg fz f_underS compat neq have "(?f h, ?f g) \<in> st.oexp"
-             using rt.max_fun_diff[OF `h \<noteq> g`] rt.max_fun_diff_in[OF `h \<noteq> g`] unfolding st.Field_oexp
+             using rt.max_fun_diff[OF \<open>h \<noteq> g\<close>] rt.max_fun_diff_in[OF \<open>h \<noteq> g\<close>] unfolding st.Field_oexp
              unfolding rt.oexp_def st.oexp_def Let_def compat_def by auto
           ultimately show "?f h \<in> underS (s ^o t) (?f g)" unfolding underS_def by auto
         qed
@@ -1497,9 +1497,9 @@
   (is "?L = (?R1 \<and> ?R2)")
 proof safe
   assume ?L
-  from `?L` show ?R1 unfolding FinFunc_def Field_osum Func_def Int_iff fin_support_Field_osum o_def
+  from \<open>?L\<close> show ?R1 unfolding FinFunc_def Field_osum Func_def Int_iff fin_support_Field_osum o_def
     by (auto split: sum.splits)
-  from `?L` show ?R2 unfolding FinFunc_def Field_osum Func_def Int_iff fin_support_Field_osum o_def
+  from \<open>?L\<close> show ?R2 unfolding FinFunc_def Field_osum Func_def Int_iff fin_support_Field_osum o_def
     by (auto split: sum.splits)
 next
   assume ?R1 ?R2
@@ -1674,14 +1674,14 @@
   interpret rst: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
   show ?thesis
   proof (cases "s = {} \<or> t = {}")
-    case True with `r = {}` show ?thesis
+    case True with \<open>r = {}\<close> show ?thesis
       by (auto simp: oexp_empty[OF oexp_Well_order[OF Well_order_empty s]]
         intro!: ordIso_transitive[OF ordIso_symmetric[OF oone_ordIso] oone_ordIso]
           ordIso_transitive[OF oone_ordIso_oexp[OF ordIso_symmetric[OF oone_ordIso] t] oone_ordIso])
   next
      case False
      moreover hence "s *o t \<noteq> {}" unfolding oprod_def Field_def by fastforce
-     ultimately show ?thesis using `r = {}` ozero_ordIso
+     ultimately show ?thesis using \<open>r = {}\<close> ozero_ordIso
        by (auto simp add: s t oprod_Well_order ozero_def)
   qed
 next
--- a/src/HOL/Cardinals/Wellfounded_More.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Wellfounded_More.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,13 +5,13 @@
 More on well-founded relations.
 *)
 
-section {* More on Well-Founded Relations *}
+section \<open>More on Well-Founded Relations\<close>
 
 theory Wellfounded_More
 imports Wellfounded Order_Relation_More
 begin
 
-subsection {* Well-founded recursion via genuine fixpoints *}
+subsection \<open>Well-founded recursion via genuine fixpoints\<close>
 
 (*2*)lemma adm_wf_unique_fixpoint:
 fixes r :: "('a * 'a) set" and
--- a/src/HOL/Cardinals/Wellorder_Constructions.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Wellorder_Constructions.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Constructions on wellorders.
 *)
 
-section {* Constructions on Wellorders *}
+section \<open>Constructions on Wellorders\<close>
 
 theory Wellorder_Constructions
 imports
@@ -23,7 +23,7 @@
 lemma Func_emp2[simp]: "A \<noteq> {} \<Longrightarrow> Func A {} = {}" by auto
 
 
-subsection {* Restriction to a set *}
+subsection \<open>Restriction to a set\<close>
 
 lemma Restr_incr2:
 "r <= r' \<Longrightarrow> Restr r A <= Restr r' A"
@@ -55,7 +55,7 @@
 by blast
 
 
-subsection {* Order filters versus restrictions and embeddings *}
+subsection \<open>Order filters versus restrictions and embeddings\<close>
 
 lemma ofilter_Restr:
 assumes WELL: "Well_order r" and
@@ -92,7 +92,7 @@
 by (auto simp add: ofilter_subset_embedS_iso)
 
 
-subsection {* Ordering the well-orders by existence of embeddings *}
+subsection \<open>Ordering the well-orders by existence of embeddings\<close>
 
 corollary ordLeq_refl_on: "refl_on {r. Well_order r} ordLeq"
 using ordLeq_reflexive unfolding ordLeq_def refl_on_def
@@ -169,7 +169,7 @@
 by (auto simp add: ofilter_ordLeq wo_rel.under_ofilter wo_rel_def)
 
 
-subsection {* Copy via direct images *}
+subsection \<open>Copy via direct images\<close>
 
 lemma Id_dir_image: "dir_image Id f \<le> Id"
 unfolding dir_image_def by auto
@@ -275,9 +275,9 @@
 qed
 
 
-subsection {* The maxim among a finite set of ordinals *}
+subsection \<open>The maxim among a finite set of ordinals\<close>
 
-text {* The correct phrasing would be ``a maxim of ...", as @{text "\<le>o"} is only a preorder. *}
+text \<open>The correct phrasing would be ``a maxim of ...", as \<open>\<le>o\<close> is only a preorder.\<close>
 
 definition isOmax :: "'a rel set \<Rightarrow> 'a rel \<Rightarrow> bool"
 where
@@ -437,7 +437,7 @@
 qed
 
 
-subsection {* Limit and succesor ordinals *}
+subsection \<open>Limit and succesor ordinals\<close>
 
 lemma embed_underS2:
 assumes r: "Well_order r" and s: "Well_order s"  and g: "embed r s g" and a: "a \<in> Field r"
@@ -548,7 +548,7 @@
   by (elim cases_Total3 disjE) (auto elim: cases_Total3 intro!: assms simp: Field_def)
 qed
 
-subsubsection {* Successor and limit elements of an ordinal *}
+subsubsection \<open>Successor and limit elements of an ordinal\<close>
 
 definition "succ i \<equiv> suc {i}"
 
@@ -724,7 +724,7 @@
  else L f i"
 
 
-subsubsection {* Well-order recursion with (zero), succesor, and limit *}
+subsubsection \<open>Well-order recursion with (zero), succesor, and limit\<close>
 
 definition worecSL :: "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> (('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b"
 where "worecSL S L \<equiv> worec (mergeSL S L)"
@@ -819,7 +819,7 @@
 qed
 
 
-subsubsection {* Well-order succ-lim induction *}
+subsubsection \<open>Well-order succ-lim induction\<close>
 
 lemma ord_cases:
 obtains j where "i = succ j" and "aboveS j \<noteq> {}"  | "isLim i"
@@ -901,7 +901,7 @@
 abbreviation "worecZSL \<equiv> wo_rel.worecZSL"
 
 
-subsection {* Projections of wellorders *}
+subsection \<open>Projections of wellorders\<close>
 
 definition "oproj r s f \<equiv> Field s \<subseteq> f ` (Field r) \<and> compat r s f"
 
@@ -1018,7 +1018,7 @@
          fix b1 assume "b1 \<in> g ` underS r a"
          then obtain a1 where a1: "b1 = g a1" and a1: "a1 \<in> underS r a" by auto
          hence "b1 \<in> underS s (f a)"
-         using a by (metis `\<And>a1. a1 \<in> underS r a \<Longrightarrow> g a1 \<in> underS s (f a)`)
+         using a by (metis \<open>\<And>a1. a1 \<in> underS r a \<Longrightarrow> g a1 \<in> underS s (f a)\<close>)
          thus "f a \<noteq> b1 \<and> (b1, f a) \<in> s" unfolding underS_def by auto
        qed(insert fa, auto)
        thus "g a \<in> under s (f a)" unfolding under_def by auto
@@ -1061,7 +1061,7 @@
       by (auto dest!: oproj_Field2[OF f] inv_into_injective intro!: inv_into_into)
     ultimately have "(inv_into (Field r) f b, inv_into (Field r) f a) \<in> r"
       using r by (auto simp: well_order_on_def linear_order_on_def total_on_def)
-    with f[unfolded oproj_def compat_def] *(1) `a \<in> Field s`
+    with f[unfolded oproj_def compat_def] *(1) \<open>a \<in> Field s\<close>
       f_inv_into_f[of b f "Field r"] f_inv_into_f[of a f "Field r"]
       have "(b, a) \<in> s" by (metis in_mono)
     with *(2,3) s have False
--- a/src/HOL/Cardinals/Wellorder_Embedding.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Wellorder_Embedding.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,13 +5,13 @@
 Well-order embeddings.
 *)
 
-section {* Well-Order Embeddings *}
+section \<open>Well-Order Embeddings\<close>
 
 theory Wellorder_Embedding
 imports BNF_Wellorder_Embedding Fun_More Wellorder_Relation
 begin
 
-subsection {* Auxiliaries *}
+subsection \<open>Auxiliaries\<close>
 
 lemma UNION_bij_betw_ofilter:
 assumes WELL: "Well_order r" and
@@ -27,8 +27,8 @@
 qed
 
 
-subsection {* (Well-order) embeddings, strict embeddings, isomorphisms and order-compatible
-functions *}
+subsection \<open>(Well-order) embeddings, strict embeddings, isomorphisms and order-compatible
+functions\<close>
 
 lemma embed_halfcong:
 assumes EQ: "\<And> a. a \<in> Field r \<Longrightarrow> f a = g a" and
@@ -103,7 +103,7 @@
 using one_set_greater[of UNIV UNIV] by auto
 
 
-subsection {* Uniqueness of embeddings *}
+subsection \<open>Uniqueness of embeddings\<close>
 
 lemma comp_embedS:
 assumes WELL: "Well_order r" and WELL': "Well_order r'" and WELL'': "Well_order r''"
--- a/src/HOL/Cardinals/Wellorder_Extension.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Wellorder_Extension.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,16 +2,16 @@
     Author:     Christian Sternagel, JAIST
 *)
 
-section {* Extending Well-founded Relations to Wellorders *}
+section \<open>Extending Well-founded Relations to Wellorders\<close>
 
 theory Wellorder_Extension
 imports Main Order_Union
 begin
 
-subsection {* Extending Well-founded Relations to Wellorders *}
+subsection \<open>Extending Well-founded Relations to Wellorders\<close>
 
-text {*A \emph{downset} (also lower set, decreasing set, initial segment, or
-downward closed set) is closed w.r.t.\ smaller elements.*}
+text \<open>A \emph{downset} (also lower set, decreasing set, initial segment, or
+downward closed set) is closed w.r.t.\ smaller elements.\<close>
 definition downset_on where
   "downset_on A r = (\<forall>x y. (x, y) \<in> r \<and> y \<in> A \<longrightarrow> x \<in> A)"
 
@@ -30,7 +30,7 @@
   "downset_on A r \<Longrightarrow> (x, y) \<in> r \<Longrightarrow> y \<in> A \<Longrightarrow> x \<in> A"
   unfolding downset_on_def by blast
 
-text {*Extensions of relations w.r.t.\ a given set.*}
+text \<open>Extensions of relations w.r.t.\ a given set.\<close>
 definition extension_on where
   "extension_on A r s = (\<forall>x\<in>A. \<forall>y\<in>A. (x, y) \<in> s \<longrightarrow> (x, y) \<in> r)"
 
@@ -60,7 +60,7 @@
 lemma extension_on_empty [simp]: "extension_on {} p q"
   by (auto simp: extension_on_def)
 
-text {*Every well-founded relation can be extended to a wellorder.*}
+text \<open>Every well-founded relation can be extended to a wellorder.\<close>
 theorem well_order_extension:
   assumes "wf p"
   shows "\<exists>w. p \<subseteq> w \<and> Well_order w"
@@ -79,39 +79,39 @@
       trans_def I_def elim: trans_init_seg_of)
   { fix R assume "R \<in> Chains I"
     then have Ris: "R \<in> Chains init_seg_of" using mono_Chains [OF I_init] by blast
-    have subch: "chain\<^sub>\<subseteq> R" using `R \<in> Chains I` I_init
+    have subch: "chain\<^sub>\<subseteq> R" using \<open>R \<in> Chains I\<close> I_init
       by (auto simp: init_seg_of_def chain_subset_def Chains_def)
     have "\<forall>r\<in>R. Refl r" and "\<forall>r\<in>R. trans r" and "\<forall>r\<in>R. antisym r" and
       "\<forall>r\<in>R. Total r" and "\<forall>r\<in>R. wf (r - Id)" and
       "\<And>r. r \<in> R \<Longrightarrow> downset_on (Field r) p" and
       "\<And>r. r \<in> R \<Longrightarrow> extension_on (Field r) r p"
-      using Chains_wo [OF `R \<in> Chains I`] by (simp_all add: order_on_defs)
-    have "Refl (\<Union>R)" using `\<forall>r\<in>R. Refl r`  unfolding refl_on_def by fastforce
+      using Chains_wo [OF \<open>R \<in> Chains I\<close>] by (simp_all add: order_on_defs)
+    have "Refl (\<Union>R)" using \<open>\<forall>r\<in>R. Refl r\<close>  unfolding refl_on_def by fastforce
     moreover have "trans (\<Union>R)"
-      by (rule chain_subset_trans_Union [OF subch `\<forall>r\<in>R. trans r`])
+      by (rule chain_subset_trans_Union [OF subch \<open>\<forall>r\<in>R. trans r\<close>])
     moreover have "antisym (\<Union>R)"
-      by (rule chain_subset_antisym_Union [OF subch `\<forall>r\<in>R. antisym r`])
+      by (rule chain_subset_antisym_Union [OF subch \<open>\<forall>r\<in>R. antisym r\<close>])
     moreover have "Total (\<Union>R)"
-      by (rule chain_subset_Total_Union [OF subch `\<forall>r\<in>R. Total r`])
+      by (rule chain_subset_Total_Union [OF subch \<open>\<forall>r\<in>R. Total r\<close>])
     moreover have "wf ((\<Union>R) - Id)"
     proof -
       have "(\<Union>R) - Id = \<Union>{r - Id | r. r \<in> R}" by blast
-      with `\<forall>r\<in>R. wf (r - Id)` wf_Union_wf_init_segs [OF Chains_inits_DiffI [OF Ris]]
+      with \<open>\<forall>r\<in>R. wf (r - Id)\<close> wf_Union_wf_init_segs [OF Chains_inits_DiffI [OF Ris]]
       show ?thesis by fastforce
     qed
     ultimately have "Well_order (\<Union>R)" by (simp add: order_on_defs)
     moreover have "\<forall>r\<in>R. r initial_segment_of \<Union>R" using Ris
       by (simp add: Chains_init_seg_of_Union)
     moreover have "downset_on (Field (\<Union>R)) p"
-      by (rule downset_on_Union [OF `\<And>r. r \<in> R \<Longrightarrow> downset_on (Field r) p`])
+      by (rule downset_on_Union [OF \<open>\<And>r. r \<in> R \<Longrightarrow> downset_on (Field r) p\<close>])
     moreover have "extension_on (Field (\<Union>R)) (\<Union>R) p"
-      by (rule chain_subset_extension_on_Union [OF subch `\<And>r. r \<in> R \<Longrightarrow> extension_on (Field r) r p`])
+      by (rule chain_subset_extension_on_Union [OF subch \<open>\<And>r. r \<in> R \<Longrightarrow> extension_on (Field r) r p\<close>])
     ultimately have "\<Union>R \<in> ?K \<and> (\<forall>r\<in>R. (r,\<Union>R) \<in> I)"
-      using mono_Chains [OF I_init] and `R \<in> Chains I`
+      using mono_Chains [OF I_init] and \<open>R \<in> Chains I\<close>
       by (simp (no_asm) add: I_def del: Field_Union) (metis Chains_wo)
   }
   then have 1: "\<forall>R\<in>Chains I. \<exists>u\<in>Field I. \<forall>r\<in>R. (r, u) \<in> I" by (subst FI) blast
-  txt {*Zorn's Lemma yields a maximal wellorder m.*}
+  txt \<open>Zorn's Lemma yields a maximal wellorder m.\<close>
   from Zorns_po_lemma [OF 0 1] obtain m :: "('a \<times> 'a) set"
     where "Well_order m" and "downset_on (Field m) p" and "extension_on (Field m) m p" and
     max: "\<forall>r. Well_order r \<and> downset_on (Field r) p \<and> extension_on (Field r) r p \<and>
@@ -124,52 +124,52 @@
     with assms [unfolded wf_eq_minimal, THEN spec, of ?Q]
       obtain x where "x \<in> Field p" and "x \<notin> Field m" and
       min: "\<forall>y. (y, x) \<in> p \<longrightarrow> y \<notin> ?Q" by blast
-    txt {*Add @{term x} as topmost element to @{term m}.*}
+    txt \<open>Add @{term x} as topmost element to @{term m}.\<close>
     let ?s = "{(y, x) | y. y \<in> Field m}"
     let ?m = "insert (x, x) m \<union> ?s"
     have Fm: "Field ?m = insert x (Field m)" by (auto simp: Field_def)
     have "Refl m" and "trans m" and "antisym m" and "Total m" and "wf (m - Id)"
-      using `Well_order m` by (simp_all add: order_on_defs)
-    txt {*We show that the extension is a wellorder.*}
-    have "Refl ?m" using `Refl m` Fm by (auto simp: refl_on_def)
-    moreover have "trans ?m" using `trans m` `x \<notin> Field m`
+      using \<open>Well_order m\<close> by (simp_all add: order_on_defs)
+    txt \<open>We show that the extension is a wellorder.\<close>
+    have "Refl ?m" using \<open>Refl m\<close> Fm by (auto simp: refl_on_def)
+    moreover have "trans ?m" using \<open>trans m\<close> \<open>x \<notin> Field m\<close>
       unfolding trans_def Field_def Domain_unfold Domain_converse [symmetric] by blast
-    moreover have "antisym ?m" using `antisym m` `x \<notin> Field m`
+    moreover have "antisym ?m" using \<open>antisym m\<close> \<open>x \<notin> Field m\<close>
       unfolding antisym_def Field_def Domain_unfold Domain_converse [symmetric] by blast
-    moreover have "Total ?m" using `Total m` Fm by (auto simp: Relation.total_on_def)
+    moreover have "Total ?m" using \<open>Total m\<close> Fm by (auto simp: Relation.total_on_def)
     moreover have "wf (?m - Id)"
     proof -
-      have "wf ?s" using `x \<notin> Field m`
+      have "wf ?s" using \<open>x \<notin> Field m\<close>
         by (simp add: wf_eq_minimal Field_def Domain_unfold Domain_converse [symmetric]) metis
-      thus ?thesis using `wf (m - Id)` `x \<notin> Field m`
-        wf_subset [OF `wf ?s` Diff_subset]
+      thus ?thesis using \<open>wf (m - Id)\<close> \<open>x \<notin> Field m\<close>
+        wf_subset [OF \<open>wf ?s\<close> Diff_subset]
         by (fastforce intro!: wf_Un simp add: Un_Diff Field_def)
     qed
     ultimately have "Well_order ?m" by (simp add: order_on_defs)
     moreover have "extension_on (Field ?m) ?m p"
-      using `extension_on (Field m) m p` `downset_on (Field m) p`
+      using \<open>extension_on (Field m) m p\<close> \<open>downset_on (Field m) p\<close>
       by (subst Fm) (auto simp: extension_on_def dest: downset_onD)
     moreover have "downset_on (Field ?m) p"
       apply (subst Fm)
-      using `downset_on (Field m) p` and min
+      using \<open>downset_on (Field m) p\<close> and min
       unfolding downset_on_def Field_def by blast
     moreover have "(m, ?m) \<in> I"
-      using `Well_order m` and `Well_order ?m` and
-      `downset_on (Field m) p` and `downset_on (Field ?m) p` and
-      `extension_on (Field m) m p` and `extension_on (Field ?m) ?m p` and
-      `Refl m` and `x \<notin> Field m`
+      using \<open>Well_order m\<close> and \<open>Well_order ?m\<close> and
+      \<open>downset_on (Field m) p\<close> and \<open>downset_on (Field ?m) p\<close> and
+      \<open>extension_on (Field m) m p\<close> and \<open>extension_on (Field ?m) ?m p\<close> and
+      \<open>Refl m\<close> and \<open>x \<notin> Field m\<close>
       by (auto simp: I_def init_seg_of_def refl_on_def)
     ultimately
-    --{*This contradicts maximality of m:*}
-    show False using max and `x \<notin> Field m` unfolding Field_def by blast
+    \<comment>\<open>This contradicts maximality of m:\<close>
+    show False using max and \<open>x \<notin> Field m\<close> unfolding Field_def by blast
   qed
   have "p \<subseteq> m"
-    using `Field p \<subseteq> Field m` and `extension_on (Field m) m p`
+    using \<open>Field p \<subseteq> Field m\<close> and \<open>extension_on (Field m) m p\<close>
     unfolding Field_def extension_on_def by auto fast
-  with `Well_order m` show ?thesis by blast
+  with \<open>Well_order m\<close> show ?thesis by blast
 qed
 
-text {*Every well-founded relation can be extended to a total wellorder.*}
+text \<open>Every well-founded relation can be extended to a total wellorder.\<close>
 corollary total_well_order_extension:
   assumes "wf p"
   shows "\<exists>w. p \<subseteq> w \<and> Well_order w \<and> Field w = UNIV"
@@ -181,7 +181,7 @@
   have [simp]: "Field w' = ?A" using well_order_on_Well_order [OF wo'] by simp
   have *: "Field w \<inter> Field w' = {}" by simp
   let ?w = "w \<union>o w'"
-  have "p \<subseteq> ?w" using `p \<subseteq> w` by (auto simp: Osum_def)
+  have "p \<subseteq> ?w" using \<open>p \<subseteq> w\<close> by (auto simp: Osum_def)
   moreover have "Well_order ?w" using Osum_Well_order [OF * wo] and wo' by simp
   moreover have "Field ?w = UNIV" by (simp add: Field_Osum)
   ultimately show ?thesis by blast
@@ -191,23 +191,23 @@
   assumes "wf p" and "Field p \<subseteq> A"
   shows "\<exists>w. p \<subseteq> w \<and> well_order_on A w"
 proof -
-  from total_well_order_extension [OF `wf p`] obtain r
+  from total_well_order_extension [OF \<open>wf p\<close>] obtain r
     where "p \<subseteq> r" and wo: "Well_order r" and univ: "Field r = UNIV" by blast
   let ?r = "{(x, y). x \<in> A \<and> y \<in> A \<and> (x, y) \<in> r}"
-  from `p \<subseteq> r` have "p \<subseteq> ?r" using `Field p \<subseteq> A` by (auto simp: Field_def)
+  from \<open>p \<subseteq> r\<close> have "p \<subseteq> ?r" using \<open>Field p \<subseteq> A\<close> by (auto simp: Field_def)
   have 1: "Field ?r = A" using wo univ
     by (fastforce simp: Field_def order_on_defs refl_on_def)
   have "Refl r" "trans r" "antisym r" "Total r" "wf (r - Id)"
-    using `Well_order r` by (simp_all add: order_on_defs)
-  have "refl_on A ?r" using `Refl r` by (auto simp: refl_on_def univ)
-  moreover have "trans ?r" using `trans r`
+    using \<open>Well_order r\<close> by (simp_all add: order_on_defs)
+  have "refl_on A ?r" using \<open>Refl r\<close> by (auto simp: refl_on_def univ)
+  moreover have "trans ?r" using \<open>trans r\<close>
     unfolding trans_def by blast
-  moreover have "antisym ?r" using `antisym r`
+  moreover have "antisym ?r" using \<open>antisym r\<close>
     unfolding antisym_def by blast
-  moreover have "total_on A ?r" using `Total r` by (simp add: total_on_def univ)
-  moreover have "wf (?r - Id)" by (rule wf_subset [OF `wf(r - Id)`]) blast
+  moreover have "total_on A ?r" using \<open>Total r\<close> by (simp add: total_on_def univ)
+  moreover have "wf (?r - Id)" by (rule wf_subset [OF \<open>wf(r - Id)\<close>]) blast
   ultimately have "well_order_on A ?r" by (simp add: order_on_defs)
-  with `p \<subseteq> ?r` show ?thesis by blast
+  with \<open>p \<subseteq> ?r\<close> show ?thesis by blast
 qed
 
 end
--- a/src/HOL/Cardinals/Wellorder_Relation.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Cardinals/Wellorder_Relation.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Well-order relations.
 *)
 
-section {* Well-Order Relations *}
+section \<open>Well-Order Relations\<close>
 
 theory Wellorder_Relation
 imports BNF_Wellorder_Relation Wellfounded_More
@@ -14,7 +14,7 @@
 context wo_rel
 begin
 
-subsection {* Auxiliaries *}
+subsection \<open>Auxiliaries\<close>
 
 lemma PREORD: "Preorder r"
 using WELL order_on_defs[of _ r] by auto
@@ -29,7 +29,7 @@
 using TOTALS by auto
 
 
-subsection {* Well-founded induction and recursion adapted to non-strict well-order relations *}
+subsection \<open>Well-founded induction and recursion adapted to non-strict well-order relations\<close>
 
 lemma worec_unique_fixpoint:
 assumes ADM: "adm_wo H" and fp: "f = H f"
@@ -44,7 +44,7 @@
 qed
 
 
-subsubsection {* Properties of max2 *}
+subsubsection \<open>Properties of max2\<close>
 
 lemma max2_iff:
 assumes "a \<in> Field r" and "b \<in> Field r"
@@ -60,7 +60,7 @@
 qed
 
 
-subsubsection {* Properties of minim *}
+subsubsection \<open>Properties of minim\<close>
 
 lemma minim_Under:
 "\<lbrakk>B \<le> Field r; B \<noteq> {}\<rbrakk> \<Longrightarrow> minim B \<in> Under B"
@@ -166,7 +166,7 @@
 qed
 
 
-subsubsection {* Properties of supr *}
+subsubsection \<open>Properties of supr\<close>
 
 lemma supr_Above:
 assumes SUB: "B \<le> Field r" and ABOVE: "Above B \<noteq> {}"
@@ -287,7 +287,7 @@
 qed
 
 
-subsubsection {* Properties of successor *}
+subsubsection \<open>Properties of successor\<close>
 
 lemma suc_least:
 "\<lbrakk>B \<le> Field r; a \<in> Field r; (\<And> b. b \<in> B \<Longrightarrow> a \<noteq> b \<and> (b,a) \<in> r)\<rbrakk>
@@ -400,7 +400,7 @@
 qed
 
 
-subsubsection {* Properties of order filters *}
+subsubsection \<open>Properties of order filters\<close>
 
 lemma ofilter_Under[simp]:
 assumes "A \<le> Field r"
@@ -453,7 +453,7 @@
 using ofilter_under_UNION [of A] by auto
 
 
-subsubsection {* Other properties *}
+subsubsection \<open>Other properties\<close>
 
 lemma Trans_Under_regressive:
 assumes NE: "A \<noteq> {}" and SUB: "A \<le> Field r"
--- a/src/HOL/Codegenerator_Test/Candidates.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Candidates.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,7 +1,7 @@
 
 (* Author: Florian Haftmann, TU Muenchen *)
 
-section {* A huge collection of equations to generate code from *}
+section \<open>A huge collection of equations to generate code from\<close>
 
 theory Candidates
 imports
@@ -40,7 +40,7 @@
 
 code_reserved SML upto
 
-text \<open>Explicit check in @{text OCaml} for correct precedence of let expressions in list expressions\<close>
+text \<open>Explicit check in \<open>OCaml\<close> for correct precedence of let expressions in list expressions\<close>
 
 definition funny_list :: "bool list"
 where
@@ -58,7 +58,7 @@
 where
   "check_list = (if funny_list = funny_list' then () else undefined)"
 
-text \<open>Explicit check in @{text Scala} for correct bracketing of abstractions\<close>
+text \<open>Explicit check in \<open>Scala\<close> for correct bracketing of abstractions\<close>
 
 definition funny_funs :: "(bool \<Rightarrow> bool) list \<Rightarrow> (bool \<Rightarrow> bool) list"
 where
--- a/src/HOL/Codegenerator_Test/Code_Test_GHC.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Code_Test_GHC.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,7 +10,7 @@
 
 value [GHC] "14 + 7 * -12 :: integer"
 
-test_code -- \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
+test_code \<comment> \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
   "gcd 15 10 = (5 :: integer)"
   "gcd 15 (- 10) = (5 :: integer)"
   "gcd (- 10) 15 = (5 :: integer)"
--- a/src/HOL/Codegenerator_Test/Code_Test_OCaml.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Code_Test_OCaml.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,7 +10,7 @@
 
 value [OCaml] "14 + 7 * -12 :: integer"
 
-test_code -- \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
+test_code \<comment> \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
   "gcd 15 10 = (5 :: integer)"
   "gcd 15 (- 10) = (5 :: integer)"
   "gcd (- 10) 15 = (5 :: integer)"
--- a/src/HOL/Codegenerator_Test/Code_Test_Scala.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Code_Test_Scala.thy	Thu May 26 17:51:22 2016 +0200
@@ -12,7 +12,7 @@
 
 value [Scala] "14 + 7 * -12 :: integer"
 
-test_code -- \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
+test_code \<comment> \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
   "gcd 15 10 = (5 :: integer)"
   "gcd 15 (- 10) = (5 :: integer)"
   "gcd (- 10) 15 = (5 :: integer)"
--- a/src/HOL/Codegenerator_Test/Generate.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Generate.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,7 +1,7 @@
 
 (* Author: Florian Haftmann, TU Muenchen *)
 
-section {* Pervasive test of code generator *}
+section \<open>Pervasive test of code generator\<close>
 
 theory Generate
 imports
@@ -10,10 +10,10 @@
   "~~/src/HOL/Library/Finite_Lattice"
 begin
 
-text {*
+text \<open>
   If any of the checks fails, inspect the code generated
-  by a corresponding @{text export_code} command.
-*}
+  by a corresponding \<open>export_code\<close> command.
+\<close>
 
 export_code _ checking SML OCaml? Haskell? Scala
 
--- a/src/HOL/Codegenerator_Test/Generate_Binary_Nat.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Generate_Binary_Nat.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,7 +1,7 @@
 
 (* Author: Florian Haftmann, TU Muenchen *)
 
-section {* Pervasive test of code generator *}
+section \<open>Pervasive test of code generator\<close>
 
 theory Generate_Binary_Nat
 imports
@@ -11,10 +11,10 @@
   "~~/src/HOL/Library/Code_Binary_Nat"
 begin
 
-text {*
+text \<open>
   If any of the checks fails, inspect the code generated
-  by a corresponding @{text export_code} command.
-*}
+  by a corresponding \<open>export_code\<close> command.
+\<close>
 
 export_code _ checking SML OCaml? Haskell? Scala
 
--- a/src/HOL/Codegenerator_Test/Generate_Efficient_Datastructures.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Generate_Efficient_Datastructures.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,7 +1,7 @@
 
 (* Author: Ondrej Kuncar, TU Muenchen *)
 
-section {* Pervasive test of code generator *}
+section \<open>Pervasive test of code generator\<close>
 
 theory Generate_Efficient_Datastructures
 imports
@@ -18,7 +18,7 @@
   val consts = map_filter (try (curry (Axclass.param_of_inst thy)
     @{const_name "Quickcheck_Narrowing.partial_term_of"})) tycos;
 in fold Code.del_eqns consts thy end
-\<close> -- \<open>drop technical stuff from @{text Quickcheck_Narrowing} which is tailored towards Haskell\<close>
+\<close> \<comment> \<open>drop technical stuff from \<open>Quickcheck_Narrowing\<close> which is tailored towards Haskell\<close>
 
 (* 
    The following code equations have to be deleted because they use 
--- a/src/HOL/Codegenerator_Test/Generate_Pretty_Char.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Generate_Pretty_Char.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,7 +1,7 @@
 
 (* Author: Florian Haftmann, TU Muenchen *)
 
-section {* Pervasive test of code generator *}
+section \<open>Pervasive test of code generator\<close>
 
 theory Generate_Pretty_Char
 imports
@@ -11,10 +11,10 @@
   "~~/src/HOL/Library/Code_Char"
 begin
 
-text {*
+text \<open>
   If any of the checks fails, inspect the code generated
-  by a corresponding @{text export_code} command.
-*}
+  by a corresponding \<open>export_code\<close> command.
+\<close>
 
 export_code _ checking SML OCaml? Haskell? Scala
 
--- a/src/HOL/Codegenerator_Test/Generate_Target_Nat.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Codegenerator_Test/Generate_Target_Nat.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,7 +1,7 @@
 
 (* Author: Florian Haftmann, TU Muenchen *)
 
-section {* Pervasive test of code generator *}
+section \<open>Pervasive test of code generator\<close>
 
 theory Generate_Target_Nat
 imports
@@ -11,10 +11,10 @@
   "~~/src/HOL/Library/Code_Target_Numeral"
 begin
 
-text {*
+text \<open>
   If any of the checks fails, inspect the code generated
-  by a corresponding @{text export_code} command.
-*}
+  by a corresponding \<open>export_code\<close> command.
+\<close>
 
 export_code _ checking SML OCaml? Haskell? Scala
 
--- a/src/HOL/Corec_Examples/LFilter.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Corec_Examples/LFilter.thy	Thu May 26 17:51:22 2016 +0200
@@ -82,7 +82,7 @@
     else if P (lhd xs) then LCons (lhd xs) (f (ltl xs))
     else f (ltl xs))"
   shows "f = lfilter P"
--- \<open>It seems as if we cannot use @{thm lfilter_unique_weak} for showing this as the induction and the coinduction must be nested\<close>
+\<comment> \<open>It seems as if we cannot use @{thm lfilter_unique_weak} for showing this as the induction and the coinduction must be nested\<close>
 proof(rule ext)
   show "f xs = lfilter P xs" for xs
   proof(coinduction arbitrary: xs)
--- a/src/HOL/Datatype_Examples/Compat.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Compat.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,15 +5,15 @@
 Tests for compatibility with the old datatype package.
 *)
 
-section {* Tests for Compatibility with the Old Datatype Package *}
+section \<open>Tests for Compatibility with the Old Datatype Package\<close>
 
 theory Compat
 imports "~~/src/HOL/Library/Old_Datatype"
 begin
 
-subsection {* Viewing and Registering New-Style Datatypes as Old-Style Ones *}
+subsection \<open>Viewing and Registering New-Style Datatypes as Old-Style Ones\<close>
 
-ML {*
+ML \<open>
 fun check_len n xs label =
   length xs = n orelse error ("Expected length " ^ string_of_int (length xs) ^ " for " ^ label);
 
@@ -25,13 +25,13 @@
    these (Option.map #descr (BNF_LFP_Compat.get_info thy [] T_name)),
    these (Option.map #descr (BNF_LFP_Compat.get_info thy [BNF_LFP_Compat.Keep_Nesting] T_name)))
   |> tap (check_lens lens);
-*}
+\<close>
 
 old_datatype 'a old_lst = Old_Nl | Old_Cns 'a "'a old_lst"
 
-text {*
-A few tests to make sure that @{text old_datatype} works as expected:
-*}
+text \<open>
+A few tests to make sure that \<open>old_datatype\<close> works as expected:
+\<close>
 
 primrec old_len :: "'a old_lst \<Rightarrow> nat" where
   "old_len Old_Nl = 0"
@@ -49,208 +49,208 @@
 lemma "old_len xs = size xs"
   by (induct xs) auto
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name old_lst} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name old_lst}\<close>
 
 datatype 'a lst = Nl | Cns 'a "'a lst"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name lst} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name lst}\<close>
 
 datatype_compat lst
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name lst} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name lst}\<close>
 
 datatype 'b w = W | W' "'b w \<times> 'b list"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name w} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name w}\<close>
 
 datatype_compat w
 
-ML {* get_descrs @{theory} (2, 2, 1) @{type_name w} *}
+ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name w}\<close>
 
 datatype ('c, 'b) s = L 'c | R 'b
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name s} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name s}\<close>
 
 datatype 'd x = X | X' "('d x lst, 'd list) s"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name x} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name x}\<close>
 
 datatype_compat s
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name s} *}
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name x} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name s}\<close>
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name x}\<close>
 
 datatype_compat x
 
-ML {* get_descrs @{theory} (3, 3, 1) @{type_name x} *}
+ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name x}\<close>
 
 thm x.induct x.rec
 thm compat_x.induct compat_x.rec
 
 datatype 'a tttre = TTTre 'a "'a tttre lst lst lst"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name tttre} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name tttre}\<close>
 
 datatype_compat tttre
 
-ML {* get_descrs @{theory} (4, 4, 1) @{type_name tttre} *}
+ML \<open>get_descrs @{theory} (4, 4, 1) @{type_name tttre}\<close>
 
 thm tttre.induct tttre.rec
 thm compat_tttre.induct compat_tttre.rec
 
 datatype 'a ftre = FEmp | FTre "'a \<Rightarrow> 'a ftre lst"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name ftre} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name ftre}\<close>
 
 datatype_compat ftre
 
-ML {* get_descrs @{theory} (2, 2, 1) @{type_name ftre} *}
+ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name ftre}\<close>
 
 thm ftre.induct ftre.rec
 thm compat_ftre.induct compat_ftre.rec
 
 datatype 'a btre = BTre 'a "'a btre lst" "'a btre lst"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name btre} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name btre}\<close>
 
 datatype_compat btre
 
-ML {* get_descrs @{theory} (3, 3, 1) @{type_name btre} *}
+ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name btre}\<close>
 
 thm btre.induct btre.rec
 thm compat_btre.induct compat_btre.rec
 
 datatype 'a foo = Foo | Foo' 'a "'a bar" and 'a bar = Bar | Bar' 'a "'a foo"
 
-ML {* get_descrs @{theory} (0, 2, 2) @{type_name foo} *}
-ML {* get_descrs @{theory} (0, 2, 2) @{type_name bar} *}
+ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name foo}\<close>
+ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name bar}\<close>
 
 datatype_compat foo bar
 
-ML {* get_descrs @{theory} (2, 2, 2) @{type_name foo} *}
-ML {* get_descrs @{theory} (2, 2, 2) @{type_name bar} *}
+ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name foo}\<close>
+ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name bar}\<close>
 
 datatype 'a tre = Tre 'a "'a tre lst"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name tre} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name tre}\<close>
 
 datatype_compat tre
 
-ML {* get_descrs @{theory} (2, 2, 1) @{type_name tre} *}
+ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name tre}\<close>
 
 thm tre.induct tre.rec
 thm compat_tre.induct compat_tre.rec
 
 datatype 'a f = F 'a and 'a g = G 'a
 
-ML {* get_descrs @{theory} (0, 2, 2) @{type_name f} *}
-ML {* get_descrs @{theory} (0, 2, 2) @{type_name g} *}
+ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name f}\<close>
+ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name g}\<close>
 
 datatype h = H "h f" | H'
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name h} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name h}\<close>
 
 datatype_compat f g
 
-ML {* get_descrs @{theory} (2, 2, 2) @{type_name f} *}
-ML {* get_descrs @{theory} (2, 2, 2) @{type_name g} *}
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name h} *}
+ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name f}\<close>
+ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name g}\<close>
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name h}\<close>
 
 datatype_compat h
 
-ML {* get_descrs @{theory} (3, 3, 1) @{type_name h} *}
+ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name h}\<close>
 
 thm h.induct h.rec
 thm compat_h.induct compat_h.rec
 
 datatype myunit = MyUnity
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name myunit} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name myunit}\<close>
 
 datatype_compat myunit
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name myunit} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name myunit}\<close>
 
 datatype mylist = MyNil | MyCons nat mylist
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name mylist} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name mylist}\<close>
 
 datatype_compat mylist
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name mylist} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name mylist}\<close>
 
 datatype foo' = FooNil | FooCons bar' foo' and bar' = Bar
 
-ML {* get_descrs @{theory} (0, 2, 2) @{type_name foo'} *}
-ML {* get_descrs @{theory} (0, 2, 2) @{type_name bar'} *}
+ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name foo'}\<close>
+ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name bar'}\<close>
 
 datatype_compat bar' foo'
 
-ML {* get_descrs @{theory} (2, 2, 2) @{type_name foo'} *}
-ML {* get_descrs @{theory} (2, 2, 2) @{type_name bar'} *}
+ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name foo'}\<close>
+ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name bar'}\<close>
 
 old_datatype funky = Funky "funky tre" | Funky'
 
-ML {* get_descrs @{theory} (3, 3, 3) @{type_name funky} *}
+ML \<open>get_descrs @{theory} (3, 3, 3) @{type_name funky}\<close>
 
 old_datatype fnky = Fnky "nat tre"
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name fnky} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name fnky}\<close>
 
 datatype tree = Tree "tree foo"
 
-ML {* get_descrs @{theory} (0, 1, 1) @{type_name tree} *}
+ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name tree}\<close>
 
 datatype_compat tree
 
-ML {* get_descrs @{theory} (3, 3, 1) @{type_name tree} *}
+ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name tree}\<close>
 
 thm tree.induct tree.rec
 thm compat_tree.induct compat_tree.rec
 
 
-subsection {* Creating New-Style Datatypes Using Old-Style Interfaces *}
+subsection \<open>Creating New-Style Datatypes Using Old-Style Interfaces\<close>
 
-ML {*
+ML \<open>
 val l_specs =
   [((@{binding l}, [("'a", @{sort type})], NoSyn),
    [(@{binding N}, [], NoSyn),
     (@{binding C}, [@{typ 'a}, Type (Sign.full_name @{theory} @{binding l}, [@{typ 'a}])],
      NoSyn)])];
-*}
+\<close>
 
-setup {* snd o BNF_LFP_Compat.add_datatype [] l_specs *}
+setup \<open>snd o BNF_LFP_Compat.add_datatype [] l_specs\<close>
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name l} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name l}\<close>
 
 thm l.exhaust l.map l.induct l.rec l.size
 
-ML {*
+ML \<open>
 val t_specs =
   [((@{binding t}, [("'b", @{sort type})], NoSyn),
    [(@{binding T}, [@{typ 'b},
        Type (@{type_name l}, [Type (Sign.full_name @{theory} @{binding t}, [@{typ 'b}])])],
      NoSyn)])];
-*}
+\<close>
 
-setup {* snd o BNF_LFP_Compat.add_datatype [] t_specs *}
+setup \<open>snd o BNF_LFP_Compat.add_datatype [] t_specs\<close>
 
-ML {* get_descrs @{theory} (2, 2, 1) @{type_name t} *}
+ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name t}\<close>
 
 thm t.exhaust t.map t.induct t.rec t.size
 thm compat_t.induct compat_t.rec
 
-ML {*
+ML \<open>
 val ft_specs =
   [((@{binding ft}, [("'a", @{sort type})], NoSyn),
    [(@{binding FT0}, [], NoSyn),
     (@{binding FT}, [@{typ 'a} --> Type (Sign.full_name @{theory} @{binding ft}, [@{typ 'a}])],
      NoSyn)])];
-*}
+\<close>
 
-setup {* snd o BNF_LFP_Compat.add_datatype [] ft_specs *}
+setup \<open>snd o BNF_LFP_Compat.add_datatype [] ft_specs\<close>
 
-ML {* get_descrs @{theory} (1, 1, 1) @{type_name ft} *}
+ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name ft}\<close>
 
 thm ft.exhaust ft.induct ft.rec ft.size
 thm compat_ft.induct compat_ft.rec
--- a/src/HOL/Datatype_Examples/Derivation_Trees/DTree.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Derivation_Trees/DTree.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Derivation trees with nonterminal internal nodes and terminal leaves.
 *)
 
-section {* Trees with Nonterminal Internal Nodes and Terminal Leaves *}
+section \<open>Trees with Nonterminal Internal Nodes and Terminal Leaves\<close>
 
 theory DTree
 imports Prelim
@@ -16,7 +16,7 @@
 
 codatatype dtree = NNode (root: N) (ccont: "(T + dtree) fset")
 
-subsection{* Transporting the Characteristic Lemmas from @{text "fset"} to @{text "set"} *}
+subsection\<open>Transporting the Characteristic Lemmas from \<open>fset\<close> to \<open>set\<close>\<close>
 
 definition "Node n as \<equiv> NNode n (the_inv fset as)"
 definition "cont \<equiv> fset o ccont"
--- a/src/HOL/Datatype_Examples/Derivation_Trees/Gram_Lang.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Derivation_Trees/Gram_Lang.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Language of a grammar.
 *)
 
-section {* Language of a Grammar *}
+section \<open>Language of a Grammar\<close>
 
 theory Gram_Lang
 imports DTree "~~/src/HOL/Library/Infinite_Set"
@@ -21,7 +21,7 @@
 and used: "\<And> n. \<exists> tns. (n,tns) \<in> P"
 
 
-subsection{* Tree Basics: frontier, interior, etc. *}
+subsection\<open>Tree Basics: frontier, interior, etc.\<close>
 
 
 (* Frontier *)
@@ -309,7 +309,7 @@
   by (metis inItr.Base subtr_inItr subtr_rootL_in)
 
 
-subsection{* The Immediate Subtree Function *}
+subsection\<open>The Immediate Subtree Function\<close>
 
 (* production of: *)
 abbreviation "prodOf tr \<equiv> (id \<oplus> root) ` (cont tr)"
@@ -343,7 +343,7 @@
 by (metis (lifting) assms image_iff map_sum.simps(2))
 
 
-subsection{* Well-Formed Derivation Trees *}
+subsection\<open>Well-Formed Derivation Trees\<close>
 
 hide_const wf
 
@@ -448,7 +448,7 @@
 qed
 
 
-subsection{* Default Trees *}
+subsection\<open>Default Trees\<close>
 
 (* Pick a left-hand side of a production for each nonterminal *)
 definition S where "S n \<equiv> SOME tns. (n,tns) \<in> P"
@@ -488,7 +488,7 @@
 qed
 
 
-subsection{* Hereditary Substitution *}
+subsection\<open>Hereditary Substitution\<close>
 
 (* Auxiliary concept: The root-ommiting frontier: *)
 definition "inFrr ns tr t \<equiv> \<exists> tr'. Inr tr' \<in> cont tr \<and> inFr ns tr' t"
@@ -679,7 +679,7 @@
 end (* context *)
 
 
-subsection{* Regular Trees *}
+subsection\<open>Regular Trees\<close>
 
 definition "reg f tr \<equiv> \<forall> tr'. subtr UNIV tr' tr \<longrightarrow> tr' = f (root tr')"
 definition "regular tr \<equiv> \<exists> f. reg f tr"
@@ -770,7 +770,7 @@
 
 
 
-subsection {* Paths in a Regular Tree *}
+subsection \<open>Paths in a Regular Tree\<close>
 
 inductive path :: "(N \<Rightarrow> dtree) \<Rightarrow> N list \<Rightarrow> bool" for f where
 Base: "path f [n]"
@@ -914,7 +914,7 @@
 
 
 
-subsection{* The Regular Cut of a Tree *}
+subsection\<open>The Regular Cut of a Tree\<close>
 
 context fixes tr0 :: dtree
 begin
@@ -1081,7 +1081,7 @@
 end (* context *)
 
 
-subsection{* Recursive Description of the Regular Tree Frontiers *}
+subsection\<open>Recursive Description of the Regular Tree Frontiers\<close>
 
 lemma regular_inFr:
 assumes r: "regular tr" and In: "root tr \<in> ns"
@@ -1130,7 +1130,7 @@
 by (simp, metis (lifting) inFr_Ind_minus insert_Diff)
 
 
-subsection{* The Generated Languages *}
+subsection\<open>The Generated Languages\<close>
 
 (* The (possibly inifinite tree) generated language *)
 definition "L ns n \<equiv> {Fr ns tr | tr. wf tr \<and> root tr = n}"
--- a/src/HOL/Datatype_Examples/Derivation_Trees/Parallel.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Derivation_Trees/Parallel.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Parallel composition.
 *)
 
-section {* Parallel Composition *}
+section \<open>Parallel Composition\<close>
 
 theory Parallel
 imports DTree
@@ -19,7 +19,7 @@
     Nplus_comm: "(a::N) + b = b + (a::N)"
 and Nplus_assoc: "((a::N) + b) + c = a + (b + c)"
 
-subsection{* Corecursive Definition of Parallel Composition *}
+subsection\<open>Corecursive Definition of Parallel Composition\<close>
 
 fun par_r where "par_r (tr1,tr2) = root tr1 + root tr2"
 fun par_c where
@@ -65,7 +65,7 @@
 using Inr_cont_par[of tr1 tr2] unfolding vimage_def by auto
 
 
-subsection{* Structural Coinduction Proofs *}
+subsection\<open>Structural Coinduction Proofs\<close>
 
 lemma rel_set_rel_sum_eq[simp]:
 "rel_set (rel_sum (op =) \<phi>) A1 A2 \<longleftrightarrow>
--- a/src/HOL/Datatype_Examples/Derivation_Trees/Prelim.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Derivation_Trees/Prelim.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Preliminaries.
 *)
 
-section {* Preliminaries *}
+section \<open>Preliminaries\<close>
 
 theory Prelim
 imports "~~/src/HOL/Library/FSet"
--- a/src/HOL/Datatype_Examples/Koenig.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Koenig.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,7 +6,7 @@
 Koenig's lemma.
 *)
 
-section {* Koenig's Lemma *}
+section \<open>Koenig's Lemma\<close>
 
 theory Koenig
 imports TreeFI "~~/src/HOL/Library/Stream"
--- a/src/HOL/Datatype_Examples/Lambda_Term.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Lambda_Term.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,13 +6,13 @@
 Lambda-terms.
 *)
 
-section {* Lambda-Terms *}
+section \<open>Lambda-Terms\<close>
 
 theory Lambda_Term
 imports "~~/src/HOL/Library/FSet"
 begin
 
-section {* Datatype definition *}
+section \<open>Datatype definition\<close>
 
 datatype 'a trm =
   Var 'a |
@@ -21,7 +21,7 @@
   Lt "('a \<times> 'a trm) fset" "'a trm"
 
 
-subsection {* Example: The set of all variables varsOf and free variables fvarsOf of a term *}
+subsection \<open>Example: The set of all variables varsOf and free variables fvarsOf of a term\<close>
 
 primrec varsOf :: "'a trm \<Rightarrow> 'a set" where
   "varsOf (Var a) = {a}"
--- a/src/HOL/Datatype_Examples/Lift_BNF.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Lift_BNF.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Demonstration of the "lift_bnf" command.
 *)
 
-section {* Demonstration of the \textbf{lift_bnf} Command *}
+section \<open>Demonstration of the \textbf{lift_bnf} Command\<close>
 
 theory Lift_BNF
 imports Main
--- a/src/HOL/Datatype_Examples/Misc_Codatatype.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Misc_Codatatype.thy	Thu May 26 17:51:22 2016 +0200
@@ -7,7 +7,7 @@
 Miscellaneous codatatype definitions.
 *)
 
-section {* Miscellaneous Codatatype Definitions *}
+section \<open>Miscellaneous Codatatype Definitions\<close>
 
 theory Misc_Codatatype
 imports "~~/src/HOL/Library/FSet"
--- a/src/HOL/Datatype_Examples/Misc_Datatype.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Misc_Datatype.thy	Thu May 26 17:51:22 2016 +0200
@@ -7,7 +7,7 @@
 Miscellaneous datatype definitions.
 *)
 
-section {* Miscellaneous Datatype Definitions *}
+section \<open>Miscellaneous Datatype Definitions\<close>
 
 theory Misc_Datatype
 imports "~~/src/HOL/Library/Countable" "~~/src/HOL/Library/FSet"
--- a/src/HOL/Datatype_Examples/Misc_Primcorec.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Misc_Primcorec.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Miscellaneous primitive corecursive function definitions.
 *)
 
-section {* Miscellaneous Primitive Corecursive Function Definitions *}
+section \<open>Miscellaneous Primitive Corecursive Function Definitions\<close>
 
 theory Misc_Primcorec
 imports Misc_Codatatype
--- a/src/HOL/Datatype_Examples/Misc_Primrec.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Misc_Primrec.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Miscellaneous primitive recursive function definitions.
 *)
 
-section {* Miscellaneous Primitive Recursive Function Definitions *}
+section \<open>Miscellaneous Primitive Recursive Function Definitions\<close>
 
 theory Misc_Primrec
 imports Misc_Datatype
--- a/src/HOL/Datatype_Examples/Process.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Process.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Processes.
 *)
 
-section {* Processes *}
+section \<open>Processes\<close>
 
 theory Process
 imports "~~/src/HOL/Library/Stream"
@@ -17,7 +17,7 @@
 
 (* Read: prefix of, continuation of, choice 1 of, choice 2 of *)
 
-subsection {* Basic properties *}
+subsection \<open>Basic properties\<close>
 
 (* Constructors versus discriminators *)
 theorem isAction_isChoice:
@@ -28,7 +28,7 @@
 by (cases rule: process.exhaust[of p]) auto
 
 
-subsection{* Coinduction *}
+subsection\<open>Coinduction\<close>
 
 theorem process_coind[elim, consumes 1, case_names iss Action Choice, induct pred: "HOL.eq"]:
   assumes phi: "\<phi> p p'" and
@@ -50,19 +50,19 @@
   by (coinduct rule: process.coinduct_strong) (metis process.collapse(1,2) process.disc(3))
 
 
-subsection {* Coiteration (unfold) *}
+subsection \<open>Coiteration (unfold)\<close>
 
 
-section{* Coinductive definition of the notion of trace *}
+section\<open>Coinductive definition of the notion of trace\<close>
 coinductive trace where
 "trace p as \<Longrightarrow> trace (Action a p) (a ## as)"
 |
 "trace p as \<or> trace q as \<Longrightarrow> trace (Choice p q) as"
 
 
-section{* Examples of corecursive definitions: *}
+section\<open>Examples of corecursive definitions:\<close>
 
-subsection{* Single-guard fixpoint definition *}
+subsection\<open>Single-guard fixpoint definition\<close>
 
 primcorec BX where
   "isAction BX"
@@ -70,7 +70,7 @@
 | "contOf BX = BX"
 
 
-subsection{* Multi-guard fixpoint definitions, simulated with auxiliary arguments *}
+subsection\<open>Multi-guard fixpoint definitions, simulated with auxiliary arguments\<close>
 
 datatype x_y_ax = x | y | ax
 
@@ -94,7 +94,7 @@
 
 
 
-section{* Case study: Multi-guard fixpoint definitions, without auxiliary arguments *}
+section\<open>Case study: Multi-guard fixpoint definitions, without auxiliary arguments\<close>
 
 hide_const x y ax X Y AX
 
--- a/src/HOL/Datatype_Examples/Stream_Processor.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/Stream_Processor.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,7 +6,7 @@
 Stream processors---a syntactic representation of continuous functions on streams.
 *)
 
-section {* Stream Processors---A Syntactic Representation of Continuous Functions on Streams *}
+section \<open>Stream Processors---A Syntactic Representation of Continuous Functions on Streams\<close>
 
 theory Stream_Processor
 imports "~~/src/HOL/Library/Stream" "~~/src/HOL/Library/BNF_Axiomatization"
@@ -15,7 +15,7 @@
 declare [[typedef_overloaded]]
 
 
-section {* Continuous Functions on Streams *}
+section \<open>Continuous Functions on Streams\<close>
 
 datatype ('a, 'b, 'c) sp\<^sub>\<mu> =
   Get "'a \<Rightarrow> ('a, 'b, 'c) sp\<^sub>\<mu>"
@@ -37,10 +37,10 @@
 lemma run\<^sub>\<nu>_copy: "run\<^sub>\<nu> copy s = s"
   by (coinduction arbitrary: s) simp
 
-text {*
+text \<open>
 To use the function package for the definition of composition the
 wellfoundedness of the subtree relation needs to be proved first.
-*}
+\<close>
 
 definition "sub \<equiv> {(f a, Get f) | a f. True}"
 
@@ -88,7 +88,7 @@
   qed
 qed
 
-text {* Alternative definition of composition using primrec instead of function *}
+text \<open>Alternative definition of composition using primrec instead of function\<close>
 
 primrec sp\<^sub>\<mu>_comp2R  where
   "sp\<^sub>\<mu>_comp2R f (Put b sp) = f b (out sp)"
@@ -125,7 +125,7 @@
   qed
 qed
 
-text {* The two definitions are equivalent *}
+text \<open>The two definitions are equivalent\<close>
 
 lemma sp\<^sub>\<mu>_comp_sp\<^sub>\<mu>_comp2[simp]: "sp o\<^sub>\<mu> sp' = sp o\<^sup>*\<^sub>\<mu> sp'"
   by (induct sp sp' rule: sp\<^sub>\<mu>_comp.induct) auto
@@ -134,7 +134,7 @@
 lemma sp\<^sub>\<mu>_rel_map_map[unfolded vimage2p_def, simp]:
   "rel_sp\<^sub>\<mu> R1 R2 (map_sp\<^sub>\<mu> f1 f2 sp) (map_sp\<^sub>\<mu> g1 g2 sp') =
   rel_sp\<^sub>\<mu> (BNF_Def.vimage2p f1 g1 R1) (BNF_Def.vimage2p f2 g2 R2) sp sp'"
-by (tactic {*
+by (tactic \<open>
   let
     val ks = 1 upto 2;
     val ctxt = @{context};
@@ -157,7 +157,7 @@
       hyp_subst_tac ctxt,
       assume_tac ctxt])
   end
-*})
+\<close>)
 
 lemma sp\<^sub>\<mu>_rel_self: "\<lbrakk>op = \<le> R1; op = \<le> R2\<rbrakk> \<Longrightarrow> rel_sp\<^sub>\<mu> R1 R2 x x"
   by (erule (1) predicate2D[OF sp\<^sub>\<mu>.rel_mono]) (simp only: sp\<^sub>\<mu>.rel_eq)
@@ -166,7 +166,7 @@
   by (coinduction arbitrary: sp sp') (auto intro!: sp\<^sub>\<mu>_rel_self)
 
 
-section {* Generalization to an Arbitrary BNF as Codomain *}
+section \<open>Generalization to an Arbitrary BNF as Codomain\<close>
 
 bnf_axiomatization ('a, 'b) F for map: F
 
--- a/src/HOL/Datatype_Examples/TreeFI.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/TreeFI.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,7 +6,7 @@
 Finitely branching possibly infinite trees.
 *)
 
-section {* Finitely Branching Possibly Infinite Trees *}
+section \<open>Finitely Branching Possibly Infinite Trees\<close>
 
 theory TreeFI
 imports Main
--- a/src/HOL/Datatype_Examples/TreeFsetI.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Datatype_Examples/TreeFsetI.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,7 +6,7 @@
 Finitely branching possibly infinite trees, with sets of children.
 *)
 
-section {* Finitely Branching Possibly Infinite Trees, with Sets of Children *}
+section \<open>Finitely Branching Possibly Infinite Trees, with Sets of Children\<close>
 
 theory TreeFsetI
 imports "~~/src/HOL/Library/FSet"
--- a/src/HOL/IMPP/Com.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IMPP/Com.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:   David von Oheimb (based on a theory by Tobias Nipkow et al), TUM
 *)
 
-section {* Semantics of arithmetic and boolean expressions, Syntax of commands *}
+section \<open>Semantics of arithmetic and boolean expressions, Syntax of commands\<close>
 
 theory Com
 imports Main
@@ -83,10 +83,10 @@
   "WT_bodies = (!(pn,b):set bodies. WT b)"
 
 
-ML {*
+ML \<open>
   fun make_imp_tac ctxt =
     EVERY' [resolve_tac ctxt [mp], fn i => assume_tac ctxt (i + 1), eresolve_tac ctxt [thin_rl]]
-*}
+\<close>
 
 lemma finite_dom_body: "finite (dom body)"
 apply (unfold body_def)
--- a/src/HOL/IMPP/EvenOdd.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IMPP/EvenOdd.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     David von Oheimb, TUM
 *)
 
-section {* Example of mutually recursive procedures verified with Hoare logic *}
+section \<open>Example of mutually recursive procedures verified with Hoare logic\<close>
 
 theory EvenOdd
 imports Main Misc
--- a/src/HOL/IMPP/Hoare.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IMPP/Hoare.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,18 +3,18 @@
     Copyright   1999 TUM
 *)
 
-section {* Inductive definition of Hoare logic for partial correctness *}
+section \<open>Inductive definition of Hoare logic for partial correctness\<close>
 
 theory Hoare
 imports Natural
 begin
 
-text {*
+text \<open>
   Completeness is taken relative to completeness of the underlying logic.
 
   Two versions of completeness proof: nested single recursion
   vs. simultaneous recursion in call rule
-*}
+\<close>
 
 type_synonym 'a assn = "'a => state => bool"
 translations
@@ -105,7 +105,7 @@
              X:=CALL pn(a) .{Q}"
 
 
-section {* Soundness and relative completeness of Hoare rules wrt operational semantics *}
+section \<open>Soundness and relative completeness of Hoare rules wrt operational semantics\<close>
 
 lemma single_stateE:
   "state_not_singleton ==> !t. (!s::state. s = t) --> False"
@@ -209,7 +209,7 @@
 *)
 lemma thin [rule_format]: "G'||-ts ==> !G. G' <= G --> G||-ts"
 apply (erule hoare_derivs.induct)
-apply                (tactic {* ALLGOALS (EVERY'[clarify_tac @{context}, REPEAT o smp_tac @{context} 1]) *})
+apply                (tactic \<open>ALLGOALS (EVERY'[clarify_tac @{context}, REPEAT o smp_tac @{context} 1])\<close>)
 apply (rule hoare_derivs.empty)
 apply               (erule (1) hoare_derivs.insert)
 apply              (fast intro: hoare_derivs.asm)
@@ -218,7 +218,7 @@
 apply           (rule hoare_derivs.conseq, intro strip, tactic "smp_tac @{context} 2 1", clarify, tactic "smp_tac @{context} 1 1",rule exI, rule exI, erule (1) conjI)
 prefer 7
 apply          (rule_tac hoare_derivs.Body, drule_tac spec, erule_tac mp, fast)
-apply         (tactic {* ALLGOALS (resolve_tac @{context} ((funpow 5 tl) @{thms hoare_derivs.intros}) THEN_ALL_NEW (fast_tac @{context})) *})
+apply         (tactic \<open>ALLGOALS (resolve_tac @{context} ((funpow 5 tl) @{thms hoare_derivs.intros}) THEN_ALL_NEW (fast_tac @{context}))\<close>)
 done
 
 lemma weak_Body: "G|-{P}. the (body pn) .{Q} ==> G|-{P}. BODY pn .{Q}"
@@ -278,19 +278,19 @@
 
 lemma hoare_sound: "G||-ts ==> G||=ts"
 apply (erule hoare_derivs.induct)
-apply              (tactic {* TRYALL (eresolve_tac @{context} [@{thm Loop_sound_lemma}, @{thm Body_sound_lemma}] THEN_ALL_NEW assume_tac @{context}) *})
+apply              (tactic \<open>TRYALL (eresolve_tac @{context} [@{thm Loop_sound_lemma}, @{thm Body_sound_lemma}] THEN_ALL_NEW assume_tac @{context})\<close>)
 apply            (unfold hoare_valids_def)
 apply            blast
 apply           blast
 apply          (blast) (* asm *)
 apply         (blast) (* cut *)
 apply        (blast) (* weaken *)
-apply       (tactic {* ALLGOALS (EVERY'
+apply       (tactic \<open>ALLGOALS (EVERY'
   [REPEAT o Rule_Insts.thin_tac @{context} "hoare_derivs _ _" [],
-   simp_tac @{context}, clarify_tac @{context}, REPEAT o smp_tac @{context} 1]) *})
+   simp_tac @{context}, clarify_tac @{context}, REPEAT o smp_tac @{context} 1])\<close>)
 apply       (simp_all (no_asm_use) add: triple_valid_def2)
 apply       (intro strip, tactic "smp_tac @{context} 2 1", blast) (* conseq *)
-apply      (tactic {* ALLGOALS (clarsimp_tac @{context}) *}) (* Skip, Ass, Local *)
+apply      (tactic \<open>ALLGOALS (clarsimp_tac @{context})\<close>) (* Skip, Ass, Local *)
 prefer 3 apply   (force) (* Call *)
 apply  (erule_tac [2] evaln_elim_cases) (* If *)
 apply   blast+
@@ -335,24 +335,24 @@
 lemma MGF_lemma1 [rule_format (no_asm)]: "state_not_singleton ==>
   !pn:dom body. G|-{=}.BODY pn.{->} ==> WT c --> G|-{=}.c.{->}"
 apply (induct_tac c)
-apply        (tactic {* ALLGOALS (clarsimp_tac @{context}) *})
+apply        (tactic \<open>ALLGOALS (clarsimp_tac @{context})\<close>)
 prefer 7 apply        (fast intro: domI)
 apply (erule_tac [6] MGT_alternD)
 apply       (unfold MGT_def)
 apply       (drule_tac [7] bspec, erule_tac [7] domI)
-apply       (rule_tac [7] escape, tactic {* clarsimp_tac @{context} 7 *},
+apply       (rule_tac [7] escape, tactic \<open>clarsimp_tac @{context} 7\<close>,
   rename_tac [7] "fun" y Z,
   rule_tac [7] P1 = "%Z' s. s= (setlocs Z newlocs) [Loc Arg ::= fun Z]" in hoare_derivs.Call [THEN conseq1], erule_tac [7] conseq12)
 apply        (erule_tac [!] thin_rl)
 apply (rule hoare_derivs.Skip [THEN conseq2])
 apply (rule_tac [2] hoare_derivs.Ass [THEN conseq1])
-apply        (rule_tac [3] escape, tactic {* clarsimp_tac @{context} 3 *},
+apply        (rule_tac [3] escape, tactic \<open>clarsimp_tac @{context} 3\<close>,
   rename_tac [3] loc "fun" y Z,
   rule_tac [3] P1 = "%Z' s. s= (Z[Loc loc::=fun Z])" in hoare_derivs.Local [THEN conseq1],
   erule_tac [3] conseq12)
 apply         (erule_tac [5] hoare_derivs.Comp, erule_tac [5] conseq12)
-apply         (tactic {* (resolve_tac @{context} @{thms hoare_derivs.If} THEN_ALL_NEW
-                eresolve_tac @{context} @{thms conseq12}) 6 *})
+apply         (tactic \<open>(resolve_tac @{context} @{thms hoare_derivs.If} THEN_ALL_NEW
+                eresolve_tac @{context} @{thms conseq12}) 6\<close>)
 apply          (rule_tac [8] hoare_derivs.Loop [THEN conseq2], erule_tac [8] conseq12)
 apply           auto
 done
@@ -367,7 +367,7 @@
   shows "finite U ==> uG = mgt_call`U ==>
   !G. G <= uG --> n <= card uG --> card G = card uG - n --> (!c. wt c --> P G {mgt c})"
 apply (induct_tac n)
-apply  (tactic {* ALLGOALS (clarsimp_tac @{context}) *})
+apply  (tactic \<open>ALLGOALS (clarsimp_tac @{context})\<close>)
 apply  (subgoal_tac "G = mgt_call ` U")
 prefer 2
 apply   (simp add: card_seteq)
--- a/src/HOL/IMPP/Misc.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IMPP/Misc.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     David von Oheimb, TUM
 *)
 
-section {* Several examples for Hoare logic *}
+section \<open>Several examples for Hoare logic\<close>
 
 theory Misc
 imports Hoare
--- a/src/HOL/IMPP/Natural.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IMPP/Natural.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     David von Oheimb (based on a theory by Tobias Nipkow et al), TUM
 *)
 
-section {* Natural semantics of commands *}
+section \<open>Natural semantics of commands\<close>
 
 theory Natural
 imports Com
@@ -111,9 +111,9 @@
 
 lemma evaln_evalc: "<c,s> -n-> t ==> <c,s> -c-> t"
 apply (erule evaln.induct)
-apply (tactic {*
+apply (tactic \<open>
   ALLGOALS (resolve_tac @{context} @{thms evalc.intros} THEN_ALL_NEW assume_tac @{context})
-*})
+\<close>)
 done
 
 lemma Suc_le_D_lemma: "[| Suc n <= m'; (!!m. n <= m ==> P (Suc m)) |] ==> P m'"
@@ -139,12 +139,12 @@
 
 lemma evalc_evaln: "<c,s> -c-> t ==> ? n. <c,s> -n-> t"
 apply (erule evalc.induct)
-apply (tactic {* ALLGOALS (REPEAT o eresolve_tac @{context} [exE]) *})
-apply (tactic {* TRYALL (EVERY' [dresolve_tac @{context} @{thms evaln_max2}, assume_tac @{context},
-  REPEAT o eresolve_tac @{context} [exE, conjE]]) *})
+apply (tactic \<open>ALLGOALS (REPEAT o eresolve_tac @{context} [exE])\<close>)
+apply (tactic \<open>TRYALL (EVERY' [dresolve_tac @{context} @{thms evaln_max2}, assume_tac @{context},
+  REPEAT o eresolve_tac @{context} [exE, conjE]])\<close>)
 apply (tactic
-  {* ALLGOALS (resolve_tac @{context} [exI] THEN'
-    resolve_tac @{context} @{thms evaln.intros} THEN_ALL_NEW assume_tac @{context}) *})
+  \<open>ALLGOALS (resolve_tac @{context} [exI] THEN'
+    resolve_tac @{context} @{thms evaln.intros} THEN_ALL_NEW assume_tac @{context})\<close>)
 done
 
 lemma eval_eq: "<c,s> -c-> t = (? n. <c,s> -n-> t)"
--- a/src/HOL/IOA/Asig.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IOA/Asig.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  TU Muenchen
 *)
 
-section {* Action signatures *}
+section \<open>Action signatures\<close>
 
 theory Asig
 imports Main
--- a/src/HOL/IOA/IOA.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IOA/IOA.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  TU Muenchen
 *)
 
-section {* The I/O automata of Lynch and Tuttle *}
+section \<open>The I/O automata of Lynch and Tuttle\<close>
 
 theory IOA
 imports Asig
--- a/src/HOL/IOA/Solve.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/IOA/Solve.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  TU Muenchen
 *)
 
-section {* Weak possibilities mapping (abstraction) *}
+section \<open>Weak possibilities mapping (abstraction)\<close>
 
 theory Solve
 imports IOA
@@ -144,9 +144,9 @@
   prefer 2
   apply force
   apply (simp (no_asm) add: conj_disj_distribR cong add: conj_cong split add: if_split)
-  apply (tactic {*
+  apply (tactic \<open>
     REPEAT((resolve_tac @{context} [conjI, impI] 1 ORELSE eresolve_tac @{context} [conjE] 1) THEN
-      asm_full_simp_tac(@{context} addsimps [@{thm comp1_reachable}, @{thm comp2_reachable}]) 1) *})
+      asm_full_simp_tac(@{context} addsimps [@{thm comp1_reachable}, @{thm comp2_reachable}]) 1)\<close>)
   done
 
 
--- a/src/HOL/Imperative_HOL/Array.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Array.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,13 +2,13 @@
     Author:     John Matthews, Galois Connections; Alexander Krauss, Lukas Bulwahn & Florian Haftmann, TU Muenchen
 *)
 
-section {* Monadic arrays *}
+section \<open>Monadic arrays\<close>
 
 theory Array
 imports Heap_Monad
 begin
 
-subsection {* Primitives *}
+subsection \<open>Primitives\<close>
 
 definition present :: "heap \<Rightarrow> 'a::heap array \<Rightarrow> bool" where
   "present h a \<longleftrightarrow> addr_of_array a < lim h"
@@ -36,7 +36,7 @@
   "r =!!= s \<longleftrightarrow> TYPEREP('a) \<noteq> TYPEREP('b) \<or> addr_of_array r \<noteq> addr_of_array s"
 
 
-subsection {* Monad operations *}
+subsection \<open>Monad operations\<close>
 
 definition new :: "nat \<Rightarrow> 'a::heap \<Rightarrow> 'a array Heap" where
   [code del]: "new n x = Heap_Monad.heap (alloc (replicate n x))"
@@ -70,12 +70,12 @@
   [code del]: "freeze a = Heap_Monad.tap (\<lambda>h. get h a)"
 
 
-subsection {* Properties *}
+subsection \<open>Properties\<close>
 
-text {* FIXME: Does there exist a "canonical" array axiomatisation in
-the literature?  *}
+text \<open>FIXME: Does there exist a "canonical" array axiomatisation in
+the literature?\<close>
 
-text {* Primitives *}
+text \<open>Primitives\<close>
 
 lemma noteq_sym: "a =!!= b \<Longrightarrow> b =!!= a"
   and unequal [simp]: "a \<noteq> a' \<longleftrightarrow> a =!!= a'"
@@ -160,7 +160,7 @@
   by (simp add: present_def alloc_def Let_def)
 
 
-text {* Monad operations *}
+text \<open>Monad operations\<close>
 
 lemma execute_new [execute_simps]:
   "execute (new n x) h = Some (alloc (replicate n x) h)"
@@ -352,9 +352,9 @@
 hide_const (open) present get set alloc length update noteq new of_list make len nth upd map_entry swap freeze
 
 
-subsection {* Code generator setup *}
+subsection \<open>Code generator setup\<close>
 
-subsubsection {* Logical intermediate layer *}
+subsubsection \<open>Logical intermediate layer\<close>
 
 definition new' where
   [code del]: "new' = Array.new o nat_of_integer"
@@ -439,7 +439,7 @@
 hide_const (open) new' make' len' nth' upd'
 
 
-text {* SML *}
+text \<open>SML\<close>
 
 code_printing type_constructor array \<rightharpoonup> (SML) "_/ array"
 code_printing constant Array \<rightharpoonup> (SML) "raise/ (Fail/ \"bare Array\")"
@@ -454,7 +454,7 @@
 code_reserved SML Array
 
 
-text {* OCaml *}
+text \<open>OCaml\<close>
 
 code_printing type_constructor array \<rightharpoonup> (OCaml) "_/ array"
 code_printing constant Array \<rightharpoonup> (OCaml) "failwith/ \"bare Array\""
@@ -470,7 +470,7 @@
 code_reserved OCaml Array
 
 
-text {* Haskell *}
+text \<open>Haskell\<close>
 
 code_printing type_constructor array \<rightharpoonup> (Haskell) "Heap.STArray/ Heap.RealWorld/ _"
 code_printing constant Array \<rightharpoonup> (Haskell) "error/ \"bare Array\""
@@ -484,7 +484,7 @@
 code_printing class_instance array :: HOL.equal \<rightharpoonup> (Haskell) -
 
 
-text {* Scala *}
+text \<open>Scala\<close>
 
 code_printing type_constructor array \<rightharpoonup> (Scala) "!collection.mutable.ArraySeq[_]"
 code_printing constant Array \<rightharpoonup> (Scala) "!sys.error(\"bare Array\")"
--- a/src/HOL/Imperative_HOL/Heap.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Heap.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,15 +2,15 @@
     Author:     John Matthews, Galois Connections; Alexander Krauss, TU Muenchen
 *)
 
-section {* A polymorphic heap based on cantor encodings *}
+section \<open>A polymorphic heap based on cantor encodings\<close>
 
 theory Heap
 imports Main "~~/src/HOL/Library/Countable"
 begin
 
-subsection {* Representable types *}
+subsection \<open>Representable types\<close>
 
-text {* The type class of representable types *}
+text \<open>The type class of representable types\<close>
 
 class heap = typerep + countable
 
@@ -35,15 +35,15 @@
 instance typerep :: heap ..
 
 
-subsection {* A polymorphic heap with dynamic arrays and references *}
+subsection \<open>A polymorphic heap with dynamic arrays and references\<close>
 
-text {*
+text \<open>
   References and arrays are developed in parallel,
   but keeping them separate makes some later proofs simpler.
-*}
+\<close>
 
-type_synonym addr = nat -- "untyped heap references"
-type_synonym heap_rep = nat -- "representable values"
+type_synonym addr = nat \<comment> "untyped heap references"
+type_synonym heap_rep = nat \<comment> "representable values"
 
 record heap =
   arrays :: "typerep \<Rightarrow> addr \<Rightarrow> heap_rep list"
@@ -53,8 +53,8 @@
 definition empty :: heap where
   "empty = \<lparr>arrays = (\<lambda>_ _. []), refs = (\<lambda>_ _. 0), lim = 0\<rparr>"
 
-datatype 'a array = Array addr -- "note the phantom type 'a"
-datatype 'a ref = Ref addr -- "note the phantom type 'a"
+datatype 'a array = Array addr \<comment> "note the phantom type 'a"
+datatype 'a ref = Ref addr \<comment> "note the phantom type 'a"
 
 primrec addr_of_array :: "'a array \<Rightarrow> addr" where
   "addr_of_array (Array x) = x"
@@ -76,14 +76,14 @@
 instance ref :: (type) countable
   by (rule countable_classI [of addr_of_ref]) simp
 
-text {* Syntactic convenience *}
+text \<open>Syntactic convenience\<close>
 
-setup {*
+setup \<open>
   Sign.add_const_constraint (@{const_name Array}, SOME @{typ "nat \<Rightarrow> 'a::heap array"})
   #> Sign.add_const_constraint (@{const_name Ref}, SOME @{typ "nat \<Rightarrow> 'a::heap ref"})
   #> Sign.add_const_constraint (@{const_name addr_of_array}, SOME @{typ "'a::heap array \<Rightarrow> nat"})
   #> Sign.add_const_constraint (@{const_name addr_of_ref}, SOME @{typ "'a::heap ref \<Rightarrow> nat"})
-*}
+\<close>
 
 hide_const (open) empty
 
--- a/src/HOL/Imperative_HOL/Heap_Monad.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Heap_Monad.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     John Matthews, Galois Connections; Alexander Krauss, Lukas Bulwahn & Florian Haftmann, TU Muenchen
 *)
 
-section {* A monad with a polymorphic heap and primitive reasoning infrastructure *}
+section \<open>A monad with a polymorphic heap and primitive reasoning infrastructure\<close>
 
 theory Heap_Monad
 imports
@@ -10,12 +10,12 @@
   "~~/src/HOL/Library/Monad_Syntax"
 begin
 
-subsection {* The monad *}
+subsection \<open>The monad\<close>
 
-subsubsection {* Monad construction *}
+subsubsection \<open>Monad construction\<close>
 
-text {* Monadic heap actions either produce values
-  and transform the heap, or fail *}
+text \<open>Monadic heap actions either produce values
+  and transform the heap, or fail\<close>
 datatype 'a Heap = Heap "heap \<Rightarrow> ('a \<times> heap) option"
 
 lemma [code, code del]:
@@ -46,7 +46,7 @@
   by (simp add: Let_def)
 
 
-subsubsection {* Specialised lifters *}
+subsubsection \<open>Specialised lifters\<close>
 
 definition tap :: "(heap \<Rightarrow> 'a) \<Rightarrow> 'a Heap" where
   [code del]: "tap f = Heap (\<lambda>h. Some (f h, h))"
@@ -71,7 +71,7 @@
   by (simp_all add: guard_def)
 
 
-subsubsection {* Predicate classifying successful computations *}
+subsubsection \<open>Predicate classifying successful computations\<close>
 
 definition success :: "'a Heap \<Rightarrow> heap \<Rightarrow> bool" where
   "success f h \<longleftrightarrow> execute f h \<noteq> None"
@@ -109,13 +109,13 @@
   by (simp add: success_def)
 
 
-subsubsection {* Predicate for a simple relational calculus *}
+subsubsection \<open>Predicate for a simple relational calculus\<close>
 
-text {*
-  The @{text effect} predicate states that when a computation @{text c}
-  runs with the heap @{text h} will result in return value @{text r}
-  and a heap @{text "h'"}, i.e.~no exception occurs.
-*}  
+text \<open>
+  The \<open>effect\<close> predicate states that when a computation \<open>c\<close>
+  runs with the heap \<open>h\<close> will result in return value \<open>r\<close>
+  and a heap \<open>h'\<close>, i.e.~no exception occurs.
+\<close>  
 
 definition effect :: "'a Heap \<Rightarrow> heap \<Rightarrow> heap \<Rightarrow> 'a \<Rightarrow> bool" where
   effect_def: "effect c h h' r \<longleftrightarrow> execute c h = Some (r, h')"
@@ -210,7 +210,7 @@
     (auto simp add: execute_simps elim!: successE, cases "P h", auto simp add: execute_simps)
 
 
-subsubsection {* Monad combinators *}
+subsubsection \<open>Monad combinators\<close>
 
 definition return :: "'a \<Rightarrow> 'a Heap" where
   [code del]: "return x = heap (Pair x)"
@@ -232,7 +232,7 @@
   obtains "r = x" "h' = h"
   using assms by (rule effectE) (simp add: execute_simps)
 
-definition raise :: "string \<Rightarrow> 'a Heap" where -- {* the string is just decoration *}
+definition raise :: "string \<Rightarrow> 'a Heap" where \<comment> \<open>the string is just decoration\<close>
   [code del]: "raise s = Heap (\<lambda>_. None)"
 
 lemma execute_raise [execute_simps]:
@@ -306,9 +306,9 @@
   by (rule Heap_eqI) (simp add: execute_simps)
 
 
-subsection {* Generic combinators *}
+subsection \<open>Generic combinators\<close>
 
-subsubsection {* Assertions *}
+subsubsection \<open>Assertions\<close>
 
 definition assert :: "('a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a Heap" where
   "assert P x = (if P x then return x else raise ''assert'')"
@@ -338,7 +338,7 @@
   by (rule Heap_eqI) (insert assms, simp add: assert_def)
 
 
-subsubsection {* Plain lifting *}
+subsubsection \<open>Plain lifting\<close>
 
 definition lift :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b Heap" where
   "lift f = return o f"
@@ -352,7 +352,7 @@
   by (simp add: lift_def comp_def)
 
 
-subsubsection {* Iteration -- warning: this is rarely useful! *}
+subsubsection \<open>Iteration -- warning: this is rarely useful!\<close>
 
 primrec fold_map :: "('a \<Rightarrow> 'b Heap) \<Rightarrow> 'a list \<Rightarrow> 'b list Heap" where
   "fold_map f [] = return []"
@@ -382,7 +382,7 @@
 qed
 
 
-subsection {* Partial function definition setup *}
+subsection \<open>Partial function definition setup\<close>
 
 definition Heap_ord :: "'a Heap \<Rightarrow> 'a Heap \<Rightarrow> bool" where
   "Heap_ord = img_ord execute (fun_ord option_ord)"
@@ -455,9 +455,9 @@
   unfolding effect_def execute.simps
   by blast
 
-declaration {* Partial_Function.init "heap" @{term heap.fixp_fun}
+declaration \<open>Partial_Function.init "heap" @{term heap.fixp_fun}
   @{term heap.mono_body} @{thm heap.fixp_rule_uc} @{thm heap.fixp_induct_uc}
-  (SOME @{thm fixp_induct_heap}) *}
+  (SOME @{thm fixp_induct_heap})\<close>
 
 
 abbreviation "mono_Heap \<equiv> monotone (fun_ord Heap_ord) Heap_ord"
@@ -516,9 +516,9 @@
 qed
 
 
-subsection {* Code generator setup *}
+subsection \<open>Code generator setup\<close>
 
-subsubsection {* Logical intermediate layer *}
+subsubsection \<open>Logical intermediate layer\<close>
 
 definition raise' :: "String.literal \<Rightarrow> 'a Heap" where
   [code del]: "raise' s = raise (String.explode s)"
@@ -530,10 +530,10 @@
   "raise s = raise' (STR s)"
   unfolding raise'_def by (simp add: STR_inverse)
 
-code_datatype raise' -- {* avoid @{const "Heap"} formally *}
+code_datatype raise' \<comment> \<open>avoid @{const "Heap"} formally\<close>
 
 
-subsubsection {* SML and OCaml *}
+subsubsection \<open>SML and OCaml\<close>
 
 code_printing type_constructor Heap \<rightharpoonup> (SML) "(unit/ ->/ _)"
 code_printing constant bind \<rightharpoonup> (SML) "!(fn/ f'_/ =>/ fn/ ()/ =>/ f'_/ (_/ ())/ ())"
@@ -546,12 +546,12 @@
 code_printing constant Heap_Monad.raise' \<rightharpoonup> (OCaml) "failwith"
 
 
-subsubsection {* Haskell *}
+subsubsection \<open>Haskell\<close>
 
-text {* Adaption layer *}
+text \<open>Adaption layer\<close>
 
 code_printing code_module "Heap" \<rightharpoonup> (Haskell)
-{*import qualified Control.Monad;
+\<open>import qualified Control.Monad;
 import qualified Control.Monad.ST;
 import qualified Data.STRef;
 import qualified Data.Array.ST;
@@ -581,11 +581,11 @@
 readArray = Data.Array.ST.readArray;
 
 writeArray :: STArray s a -> Integer -> a -> ST s ();
-writeArray = Data.Array.ST.writeArray;*}
+writeArray = Data.Array.ST.writeArray;\<close>
 
 code_reserved Haskell Heap
 
-text {* Monad *}
+text \<open>Monad\<close>
 
 code_printing type_constructor Heap \<rightharpoonup> (Haskell) "Heap.ST/ Heap.RealWorld/ _"
 code_monad bind Haskell
@@ -593,10 +593,10 @@
 code_printing constant Heap_Monad.raise' \<rightharpoonup> (Haskell) "error"
 
 
-subsubsection {* Scala *}
+subsubsection \<open>Scala\<close>
 
 code_printing code_module "Heap" \<rightharpoonup> (Scala)
-{*object Heap {
+\<open>object Heap {
   def bind[A, B](f: Unit => A, g: A => Unit => B): Unit => B = (_: Unit) => g (f ()) ()
 }
 
@@ -628,7 +628,7 @@
   def freeze[A](a: ArraySeq[A]): List[A] =
     a.toList
 }
-*}
+\<close>
 
 code_reserved Scala Heap Ref Array
 
@@ -638,9 +638,9 @@
 code_printing constant Heap_Monad.raise' \<rightharpoonup> (Scala) "!sys.error((_))"
 
 
-subsubsection {* Target variants with less units *}
+subsubsection \<open>Target variants with less units\<close>
 
-setup {*
+setup \<open>
 
 let
 
@@ -703,7 +703,7 @@
 
 end
 
-*}
+\<close>
 
 hide_const (open) Heap heap guard raise' fold_map
 
--- a/src/HOL/Imperative_HOL/Imperative_HOL.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Imperative_HOL.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     John Matthews, Galois Connections; Alexander Krauss, Lukas Bulwahn & Florian Haftmann, TU Muenchen
 *)
 
-section {* Entry point into monadic imperative HOL *}
+section \<open>Entry point into monadic imperative HOL\<close>
 
 theory Imperative_HOL
 imports Array Ref
--- a/src/HOL/Imperative_HOL/Imperative_HOL_ex.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Imperative_HOL_ex.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,7 +3,7 @@
                 Alexander Krauss, Lukas Bulwahn & Florian Haftmann, TU Muenchen
 *)
 
-section {* Monadic imperative HOL with examples *}
+section \<open>Monadic imperative HOL with examples\<close>
 
 theory Imperative_HOL_ex
 imports Imperative_HOL Overview
--- a/src/HOL/Imperative_HOL/Overview.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Overview.thy	Thu May 26 17:51:22 2016 +0200
@@ -17,9 +17,9 @@
   "_constrain" :: "prop' => type => prop'"  ("_ :: _" [4, 0] 3)
 (*>*)
 
-text {*
-  @{text "Imperative HOL"} is a leightweight framework for reasoning
-  about imperative data structures in @{text "Isabelle/HOL"}
+text \<open>
+  \<open>Imperative HOL\<close> is a leightweight framework for reasoning
+  about imperative data structures in \<open>Isabelle/HOL\<close>
   @{cite "Nipkow-et-al:2002:tutorial"}.  Its basic ideas are described in
   @{cite "Bulwahn-et-al:2008:imp_HOL"}.  However their concrete
   realisation has changed since, due to both extensions and
@@ -27,16 +27,16 @@
   \qt{as it is} by now.  It focusses on the user-view, less on matters
   of construction.  For details study of the theory sources is
   encouraged.
-*}
+\<close>
 
 
-section {* A polymorphic heap inside a monad *}
+section \<open>A polymorphic heap inside a monad\<close>
 
-text {*
+text \<open>
   Heaps (@{type heap}) can be populated by values of class @{class
   heap}; HOL's default types are already instantiated to class @{class
   heap}.  Class @{class heap} is a subclass of @{class countable};  see
-  theory @{text Countable} for ways to instantiate types as @{class countable}.
+  theory \<open>Countable\<close> for ways to instantiate types as @{class countable}.
 
   The heap is wrapped up in a monad @{typ "'a Heap"} by means of the
   following specification:
@@ -53,11 +53,11 @@
   \end{quote}
 
   This allows for equational reasoning about monadic expressions; the
-  fact collection @{text execute_simps} contains appropriate rewrites
+  fact collection \<open>execute_simps\<close> contains appropriate rewrites
   for all fundamental operations.
 
   Primitive fine-granular control over heaps is available through rule
-  @{text Heap_cases}:
+  \<open>Heap_cases\<close>:
 
   \begin{quote}
     @{thm [break] Heap_cases [no_vars]}
@@ -81,22 +81,21 @@
     @{term_type assert} \\
     @{thm assert_def [no_vars]}
   \end{quote}
-*}
+\<close>
 
 
-section {* Relational reasoning about @{type Heap} expressions *}
+section \<open>Relational reasoning about @{type Heap} expressions\<close>
 
-text {*
+text \<open>
   To establish correctness of imperative programs, predicate
 
   \begin{quote}
     @{term_type effect}
   \end{quote}
 
-  provides a simple relational calculus.  Primitive rules are @{text
-  effectI} and @{text effectE}, rules appropriate for reasoning about
-  imperative operations are available in the @{text effect_intros} and
-  @{text effect_elims} fact collections.
+  provides a simple relational calculus.  Primitive rules are \<open>effectI\<close> and \<open>effectE\<close>, rules appropriate for reasoning about
+  imperative operations are available in the \<open>effect_intros\<close> and
+  \<open>effect_elims\<close> fact collections.
 
   Often non-failure of imperative computations does not depend
   on the heap at all;  reasoning then can be easier using predicate
@@ -106,18 +105,16 @@
   \end{quote}
 
   Introduction rules for @{const success} are available in the
-  @{text success_intro} fact collection.
+  \<open>success_intro\<close> fact collection.
 
   @{const execute}, @{const effect}, @{const success} and @{const bind}
-  are related by rules @{text execute_bind_success}, @{text
-  success_bind_executeI}, @{text success_bind_effectI}, @{text
-  effect_bindI}, @{text effect_bindE} and @{text execute_bind_eq_SomeI}.
-*}
+  are related by rules \<open>execute_bind_success\<close>, \<open>success_bind_executeI\<close>, \<open>success_bind_effectI\<close>, \<open>effect_bindI\<close>, \<open>effect_bindE\<close> and \<open>execute_bind_eq_SomeI\<close>.
+\<close>
 
 
-section {* Monadic data structures *}
+section \<open>Monadic data structures\<close>
 
-text {*
+text \<open>
   The operations for monadic data structures (arrays and references)
   come in two flavours:
 
@@ -136,11 +133,11 @@
 
   Note that HOL equality coincides with reference equality and may be
   used as primitive executable operation.
-*}
+\<close>
 
-subsection {* Arrays *}
+subsection \<open>Arrays\<close>
 
-text {*
+text \<open>
   Heap operations:
 
   \begin{quote}
@@ -166,11 +163,11 @@
     @{term_type Array.swap} \\
     @{term_type Array.freeze}
   \end{quote}
-*}
+\<close>
 
-subsection {* References *}
+subsection \<open>References\<close>
 
-text {*
+text \<open>
   Heap operations:
 
   \begin{quote}
@@ -189,26 +186,26 @@
     @{term_type Ref.update} \\
     @{term_type Ref.change}
   \end{quote}
-*}
+\<close>
 
 
-section {* Code generation *}
+section \<open>Code generation\<close>
 
-text {*
+text \<open>
   Imperative HOL sets up the code generator in a way that imperative
   operations are mapped to suitable counterparts in the target
-  language.  For @{text Haskell}, a suitable @{text ST} monad is used;
-  for @{text SML}, @{text Ocaml} and @{text Scala} unit values ensure
+  language.  For \<open>Haskell\<close>, a suitable \<open>ST\<close> monad is used;
+  for \<open>SML\<close>, \<open>Ocaml\<close> and \<open>Scala\<close> unit values ensure
   that the evaluation order is the same as you would expect from the
   original monadic expressions.  These units may look cumbersome; the
-  target language variants @{text SML_imp}, @{text Ocaml_imp} and
-  @{text Scala_imp} make some effort to optimize some of them away.
-*}
+  target language variants \<open>SML_imp\<close>, \<open>Ocaml_imp\<close> and
+  \<open>Scala_imp\<close> make some effort to optimize some of them away.
+\<close>
 
 
-section {* Some hints for using the framework *}
+section \<open>Some hints for using the framework\<close>
 
-text {*
+text \<open>
   Of course a framework itself does not by itself indicate how to make
   best use of it.  Here some hints drawn from prior experiences with
   Imperative HOL:
@@ -228,8 +225,8 @@
       consists of composing those.
       
     \item Whether one should prefer equational reasoning (fact
-      collection @{text execute_simps} or relational reasoning (fact
-      collections @{text effect_intros} and @{text effect_elims}) depends
+      collection \<open>execute_simps\<close> or relational reasoning (fact
+      collections \<open>effect_intros\<close> and \<open>effect_elims\<close>) depends
       on the problems to solve.  For complex expressions or
       expressions involving binders, the relation style usually is
       superior but requires more proof text.
@@ -238,6 +235,6 @@
       HOL yourself whenever appropriate.
 
   \end{itemize}
-*}
+\<close>
 
 end
--- a/src/HOL/Imperative_HOL/Ref.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/Ref.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,19 +2,19 @@
     Author:     John Matthews, Galois Connections; Alexander Krauss, Lukas Bulwahn & Florian Haftmann, TU Muenchen
 *)
 
-section {* Monadic references *}
+section \<open>Monadic references\<close>
 
 theory Ref
 imports Array
 begin
 
-text {*
+text \<open>
   Imperative reference operations; modeled after their ML counterparts.
   See @{url "http://caml.inria.fr/pub/docs/manual-caml-light/node14.15.html"}
   and @{url "http://www.smlnj.org/doc/Conversion/top-level-comparison.html"}
-*}
+\<close>
 
-subsection {* Primitives *}
+subsection \<open>Primitives\<close>
 
 definition present :: "heap \<Rightarrow> 'a::heap ref \<Rightarrow> bool" where
   "present h r \<longleftrightarrow> addr_of_ref r < lim h"
@@ -36,7 +36,7 @@
   "r =!= s \<longleftrightarrow> TYPEREP('a) \<noteq> TYPEREP('b) \<or> addr_of_ref r \<noteq> addr_of_ref s"
 
 
-subsection {* Monad operations *}
+subsection \<open>Monad operations\<close>
 
 definition ref :: "'a::heap \<Rightarrow> 'a ref Heap" where
   [code del]: "ref v = Heap_Monad.heap (alloc v)"
@@ -56,12 +56,12 @@
    }"
 
 
-subsection {* Properties *}
+subsection \<open>Properties\<close>
 
-text {* Primitives *}
+text \<open>Primitives\<close>
 
 lemma noteq_sym: "r =!= s \<Longrightarrow> s =!= r"
-  and unequal [simp]: "r \<noteq> r' \<longleftrightarrow> r =!= r'" -- "same types!"
+  and unequal [simp]: "r \<noteq> r' \<longleftrightarrow> r =!= r'" \<comment> "same types!"
   by (auto simp add: noteq_def)
 
 lemma noteq_irrefl: "r =!= r \<Longrightarrow> False"
@@ -133,7 +133,7 @@
   by (auto simp add: noteq_def present_def)
 
 
-text {* Monad operations *}
+text \<open>Monad operations\<close>
 
 lemma execute_ref [execute_simps]:
   "execute (ref v) h = Some (alloc v h)"
@@ -216,7 +216,7 @@
   by (rule Heap_eqI) (simp add: change_def lookup_chain)
 
 
-text {* Non-interaction between imperative arrays and imperative references *}
+text \<open>Non-interaction between imperative arrays and imperative references\<close>
 
 lemma array_get_set [simp]:
   "Array.get (set r v h) = Array.get h"
@@ -261,9 +261,9 @@
 hide_const (open) present get set alloc noteq lookup update change
 
 
-subsection {* Code generator setup *}
+subsection \<open>Code generator setup\<close>
 
-text {* Intermediate operation avoids invariance problem in @{text Scala} (similar to value restriction) *}
+text \<open>Intermediate operation avoids invariance problem in \<open>Scala\<close> (similar to value restriction)\<close>
 
 definition ref' where
   [code del]: "ref' = ref"
@@ -273,7 +273,7 @@
   by (simp add: ref'_def)
 
 
-text {* SML / Eval *}
+text \<open>SML / Eval\<close>
 
 code_printing type_constructor ref \<rightharpoonup> (SML) "_/ ref"
 code_printing type_constructor ref \<rightharpoonup> (Eval) "_/ Unsynchronized.ref"
@@ -287,7 +287,7 @@
 code_reserved Eval Unsynchronized
 
 
-text {* OCaml *}
+text \<open>OCaml\<close>
 
 code_printing type_constructor ref \<rightharpoonup> (OCaml) "_/ ref"
 code_printing constant Ref \<rightharpoonup> (OCaml) "failwith/ \"bare Ref\""
@@ -299,7 +299,7 @@
 code_reserved OCaml ref
 
 
-text {* Haskell *}
+text \<open>Haskell\<close>
 
 code_printing type_constructor ref \<rightharpoonup> (Haskell) "Heap.STRef/ Heap.RealWorld/ _"
 code_printing constant Ref \<rightharpoonup> (Haskell) "error/ \"bare Ref\""
@@ -310,7 +310,7 @@
 code_printing class_instance ref :: HOL.equal \<rightharpoonup> (Haskell) -
 
 
-text {* Scala *}
+text \<open>Scala\<close>
 
 code_printing type_constructor ref \<rightharpoonup> (Scala) "!Ref[_]"
 code_printing constant Ref \<rightharpoonup> (Scala) "!sys.error(\"bare Ref\")"
--- a/src/HOL/Imperative_HOL/ex/Imperative_Quicksort.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/Imperative_Quicksort.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* An imperative implementation of Quicksort on arrays *}
+section \<open>An imperative implementation of Quicksort on arrays\<close>
 
 theory Imperative_Quicksort
 imports
@@ -12,7 +12,7 @@
   "~~/src/HOL/Library/Code_Target_Numeral"
 begin
 
-text {* We prove QuickSort correct in the Relational Calculus. *}
+text \<open>We prove QuickSort correct in the Relational Calculus.\<close>
 
 definition swap :: "'a::heap array \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> unit Heap"
 where
@@ -76,11 +76,11 @@
 using assms
 proof (induct a l r p arbitrary: h h' rs rule:part1.induct)
   case (1 a l r p h h' rs)
-  note cr = `effect (part1 a l r p) h h' rs`
+  note cr = \<open>effect (part1 a l r p) h h' rs\<close>
   show ?case
   proof (cases "r \<le> l")
     case True (* Terminating case *)
-    with cr `l \<le> r` show ?thesis
+    with cr \<open>l \<le> r\<close> show ?thesis
       unfolding part1.simps[of a l r p]
       by (elim effect_bindE effect_ifE effect_returnE effect_nthE) auto
   next
@@ -95,7 +95,7 @@
         unfolding part1.simps[of a l r p]
         by (elim effect_bindE effect_nthE effect_ifE effect_returnE) auto
       from rec_condition have "l + 1 \<le> r" by arith
-      from 1(1)[OF rec_condition True rec1 `l + 1 \<le> r`]
+      from 1(1)[OF rec_condition True rec1 \<open>l + 1 \<le> r\<close>]
       show ?thesis by simp
     next
       case False
@@ -105,7 +105,7 @@
         unfolding part1.simps[of a l r p]
         by (elim effect_bindE effect_nthE effect_ifE effect_returnE) auto
       from rec_condition have "l \<le> r - 1" by arith
-      from 1(2) [OF rec_condition False rec2 `l \<le> r - 1`] show ?thesis by fastforce
+      from 1(2) [OF rec_condition False rec2 \<open>l \<le> r - 1\<close>] show ?thesis by fastforce
     qed
   qed
 qed
@@ -116,7 +116,7 @@
 using assms
 proof (induct a l r p arbitrary: h h' rs rule:part1.induct)
   case (1 a l r p h h' rs)
-  note cr = `effect (part1 a l r p) h h' rs`
+  note cr = \<open>effect (part1 a l r p) h h' rs\<close>
   
   show ?case
   proof (cases "r \<le> l")
@@ -138,7 +138,7 @@
   using assms
 proof (induct a l r p arbitrary: h h' rs rule:part1.induct)
   case (1 a l r p h h' rs)
-  note cr = `effect (part1 a l r p) h h' rs`
+  note cr = \<open>effect (part1 a l r p) h h' rs\<close>
   
   show ?case
   proof (cases "r \<le> l")
@@ -183,7 +183,7 @@
   using assms
 proof (induct a l r p arbitrary: h h' rs rule:part1.induct)
   case (1 a l r p h h' rs)
-  note cr = `effect (part1 a l r p) h h' rs`
+  note cr = \<open>effect (part1 a l r p) h h' rs\<close>
   
   show ?case
   proof (cases "r \<le> l")
@@ -281,8 +281,8 @@
          else middle)"
     unfolding partition.simps
     by (elim effect_bindE effect_returnE effect_nthE effect_ifE effect_updE) simp 
-  from `l < r` have "l \<le> r - 1" by arith
-  from part_returns_index_in_bounds[OF part this] rs_equals `l < r` show ?thesis by auto
+  from \<open>l < r\<close> have "l \<le> r - 1" by arith
+  from part_returns_index_in_bounds[OF part this] rs_equals \<open>l < r\<close> show ?thesis by auto
 qed
 
 lemma partition_partitions:
@@ -307,9 +307,9 @@
     by (elim effect_bindE effect_returnE effect_nthE effect_updE) simp
   from swap have swap_length_remains: "Array.length h1 a = Array.length h' a"
     unfolding swap_def by (elim effect_bindE effect_returnE effect_nthE effect_updE) auto
-  from `l < r` have "l \<le> r - 1" by simp
+  from \<open>l < r\<close> have "l \<le> r - 1" by simp
   note middle_in_bounds = part_returns_index_in_bounds[OF part this]
-  from part_outer_remains[OF part] `l < r`
+  from part_outer_remains[OF part] \<open>l < r\<close>
   have "Array.get h a ! r = Array.get h1 a ! r"
     by fastforce
   with swap
@@ -324,7 +324,7 @@
     { 
       fix i
       assume i_is_left: "l \<le> i \<and> i < rs"
-      with swap_length_remains in_bounds middle_in_bounds rs_equals `l < r`
+      with swap_length_remains in_bounds middle_in_bounds rs_equals \<open>l < r\<close>
       have i_props: "i < Array.length h' a" "i \<noteq> r" "i \<noteq> rs" by auto
       from i_is_left rs_equals have "l \<le> i \<and> i < middle \<or> i = middle" by arith
       with part_partitions[OF part] right_remains True
@@ -350,7 +350,7 @@
       next
         assume i_is: "rs < i \<and> i = r"
         with rs_equals have "Suc middle \<noteq> r" by arith
-        with middle_in_bounds `l < r` have "Suc middle \<le> r - 1" by arith
+        with middle_in_bounds \<open>l < r\<close> have "Suc middle \<le> r - 1" by arith
         with part_partitions[OF part] right_remains 
         have "Array.get h' a ! rs \<le> Array.get h1 a ! (Suc middle)"
           by fastforce
@@ -451,7 +451,7 @@
   using assms
 proof (induct a l r arbitrary: h h' rs rule: quicksort.induct)
   case (1 a l r h h' rs)
-  note cr = `effect (quicksort a l r) h h' rs`
+  note cr = \<open>effect (quicksort a l r) h h' rs\<close>
   thus ?case
   proof (cases "r > l")
     case False
@@ -498,7 +498,7 @@
   using assms
 proof (induct a l r arbitrary: h h' rs rule: quicksort.induct)
   case (1 a l r h h' rs)
-  note cr = `effect (quicksort a l r) h h' rs`
+  note cr = \<open>effect (quicksort a l r) h h' rs\<close>
   thus ?case
   proof (cases "r > l")
     case False
@@ -582,9 +582,9 @@
   with quicksort_permutes [OF effect] properties_for_sort show ?thesis by fastforce
 qed
 
-subsection {* No Errors in quicksort *}
-text {* We have proved that quicksort sorts (if no exceptions occur).
-We will now show that exceptions do not occur. *}
+subsection \<open>No Errors in quicksort\<close>
+text \<open>We have proved that quicksort sorts (if no exceptions occur).
+We will now show that exceptions do not occur.\<close>
 
 lemma success_part1I: 
   assumes "l < Array.length h a" "r < Array.length h a"
@@ -647,7 +647,7 @@
 qed
 
 
-subsection {* Example *}
+subsection \<open>Example\<close>
 
 definition "qsort a = do {
     k \<leftarrow> Array.len a;
@@ -662,7 +662,7 @@
     qsort a
   }"
 
-ML_val {* @{code example} () *}
+ML_val \<open>@{code example} ()\<close>
 
 export_code qsort checking SML SML_imp OCaml? OCaml_imp? Haskell? Scala Scala_imp
 
--- a/src/HOL/Imperative_HOL/ex/Imperative_Reverse.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/Imperative_Reverse.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* An imperative in-place reversal on arrays *}
+section \<open>An imperative in-place reversal on arrays\<close>
 
 theory Imperative_Reverse
 imports Subarray "~~/src/HOL/Imperative_HOL/Imperative_HOL"
--- a/src/HOL/Imperative_HOL/ex/Linked_Lists.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/Linked_Lists.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,15 +2,15 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* Linked Lists by ML references *}
+section \<open>Linked Lists by ML references\<close>
 
 theory Linked_Lists
 imports "../Imperative_HOL" "~~/src/HOL/Library/Code_Target_Int"
 begin
 
-section {* Definition of Linked Lists *}
+section \<open>Definition of Linked Lists\<close>
 
-setup {* Sign.add_const_constraint (@{const_name Ref}, SOME @{typ "nat \<Rightarrow> 'a::type ref"}) *}
+setup \<open>Sign.add_const_constraint (@{const_name Ref}, SOME @{typ "nat \<Rightarrow> 'a::type ref"})\<close>
 datatype 'a node = Empty | Node 'a "'a node ref"
 
 primrec
@@ -55,9 +55,9 @@
 by (simp_all add: traverse.simps[of "Empty"] traverse.simps[of "Node x r"])
 
 
-section {* Proving correctness with relational abstraction *}
+section \<open>Proving correctness with relational abstraction\<close>
 
-subsection {* Definition of list_of, list_of', refs_of and refs_of' *}
+subsection \<open>Definition of list_of, list_of', refs_of and refs_of'\<close>
 
 primrec list_of :: "heap \<Rightarrow> ('a::heap) node \<Rightarrow> 'a list \<Rightarrow> bool"
 where
@@ -79,7 +79,7 @@
 | "refs_of' h r (x#xs) = ((x = r) \<and> refs_of h (Ref.get h x) xs)"
 
 
-subsection {* Properties of these definitions *}
+subsection \<open>Properties of these definitions\<close>
 
 lemma list_of_Empty[simp]: "list_of h Empty xs = (xs = [])"
 by (cases xs, auto)
@@ -216,7 +216,7 @@
   thus ?thesis by (auto simp add: refs_of'_def')
 qed
 
-subsection {* More complicated properties of these predicates *}
+subsection \<open>More complicated properties of these predicates\<close>
 
 lemma list_of_append:
   "list_of h n (as @ bs) \<Longrightarrow> \<exists>m. list_of h m bs"
@@ -258,7 +258,7 @@
   by (fastforce simp add: refs_of_distinct refs_of_next)
 
 
-subsection {* Interaction of these predicates with our heap transitions *}
+subsection \<open>Interaction of these predicates with our heap transitions\<close>
 
 lemma list_of_set_ref: "refs_of h q rs \<Longrightarrow> p \<notin> set rs \<Longrightarrow> list_of (Ref.set p v h) q as = list_of h q as"
 proof (induct as arbitrary: q rs)
@@ -378,7 +378,7 @@
   with assms that show thesis by auto
 qed
 
-section {* Proving make_llist and traverse correct *}
+section \<open>Proving make_llist and traverse correct\<close>
 
 lemma refs_of_invariant:
   assumes "refs_of h (r::('a::heap) node) xs"
@@ -518,9 +518,9 @@
     using effect_deterministic by fastforce
 qed
 
-section {* Proving correctness of in-place reversal *}
+section \<open>Proving correctness of in-place reversal\<close>
 
-subsection {* Definition of in-place reversal *}
+subsection \<open>Definition of in-place reversal\<close>
 
 partial_function (heap) rev' :: "('a::heap) node ref \<Rightarrow> 'a node ref \<Rightarrow> 'a node ref Heap"
 where
@@ -541,7 +541,7 @@
   "rev Empty = return Empty"
 | "rev (Node x n) = do { q \<leftarrow> ref Empty; p \<leftarrow> ref (Node x n); v \<leftarrow> rev' q p; !v }"
 
-subsection {* Correctness Proof *}
+subsection \<open>Correctness Proof\<close>
 
 lemma rev'_invariant:
   assumes "effect (rev' q p) h h' v"
@@ -649,10 +649,10 @@
 qed
 
 
-section {* The merge function on Linked Lists *}
-text {* We also prove merge correct *}
+section \<open>The merge function on Linked Lists\<close>
+text \<open>We also prove merge correct\<close>
 
-text{* First, we define merge on lists in a natural way. *}
+text\<open>First, we define merge on lists in a natural way.\<close>
 
 fun Lmerge :: "('a::ord) list \<Rightarrow> 'a list \<Rightarrow> 'a list"
 where
@@ -661,7 +661,7 @@
 | "Lmerge [] ys = ys"
 | "Lmerge xs [] = xs"
 
-subsection {* Definition of merge function *}
+subsection \<open>Definition of merge function\<close>
 
 partial_function (heap) merge :: "('a::{heap, ord}) node ref \<Rightarrow> 'a node ref \<Rightarrow> 'a node ref Heap"
 where
@@ -694,9 +694,9 @@
 lemma sum_distrib: "case_sum fl fr (case x of Empty \<Rightarrow> y | Node v n \<Rightarrow> (z v n)) = (case x of Empty \<Rightarrow> case_sum fl fr y | Node v n \<Rightarrow> case_sum fl fr (z v n))"
 by (cases x) auto
 
-subsection {* Induction refinement by applying the abstraction function to our induct rule *}
+subsection \<open>Induction refinement by applying the abstraction function to our induct rule\<close>
 
-text {* From our original induction rule Lmerge.induct, we derive a new rule with our list_of' predicate *}
+text \<open>From our original induction rule Lmerge.induct, we derive a new rule with our list_of' predicate\<close>
 
 lemma merge_induct2:
   assumes "list_of' h (p::'a::{heap, ord} node ref) xs"
@@ -741,7 +741,7 @@
 qed
 
 
-text {* secondly, we add the effect statement in the premise, and derive the effect statements for the single cases which we then eliminate with our effect elim rules. *}
+text \<open>secondly, we add the effect statement in the premise, and derive the effect statements for the single cases which we then eliminate with our effect elim rules.\<close>
   
 lemma merge_induct3: 
 assumes  "list_of' h p xs"
@@ -790,10 +790,10 @@
 qed
 
 
-subsection {* Proving merge correct *}
+subsection \<open>Proving merge correct\<close>
 
-text {* As many parts of the following three proofs are identical, we could actually move the
-same reasoning into an extended induction rule *}
+text \<open>As many parts of the following three proofs are identical, we could actually move the
+same reasoning into an extended induction rule\<close>
  
 lemma merge_unchanged:
   assumes "refs_of' h p xs"
@@ -822,7 +822,7 @@
     from 3(11) pnrs_def have no_inter: "set pnrs \<inter> set ys = {}" by auto
     from 3(7)[OF refs_of'_pn 3(10) this p_in] 3(3) have p_is_Node: "Ref.get h1 p = Node x pn"
       by simp
-    from 3(7)[OF refs_of'_pn 3(10) no_inter r_in] 3(8) `r \<noteq> p` show ?case
+    from 3(7)[OF refs_of'_pn 3(10) no_inter r_in] 3(8) \<open>r \<noteq> p\<close> show ?case
       by simp
   next
     case (4 x xs' y ys' p q pn qn h1 r1 h' xs ys r)
@@ -835,7 +835,7 @@
     with 4(11) 4(12) qnrs_def refs_of'_distinct[OF 4(10)] have q_in: "q \<notin> set xs \<union> set qnrs" by auto
     from 4(11) qnrs_def have no_inter: "set xs \<inter> set qnrs = {}" by auto
     from 4(7)[OF 4(9) refs_of'_qn this q_in] 4(4) have q_is_Node: "Ref.get h1 q = Node y qn" by simp
-    from 4(7)[OF 4(9) refs_of'_qn no_inter r_in] 4(8) `r \<noteq> q` show ?case
+    from 4(7)[OF 4(9) refs_of'_qn no_inter r_in] 4(8) \<open>r \<noteq> q\<close> show ?case
       by simp
   qed
 qed
@@ -937,9 +937,9 @@
   show ?case by (auto simp: list_of'_set_ref)
 qed
 
-section {* Code generation *}
+section \<open>Code generation\<close>
 
-text {* A simple example program *}
+text \<open>A simple example program\<close>
 
 definition test_1 where "test_1 = (do { ll_xs \<leftarrow> make_llist [1..(15::int)]; xs \<leftarrow> traverse ll_xs; return xs })" 
 definition test_2 where "test_2 = (do { ll_xs \<leftarrow> make_llist [1..(15::int)]; ll_ys \<leftarrow> rev ll_xs; ys \<leftarrow> traverse ll_ys; return ys })"
@@ -958,9 +958,9 @@
 
 code_reserved SML upto
 
-ML_val {* @{code test_1} () *}
-ML_val {* @{code test_2} () *}
-ML_val {* @{code test_3} () *}
+ML_val \<open>@{code test_1} ()\<close>
+ML_val \<open>@{code test_2} ()\<close>
+ML_val \<open>@{code test_3} ()\<close>
 
 export_code test_1 test_2 test_3 checking SML SML_imp OCaml? OCaml_imp? Haskell? Scala Scala_imp
 
--- a/src/HOL/Imperative_HOL/ex/List_Sublist.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/List_Sublist.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* Slices of lists *}
+section \<open>Slices of lists\<close>
 
 theory List_Sublist
 imports "~~/src/HOL/Library/Multiset"
@@ -188,7 +188,7 @@
 apply auto
 done
 
-section {* Another sublist function *}
+section \<open>Another sublist function\<close>
 
 function sublist' :: "nat \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> 'a list"
 where
@@ -199,7 +199,7 @@
 by pat_completeness auto
 termination by lexicographic_order
 
-subsection {* Proving equivalence to the other sublist command *}
+subsection \<open>Proving equivalence to the other sublist command\<close>
 
 lemma sublist'_sublist: "sublist' n m xs = sublist xs {j. n \<le> j \<and> j < m}"
 apply (induct xs arbitrary: n m)
@@ -227,7 +227,7 @@
 done
 
 
-subsection {* Showing equivalence to use of drop and take for definition *}
+subsection \<open>Showing equivalence to use of drop and take for definition\<close>
 
 lemma "sublist' n m xs = take (m - n) (drop n xs)"
 apply (induct xs arbitrary: n m)
@@ -239,7 +239,7 @@
 apply simp
 done
 
-subsection {* General lemma about sublist *}
+subsection \<open>General lemma about sublist\<close>
 
 lemma sublist'_Nil[simp]: "sublist' i j [] = []"
 by simp
--- a/src/HOL/Imperative_HOL/ex/SatChecker.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/SatChecker.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,22 +2,22 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* An efficient checker for proofs from a SAT solver *}
+section \<open>An efficient checker for proofs from a SAT solver\<close>
 
 theory SatChecker
 imports "~~/src/HOL/Library/RBT_Impl" Sorted_List "../Imperative_HOL"
 begin
 
-section{* General settings and functions for our representation of clauses *}
+section\<open>General settings and functions for our representation of clauses\<close>
 
-subsection{* Types for Literals, Clauses and ProofSteps *}
-text {* We encode Literals as integers and Clauses as sorted Lists. *}
+subsection\<open>Types for Literals, Clauses and ProofSteps\<close>
+text \<open>We encode Literals as integers and Clauses as sorted Lists.\<close>
 
 type_synonym ClauseId = nat
 type_synonym Lit = int
 type_synonym Clause = "Lit list"
 
-text {* This resembles exactly to Isat's Proof Steps *}
+text \<open>This resembles exactly to Isat's Proof Steps\<close>
 
 type_synonym Resolvants = "ClauseId * (Lit * ClauseId) list"
 datatype ProofStep =
@@ -27,8 +27,8 @@
   | Delete ClauseId
   | Xstep ClauseId ClauseId
 
-subsection{* Interpretation of Literals, Clauses, and an array of Clauses *} 
-text {* Specific definitions for Literals as integers *}
+subsection\<open>Interpretation of Literals, Clauses, and an array of Clauses\<close> 
+text \<open>Specific definitions for Literals as integers\<close>
 
 definition compl :: "Lit \<Rightarrow> Lit"
 where
@@ -54,7 +54,7 @@
 lemma compl_exists: "\<exists>l'. l = compl l'"
 unfolding compl_def by arith
 
-text {* Specific definitions for Clauses as sorted lists *}
+text \<open>Specific definitions for Clauses as sorted lists\<close>
 
 definition interpClause :: "(nat \<Rightarrow> bool) \<Rightarrow> Clause \<Rightarrow> bool"
 where
@@ -116,12 +116,12 @@
 unfolding inconsistent_def correctClause_def
 by auto
 
-text {* Specific definition for derived clauses in the Array *}
+text \<open>Specific definition for derived clauses in the Array\<close>
 
 definition
   array_ran :: "('a::heap) option array \<Rightarrow> heap \<Rightarrow> 'a set" where
   "array_ran a h = {e. Some e \<in> set (Array.get h a)}"
-    -- {*FIXME*}
+    \<comment> \<open>FIXME\<close>
 
 lemma array_ranI: "\<lbrakk> Some b = Array.get h a ! i; i < Array.length h a \<rbrakk> \<Longrightarrow> b \<in> array_ran a h"
 unfolding array_ran_def Array.length_def by simp
@@ -165,11 +165,11 @@
   by auto
 
 
-section{* Improved version of SatChecker *}
+section\<open>Improved version of SatChecker\<close>
 
-text{* This version just uses a single list traversal. *}
+text\<open>This version just uses a single list traversal.\<close>
 
-subsection{* Function definitions *}
+subsection\<open>Function definitions\<close>
 
 primrec res_mem :: "Lit \<Rightarrow> Clause \<Rightarrow> Clause Heap"
 where
@@ -209,7 +209,7 @@
 
 declare res_mem.simps [simp del] resolve1.simps [simp del] resolve2.simps [simp del] res_thm'.simps [simp del]
 
-subsection {* Proofs about these functions *}
+subsection \<open>Proofs about these functions\<close>
 
 lemma res_mem:
 assumes "effect (res_mem l xs) h h' r"
@@ -408,7 +408,7 @@
     done
 qed
 
-subsection {* res_thm and doProofStep *}
+subsection \<open>res_thm and doProofStep\<close>
 
 definition get_clause :: "Clause option array \<Rightarrow> ClauseId \<Rightarrow> Clause Heap"
 where
@@ -598,9 +598,9 @@
 apply (drule bspec)
 by (rule array_ranI, auto)
 
-section {* Functional version with Lists as Array *}
+section \<open>Functional version with Lists as Array\<close>
 
-subsection {* List specific definitions *}
+subsection \<open>List specific definitions\<close>
 
 definition list_ran :: "'a option list \<Rightarrow> 'a set"
 where
@@ -636,7 +636,7 @@
   "correctList rootcls xs =
   (\<forall>cl \<in> list_ran xs. correctClause rootcls cl \<and> sorted cl \<and> distinct cl)"
 
-subsection {* Checker functions *}
+subsection \<open>Checker functions\<close>
 
 primrec lres_thm :: "Clause option list \<Rightarrow> (Lit * ClauseId) \<Rightarrow> Clause \<Rightarrow> Clause Heap" 
 where
@@ -670,7 +670,7 @@
   }"
 
 
-section {* Functional version with RedBlackTrees *}
+section \<open>Functional version with RedBlackTrees\<close>
 
 primrec tres_thm :: "(ClauseId, Clause) RBT_Impl.rbt \<Rightarrow> Lit \<times> ClauseId \<Rightarrow> Clause \<Rightarrow> Clause Heap"
 where
--- a/src/HOL/Imperative_HOL/ex/Sorted_List.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/Sorted_List.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,13 +2,13 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* Sorted lists as representation of finite sets *}
+section \<open>Sorted lists as representation of finite sets\<close>
 
 theory Sorted_List
 imports Main
 begin
 
-text {* Merge function for two distinct sorted lists to get compound distinct sorted list *}
+text \<open>Merge function for two distinct sorted lists to get compound distinct sorted list\<close>
    
 fun merge :: "('a::linorder) list \<Rightarrow> 'a list \<Rightarrow> 'a list"
 where
@@ -17,7 +17,7 @@
 | "merge xs [] = xs"
 | "merge [] ys = ys"
 
-text {* The function package does not derive automatically the more general rewrite rule as follows: *}
+text \<open>The function package does not derive automatically the more general rewrite rule as follows:\<close>
 lemma merge_Nil[simp]: "merge [] ys = ys"
 by (cases ys) auto
 
@@ -31,7 +31,7 @@
 lemma distinct_merge[simp]: "\<lbrakk> distinct xs; distinct ys; List.sorted xs; List.sorted ys \<rbrakk> \<Longrightarrow> distinct (merge xs ys)"
 by (induct xs ys rule: merge.induct, auto simp add: sorted_Cons)
 
-text {* The remove function removes an element from a sorted list *}
+text \<open>The remove function removes an element from a sorted list\<close>
 
 primrec remove :: "('a :: linorder) \<Rightarrow> 'a list \<Rightarrow> 'a list"
 where
@@ -86,7 +86,7 @@
 apply (auto simp add: sorted_Cons)
 done
 
-subsection {* Efficient member function for sorted lists *}
+subsection \<open>Efficient member function for sorted lists\<close>
 
 primrec smember :: "'a list \<Rightarrow> 'a::linorder \<Rightarrow> bool" where
   "smember [] x \<longleftrightarrow> False"
--- a/src/HOL/Imperative_HOL/ex/Subarray.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Imperative_HOL/ex/Subarray.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     Lukas Bulwahn, TU Muenchen
 *)
 
-section {* Theorems about sub arrays *}
+section \<open>Theorems about sub arrays\<close>
 
 theory Subarray
 imports "~~/src/HOL/Imperative_HOL/Array" List_Sublist
--- a/src/HOL/Import/HOL_Light_Import.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Import/HOL_Light_Import.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,7 +3,7 @@
     Author:     Alexander Krauss, QAware GmbH
 *)
 
-section {* Main HOL Light importer *}
+section \<open>Main HOL Light importer\<close>
 
 theory HOL_Light_Import
 imports HOL_Light_Maps
--- a/src/HOL/Import/HOL_Light_Maps.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Import/HOL_Light_Maps.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Based on earlier code by Steven Obua and Sebastian Skalberg
 *)
 
-section {* Type and constant mappings of HOL Light importer *}
+section \<open>Type and constant mappings of HOL Light importer\<close>
 
 theory HOL_Light_Maps
 imports Import_Setup
--- a/src/HOL/Import/Import_Setup.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Import/Import_Setup.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,7 +3,7 @@
     Author:     Alexander Krauss, QAware GmbH
 *)
 
-section {* Importer machinery and required theorems *}
+section \<open>Importer machinery and required theorems\<close>
 
 theory Import_Setup
 imports Main "~~/src/HOL/Binomial"
--- a/src/HOL/Induct/ABexp.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/ABexp.thy	Thu May 26 17:51:22 2016 +0200
@@ -55,7 +55,7 @@
   "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
 and subst1_bexp:
   "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
-    --  \<open>one variable\<close>
+    \<comment>  \<open>one variable\<close>
   by (induct a and b) simp_all
 
 lemma subst_all_aexp:
--- a/src/HOL/Induct/Com.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/Com.thy	Thu May 26 17:51:22 2016 +0200
@@ -136,7 +136,7 @@
   by (rule fun_upd_same [THEN subst]) fast
 
 
-text\<open>Make the induction rule look nicer -- though @{text eta_contract} makes the new
+text\<open>Make the induction rule look nicer -- though \<open>eta_contract\<close> makes the new
     version look worse than it is...\<close>
 
 lemma split_lemma: "{((e,s),(n,s')). P e s n s'} = Collect (case_prod (%v. case_prod (case_prod P v)))"
@@ -167,11 +167,11 @@
 done
 
 
-text\<open>Lemma for @{text Function_eval}.  The major premise is that @{text "(c,s)"} executes to @{text "s1"}
+text\<open>Lemma for \<open>Function_eval\<close>.  The major premise is that \<open>(c,s)\<close> executes to \<open>s1\<close>
   using eval restricted to its functional part.  Note that the execution
-  @{text "(c,s) -[eval]-> s2"} can use unrestricted @{text eval}!  The reason is that
-  the execution @{text "(c,s) -[eval Int {...}]-> s1"} assures us that execution is
-  functional on the argument @{text "(c,s)"}.
+  \<open>(c,s) -[eval]-> s2\<close> can use unrestricted \<open>eval\<close>!  The reason is that
+  the execution \<open>(c,s) -[eval Int {...}]-> s1\<close> assures us that execution is
+  functional on the argument \<open>(c,s)\<close>.
 \<close>
 lemma com_Unique:
  "(c,s) -[eval Int {((e,s),(n,t)). \<forall>nt'. (e,s) -|-> nt' --> (n,t)=nt'}]-> s1
--- a/src/HOL/Induct/Common_Patterns.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/Common_Patterns.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,7 +10,7 @@
 
 text \<open>
   The subsequent Isar proof schemes illustrate common proof patterns
-  supported by the generic @{text "induct"} method.
+  supported by the generic \<open>induct\<close> method.
 
   To demonstrate variations on statement (goal) structure we refer to
   the induction rule of Peano natural numbers: @{thm nat.induct
@@ -29,8 +29,8 @@
 text \<open>
   Augmenting a problem by additional facts and locally fixed variables
   is a bread-and-butter method in many applications.  This is where
-  unwieldy object-level @{text "\<forall>"} and @{text "\<longrightarrow>"} used to occur in
-  the past.  The @{text "induct"} method works with primary means of
+  unwieldy object-level \<open>\<forall>\<close> and \<open>\<longrightarrow>\<close> used to occur in
+  the past.  The \<open>induct\<close> method works with primary means of
   the proof language instead.
 \<close>
 
@@ -80,8 +80,8 @@
 qed
 
 text \<open>
-  Observe how the local definition @{text "n = a x"} recurs in the
-  inductive cases as @{text "0 = a x"} and @{text "Suc n = a x"},
+  Observe how the local definition \<open>n = a x\<close> recurs in the
+  inductive cases as \<open>0 = a x\<close> and \<open>Suc n = a x\<close>,
   according to underlying induction rule.
 \<close>
 
@@ -155,9 +155,9 @@
   The following pattern illustrates the slightly more complex
   situation of simultaneous goals with individual local assumptions.
   In compound simultaneous statements like this, local assumptions
-  need to be included into each goal, using @{text "\<Longrightarrow>"} of the Pure
+  need to be included into each goal, using \<open>\<Longrightarrow>\<close> of the Pure
   framework.  In contrast, local parameters do not require separate
-  @{text "\<And>"} prefixes here, but may be moved into the common context
+  \<open>\<And>\<close> prefixes here, but may be moved into the common context
   of the whole statement.
 \<close>
 
@@ -196,7 +196,7 @@
 qed
 
 text \<open>
-  Here @{text "induct"} provides again nested cases with numbered
+  Here \<open>induct\<close> again provides nested cases with numbered
   sub-cases, which allows sharing common parts of the body context.
   In typical applications, there could be a long intermediate proof of
   general consequences of the induction hypotheses, before finishing
@@ -208,8 +208,7 @@
 
 text \<open>
   Multiple induction rules emerge from mutual definitions of
-  datatypes, inductive predicates, functions etc.  The @{text
-  "induct"} method accepts replicated arguments (with @{text "and"}
+  datatypes, inductive predicates, functions etc.  The \<open>induct\<close> method accepts replicated arguments (with \<open>and\<close>
   separator), corresponding to each projection of the induction
   principle.
 
@@ -344,7 +343,7 @@
 text \<open>
   Working with mutual rules requires special care in composing the
   statement as a two-level conjunction, using lists of propositions
-  separated by @{text "and"}.  For example:
+  separated by \<open>and\<close>.  For example:
 \<close>
 
 inductive Evn :: "nat \<Rightarrow> bool" and Odd :: "nat \<Rightarrow> bool"
--- a/src/HOL/Induct/PropLog.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/PropLog.thy	Thu May 26 17:51:22 2016 +0200
@@ -14,8 +14,8 @@
   Inductive definition of propositional logic.  Soundness and
   completeness w.r.t.\ truth-tables.
 
-  Prove: If @{text "H |= p"} then @{text "G |= p"} where @{text "G \<in>
-  Fin(H)"}
+  Prove: If \<open>H |= p\<close> then \<open>G |= p\<close> where \<open>G \<in>
+  Fin(H)\<close>
 \<close>
 
 subsection \<open>The datatype of propositions\<close>
@@ -49,8 +49,8 @@
 | eval_imp: "tt[[p->q]] = (tt[[p]] --> tt[[q]])"
 
 text \<open>
-  A finite set of hypotheses from @{text t} and the @{text Var}s in
-  @{text p}.
+  A finite set of hypotheses from \<open>t\<close> and the \<open>Var\<close>s in
+  \<open>p\<close>.
 \<close>
 
 primrec hyps :: "['a pl, 'a set] => 'a pl set"
@@ -63,8 +63,8 @@
 subsubsection \<open>Logical consequence\<close>
 
 text \<open>
-  For every valuation, if all elements of @{text H} are true then so
-  is @{text p}.
+  For every valuation, if all elements of \<open>H\<close> are true then so
+  is \<open>p\<close>.
 \<close>
 
 definition sat :: "['a pl set, 'a pl] => bool"  (infixl "|=" 50)
@@ -80,14 +80,14 @@
 done
 
 lemma thms_I: "H |- p->p"
-  -- \<open>Called @{text I} for Identity Combinator, not for Introduction.\<close>
+  \<comment> \<open>Called \<open>I\<close> for Identity Combinator, not for Introduction.\<close>
 by (best intro: thms.K thms.S thms.MP)
 
 
 subsubsection \<open>Weakening, left and right\<close>
 
 lemma weaken_left: "[| G \<subseteq> H;  G|-p |] ==> H|-p"
-  -- \<open>Order of premises is convenient with @{text THEN}\<close>
+  \<comment> \<open>Order of premises is convenient with \<open>THEN\<close>\<close>
   by (erule thms_mono [THEN predicate1D])
 
 lemma weaken_left_insert: "G |- p \<Longrightarrow> insert a G |- p"
@@ -146,7 +146,7 @@
 done
 
 lemma hyps_thms_if: "hyps p tt |- (if tt[[p]] then p else p->false)"
-  -- \<open>Typical example of strengthening the induction statement.\<close>
+  \<comment> \<open>Typical example of strengthening the induction statement.\<close>
 apply simp
 apply (induct p)
 apply (simp_all add: thms_I thms.H)
@@ -155,8 +155,8 @@
 done
 
 lemma sat_thms_p: "{} |= p ==> hyps p tt |- p"
-  -- \<open>Key lemma for completeness; yields a set of assumptions
-        satisfying @{text p}\<close>
+  \<comment> \<open>Key lemma for completeness; yields a set of assumptions
+        satisfying \<open>p\<close>\<close>
 unfolding sat_def
 by (metis (full_types) empty_iff hyps_thms_if)
 
@@ -178,7 +178,7 @@
 
 lemma thms_excluded_middle_rule:
     "[| insert p H |- q;  insert (p->false) H |- q |] ==> H |- q"
-  -- \<open>Hard to prove directly because it requires cuts\<close>
+  \<comment> \<open>Hard to prove directly because it requires cuts\<close>
 by (rule thms_excluded_middle [THEN thms.MP, THEN thms.MP], auto)
 
 
@@ -200,7 +200,7 @@
 lemma hyps_insert: "hyps p (insert v t) <= insert (#v) (hyps p t-{#v->false})"
 by (induct p) auto
 
-text \<open>Two lemmas for use with @{text weaken_left}\<close>
+text \<open>Two lemmas for use with \<open>weaken_left\<close>\<close>
 
 lemma insert_Diff_same: "B-C <= insert a (B-insert a C)"
 by fast
@@ -234,12 +234,12 @@
     "{} |= p ==> \<forall>t. hyps p t - hyps p t0 |- p"
 apply (rule hyps_subset [THEN hyps_finite [THEN finite_subset_induct]])
  apply (simp add: sat_thms_p, safe)
- txt\<open>Case @{text"hyps p t-insert(#v,Y) |- p"}\<close>
+ txt\<open>Case \<open>hyps p t-insert(#v,Y) |- p\<close>\<close>
  apply (iprover intro: thms_excluded_middle_rule
                      insert_Diff_same [THEN weaken_left]
                      insert_Diff_subset2 [THEN weaken_left]
                      hyps_Diff [THEN Diff_weaken_left])
-txt\<open>Case @{text"hyps p t-insert(#v -> false,Y) |- p"}\<close>
+txt\<open>Case \<open>hyps p t-insert(#v -> false,Y) |- p\<close>\<close>
  apply (iprover intro: thms_excluded_middle_rule
                      insert_Diff_same [THEN weaken_left]
                      insert_Diff_subset2 [THEN weaken_left]
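
For illustration only (nothing like this exists in the sources): the truth-table semantics and the notion of logical consequence discussed in PropLog.thy above can be mirrored directly. A minimal Python sketch with an ad-hoc tuple encoding of propositions; the names eval_pl and sat are hypothetical.

from itertools import combinations

# Formulas are "false", ("Var", v) or ("->", p, q); a valuation tt is the set
# of variables assigned True, mirroring the primrec eval above.
def eval_pl(tt, p):
    if p == "false":
        return False
    if p[0] == "Var":
        return p[1] in tt
    if p[0] == "->":
        return (not eval_pl(tt, p[1])) or eval_pl(tt, p[2])

# H |= p over a finite variable set: every valuation satisfying all of H satisfies p.
def sat(H, p, variables):
    tts = [set(c) for r in range(len(variables) + 1) for c in combinations(variables, r)]
    return all(eval_pl(tt, p) for tt in tts if all(eval_pl(tt, h) for h in H))

assert sat({("Var", "x")}, ("->", "false", ("Var", "y")), {"x", "y"})
assert not sat(set(), ("Var", "x"), {"x"})
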
--- a/src/HOL/Induct/QuoDataType.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/QuoDataType.thy	Thu May 26 17:51:22 2016 +0200
@@ -420,7 +420,7 @@
 
 subsection\<open>The Abstract Discriminator\<close>
 
-text\<open>However, as @{text Crypt_Nonce_neq_Nonce} above illustrates, we don't
+text\<open>However, as \<open>Crypt_Nonce_neq_Nonce\<close> above illustrates, we don't
 need this function in order to prove discrimination theorems.\<close>
 
 definition
--- a/src/HOL/Induct/QuoNestedDataType.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/QuoNestedDataType.thy	Thu May 26 17:51:22 2016 +0200
@@ -241,7 +241,7 @@
 regard an @{typ "exp list"} as a @{term "listrel exprel"} equivalence class\<close>
 
 text\<open>This theorem is easily proved but never used. There's no obvious way
-even to state the analogous result, @{text FnCall_Cons}.\<close>
+even to state the analogous result, \<open>FnCall_Cons\<close>.\<close>
 lemma FnCall_Nil: "FnCall F [] = Abs_Exp (exprel``{FNCALL F []})"
   by (simp add: FnCall_def)
 
@@ -390,7 +390,7 @@
 
 
 subsection\<open>The Abstract Discriminator\<close>
-text\<open>However, as @{text FnCall_Var_neq_Var} illustrates, we don't need this
+text\<open>However, as \<open>FnCall_Var_neq_Var\<close> illustrates, we don't need this
 function in order to prove discrimination theorems.\<close>
 
 definition
--- a/src/HOL/Induct/Sigma_Algebra.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Induct/Sigma_Algebra.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,8 +10,7 @@
 
 text \<open>
   This is just a tiny example demonstrating the use of inductive
-  definitions in classical mathematics.  We define the least @{text
-  \<sigma>}-algebra over a given set of sets.
+  definitions in classical mathematics.  We define the least \<open>\<sigma>\<close>-algebra over a given set of sets.
 \<close>
 
 inductive_set \<sigma>_algebra :: "'a set set \<Rightarrow> 'a set set"
@@ -24,7 +23,7 @@
 
 text \<open>
   The following basic facts are consequences of the closure properties
-  of any @{text \<sigma>}-algebra, merely using the introduction rules, but
+  of any \<open>\<sigma>\<close>-algebra, merely using the introduction rules, but
   no induction nor cases.
 \<close>
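
As an illustrative aside (not part of this changeset), the least \<sigma>-algebra of Sigma_Algebra.thy is simply the closure of the generating sets under the introduction rules. A minimal Python sketch over a finite universe, where finite unions suffice; the function name sigma_algebra is ad hoc.

# Closure of a collection of sets under complement and (finite) union,
# iterated to a fixpoint over a finite universe.
def sigma_algebra(universe, generators):
    sets = {frozenset(g) for g in generators} | {frozenset(), frozenset(universe)}
    changed = True
    while changed:
        changed = False
        new = {frozenset(universe) - s for s in sets}       # complements
        new |= {a | b for a in sets for b in sets}          # binary unions
        if not new <= sets:
            sets |= new
            changed = True
    return sets

A = sigma_algebra({1, 2, 3, 4}, [{1}, {2, 3}])
assert frozenset({2, 3, 4}) in A     # complement of {1}
assert frozenset({1, 2, 3}) in A     # union of {1} and {2, 3}
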
 
--- a/src/HOL/Matrix_LP/ComputeFloat.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Matrix_LP/ComputeFloat.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     Steven Obua
 *)
 
-section {* Floating Point Representation of the Reals *}
+section \<open>Floating Point Representation of the Reals\<close>
 
 theory ComputeFloat
 imports Complex_Main "~~/src/HOL/Library/Lattice_Algebras"
--- a/src/HOL/Matrix_LP/ComputeHOL.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Matrix_LP/ComputeHOL.thy	Thu May 26 17:51:22 2016 +0200
@@ -134,7 +134,7 @@
 
 lemmas compute_hol = compute_if compute_bool compute_pair compute_option compute_list compute_let
 
-ML {*
+ML \<open>
 signature ComputeHOL =
 sig
   val prep_thms : thm list -> thm list
@@ -182,6 +182,6 @@
 end
 
 end
-*}
+\<close>
 
 end
--- a/src/HOL/Matrix_LP/Matrix.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Matrix_LP/Matrix.thy	Thu May 26 17:51:22 2016 +0200
@@ -265,13 +265,13 @@
 definition associative :: "('a \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where
 "associative f == ! x y z. f (f x y) z = f x (f y z)"
 
-text{*
+text\<open>
 To reason about associativity and commutativity of operations on matrices,
 let's take a step back and look at the general situation: Assume that we have
 sets $A$ and $B$ with $B \subset A$ and an abstraction $u: A \rightarrow B$. This abstraction has to fulfill $u(b) = b$ for all $b \in B$, but is arbitrary otherwise.
 Each function $f: A \times A \rightarrow A$ now induces a function $f': B \times B \rightarrow B$ by $f' = u \circ f$.
 It is obvious that commutativity of $f$ implies commutativity of $f'$: $f' x y = u (f x y) = u (f y x) = f' y x.$
-*}
+\<close>
 
 lemma combine_infmatrix_commute:
   "commutative f \<Longrightarrow> commutative (combine_infmatrix f)"
@@ -281,14 +281,14 @@
 "commutative f \<Longrightarrow> commutative (combine_matrix f)"
 by (simp add: combine_matrix_def commutative_def combine_infmatrix_def)
 
-text{*
+text\<open>
 On the contrary, given an associative function $f$ we cannot expect $f'$ to be associative. A counterexample is given by $A=\ganz$, $B=\{-1, 0, 1\}$,
 as $f$ we take addition on $\ganz$, which is clearly associative. The abstraction is given by  $u(a) = 0$ for $a \notin B$. Then we have
 \[ f' (f' 1 1) -1 = u(f (u (f 1 1)) -1) = u(f (u 2) -1) = u (f 0 -1) = -1, \]
 but on the other hand we have
 \[ f' 1 (f' 1 -1) = u (f 1 (u (f 1 -1))) = u (f 1 0) = 1.\]
 A way out of this problem is to assume that $f(A\times A)\subset A$ holds, and this is what we are going to do:
-*}
+\<close>
 
 lemma nonzero_positions_combine_infmatrix[simp]: "f 0 0 = 0 \<Longrightarrow> nonzero_positions (combine_infmatrix f A B) \<subseteq> (nonzero_positions A) \<union> (nonzero_positions B)"
 by (rule subsetI, simp add: nonzero_positions_def combine_infmatrix_def, auto)
@@ -303,7 +303,7 @@
 apply (rule finite_subset[of _ "(nonzero_positions (Rep_matrix A)) \<union> (nonzero_positions (Rep_matrix B))"])
 by (simp_all)
 
-text {* We need the next two lemmas only later, but it is analog to the above one, so we prove them now: *}
+text \<open>We need the next two lemmas only later, but they are analogous to the one above, so we prove them now:\<close>
 lemma nonzero_positions_apply_infmatrix[simp]: "f 0 = 0 \<Longrightarrow> nonzero_positions (apply_infmatrix f A) \<subseteq> nonzero_positions A"
 by (rule subsetI, simp add: nonzero_positions_def apply_infmatrix_def, auto)
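
As an aside (not from the theory sources), the counterexample in the Matrix.thy comment above can be replayed numerically. A minimal Python sketch; u is the abstraction into B = {-1, 0, 1} and f_induced the induced operation, both named ad hoc.

B = {-1, 0, 1}

def u(a):
    return a if a in B else 0          # abstraction: identity on B, 0 outside

def f_induced(x, y):
    return u(x + y)                    # f' = u o f, with f integer addition

# Addition is associative on the integers, but the induced operation is not:
assert f_induced(f_induced(1, 1), -1) == -1
assert f_induced(1, f_induced(1, -1)) == 1
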
 
--- a/src/HOL/Metis_Examples/Abstraction.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Abstraction.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Example featuring Metis's support for lambda-abstractions.
 *)
 
-section {* Example Featuring Metis's Support for Lambda-Abstractions *}
+section \<open>Example Featuring Metis's Support for Lambda-Abstractions\<close>
 
 theory Abstraction
 imports "~~/src/HOL/Library/FuncSet"
--- a/src/HOL/Metis_Examples/Big_O.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Big_O.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Metis example featuring the Big O notation.
 *)
 
-section {* Metis Example Featuring the Big O Notation *}
+section \<open>Metis Example Featuring the Big O Notation\<close>
 
 theory Big_O
 imports
@@ -14,7 +14,7 @@
   "~~/src/HOL/Library/Set_Algebras"
 begin
 
-subsection {* Definitions *}
+subsection \<open>Definitions\<close>
 
 definition bigo :: "('a => 'b::linordered_idom) => ('a => 'b) set" ("(1O'(_'))") where
   "O(f::('a => 'b)) == {h. \<exists>c. \<forall>x. \<bar>h x\<bar> <= c * \<bar>f x\<bar>}"
@@ -457,7 +457,7 @@
   hence "\<exists>(v::'a) (u::'a) SKF\<^sub>7::'a \<Rightarrow> 'b. \<bar>inverse c\<bar> * \<bar>g (SKF\<^sub>7 (u * v))\<bar> \<le> u * (v * \<bar>f (SKF\<^sub>7 (u * v))\<bar>)"
     by (metis mult_left_mono)
   then show "\<exists>ca::'a. \<forall>x::'b. inverse \<bar>c\<bar> * \<bar>g x\<bar> \<le> ca * \<bar>f x\<bar>"
-    using A2 F4 by (metis F1 `0 < \<bar>inverse c\<bar>` linordered_field_class.sign_simps(23) mult_le_cancel_left_pos)
+    using A2 F4 by (metis F1 \<open>0 < \<bar>inverse c\<bar>\<close> linordered_field_class.sign_simps(23) mult_le_cancel_left_pos)
 qed
 
 lemma bigo_const_mult6 [intro]: "(\<lambda>x. c) *o O(f) <= O(f)"
@@ -489,7 +489,7 @@
 apply (simp add: fun_diff_def)
 done
 
-subsection {* Setsum *}
+subsection \<open>Setsum\<close>
 
 lemma bigo_setsum_main: "\<forall>x. \<forall>y \<in> A x. 0 <= h x y \<Longrightarrow>
     \<exists>c. \<forall>x. \<forall>y \<in> A x. \<bar>f x y\<bar> <= c * (h x y) \<Longrightarrow>
@@ -566,7 +566,7 @@
   apply auto
 done
 
-subsection {* Misc useful stuff *}
+subsection \<open>Misc useful stuff\<close>
 
 lemma bigo_useful_intro: "A <= O(f) \<Longrightarrow> B <= O(f) \<Longrightarrow>
   A + B <= O(f)"
@@ -610,7 +610,7 @@
   apply (simp add: fun_diff_def)
 done
 
-subsection {* Less than or equal to *}
+subsection \<open>Less than or equal to\<close>
 
 definition lesso :: "('a => 'b::linordered_idom) => ('a => 'b) => ('a => 'b)" (infixl "<o" 70) where
   "f <o g == (\<lambda>x. max (f x - g x) 0)"
--- a/src/HOL/Metis_Examples/Binary_Tree.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Binary_Tree.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Metis example featuring binary trees.
 *)
 
-section {* Metis Example Featuring Binary Trees *}
+section \<open>Metis Example Featuring Binary Trees\<close>
 
 theory Binary_Tree
 imports Main
@@ -53,7 +53,7 @@
   "append Lf t = t"
 | "append (Br a t1 t2) t = Br a (append t1 t) (append t2 t)"
 
-text {* \medskip BT simplification *}
+text \<open>\medskip BT simplification\<close>
 
 lemma n_leaves_reflect: "n_leaves (reflect t) = n_leaves t"
 proof (induct t)
@@ -82,9 +82,9 @@
  apply (metis depth.simps(1) reflect.simps(1))
 by (metis depth.simps(2) max.commute reflect.simps(2))
 
-text {*
+text \<open>
 The famous relationship between the numbers of leaves and nodes.
-*}
+\<close>
 
 lemma n_leaves_nodes: "n_leaves t = Suc (n_nodes t)"
 apply (induct t)
@@ -197,9 +197,9 @@
               reflect.simps(1))
 by (metis preorder_reflect reflect_reflect_ident rev_swap)
 
-text {*
+text \<open>
 Analogues of the standard properties of the append function for lists.
-*}
+\<close>
 
 lemma append_assoc [simp]: "append (append t1 t2) t3 = append t1 (append t2 t3)"
 apply (induct t1)
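
A quick illustrative check, not part of the changeset: the relationship n_leaves t = Suc (n_nodes t) quoted above holds for any binary tree. A minimal Python sketch with an ad-hoc tuple encoding of the tree datatype.

# A tree is either "Lf" or ("Br", value, left, right).
def n_leaves(t):
    return 1 if t == "Lf" else n_leaves(t[2]) + n_leaves(t[3])

def n_nodes(t):
    return 0 if t == "Lf" else 1 + n_nodes(t[2]) + n_nodes(t[3])

t = ("Br", 1, ("Br", 2, "Lf", "Lf"), "Lf")
assert n_leaves(t) == n_nodes(t) + 1   # n_leaves t = Suc (n_nodes t)
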
--- a/src/HOL/Metis_Examples/Clausification.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Clausification.thy	Thu May 26 17:51:22 2016 +0200
@@ -4,14 +4,14 @@
 Example that exercises Metis's Clausifier.
 *)
 
-section {* Example that Exercises Metis's Clausifier *}
+section \<open>Example that Exercises Metis's Clausifier\<close>
 
 theory Clausification
 imports Complex_Main
 begin
 
 
-text {* Definitional CNF for facts *}
+text \<open>Definitional CNF for facts\<close>
 
 declare [[meson_max_clauses = 10]]
 
@@ -83,7 +83,7 @@
 by (metis (full_types) rax)
 
 
-text {* Definitional CNF for goal *}
+text \<open>Definitional CNF for goal\<close>
 
 axiomatization p :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
 pax: "\<exists>b. \<forall>a. (p b a \<and> p 0 0 \<and> p 1 a) \<or> (p 0 1 \<and> p 1 0 \<and> p a b)"
@@ -109,7 +109,7 @@
 by (metis (full_types) pax)
 
 
-text {* New Skolemizer *}
+text \<open>New Skolemizer\<close>
 
 declare [[metis_new_skolem]]
 
--- a/src/HOL/Metis_Examples/Message.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Message.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Metis example featuring message authentication.
 *)
 
-section {* Metis Example Featuring Message Authentication *}
+section \<open>Metis Example Featuring Message Authentication\<close>
 
 theory Message
 imports Main
@@ -19,8 +19,8 @@
 type_synonym key = nat
 
 consts
-  all_symmetric :: bool        --{*true if all keys are symmetric*}
-  invKey        :: "key=>key"  --{*inverse of a symmetric key*}
+  all_symmetric :: bool        \<comment>\<open>true if all keys are symmetric\<close>
+  invKey        :: "key=>key"  \<comment>\<open>inverse of a symmetric key\<close>
 
 specification (invKey)
   invKey [simp]: "invKey (invKey K) = K"
@@ -28,26 +28,26 @@
 by (metis id_apply)
 
 
-text{*The inverse of a symmetric key is itself; that of a public key
-      is the private key and vice versa*}
+text\<open>The inverse of a symmetric key is itself; that of a public key
+      is the private key and vice versa\<close>
 
 definition symKeys :: "key set" where
   "symKeys == {K. invKey K = K}"
 
-datatype  --{*We allow any number of friendly agents*}
+datatype  \<comment>\<open>We allow any number of friendly agents\<close>
   agent = Server | Friend nat | Spy
 
 datatype
-     msg = Agent  agent     --{*Agent names*}
-         | Number nat       --{*Ordinary integers, timestamps, ...*}
-         | Nonce  nat       --{*Unguessable nonces*}
-         | Key    key       --{*Crypto keys*}
-         | Hash   msg       --{*Hashing*}
-         | MPair  msg msg   --{*Compound messages*}
-         | Crypt  key msg   --{*Encryption, public- or shared-key*}
+     msg = Agent  agent     \<comment>\<open>Agent names\<close>
+         | Number nat       \<comment>\<open>Ordinary integers, timestamps, ...\<close>
+         | Nonce  nat       \<comment>\<open>Unguessable nonces\<close>
+         | Key    key       \<comment>\<open>Crypto keys\<close>
+         | Hash   msg       \<comment>\<open>Hashing\<close>
+         | MPair  msg msg   \<comment>\<open>Compound messages\<close>
+         | Crypt  key msg   \<comment>\<open>Encryption, public- or shared-key\<close>
 
 
-text{*Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...*}
+text\<open>Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...\<close>
 syntax
   "_MTuple"      :: "['a, args] => 'a * 'b"       ("(2\<lbrace>_,/ _\<rbrace>)")
 translations
@@ -56,15 +56,15 @@
 
 
 definition HPair :: "[msg,msg] => msg" ("(4Hash[_] /_)" [0, 1000]) where
-    --{*Message Y paired with a MAC computed with the help of X*}
+    \<comment>\<open>Message Y paired with a MAC computed with the help of X\<close>
     "Hash[X] Y == \<lbrace> Hash\<lbrace>X,Y\<rbrace>, Y\<rbrace>"
 
 definition keysFor :: "msg set => key set" where
-    --{*Keys useful to decrypt elements of a message set*}
+    \<comment>\<open>Keys useful to decrypt elements of a message set\<close>
   "keysFor H == invKey ` {K. \<exists>X. Crypt K X \<in> H}"
 
 
-subsubsection{*Inductive Definition of All Parts" of a Message*}
+subsubsection\<open>Inductive Definition of All Parts of a Message\<close>
 
 inductive_set
   parts :: "msg set => msg set"
@@ -83,7 +83,7 @@
  apply (metis parts.Snd)
 by (metis parts.Body)
 
-text{*Equations hold because constructors are injective.*}
+text\<open>Equations hold because constructors are injective.\<close>
 lemma Friend_image_eq [simp]: "(Friend x \<in> Friend`A) = (x:A)"
 by (metis agent.inject image_iff)
 
@@ -94,13 +94,13 @@
 by (metis image_iff msg.distinct(23))
 
 
-subsubsection{*Inverse of keys *}
+subsubsection\<open>Inverse of keys\<close>
 
 lemma invKey_eq [simp]: "(invKey K = invKey K') = (K = K')"
 by (metis invKey)
 
 
-subsection{*keysFor operator*}
+subsection\<open>keysFor operator\<close>
 
 lemma keysFor_empty [simp]: "keysFor {} = {}"
 by (unfold keysFor_def, blast)
@@ -111,7 +111,7 @@
 lemma keysFor_UN [simp]: "keysFor (\<Union>i\<in>A. H i) = (\<Union>i\<in>A. keysFor (H i))"
 by (unfold keysFor_def, blast)
 
-text{*Monotonicity*}
+text\<open>Monotonicity\<close>
 lemma keysFor_mono: "G \<subseteq> H ==> keysFor(G) \<subseteq> keysFor(H)"
 by (unfold keysFor_def, blast)
 
@@ -144,7 +144,7 @@
 by (unfold keysFor_def, blast)
 
 
-subsection{*Inductive relation "parts"*}
+subsection\<open>Inductive relation "parts"\<close>
 
 lemma MPair_parts:
      "[| \<lbrace>X,Y\<rbrace> \<in> parts H;
@@ -152,10 +152,10 @@
 by (blast dest: parts.Fst parts.Snd)
 
 declare MPair_parts [elim!] parts.Body [dest!]
-text{*NB These two rules are UNSAFE in the formal sense, as they discard the
+text\<open>NB These two rules are UNSAFE in the formal sense, as they discard the
      compound message.  They work well on THIS FILE.
-  @{text MPair_parts} is left as SAFE because it speeds up proofs.
-  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.*}
+  \<open>MPair_parts\<close> is left as SAFE because it speeds up proofs.
+  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.\<close>
 
 lemma parts_increasing: "H \<subseteq> parts(H)"
 by blast
@@ -171,14 +171,14 @@
 lemma parts_emptyE [elim!]: "X\<in> parts{} ==> P"
 by simp
 
-text{*WARNING: loops if H = {Y}, therefore must not be repeated!*}
+text\<open>WARNING: loops if H = {Y}, therefore must not be repeated!\<close>
 lemma parts_singleton: "X\<in> parts H ==> \<exists>Y\<in>H. X\<in> parts {Y}"
 apply (erule parts.induct)
 apply fast+
 done
 
 
-subsubsection{*Unions *}
+subsubsection\<open>Unions\<close>
 
 lemma parts_Un_subset1: "parts(G) \<union> parts(H) \<subseteq> parts(G \<union> H)"
 by (intro Un_least parts_mono Un_upper1 Un_upper2)
@@ -212,19 +212,19 @@
 lemma parts_UN [simp]: "parts(\<Union>x\<in>A. H x) = (\<Union>x\<in>A. parts(H x))"
 by (intro equalityI parts_UN_subset1 parts_UN_subset2)
 
-text{*Added to simplify arguments to parts, analz and synth.
-  NOTE: the UN versions are no longer used!*}
+text\<open>Added to simplify arguments to parts, analz and synth.
+  NOTE: the UN versions are no longer used!\<close>
 
 
-text{*This allows @{text blast} to simplify occurrences of
-  @{term "parts(G\<union>H)"} in the assumption.*}
+text\<open>This allows \<open>blast\<close> to simplify occurrences of
+  @{term "parts(G\<union>H)"} in the assumption.\<close>
 lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE]
 declare in_parts_UnE [elim!]
 
 lemma parts_insert_subset: "insert X (parts H) \<subseteq> parts(insert X H)"
 by (blast intro: parts_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma parts_partsD [dest!]: "X\<in> parts (parts H) ==> X\<in> parts H"
 by (erule parts.induct, blast+)
@@ -245,7 +245,7 @@
 by (metis (no_types) Un_insert_left Un_insert_right insert_absorb le_supE
           parts_Un parts_idem parts_increasing parts_trans)
 
-subsubsection{*Rewrite rules for pulling out atomic messages *}
+subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
 
 lemmas parts_insert_eq_I = equalityI [OF subsetI parts_insert_subset]
 
@@ -310,11 +310,11 @@
 apply (metis le_trans linorder_linear)
 done
 
-subsection{*Inductive relation "analz"*}
+subsection\<open>Inductive relation "analz"\<close>
 
-text{*Inductive definition of "analz" -- what can be broken down from a set of
+text\<open>Inductive definition of "analz" -- what can be broken down from a set of
     messages, including keys.  A form of downward closure.  Pairs can
-    be taken apart; messages decrypted with known keys.  *}
+    be taken apart; messages decrypted with known keys.\<close>
 
 inductive_set
   analz :: "msg set => msg set"
@@ -327,14 +327,14 @@
              "[|Crypt K X \<in> analz H; Key(invKey K): analz H|] ==> X \<in> analz H"
 
 
-text{*Monotonicity; Lemma 1 of Lowe's paper*}
+text\<open>Monotonicity; Lemma 1 of Lowe's paper\<close>
 lemma analz_mono: "G\<subseteq>H ==> analz(G) \<subseteq> analz(H)"
 apply auto
 apply (erule analz.induct)
 apply (auto dest: analz.Fst analz.Snd)
 done
 
-text{*Making it safe speeds up proofs*}
+text\<open>Making it safe speeds up proofs\<close>
 lemma MPair_analz [elim!]:
      "[| \<lbrace>X,Y\<rbrace> \<in> analz H;
              [| X \<in> analz H; Y \<in> analz H |] ==> P
@@ -367,22 +367,22 @@
 
 lemmas analz_insertI = subset_insertI [THEN analz_mono, THEN [2] rev_subsetD]
 
-subsubsection{*General equational properties *}
+subsubsection\<open>General equational properties\<close>
 
 lemma analz_empty [simp]: "analz{} = {}"
 apply safe
 apply (erule analz.induct, blast+)
 done
 
-text{*Converse fails: we can analz more from the union than from the
-  separate parts, as a key in one might decrypt a message in the other*}
+text\<open>Converse fails: we can analz more from the union than from the
+  separate parts, as a key in one might decrypt a message in the other\<close>
 lemma analz_Un: "analz(G) \<union> analz(H) \<subseteq> analz(G \<union> H)"
 by (intro Un_least analz_mono Un_upper1 Un_upper2)
 
 lemma analz_insert: "insert X (analz H) \<subseteq> analz(insert X H)"
 by (blast intro: analz_mono [THEN [2] rev_subsetD])
 
-subsubsection{*Rewrite rules for pulling out atomic messages *}
+subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
 
 lemmas analz_insert_eq_I = equalityI [OF subsetI analz_insert]
 
@@ -410,7 +410,7 @@
 apply (erule analz.induct, auto)
 done
 
-text{*Can only pull out Keys if they are not needed to decrypt the rest*}
+text\<open>Can only pull out Keys if they are not needed to decrypt the rest\<close>
 lemma analz_insert_Key [simp]:
     "K \<notin> keysFor (analz H) ==>
           analz (insert (Key K) H) = insert (Key K) (analz H)"
@@ -429,7 +429,7 @@
 apply (blast intro: analz.Fst analz.Snd)+
 done
 
-text{*Can pull out enCrypted message if the Key is not known*}
+text\<open>Can pull out enCrypted message if the Key is not known\<close>
 lemma analz_insert_Crypt:
      "Key (invKey K) \<notin> analz H
       ==> analz (insert (Crypt K X) H) = insert (Crypt K X) (analz H)"
@@ -459,10 +459,10 @@
                insert (Crypt K X) (analz (insert X H))"
 by (intro equalityI lemma1 lemma2)
 
-text{*Case analysis: either the message is secure, or it is not! Effective,
-but can cause subgoals to blow up! Use with @{text "if_split"}; apparently
-@{text "split_tac"} does not cope with patterns such as @{term"analz (insert
-(Crypt K X) H)"} *}
+text\<open>Case analysis: either the message is secure, or it is not! Effective,
+but can cause subgoals to blow up! Use with \<open>if_split\<close>; apparently
+\<open>split_tac\<close> does not cope with patterns such as @{term"analz (insert
+(Crypt K X) H)"}\<close>
 lemma analz_Crypt_if [simp]:
      "analz (insert (Crypt K X) H) =
           (if (Key (invKey K) \<in> analz H)
@@ -471,7 +471,7 @@
 by (simp add: analz_insert_Crypt analz_insert_Decrypt)
 
 
-text{*This rule supposes "for the sake of argument" that we have the key.*}
+text\<open>This rule supposes "for the sake of argument" that we have the key.\<close>
 lemma analz_insert_Crypt_subset:
      "analz (insert (Crypt K X) H) \<subseteq>
            insert (Crypt K X) (analz (insert X H))"
@@ -486,7 +486,7 @@
 done
 
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma analz_analzD [dest!]: "X\<in> analz (analz H) ==> X\<in> analz H"
 by (erule analz.induct, blast+)
@@ -509,14 +509,14 @@
 lemma analz_cut: "[| Y\<in> analz (insert X H);  X\<in> analz H |] ==> Y\<in> analz H"
 by (metis analz_idem analz_increasing analz_mono insert_absorb insert_mono insert_subset)
 
-text{*This rewrite rule helps in the simplification of messages that involve
+text\<open>This rewrite rule helps in the simplification of messages that involve
   the forwarding of unknown components (X).  Without it, removing occurrences
-  of X can be very complicated. *}
+  of X can be very complicated.\<close>
 lemma analz_insert_eq: "X\<in> analz H ==> analz (insert X H) = analz H"
 by (blast intro: analz_cut analz_insertI)
 
 
-text{*A congruence rule for "analz" *}
+text\<open>A congruence rule for "analz"\<close>
 
 lemma analz_subset_cong:
      "[| analz G \<subseteq> analz G'; analz H \<subseteq> analz H' |]
@@ -535,14 +535,14 @@
      "analz H = analz H' ==> analz(insert X H) = analz(insert X H')"
 by (force simp only: insert_def intro!: analz_cong)
 
-text{*If there are no pairs or encryptions then analz does nothing*}
+text\<open>If there are no pairs or encryptions then analz does nothing\<close>
 lemma analz_trivial:
      "[| \<forall>X Y. \<lbrace>X,Y\<rbrace> \<notin> H;  \<forall>X K. Crypt K X \<notin> H |] ==> analz H = H"
 apply safe
 apply (erule analz.induct, blast+)
 done
 
-text{*These two are obsolete (with a single Spy) but cost little to prove...*}
+text\<open>These two are obsolete (with a single Spy) but cost little to prove...\<close>
 lemma analz_UN_analz_lemma:
      "X\<in> analz (\<Union>i\<in>A. analz (H i)) ==> X\<in> analz (\<Union>i\<in>A. H i)"
 apply (erule analz.induct)
@@ -553,12 +553,12 @@
 by (blast intro: analz_UN_analz_lemma analz_mono [THEN [2] rev_subsetD])
 
 
-subsection{*Inductive relation "synth"*}
+subsection\<open>Inductive relation "synth"\<close>
 
-text{*Inductive definition of "synth" -- what can be built up from a set of
+text\<open>Inductive definition of "synth" -- what can be built up from a set of
     messages.  A form of upward closure.  Pairs can be built, messages
     encrypted with known keys.  Agent names are public domain.
-    Numbers can be guessed, but Nonces cannot be.  *}
+    Numbers can be guessed, but Nonces cannot be.\<close>
 
 inductive_set
   synth :: "msg set => msg set"
@@ -571,12 +571,12 @@
   | MPair  [intro]:   "[|X \<in> synth H;  Y \<in> synth H|] ==> \<lbrace>X,Y\<rbrace> \<in> synth H"
   | Crypt  [intro]:   "[|X \<in> synth H;  Key(K) \<in> H|] ==> Crypt K X \<in> synth H"
 
-text{*Monotonicity*}
+text\<open>Monotonicity\<close>
 lemma synth_mono: "G\<subseteq>H ==> synth(G) \<subseteq> synth(H)"
   by (auto, erule synth.induct, auto)
 
-text{*NO @{text Agent_synth}, as any Agent name can be synthesized.
-  The same holds for @{term Number}*}
+text\<open>NO \<open>Agent_synth\<close>, as any Agent name can be synthesized.
+  The same holds for @{term Number}\<close>
 inductive_cases Nonce_synth [elim!]: "Nonce n \<in> synth H"
 inductive_cases Key_synth   [elim!]: "Key K \<in> synth H"
 inductive_cases Hash_synth  [elim!]: "Hash X \<in> synth H"
@@ -587,17 +587,17 @@
 lemma synth_increasing: "H \<subseteq> synth(H)"
 by blast
 
-subsubsection{*Unions *}
+subsubsection\<open>Unions\<close>
 
-text{*Converse fails: we can synth more from the union than from the
-  separate parts, building a compound message using elements of each.*}
+text\<open>Converse fails: we can synth more from the union than from the
+  separate parts, building a compound message using elements of each.\<close>
 lemma synth_Un: "synth(G) \<union> synth(H) \<subseteq> synth(G \<union> H)"
 by (intro Un_least synth_mono Un_upper1 Un_upper2)
 
 lemma synth_insert: "insert X (synth H) \<subseteq> synth(insert X H)"
 by (metis insert_iff insert_subset subset_insertI synth.Inj synth_mono)
 
-subsubsection{*Idempotence and transitivity *}
+subsubsection\<open>Idempotence and transitivity\<close>
 
 lemma synth_synthD [dest!]: "X\<in> synth (synth H) ==> X\<in> synth H"
 by (erule synth.induct, blast+)
@@ -639,7 +639,7 @@
 by (unfold keysFor_def, blast)
 
 
-subsubsection{*Combinations of parts, analz and synth *}
+subsubsection\<open>Combinations of parts, analz and synth\<close>
 
 lemma parts_synth [simp]: "parts (synth H) = parts H \<union> synth H"
 apply (rule equalityI)
@@ -681,7 +681,7 @@
 qed
 
 
-subsubsection{*For reasoning about the Fake rule in traces *}
+subsubsection\<open>For reasoning about the Fake rule in traces\<close>
 
 lemma parts_insert_subset_Un: "X \<in> G ==> parts(insert X H) \<subseteq> parts G \<union> parts H"
 proof -
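
For illustration only (no such code exists in the sources): the analz operator described above, where pairs can be taken apart and encryptions opened only with a known key, is a closure that can be computed as a fixpoint. A minimal Python sketch with an ad-hoc tuple encoding of messages and all keys assumed symmetric.

# Messages: ("MPair", X, Y), ("Crypt", K, X), ("Key", K), ("Nonce", N), ...
def analz(H):
    S = set(H)
    changed = True
    while changed:
        changed = False
        for m in list(S):
            new = []
            if m[0] == "MPair":
                new = [m[1], m[2]]                       # Fst / Snd
            elif m[0] == "Crypt" and ("Key", m[1]) in S:
                new = [m[2]]                             # Decrypt with a known key
            for x in new:
                if x not in S:
                    S.add(x)
                    changed = True
    return S

H = {("Crypt", 7, ("MPair", ("Nonce", 1), ("Nonce", 2))), ("Key", 7)}
assert ("Nonce", 1) in analz(H)                          # key 7 is available
assert ("Nonce", 1) not in analz(H - {("Key", 7)})       # without it, nothing leaks
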
--- a/src/HOL/Metis_Examples/Proxies.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Proxies.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,10 +5,10 @@
 rudimentary higher-order reasoning.
 *)
 
-section {*
+section \<open>
 Example that Exercises Metis's and Sledgehammer's Logical Symbol Proxies for
 Rudimentary Higher-Order Reasoning.
-*}
+\<close>
 
 theory Proxies
 imports Type_Encodings
@@ -17,7 +17,7 @@
 sledgehammer_params [prover = spass, fact_filter = mepo, timeout = 30, preplay_timeout = 0,
   dont_minimize]
 
-text {* Extensionality and set constants *}
+text \<open>Extensionality and set constants\<close>
 
 lemma plus_1_not_0: "n + (1::nat) \<noteq> 0"
 by simp
@@ -57,7 +57,7 @@
 sledgehammer [expect = some]
 by (metis_exhaust B_def C_def int_le_0_imp_le_1 predicate1I)
 
-text {* Proxies for logical constants *}
+text \<open>Proxies for logical constants\<close>
 
 lemma "id (op =) x x"
 sledgehammer [type_enc = erased, expect = none] (id_apply)
--- a/src/HOL/Metis_Examples/Sets.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Sets.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Metis example featuring typed set theory.
 *)
 
-section {* Metis Example Featuring Typed Set Theory *}
+section \<open>Metis Example Featuring Typed Set Theory\<close>
 
 theory Sets
 imports Main
@@ -177,10 +177,10 @@
   thus "\<exists>z. S \<subseteq> {z}" by metis
 qed
 
-text {*
+text \<open>
   From W. W. Bledsoe and Guohui Feng, SET-VAR. JAR 11 (3), 1993, pages
   293-314.
-*}
+\<close>
 
 (* Notes: (1) The numbering doesn't completely agree with the paper.
    (2) We must rename set variables to avoid type clashes. *)
--- a/src/HOL/Metis_Examples/Tarski.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Tarski.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Metis example featuring the full theorem of Tarski.
 *)
 
-section {* Metis Example Featuring the Full Theorem of Tarski *}
+section \<open>Metis Example Featuring the Full Theorem of Tarski\<close>
 
 theory Tarski
 imports Main "~~/src/HOL/Library/FuncSet"
@@ -117,7 +117,7 @@
                              x: intY1}
                       (| pset=intY1, order=induced intY1 r|)"
 
-subsection {* Partial Order *}
+subsection \<open>Partial Order\<close>
 
 lemma (in PO) PO_imp_refl_on: "refl_on A r"
 apply (insert cl_po)
@@ -158,13 +158,13 @@
      "S \<subseteq> A ==> (| pset = S, order = induced S r |) \<in> PartialOrder"
 apply (simp (no_asm) add: PartialOrder_def)
 apply auto
--- {* refl *}
+\<comment> \<open>refl\<close>
 apply (simp add: refl_on_def induced_def)
 apply (blast intro: reflE)
--- {* antisym *}
+\<comment> \<open>antisym\<close>
 apply (simp add: antisym_def induced_def)
 apply (blast intro: antisymE)
--- {* trans *}
+\<comment> \<open>trans\<close>
 apply (simp add: trans_def induced_def)
 apply (blast intro: transE)
 done
@@ -290,7 +290,7 @@
 apply (simp add: reflE)
 done
 
-subsection {* sublattice *}
+subsection \<open>sublattice\<close>
 
 lemma (in PO) sublattice_imp_CL:
      "S <<= cl  ==> (| pset = S, order = induced S r |) \<in> CompleteLattice"
@@ -301,7 +301,7 @@
       ==> S <<= cl"
 by (simp add: sublattice_def A_def r_def)
 
-subsection {* lub *}
+subsection \<open>lub\<close>
 
 lemma (in CL) lub_unique: "[| S \<subseteq> A; isLub S cl x; isLub S cl L|] ==> x = L"
 apply (rule antisymE)
@@ -366,7 +366,7 @@
          (\<forall>z \<in> A. (\<forall>y \<in> S. (y, z):r) --> (L, z) \<in> r)|] ==> isLub S cl L"
 by (simp add: isLub_def A_def r_def)
 
-subsection {* glb *}
+subsection \<open>glb\<close>
 
 lemma (in CL) glb_in_lattice: "S \<subseteq> A ==> glb S cl \<in> A"
 apply (subst glb_dual_lub)
@@ -394,10 +394,10 @@
 apply (simp add: dualA_iff A_def, assumption)
 done
 
-text {*
+text \<open>
   Reduce the sublattice property by using substructural properties;
-  abandoned see @{text "Tarski_4.ML"}.
-*}
+  abandoned; see \<open>Tarski_4.ML\<close>.
+\<close>
 
 declare (in CLF) f_cl [simp]
 
@@ -433,7 +433,7 @@
 declare (in CLF) CLF_set_def[simp del] CL_dualCL[simp del] monotone_dual[simp del]
           dualA_iff[simp del]
 
-subsection {* fixed points *}
+subsection \<open>fixed points\<close>
 
 lemma fix_subset: "fix f A \<subseteq> A"
 by (simp add: fix_def, fast)
@@ -445,7 +445,7 @@
      "[| A \<subseteq> B; x \<in> fix (%y: A. f y) A |] ==> x \<in> fix f B"
 by (simp add: fix_def, auto)
 
-subsection {* lemmas for Tarski, lub *}
+subsection \<open>lemmas for Tarski, lub\<close>
 
 (*never proved, 2007-01-22*)
 
@@ -456,14 +456,14 @@
 apply (rule lub_least, fast)
 apply (rule f_in_funcset [THEN funcset_mem])
 apply (rule lub_in_lattice, fast)
--- {* @{text "\<forall>x:H. (x, f (lub H r)) \<in> r"} *}
+\<comment> \<open>\<open>\<forall>x:H. (x, f (lub H r)) \<in> r\<close>\<close>
 apply (rule ballI)
 (*never proved, 2007-01-22*)
 apply (rule transE)
--- {* instantiates @{text "(x, ?z) \<in> order cl to (x, f x)"}, *}
--- {* because of the definition of @{text H} *}
+\<comment> \<open>instantiates \<open>(x, ?z) \<in> order cl to (x, f x)\<close>,\<close>
+\<comment> \<open>because of the definition of \<open>H\<close>\<close>
 apply fast
--- {* so it remains to show @{text "(f x, f (lub H cl)) \<in> r"} *}
+\<comment> \<open>so it remains to show \<open>(f x, f (lub H cl)) \<in> r\<close>\<close>
 apply (rule_tac f = "f" in monotoneE)
 apply (rule monotone_f, fast)
 apply (rule lub_in_lattice, fast)
@@ -557,7 +557,7 @@
 apply (metis P_def lubH_is_fixp)
 done
 
-subsection {* Tarski fixpoint theorem 1, first part *}
+subsection \<open>Tarski fixpoint theorem 1, first part\<close>
 
 declare CL.lubI[intro] fix_subset[intro] CL.lub_in_lattice[intro]
         CLF.fixf_le_lubH[simp] CLF.lubH_least_fixf[simp]
@@ -582,7 +582,7 @@
         PO.dualPO[intro] CL.CL_dualCL[intro] PO.dualr_iff[simp]
 
 lemma (in CLF) glbH_is_fixp: "H = {x. (f x, x) \<in> r & x \<in> A} ==> glb H cl \<in> P"
-  -- {* Tarski for glb *}
+  \<comment> \<open>Tarski for glb\<close>
 (*sledgehammer;*)
 apply (simp add: glb_dual_lub P_def A_def r_def)
 apply (rule dualA_iff [THEN subst])
@@ -613,7 +613,7 @@
   OF dualPO CL_dualCL] dualPO CL_dualCL CLF_dual dualr_iff)
 done
 
-subsection {* interval *}
+subsection \<open>interval\<close>
 
 declare (in CLF) CO_refl_on[simp] refl_on_def [simp]
 
@@ -671,7 +671,7 @@
 apply (rule ballI)
 apply (simp add: interval_lemma1)
 apply (simp add: isLub_upper)
--- {* @{text "(L, b) \<in> r"} *}
+\<comment> \<open>\<open>(L, b) \<in> r\<close>\<close>
 apply (simp add: isLub_least interval_lemma2)
 done
 
@@ -709,31 +709,31 @@
 prefer 2 apply assumption
 apply assumption
 apply (erule exE)
--- {* define the lub for the interval as *}
+\<comment> \<open>define the lub for the interval as\<close>
 apply (rule_tac x = "if S = {} then a else L" in exI)
 apply (simp (no_asm_simp) add: isLub_def split del: if_split)
 apply (intro impI conjI)
--- {* @{text "(if S = {} then a else L) \<in> interval r a b"} *}
+\<comment> \<open>\<open>(if S = {} then a else L) \<in> interval r a b\<close>\<close>
 apply (simp add: CL_imp_PO L_in_interval)
 apply (simp add: left_in_interval)
--- {* lub prop 1 *}
+\<comment> \<open>lub prop 1\<close>
 apply (case_tac "S = {}")
--- {* @{text "S = {}, y \<in> S = False => everything"} *}
+\<comment> \<open>\<open>S = {}, y \<in> S = False => everything\<close>\<close>
 apply fast
--- {* @{text "S \<noteq> {}"} *}
+\<comment> \<open>\<open>S \<noteq> {}\<close>\<close>
 apply simp
--- {* @{text "\<forall>y:S. (y, L) \<in> induced (interval r a b) r"} *}
+\<comment> \<open>\<open>\<forall>y:S. (y, L) \<in> induced (interval r a b) r\<close>\<close>
 apply (rule ballI)
 apply (simp add: induced_def  L_in_interval)
 apply (rule conjI)
 apply (rule subsetD)
 apply (simp add: S_intv_cl, assumption)
 apply (simp add: isLub_upper)
--- {* @{text "\<forall>z:interval r a b. (\<forall>y:S. (y, z) \<in> induced (interval r a b) r \<longrightarrow> (if S = {} then a else L, z) \<in> induced (interval r a b) r"} *}
+\<comment> \<open>\<open>\<forall>z:interval r a b. (\<forall>y:S. (y, z) \<in> induced (interval r a b) r \<longrightarrow> (if S = {} then a else L, z) \<in> induced (interval r a b) r\<close>\<close>
 apply (rule ballI)
 apply (rule impI)
 apply (case_tac "S = {}")
--- {* @{text "S = {}"} *}
+\<comment> \<open>\<open>S = {}\<close>\<close>
 apply simp
 apply (simp add: induced_def  interval_def)
 apply (rule conjI)
@@ -741,7 +741,7 @@
 apply (rule interval_not_empty)
 apply (rule CO_trans)
 apply (simp add: interval_def)
--- {* @{text "S \<noteq> {}"} *}
+\<comment> \<open>\<open>S \<noteq> {}\<close>\<close>
 apply simp
 apply (simp add: induced_def  L_in_interval)
 apply (rule isLub_least, assumption)
@@ -771,7 +771,7 @@
 lemmas (in CLF) interv_is_compl_latt =
     interval_is_sublattice [THEN sublattice_imp_CL]
 
-subsection {* Top and Bottom *}
+subsection \<open>Top and Bottom\<close>
 lemma (in CLF) Top_dual_Bot: "Top cl = Bot (dual cl)"
 by (simp add: Top_def Bot_def least_def greatest_def dualA_iff dualr_iff)
 
@@ -826,7 +826,7 @@
 apply (metis Bot_prop ex_in_conv intervalI reflE rel_imp_elem)
 done
 
-subsection {* fixed points form a partial order *}
+subsection \<open>fixed points form a partial order\<close>
 
 lemma (in CLF) fixf_po: "(| pset = P, order = induced P r|) \<in> PartialOrder"
 by (simp add: P_def fix_subset po_subset_po)
@@ -856,12 +856,12 @@
 apply (rule Y_subset_A)
 apply (rule f_in_funcset [THEN funcset_mem])
 apply (rule lubY_in_A)
--- {* @{text "Y \<subseteq> P ==> f x = x"} *}
+\<comment> \<open>\<open>Y \<subseteq> P ==> f x = x\<close>\<close>
 apply (rule ballI)
 (*sledgehammer *)
 apply (rule_tac t = "x" in fix_imp_eq [THEN subst])
 apply (erule Y_ss [simplified P_def, THEN subsetD])
--- {* @{text "reduce (f x, f (lub Y cl)) \<in> r to (x, lub Y cl) \<in> r"} by monotonicity *}
+\<comment> \<open>\<open>reduce (f x, f (lub Y cl)) \<in> r to (x, lub Y cl) \<in> r\<close> by monotonicity\<close>
 (*sledgehammer*)
 apply (rule_tac f = "f" in monotoneE)
 apply (rule monotone_f)
@@ -890,14 +890,14 @@
 apply (rule conjI)
 apply (rule transE)
 apply (rule lubY_le_flubY)
--- {* @{text "(f (lub Y cl), f x) \<in> r"} *}
+\<comment> \<open>\<open>(f (lub Y cl), f x) \<in> r\<close>\<close>
 (*sledgehammer [has been proved before now...]*)
 apply (rule_tac f=f in monotoneE)
 apply (rule monotone_f)
 apply (rule lubY_in_A)
 apply (simp add: intY1_def interval_def  intY1_elem)
 apply (simp add: intY1_def  interval_def)
--- {* @{text "(f x, Top cl) \<in> r"} *}
+\<comment> \<open>\<open>(f x, Top cl) \<in> r\<close>\<close>
 apply (rule Top_prop)
 apply (rule f_in_funcset [THEN funcset_mem])
 apply (simp add: intY1_def interval_def  intY1_elem)
@@ -966,12 +966,12 @@
      "\<exists>L. isLub Y (| pset = P, order = induced P r |) L"
 apply (rule_tac x = "v" in exI)
 apply (simp add: isLub_def)
--- {* @{text "v \<in> P"} *}
+\<comment> \<open>\<open>v \<in> P\<close>\<close>
 apply (simp add: v_in_P)
 apply (rule conjI)
 (*sledgehammer*)
--- {* @{text v} is lub *}
--- {* @{text "1. \<forall>y:Y. (y, v) \<in> induced P r"} *}
+\<comment> \<open>\<open>v\<close> is lub\<close>
+\<comment> \<open>\<open>1. \<forall>y:Y. (y, v) \<in> induced P r\<close>\<close>
 apply (rule ballI)
 apply (simp add: induced_def subsetD v_in_P)
 apply (rule conjI)
@@ -984,7 +984,7 @@
 apply (fold intY1_def)
 apply (rule CL.glb_in_lattice [OF CL.intro, OF PO.intro CL_axioms.intro, OF _ intY1_is_cl, simplified])
  apply (simp add: CL_imp_PO intY1_is_cl, force)
--- {* @{text v} is LEAST ub *}
+\<comment> \<open>\<open>v\<close> is LEAST ub\<close>
 apply clarify
 apply (rule indI)
   prefer 3 apply assumption
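
An illustrative aside, not part of this changeset: the fixed-point construction treated in Tarski.thy above (lubH_is_fixp and its dual glbH_is_fixp) can be seen concretely on the powerset lattice of a finite set. A minimal Python sketch with an arbitrary monotone f chosen for the example.

from itertools import combinations

U = {0, 1, 2, 3}

def f(X):
    # monotone w.r.t. set inclusion (an assumed example, not from the theory)
    return X | {0} | ({2} if 1 in X else set())

def subsets(s):
    return [set(c) for r in range(len(s) + 1) for c in combinations(s, r)]

pre_fixed = [X for X in subsets(U) if f(X) <= X]   # H = {x. (f x, x) : r}
lfp = set.intersection(*pre_fixed)                 # glb H
assert f(lfp) == lfp                               # glb H is a fixed point
assert all(lfp <= X for X in pre_fixed)            # and lies below every pre-fixed point
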
--- a/src/HOL/Metis_Examples/Trans_Closure.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Trans_Closure.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Metis example featuring the transitive closure.
 *)
 
-section {* Metis Example Featuring the Transitive Closure *}
+section \<open>Metis Example Featuring the Transitive Closure\<close>
 
 theory Trans_Closure
 imports Main
@@ -16,11 +16,11 @@
 type_synonym addr = nat
 
 datatype val
-  = Unit        -- "dummy result value of void expressions"
-  | Null        -- "null reference"
-  | Bool bool   -- "Boolean value"
-  | Intg int    -- "integer value"
-  | Addr addr   -- "addresses of objects in the heap"
+  = Unit        \<comment> "dummy result value of void expressions"
+  | Null        \<comment> "null reference"
+  | Bool bool   \<comment> "Boolean value"
+  | Intg int    \<comment> "integer value"
+  | Addr addr   \<comment> "addresses of objects in the heap"
 
 consts R :: "(addr \<times> addr) set"
 
--- a/src/HOL/Metis_Examples/Type_Encodings.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Metis_Examples/Type_Encodings.thy	Thu May 26 17:51:22 2016 +0200
@@ -4,9 +4,9 @@
 Example that exercises Metis's (and hence Sledgehammer's) type encodings.
 *)
 
-section {*
+section \<open>
 Example that Exercises Metis's (and Hence Sledgehammer's) Type Encodings
-*}
+\<close>
 
 theory Type_Encodings
 imports Main
@@ -14,11 +14,11 @@
 
 declare [[metis_new_skolem]]
 
-text {* Setup for testing Metis exhaustively *}
+text \<open>Setup for testing Metis exhaustively\<close>
 
 lemma fork: "P \<Longrightarrow> P \<Longrightarrow> P" by assumption
 
-ML {*
+ML \<open>
 val type_encs =
   ["erased",
    "poly_guards",
@@ -60,14 +60,14 @@
                THEN COND (has_fewer_prems 2) all_tac no_tac
                THEN tac type_encs)
   in tac type_encs end
-*}
+\<close>
 
-method_setup metis_exhaust = {*
+method_setup metis_exhaust = \<open>
   Attrib.thms >>
     (fn ths => fn ctxt => SIMPLE_METHOD (metis_exhaust_tac ctxt ths))
-*} "exhaustively run Metis with all type encodings"
+\<close> "exhaustively run Metis with all type encodings"
 
-text {* Miscellaneous tests *}
+text \<open>Miscellaneous tests\<close>
 
 lemma "x = y \<Longrightarrow> y = x"
 by metis_exhaust
--- a/src/HOL/Mirabelle/Mirabelle.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Mirabelle/Mirabelle.thy	Thu May 26 17:51:22 2016 +0200
@@ -9,13 +9,13 @@
 ML_file "Tools/mirabelle.ML"
 ML_file "../TPTP/sledgehammer_tactics.ML"
 
-ML {* Toplevel.add_hook Mirabelle.step_hook *}
+ML \<open>Toplevel.add_hook Mirabelle.step_hook\<close>
 
-ML {*
+ML \<open>
 signature MIRABELLE_ACTION =
 sig
   val invoke : (string * string) list -> theory -> theory
 end
-*}
+\<close>
 
 end
--- a/src/HOL/Mirabelle/Mirabelle_Test.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Mirabelle/Mirabelle_Test.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
     Author:     Sascha Boehme, TU Munich
 *)
 
-section {* Simple test theory for Mirabelle actions *}
+section \<open>Simple test theory for Mirabelle actions\<close>
 
 theory Mirabelle_Test
 imports Main Mirabelle
@@ -16,9 +16,9 @@
 ML_file "Tools/mirabelle_sledgehammer_filter.ML"
 ML_file "Tools/mirabelle_try0.ML"
 
-text {*
+text \<open>
   Only perform type-checking of the actions,
   any reasonable test would be too complicated.
-*}
+\<close>
 
 end
--- a/src/HOL/Mirabelle/ex/Ex.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Mirabelle/ex/Ex.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,12 +1,12 @@
 theory Ex imports Pure
 begin
 
-ML {*
+ML \<open>
   val rc = Isabelle_System.bash
     "cd \"$ISABELLE_HOME/src/HOL/Library\"; isabelle mirabelle arith Inner_Product.thy";
   if rc <> 0 then error ("Mirabelle example failed: " ^ string_of_int rc)
   else ();
-*} -- "some arbitrary small test case"
+\<close> \<comment> "some arbitrary small test case"
 
 end
 
--- a/src/HOL/Mutabelle/MutabelleExtra.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Mutabelle/MutabelleExtra.thy	Thu May 26 17:51:22 2016 +0200
@@ -15,9 +15,9 @@
 ML_file "mutabelle_extra.ML"
 
 
-section {* configuration *}
+section \<open>configuration\<close>
 
-ML {* val log_directory = "" *}
+ML \<open>val log_directory = ""\<close>
 
 
 quickcheck_params [quiet, finite_types = false, report = false, size = 5, iterations = 1000]
@@ -27,25 +27,25 @@
 refute_params [maxtime = 10, minsize = 1, maxsize = 5, satsolver = jerusat]
 *)
 
-ML {* val mtds = [
+ML \<open>val mtds = [
   MutabelleExtra.quickcheck_mtd (Config.put Quickcheck.finite_types false) "random",
   MutabelleExtra.quickcheck_mtd (Config.put Quickcheck.finite_types true) "random",
   MutabelleExtra.quickcheck_mtd (Config.put Quickcheck.finite_types false) "small",
   MutabelleExtra.quickcheck_mtd (Config.put Quickcheck.finite_types true) "small"
 ]
-*}
+\<close>
 
-ML {*
+ML \<open>
 fun mutation_testing_of (name, thy) =
   (MutabelleExtra.init_random 1.0;
   MutabelleExtra.thms_of false thy
   |> MutabelleExtra.take_random 200
   |> (fn thms => MutabelleExtra.mutate_theorems_and_write_report
          @{theory} (1, 50) mtds thms (log_directory ^ "/" ^ name)))
-*}
+\<close>
   
 
-text {* Uncomment the following ML code to check the counterexample generation with all theorems of Complex_Main. *}
+text \<open>Uncomment the following ML code to check the counterexample generation with all theorems of Complex_Main.\<close>
 
 (*
 ML {*
@@ -64,9 +64,9 @@
  *}
 *)
 
-section {* Mutation testing Isabelle theories *}
+section \<open>Mutation testing Isabelle theories\<close>
 
-subsection {* List theory *}
+subsection \<open>List theory\<close>
 
 (*
 ML {*
@@ -74,9 +74,9 @@
  *}
 *)
 
-section {* Mutation testing AFP theories *}
+section \<open>Mutation testing AFP theories\<close>
 
-subsection {* AVL-Trees *}
+subsection \<open>AVL-Trees\<close>
 
 (*
 ML {*
@@ -84,7 +84,7 @@
  *}
 *)
 
-subsection {* BinaryTree *}
+subsection \<open>BinaryTree\<close>
 
 (*
 ML {*
@@ -92,7 +92,7 @@
  *}
 *)
 
-subsection {* Huffman *}
+subsection \<open>Huffman\<close>
 
 (*
 ML {*
@@ -100,7 +100,7 @@
  *}
 *)
 
-subsection {* List_Index *}
+subsection \<open>List_Index\<close>
 
 (*
 ML {*
@@ -108,7 +108,7 @@
  *}
 *)
 
-subsection {* Matrix *}
+subsection \<open>Matrix\<close>
 
 (*
 ML {*
@@ -116,7 +116,7 @@
  *}
 *)
 
-subsection {* NBE *}
+subsection \<open>NBE\<close>
 
 (*
 ML {*
@@ -124,7 +124,7 @@
  *}
 *)
 
-subsection {* Polynomial *}
+subsection \<open>Polynomial\<close>
 
 (*
 ML {*
@@ -132,7 +132,7 @@
  *}
 *)
 
-subsection {* Presburger Automata *}
+subsection \<open>Presburger Automata\<close>
 
 (*
 ML {*
--- a/src/HOL/NanoJava/AxSem.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/AxSem.thy	Thu May 26 17:51:22 2016 +0200
@@ -72,13 +72,13 @@
                   Impl (D,m) {Q} ==>
              A \<turnstile> {P} Meth (C,m) {Q}"
 
-  --{* @{text "\<Union>Z"} instead of @{text "\<forall>Z"} in the conclusion and\\
-       Z restricted to type state due to limitations of the inductive package *}
+  \<comment>\<open>\<open>\<Union>Z\<close> instead of \<open>\<forall>Z\<close> in the conclusion and\\
+       Z restricted to type state due to limitations of the inductive package\<close>
 | Impl: "\<forall>Z::state. A\<union> (\<Union>Z. (\<lambda>Cm. (P Z Cm, Impl Cm, Q Z Cm))`Ms) |\<turnstile> 
                             (\<lambda>Cm. (P Z Cm, body Cm, Q Z Cm))`Ms ==>
                       A |\<turnstile> (\<lambda>Cm. (P Z Cm, Impl Cm, Q Z Cm))`Ms"
 
---{* structural rules *}
+\<comment>\<open>structural rules\<close>
 
 | Asm:  "   a \<in> A ==> A |\<turnstile> {a}"
 
@@ -86,12 +86,12 @@
 
 | ConjE: "[|A |\<turnstile> C; c \<in> C |] ==> A |\<turnstile> {c}"
 
-  --{* Z restricted to type state due to limitations of the inductive package *}
+  \<comment>\<open>Z restricted to type state due to limitations of the inductive package\<close>
 | Conseq:"[| \<forall>Z::state. A \<turnstile> {P' Z} c {Q' Z};
              \<forall>s t. (\<forall>Z. P' Z s --> Q' Z t) --> (P s --> Q t) |] ==>
             A \<turnstile> {P} c {Q }"
 
-  --{* Z restricted to type state due to limitations of the inductive package *}
+  \<comment>\<open>Z restricted to type state due to limitations of the inductive package\<close>
 | eConseq:"[| \<forall>Z::state. A \<turnstile>\<^sub>e {P' Z} e {Q' Z};
              \<forall>s v t. (\<forall>Z. P' Z s --> Q' Z v t) --> (P s --> Q v t) |] ==>
             A \<turnstile>\<^sub>e {P} e {Q }"
--- a/src/HOL/NanoJava/Decl.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/Decl.thy	Thu May 26 17:51:22 2016 +0200
@@ -8,10 +8,10 @@
 theory Decl imports Term begin
 
 datatype ty
-  = NT           --{* null type  *}
-  | Class cname  --{* class type *}
+  = NT           \<comment>\<open>null type\<close>
+  | Class cname  \<comment>\<open>class type\<close>
 
-text{* Field declaration *}
+text\<open>Field declaration\<close>
 type_synonym fdecl           
         = "fname \<times> ty"
 
@@ -21,7 +21,7 @@
           lcl ::"(vname \<times> ty) list"
           bdy :: stmt
 
-text{* Method declaration *}
+text\<open>Method declaration\<close>
 type_synonym mdecl
         = "mname \<times> methd"
 
@@ -30,7 +30,7 @@
           flds    ::"fdecl list"
           methods ::"mdecl list"
 
-text{* Class declaration *}
+text\<open>Class declaration\<close>
 type_synonym cdecl
         = "cname \<times> class"
 
@@ -45,9 +45,9 @@
   (type) "prog " \<leftharpoondown> (type) "cdecl list"
 
 axiomatization
-  Prog    :: prog       --{* program as a global value *}
+  Prog    :: prog       \<comment>\<open>program as a global value\<close>
 and
-  Object  :: cname      --{* name of root class *}
+  Object  :: cname      \<comment>\<open>name of root class\<close>
 
 
 definition "class" :: "cname \<rightharpoonup> class" where
--- a/src/HOL/NanoJava/Equivalence.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/Equivalence.thy	Thu May 26 17:51:22 2016 +0200
@@ -94,9 +94,9 @@
 apply (tactic "split_all_tac @{context} 1", rename_tac P e Q)
 apply (rule hoare_ehoare.induct)
 (*18*)
-apply (tactic {* ALLGOALS (REPEAT o dresolve_tac @{context} [@{thm all_conjunct2}, @{thm all3_conjunct2}]) *})
-apply (tactic {* ALLGOALS (REPEAT o Rule_Insts.thin_tac @{context} "hoare _ _" []) *})
-apply (tactic {* ALLGOALS (REPEAT o Rule_Insts.thin_tac @{context} "ehoare _ _" []) *})
+apply (tactic \<open>ALLGOALS (REPEAT o dresolve_tac @{context} [@{thm all_conjunct2}, @{thm all3_conjunct2}])\<close>)
+apply (tactic \<open>ALLGOALS (REPEAT o Rule_Insts.thin_tac @{context} "hoare _ _" [])\<close>)
+apply (tactic \<open>ALLGOALS (REPEAT o Rule_Insts.thin_tac @{context} "ehoare _ _" [])\<close>)
 apply (simp_all only: cnvalid1_eq cenvalid_def2)
                  apply fast
                 apply fast
--- a/src/HOL/NanoJava/Example.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/Example.thy	Thu May 26 17:51:22 2016 +0200
@@ -9,7 +9,7 @@
 imports Equivalence
 begin
 
-text {*
+text \<open>
 
 \begin{verbatim}
 class Nat {
@@ -39,7 +39,7 @@
 }
 \end{verbatim}
 
-*}
+\<close>
 
 axiomatization where
   This_neq_Par [simp]: "This \<noteq> Par" and
@@ -62,8 +62,8 @@
   one :: expr
   where "one == {Nat}new Nat..suc(<>)"
 
-text {* The following properties could be derived from a more complete
-        program model, which we leave out for laziness. *}
+text \<open>The following properties could be derived from a more complete
+        program model, which we leave out for laziness.\<close>
 
 axiomatization where Nat_no_subclasses [simp]: "D \<preceq>C Nat = (D=Nat)"
 
--- a/src/HOL/NanoJava/State.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/State.thy	Thu May 26 17:51:22 2016 +0200
@@ -10,12 +10,12 @@
 definition body :: "cname \<times> mname => stmt" where
  "body \<equiv> \<lambda>(C,m). bdy (the (method C m))"
 
-text {* Locations, i.e.\ abstract references to objects *}
+text \<open>Locations, i.e.\ abstract references to objects\<close>
 typedecl loc 
 
 datatype val
-  = Null        --{* null reference *}
-  | Addr loc    --{* address, i.e. location of object *}
+  = Null        \<comment>\<open>null reference\<close>
+  | Addr loc    \<comment>\<open>address, i.e. location of object\<close>
 
 type_synonym fields
         = "(fname \<rightharpoonup> val)"
@@ -30,11 +30,11 @@
 definition init_vars :: "('a \<rightharpoonup> 'b) => ('a \<rightharpoonup> val)" where
  "init_vars m == map_option (\<lambda>T. Null) o m"
   
-text {* private: *}
+text \<open>private:\<close>
 type_synonym heap = "loc   \<rightharpoonup> obj"
 type_synonym locals = "vname \<rightharpoonup> val"  
 
-text {* private: *}
+text \<open>private:\<close>
 record  state
         = heap   :: heap
           locals :: locals
@@ -51,15 +51,15 @@
  "init_locs C m s \<equiv> s (| locals := locals s ++ 
                          init_vars (map_of (lcl (the (method C m)))) |)"
 
-text {* The first parameter of @{term set_locs} is of type @{typ state} 
-        rather than @{typ locals} in order to keep @{typ locals} private.*}
+text \<open>The first parameter of @{term set_locs} is of type @{typ state} 
+        rather than @{typ locals} in order to keep @{typ locals} private.\<close>
 definition set_locs :: "state => state => state" where
  "set_locs s s' \<equiv> s' (| locals := locals s |)"
 
 definition get_local     :: "state => vname => val" ("_<_>" [99,0] 99) where
  "get_local s x  \<equiv> the (locals s x)"
 
---{* local function: *}
+\<comment>\<open>local function:\<close>
 definition get_obj       :: "state => loc => obj" where
  "get_obj s a \<equiv> the (heap s a)"
 
@@ -69,7 +69,7 @@
 definition get_field     :: "state => loc => fname => val" where
  "get_field s a f \<equiv> the (snd (get_obj s a) f)"
 
---{* local function: *}
+\<comment>\<open>local function:\<close>
 definition hupd       :: "loc => obj => state => state"   ("hupd'(_\<mapsto>_')" [10,10] 1000) where
  "hupd a obj s \<equiv> s (| heap   := ((heap   s)(a\<mapsto>obj))|)"
 
--- a/src/HOL/NanoJava/Term.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/Term.thy	Thu May 26 17:51:22 2016 +0200
@@ -6,16 +6,16 @@
 
 theory Term imports Main begin
 
-typedecl cname  --{* class    name *}
-typedecl mname  --{* method   name *}
-typedecl fname  --{* field    name *}
-typedecl vname  --{* variable name *}
+typedecl cname  \<comment>\<open>class    name\<close>
+typedecl mname  \<comment>\<open>method   name\<close>
+typedecl fname  \<comment>\<open>field    name\<close>
+typedecl vname  \<comment>\<open>variable name\<close>
 
 axiomatization
-  This --{* This pointer *}
-  Par  --{* method parameter *}
-  Res  :: vname --{* method result *}
- -- {* Inequality axioms are not required for the meta theory. *}
+  This \<comment>\<open>This pointer\<close>
+  Par  \<comment>\<open>method parameter\<close>
+  Res  :: vname \<comment>\<open>method result\<close>
+ \<comment> \<open>Inequality axioms are not required for the meta theory.\<close>
 (*
 where
   This_neq_Par [simp]: "This \<noteq> Par"
@@ -24,21 +24,21 @@
 *)
 
 datatype stmt
-  = Skip                   --{* empty statement *}
+  = Skip                   \<comment>\<open>empty statement\<close>
   | Comp       stmt stmt   ("_;; _"             [91,90   ] 90)
   | Cond expr  stmt stmt   ("If '(_') _ Else _" [ 3,91,91] 91)
   | Loop vname stmt        ("While '(_') _"     [ 3,91   ] 91)
-  | LAss vname expr        ("_ :== _"           [99,   95] 94) --{* local assignment *}
-  | FAss expr  fname expr  ("_.._:==_"          [95,99,95] 94) --{* field assignment *}
-  | Meth "cname \<times> mname"   --{* virtual method *}
-  | Impl "cname \<times> mname"   --{* method implementation *}
+  | LAss vname expr        ("_ :== _"           [99,   95] 94) \<comment>\<open>local assignment\<close>
+  | FAss expr  fname expr  ("_.._:==_"          [95,99,95] 94) \<comment>\<open>field assignment\<close>
+  | Meth "cname \<times> mname"   \<comment>\<open>virtual method\<close>
+  | Impl "cname \<times> mname"   \<comment>\<open>method implementation\<close>
 and expr
-  = NewC cname       ("new _"        [   99] 95) --{* object creation  *}
-  | Cast cname expr                              --{* type cast        *}
-  | LAcc vname                                   --{* local access     *}
-  | FAcc expr  fname ("_.._"         [95,99] 95) --{* field access     *}
+  = NewC cname       ("new _"        [   99] 95) \<comment>\<open>object creation\<close>
+  | Cast cname expr                              \<comment>\<open>type cast\<close>
+  | LAcc vname                                   \<comment>\<open>local access\<close>
+  | FAcc expr  fname ("_.._"         [95,99] 95) \<comment>\<open>field access\<close>
   | Call cname expr mname expr                   
-                     ("{_}_.._'(_')" [99,95,99,95] 95) --{* method call *}
+                     ("{_}_.._'(_')" [99,95,99,95] 95) \<comment>\<open>method call\<close>
 
 end
 
--- a/src/HOL/NanoJava/TypeRel.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/NanoJava/TypeRel.thy	Thu May 26 17:51:22 2016 +0200
@@ -8,7 +8,7 @@
 imports Decl
 begin
 
-text{* Direct subclass relation *}
+text\<open>Direct subclass relation\<close>
 
 definition subcls1 :: "(cname \<times> cname) set"
 where
@@ -24,7 +24,7 @@
 
 subsection "Declarations and properties not used in the meta theory"
 
-text{* Widening, viz. method invocation conversion *}
+text\<open>Widening, viz. method invocation conversion\<close>
 inductive
   widen :: "ty => ty => bool"  ("_ \<preceq> _" [71,71] 70)
 where
@@ -103,7 +103,7 @@
 apply (subst cut_apply, auto intro: subcls1I)
 done
 
---{* Methods of a class, with inheritance and hiding *}
+\<comment>\<open>Methods of a class, with inheritance and hiding\<close>
 definition "method" :: "cname => (mname \<rightharpoonup> methd)" where
   "method C \<equiv> class_rec C methods"
 
@@ -115,7 +115,7 @@
 done
 
 
---{* Fields of a class, with inheritance and hiding *}
+\<comment>\<open>Fields of a class, with inheritance and hiding\<close>
 definition field  :: "cname => (fname \<rightharpoonup> ty)" where
   "field C \<equiv> class_rec C flds"
 
--- a/src/HOL/Nitpick_Examples/Core_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Core_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick's functional core.
 *)
 
-section {* Examples Featuring Nitpick's Functional Core *}
+section \<open>Examples Featuring Nitpick's Functional Core\<close>
 
 theory Core_Nits
 imports Main
@@ -14,7 +14,7 @@
 nitpick_params [verbose, card = 1-6, unary_ints, max_potential = 0,
                 sat_solver = MiniSat_JNI, max_threads = 1, timeout = 240]
 
-subsection {* Curry in a Hurry *}
+subsection \<open>Curry in a Hurry\<close>
 
 lemma "(\<lambda>f x y. (curry o case_prod) f x y) = (\<lambda>f x y. (\<lambda>x. x) f x y)"
 nitpick [card = 1-12, expect = none]
@@ -36,7 +36,7 @@
 nitpick [card = 1-12, expect = none]
 by auto
 
-subsection {* Representations *}
+subsection \<open>Representations\<close>
 
 lemma "\<exists>f. f = (\<lambda>x. x) \<and> f y = y"
 nitpick [expect = none]
@@ -156,7 +156,7 @@
 nitpick [card = 8, expect = genuine]
 oops
 
-subsection {* Quantifiers *}
+subsection \<open>Quantifiers\<close>
 
 lemma "x = y"
 nitpick [card 'a = 1, expect = none]
@@ -264,7 +264,7 @@
 nitpick [expect = none]
 by auto
 
-subsection {* Schematic Variables *}
+subsection \<open>Schematic Variables\<close>
 
 schematic_goal "x = ?x"
 nitpick [expect = none]
@@ -290,7 +290,7 @@
 nitpick [expect = none]
 by auto
 
-subsection {* Known Constants *}
+subsection \<open>Known Constants\<close>
 
 lemma "x \<equiv> Pure.all \<Longrightarrow> False"
 nitpick [card = 1, expect = genuine]
@@ -678,7 +678,7 @@
 nitpick [expect = none]
 oops
 
-subsection {* The and Eps *}
+subsection \<open>The and Eps\<close>
 
 lemma "x = The"
 nitpick [card = 5, expect = genuine]
@@ -919,7 +919,7 @@
 
 nitpick_params [max_potential = 0]
 
-subsection {* Destructors and Recursors *}
+subsection \<open>Destructors and Recursors\<close>
 
 lemma "(x::'a) = (case True of True \<Rightarrow> x | False \<Rightarrow> x)"
 nitpick [card = 2, expect = none]
--- a/src/HOL/Nitpick_Examples/Datatype_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Datatype_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick applied to datatypes.
 *)
 
-section {* Examples Featuring Nitpick Applied to Datatypes *}
+section \<open>Examples Featuring Nitpick Applied to Datatypes\<close>
 
 theory Datatype_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Hotel_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Hotel_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,8 +5,8 @@
 Nitpick example based on Tobias Nipkow's hotel key card formalization.
 *)
 
-section {* Nitpick Example Based on Tobias Nipkow's Hotel Key Card
-          Formalization *}
+section \<open>Nitpick Example Based on Tobias Nipkow's Hotel Key Card
+          Formalization\<close>
 
 theory Hotel_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Induct_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Induct_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick applied to (co)inductive definitions.
 *)
 
-section {* Examples Featuring Nitpick Applied to (Co)inductive Definitions *}
+section \<open>Examples Featuring Nitpick Applied to (Co)inductive Definitions\<close>
 
 theory Induct_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Integer_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Integer_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick applied to natural numbers and integers.
 *)
 
-section {* Examples Featuring Nitpick Applied to Natural Numbers and Integers *}
+section \<open>Examples Featuring Nitpick Applied to Natural Numbers and Integers\<close>
 
 theory Integer_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Manual_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Manual_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples from the Nitpick manual.
 *)
 
-section {* Examples from the Nitpick Manual *}
+section \<open>Examples from the Nitpick Manual\<close>
 
 (* The "expect" arguments to Nitpick in this theory and the other example
    theories are there so that the example can also serve as a regression test
@@ -15,12 +15,12 @@
 imports Real "~~/src/HOL/Library/Quotient_Product"
 begin
 
-section {* 2. First Steps *}
+section \<open>2. First Steps\<close>
 
 nitpick_params [sat_solver = MiniSat_JNI, max_threads = 1, timeout = 240]
 
 
-subsection {* 2.1. Propositional Logic *}
+subsection \<open>2.1. Propositional Logic\<close>
 
 lemma "P \<longleftrightarrow> Q"
 nitpick [expect = genuine]
@@ -30,14 +30,14 @@
 oops
 
 
-subsection {* 2.2. Type Variables *}
+subsection \<open>2.2. Type Variables\<close>
 
 lemma "x \<in> A \<Longrightarrow> (THE y. y \<in> A) \<in> A"
 nitpick [verbose, expect = genuine]
 oops
 
 
-subsection {* 2.3. Constants *}
+subsection \<open>2.3. Constants\<close>
 
 lemma "x \<in> A \<Longrightarrow> (THE y. y \<in> A) \<in> A"
 nitpick [show_consts, expect = genuine]
@@ -51,7 +51,7 @@
 by (metis the_equality)
 
 
-subsection {* 2.4. Skolemization *}
+subsection \<open>2.4. Skolemization\<close>
 
 lemma "\<exists>g. \<forall>x. g (f x) = x \<Longrightarrow> \<forall>y. \<exists>x. y = f x"
 nitpick [expect = genuine]
@@ -66,7 +66,7 @@
 oops
 
 
-subsection {* 2.5. Natural Numbers and Integers *}
+subsection \<open>2.5. Natural Numbers and Integers\<close>
 
 lemma "\<lbrakk>i \<le> j; n \<le> (m::int)\<rbrakk> \<Longrightarrow> i * n + j * m \<le> i * m + j * n"
 nitpick [expect = genuine]
@@ -87,7 +87,7 @@
 oops
 
 
-subsection {* 2.6. Inductive Datatypes *}
+subsection \<open>2.6. Inductive Datatypes\<close>
 
 lemma "hd (xs @ [y, y]) = hd xs"
 nitpick [expect = genuine]
@@ -99,7 +99,7 @@
 oops
 
 
-subsection {* 2.7. Typedefs, Records, Rationals, and Reals *}
+subsection \<open>2.7. Typedefs, Records, Rationals, and Reals\<close>
 
 definition "three = {0::nat, 1, 2}"
 typedef three = three
@@ -129,16 +129,16 @@
 nitpick [show_types, expect = genuine]
 oops
 
-ML {*
+ML \<open>
 fun my_int_postproc _ _ _ T (Const _ $ (Const _ $ t1 $ t2)) =
     HOLogic.mk_number T (snd (HOLogic.dest_number t1)
                          - snd (HOLogic.dest_number t2))
   | my_int_postproc _ _ _ _ t = t
-*}
+\<close>
 
-declaration {*
+declaration \<open>
 Nitpick_Model.register_term_postprocessor @{typ my_int} my_int_postproc
-*}
+\<close>
 
 lemma "add x y = add x x"
 nitpick [show_types]
@@ -157,7 +157,7 @@
 oops
 
 
-subsection {* 2.8. Inductive and Coinductive Predicates *}
+subsection \<open>2.8. Inductive and Coinductive Predicates\<close>
 
 inductive even where
 "even 0" |
@@ -200,7 +200,7 @@
 oops
 
 
-subsection {* 2.9. Coinductive Datatypes *}
+subsection \<open>2.9. Coinductive Datatypes\<close>
 
 codatatype 'a llist = LNil | LCons 'a "'a llist"
 
@@ -221,7 +221,7 @@
 sorry
 
 
-subsection {* 2.10. Boxing *}
+subsection \<open>2.10. Boxing\<close>
 
 datatype tm = Var nat | Lam tm | App tm tm
 
@@ -258,7 +258,7 @@
 sorry
 
 
-subsection {* 2.11. Scope Monotonicity *}
+subsection \<open>2.11. Scope Monotonicity\<close>
 
 lemma "length xs = length ys \<Longrightarrow> rev (zip xs ys) = zip xs (rev ys)"
 nitpick [verbose, expect = genuine]
@@ -270,7 +270,7 @@
 oops
 
 
-subsection {* 2.12. Inductive Properties *}
+subsection \<open>2.12. Inductive Properties\<close>
 
 inductive_set reach where
 "(4::nat) \<in> reach" |
@@ -299,12 +299,12 @@
 by (induct set: reach) arith+
 
 
-section {* 3. Case Studies *}
+section \<open>3. Case Studies\<close>
 
 nitpick_params [max_potential = 0]
 
 
-subsection {* 3.1. A Context-Free Grammar *}
+subsection \<open>3.1. A Context-Free Grammar\<close>
 
 datatype alphabet = a | b
 
@@ -379,7 +379,7 @@
 sorry
 
 
-subsection {* 3.2. AA Trees *}
+subsection \<open>3.2. AA Trees\<close>
 
 datatype 'a aa_tree = \<Lambda> | N "'a::linorder" nat "'a aa_tree" "'a aa_tree"
 
--- a/src/HOL/Nitpick_Examples/Mini_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Mini_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Minipick, the minimalistic version of Nitpick.
 *)
 
-section {* Examples Featuring Minipick, the Minimalistic Version of Nitpick *}
+section \<open>Examples Featuring Minipick, the Minimalistic Version of Nitpick\<close>
 
 theory Mini_Nits
 imports Main
@@ -16,96 +16,96 @@
 nitpick_params [verbose, sat_solver = MiniSat_JNI, max_threads = 1,
   total_consts = smart]
 
-ML {*
+ML \<open>
 val check = Minipick.minipick @{context}
 val expect = Minipick.minipick_expect @{context}
 val none = expect "none"
 val genuine = expect "genuine"
 val unknown = expect "unknown"
-*}
+\<close>
 
-ML_val {* genuine 1 @{prop "x = Not"} *}
-ML_val {* none 1 @{prop "\<exists>x. x = Not"} *}
-ML_val {* none 1 @{prop "\<not> False"} *}
-ML_val {* genuine 1 @{prop "\<not> True"} *}
-ML_val {* none 1 @{prop "\<not> \<not> b \<longleftrightarrow> b"} *}
-ML_val {* none 1 @{prop True} *}
-ML_val {* genuine 1 @{prop False} *}
-ML_val {* genuine 1 @{prop "True \<longleftrightarrow> False"} *}
-ML_val {* none 1 @{prop "True \<longleftrightarrow> \<not> False"} *}
-ML_val {* none 4 @{prop "\<forall>x. x = x"} *}
-ML_val {* none 4 @{prop "\<exists>x. x = x"} *}
-ML_val {* none 1 @{prop "\<forall>x. x = y"} *}
-ML_val {* genuine 2 @{prop "\<forall>x. x = y"} *}
-ML_val {* none 2 @{prop "\<exists>x. x = y"} *}
-ML_val {* none 2 @{prop "\<forall>x::'a \<times> 'a. x = x"} *}
-ML_val {* none 2 @{prop "\<exists>x::'a \<times> 'a. x = y"} *}
-ML_val {* genuine 2 @{prop "\<forall>x::'a \<times> 'a. x = y"} *}
-ML_val {* none 2 @{prop "\<exists>x::'a \<times> 'a. x = y"} *}
-ML_val {* none 1 @{prop "All = Ex"} *}
-ML_val {* genuine 2 @{prop "All = Ex"} *}
-ML_val {* none 1 @{prop "All P = Ex P"} *}
-ML_val {* genuine 2 @{prop "All P = Ex P"} *}
-ML_val {* none 4 @{prop "x = y \<longrightarrow> P x = P y"} *}
-ML_val {* none 4 @{prop "(x::'a \<times> 'a) = y \<longrightarrow> P x = P y"} *}
-ML_val {* none 2 @{prop "(x::'a \<times> 'a) = y \<longrightarrow> P x y = P y x"} *}
-ML_val {* none 4 @{prop "\<exists>x::'a \<times> 'a. x = y \<longrightarrow> P x = P y"} *}
-ML_val {* none 2 @{prop "(x::'a \<Rightarrow> 'a) = y \<longrightarrow> P x = P y"} *}
-ML_val {* none 2 @{prop "\<exists>x::'a \<Rightarrow> 'a. x = y \<longrightarrow> P x = P y"} *}
-ML_val {* genuine 1 @{prop "(op =) X = Ex"} *}
-ML_val {* none 2 @{prop "\<forall>x::'a \<Rightarrow> 'a. x = x"} *}
-ML_val {* none 1 @{prop "x = y"} *}
-ML_val {* genuine 1 @{prop "x \<longleftrightarrow> y"} *}
-ML_val {* genuine 2 @{prop "x = y"} *}
-ML_val {* genuine 1 @{prop "X \<subseteq> Y"} *}
-ML_val {* none 1 @{prop "P \<and> Q \<longleftrightarrow> Q \<and> P"} *}
-ML_val {* none 1 @{prop "P \<and> Q \<longrightarrow> P"} *}
-ML_val {* none 1 @{prop "P \<or> Q \<longleftrightarrow> Q \<or> P"} *}
-ML_val {* genuine 1 @{prop "P \<or> Q \<longrightarrow> P"} *}
-ML_val {* none 1 @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> (\<not> P \<or> Q)"} *}
-ML_val {* none 4 @{prop "{a} = {a, a}"} *}
-ML_val {* genuine 2 @{prop "{a} = {a, b}"} *}
-ML_val {* genuine 1 @{prop "{a} \<noteq> {a, b}"} *}
-ML_val {* none 4 @{prop "{}\<^sup>+ = {}"} *}
-ML_val {* none 4 @{prop "UNIV\<^sup>+ = UNIV"} *}
-ML_val {* none 4 @{prop "(UNIV :: ('a \<times> 'b) set) - {} = UNIV"} *}
-ML_val {* none 4 @{prop "{} - (UNIV :: ('a \<times> 'b) set) = {}"} *}
-ML_val {* none 1 @{prop "{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"} *}
-ML_val {* genuine 2 @{prop "{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"} *}
-ML_val {* none 4 @{prop "a \<noteq> c \<Longrightarrow> {(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"} *}
-ML_val {* none 4 @{prop "A \<union> B = {x. x \<in> A \<or> x \<in> B}"} *}
-ML_val {* none 4 @{prop "A \<inter> B = {x. x \<in> A \<and> x \<in> B}"} *}
-ML_val {* none 4 @{prop "A - B = (\<lambda>x. A x \<and> \<not> B x)"} *}
-ML_val {* none 4 @{prop "\<exists>a b. (a, b) = (b, a)"} *}
-ML_val {* genuine 2 @{prop "(a, b) = (b, a)"} *}
-ML_val {* genuine 2 @{prop "(a, b) \<noteq> (b, a)"} *}
-ML_val {* none 4 @{prop "\<exists>a b::'a \<times> 'a. (a, b) = (b, a)"} *}
-ML_val {* genuine 2 @{prop "(a::'a \<times> 'a, b) = (b, a)"} *}
-ML_val {* none 4 @{prop "\<exists>a b::'a \<times> 'a \<times> 'a. (a, b) = (b, a)"} *}
-ML_val {* genuine 2 @{prop "(a::'a \<times> 'a \<times> 'a, b) \<noteq> (b, a)"} *}
-ML_val {* none 4 @{prop "\<exists>a b::'a \<Rightarrow> 'a. (a, b) = (b, a)"} *}
-ML_val {* genuine 1 @{prop "(a::'a \<Rightarrow> 'a, b) \<noteq> (b, a)"} *}
-ML_val {* none 4 @{prop "fst (a, b) = a"} *}
-ML_val {* none 1 @{prop "fst (a, b) = b"} *}
-ML_val {* genuine 2 @{prop "fst (a, b) = b"} *}
-ML_val {* genuine 2 @{prop "fst (a, b) \<noteq> b"} *}
-ML_val {* genuine 2 @{prop "f ((x, z), y) = (x, z)"} *}
-ML_val {* none 2 @{prop "(ALL x. f x = fst x) \<longrightarrow> f ((x, z), y) = (x, z)"} *}
-ML_val {* none 4 @{prop "snd (a, b) = b"} *}
-ML_val {* none 1 @{prop "snd (a, b) = a"} *}
-ML_val {* genuine 2 @{prop "snd (a, b) = a"} *}
-ML_val {* genuine 2 @{prop "snd (a, b) \<noteq> a"} *}
-ML_val {* genuine 1 @{prop P} *}
-ML_val {* genuine 1 @{prop "(\<lambda>x. P) a"} *}
-ML_val {* genuine 1 @{prop "(\<lambda>x y z. P y x z) a b c"} *}
-ML_val {* none 4 @{prop "\<exists>f. f = (\<lambda>x. x) \<and> f y = y"} *}
-ML_val {* genuine 1 @{prop "\<exists>f. f p \<noteq> p \<and> (\<forall>a b. f (a, b) = (a, b))"} *}
-ML_val {* none 2 @{prop "\<exists>f. \<forall>a b. f (a, b) = (a, b)"} *}
-ML_val {* none 3 @{prop "f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (y, x)"} *}
-ML_val {* genuine 2 @{prop "f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (x, y)"} *}
-ML_val {* none 4 @{prop "f = (\<lambda>x. f x)"} *}
-ML_val {* none 4 @{prop "f = (\<lambda>x. f x::'a \<Rightarrow> bool)"} *}
-ML_val {* none 4 @{prop "f = (\<lambda>x y. f x y)"} *}
-ML_val {* none 4 @{prop "f = (\<lambda>x y. f x y::'a \<Rightarrow> bool)"} *}
+ML_val \<open>genuine 1 @{prop "x = Not"}\<close>
+ML_val \<open>none 1 @{prop "\<exists>x. x = Not"}\<close>
+ML_val \<open>none 1 @{prop "\<not> False"}\<close>
+ML_val \<open>genuine 1 @{prop "\<not> True"}\<close>
+ML_val \<open>none 1 @{prop "\<not> \<not> b \<longleftrightarrow> b"}\<close>
+ML_val \<open>none 1 @{prop True}\<close>
+ML_val \<open>genuine 1 @{prop False}\<close>
+ML_val \<open>genuine 1 @{prop "True \<longleftrightarrow> False"}\<close>
+ML_val \<open>none 1 @{prop "True \<longleftrightarrow> \<not> False"}\<close>
+ML_val \<open>none 4 @{prop "\<forall>x. x = x"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>x. x = x"}\<close>
+ML_val \<open>none 1 @{prop "\<forall>x. x = y"}\<close>
+ML_val \<open>genuine 2 @{prop "\<forall>x. x = y"}\<close>
+ML_val \<open>none 2 @{prop "\<exists>x. x = y"}\<close>
+ML_val \<open>none 2 @{prop "\<forall>x::'a \<times> 'a. x = x"}\<close>
+ML_val \<open>none 2 @{prop "\<exists>x::'a \<times> 'a. x = y"}\<close>
+ML_val \<open>genuine 2 @{prop "\<forall>x::'a \<times> 'a. x = y"}\<close>
+ML_val \<open>none 2 @{prop "\<exists>x::'a \<times> 'a. x = y"}\<close>
+ML_val \<open>none 1 @{prop "All = Ex"}\<close>
+ML_val \<open>genuine 2 @{prop "All = Ex"}\<close>
+ML_val \<open>none 1 @{prop "All P = Ex P"}\<close>
+ML_val \<open>genuine 2 @{prop "All P = Ex P"}\<close>
+ML_val \<open>none 4 @{prop "x = y \<longrightarrow> P x = P y"}\<close>
+ML_val \<open>none 4 @{prop "(x::'a \<times> 'a) = y \<longrightarrow> P x = P y"}\<close>
+ML_val \<open>none 2 @{prop "(x::'a \<times> 'a) = y \<longrightarrow> P x y = P y x"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>x::'a \<times> 'a. x = y \<longrightarrow> P x = P y"}\<close>
+ML_val \<open>none 2 @{prop "(x::'a \<Rightarrow> 'a) = y \<longrightarrow> P x = P y"}\<close>
+ML_val \<open>none 2 @{prop "\<exists>x::'a \<Rightarrow> 'a. x = y \<longrightarrow> P x = P y"}\<close>
+ML_val \<open>genuine 1 @{prop "(op =) X = Ex"}\<close>
+ML_val \<open>none 2 @{prop "\<forall>x::'a \<Rightarrow> 'a. x = x"}\<close>
+ML_val \<open>none 1 @{prop "x = y"}\<close>
+ML_val \<open>genuine 1 @{prop "x \<longleftrightarrow> y"}\<close>
+ML_val \<open>genuine 2 @{prop "x = y"}\<close>
+ML_val \<open>genuine 1 @{prop "X \<subseteq> Y"}\<close>
+ML_val \<open>none 1 @{prop "P \<and> Q \<longleftrightarrow> Q \<and> P"}\<close>
+ML_val \<open>none 1 @{prop "P \<and> Q \<longrightarrow> P"}\<close>
+ML_val \<open>none 1 @{prop "P \<or> Q \<longleftrightarrow> Q \<or> P"}\<close>
+ML_val \<open>genuine 1 @{prop "P \<or> Q \<longrightarrow> P"}\<close>
+ML_val \<open>none 1 @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> (\<not> P \<or> Q)"}\<close>
+ML_val \<open>none 4 @{prop "{a} = {a, a}"}\<close>
+ML_val \<open>genuine 2 @{prop "{a} = {a, b}"}\<close>
+ML_val \<open>genuine 1 @{prop "{a} \<noteq> {a, b}"}\<close>
+ML_val \<open>none 4 @{prop "{}\<^sup>+ = {}"}\<close>
+ML_val \<open>none 4 @{prop "UNIV\<^sup>+ = UNIV"}\<close>
+ML_val \<open>none 4 @{prop "(UNIV :: ('a \<times> 'b) set) - {} = UNIV"}\<close>
+ML_val \<open>none 4 @{prop "{} - (UNIV :: ('a \<times> 'b) set) = {}"}\<close>
+ML_val \<open>none 1 @{prop "{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"}\<close>
+ML_val \<open>genuine 2 @{prop "{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"}\<close>
+ML_val \<open>none 4 @{prop "a \<noteq> c \<Longrightarrow> {(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"}\<close>
+ML_val \<open>none 4 @{prop "A \<union> B = {x. x \<in> A \<or> x \<in> B}"}\<close>
+ML_val \<open>none 4 @{prop "A \<inter> B = {x. x \<in> A \<and> x \<in> B}"}\<close>
+ML_val \<open>none 4 @{prop "A - B = (\<lambda>x. A x \<and> \<not> B x)"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>a b. (a, b) = (b, a)"}\<close>
+ML_val \<open>genuine 2 @{prop "(a, b) = (b, a)"}\<close>
+ML_val \<open>genuine 2 @{prop "(a, b) \<noteq> (b, a)"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>a b::'a \<times> 'a. (a, b) = (b, a)"}\<close>
+ML_val \<open>genuine 2 @{prop "(a::'a \<times> 'a, b) = (b, a)"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>a b::'a \<times> 'a \<times> 'a. (a, b) = (b, a)"}\<close>
+ML_val \<open>genuine 2 @{prop "(a::'a \<times> 'a \<times> 'a, b) \<noteq> (b, a)"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>a b::'a \<Rightarrow> 'a. (a, b) = (b, a)"}\<close>
+ML_val \<open>genuine 1 @{prop "(a::'a \<Rightarrow> 'a, b) \<noteq> (b, a)"}\<close>
+ML_val \<open>none 4 @{prop "fst (a, b) = a"}\<close>
+ML_val \<open>none 1 @{prop "fst (a, b) = b"}\<close>
+ML_val \<open>genuine 2 @{prop "fst (a, b) = b"}\<close>
+ML_val \<open>genuine 2 @{prop "fst (a, b) \<noteq> b"}\<close>
+ML_val \<open>genuine 2 @{prop "f ((x, z), y) = (x, z)"}\<close>
+ML_val \<open>none 2 @{prop "(ALL x. f x = fst x) \<longrightarrow> f ((x, z), y) = (x, z)"}\<close>
+ML_val \<open>none 4 @{prop "snd (a, b) = b"}\<close>
+ML_val \<open>none 1 @{prop "snd (a, b) = a"}\<close>
+ML_val \<open>genuine 2 @{prop "snd (a, b) = a"}\<close>
+ML_val \<open>genuine 2 @{prop "snd (a, b) \<noteq> a"}\<close>
+ML_val \<open>genuine 1 @{prop P}\<close>
+ML_val \<open>genuine 1 @{prop "(\<lambda>x. P) a"}\<close>
+ML_val \<open>genuine 1 @{prop "(\<lambda>x y z. P y x z) a b c"}\<close>
+ML_val \<open>none 4 @{prop "\<exists>f. f = (\<lambda>x. x) \<and> f y = y"}\<close>
+ML_val \<open>genuine 1 @{prop "\<exists>f. f p \<noteq> p \<and> (\<forall>a b. f (a, b) = (a, b))"}\<close>
+ML_val \<open>none 2 @{prop "\<exists>f. \<forall>a b. f (a, b) = (a, b)"}\<close>
+ML_val \<open>none 3 @{prop "f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (y, x)"}\<close>
+ML_val \<open>genuine 2 @{prop "f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (x, y)"}\<close>
+ML_val \<open>none 4 @{prop "f = (\<lambda>x. f x)"}\<close>
+ML_val \<open>none 4 @{prop "f = (\<lambda>x. f x::'a \<Rightarrow> bool)"}\<close>
+ML_val \<open>none 4 @{prop "f = (\<lambda>x y. f x y)"}\<close>
+ML_val \<open>none 4 @{prop "f = (\<lambda>x y. f x y::'a \<Rightarrow> bool)"}\<close>
 
 end
--- a/src/HOL/Nitpick_Examples/Mono_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Mono_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick's monotonicity check.
 *)
 
-section {* Examples Featuring Nitpick's Monotonicity Check *}
+section \<open>Examples Featuring Nitpick's Monotonicity Check\<close>
 
 theory Mono_Nits
 imports Main
@@ -13,7 +13,7 @@
         (* "~/afp/thys/AVL-Trees/AVL2" "~/afp/thys/Huffman/Huffman" *)
 begin
 
-ML {*
+ML \<open>
 open Nitpick_Util
 open Nitpick_HOL
 open Nitpick_Preproc
@@ -62,84 +62,84 @@
 fun nonmono t = not (is_mono t) orelse raise BUG
 fun const t = is_const t orelse raise BUG
 fun nonconst t = not (is_const t) orelse raise BUG
-*}
+\<close>
 
-ML {* Nitpick_Mono.trace := false *}
+ML \<open>Nitpick_Mono.trace := false\<close>
 
-ML_val {* const @{term "A::('a\<Rightarrow>'b)"} *}
-ML_val {* const @{term "(A::'a set) = A"} *}
-ML_val {* const @{term "(A::'a set set) = A"} *}
-ML_val {* const @{term "(\<lambda>x::'a set. a \<in> x)"} *}
-ML_val {* const @{term "{{a::'a}} = C"} *}
-ML_val {* const @{term "{f::'a\<Rightarrow>nat} = {g::'a\<Rightarrow>nat}"} *}
-ML_val {* const @{term "A \<union> (B::'a set)"} *}
-ML_val {* const @{term "\<lambda>A B x::'a. A x \<or> B x"} *}
-ML_val {* const @{term "P (a::'a)"} *}
-ML_val {* const @{term "\<lambda>a::'a. b (c (d::'a)) (e::'a) (f::'a)"} *}
-ML_val {* const @{term "\<forall>A::'a set. a \<in> A"} *}
-ML_val {* const @{term "\<forall>A::'a set. P A"} *}
-ML_val {* const @{term "P \<or> Q"} *}
-ML_val {* const @{term "A \<union> B = (C::'a set)"} *}
-ML_val {* const @{term "(\<lambda>A B x::'a. A x \<or> B x) A B = C"} *}
-ML_val {* const @{term "(if P then (A::'a set) else B) = C"} *}
-ML_val {* const @{term "let A = (C::'a set) in A \<union> B"} *}
-ML_val {* const @{term "THE x::'b. P x"} *}
-ML_val {* const @{term "(\<lambda>x::'a. False)"} *}
-ML_val {* const @{term "(\<lambda>x::'a. True)"} *}
-ML_val {* const @{term "(\<lambda>x::'a. False) = (\<lambda>x::'a. False)"} *}
-ML_val {* const @{term "(\<lambda>x::'a. True) = (\<lambda>x::'a. True)"} *}
-ML_val {* const @{term "Let (a::'a) A"} *}
-ML_val {* const @{term "A (a::'a)"} *}
-ML_val {* const @{term "insert (a::'a) A = B"} *}
-ML_val {* const @{term "- (A::'a set)"} *}
-ML_val {* const @{term "finite (A::'a set)"} *}
-ML_val {* const @{term "\<not> finite (A::'a set)"} *}
-ML_val {* const @{term "finite (A::'a set set)"} *}
-ML_val {* const @{term "\<lambda>a::'a. A a \<and> \<not> B a"} *}
-ML_val {* const @{term "A < (B::'a set)"} *}
-ML_val {* const @{term "A \<le> (B::'a set)"} *}
-ML_val {* const @{term "[a::'a]"} *}
-ML_val {* const @{term "[a::'a set]"} *}
-ML_val {* const @{term "[A \<union> (B::'a set)]"} *}
-ML_val {* const @{term "[A \<union> (B::'a set)] = [C]"} *}
-ML_val {* const @{term "{(\<lambda>x::'a. x = a)} = C"} *}
-ML_val {* const @{term "(\<lambda>a::'a. \<not> A a) = B"} *}
-ML_val {* const @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> f a \<and> g a \<longrightarrow> \<not> f a"} *}
-ML_val {* const @{term "\<lambda>A B x::'a. A x \<and> B x \<and> A = B"} *}
-ML_val {* const @{term "p = (\<lambda>(x::'a) (y::'a). P x \<or> \<not> Q y)"} *}
-ML_val {* const @{term "p = (\<lambda>(x::'a) (y::'a). p x y :: bool)"} *}
-ML_val {* const @{term "p = (\<lambda>A B x. A x \<and> \<not> B x) (\<lambda>x. True) (\<lambda>y. x \<noteq> y)"} *}
-ML_val {* const @{term "p = (\<lambda>y. x \<noteq> y)"} *}
-ML_val {* const @{term "(\<lambda>x. (p::'a\<Rightarrow>bool\<Rightarrow>bool) x False)"} *}
-ML_val {* const @{term "(\<lambda>x y. (p::'a\<Rightarrow>'a\<Rightarrow>bool\<Rightarrow>bool) x y False)"} *}
-ML_val {* const @{term "f = (\<lambda>x::'a. P x \<longrightarrow> Q x)"} *}
-ML_val {* const @{term "\<forall>a::'a. P a"} *}
+ML_val \<open>const @{term "A::('a\<Rightarrow>'b)"}\<close>
+ML_val \<open>const @{term "(A::'a set) = A"}\<close>
+ML_val \<open>const @{term "(A::'a set set) = A"}\<close>
+ML_val \<open>const @{term "(\<lambda>x::'a set. a \<in> x)"}\<close>
+ML_val \<open>const @{term "{{a::'a}} = C"}\<close>
+ML_val \<open>const @{term "{f::'a\<Rightarrow>nat} = {g::'a\<Rightarrow>nat}"}\<close>
+ML_val \<open>const @{term "A \<union> (B::'a set)"}\<close>
+ML_val \<open>const @{term "\<lambda>A B x::'a. A x \<or> B x"}\<close>
+ML_val \<open>const @{term "P (a::'a)"}\<close>
+ML_val \<open>const @{term "\<lambda>a::'a. b (c (d::'a)) (e::'a) (f::'a)"}\<close>
+ML_val \<open>const @{term "\<forall>A::'a set. a \<in> A"}\<close>
+ML_val \<open>const @{term "\<forall>A::'a set. P A"}\<close>
+ML_val \<open>const @{term "P \<or> Q"}\<close>
+ML_val \<open>const @{term "A \<union> B = (C::'a set)"}\<close>
+ML_val \<open>const @{term "(\<lambda>A B x::'a. A x \<or> B x) A B = C"}\<close>
+ML_val \<open>const @{term "(if P then (A::'a set) else B) = C"}\<close>
+ML_val \<open>const @{term "let A = (C::'a set) in A \<union> B"}\<close>
+ML_val \<open>const @{term "THE x::'b. P x"}\<close>
+ML_val \<open>const @{term "(\<lambda>x::'a. False)"}\<close>
+ML_val \<open>const @{term "(\<lambda>x::'a. True)"}\<close>
+ML_val \<open>const @{term "(\<lambda>x::'a. False) = (\<lambda>x::'a. False)"}\<close>
+ML_val \<open>const @{term "(\<lambda>x::'a. True) = (\<lambda>x::'a. True)"}\<close>
+ML_val \<open>const @{term "Let (a::'a) A"}\<close>
+ML_val \<open>const @{term "A (a::'a)"}\<close>
+ML_val \<open>const @{term "insert (a::'a) A = B"}\<close>
+ML_val \<open>const @{term "- (A::'a set)"}\<close>
+ML_val \<open>const @{term "finite (A::'a set)"}\<close>
+ML_val \<open>const @{term "\<not> finite (A::'a set)"}\<close>
+ML_val \<open>const @{term "finite (A::'a set set)"}\<close>
+ML_val \<open>const @{term "\<lambda>a::'a. A a \<and> \<not> B a"}\<close>
+ML_val \<open>const @{term "A < (B::'a set)"}\<close>
+ML_val \<open>const @{term "A \<le> (B::'a set)"}\<close>
+ML_val \<open>const @{term "[a::'a]"}\<close>
+ML_val \<open>const @{term "[a::'a set]"}\<close>
+ML_val \<open>const @{term "[A \<union> (B::'a set)]"}\<close>
+ML_val \<open>const @{term "[A \<union> (B::'a set)] = [C]"}\<close>
+ML_val \<open>const @{term "{(\<lambda>x::'a. x = a)} = C"}\<close>
+ML_val \<open>const @{term "(\<lambda>a::'a. \<not> A a) = B"}\<close>
+ML_val \<open>const @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> f a \<and> g a \<longrightarrow> \<not> f a"}\<close>
+ML_val \<open>const @{term "\<lambda>A B x::'a. A x \<and> B x \<and> A = B"}\<close>
+ML_val \<open>const @{term "p = (\<lambda>(x::'a) (y::'a). P x \<or> \<not> Q y)"}\<close>
+ML_val \<open>const @{term "p = (\<lambda>(x::'a) (y::'a). p x y :: bool)"}\<close>
+ML_val \<open>const @{term "p = (\<lambda>A B x. A x \<and> \<not> B x) (\<lambda>x. True) (\<lambda>y. x \<noteq> y)"}\<close>
+ML_val \<open>const @{term "p = (\<lambda>y. x \<noteq> y)"}\<close>
+ML_val \<open>const @{term "(\<lambda>x. (p::'a\<Rightarrow>bool\<Rightarrow>bool) x False)"}\<close>
+ML_val \<open>const @{term "(\<lambda>x y. (p::'a\<Rightarrow>'a\<Rightarrow>bool\<Rightarrow>bool) x y False)"}\<close>
+ML_val \<open>const @{term "f = (\<lambda>x::'a. P x \<longrightarrow> Q x)"}\<close>
+ML_val \<open>const @{term "\<forall>a::'a. P a"}\<close>
 
-ML_val {* nonconst @{term "\<forall>P (a::'a). P a"} *}
-ML_val {* nonconst @{term "THE x::'a. P x"} *}
-ML_val {* nonconst @{term "SOME x::'a. P x"} *}
-ML_val {* nonconst @{term "(\<lambda>A B x::'a. A x \<or> B x) = myunion"} *}
-ML_val {* nonconst @{term "(\<lambda>x::'a. False) = (\<lambda>x::'a. True)"} *}
-ML_val {* nonconst @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h"} *}
+ML_val \<open>nonconst @{term "\<forall>P (a::'a). P a"}\<close>
+ML_val \<open>nonconst @{term "THE x::'a. P x"}\<close>
+ML_val \<open>nonconst @{term "SOME x::'a. P x"}\<close>
+ML_val \<open>nonconst @{term "(\<lambda>A B x::'a. A x \<or> B x) = myunion"}\<close>
+ML_val \<open>nonconst @{term "(\<lambda>x::'a. False) = (\<lambda>x::'a. True)"}\<close>
+ML_val \<open>nonconst @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h"}\<close>
 
-ML_val {* mono @{prop "Q (\<forall>x::'a set. P x)"} *}
-ML_val {* mono @{prop "P (a::'a)"} *}
-ML_val {* mono @{prop "{a} = {b::'a}"} *}
-ML_val {* mono @{prop "(\<lambda>x. x = a) = (\<lambda>y. y = (b::'a))"} *}
-ML_val {* mono @{prop "(a::'a) \<in> P \<and> P \<union> P = P"} *}
-ML_val {* mono @{prop "\<forall>F::'a set set. P"} *}
-ML_val {* mono @{prop "\<not> (\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h)"} *}
-ML_val {* mono @{prop "\<not> Q (\<forall>x::'a set. P x)"} *}
-ML_val {* mono @{prop "\<not> (\<forall>x::'a. P x)"} *}
-ML_val {* mono @{prop "myall P = (P = (\<lambda>x::'a. True))"} *}
-ML_val {* mono @{prop "myall P = (P = (\<lambda>x::'a. False))"} *}
-ML_val {* mono @{prop "\<forall>x::'a. P x"} *}
-ML_val {* mono @{term "(\<lambda>A B x::'a. A x \<or> B x) \<noteq> myunion"} *}
+ML_val \<open>mono @{prop "Q (\<forall>x::'a set. P x)"}\<close>
+ML_val \<open>mono @{prop "P (a::'a)"}\<close>
+ML_val \<open>mono @{prop "{a} = {b::'a}"}\<close>
+ML_val \<open>mono @{prop "(\<lambda>x. x = a) = (\<lambda>y. y = (b::'a))"}\<close>
+ML_val \<open>mono @{prop "(a::'a) \<in> P \<and> P \<union> P = P"}\<close>
+ML_val \<open>mono @{prop "\<forall>F::'a set set. P"}\<close>
+ML_val \<open>mono @{prop "\<not> (\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h)"}\<close>
+ML_val \<open>mono @{prop "\<not> Q (\<forall>x::'a set. P x)"}\<close>
+ML_val \<open>mono @{prop "\<not> (\<forall>x::'a. P x)"}\<close>
+ML_val \<open>mono @{prop "myall P = (P = (\<lambda>x::'a. True))"}\<close>
+ML_val \<open>mono @{prop "myall P = (P = (\<lambda>x::'a. False))"}\<close>
+ML_val \<open>mono @{prop "\<forall>x::'a. P x"}\<close>
+ML_val \<open>mono @{term "(\<lambda>A B x::'a. A x \<or> B x) \<noteq> myunion"}\<close>
 
-ML_val {* nonmono @{prop "A = (\<lambda>x::'a. True) \<and> A = (\<lambda>x. False)"} *}
-ML_val {* nonmono @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h"} *}
+ML_val \<open>nonmono @{prop "A = (\<lambda>x::'a. True) \<and> A = (\<lambda>x. False)"}\<close>
+ML_val \<open>nonmono @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h"}\<close>
 
-ML {*
+ML \<open>
 val preproc_timeout = seconds 5.0
 val mono_timeout = seconds 1.0
 
@@ -188,7 +188,7 @@
       in File.append path (res ^ "\n"); writeln res end
       handle Timeout.TIMEOUT _ => ()
   in thy |> theorems_of |> List.app check_theorem end
-*}
+\<close>
 
 (*
 ML_val {* check_theory @{theory AVL2} *}
@@ -199,6 +199,6 @@
 ML_val {* check_theory @{theory Relation} *}
 *)
 
-ML {* getenv "ISABELLE_TMP" *}
+ML \<open>getenv "ISABELLE_TMP"\<close>
 
 end
--- a/src/HOL/Nitpick_Examples/Pattern_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Pattern_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick's "destroy_constrs" optimization.
 *)
 
-section {* Examples Featuring Nitpick's \textit{destroy\_constrs} Optimization *}
+section \<open>Examples Featuring Nitpick's \textit{destroy\_constrs} Optimization\<close>
 
 theory Pattern_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Record_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Record_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick applied to records.
 *)
 
-section {* Examples Featuring Nitpick Applied to Records *}
+section \<open>Examples Featuring Nitpick Applied to Records\<close>
 
 theory Record_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Refute_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Refute_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Refute examples adapted to Nitpick.
 *)
 
-section {* Refute Examples Adapted to Nitpick *}
+section \<open>Refute Examples Adapted to Nitpick\<close>
 
 theory Refute_Nits
 imports Main
@@ -23,9 +23,9 @@
 nitpick [sat_solver = SAT4J, expect = genuine] 2
 oops
 
-subsection {* Examples and Test Cases *}
+subsection \<open>Examples and Test Cases\<close>
 
-subsubsection {* Propositional logic *}
+subsubsection \<open>Propositional logic\<close>
 
 lemma "True"
 nitpick [expect = none]
@@ -64,7 +64,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Predicate logic *}
+subsubsection \<open>Predicate logic\<close>
 
 lemma "P x y z"
 nitpick [expect = genuine]
@@ -78,7 +78,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Equality *}
+subsubsection \<open>Equality\<close>
 
 lemma "P = True"
 nitpick [expect = genuine]
@@ -110,7 +110,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* First-Order Logic *}
+subsubsection \<open>First-Order Logic\<close>
 
 lemma "\<exists>x. P x"
 nitpick [expect = genuine]
@@ -148,14 +148,14 @@
 nitpick [expect = genuine]
 oops
 
-text {* A true statement (also testing names of free and bound variables being identical) *}
+text \<open>A true statement (also testing names of free and bound variables being identical)\<close>
 
 lemma "(\<forall>x y. P x y \<longrightarrow> P y x) \<longrightarrow> (\<forall>x. P x y) \<longrightarrow> P y x"
 nitpick [expect = none]
 apply fast
 done
 
-text {* "A type has at most 4 elements." *}
+text \<open>"A type has at most 4 elements."\<close>
 
 lemma "\<not> distinct [a, b, c, d, e]"
 nitpick [expect = genuine]
@@ -169,44 +169,44 @@
 nitpick [expect = genuine]
 oops
 
-text {* "Every reflexive and symmetric relation is transitive." *}
+text \<open>"Every reflexive and symmetric relation is transitive."\<close>
 
 lemma "\<lbrakk>\<forall>x. P x x; \<forall>x y. P x y \<longrightarrow> P y x\<rbrakk> \<Longrightarrow> P x y \<longrightarrow> P y z \<longrightarrow> P x z"
 nitpick [expect = genuine]
 oops
 
-text {* The ``Drinker's theorem'' *}
+text \<open>The ``Drinker's theorem''\<close>
 
 lemma "\<exists>x. f x = g x \<longrightarrow> f = g"
 nitpick [expect = none]
 apply (auto simp add: ext)
 done
 
-text {* And an incorrect version of it *}
+text \<open>And an incorrect version of it\<close>
 
 lemma "(\<exists>x. f x = g x) \<longrightarrow> f = g"
 nitpick [expect = genuine]
 oops
 
-text {* "Every function has a fixed point." *}
+text \<open>"Every function has a fixed point."\<close>
 
 lemma "\<exists>x. f x = x"
 nitpick [expect = genuine]
 oops
 
-text {* "Function composition is commutative." *}
+text \<open>"Function composition is commutative."\<close>
 
 lemma "f (g x) = g (f x)"
 nitpick [expect = genuine]
 oops
 
-text {* "Two functions that are equivalent wrt.\ the same predicate 'P' are equal." *}
+text \<open>"Two functions that are equivalent wrt.\ the same predicate 'P' are equal."\<close>
 
 lemma "((P::('a\<Rightarrow>'b)\<Rightarrow>bool) f = P g) \<longrightarrow> (f x = g x)"
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Higher-Order Logic *}
+subsubsection \<open>Higher-Order Logic\<close>
 
 lemma "\<exists>P. P"
 nitpick [expect = none]
@@ -242,7 +242,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* ``The transitive closure of an arbitrary relation is non-empty.'' *}
+text \<open>``The transitive closure of an arbitrary relation is non-empty.''\<close>
 
 definition "trans" :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> bool" where
 "trans P \<equiv> (ALL x y z. P x y \<longrightarrow> P y z \<longrightarrow> P x z)"
@@ -259,7 +259,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* ``The union of transitive closures is equal to the transitive closure of unions.'' *}
+text \<open>``The union of transitive closures is equal to the transitive closure of unions.''\<close>
 
 lemma "(\<forall>x y. (P x y \<or> R x y) \<longrightarrow> T x y) \<longrightarrow> trans T \<longrightarrow> (\<forall>Q. (\<forall>x y. (P x y \<or> R x y) \<longrightarrow> Q x y) \<longrightarrow> trans Q \<longrightarrow> subset T Q)
  \<longrightarrow> trans_closure TP P
@@ -268,19 +268,19 @@
 nitpick [expect = genuine]
 oops
 
-text {* ``Every surjective function is invertible.'' *}
+text \<open>``Every surjective function is invertible.''\<close>
 
 lemma "(\<forall>y. \<exists>x. y = f x) \<longrightarrow> (\<exists>g. \<forall>x. g (f x) = x)"
 nitpick [expect = genuine]
 oops
 
-text {* ``Every invertible function is surjective.'' *}
+text \<open>``Every invertible function is surjective.''\<close>
 
 lemma "(\<exists>g. \<forall>x. g (f x) = x) \<longrightarrow> (\<forall>y. \<exists>x. y = f x)"
 nitpick [expect = genuine]
 oops
 
-text {* ``Every point is a fixed point of some function.'' *}
+text \<open>``Every point is a fixed point of some function.''\<close>
 
 lemma "\<exists>f. f x = x"
 nitpick [card = 1-7, expect = none]
@@ -288,13 +288,13 @@
 apply simp
 done
 
-text {* Axiom of Choice: first an incorrect version *}
+text \<open>Axiom of Choice: first an incorrect version\<close>
 
 lemma "(\<forall>x. \<exists>y. P x y) \<longrightarrow> (\<exists>!f. \<forall>x. P x (f x))"
 nitpick [expect = genuine]
 oops
 
-text {* And now two correct ones *}
+text \<open>And now two correct ones\<close>
 
 lemma "(\<forall>x. \<exists>y. P x y) \<longrightarrow> (\<exists>f. \<forall>x. P x (f x))"
 nitpick [card = 1-4, expect = none]
@@ -308,7 +308,7 @@
 apply (fast intro: ext)
 done
 
-subsubsection {* Metalogic *}
+subsubsection \<open>Metalogic\<close>
 
 lemma "\<And>x. P x"
 nitpick [expect = genuine]
@@ -338,7 +338,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Schematic Variables *}
+subsubsection \<open>Schematic Variables\<close>
 
 schematic_goal "?P"
 nitpick [expect = none]
@@ -350,7 +350,7 @@
 apply auto
 done
 
-subsubsection {* Abstractions *}
+subsubsection \<open>Abstractions\<close>
 
 lemma "(\<lambda>x. x) = (\<lambda>x. y)"
 nitpick [expect = genuine]
@@ -365,7 +365,7 @@
 apply simp
 done
 
-subsubsection {* Sets *}
+subsubsection \<open>Sets\<close>
 
 lemma "P (A::'a set)"
 nitpick [expect = genuine]
@@ -408,7 +408,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* @{const undefined} *}
+subsubsection \<open>@{const undefined}\<close>
 
 lemma "undefined"
 nitpick [expect = genuine]
@@ -426,7 +426,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* @{const The} *}
+subsubsection \<open>@{const The}\<close>
 
 lemma "The P"
 nitpick [expect = genuine]
@@ -448,7 +448,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* @{const Eps} *}
+subsubsection \<open>@{const Eps}\<close>
 
 lemma "Eps P"
 nitpick [expect = genuine]
@@ -471,7 +471,7 @@
 apply (auto simp add: someI)
 done
 
-subsubsection {* Operations on Natural Numbers *}
+subsubsection \<open>Operations on Natural Numbers\<close>
 
 lemma "(x::nat) + y = 0"
 nitpick [expect = genuine]
@@ -493,7 +493,7 @@
 nitpick [card = 1, expect = genuine]
 oops
 
-text {* \<times> *}
+text \<open>\<times>\<close>
 
 lemma "P (x::'a\<times>'b)"
 nitpick [expect = genuine]
@@ -523,9 +523,9 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Subtypes (typedef), typedecl *}
+subsubsection \<open>Subtypes (typedef), typedecl\<close>
 
-text {* A completely unspecified non-empty subset of @{typ "'a"}: *}
+text \<open>A completely unspecified non-empty subset of @{typ "'a"}:\<close>
 
 definition "myTdef = insert (undefined::'a) (undefined::'a set)"
 
@@ -547,9 +547,9 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Inductive Datatypes *}
+subsubsection \<open>Inductive Datatypes\<close>
 
-text {* unit *}
+text \<open>unit\<close>
 
 lemma "P (x::unit)"
 nitpick [expect = genuine]
@@ -567,7 +567,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* option *}
+text \<open>option\<close>
 
 lemma "P (x::'a option)"
 nitpick [expect = genuine]
@@ -589,7 +589,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* + *}
+text \<open>+\<close>
 
 lemma "P (x::'a+'b)"
 nitpick [expect = genuine]
@@ -615,7 +615,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* Non-recursive datatypes *}
+text \<open>Non-recursive datatypes\<close>
 
 datatype T1 = A | B
 
@@ -712,9 +712,9 @@
 nitpick [expect = genuine]
 oops
 
-text {* Recursive datatypes *}
+text \<open>Recursive datatypes\<close>
 
-text {* nat *}
+text \<open>nat\<close>
 
 lemma "P (x::nat)"
 nitpick [expect = genuine]
@@ -750,7 +750,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* 'a list *}
+text \<open>'a list\<close>
 
 lemma "P (xs::'a list)"
 nitpick [expect = genuine]
@@ -855,7 +855,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* Mutually recursive datatypes *}
+text \<open>Mutually recursive datatypes\<close>
 
 datatype 'a aexp = Number 'a | ITE "'a bexp" "'a aexp" "'a aexp"
  and 'a bexp = Equal "'a aexp" "'a aexp"
@@ -995,9 +995,9 @@
 nitpick [expect = genuine]
 oops
 
-text {* Other datatype examples *}
+text \<open>Other datatype examples\<close>
 
-text {* Indirect recursion is implemented via mutual recursion. *}
+text \<open>Indirect recursion is implemented via mutual recursion.\<close>
 
 datatype XOpt = CX "XOpt option" | DX "bool \<Rightarrow> XOpt option"
 
@@ -1107,7 +1107,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* Taken from "Inductive datatypes in HOL", p. 8: *}
+text \<open>Taken from "Inductive datatypes in HOL", p. 8:\<close>
 
 datatype (dead 'a, 'b) T = C "'a \<Rightarrow> bool" | D "'b list"
 datatype 'c U = E "('c, 'c U) T"
@@ -1124,7 +1124,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Records *}
+subsubsection \<open>Records\<close>
 
 record ('a, 'b) point =
   xpos :: 'a
@@ -1141,7 +1141,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Inductively Defined Sets *}
+subsubsection \<open>Inductively Defined Sets\<close>
 
 inductive_set undefinedSet :: "'a set" where
 "undefined \<in> undefinedSet"
@@ -1182,7 +1182,7 @@
 nitpick [expect = genuine]
 oops
 
-subsubsection {* Examples Involving Special Functions *}
+subsubsection \<open>Examples Involving Special Functions\<close>
 
 lemma "card x = 0"
 nitpick [expect = genuine]
@@ -1212,9 +1212,9 @@
 nitpick [card = 2, expect = genuine]
 oops
 
-subsubsection {* Axiomatic Type Classes and Overloading *}
+subsubsection \<open>Axiomatic Type Classes and Overloading\<close>
 
-text {* A type class without axioms: *}
+text \<open>A type class without axioms:\<close>
 
 class classA
 
@@ -1222,7 +1222,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* An axiom with a type variable (denoting types which have at least two elements): *}
+text \<open>An axiom with a type variable (denoting types which have at least two elements):\<close>
 
 class classC =
   assumes classC_ax: "\<exists>x y. x \<noteq> y"
@@ -1235,7 +1235,7 @@
 nitpick [expect = none]
 sorry
 
-text {* A type class for which a constant is defined: *}
+text \<open>A type class for which a constant is defined:\<close>
 
 class classD =
   fixes classD_const :: "'a \<Rightarrow> 'a"
@@ -1245,7 +1245,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* A type class with multiple superclasses: *}
+text \<open>A type class with multiple superclasses:\<close>
 
 class classE = classC + classD
 
@@ -1253,7 +1253,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* OFCLASS: *}
+text \<open>OFCLASS:\<close>
 
 lemma "OFCLASS('a::type, type_class)"
 nitpick [expect = none]
@@ -1269,7 +1269,7 @@
 nitpick [expect = genuine]
 oops
 
-text {* Overloading: *}
+text \<open>Overloading:\<close>
 
 consts inverse :: "'a \<Rightarrow> 'a"
 
--- a/src/HOL/Nitpick_Examples/Special_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Special_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick's "specialize" optimization.
 *)
 
-section {* Examples Featuring Nitpick's \textit{specialize} Optimization *}
+section \<open>Examples Featuring Nitpick's \textit{specialize} Optimization\<close>
 
 theory Special_Nits
 imports Main
--- a/src/HOL/Nitpick_Examples/Tests_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Tests_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,12 +5,12 @@
 Nitpick tests.
 *)
 
-section {* Nitpick Tests *}
+section \<open>Nitpick Tests\<close>
 
 theory Tests_Nits
 imports Main
 begin
 
-ML {* () |> getenv "KODKODI" <> "" ? Nitpick_Tests.run_all_tests *}
+ML \<open>() |> getenv "KODKODI" <> "" ? Nitpick_Tests.run_all_tests\<close>
 
 end
--- a/src/HOL/Nitpick_Examples/Typedef_Nits.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nitpick_Examples/Typedef_Nits.thy	Thu May 26 17:51:22 2016 +0200
@@ -5,7 +5,7 @@
 Examples featuring Nitpick applied to typedefs.
 *)
 
-section {* Examples Featuring Nitpick Applied to Typedefs *}
+section \<open>Examples Featuring Nitpick Applied to Typedefs\<close>
 
 theory Typedef_Nits
 imports Complex_Main
--- a/src/HOL/Nominal/Examples/CK_Machine.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/CK_Machine.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
   imports "../Nominal" 
 begin
 
-text {* 
+text \<open>
   
   This theory establishes soundness and completeness for a CK-machine
   with respect to a cbv-big-step semantics. The language includes 
@@ -19,7 +19,7 @@
      @{url "http://www.cs.indiana.edu/~rpjames/lm.pdf"}
      @{url "http://www.cl.cam.ac.uk/teaching/2001/Semantics/"}
 
-*}
+\<close>
 
 atom_decl name
 
@@ -37,7 +37,7 @@
 | ZET "lam"                      (* zero test *)
 | EQI "lam" "lam"                (* equality test on numbers *)
 
-section {* Capture-Avoiding Substitution *}
+section \<open>Capture-Avoiding Substitution\<close>
 
 nominal_primrec
   subst :: "lam \<Rightarrow> name \<Rightarrow> lam \<Rightarrow> lam"  ("_[_::=_]" [100,100,100] 100)
@@ -79,7 +79,7 @@
 by (nominal_induct t avoiding: x y s rule: lam.strong_induct)
    (auto simp add: calc_atm fresh_atm abs_fresh perm_nat_def)
 
-section {* Evaluation Contexts *}
+section \<open>Evaluation Contexts\<close>
 
 datatype ctx = 
     Hole ("\<box>")  
@@ -94,7 +94,7 @@
   | CEQIL "ctx" "lam"
   | CEQIR "lam" "ctx"
 
-text {* The operation of filling a term into a context: *} 
+text \<open>The operation of filling a term into a context:\<close> 
 
 fun
   filling :: "ctx \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>")
@@ -111,7 +111,7 @@
 | "(CEQIL E t')\<lbrakk>t\<rbrakk> = EQI (E\<lbrakk>t\<rbrakk>) t'"
 | "(CEQIR t' E)\<lbrakk>t\<rbrakk> = EQI t' (E\<lbrakk>t\<rbrakk>)"
 
-text {* The operation of composing two contexts: *}
+text \<open>The operation of composing two contexts:\<close>
 
 fun 
  ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" ("_ \<circ> _")
@@ -132,7 +132,7 @@
   shows "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>"
 by (induct E1 rule: ctx.induct) (auto)
 
-text {* Composing a list (stack) of contexts. *}
+text \<open>Composing a list (stack) of contexts.\<close>
 
 fun
   ctx_composes :: "ctx list \<Rightarrow> ctx" ("_\<down>")
@@ -140,7 +140,7 @@
     "[]\<down> = \<box>"
   | "(E#Es)\<down> = (Es\<down>) \<circ> E"
 
-section {* The CK-Machine *}
+section \<open>The CK-Machine\<close>
 
 inductive
   val :: "lam\<Rightarrow>bool" 
@@ -192,7 +192,7 @@
   shows "<e1,Es1> \<mapsto>* <e2,Es2>"
 using a by (rule ms2) (rule ms1)
 
-section {* The Evaluation Relation (Big-Step Semantics) *}
+section \<open>The Evaluation Relation (Big-Step Semantics)\<close>
 
 inductive
   eval :: "lam\<Rightarrow>lam\<Rightarrow>bool" ("_ \<Down> _") 
@@ -235,7 +235,7 @@
   shows "t \<Down> t"
 using a by (induct) (auto)
 
-text {* The Completeness Property: *}
+text \<open>The Completeness Property:\<close>
 
 theorem eval_implies_machine_star_ctx:
   assumes a: "t \<Down> t'"
@@ -249,7 +249,7 @@
   shows "<t,[]> \<mapsto>* <t',[]>"
 using a by (auto dest: eval_implies_machine_star_ctx)
 
-section {* The CBV Reduction Relation (Small-Step Semantics) *}
+section \<open>The CBV Reduction Relation (Small-Step Semantics)\<close>
 
 lemma less_eqvt[eqvt]:
   fixes pi::"name prm"
@@ -348,7 +348,7 @@
 by (induct)
    (auto simp add: eval_val cbv_star_eval dest: cbvs2)
 
-text {* The Soundness Property *}
+text \<open>The Soundness Property\<close>
 
 theorem machine_star_implies_eval:
   assumes a: "<t1,[]> \<mapsto>* <t2,[]>" 
@@ -359,9 +359,9 @@
   then show "t1 \<Down> t2" using b by (simp add: cbv_star_implies_eval)
 qed
 
-section {* Typing *}
+section \<open>Typing\<close>
 
-text {* Types *}
+text \<open>Types\<close>
 
 nominal_datatype ty =
   tVAR "string"
@@ -378,18 +378,18 @@
 by (induct T rule: ty.induct)
    (auto simp add: fresh_string)
 
-text {* Typing Contexts *}
+text \<open>Typing Contexts\<close>
 
 type_synonym tctx = "(name\<times>ty) list"
 
-text {* Sub-Typing Contexts *}
+text \<open>Sub-Typing Contexts\<close>
 
 abbreviation
   "sub_tctx" :: "tctx \<Rightarrow> tctx \<Rightarrow> bool" ("_ \<subseteq> _") 
 where
   "\<Gamma>\<^sub>1 \<subseteq> \<Gamma>\<^sub>2 \<equiv> \<forall>x. x \<in> set \<Gamma>\<^sub>1 \<longrightarrow> x \<in> set \<Gamma>\<^sub>2"
 
-text {* Valid Typing Contexts *}
+text \<open>Valid Typing Contexts\<close>
 
 inductive
   valid :: "tctx \<Rightarrow> bool"
@@ -423,7 +423,7 @@
 using a1 a2 a3
 by (induct) (auto simp add: fresh_set fresh_prod fresh_atm)
 
-section {* The Typing Relation *}
+section \<open>The Typing Relation\<close>
 
 inductive
   typing :: "tctx \<Rightarrow> lam \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _") 
@@ -474,7 +474,7 @@
 by (cases rule: typing.strong_cases) 
    (auto simp add: alpha lam.inject abs_fresh ty_fresh)
 
-section {* The Type-Preservation Property for the CBV Reduction Relation *}
+section \<open>The Type-Preservation Property for the CBV Reduction Relation\<close>
 
 lemma weakening: 
   fixes \<Gamma>1 \<Gamma>2::"tctx"
@@ -534,7 +534,7 @@
 using a b
 by (induct) (auto intro: cbv_type_preservation)
 
-section {* The Type-Preservation Property for the Machine and Evaluation Relation *}
+section \<open>The Type-Preservation Property for the Machine and Evaluation Relation\<close>
 
 theorem machine_type_preservation:
   assumes a: "<t,[]> \<mapsto>* <t',[]>"
@@ -554,7 +554,7 @@
   then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: machine_type_preservation)
 qed
 
-text {* The Progress Property *}
+text \<open>The Progress Property\<close>
 
 lemma canonical_tARR[dest]:
   assumes a: "[] \<turnstile> t : T1 \<rightarrow> T2"
--- a/src/HOL/Nominal/Examples/CR.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/CR.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
 imports Lam_Funs
 begin
 
-text {* The Church-Rosser proof from Barendregt's book *}
+text \<open>The Church-Rosser proof from Barendregt's book\<close>
 
 lemma forget: 
   assumes asm: "x\<sharp>L"
@@ -95,23 +95,23 @@
   proof -
     { (*Case 1.1*)
       assume  "z=x"
-      have "(1)": "?LHS = N[y::=L]" using `z=x` by simp
-      have "(2)": "?RHS = N[y::=L]" using `z=x` `x\<noteq>y` by simp
+      have "(1)": "?LHS = N[y::=L]" using \<open>z=x\<close> by simp
+      have "(2)": "?RHS = N[y::=L]" using \<open>z=x\<close> \<open>x\<noteq>y\<close> by simp
       from "(1)" "(2)" have "?LHS = ?RHS"  by simp
     }
     moreover 
     { (*Case 1.2*)
       assume "z=y" and "z\<noteq>x" 
-      have "(1)": "?LHS = L"               using `z\<noteq>x` `z=y` by simp
-      have "(2)": "?RHS = L[x::=N[y::=L]]" using `z=y` by simp
-      have "(3)": "L[x::=N[y::=L]] = L"    using `x\<sharp>L` by (simp add: forget)
+      have "(1)": "?LHS = L"               using \<open>z\<noteq>x\<close> \<open>z=y\<close> by simp
+      have "(2)": "?RHS = L[x::=N[y::=L]]" using \<open>z=y\<close> by simp
+      have "(3)": "L[x::=N[y::=L]] = L"    using \<open>x\<sharp>L\<close> by (simp add: forget)
       from "(1)" "(2)" "(3)" have "?LHS = ?RHS" by simp
     }
     moreover 
     { (*Case 1.3*)
       assume "z\<noteq>x" and "z\<noteq>y"
-      have "(1)": "?LHS = Var z" using `z\<noteq>x` `z\<noteq>y` by simp
-      have "(2)": "?RHS = Var z" using `z\<noteq>x` `z\<noteq>y` by simp
+      have "(1)": "?LHS = Var z" using \<open>z\<noteq>x\<close> \<open>z\<noteq>y\<close> by simp
+      have "(2)": "?RHS = Var z" using \<open>z\<noteq>x\<close> \<open>z\<noteq>y\<close> by simp
       from "(1)" "(2)" have "?LHS = ?RHS" by simp
     }
     ultimately show "?LHS = ?RHS" by blast
@@ -125,10 +125,10 @@
   hence "z\<sharp>N[y::=L]" by (simp add: fresh_fact)
   show "(Lam [z].M1)[x::=N][y::=L] = (Lam [z].M1)[y::=L][x::=N[y::=L]]" (is "?LHS=?RHS") 
   proof - 
-    have "?LHS = Lam [z].(M1[x::=N][y::=L])" using `z\<sharp>x` `z\<sharp>y` `z\<sharp>N` `z\<sharp>L` by simp
-    also from ih have "\<dots> = Lam [z].(M1[y::=L][x::=N[y::=L]])" using `x\<noteq>y` `x\<sharp>L` by simp
-    also have "\<dots> = (Lam [z].(M1[y::=L]))[x::=N[y::=L]]" using `z\<sharp>x` `z\<sharp>N[y::=L]` by simp
-    also have "\<dots> = ?RHS" using  `z\<sharp>y` `z\<sharp>L` by simp
+    have "?LHS = Lam [z].(M1[x::=N][y::=L])" using \<open>z\<sharp>x\<close> \<open>z\<sharp>y\<close> \<open>z\<sharp>N\<close> \<open>z\<sharp>L\<close> by simp
+    also from ih have "\<dots> = Lam [z].(M1[y::=L][x::=N[y::=L]])" using \<open>x\<noteq>y\<close> \<open>x\<sharp>L\<close> by simp
+    also have "\<dots> = (Lam [z].(M1[y::=L]))[x::=N[y::=L]]" using \<open>z\<sharp>x\<close> \<open>z\<sharp>N[y::=L]\<close> by simp
+    also have "\<dots> = ?RHS" using  \<open>z\<sharp>y\<close> \<open>z\<sharp>L\<close> by simp
     finally show "?LHS = ?RHS" .
   qed
 next
@@ -143,7 +143,7 @@
 by (nominal_induct M avoiding: x y N L rule: lam.strong_induct)
    (auto simp add: fresh_fact forget)
 
-section {* Beta Reduction *}
+section \<open>Beta Reduction\<close>
 
 inductive
   "Beta" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>\<beta> _" [80,80] 80)
@@ -173,7 +173,7 @@
 using a2 a1
 by (induct) (auto)
 
-section {* One-Reduction *}
+section \<open>One-Reduction\<close>
 
 inductive
   One :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>1 _" [80,80] 80)
@@ -287,7 +287,7 @@
 by (cases rule: One.strong_cases [where a="a" and aa="a"])
    (auto dest: one_abs simp add: lam.inject abs_fresh alpha fresh_prod)
 
-text {* first case in Lemma 3.2.4*}
+text \<open>first case in Lemma 3.2.4\<close>
 
 lemma one_subst_aux:
   assumes a: "N\<longrightarrow>\<^sub>1N'"
--- a/src/HOL/Nominal/Examples/CR_Takahashi.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/CR_Takahashi.thy	Thu May 26 17:51:22 2016 +0200
@@ -30,7 +30,7 @@
 apply(fresh_guess)+
 done
 
-section {* Lemmas about Capture-Avoiding Substitution *}
+section \<open>Lemmas about Capture-Avoiding Substitution\<close>
  
 lemma  subst_eqvt[eqvt]:
   fixes pi::"name prm"
@@ -63,7 +63,7 @@
 by (nominal_induct t avoiding: x y s rule: lam.strong_induct)
    (auto simp add: swap_simps fresh_atm abs_fresh)
 
-section {* Beta-Reduction *}
+section \<open>Beta-Reduction\<close>
 
 inductive 
   "Beta" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>\<beta> _" [80,80] 80)
@@ -73,7 +73,7 @@
 | b3[intro]: "t1 \<longrightarrow>\<^sub>\<beta> t2 \<Longrightarrow> Lam [x].t1 \<longrightarrow>\<^sub>\<beta> Lam [x].t2"
 | b4[intro]: "App (Lam [x].t) s \<longrightarrow>\<^sub>\<beta> t[x::=s]"
 
-section {* Transitive Closure of Beta *}
+section \<open>Transitive Closure of Beta\<close>
 
 inductive 
   "Beta_star" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>\<beta>\<^sup>* _" [80,80] 80)
@@ -82,7 +82,7 @@
 | bs2[intro]: "t \<longrightarrow>\<^sub>\<beta> s \<Longrightarrow> t \<longrightarrow>\<^sub>\<beta>\<^sup>* s"
 | bs3[intro,trans]: "\<lbrakk>t1\<longrightarrow>\<^sub>\<beta>\<^sup>* t2; t2 \<longrightarrow>\<^sub>\<beta>\<^sup>* t3\<rbrakk> \<Longrightarrow> t1 \<longrightarrow>\<^sub>\<beta>\<^sup>* t3"
 
-section {* One-Reduction *}
+section \<open>One-Reduction\<close>
 
 inductive 
   One :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>1 _" [80,80] 80)
@@ -145,7 +145,7 @@
 by (cases rule: One.strong_cases)
    (auto dest: One_Lam simp add: lam.inject abs_fresh alpha fresh_prod)
 
-section {* Transitive Closure of One *}
+section \<open>Transitive Closure of One\<close>
 
 inductive 
   "One_star" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>1\<^sup>* _" [80,80] 80)
@@ -154,7 +154,7 @@
 | os2[intro]: "t \<longrightarrow>\<^sub>1 s \<Longrightarrow> t \<longrightarrow>\<^sub>1\<^sup>* s"
 | os3[intro]: "\<lbrakk>t1\<longrightarrow>\<^sub>1\<^sup>* t2; t2 \<longrightarrow>\<^sub>1\<^sup>* t3\<rbrakk> \<Longrightarrow> t1 \<longrightarrow>\<^sub>1\<^sup>* t3"
 
-section {* Complete Development Reduction *}
+section \<open>Complete Development Reduction\<close>
 
 inductive 
   Dev :: "lam \<Rightarrow> lam \<Rightarrow> bool" (" _ \<longrightarrow>\<^sub>d _" [80,80]80)
@@ -248,7 +248,7 @@
     shows "\<exists>t3. t2 \<longrightarrow>\<^sub>1\<^sup>* t3 \<and> t1 \<longrightarrow>\<^sub>1\<^sup>* t3"
 using a Rectangle_for_One by (induct arbitrary: t2) (blast)+
 
-section {* Establishing the Equivalence of Beta-star and One-star *}
+section \<open>Establishing the Equivalence of Beta-star and One-star\<close>
 
 lemma Beta_Lam_cong: 
   assumes a: "t1 \<longrightarrow>\<^sub>\<beta>\<^sup>* t2" 
@@ -295,7 +295,7 @@
   then show "t1 \<longrightarrow>\<^sub>1\<^sup>* t2" by (induct) (auto intro: Beta_implies_One_star)
 qed
 
-section {* The Church-Rosser Theorem *}
+section \<open>The Church-Rosser Theorem\<close>
 
 theorem CR_for_Beta_star: 
   assumes a: "t \<longrightarrow>\<^sub>\<beta>\<^sup>* t1" "t\<longrightarrow>\<^sub>\<beta>\<^sup>* t2" 
--- a/src/HOL/Nominal/Examples/Class1.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Class1.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,11 +2,11 @@
 imports "../Nominal" 
 begin
 
-section {* Term-Calculus from Urban's PhD *}
+section \<open>Term-Calculus from Urban's PhD\<close>
 
 atom_decl name coname
 
-text {* types *}
+text \<open>types\<close>
 
 nominal_datatype ty =
     PR "string"
@@ -44,7 +44,7 @@
 by (nominal_induct T rule: ty.strong_induct)
    (auto simp add: fresh_string)
 
-text {* terms *}
+text \<open>terms\<close>
 
 nominal_datatype trm = 
     Ax   "name" "coname"
@@ -60,15 +60,15 @@
   | ImpR "\<guillemotleft>name\<guillemotright>(\<guillemotleft>coname\<guillemotright>trm)" "coname"       ("ImpR (_).<_>._ _" [100,100,100,100] 100)
   | ImpL "\<guillemotleft>coname\<guillemotright>trm" "\<guillemotleft>name\<guillemotright>trm" "name"     ("ImpL <_>._ (_)._ _" [100,100,100,100,100] 100)
 
-text {* named terms *}
+text \<open>named terms\<close>
 
 nominal_datatype ntrm = Na "\<guillemotleft>name\<guillemotright>trm" ("((_):_)" [100,100] 100)
 
-text {* conamed terms *}
+text \<open>conamed terms\<close>
 
 nominal_datatype ctrm = Co "\<guillemotleft>coname\<guillemotright>trm" ("(<_>:_)" [100,100] 100)
 
-text {* renaming functions *}
+text \<open>renaming functions\<close>
 
 nominal_primrec (freshness_context: "(d::coname,e::coname)") 
   crename :: "trm \<Rightarrow> coname \<Rightarrow> coname \<Rightarrow> trm"  ("_[_\<turnstile>c>_]" [100,100,100] 100) 
@@ -307,7 +307,7 @@
 apply(simp_all add: trm.inject split: if_splits)
 done
 
-text {* substitution functions *}
+text \<open>substitution functions\<close>
 
 lemma fresh_perm_coname:
   fixes c::"coname"
@@ -2835,7 +2835,7 @@
 lemmas subst_comm' = substn_crename_comm' substc_crename_comm'
                      substn_nrename_comm' substc_nrename_comm'
 
-text {* typing contexts *}
+text \<open>typing contexts\<close>
 
 type_synonym ctxtn = "(name\<times>ty) list"
 type_synonym ctxtc = "(coname\<times>ty) list"
@@ -2868,7 +2868,7 @@
   show "x\<sharp>\<Delta>" by (induct \<Delta>) (auto simp add: fresh_list_nil fresh_list_cons fresh_prod fresh_atm fresh_ty)
 qed
 
-text {* cut-reductions *}
+text \<open>cut-reductions\<close>
 
 declare abs_perm[eqvt]
 
@@ -4692,7 +4692,7 @@
   finally show ?thesis by simp
 qed
 
-text {* axioms do not reduce *}
+text \<open>axioms do not reduce\<close>
 
 lemma ax_do_not_l_reduce:
   shows "Ax x a \<longrightarrow>\<^sub>l M \<Longrightarrow> False"
@@ -5160,7 +5160,7 @@
 apply(simp add: calc_atm perm_swap)
 done
 
-text {* Transitive Closure*}
+text \<open>Transitive Closure\<close>
 
 abbreviation
  a_star_redu :: "trm \<Rightarrow> trm \<Rightarrow> bool" ("_ \<longrightarrow>\<^sub>a* _" [100,100] 100)
@@ -5189,7 +5189,7 @@
 using a2 a1
 by (induct) (auto)
 
-text {* congruence rules for \<open>\<longrightarrow>\<^sub>a*\<close> *}
+text \<open>congruence rules for \<open>\<longrightarrow>\<^sub>a*\<close>\<close>
 
 lemma ax_do_not_a_star_reduce:
   shows "Ax x a \<longrightarrow>\<^sub>a* M \<Longrightarrow> M = Ax x a"
@@ -5379,7 +5379,7 @@
 apply(auto simp add: alpha trm.inject)
 done
 
-text {* Substitution *}
+text \<open>Substitution\<close>
 
 lemma subst_not_fin1:
   shows "\<not>fin(M{x:=<c>.P}) x"
@@ -5644,7 +5644,7 @@
 apply(drule fic_elims, simp)
 done
 
-text {* Reductions *}
+text \<open>Reductions\<close>
 
 lemma fin_l_reduce:
   assumes  a: "fin M x"
@@ -5782,7 +5782,7 @@
 apply(auto simp add: fic_a_reduce)
 done
 
-text {* substitution properties *}
+text \<open>substitution properties\<close>
 
 lemma subst_with_ax1:
   shows "M{x:=<a>.Ax y a} \<longrightarrow>\<^sub>a* M[x\<turnstile>n>y]"
@@ -6295,7 +6295,7 @@
     using fs by simp
 qed
 
-text {* substitution lemmas *}
+text \<open>substitution lemmas\<close>
 
 lemma not_Ax1:
   shows "\<not>(b\<sharp>M) \<Longrightarrow> M{b:=(y).Q} \<noteq> Ax x a"
--- a/src/HOL/Nominal/Examples/Class2.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Class2.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
 imports Class1
 begin
 
-text {* Reduction *}
+text \<open>Reduction\<close>
 
 lemma fin_not_Cut:
   assumes a: "fin M x"
@@ -1821,9 +1821,9 @@
 apply(auto)
 done
 
-text {* Candidates and SN *}
+text \<open>Candidates and SN\<close>
 
-text {* SNa *}
+text \<open>SNa\<close>
 
 inductive 
   SNa :: "trm \<Rightarrow> bool"
@@ -2091,7 +2091,7 @@
 apply(perm_simp)
 done
 
-text {* set operators *}
+text \<open>set operators\<close>
 
 definition AXIOMSn :: "ty \<Rightarrow> ntrm set" where
   "AXIOMSn B \<equiv> { (x):(Ax y b) | x y b. True }"
@@ -2873,7 +2873,7 @@
 apply(simp_all)
 done
 
-text {* Candidates *}
+text \<open>Candidates\<close>
 
 lemma test1:
   shows "x\<in>(X\<union>Y) = (x\<in>X \<or> x\<in>Y)"
@@ -3447,7 +3447,7 @@
   }
 qed
 
-text {* Elimination rules for the set-operators *}
+text \<open>Elimination rules for the set-operators\<close>
 
 lemma BINDINGc_elim:
   assumes a: "<a>:M \<in> BINDINGc B (\<parallel>(B)\<parallel>)"
@@ -5178,7 +5178,7 @@
 apply(perm_simp add: CAND_eqvt_name CAND_eqvt_coname csubst_eqvt nsubst_eqvt)
 done
 
-text {* Main lemma 1 *}
+text \<open>Main lemma 1\<close>
 
 lemma AXIOMS_imply_SNa:
   shows "<a>:M \<in> AXIOMSc B \<Longrightarrow> SNa M"
@@ -5484,7 +5484,7 @@
   }
 qed 
 
-text {* Main lemma 2 *}
+text \<open>Main lemma 2\<close>
 
 lemma AXIOMS_preserved:
   shows "<a>:M \<in> AXIOMSc B \<Longrightarrow> M \<longrightarrow>\<^sub>a* M' \<Longrightarrow> <a>:M' \<in> AXIOMSc B"
--- a/src/HOL/Nominal/Examples/Class3.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Class3.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
 imports Class2
 begin
 
-text {* 3rd Main Lemma *}
+text \<open>3rd Main Lemma\<close>
 
 lemma Cut_a_redu_elim:
   assumes a: "Cut <a>.M (x).N \<longrightarrow>\<^sub>a R"
@@ -2246,7 +2246,7 @@
 apply(blast)+
 done
 
-text {* helper-stuff to set up the induction *}
+text \<open>helper-stuff to set up the induction\<close>
 
 abbreviation
   SNa_set :: "trm set"
@@ -2359,7 +2359,7 @@
 apply(simp add: alpha fresh_prod fresh_atm)
 done
 
-text {* 3rd lemma *}
+text \<open>3rd lemma\<close>
 
 lemma CUT_SNa_aux:
   assumes a1: "<a>:M \<in> (\<parallel><B>\<parallel>)"
@@ -4927,7 +4927,7 @@
 done
 
 
-text {* typing relation *}
+text \<open>typing relation\<close>
 
 inductive
    typing :: "ctxtn \<Rightarrow> trm \<Rightarrow> ctxtc \<Rightarrow> bool" ("_ \<turnstile> _ \<turnstile> _" [100,100,100] 100)
--- a/src/HOL/Nominal/Examples/Compile.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Compile.thy	Thu May 26 17:51:22 2016 +0200
@@ -46,7 +46,7 @@
   | ISeq "trmI" "trmI" ("_;;_" [100,100] 100)
   | Iif "trmI" "trmI" "trmI"
 
-text {* valid contexts *}
+text \<open>valid contexts\<close>
 
 inductive 
   valid :: "(name\<times>'a::pt_name) list \<Rightarrow> bool"
@@ -54,7 +54,7 @@
   v1[intro]: "valid []"
 | v2[intro]: "\<lbrakk>valid \<Gamma>;a\<sharp>\<Gamma>\<rbrakk>\<Longrightarrow> valid ((a,\<sigma>)#\<Gamma>)" (* maybe dom of \<Gamma> *)
 
-text {* typing judgements for trms *}
+text \<open>typing judgements for trms\<close>
 
 inductive 
   typing :: "(name\<times>ty) list\<Rightarrow>trm\<Rightarrow>ty\<Rightarrow>bool" (" _ \<turnstile> _ : _ " [80,80,80] 80)
@@ -72,7 +72,7 @@
              ((x1,Data(\<sigma>1))#\<Gamma>) \<turnstile> e1 : \<tau>; ((x2,Data(\<sigma>2))#\<Gamma>) \<turnstile> e2 : \<tau>\<rbrakk> 
              \<Longrightarrow> \<Gamma> \<turnstile> (Case e of inl x1 \<rightarrow> e1 | inr x2 \<rightarrow> e2) : \<tau>"
 
-text {* typing judgements for Itrms *}
+text \<open>typing judgements for Itrms\<close>
 
 inductive 
   Ityping :: "(name\<times>tyI) list\<Rightarrow>trmI\<Rightarrow>tyI\<Rightarrow>bool" (" _ I\<turnstile> _ : _ " [80,80,80] 80)
@@ -88,7 +88,7 @@
 | t8[intro]: "\<lbrakk>\<Gamma> I\<turnstile> e1 : DataI(NatI); \<Gamma> I\<turnstile> e2 : \<tau>\<rbrakk> \<Longrightarrow> \<Gamma> I\<turnstile> e1;;e2 : \<tau>"
 | t9[intro]: "\<lbrakk>\<Gamma> I\<turnstile> e: DataI(NatI); \<Gamma> I\<turnstile> e1 : \<tau>; \<Gamma> I\<turnstile> e2 : \<tau>\<rbrakk> \<Longrightarrow> \<Gamma> I\<turnstile> Iif e e1 e2 : \<tau>"
 
-text {* capture-avoiding substitution *}
+text \<open>capture-avoiding substitution\<close>
 
 class subst =
   fixes subst :: "'a \<Rightarrow> name \<Rightarrow> 'a \<Rightarrow> 'a"  ("_[_::=_]" [100,100,100] 100)
@@ -179,7 +179,7 @@
 apply(simp add: abs_supp)
 done
 
-text {* big-step evaluation for trms *}
+text \<open>big-step evaluation for trms\<close>
 
 inductive 
   big :: "trm\<Rightarrow>trm\<Rightarrow>bool" ("_ \<Down> _" [80,80] 80)
@@ -210,7 +210,7 @@
 | m8[intro]: "\<lbrakk>(m,e)I\<Down>(m',INat(n)); n\<noteq>0; (m',e1)I\<Down>(m'',e)\<rbrakk> \<Longrightarrow> (m,Iif e e1 e2)I\<Down>(m'',e)"
 | m9[intro]: "\<lbrakk>(m,e)I\<Down>(m',INat(0)); (m',e2)I\<Down>(m'',e)\<rbrakk> \<Longrightarrow> (m,Iif e e1 e2)I\<Down>(m'',e)"
 
-text {* Translation functions *}
+text \<open>Translation functions\<close>
 
 nominal_primrec
   trans :: "trm \<Rightarrow> trmI"
--- a/src/HOL/Nominal/Examples/Contexts.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Contexts.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
 imports "../Nominal"
 begin
 
-text {* 
+text \<open>
   
   We show here that the Plotkin-style of defining
   beta-reduction (based on congruence rules) is
@@ -14,23 +14,23 @@
   of filling a term into a context produces an alpha-equivalent 
   term. 
 
-*}
+\<close>
 
 atom_decl name
 
-text {* 
+text \<open>
   Lambda-Terms - the Lam-term constructor binds a name
-*}
+\<close>
 
 nominal_datatype lam = 
     Var "name"
   | App "lam" "lam"
   | Lam "\<guillemotleft>name\<guillemotright>lam" ("Lam [_]._" [100,100] 100)
 
-text {* 
+text \<open>
   Contexts - the lambda case in contexts does not bind 
   a name, even if we introduce the notation [_]._ for CLam.
-*}
+\<close>
 
 nominal_datatype ctx = 
     Hole ("\<box>" 1000)  
@@ -38,7 +38,7 @@
   | CAppR "lam" "ctx" 
   | CLam "name" "ctx"  ("CLam [_]._" [100,100] 100) 
 
-text {* Capture-Avoiding Substitution *}
+text \<open>Capture-Avoiding Substitution\<close>
 
 nominal_primrec
   subst :: "lam \<Rightarrow> name \<Rightarrow> lam \<Rightarrow> lam"  ("_[_::=_]" [100,100,100] 100)
@@ -52,10 +52,10 @@
 apply(fresh_guess)+
 done
 
-text {*
+text \<open>
   Filling is the operation that fills a term into a hole. 
   This operation is possibly capturing.
-*}
+\<close>
 
 nominal_primrec
   filling :: "ctx \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>" [100,100] 100)
@@ -66,17 +66,17 @@
 | "(CLam [x].E)\<lbrakk>t\<rbrakk> = Lam [x].(E\<lbrakk>t\<rbrakk>)" 
 by (rule TrueI)+
 
-text {* 
+text \<open>
   While contexts themselves are not alpha-equivalence classes, the 
   filling operation produces an alpha-equivalent lambda-term. 
-*}
+\<close>
 
 lemma alpha_test: 
   shows "x\<noteq>y \<Longrightarrow> (CLam [x].\<box>) \<noteq> (CLam [y].\<box>)"
   and   "(CLam [x].\<box>)\<lbrakk>Var x\<rbrakk> = (CLam [y].\<box>)\<lbrakk>Var y\<rbrakk>"
 by (auto simp add: alpha ctx.inject lam.inject calc_atm fresh_atm) 
 
-text {* The composition of two contexts. *}
+text \<open>The composition of two contexts.\<close>
 
 nominal_primrec
  ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" ("_ \<circ> _" [100,100] 100)
@@ -91,14 +91,14 @@
   shows "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>"
 by (induct E1 rule: ctx.induct) (auto)
 
-text {* Beta-reduction via contexts in the Felleisen-Hieb style. *}
+text \<open>Beta-reduction via contexts in the Felleisen-Hieb style.\<close>
 
 inductive
   ctx_red :: "lam\<Rightarrow>lam\<Rightarrow>bool" ("_ \<longrightarrow>x _" [80,80] 80) 
 where
   xbeta[intro]: "E\<lbrakk>App (Lam [x].t) t'\<rbrakk> \<longrightarrow>x E\<lbrakk>t[x::=t']\<rbrakk>" 
 
-text {* Beta-reduction via congruence rules in the Plotkin style. *}
+text \<open>Beta-reduction via congruence rules in the Plotkin style.\<close>
 
 inductive
   cong_red :: "lam\<Rightarrow>lam\<Rightarrow>bool" ("_ \<longrightarrow>o _" [80,80] 80) 
@@ -108,7 +108,7 @@
 | oapp2[intro]: "t \<longrightarrow>o t' \<Longrightarrow> App t2 t \<longrightarrow>o App t2 t'"
 | olam[intro]:  "t \<longrightarrow>o t' \<Longrightarrow> Lam [x].t \<longrightarrow>o Lam [x].t'"
 
-text {* The proof that shows both relations are equal. *}
+text \<open>The proof that shows both relations are equal.\<close>
 
 lemma cong_red_in_ctx:
   assumes a: "t \<longrightarrow>o t'"
--- a/src/HOL/Nominal/Examples/Crary.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Crary.thy	Thu May 26 17:51:22 2016 +0200
@@ -65,7 +65,7 @@
   shows "size T > 0"
 by (nominal_induct rule: ty.strong_induct) (simp_all)
 
-section {* Substitutions *}
+section \<open>Substitutions\<close>
 
 fun
   lookup :: "Subst \<Rightarrow> name \<Rightarrow> trm"   
@@ -259,7 +259,7 @@
   then show "\<theta><(Lam [n].t)[x::=u]> = \<theta><Lam [n].t>[x::=\<theta><u>]" using fs by auto
 qed (auto)
 
-section {* Typing *}
+section \<open>Typing\<close>
 
 inductive
   valid :: "Ctxt \<Rightarrow> bool"
@@ -336,7 +336,7 @@
 declare ty.inject [simp del]
 
 
-section {* Definitional Equivalence *}
+section \<open>Definitional Equivalence\<close>
 
 inductive
   def_equiv :: "Ctxt\<Rightarrow>trm\<Rightarrow>trm\<Rightarrow>ty\<Rightarrow>bool" ("_ \<turnstile> _ \<equiv> _ : _" [60,60] 60) 
@@ -362,7 +362,7 @@
   shows "valid \<Gamma>"
 using a by (induct) (auto elim: typing_implies_valid)
 
-section {* Weak Head Reduction *}
+section \<open>Weak Head Reduction\<close>
 
 inductive
   whr_def :: "trm\<Rightarrow>trm\<Rightarrow>bool" ("_ \<leadsto> _" [80,80] 80) 
@@ -389,7 +389,7 @@
 
 equivariance whr_def
 
-section {* Weak Head Normalisation *}
+section \<open>Weak Head Normalisation\<close>
 
 abbreviation 
  nf :: "trm \<Rightarrow> bool" ("_ \<leadsto>|" [100] 100)
@@ -440,7 +440,7 @@
 qed (auto)
 
 
-section {* Algorithmic Term Equivalence and Algorithmic Path Equivalence *}
+section \<open>Algorithmic Term Equivalence and Algorithmic Path Equivalence\<close>
 
 inductive
   alg_equiv :: "Ctxt\<Rightarrow>trm\<Rightarrow>trm\<Rightarrow>ty\<Rightarrow>bool" ("_ \<turnstile> _ \<Leftrightarrow> _ : _" [60,60,60,60] 60) 
@@ -605,7 +605,7 @@
 using assms
 by (induct rule: alg_equiv_alg_path_equiv.inducts(2)) (simp, auto)
 
-section {* Logical Equivalence *}
+section \<open>Logical Equivalence\<close>
 
 function log_equiv :: "(Ctxt \<Rightarrow> trm \<Rightarrow> trm \<Rightarrow> ty \<Rightarrow> bool)" ("_ \<turnstile> _ is _ : _" [60,60,60,60] 60) 
 where    
@@ -955,10 +955,10 @@
   then show  "\<Gamma> \<turnstile> s \<Leftrightarrow> t : T" using main_lemma(1) val by simp
 qed
 
-text {* We leave soundness as an exercise - just like Crary in the ATS book :-) \\ 
+text \<open>We leave soundness as an exercise - just like Crary in the ATS book :-) \\ 
  @{prop[mode=IfThen] "\<lbrakk>\<Gamma> \<turnstile> s \<Leftrightarrow> t : T; \<Gamma> \<turnstile> t : T; \<Gamma> \<turnstile> s : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s \<equiv> t : T"} \\
  @{prop "\<lbrakk>\<Gamma> \<turnstile> s \<leftrightarrow> t : T; \<Gamma> \<turnstile> t : T; \<Gamma> \<turnstile> s : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s \<equiv> t : T"} 
-*}
+\<close>
 
 end
 
--- a/src/HOL/Nominal/Examples/Fsub.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Fsub.thy	Thu May 26 17:51:22 2016 +0200
@@ -4,7 +4,7 @@
 begin
 (*>*)
 
-text{* Authors: Christian Urban,
+text\<open>Authors: Christian Urban,
                 Benjamin Pierce,
                 Dimitrios Vytiniotis
                 Stephanie Weirich
@@ -12,26 +12,26 @@
                 Julien Narboux
                 Stefan Berghofer
 
-       with great help from Markus Wenzel. *}
+       with great help from Markus Wenzel.\<close>
 
-section {* Types for Names, Nominal Datatype Declaration for Types and Terms *}
+section \<open>Types for Names, Nominal Datatype Declaration for Types and Terms\<close>
 
 no_syntax
   "_Map" :: "maplets => 'a \<rightharpoonup> 'b"  ("(1[_])")
 
-text {* The main point of this solution is to use names everywhere (be they bound, 
+text \<open>The main point of this solution is to use names everywhere (be they bound, 
   binding or free). In System \FSUB{} there are two kinds of names corresponding to 
   type-variables and to term-variables. These two kinds of names are represented in 
-  the nominal datatype package as atom-types @{text "tyvrs"} and @{text "vrs"}: *}
+  the nominal datatype package as atom-types \<open>tyvrs\<close> and \<open>vrs\<close>:\<close>
 
 atom_decl tyvrs vrs
 
-text{* There are numerous facts that come with this declaration: for example that 
-  there are infinitely many elements in @{text "tyvrs"} and @{text "vrs"}. *}
+text\<open>There are numerous facts that come with this declaration: for example that 
+  there are infinitely many elements in \<open>tyvrs\<close> and \<open>vrs\<close>.\<close>
 
-text{* The constructors for types and terms in System \FSUB{} contain abstractions 
+text\<open>The constructors for types and terms in System \FSUB{} contain abstractions 
   over type-variables and term-variables. The nominal datatype package uses 
-  @{text "\<guillemotleft>\<dots>\<guillemotright>\<dots>"} to indicate where abstractions occur. *}
+  \<open>\<guillemotleft>\<dots>\<guillemotright>\<dots>\<close> to indicate where abstractions occur.\<close>
 
 nominal_datatype ty = 
     Tvar   "tyvrs"
@@ -46,10 +46,10 @@
   | App   "trm" "trm" (infixl "\<cdot>" 200)
   | TApp  "trm" "ty"  (infixl "\<cdot>\<^sub>\<tau>" 200)
 
-text {* To be polite to the eye, some more familiar notation is introduced. 
+text \<open>To be polite to the eye, some more familiar notation is introduced. 
   Because of the change in the order of arguments, one needs to use 
   translation rules, instead of syntax annotations at the term-constructors
-  as given above for @{term "Arrow"}. *}
+  as given above for @{term "Arrow"}.\<close>
 
 abbreviation
   Forall_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty"  ("(3\<forall>_<:_./ _)" [0, 0, 10] 10) 
@@ -66,19 +66,19 @@
 where
   "\<lambda>X<:T. t \<equiv> trm.TAbs X t T"
 
-text {* Again there are numerous facts that are proved automatically for @{typ "ty"} 
-  and @{typ "trm"}: for example that the set of free variables, i.e.~the @{text "support"}, 
+text \<open>Again there are numerous facts that are proved automatically for @{typ "ty"} 
+  and @{typ "trm"}: for example that the set of free variables, i.e.~the \<open>support\<close>, 
   is finite. However note that nominal-datatype declarations do \emph{not} define 
   ``classical" constructor-based datatypes, but rather define $\alpha$-equivalence 
   classes---we can for example show that $\alpha$-equivalent @{typ "ty"}s 
-  and @{typ "trm"}s are equal: *}
+  and @{typ "trm"}s are equal:\<close>
 
 lemma alpha_illustration:
   shows "(\<forall>X<:T. Tvar X) = (\<forall>Y<:T. Tvar Y)"
   and   "(\<lambda>x:T. Var x) = (\<lambda>y:T. Var y)"
   by (simp_all add: ty.inject trm.inject alpha calc_atm fresh_atm)
 
-section {* SubTyping Contexts *}
+section \<open>SubTyping Contexts\<close>
 
 nominal_datatype binding = 
     VarB vrs ty 
@@ -86,12 +86,12 @@
 
 type_synonym env = "binding list"
 
-text {* Typing contexts are represented as lists that ``grow" on the left; we
+text \<open>Typing contexts are represented as lists that ``grow" on the left; we
  thereby deviate from the convention in the POPLmark-paper. The lists contain
-  pairs of type-variables and types (this is sufficient for Part 1A). *}
+  pairs of type-variables and types (this is sufficient for Part 1A).\<close>
 
-text {* In order to state validity-conditions for typing-contexts, the notion of
-  a @{text "dom"} of a typing-context is handy. *}
+text \<open>In order to state validity-conditions for typing-contexts, the notion of
+  a \<open>dom\<close> of a typing-context is handy.\<close>
 
 nominal_primrec
   "tyvrs_of" :: "binding \<Rightarrow> tyvrs set"
@@ -218,11 +218,11 @@
 apply(auto simp add: fresh_prod fresh_list_cons tyvrs_fresh) 
 done
 
-text {* Not all lists of type @{typ "env"} are well-formed. One condition
+text \<open>Not all lists of type @{typ "env"} are well-formed. One condition
   requires that in @{term "TVarB X S#\<Gamma>"} all free variables of @{term "S"} must be 
-  in the @{term "ty_dom"} of @{term "\<Gamma>"}, that is @{term "S"} must be @{text "closed"} 
+  in the @{term "ty_dom"} of @{term "\<Gamma>"}, that is @{term "S"} must be \<open>closed\<close> 
   in @{term "\<Gamma>"}. The set of free variables of @{term "S"} is the 
-  @{text "support"} of @{term "S"}. *}
+  \<open>support\<close> of @{term "S"}.\<close>
 
 definition "closed_in" :: "ty \<Rightarrow> env \<Rightarrow> bool" ("_ closed'_in _" [100,100] 100) where
   "S closed_in \<Gamma> \<equiv> (supp S)\<subseteq>(ty_dom \<Gamma>)"
@@ -284,7 +284,7 @@
 lemma closed_in_fresh: "(X::tyvrs) \<sharp> ty_dom \<Gamma> \<Longrightarrow> T closed_in \<Gamma> \<Longrightarrow> X \<sharp> T"
   by (auto simp add: closed_in_def fresh_def ty_dom_supp)
 
-text {* Now validity of a context is a straightforward inductive definition. *}
+text \<open>Now validity of a context is a straightforward inductive definition.\<close>
   
 inductive
   valid_rel :: "env \<Rightarrow> bool" ("\<turnstile> _ ok" [100] 100)
@@ -332,7 +332,7 @@
        (auto elim: validE intro!: valid_cons simp add: doms_append closed_in_def)
 qed
 
-text {* Well-formed contexts have a unique type-binding for a type-variable. *} 
+text \<open>Well-formed contexts have a unique type-binding for a type-variable.\<close> 
 
 lemma uniqueness_of_ctxt:
   fixes \<Gamma>::"env"
@@ -383,7 +383,7 @@
   ultimately show "T=S" by (auto simp add: binding.inject)
 qed (auto)
 
-section {* Size and Capture-Avoiding Substitution for Types *}
+section \<open>Size and Capture-Avoiding Substitution for Types\<close>
 
 nominal_primrec
   size_ty :: "ty \<Rightarrow> nat"
@@ -590,14 +590,14 @@
   by (nominal_induct t avoiding: X Y U rule: trm.strong_induct)
     (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux type_subst_rename)
 
-section {* Subtyping-Relation *}
+section \<open>Subtyping-Relation\<close>
 
-text {* The definition for the subtyping-relation follows quite closely what is written 
+text \<open>The definition for the subtyping-relation follows quite closely what is written 
   in the POPLmark-paper, except for the premises dealing with well-formed contexts and 
-  the freshness constraint @{term "X\<sharp>\<Gamma>"} in the @{text "S_Forall"}-rule. (The freshness
+  the freshness constraint @{term "X\<sharp>\<Gamma>"} in the \<open>S_Forall\<close>-rule. (The freshness
   constraint is specific to the \emph{nominal approach}. Note, however, that the constraint
   does \emph{not} make the subtyping-relation ``partial"\ldots because we work over
-  $\alpha$-equivalence classes.) *}
+  $\alpha$-equivalence classes.)\<close>
 
 inductive 
   subtype_of :: "env \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool"   ("_\<turnstile>_<:_" [100,100,100] 100)
@@ -670,7 +670,7 @@
   apply (force simp add: closed_in_fresh dest: subtype_implies_closed subtype_implies_ok)+
   done
 
-section {* Reflexivity of Subtyping *}
+section \<open>Reflexivity of Subtyping\<close>
 
 lemma subtype_reflexivity:
   assumes a: "\<turnstile> \<Gamma> ok"
@@ -702,18 +702,18 @@
 using a b
 apply(nominal_induct T avoiding: \<Gamma> rule: ty.strong_induct)
 apply(auto simp add: ty.supp abs_supp supp_atm closed_in_def)
-  --{* Too bad that this instantiation cannot be found automatically by
+  \<comment>\<open>Too bad that this instantiation cannot be found automatically by
   \isakeyword{auto}; \isakeyword{blast} would find it if we had not used 
-  an explicit definition for @{text "closed_in_def"}. *}
+  an explicit definition for \<open>closed_in_def\<close>.\<close>
 apply(drule_tac x="(TVarB tyvrs ty2)#\<Gamma>" in meta_spec)
 apply(force dest: fresh_dom simp add: closed_in_def)
 done
 
-section {* Weakening *}
+section \<open>Weakening\<close>
 
-text {* In order to prove weakening we introduce the notion of a type-context extending 
+text \<open>In order to prove weakening we introduce the notion of a type-context extending 
  another. This generalization seems to make the proof of weakening
-  smoother than if we had strictly adhered to the version in the POPLmark-paper. *}
+  smoother than if we had strictly adhered to the version in the POPLmark-paper.\<close>
 
 definition extends :: "env \<Rightarrow> env \<Rightarrow> bool" ("_ extends _" [100,100] 100) where
   "\<Delta> extends \<Gamma> \<equiv> \<forall>X Q. (TVarB X Q)\<in>set \<Gamma> \<longrightarrow> (TVarB X Q)\<in>set \<Delta>"
@@ -794,7 +794,7 @@
   ultimately show "\<Delta> \<turnstile> (\<forall>X<:S\<^sub>1. S\<^sub>2) <: (\<forall>X<:T\<^sub>1. T\<^sub>2)" using ok by (force intro: SA_all)
 qed
 
-text {* In fact all ``non-binding" cases can be solved automatically: *}
+text \<open>In fact all ``non-binding" cases can be solved automatically:\<close>
 
 lemma weakening_more_automated:
   assumes a: "\<Gamma> \<turnstile> S <: T"
@@ -822,9 +822,9 @@
   ultimately show "\<Delta> \<turnstile> (\<forall>X<:S\<^sub>1. S\<^sub>2) <: (\<forall>X<:T\<^sub>1. T\<^sub>2)" using ok by (force intro: SA_all)
 qed (blast intro: extends_closed extends_memb dest: extends_ty_dom)+
 
-section {* Transitivity and Narrowing *}
+section \<open>Transitivity and Narrowing\<close>
 
-text {* Some inversion lemmas that are needed in the transitivity and narrowing proof.*}
+text \<open>Some inversion lemmas that are needed in the transitivity and narrowing proof.\<close>
 
 declare ty.inject [simp add]
 
@@ -840,14 +840,14 @@
 apply(auto simp add: abs_fresh ty.inject alpha)
 done
 
-text {* Next we prove the transitivity and narrowing for the subtyping-relation. 
+text \<open>Next we prove the transitivity and narrowing for the subtyping-relation. 
 The POPLmark-paper says the following:
 
 \begin{quote}
 \begin{lemma}[Transitivity and Narrowing] \
 \begin{enumerate}
 \item If @{term "\<Gamma> \<turnstile> S<:Q"} and @{term "\<Gamma> \<turnstile> Q<:T"}, then @{term "\<Gamma> \<turnstile> S<:T"}.
-\item If @{text "\<Gamma>,X<:Q,\<Delta> \<turnstile> M<:N"} and @{term "\<Gamma> \<turnstile> P<:Q"} then @{text "\<Gamma>,X<:P,\<Delta> \<turnstile> M<:N"}.
+\item If \<open>\<Gamma>,X<:Q,\<Delta> \<turnstile> M<:N\<close> and @{term "\<Gamma> \<turnstile> P<:Q"} then \<open>\<Gamma>,X<:P,\<Delta> \<turnstile> M<:N\<close>.
 \end{enumerate}
 \end{lemma}
 
@@ -858,7 +858,7 @@
 \end{quote}
 
 For the induction on the size of @{term "Q"}, we use the induction-rule 
-@{text "measure_induct_rule"}:
+\<open>measure_induct_rule\<close>:
 
 \begin{center}
 @{thm measure_induct_rule[of "size_ty",no_vars]}
@@ -867,7 +867,7 @@
 That means in order to show a property @{term "P a"} for all @{term "a"}, 
the induct-rule requires us to prove that for all @{term x} @{term "P x"} holds using the 
 assumption that for all @{term y} whose size is strictly smaller than 
-that of @{term x} the property @{term "P y"} holds. *}
+that of @{term x} the property @{term "P y"} holds.\<close>
 
 lemma 
   shows subtype_transitivity: "\<Gamma>\<turnstile>S<:Q \<Longrightarrow> \<Gamma>\<turnstile>Q<:T \<Longrightarrow> \<Gamma>\<turnstile>S<:T" 
@@ -886,7 +886,7 @@
       case (SA_Top \<Gamma> S) 
       then have rh_drv: "\<Gamma> \<turnstile> Top <: T" by simp
       then have T_inst: "T = Top" by (auto elim: S_TopE)
-      from `\<turnstile> \<Gamma> ok` and `S closed_in \<Gamma>`
+      from \<open>\<turnstile> \<Gamma> ok\<close> and \<open>S closed_in \<Gamma>\<close>
       have "\<Gamma> \<turnstile> S <: Top" by auto
       then show "\<Gamma> \<turnstile> S <: T" using T_inst by simp 
     next
@@ -900,7 +900,7 @@
     next
       case (SA_arrow \<Gamma> Q\<^sub>1 S\<^sub>1 S\<^sub>2 Q\<^sub>2) 
       then have rh_drv: "\<Gamma> \<turnstile> Q\<^sub>1 \<rightarrow> Q\<^sub>2 <: T" by simp
-      from `Q\<^sub>1 \<rightarrow> Q\<^sub>2 = Q` 
+      from \<open>Q\<^sub>1 \<rightarrow> Q\<^sub>2 = Q\<close> 
       have Q\<^sub>12_less: "size_ty Q\<^sub>1 < size_ty Q" "size_ty Q\<^sub>2 < size_ty Q" by auto
       have lh_drv_prm\<^sub>1: "\<Gamma> \<turnstile> Q\<^sub>1 <: S\<^sub>1" by fact
       have lh_drv_prm\<^sub>2: "\<Gamma> \<turnstile> S\<^sub>2 <: Q\<^sub>2" by fact      
@@ -969,23 +969,23 @@
     qed
   } note transitivity_lemma = this  
 
-  { --{* The transitivity proof is now by the auxiliary lemma. *}
+  { \<comment>\<open>The transitivity proof is now by the auxiliary lemma.\<close>
     case 1 
-    from `\<Gamma> \<turnstile> S <: Q` and `\<Gamma> \<turnstile> Q <: T`
+    from \<open>\<Gamma> \<turnstile> S <: Q\<close> and \<open>\<Gamma> \<turnstile> Q <: T\<close>
     show "\<Gamma> \<turnstile> S <: T" by (rule transitivity_lemma) 
   next 
     case 2
-    from `(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> M <: N` 
-      and `\<Gamma> \<turnstile> P<:Q` 
+    from \<open>(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> M <: N\<close> 
+      and \<open>\<Gamma> \<turnstile> P<:Q\<close> 
     show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> M <: N" 
     proof (induct "\<Delta>@[(TVarB X Q)]@\<Gamma>" M N arbitrary: \<Gamma> X \<Delta> rule: subtype_of.induct) 
       case (SA_Top S \<Gamma> X \<Delta>)
-      from `\<Gamma> \<turnstile> P <: Q`
+      from \<open>\<Gamma> \<turnstile> P <: Q\<close>
       have "P closed_in \<Gamma>" by (simp add: subtype_implies_closed)
-      with `\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok` have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok"
+      with \<open>\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok\<close> have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok"
         by (simp add: replace_type)
       moreover
-      from `S closed_in (\<Delta>@[(TVarB X Q)]@\<Gamma>)` have "S closed_in (\<Delta>@[(TVarB X P)]@\<Gamma>)" 
+      from \<open>S closed_in (\<Delta>@[(TVarB X Q)]@\<Gamma>)\<close> have "S closed_in (\<Delta>@[(TVarB X P)]@\<Gamma>)" 
         by (simp add: closed_in_def doms_append)
       ultimately show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: Top" by (simp add: subtype_of.SA_Top)
     next
@@ -1016,12 +1016,12 @@
       qed
     next
       case (SA_refl_TVar Y \<Gamma> X \<Delta>)
-      from `\<Gamma> \<turnstile> P <: Q`
+      from \<open>\<Gamma> \<turnstile> P <: Q\<close>
       have "P closed_in \<Gamma>" by (simp add: subtype_implies_closed)
-      with `\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok` have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok"
+      with \<open>\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok\<close> have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok"
         by (simp add: replace_type)
       moreover
-      from `Y \<in> ty_dom (\<Delta>@[(TVarB X Q)]@\<Gamma>)` have "Y \<in> ty_dom (\<Delta>@[(TVarB X P)]@\<Gamma>)"
+      from \<open>Y \<in> ty_dom (\<Delta>@[(TVarB X Q)]@\<Gamma>)\<close> have "Y \<in> ty_dom (\<Delta>@[(TVarB X P)]@\<Gamma>)"
         by (simp add: doms_append)
       ultimately show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y" by (simp add: subtype_of.SA_refl_TVar)
     next
@@ -1037,7 +1037,7 @@
   } 
 qed
 
-section {* Typing *}
+section \<open>Typing\<close>
 
 inductive
   typing :: "env \<Rightarrow> trm \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _" [60,60,60] 60) 
@@ -1116,22 +1116,22 @@
 using assms
 proof induct
   case (T_Var x T \<Gamma>)
-  from `\<turnstile> \<Gamma> ok` and `VarB x T \<in> set \<Gamma>`
+  from \<open>\<turnstile> \<Gamma> ok\<close> and \<open>VarB x T \<in> set \<Gamma>\<close>
   show ?case by (rule ok_imp_VarB_closed_in)
 next
   case (T_App \<Gamma> t\<^sub>1 T\<^sub>1 T\<^sub>2 t\<^sub>2)
   then show ?case by (auto simp add: ty.supp closed_in_def)
 next
   case (T_Abs x T\<^sub>1 \<Gamma> t\<^sub>2 T\<^sub>2)
-  from `VarB x T\<^sub>1 # \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2`
+  from \<open>VarB x T\<^sub>1 # \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2\<close>
   have "T\<^sub>1 closed_in \<Gamma>" by (auto dest: typing_ok)
   with T_Abs show ?case by (auto simp add: ty.supp closed_in_def)
 next
   case (T_Sub \<Gamma> t S T)
-  from `\<Gamma> \<turnstile> S <: T` show ?case by (simp add: subtype_implies_closed)
+  from \<open>\<Gamma> \<turnstile> S <: T\<close> show ?case by (simp add: subtype_implies_closed)
 next
   case (T_TAbs X T\<^sub>1 \<Gamma> t\<^sub>2 T\<^sub>2)
-  from `TVarB X T\<^sub>1 # \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2`
+  from \<open>TVarB X T\<^sub>1 # \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2\<close>
   have "T\<^sub>1 closed_in \<Gamma>" by (auto dest: typing_ok)
   with T_TAbs show ?case by (auto simp add: ty.supp closed_in_def abs_supp)
 next
@@ -1144,7 +1144,7 @@
 qed
 
 
-subsection {* Evaluation *}
+subsection \<open>Evaluation\<close>
 
 inductive
   val :: "trm \<Rightarrow> bool"
@@ -1302,7 +1302,7 @@
   qed
 qed
   
-text {* A.5(6) *}
+text \<open>A.5(6)\<close>
 
 lemma type_weaken:
   assumes "(\<Delta>@\<Gamma>) \<turnstile> t : T"
@@ -1337,15 +1337,15 @@
   then show ?case by (rule typing.T_Abs)
 next
   case (T_Sub t S T \<Delta> \<Gamma>)
-  from refl and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+  from refl and \<open>\<turnstile> (\<Delta> @ B # \<Gamma>) ok\<close>
   have "\<Delta> @ B # \<Gamma> \<turnstile> t : S" by (rule T_Sub)
-  moreover from  `(\<Delta> @ \<Gamma>)\<turnstile>S<:T` and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+  moreover from  \<open>(\<Delta> @ \<Gamma>)\<turnstile>S<:T\<close> and \<open>\<turnstile> (\<Delta> @ B # \<Gamma>) ok\<close>
   have "(\<Delta> @ B # \<Gamma>)\<turnstile>S<:T"
     by (rule weakening) (simp add: extends_def T_Sub)
   ultimately show ?case by (rule typing.T_Sub)
 next
   case (T_TAbs X T\<^sub>1 t\<^sub>2 T\<^sub>2 \<Delta> \<Gamma>)
-  from `TVarB X T\<^sub>1 # \<Delta> @ \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2`
+  from \<open>TVarB X T\<^sub>1 # \<Delta> @ \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2\<close>
   have closed: "T\<^sub>1 closed_in (\<Delta> @ \<Gamma>)"
     by (auto dest: typing_ok)
   have "\<turnstile> (TVarB X T\<^sub>1 # \<Delta> @ B # \<Gamma>) ok"
@@ -1367,7 +1367,7 @@
   case (T_TApp X t\<^sub>1 T2 T11 T12 \<Delta> \<Gamma>)
   have "\<Delta> @ B # \<Gamma> \<turnstile> t\<^sub>1 : (\<forall>X<:T11. T12)"
     by (rule T_TApp refl)+
-  moreover from `(\<Delta> @ \<Gamma>)\<turnstile>T2<:T11` and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+  moreover from \<open>(\<Delta> @ \<Gamma>)\<turnstile>T2<:T11\<close> and \<open>\<turnstile> (\<Delta> @ B # \<Gamma>) ok\<close>
   have "(\<Delta> @ B # \<Gamma>)\<turnstile>T2<:T11"
     by (rule weakening) (simp add: extends_def T_TApp)
   ultimately show ?case by (rule better_T_TApp)
@@ -1382,7 +1382,7 @@
   apply simp_all
   done
 
-text {* A.6 *}
+text \<open>A.6\<close>
 
 lemma strengthening:
   assumes "(\<Gamma> @ VarB x Q # \<Delta>) \<turnstile> S <: T"
@@ -1395,7 +1395,7 @@
   ultimately show ?case using subtype_of.SA_Top by auto
 next
   case (SA_refl_TVar X)
-  from `\<turnstile> (\<Gamma> @ VarB x Q # \<Delta>) ok`
+  from \<open>\<turnstile> (\<Gamma> @ VarB x Q # \<Delta>) ok\<close>
   have h1:"\<turnstile> (\<Gamma> @ \<Delta>) ok" by (auto dest: valid_cons')
   have "X \<in> ty_dom (\<Gamma> @ VarB x Q # \<Delta>)" using SA_refl_TVar by auto
   then have h2:"X \<in> ty_dom (\<Gamma> @ \<Delta>)" using ty_dom_vrs by auto
@@ -1407,7 +1407,7 @@
   then show ?case using h1 h2 by auto
 qed (auto)
 
-lemma narrow_type: -- {* A.7 *}
+lemma narrow_type: \<comment> \<open>A.7\<close>
   assumes H: "\<Delta> @ (TVarB X Q) # \<Gamma> \<turnstile> t : T"
   shows "\<Gamma> \<turnstile> P <: Q \<Longrightarrow> \<Delta> @ (TVarB X P) # \<Gamma> \<turnstile> t : T"
   using H
@@ -1433,18 +1433,18 @@
     case (T_TApp X' t1 T2 T11 T12 P D)
     then have "D @ TVarB X P # \<Gamma> \<turnstile> t1 : Forall X' T12 T11" by fastforce
     moreover have "(D @ [TVarB X Q] @ \<Gamma>) \<turnstile> T2<:T11" using T_TApp by auto
-    then have "(D @ [TVarB X P] @ \<Gamma>) \<turnstile> T2<:T11" using `\<Gamma>\<turnstile>P<:Q`
+    then have "(D @ [TVarB X P] @ \<Gamma>) \<turnstile> T2<:T11" using \<open>\<Gamma>\<turnstile>P<:Q\<close>
       by (rule subtype_narrow)
     moreover from T_TApp have "X' \<sharp> (D @ TVarB X P # \<Gamma>, t1, T2)"
       by (simp add: fresh_list_append fresh_list_cons fresh_prod)
     ultimately show ?case by auto
 qed
 
-subsection {* Substitution lemmas *}
+subsection \<open>Substitution lemmas\<close>
 
-subsubsection {* Substition Preserves Typing *}
+subsubsection \<open>Substitution Preserves Typing\<close>
 
-theorem subst_type: -- {* A.8 *}
+theorem subst_type: \<comment> \<open>A.8\<close>
   assumes H: "(\<Delta> @ (VarB x U) # \<Gamma>) \<turnstile> t : T"
   shows "\<Gamma> \<turnstile> u : U \<Longrightarrow> \<Delta> @ \<Gamma> \<turnstile> t[x \<mapsto> u] : T" using H
  proof (nominal_induct "\<Delta> @ (VarB x U) # \<Gamma>" t T avoiding: x u arbitrary: \<Delta> rule: typing.strong_induct)
@@ -1473,9 +1473,9 @@
    ultimately show ?case by auto 
  next
    case (T_TAbs X T1 t2 T2 x u D)
-   from `TVarB X T1 # D @ VarB x U # \<Gamma> \<turnstile> t2 : T2` have "X \<sharp> T1"
+   from \<open>TVarB X T1 # D @ VarB x U # \<Gamma> \<turnstile> t2 : T2\<close> have "X \<sharp> T1"
      by (auto simp add: valid_ty_dom_fresh dest: typing_ok intro!: closed_in_fresh)
-   with `X \<sharp> u` and T_TAbs show ?case by fastforce
+   with \<open>X \<sharp> u\<close> and T_TAbs show ?case by fastforce
  next
    case (T_TApp X t1 T2 T11 T12 x u D)
    then have "(D@\<Gamma>) \<turnstile>T2<:T11" using T_TApp by (auto dest: strengthening)
@@ -1483,9 +1483,9 @@
      by (force simp add: fresh_prod fresh_list_append fresh_list_cons subst_trm_fresh_tyvar)
 qed
 
-subsubsection {* Type Substitution Preserves Subtyping *}
+subsubsection \<open>Type Substitution Preserves Subtyping\<close>
 
-lemma substT_subtype: -- {* A.10 *}
+lemma substT_subtype: \<comment> \<open>A.10\<close>
   assumes H: "(\<Delta> @ ((TVarB X Q) # \<Gamma>)) \<turnstile> S <: T"
   shows "\<Gamma> \<turnstile> P <: Q \<Longrightarrow> (\<Delta>[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S[X \<mapsto> P]\<^sub>\<tau> <: T[X \<mapsto> P]\<^sub>\<tau>" 
   using H
@@ -1508,11 +1508,11 @@
   proof (cases "X = Y")
     assume eq: "X = Y"
     from eq and SA_trans_TVar have "TVarB Y Q \<in> set (D @ TVarB X Q # \<Gamma>)" by simp
-    with G_ok have QS: "Q = S" using `TVarB Y S \<in> set (D @ TVarB X Q # \<Gamma>)`
+    with G_ok have QS: "Q = S" using \<open>TVarB Y S \<in> set (D @ TVarB X Q # \<Gamma>)\<close>
       by (rule uniqueness_of_ctxt)
     from X_\<Gamma>_ok have "X \<sharp> ty_dom \<Gamma>" and "Q closed_in \<Gamma>" by auto
     then have XQ: "X \<sharp> Q" by (rule closed_in_fresh)
-    note `\<Gamma>\<turnstile>P<:Q`
+    note \<open>\<Gamma>\<turnstile>P<:Q\<close>
     moreover from ST have "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" by (rule subtype_implies_ok)
     moreover have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) extends \<Gamma>" by (simp add: extends_def)
     ultimately have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:Q" by (rule weakening)
@@ -1547,7 +1547,7 @@
   qed
 next
   case (SA_refl_TVar Y X P D)
-  note `\<turnstile> (D @ TVarB X Q # \<Gamma>) ok`
+  note \<open>\<turnstile> (D @ TVarB X Q # \<Gamma>) ok\<close>
   moreover from SA_refl_TVar have closed: "P closed_in \<Gamma>"
     by (auto dest: subtype_implies_closed)
   ultimately have ok: "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" using valid_subst by auto
@@ -1582,9 +1582,9 @@
   with SA_all and S1 show ?case by force
 qed
 
-subsubsection {* Type Substitution Preserves Typing *}
+subsubsection \<open>Type Substitution Preserves Typing\<close>
 
-theorem substT_type: -- {* A.11 *}
+theorem substT_type: \<comment> \<open>A.11\<close>
   assumes H: "(D @ TVarB X Q # G) \<turnstile> t : T"
   shows "G \<turnstile> P <: Q \<Longrightarrow>
     (D[X \<mapsto> P]\<^sub>e @ G) \<turnstile> t[X \<mapsto>\<^sub>\<tau> P] : T[X \<mapsto> P]\<^sub>\<tau>" using H
@@ -1592,9 +1592,9 @@
   case (T_Var x T X P D')
   have "G\<turnstile>P<:Q" by fact
   then have "P closed_in G" using subtype_implies_closed by auto
-  moreover note `\<turnstile> (D' @ TVarB X Q # G) ok`
+  moreover note \<open>\<turnstile> (D' @ TVarB X Q # G) ok\<close>
   ultimately have "\<turnstile> (D'[X \<mapsto> P]\<^sub>e @ G) ok" using valid_subst by auto
-  moreover note `VarB x T \<in> set (D' @ TVarB X Q # G)`
+  moreover note \<open>VarB x T \<in> set (D' @ TVarB X Q # G)\<close>
   then have "VarB x T \<in> set D' \<or> VarB x T \<in> set G" by simp
   then have "(VarB x (T[X \<mapsto> P]\<^sub>\<tau>)) \<in> set (D'[X \<mapsto> P]\<^sub>e @ G)"
   proof
@@ -1648,7 +1648,7 @@
       intro: substT_subtype)
 qed
 
-lemma Abs_type: -- {* A.13(1) *}
+lemma Abs_type: \<comment> \<open>A.13(1)\<close>
   assumes H: "\<Gamma> \<turnstile> (\<lambda>x:S. s) : T"
   and H': "\<Gamma> \<turnstile> T <: U \<rightarrow> U'"
   and H'': "x \<sharp> \<Gamma>"
@@ -1658,7 +1658,7 @@
   using H H' H''
 proof (nominal_induct \<Gamma> t \<equiv> "\<lambda>x:S. s" T avoiding: x arbitrary: U U' S s rule: typing.strong_induct)
   case (T_Abs y T\<^sub>1 \<Gamma> t\<^sub>2 T\<^sub>2)
-  from `\<Gamma> \<turnstile> T\<^sub>1 \<rightarrow> T\<^sub>2 <: U \<rightarrow> U'`
+  from \<open>\<Gamma> \<turnstile> T\<^sub>1 \<rightarrow> T\<^sub>2 <: U \<rightarrow> U'\<close>
   obtain ty1: "\<Gamma> \<turnstile> U <: S" and ty2: "\<Gamma> \<turnstile> T\<^sub>2 <: U'" using T_Abs
     by cases (simp_all add: ty.inject trm.inject alpha fresh_atm)
   from T_Abs have "VarB y S # \<Gamma> \<turnstile> [(y, x)] \<bullet> s : T\<^sub>2"
@@ -1690,7 +1690,7 @@
   using H subtype_reflexivity_from_typing [OF H] H'
   by (rule Abs_type)
 
-lemma TAbs_type: -- {* A.13(2) *}
+lemma TAbs_type: \<comment> \<open>A.13(2)\<close>
   assumes H: "\<Gamma> \<turnstile> (\<lambda>X<:S. s) : T"
   and H': "\<Gamma> \<turnstile> T <: (\<forall>X<:U. U')"
   and fresh: "X \<sharp> \<Gamma>" "X \<sharp> S" "X \<sharp> U"
@@ -1701,9 +1701,9 @@
   using H H' fresh
 proof (nominal_induct \<Gamma> t \<equiv> "\<lambda>X<:S. s" T avoiding: X U U' S arbitrary: s rule: typing.strong_induct)
   case (T_TAbs Y T\<^sub>1 \<Gamma> t\<^sub>2 T\<^sub>2)
-  from `TVarB Y T\<^sub>1 # \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2` have Y: "Y \<sharp> \<Gamma>"
+  from \<open>TVarB Y T\<^sub>1 # \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>2\<close> have Y: "Y \<sharp> \<Gamma>"
     by (auto dest!: typing_ok simp add: valid_ty_dom_fresh)
-  from `Y \<sharp> U'` and `Y \<sharp> X`
+  from \<open>Y \<sharp> U'\<close> and \<open>Y \<sharp> X\<close>
   have "(\<forall>X<:U. U') = (\<forall>Y<:U. [(Y, X)] \<bullet> U')"
     by (simp add: ty.inject alpha' fresh_atm)
   with T_TAbs have "\<Gamma> \<turnstile> (\<forall>Y<:S. T\<^sub>2) <: (\<forall>Y<:U. [(Y, X)] \<bullet> U')" by (simp add: trm.inject)
@@ -1714,13 +1714,13 @@
     by (simp add: trm.inject alpha fresh_atm)
   then have "[(Y, X)] \<bullet> (TVarB Y S # \<Gamma>) \<turnstile> [(Y, X)] \<bullet> [(Y, X)] \<bullet> s : [(Y, X)] \<bullet> T\<^sub>2"
     by (rule typing.eqvt)
-  with `X \<sharp> \<Gamma>` `X \<sharp> S` Y `Y \<sharp> S` have "TVarB X S # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^sub>2"
+  with \<open>X \<sharp> \<Gamma>\<close> \<open>X \<sharp> S\<close> Y \<open>Y \<sharp> S\<close> have "TVarB X S # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^sub>2"
     by perm_simp
   then have "TVarB X U # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^sub>2" using ty1
     by (rule narrow_type [of "[]", simplified])
   moreover from ty2 have "([(Y, X)] \<bullet> (TVarB Y U # \<Gamma>)) \<turnstile> ([(Y, X)] \<bullet> T\<^sub>2) <: ([(Y, X)] \<bullet> [(Y, X)] \<bullet> U')"
     by (rule subtype_of.eqvt)
-  with `X \<sharp> \<Gamma>` `X \<sharp> U` Y `Y \<sharp> U` have "(TVarB X U # \<Gamma>) \<turnstile> ([(Y, X)] \<bullet> T\<^sub>2) <: U'"
+  with \<open>X \<sharp> \<Gamma>\<close> \<open>X \<sharp> U\<close> Y \<open>Y \<sharp> U\<close> have "(TVarB X U # \<Gamma>) \<turnstile> ([(Y, X)] \<bullet> T\<^sub>2) <: U'"
     by perm_simp
   ultimately show ?case by (rule T_TAbs)
 next
@@ -1738,7 +1738,7 @@
   using H subtype_reflexivity_from_typing [OF H] fresh
   by (rule TAbs_type)
 
-theorem preservation: -- {* A.20 *}
+theorem preservation: \<comment> \<open>A.20\<close>
   assumes H: "\<Gamma> \<turnstile> t : T"
   shows "t \<longmapsto> t' \<Longrightarrow> \<Gamma> \<turnstile> t' : T" using H
 proof (nominal_induct avoiding: t' rule: typing.strong_induct)
@@ -1747,7 +1747,7 @@
     by (rule exists_fresh) (rule fin_supp)
   obtain X::tyvrs where "X \<sharp> (t\<^sub>1 \<cdot> t\<^sub>2, t')"
     by (rule exists_fresh) (rule fin_supp)
-  with `t\<^sub>1 \<cdot> t\<^sub>2 \<longmapsto> t'` show ?case
+  with \<open>t\<^sub>1 \<cdot> t\<^sub>2 \<longmapsto> t'\<close> show ?case
   proof (cases rule: eval.strong_cases [where x=x and X=X])
     case (E_Abs v\<^sub>2 T\<^sub>11' t\<^sub>12)
     with T_App and x_fresh have h: "\<Gamma> \<turnstile> (\<lambda>x:T\<^sub>11'. t\<^sub>12) : T\<^sub>11 \<rightarrow> T\<^sub>12"
@@ -1758,7 +1758,7 @@
       and t\<^sub>12: "(VarB x T\<^sub>11') # \<Gamma> \<turnstile> t\<^sub>12 : S'"
       and S': "\<Gamma> \<turnstile> S' <: T\<^sub>12"
       by (rule Abs_type') blast
-    from `\<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>11`
+    from \<open>\<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>11\<close>
     have "\<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>11'" using T\<^sub>11 by (rule T_Sub)
     with t\<^sub>12 have "\<Gamma> \<turnstile> t\<^sub>12[x \<mapsto> t\<^sub>2] : S'" 
       by (rule subst_type [where \<Delta>="[]", simplified])
@@ -1768,7 +1768,7 @@
     case (E_App1 t''' t'' u)
     hence "t\<^sub>1 \<longmapsto> t''" by (simp add:trm.inject) 
     hence "\<Gamma> \<turnstile> t'' : T\<^sub>11 \<rightarrow> T\<^sub>12" by (rule T_App)
-    hence "\<Gamma> \<turnstile> t'' \<cdot> t\<^sub>2 : T\<^sub>12" using `\<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>11`
+    hence "\<Gamma> \<turnstile> t'' \<cdot> t\<^sub>2 : T\<^sub>12" using \<open>\<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>11\<close>
       by (rule typing.T_App)
     with E_App1 show ?thesis by (simp add:trm.inject)
   next
@@ -1783,27 +1783,27 @@
   case (T_TApp X \<Gamma> t\<^sub>1 T\<^sub>2  T\<^sub>11  T\<^sub>12 t')
   obtain x::vrs where "x \<sharp> (t\<^sub>1 \<cdot>\<^sub>\<tau> T\<^sub>2, t')"
     by (rule exists_fresh) (rule fin_supp)
-  with `t\<^sub>1 \<cdot>\<^sub>\<tau> T\<^sub>2 \<longmapsto> t'`
+  with \<open>t\<^sub>1 \<cdot>\<^sub>\<tau> T\<^sub>2 \<longmapsto> t'\<close>
   show ?case
   proof (cases rule: eval.strong_cases [where X=X and x=x])
     case (E_TAbs T\<^sub>11' T\<^sub>2' t\<^sub>12)
     with T_TApp have "\<Gamma> \<turnstile> (\<lambda>X<:T\<^sub>11'. t\<^sub>12) : (\<forall>X<:T\<^sub>11. T\<^sub>12)" and "X \<sharp> \<Gamma>" and "X \<sharp> T\<^sub>11'"
       by (simp_all add: trm.inject)
-    moreover from `\<Gamma>\<turnstile>T\<^sub>2<:T\<^sub>11` and `X \<sharp> \<Gamma>` have "X \<sharp> T\<^sub>11"
+    moreover from \<open>\<Gamma>\<turnstile>T\<^sub>2<:T\<^sub>11\<close> and \<open>X \<sharp> \<Gamma>\<close> have "X \<sharp> T\<^sub>11"
       by (blast intro: closed_in_fresh fresh_dom dest: subtype_implies_closed)
     ultimately obtain S'
       where "TVarB X T\<^sub>11 # \<Gamma> \<turnstile> t\<^sub>12 : S'"
       and "(TVarB X T\<^sub>11 # \<Gamma>) \<turnstile> S' <: T\<^sub>12"
       by (rule TAbs_type') blast
     hence "TVarB X T\<^sub>11 # \<Gamma> \<turnstile> t\<^sub>12 : T\<^sub>12" by (rule T_Sub)
-    hence "\<Gamma> \<turnstile> t\<^sub>12[X \<mapsto>\<^sub>\<tau> T\<^sub>2] : T\<^sub>12[X \<mapsto> T\<^sub>2]\<^sub>\<tau>" using `\<Gamma> \<turnstile> T\<^sub>2 <: T\<^sub>11`
+    hence "\<Gamma> \<turnstile> t\<^sub>12[X \<mapsto>\<^sub>\<tau> T\<^sub>2] : T\<^sub>12[X \<mapsto> T\<^sub>2]\<^sub>\<tau>" using \<open>\<Gamma> \<turnstile> T\<^sub>2 <: T\<^sub>11\<close>
       by (rule substT_type [where D="[]", simplified])
     with T_TApp and E_TAbs show ?thesis by (simp add: trm.inject)
   next
     case (E_TApp t''' t'' T)
     from E_TApp have "t\<^sub>1 \<longmapsto> t''" by (simp add: trm.inject)
     then have "\<Gamma> \<turnstile> t'' : (\<forall>X<:T\<^sub>11. T\<^sub>12)" by (rule T_TApp)
-    then have "\<Gamma> \<turnstile> t'' \<cdot>\<^sub>\<tau> T\<^sub>2 : T\<^sub>12[X \<mapsto> T\<^sub>2]\<^sub>\<tau>" using `\<Gamma> \<turnstile> T\<^sub>2 <: T\<^sub>11`
+    then have "\<Gamma> \<turnstile> t'' \<cdot>\<^sub>\<tau> T\<^sub>2 : T\<^sub>12[X \<mapsto> T\<^sub>2]\<^sub>\<tau>" using \<open>\<Gamma> \<turnstile> T\<^sub>2 <: T\<^sub>11\<close>
       by (rule better_T_TApp)
     with E_TApp show ?thesis by (simp add: trm.inject)
   qed (simp_all add: fresh_prod)
@@ -1815,24 +1815,24 @@
   ultimately show ?case by (rule typing.T_Sub)
 qed (auto)
 
-lemma Fun_canonical: -- {* A.14(1) *}
+lemma Fun_canonical: \<comment> \<open>A.14(1)\<close>
   assumes ty: "[] \<turnstile> v : T\<^sub>1 \<rightarrow> T\<^sub>2"
   shows "val v \<Longrightarrow> \<exists>x t S. v = (\<lambda>x:S. t)" using ty
 proof (induct "[]::env" v "T\<^sub>1 \<rightarrow> T\<^sub>2" arbitrary: T\<^sub>1 T\<^sub>2)
   case (T_Sub t S)
-  from `[] \<turnstile> S <: T\<^sub>1 \<rightarrow> T\<^sub>2`
+  from \<open>[] \<turnstile> S <: T\<^sub>1 \<rightarrow> T\<^sub>2\<close>
   obtain S\<^sub>1 S\<^sub>2 where S: "S = S\<^sub>1 \<rightarrow> S\<^sub>2" 
     by cases (auto simp add: T_Sub)
-  then show ?case using `val t` by (rule T_Sub)
+  then show ?case using \<open>val t\<close> by (rule T_Sub)
 qed (auto)
 
-lemma TyAll_canonical: -- {* A.14(3) *}
+lemma TyAll_canonical: \<comment> \<open>A.14(3)\<close>
   fixes X::tyvrs
   assumes ty: "[] \<turnstile> v : (\<forall>X<:T\<^sub>1. T\<^sub>2)"
   shows "val v \<Longrightarrow> \<exists>X t S. v = (\<lambda>X<:S. t)" using ty
 proof (induct "[]::env" v "\<forall>X<:T\<^sub>1. T\<^sub>2" arbitrary: X T\<^sub>1 T\<^sub>2)
   case (T_Sub t S)
-  from `[] \<turnstile> S <: (\<forall>X<:T\<^sub>1. T\<^sub>2)`
+  from \<open>[] \<turnstile> S <: (\<forall>X<:T\<^sub>1. T\<^sub>2)\<close>
   obtain X S\<^sub>1 S\<^sub>2 where S: "S = (\<forall>X<:S\<^sub>1. S\<^sub>2)"
     by cases (auto simp add: T_Sub)
   then show ?case using T_Sub by auto 
--- a/src/HOL/Nominal/Examples/Height.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Height.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,10 +2,10 @@
   imports "../Nominal"
 begin
 
-text {*  
+text \<open>
   A small problem suggested by D. Wang. It shows how
   the height of a lambda-term behaves under substitution.
-*}
+\<close>
 
 atom_decl name
 
@@ -14,7 +14,7 @@
   | App "lam" "lam"
   | Lam "\<guillemotleft>name\<guillemotright>lam" ("Lam [_]._" [100,100] 100)
 
-text {* Definition of the height-function on lambda-terms. *} 
+text \<open>Definition of the height-function on lambda-terms.\<close> 
 
 nominal_primrec
   height :: "lam \<Rightarrow> int"
@@ -28,7 +28,7 @@
   apply(fresh_guess add: perm_int_def)+
   done
 
-text {* Definition of capture-avoiding substitution. *}
+text \<open>Definition of capture-avoiding substitution.\<close>
 
 nominal_primrec
   subst :: "lam \<Rightarrow> name \<Rightarrow> lam \<Rightarrow> lam"  ("_[_::=_]" [100,100,100] 100)
@@ -42,16 +42,16 @@
 apply(fresh_guess)+
 done
 
-text{* The next lemma is needed in the Var-case of the theorem below. *}
+text\<open>The next lemma is needed in the Var-case of the theorem below.\<close>
 
 lemma height_ge_one: 
   shows "1 \<le> (height e)"
 by (nominal_induct e rule: lam.strong_induct) (simp_all)
 
-text {* 
+text \<open>
   Unlike the problem suggested by Wang, however, the 
   theorem is here formulated entirely by using functions. 
-*}
+\<close>
 
 theorem height_subst:
   shows "height (e[x::=e']) \<le> ((height e) - 1) + (height e')"
--- a/src/HOL/Nominal/Examples/Lam_Funs.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Lam_Funs.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,10 +2,10 @@
   imports "../Nominal"
 begin
 
-text {* 
+text \<open>
   Provides useful definitions for reasoning
   with lambda-terms. 
-*}
+\<close>
 
 atom_decl name
 
@@ -14,7 +14,7 @@
   | App "lam" "lam"
   | Lam "\<guillemotleft>name\<guillemotright>lam" ("Lam [_]._" [100,100] 100)
 
-text {* The depth of a lambda-term. *}
+text \<open>The depth of a lambda-term.\<close>
 
 nominal_primrec
   depth :: "lam \<Rightarrow> nat"
@@ -28,12 +28,12 @@
   apply(fresh_guess)+
   done
 
-text {* 
+text \<open>
   The free variables of a lambda-term. A complication in this
   function arises from the fact that it returns a name set, which 
   is not a finitely supported type. Therefore we have to prove 
   the invariant that frees always returns a finite set of names. 
-*}
+\<close>
 
 nominal_primrec (invariant: "\<lambda>s::name set. finite s")
   frees :: "lam \<Rightarrow> name set"
@@ -50,17 +50,17 @@
 apply(fresh_guess)+
 done
 
-text {* 
+text \<open>
   We can avoid the definition of frees by
   using the built-in notion of support.
-*}
+\<close>
 
 lemma frees_equals_support:
   shows "frees t = supp t"
 by (nominal_induct t rule: lam.strong_induct)
    (simp_all add: lam.supp supp_atm abs_supp)
 
-text {* Parallel and single capture-avoiding substitution. *}
+text \<open>Parallel and single capture-avoiding substitution.\<close>
 
 fun
   lookup :: "(name\<times>lam) list \<Rightarrow> name \<Rightarrow> lam"   
@@ -112,18 +112,18 @@
 apply(blast)+
 done
 
-text {* 
+text \<open>
   Contexts - lambda-terms with a single hole.
   Note that the lambda case in contexts does not bind a 
   name, even if we introduce the notation [_]._ for CLam.
-*}
+\<close>
 nominal_datatype clam = 
     Hole ("\<box>" 1000)  
   | CAppL "clam" "lam"
   | CAppR "lam" "clam" 
   | CLam "name" "clam"  ("CLam [_]._" [100,100] 100) 
 
-text {* Filling a lambda-term into a context. *}
+text \<open>Filling a lambda-term into a context.\<close>
 
 nominal_primrec
   filling :: "clam \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>" [100,100] 100)
@@ -134,7 +134,7 @@
 | "(CLam [x].E)\<lbrakk>t\<rbrakk> = Lam [x].(E\<lbrakk>t\<rbrakk>)" 
 by (rule TrueI)+
 
-text {* Composition od two contexts *}
+text \<open>Composition of two contexts\<close>
 
 nominal_primrec
  clam_compose :: "clam \<Rightarrow> clam \<Rightarrow> clam" ("_ \<circ> _" [100,100] 100)
--- a/src/HOL/Nominal/Examples/Lambda_mu.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Lambda_mu.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
   imports "../Nominal" 
 begin
 
-section {* Lambda-Mu according to a paper by Gavin Bierman *}
+section \<open>Lambda-Mu according to a paper by Gavin Bierman\<close>
 
 atom_decl var mvar
 
--- a/src/HOL/Nominal/Examples/LocalWeakening.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/LocalWeakening.thy	Thu May 26 17:51:22 2016 +0200
@@ -9,17 +9,17 @@
 
 atom_decl name
 
-text {* 
+text \<open>
   Curry-style lambda terms in locally nameless 
   representation without any binders           
-*}
+\<close>
 nominal_datatype llam = 
     lPar "name"
   | lVar "nat"
   | lApp "llam" "llam"
   | lLam "llam"
 
-text {* definition of vsub - at the moment a bit cumbersome *}
+text \<open>definition of vsub - at the moment a bit cumbersome\<close>
 
 lemma llam_cases:
   "(\<exists>a. t = lPar a) \<or> (\<exists>n. t = lVar n) \<or> (\<exists>t1 t2. t = lApp t1 t2) \<or> 
@@ -66,7 +66,7 @@
   shows "pi\<bullet>(freshen t p) = freshen (pi\<bullet>t) (pi\<bullet>p)"
 by (simp add: vsub_eqvt freshen_def perm_nat_def)
 
-text {* types *}
+text \<open>types\<close>
 
 nominal_datatype ty =
     TVar "nat"
@@ -79,7 +79,7 @@
 by (induct T rule: ty.induct) 
    (simp_all add: fresh_nat)
 
-text {* valid contexts *}
+text \<open>valid contexts\<close>
 
 type_synonym cxt = "(name\<times>ty) list"
 
@@ -96,7 +96,7 @@
   shows   "a\<sharp>\<Gamma> \<and> valid \<Gamma>"
 using a by (cases) (auto)
 
-text {* "weak" typing relation *}
+text \<open>"weak" typing relation\<close>
 
 inductive
   typing :: "cxt\<Rightarrow>llam\<Rightarrow>ty\<Rightarrow>bool" (" _ \<turnstile> _ : _ " [60,60,60] 60)
@@ -112,18 +112,18 @@
   shows "valid \<Gamma>"
 using a by (induct) (auto dest: v2_elim)
 
-text {* 
+text \<open>
   we have to explicitly state that nominal_inductive should strengthen 
   over the variable x (since x is not a binder)
-*}
+\<close>
 nominal_inductive typing
   avoids t_lLam: x
   by (auto simp add: fresh_prod dest: v2_elim typing_implies_valid)
   
-text {* strong induction principle for typing *}
+text \<open>strong induction principle for typing\<close>
 thm typing.strong_induct
 
-text {* sub-contexts *}
+text \<open>sub-contexts\<close>
 
 abbreviation
   "sub_context" :: "cxt \<Rightarrow> cxt \<Rightarrow> bool" ("_ \<subseteq> _" [60,60] 60) 
--- a/src/HOL/Nominal/Examples/Pattern.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Pattern.thy	Thu May 26 17:51:22 2016 +0200
@@ -1,4 +1,4 @@
-section {* Simply-typed lambda-calculus with let and tuple patterns *}
+section \<open>Simply-typed lambda-calculus with let and tuple patterns\<close>
 
 theory Pattern
 imports "../Nominal"
@@ -411,7 +411,7 @@
   shows "\<Delta> @ \<Gamma> \<turnstile> t[x\<mapsto>u] : T" using a b
 proof (nominal_induct \<Gamma>'\<equiv>"\<Delta> @ [(x, U)] @ \<Gamma>" t T avoiding: x u \<Delta> rule: typing.strong_induct)
   case (Var y T x u \<Delta>)
-  from `valid (\<Delta> @ [(x, U)] @ \<Gamma>)`
+  from \<open>valid (\<Delta> @ [(x, U)] @ \<Gamma>)\<close>
   have valid: "valid (\<Delta> @ \<Gamma>)" by (rule valid_insert)
   show "\<Delta> @ \<Gamma> \<turnstile> Var y[x\<mapsto>u] : T"
   proof cases
@@ -425,19 +425,19 @@
   qed
 next
   case (Tuple t\<^sub>1 T\<^sub>1 t\<^sub>2 T\<^sub>2)
-  from refl `\<Gamma> \<turnstile> u : U`
+  from refl \<open>\<Gamma> \<turnstile> u : U\<close>
   have "\<Delta> @ \<Gamma> \<turnstile> t\<^sub>1[x\<mapsto>u] : T\<^sub>1" by (rule Tuple)
-  moreover from refl `\<Gamma> \<turnstile> u : U`
+  moreover from refl \<open>\<Gamma> \<turnstile> u : U\<close>
   have "\<Delta> @ \<Gamma> \<turnstile> t\<^sub>2[x\<mapsto>u] : T\<^sub>2" by (rule Tuple)
   ultimately have "\<Delta> @ \<Gamma> \<turnstile> \<langle>t\<^sub>1[x\<mapsto>u], t\<^sub>2[x\<mapsto>u]\<rangle> : T\<^sub>1 \<otimes> T\<^sub>2" ..
   then show ?case by simp
 next
   case (Let p t T \<Delta>' s S)
-  from refl `\<Gamma> \<turnstile> u : U`
+  from refl \<open>\<Gamma> \<turnstile> u : U\<close>
   have "\<Delta> @ \<Gamma> \<turnstile> t[x\<mapsto>u] : T" by (rule Let)
-  moreover note `\<turnstile> p : T \<Rightarrow> \<Delta>'`
+  moreover note \<open>\<turnstile> p : T \<Rightarrow> \<Delta>'\<close>
   moreover have "\<Delta>' @ (\<Delta> @ [(x, U)] @ \<Gamma>) = (\<Delta>' @ \<Delta>) @ [(x, U)] @ \<Gamma>" by simp
-  then have "(\<Delta>' @ \<Delta>) @ \<Gamma> \<turnstile> s[x\<mapsto>u] : S" using `\<Gamma> \<turnstile> u : U` by (rule Let)
+  then have "(\<Delta>' @ \<Delta>) @ \<Gamma> \<turnstile> s[x\<mapsto>u] : S" using \<open>\<Gamma> \<turnstile> u : U\<close> by (rule Let)
   then have "\<Delta>' @ \<Delta> @ \<Gamma> \<turnstile> s[x\<mapsto>u] : S" by simp
   ultimately have "\<Delta> @ \<Gamma> \<turnstile> (LET p = t[x\<mapsto>u] IN s[x\<mapsto>u]) : S"
     by (rule better_T_Let)
@@ -448,7 +448,7 @@
   case (Abs y T t S)
   have "(y, T) # \<Delta> @ [(x, U)] @ \<Gamma> = ((y, T) # \<Delta>) @ [(x, U)] @ \<Gamma>"
     by simp
-  then have "((y, T) # \<Delta>) @ \<Gamma> \<turnstile> t[x\<mapsto>u] : S" using `\<Gamma> \<turnstile> u : U` by (rule Abs)
+  then have "((y, T) # \<Delta>) @ \<Gamma> \<turnstile> t[x\<mapsto>u] : S" using \<open>\<Gamma> \<turnstile> u : U\<close> by (rule Abs)
   then have "(y, T) # \<Delta> @ \<Gamma> \<turnstile> t[x\<mapsto>u] : S" by simp
   then have "\<Delta> @ \<Gamma> \<turnstile> (\<lambda>y:T. t[x\<mapsto>u]) : T \<rightarrow> S"
     by (rule typing.Abs)
@@ -457,9 +457,9 @@
   ultimately show ?case by simp
 next
   case (App t\<^sub>1 T S t\<^sub>2)
-  from refl `\<Gamma> \<turnstile> u : U`
+  from refl \<open>\<Gamma> \<turnstile> u : U\<close>
   have "\<Delta> @ \<Gamma> \<turnstile> t\<^sub>1[x\<mapsto>u] : T \<rightarrow> S" by (rule App)
-  moreover from refl `\<Gamma> \<turnstile> u : U`
+  moreover from refl \<open>\<Gamma> \<turnstile> u : U\<close>
   have "\<Delta> @ \<Gamma> \<turnstile> t\<^sub>2[x\<mapsto>u] : T" by (rule App)
   ultimately have "\<Delta> @ \<Gamma> \<turnstile> (t\<^sub>1[x\<mapsto>u]) \<cdot> (t\<^sub>2[x\<mapsto>u]) : S"
     by (rule typing.App)
@@ -489,14 +489,14 @@
   shows "\<Gamma>\<^sub>1 @ \<Gamma>\<^sub>2 \<turnstile> \<theta>\<lparr>t\<rparr> : T" using assms
 proof (induct arbitrary: \<Gamma>\<^sub>1 \<Gamma>\<^sub>2 t u T \<theta>)
   case (PVar x U)
-  from `\<Gamma>\<^sub>1 @ [(x, U)] @ \<Gamma>\<^sub>2 \<turnstile> t : T` `\<Gamma>\<^sub>2 \<turnstile> u : U`
+  from \<open>\<Gamma>\<^sub>1 @ [(x, U)] @ \<Gamma>\<^sub>2 \<turnstile> t : T\<close> \<open>\<Gamma>\<^sub>2 \<turnstile> u : U\<close>
   have "\<Gamma>\<^sub>1 @ \<Gamma>\<^sub>2 \<turnstile> t[x\<mapsto>u] : T" by (rule subst_type_aux)
-  moreover from `\<turnstile> PVar x U \<rhd> u \<Rightarrow> \<theta>` have "\<theta> = [(x, u)]"
+  moreover from \<open>\<turnstile> PVar x U \<rhd> u \<Rightarrow> \<theta>\<close> have "\<theta> = [(x, u)]"
     by cases simp_all
   ultimately show ?case by simp
 next
   case (PTuple p S \<Delta>\<^sub>1 q U \<Delta>\<^sub>2)
-  from `\<turnstile> \<langle>\<langle>p, q\<rangle>\<rangle> \<rhd> u \<Rightarrow> \<theta>` obtain u\<^sub>1 u\<^sub>2 \<theta>\<^sub>1 \<theta>\<^sub>2
+  from \<open>\<turnstile> \<langle>\<langle>p, q\<rangle>\<rangle> \<rhd> u \<Rightarrow> \<theta>\<close> obtain u\<^sub>1 u\<^sub>2 \<theta>\<^sub>1 \<theta>\<^sub>2
     where u: "u = \<langle>u\<^sub>1, u\<^sub>2\<rangle>" and \<theta>: "\<theta> = \<theta>\<^sub>1 @ \<theta>\<^sub>2"
     and p: "\<turnstile> p \<rhd> u\<^sub>1 \<Rightarrow> \<theta>\<^sub>1" and q: "\<turnstile> q \<rhd> u\<^sub>2 \<Rightarrow> \<theta>\<^sub>2"
     by cases simp_all
@@ -504,10 +504,10 @@
   then obtain u\<^sub>1: "\<Gamma>\<^sub>2 \<turnstile> u\<^sub>1 : S" and u\<^sub>2: "\<Gamma>\<^sub>2 \<turnstile> u\<^sub>2 : U"
     by cases (simp_all add: ty.inject trm.inject)
   note u\<^sub>1
-  moreover from `\<Gamma>\<^sub>1 @ (\<Delta>\<^sub>2 @ \<Delta>\<^sub>1) @ \<Gamma>\<^sub>2 \<turnstile> t : T`
+  moreover from \<open>\<Gamma>\<^sub>1 @ (\<Delta>\<^sub>2 @ \<Delta>\<^sub>1) @ \<Gamma>\<^sub>2 \<turnstile> t : T\<close>
   have "(\<Gamma>\<^sub>1 @ \<Delta>\<^sub>2) @ \<Delta>\<^sub>1 @ \<Gamma>\<^sub>2 \<turnstile> t : T" by simp
   moreover note p
-  moreover from `supp \<langle>\<langle>p, q\<rangle>\<rangle> \<sharp>* u` and u
+  moreover from \<open>supp \<langle>\<langle>p, q\<rangle>\<rangle> \<sharp>* u\<close> and u
   have "(supp p::name set) \<sharp>* u\<^sub>1" by (simp add: fresh_star_def)
   ultimately have \<theta>\<^sub>1: "(\<Gamma>\<^sub>1 @ \<Delta>\<^sub>2) @ \<Gamma>\<^sub>2 \<turnstile> \<theta>\<^sub>1\<lparr>t\<rparr> : T"
     by (rule PTuple)
@@ -515,11 +515,11 @@
   moreover from \<theta>\<^sub>1
   have "\<Gamma>\<^sub>1 @ \<Delta>\<^sub>2 @ \<Gamma>\<^sub>2 \<turnstile> \<theta>\<^sub>1\<lparr>t\<rparr> : T" by simp
   moreover note q
-  moreover from `supp \<langle>\<langle>p, q\<rangle>\<rangle> \<sharp>* u` and u
+  moreover from \<open>supp \<langle>\<langle>p, q\<rangle>\<rangle> \<sharp>* u\<close> and u
   have "(supp q::name set) \<sharp>* u\<^sub>2" by (simp add: fresh_star_def)
   ultimately have "\<Gamma>\<^sub>1 @ \<Gamma>\<^sub>2 \<turnstile> \<theta>\<^sub>2\<lparr>\<theta>\<^sub>1\<lparr>t\<rparr>\<rparr> : T"
     by (rule PTuple)
-  moreover from `\<turnstile> \<langle>\<langle>p, q\<rangle>\<rangle> \<rhd> u \<Rightarrow> \<theta>` `supp \<langle>\<langle>p, q\<rangle>\<rangle> \<sharp>* u`
+  moreover from \<open>\<turnstile> \<langle>\<langle>p, q\<rangle>\<rangle> \<rhd> u \<Rightarrow> \<theta>\<close> \<open>supp \<langle>\<langle>p, q\<rangle>\<rangle> \<sharp>* u\<close>
   have "(supp (map fst \<theta>)::name set) \<sharp>* map snd \<theta>"
     by (rule match_fresh)
   ultimately show ?case using \<theta> by (simp add: psubst_append)
@@ -578,10 +578,10 @@
   case (Abs x' T' t' U)
   obtain y::name where y: "y \<sharp> (x, \<Gamma>, \<lambda>x':T'. t')"
     by (rule exists_fresh) (auto intro: fin_supp)
-  from `(\<lambda>x:T. t) = (\<lambda>x':T'. t')` [symmetric]
+  from \<open>(\<lambda>x:T. t) = (\<lambda>x':T'. t')\<close> [symmetric]
   have x: "x \<sharp> (\<lambda>x':T'. t')" by (simp add: abs_fresh)
   have x': "x' \<sharp> (\<lambda>x':T'. t')" by (simp add: abs_fresh)
-  from `(x', T') # \<Gamma> \<turnstile> t' : U` have x'': "x' \<sharp> \<Gamma>"
+  from \<open>(x', T') # \<Gamma> \<turnstile> t' : U\<close> have x'': "x' \<sharp> \<Gamma>"
     by (auto dest: valid_typing)
   have "(\<lambda>x:T. t) = (\<lambda>x':T'. t')" by fact
   also from x x' y have "\<dots> = [(x, y)] \<bullet> [(x', y)] \<bullet> (\<lambda>x':T'. t')"
@@ -592,7 +592,7 @@
   then have T: "T = T'" and t: "[(x, y)] \<bullet> [(x', y)] \<bullet> t' = t"
     by (simp_all add: trm.inject alpha)
   from Abs T have "S = T \<rightarrow> U" by simp
-  moreover from `(x', T') # \<Gamma> \<turnstile> t' : U`
+  moreover from \<open>(x', T') # \<Gamma> \<turnstile> t' : U\<close>
   have "[(x, y)] \<bullet> [(x', y)] \<bullet> ((x', T') # \<Gamma> \<turnstile> t' : U)"
     by (simp add: perm_bool)
   with T t y x'' fresh have "(x, T) # \<Gamma> \<turnstile> t : U"
@@ -659,7 +659,7 @@
   show ?case
   proof (cases q)
     case (PVar x' T')
-    with `(\<lambda>[PVar x T]. t) = (\<lambda>[q]. u)`
+    with \<open>(\<lambda>[PVar x T]. t) = (\<lambda>[q]. u)\<close>
     have "x = x' \<and> t = u \<or> x \<noteq> x' \<and> t = [(x, x')] \<bullet> u \<and> x \<sharp> u"
       by (simp add: btrm.inject alpha)
     then show ?thesis
@@ -714,7 +714,7 @@
     then obtain pi::"name prm" where
       "p\<^sub>1 = pi \<bullet> p\<^sub>1'" "(\<lambda>[p\<^sub>2]. t) = pi \<bullet> (\<lambda>[p\<^sub>2']. u)" and
       pi: "set pi \<subseteq> (supp p\<^sub>1 \<union> supp p\<^sub>1') \<times> (supp p\<^sub>1 \<union> supp p\<^sub>1')" by auto
-    from `(\<lambda>[p\<^sub>2]. t) = pi \<bullet> (\<lambda>[p\<^sub>2']. u)`
+    from \<open>(\<lambda>[p\<^sub>2]. t) = pi \<bullet> (\<lambda>[p\<^sub>2']. u)\<close>
     have "(\<lambda>[p\<^sub>2]. t) = (\<lambda>[pi \<bullet> p\<^sub>2']. pi \<bullet> u)"
       by (simp add: eqvts)
     moreover from PTuple PTuple' have "pat_type p\<^sub>2 = pat_type (pi \<bullet> p\<^sub>2')"
@@ -732,13 +732,13 @@
         (supp p\<^sub>2 \<union> supp (pi \<bullet> p\<^sub>2'))" by auto
     from PTuple PTuple' have "pi \<bullet> distinct (pat_vars \<langle>\<langle>p\<^sub>1', p\<^sub>2'\<rangle>\<rangle>)" by simp
     then have "distinct (pat_vars \<langle>\<langle>pi \<bullet> p\<^sub>1', pi \<bullet> p\<^sub>2'\<rangle>\<rangle>)" by (simp only: eqvts)
-    with `p\<^sub>1 = pi \<bullet> p\<^sub>1'` PTuple'
+    with \<open>p\<^sub>1 = pi \<bullet> p\<^sub>1'\<close> PTuple'
     have fresh: "(supp p\<^sub>2 \<union> supp (pi \<bullet> p\<^sub>2') :: name set) \<sharp>* p\<^sub>1"
       by (auto simp add: set_pat_vars_supp fresh_star_def fresh_def eqvts)
-    from `p\<^sub>1 = pi \<bullet> p\<^sub>1'` have "pi' \<bullet> (p\<^sub>1 = pi \<bullet> p\<^sub>1')" by (rule perm_boolI)
+    from \<open>p\<^sub>1 = pi \<bullet> p\<^sub>1'\<close> have "pi' \<bullet> (p\<^sub>1 = pi \<bullet> p\<^sub>1')" by (rule perm_boolI)
     with pt_freshs_freshs [OF pt_name_inst at_name_inst pi' fresh fresh]
     have "p\<^sub>1 = pi' \<bullet> pi \<bullet> p\<^sub>1'" by (simp add: eqvts)
-    with `p\<^sub>2 = pi' \<bullet> pi \<bullet> p\<^sub>2'` have "\<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> = (pi' @ pi) \<bullet> \<langle>\<langle>p\<^sub>1', p\<^sub>2'\<rangle>\<rangle>"
+    with \<open>p\<^sub>2 = pi' \<bullet> pi \<bullet> p\<^sub>2'\<close> have "\<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> = (pi' @ pi) \<bullet> \<langle>\<langle>p\<^sub>1', p\<^sub>2'\<rangle>\<rangle>"
       by (simp add: pt_name2)
     moreover
     have "((supp p\<^sub>2 \<union> (pi \<bullet> supp p\<^sub>2')) \<times> (supp p\<^sub>2 \<union> (pi \<bullet> supp p\<^sub>2'))::(name \<times> name) set) \<subseteq>
@@ -748,7 +748,7 @@
     with pi have "set (pi' @ pi) \<subseteq> (supp \<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> \<union> supp \<langle>\<langle>p\<^sub>1', p\<^sub>2'\<rangle>\<rangle>) \<times>
       (supp \<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> \<union> supp \<langle>\<langle>p\<^sub>1', p\<^sub>2'\<rangle>\<rangle>)"
       by (simp add: Sigma_Un_distrib1 Sigma_Un_distrib2 Un_ac) blast
-    moreover note `t = pi' \<bullet> pi \<bullet> u`
+    moreover note \<open>t = pi' \<bullet> pi \<bullet> u\<close>
     ultimately have "\<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> = (pi' @ pi) \<bullet> q \<and> t = (pi' @ pi) \<bullet> u \<and>
       set (pi' @ pi) \<subseteq> (supp \<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> \<union> supp q) \<times>
         (supp \<langle>\<langle>p\<^sub>1, p\<^sub>2\<rangle>\<rangle> \<union> supp q)" using PTuple
@@ -776,10 +776,10 @@
   moreover from Let have "pat_type p = pat_type p'"
     by (simp add: trm.inject)
   moreover note distinct
-  moreover from `\<Delta> @ \<Gamma> \<turnstile> u' : U` have "valid (\<Delta> @ \<Gamma>)"
+  moreover from \<open>\<Delta> @ \<Gamma> \<turnstile> u' : U\<close> have "valid (\<Delta> @ \<Gamma>)"
     by (rule valid_typing)
   then have "valid \<Delta>" by (rule valid_appD)
-  with `\<turnstile> p' : T \<Rightarrow> \<Delta>` have "distinct (pat_vars p')"
+  with \<open>\<turnstile> p' : T \<Rightarrow> \<Delta>\<close> have "distinct (pat_vars p')"
     by (simp add: valid_distinct pat_vars_ptyping)
   ultimately have "\<exists>pi::name prm. p = pi \<bullet> p' \<and> Base u = pi \<bullet> Base u' \<and>
     set pi \<subseteq> (supp p \<union> supp p') \<times> (supp p \<union> supp p')"
@@ -788,7 +788,7 @@
     and pi': "set pi \<subseteq> (supp p \<union> supp p') \<times> (supp p \<union> supp p')"
     by (auto simp add: btrm.inject)
   from Let have "\<Gamma> \<turnstile> t : T" by (simp add: trm.inject)
-  moreover from `\<turnstile> p' : T \<Rightarrow> \<Delta>` have "\<turnstile> (pi \<bullet> p') : (pi \<bullet> T) \<Rightarrow> (pi \<bullet> \<Delta>)"
+  moreover from \<open>\<turnstile> p' : T \<Rightarrow> \<Delta>\<close> have "\<turnstile> (pi \<bullet> p') : (pi \<bullet> T) \<Rightarrow> (pi \<bullet> \<Delta>)"
     by (simp add: ptyping.eqvt)
   with pi have "\<turnstile> p : T \<Rightarrow> (pi \<bullet> \<Delta>)" by (simp add: perm_type)
   moreover from Let
@@ -805,25 +805,25 @@
   shows "\<Gamma> \<turnstile> t' : T" using assms
 proof (nominal_induct avoiding: \<Gamma> T rule: eval.strong_induct)
   case (TupleL t t' u)
-  from `\<Gamma> \<turnstile> \<langle>t, u\<rangle> : T` obtain T\<^sub>1 T\<^sub>2
+  from \<open>\<Gamma> \<turnstile> \<langle>t, u\<rangle> : T\<close> obtain T\<^sub>1 T\<^sub>2
     where "T = T\<^sub>1 \<otimes> T\<^sub>2" "\<Gamma> \<turnstile> t : T\<^sub>1" "\<Gamma> \<turnstile> u : T\<^sub>2"
     by cases (simp_all add: trm.inject)
-  from `\<Gamma> \<turnstile> t : T\<^sub>1` have "\<Gamma> \<turnstile> t' : T\<^sub>1" by (rule TupleL)
-  then have "\<Gamma> \<turnstile> \<langle>t', u\<rangle> : T\<^sub>1 \<otimes> T\<^sub>2" using `\<Gamma> \<turnstile> u : T\<^sub>2`
+  from \<open>\<Gamma> \<turnstile> t : T\<^sub>1\<close> have "\<Gamma> \<turnstile> t' : T\<^sub>1" by (rule TupleL)
+  then have "\<Gamma> \<turnstile> \<langle>t', u\<rangle> : T\<^sub>1 \<otimes> T\<^sub>2" using \<open>\<Gamma> \<turnstile> u : T\<^sub>2\<close>
     by (rule Tuple)
-  with `T = T\<^sub>1 \<otimes> T\<^sub>2` show ?case by simp
+  with \<open>T = T\<^sub>1 \<otimes> T\<^sub>2\<close> show ?case by simp
 next
   case (TupleR u u' t)
-  from `\<Gamma> \<turnstile> \<langle>t, u\<rangle> : T` obtain T\<^sub>1 T\<^sub>2
+  from \<open>\<Gamma> \<turnstile> \<langle>t, u\<rangle> : T\<close> obtain T\<^sub>1 T\<^sub>2
     where "T = T\<^sub>1 \<otimes> T\<^sub>2" "\<Gamma> \<turnstile> t : T\<^sub>1" "\<Gamma> \<turnstile> u : T\<^sub>2"
     by cases (simp_all add: trm.inject)
-  from `\<Gamma> \<turnstile> u : T\<^sub>2` have "\<Gamma> \<turnstile> u' : T\<^sub>2" by (rule TupleR)
-  with `\<Gamma> \<turnstile> t : T\<^sub>1` have "\<Gamma> \<turnstile> \<langle>t, u'\<rangle> : T\<^sub>1 \<otimes> T\<^sub>2"
+  from \<open>\<Gamma> \<turnstile> u : T\<^sub>2\<close> have "\<Gamma> \<turnstile> u' : T\<^sub>2" by (rule TupleR)
+  with \<open>\<Gamma> \<turnstile> t : T\<^sub>1\<close> have "\<Gamma> \<turnstile> \<langle>t, u'\<rangle> : T\<^sub>1 \<otimes> T\<^sub>2"
     by (rule Tuple)
-  with `T = T\<^sub>1 \<otimes> T\<^sub>2` show ?case by simp
+  with \<open>T = T\<^sub>1 \<otimes> T\<^sub>2\<close> show ?case by simp
 next
   case (Abs t t' x S)
-  from `\<Gamma> \<turnstile> (\<lambda>x:S. t) : T` `x \<sharp> \<Gamma>` obtain U where
+  from \<open>\<Gamma> \<turnstile> (\<lambda>x:S. t) : T\<close> \<open>x \<sharp> \<Gamma>\<close> obtain U where
     T: "T = S \<rightarrow> U" and U: "(x, S) # \<Gamma> \<turnstile> t : U"
     by (rule typing_case_Abs)
   from U have "(x, S) # \<Gamma> \<turnstile> t' : U" by (rule Abs)
@@ -832,27 +832,27 @@
   with T show ?case by simp
 next
   case (Beta x u S t)
-  from `\<Gamma> \<turnstile> (\<lambda>x:S. t) \<cdot> u : T` `x \<sharp> \<Gamma>`
+  from \<open>\<Gamma> \<turnstile> (\<lambda>x:S. t) \<cdot> u : T\<close> \<open>x \<sharp> \<Gamma>\<close>
   obtain "(x, S) # \<Gamma> \<turnstile> t : T" and "\<Gamma> \<turnstile> u : S"
     by cases (auto simp add: trm.inject ty.inject elim: typing_case_Abs)
   then show ?case by (rule subst_type)
 next
   case (Let p t \<theta> u)
-  from `\<Gamma> \<turnstile> (LET p = t IN u) : T` `supp p \<sharp>* \<Gamma>` `distinct (pat_vars p)`
+  from \<open>\<Gamma> \<turnstile> (LET p = t IN u) : T\<close> \<open>supp p \<sharp>* \<Gamma>\<close> \<open>distinct (pat_vars p)\<close>
   obtain U \<Delta> where "\<turnstile> p : U \<Rightarrow> \<Delta>" "\<Gamma> \<turnstile> t : U" "\<Delta> @ \<Gamma> \<turnstile> u : T"
     by (rule typing_case_Let)
-  then show ?case using `\<turnstile> p \<rhd> t \<Rightarrow> \<theta>` `supp p \<sharp>* t`
+  then show ?case using \<open>\<turnstile> p \<rhd> t \<Rightarrow> \<theta>\<close> \<open>supp p \<sharp>* t\<close>
     by (rule match_type)
 next
   case (AppL t t' u)
-  from `\<Gamma> \<turnstile> t \<cdot> u : T` obtain U where
+  from \<open>\<Gamma> \<turnstile> t \<cdot> u : T\<close> obtain U where
     t: "\<Gamma> \<turnstile> t : U \<rightarrow> T" and u: "\<Gamma> \<turnstile> u : U"
     by cases (auto simp add: trm.inject)
   from t have "\<Gamma> \<turnstile> t' : U \<rightarrow> T" by (rule AppL)
   then show ?case using u by (rule typing.App)
 next
   case (AppR u u' t)
-  from `\<Gamma> \<turnstile> t \<cdot> u : T` obtain U where
+  from \<open>\<Gamma> \<turnstile> t \<cdot> u : T\<close> obtain U where
     t: "\<Gamma> \<turnstile> t : U \<rightarrow> T" and u: "\<Gamma> \<turnstile> u : U"
     by cases (auto simp add: trm.inject)
   from u have "\<Gamma> \<turnstile> u' : U" by (rule AppR)
--- a/src/HOL/Nominal/Examples/SN.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/SN.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,9 +2,9 @@
 imports Lam_Funs
 begin
 
-text {* Strong Normalisation proof from the Proofs and Types book *}
+text \<open>Strong Normalisation proof from the Proofs and Types book\<close>
 
-section {* Beta Reduction *}
+section \<open>Beta Reduction\<close>
 
 lemma subst_rename: 
   assumes a: "c\<sharp>t1"
@@ -110,7 +110,7 @@
 by (nominal_induct M M' avoiding: x N rule: Beta.strong_induct)
    (auto simp add: fresh_atm subst_lemma fresh_fact)
 
-section {* types *}
+section \<open>types\<close>
 
 nominal_datatype ty =
     TVar "nat"
@@ -156,7 +156,7 @@
 nominal_inductive typing
   by (simp_all add: abs_fresh fresh_ty)
 
-subsection {* a fact about beta *}
+subsection \<open>a fact about beta\<close>
 
 definition "NORMAL" :: "lam \<Rightarrow> bool" where
   "NORMAL t \<equiv> \<not>(\<exists>t'. t\<longrightarrow>\<^sub>\<beta> t')"
@@ -171,7 +171,7 @@
   thus "NORMAL (Var a)" by (auto simp add: NORMAL_def)
 qed
 
-text {* Inductive version of Strong Normalisation *}
+text \<open>Inductive version of Strong Normalisation\<close>
 inductive 
   SN :: "lam \<Rightarrow> bool"
 where
@@ -223,7 +223,7 @@
 apply(blast)
 done
 
-section {* Candidates *}
+section \<open>Candidates\<close>
 
 nominal_primrec
   RED :: "ty \<Rightarrow> lam set"
@@ -232,7 +232,7 @@
 | "RED (\<tau>\<rightarrow>\<sigma>) =   {t. \<forall>u. (u\<in>RED \<tau> \<longrightarrow> (App t u)\<in>RED \<sigma>)}"
 by (rule TrueI)+
 
-text {* neutral terms *}
+text \<open>neutral terms\<close>
 definition NEUT :: "lam \<Rightarrow> bool" where
   "NEUT t \<equiv> (\<exists>a. t = Var a) \<or> (\<exists>t1 t2. t = App t1 t2)" 
 
@@ -270,7 +270,7 @@
   then show "SN (fst_app (App t s))" by simp
 qed
 
-section {* Candidates *}
+section \<open>Candidates\<close>
 
 definition "CR1" :: "ty \<Rightarrow> bool" where
   "CR1 \<tau> \<equiv> \<forall>t. (t\<in>RED \<tau> \<longrightarrow> SN t)"
@@ -338,7 +338,7 @@
   ultimately show "App t u \<in> RED \<sigma>" using c3 by (simp add: CR3_def)
 qed 
 
-text {* properties of the candiadates *}
+text \<open>properties of the candidates\<close>
 lemma RED_props: 
   shows "CR1 \<tau>" and "CR2 \<tau>" and "CR3 \<tau>"
 proof (nominal_induct \<tau> rule: ty.strong_induct)
@@ -390,10 +390,10 @@
   }
 qed
    
-text {* 
+text \<open>
   the next lemma is not as simple as on paper, probably because of 
   the stronger double_SN induction 
-*} 
+\<close> 
 lemma abs_RED: 
   assumes asm: "\<forall>s\<in>RED \<tau>. t[x::=s]\<in>RED \<sigma>"
   shows "Lam [x].t\<in>RED (\<tau>\<rightarrow>\<sigma>)"
@@ -509,7 +509,7 @@
   shows "\<theta><t> \<in> RED \<tau>"
 using a b
 proof(nominal_induct  avoiding: \<theta> rule: typing.strong_induct)
-  case (t3 a \<Gamma> \<sigma> t \<tau> \<theta>) --"lambda case"
+  case (t3 a \<Gamma> \<sigma> t \<tau> \<theta>) \<comment>"lambda case"
   have ih: "\<And>\<theta>. \<theta> closes ((a,\<sigma>)#\<Gamma>) \<Longrightarrow> \<theta><t> \<in> RED \<tau>" by fact
   have \<theta>_cond: "\<theta> closes \<Gamma>" by fact
   have fresh: "a\<sharp>\<Gamma>" "a\<sharp>\<theta>" by fact+
@@ -519,7 +519,7 @@
   then show "\<theta><(Lam [a].t)> \<in> RED (\<sigma> \<rightarrow> \<tau>)" using fresh by simp
 qed auto
 
-section {* identity substitution generated from a context \<Gamma> *}
+section \<open>identity substitution generated from a context \<Gamma>\<close>
 fun
   "id" :: "(name\<times>ty) list \<Rightarrow> (name\<times>lam) list"
 where
@@ -547,12 +547,12 @@
   shows "(id \<Gamma>) closes \<Gamma>"
 apply(auto)
 apply(simp add: id_maps)
-apply(subgoal_tac "CR3 T") --"A"
+apply(subgoal_tac "CR3 T") \<comment>"A"
 apply(drule CR3_implies_CR4)
 apply(simp add: CR4_def)
 apply(drule_tac x="Var x" in spec)
 apply(force simp add: NEUT_def NORMAL_Var)
---"A"
+\<comment>"A"
 apply(rule RED_props)
 done
 
--- a/src/HOL/Nominal/Examples/SOS.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/SOS.thy	Thu May 26 17:51:22 2016 +0200
@@ -16,7 +16,7 @@
 
 atom_decl name
 
-text {* types and terms *}
+text \<open>types and terms\<close>
 nominal_datatype ty = 
     TVar "nat"
   | Arrow "ty" "ty" ("_\<rightarrow>_" [100,100] 100)
@@ -33,7 +33,7 @@
 by (induct T rule: ty.induct)
    (auto simp add: fresh_nat)
 
-text {* Parallel and single substitution. *}
+text \<open>Parallel and single substitution.\<close>
 fun
   lookup :: "(name\<times>trm) list \<Rightarrow> name \<Rightarrow> trm"   
 where
@@ -94,7 +94,7 @@
   by (nominal_induct t rule: trm.strong_induct) 
      (auto simp add: fresh_list_nil)
 
-text {* Single substitution *}
+text \<open>Single substitution\<close>
 abbreviation 
   subst :: "trm \<Rightarrow> name \<Rightarrow> trm \<Rightarrow> trm" ("_[_::=_]" [100,100,100] 100)
 where 
@@ -126,7 +126,7 @@
 by (nominal_induct e avoiding: \<theta> x e' rule: trm.strong_induct)
    (auto simp add: fresh_list_cons fresh_atm forget lookup_fresh lookup_fresh')
 
-text {* Typing Judgements *}
+text \<open>Typing Judgements\<close>
 
 inductive
   valid :: "(name\<times>ty) list \<Rightarrow> bool"
@@ -158,7 +158,7 @@
 using a1 a2 a3
 by (induct) (auto simp add: fresh_set fresh_prod fresh_atm)
 
-text {* Typing Relation *}
+text \<open>Typing Relation\<close>
 
 inductive
   typing :: "(name\<times>ty) list\<Rightarrow>trm\<Rightarrow>ty\<Rightarrow>bool" ("_ \<turnstile> _ : _" [60,60,60] 60) 
@@ -244,7 +244,7 @@
 using a b type_substitutivity_aux[where \<Delta>="[]"]
 by (auto)
 
-text {* Values *}
+text \<open>Values\<close>
 inductive
   val :: "trm\<Rightarrow>bool" 
 where
@@ -258,7 +258,7 @@
   "\<not> val (Var x)"
   by (auto elim: val.cases)
 
-text {* Big-Step Evaluation *}
+text \<open>Big-Step Evaluation\<close>
 
 inductive
   big :: "trm\<Rightarrow>trm\<Rightarrow>bool" ("_ \<Down> _" [80,80] 80) 
@@ -428,7 +428,7 @@
   shows "v \<Down> v"
 using a by (simp add: values_reduce_to_themselves Vs_are_values)
 
-text {* '\<theta> maps x to e' asserts that \<theta> substitutes x with e *}
+text \<open>'\<theta> maps x to e' asserts that \<theta> substitutes x with e\<close>
 abbreviation 
   mapsto :: "(name\<times>trm) list \<Rightarrow> name \<Rightarrow> trm \<Rightarrow> bool" ("_ maps _ to _" [55,55,55] 55) 
 where
--- a/src/HOL/Nominal/Examples/Standardization.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Standardization.thy	Thu May 26 17:51:22 2016 +0200
@@ -3,18 +3,18 @@
     Copyright   2005, 2008 TU Muenchen
 *)
 
-section {* Standardization *}
+section \<open>Standardization\<close>
 
 theory Standardization
 imports "../Nominal"
 begin
 
-text {*
+text \<open>
 The proof of the standardization theorem, as well as most of the theorems about
-lambda calculus in the following sections, are taken from @{text "HOL/Lambda"}.
-*}
+lambda calculus in the following sections, is taken from \<open>HOL/Lambda\<close>.
+\<close>
 
-subsection {* Lambda terms *}
+subsection \<open>Lambda terms\<close>
 
 atom_decl name
 
@@ -118,7 +118,7 @@
   "s \<rightarrow>\<^sub>\<beta>\<^sup>* t \<equiv> beta\<^sup>*\<^sup>* s t"
 
 
-subsection {* Application of a term to a list of terms *}
+subsection \<open>Application of a term to a list of terms\<close>
 
 abbreviation
   list_application :: "lam \<Rightarrow> lam list \<Rightarrow> lam"  (infixl "\<degree>\<degree>" 150) where
@@ -196,7 +196,7 @@
   by (induct ts rule: rev_induct)
     (auto simp add: fresh_list_append fresh_list_nil fresh_list_cons)
 
-text {* A customized induction schema for @{text "\<degree>\<degree>"}. *}
+text \<open>A customized induction schema for \<open>\<degree>\<degree>\<close>.\<close>
 
 lemma lem:
   assumes "\<And>n ts (z::'a::fs_name). (\<And>z. \<forall>t \<in> set ts. P z t) \<Longrightarrow> P z (Var n \<degree>\<degree> ts)"
@@ -250,7 +250,7 @@
   done
 
 
-subsection {* Congruence rules *}
+subsection \<open>Congruence rules\<close>
 
 lemma apps_preserves_beta [simp]:
     "r \<rightarrow>\<^sub>\<beta> s \<Longrightarrow> r \<degree>\<degree> ss \<rightarrow>\<^sub>\<beta> s \<degree>\<degree> ss"
@@ -273,7 +273,7 @@
   by (blast intro!: rtrancl_beta_AppL rtrancl_beta_AppR intro: rtranclp_trans)
 
 
-subsection {* Lifting an order to lists of elements *}
+subsection \<open>Lifting an order to lists of elements\<close>
 
 definition
   step1 :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
@@ -339,7 +339,7 @@
   done
 
 
-subsection {* Lifting beta-reduction to lists *}
+subsection \<open>Lifting beta-reduction to lists\<close>
 
 abbreviation
   list_beta :: "lam list \<Rightarrow> lam list \<Rightarrow> bool"  (infixl "[\<rightarrow>\<^sub>\<beta>]\<^sub>1" 50) where
@@ -417,12 +417,12 @@
   done
 
 
-subsection {* Standard reduction relation *}
+subsection \<open>Standard reduction relation\<close>
 
-text {*
+text \<open>
 Based on lecture notes by Ralph Matthes,
 original proof idea due to Ralph Loader.
-*}
+\<close>
 
 declare listrel_mono [mono_set]
 
@@ -514,7 +514,7 @@
   from Abs(4) have "ss [\<rightarrow>\<^sub>s] ss'" by (rule listrelp_conj1)
   moreover have "[s] [\<rightarrow>\<^sub>s] [s']" by (iprover intro: s listrelp.intros)
   ultimately have "ss @ [s] [\<rightarrow>\<^sub>s] ss' @ [s']" by (rule listrelp_app)
-  with `r \<rightarrow>\<^sub>s r'` have "(Lam [x].r) \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s (Lam [x].r') \<degree>\<degree> (ss' @ [s'])"
+  with \<open>r \<rightarrow>\<^sub>s r'\<close> have "(Lam [x].r) \<degree>\<degree> (ss @ [s]) \<rightarrow>\<^sub>s (Lam [x].r') \<degree>\<degree> (ss' @ [s'])"
     by (rule better_sred_Abs)
   thus ?case by (simp only: app_last)
 next
@@ -553,9 +553,9 @@
 next
   case (Abs y ss ss' r r')
   then have "r[x::=s] \<rightarrow>\<^sub>s r'[x::=s']" by fast
-  moreover from Abs(8) `s \<rightarrow>\<^sub>s s'` have "map (\<lambda>t. t[x::=s]) ss [\<rightarrow>\<^sub>s] map (\<lambda>t. t[x::=s']) ss'"
+  moreover from Abs(8) \<open>s \<rightarrow>\<^sub>s s'\<close> have "map (\<lambda>t. t[x::=s]) ss [\<rightarrow>\<^sub>s] map (\<lambda>t. t[x::=s']) ss'"
     by induct (auto intro: listrelp.intros Abs)
-  ultimately show ?case using Abs(6) `y \<sharp> x` `y \<sharp> s` `y \<sharp> s'`
+  ultimately show ?case using Abs(6) \<open>y \<sharp> x\<close> \<open>y \<sharp> s\<close> \<open>y \<sharp> s'\<close>
     by simp (rule better_sred_Abs)
 next
   case (Beta y u ss t r)
@@ -607,10 +607,10 @@
   with r'' show ?case by simp
 next
   case (Abs x ss ss' r r')
-  from `(Lam [x].r') \<degree>\<degree> ss' \<rightarrow>\<^sub>\<beta> r''` show ?case
+  from \<open>(Lam [x].r') \<degree>\<degree> ss' \<rightarrow>\<^sub>\<beta> r''\<close> show ?case
   proof (cases rule: apps_betasE [where x=x])
     case (appL s)
-    then obtain r''' where s: "s = (Lam [x].r''')" and r''': "r' \<rightarrow>\<^sub>\<beta> r'''" using `x \<sharp> r''`
+    then obtain r''' where s: "s = (Lam [x].r''')" and r''': "r' \<rightarrow>\<^sub>\<beta> r'''" using \<open>x \<sharp> r''\<close>
       by (cases rule: beta.strong_cases) (auto simp add: abs_fresh lam.inject alpha)
     from r''' have "r \<rightarrow>\<^sub>s r'''" by (blast intro: Abs)
     moreover from Abs have "ss [\<rightarrow>\<^sub>s] ss'" by (iprover dest: listrelp_conj1)
@@ -618,9 +618,9 @@
     with appL s show "(Lam [x].r) \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by simp
   next
     case (appR rs')
-    from Abs(6) [simplified] `ss' [\<rightarrow>\<^sub>\<beta>]\<^sub>1 rs'`
+    from Abs(6) [simplified] \<open>ss' [\<rightarrow>\<^sub>\<beta>]\<^sub>1 rs'\<close>
     have "ss [\<rightarrow>\<^sub>s] rs'" by (rule lemma4_aux)
-    with `r \<rightarrow>\<^sub>s r'` have "(Lam [x].r) \<degree>\<degree> ss \<rightarrow>\<^sub>s (Lam [x].r') \<degree>\<degree> rs'" by (rule better_sred_Abs)
+    with \<open>r \<rightarrow>\<^sub>s r'\<close> have "(Lam [x].r) \<degree>\<degree> ss \<rightarrow>\<^sub>s (Lam [x].r') \<degree>\<degree> rs'" by (rule better_sred_Abs)
     with appR show "(Lam [x].r) \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by simp
   next
     case (beta t u' us')
@@ -630,7 +630,7 @@
     from Abs(6) ss' obtain u us where
       ss: "ss = u # us" and u: "u \<rightarrow>\<^sub>s u'" and us: "us [\<rightarrow>\<^sub>s] us'"
       by cases (auto dest!: listrelp_conj1)
-    have "r[x::=u] \<rightarrow>\<^sub>s r'[x::=u']" using `r \<rightarrow>\<^sub>s r'` and u by (rule lemma3)
+    have "r[x::=u] \<rightarrow>\<^sub>s r'[x::=u']" using \<open>r \<rightarrow>\<^sub>s r'\<close> and u by (rule lemma3)
     with us have "r[x::=u] \<degree>\<degree> us \<rightarrow>\<^sub>s r'[x::=u'] \<degree>\<degree> us'" by (rule lemma1')
     hence "(Lam [x].r) \<degree> u \<degree>\<degree> us \<rightarrow>\<^sub>s r'[x::=u'] \<degree>\<degree> us'" by (rule better_sred_Beta)
     with ss r'' Lam_eq show "(Lam [x].r) \<degree>\<degree> ss \<rightarrow>\<^sub>s r''" by (simp add: lam.inject alpha)
@@ -647,7 +647,7 @@
   by induct (iprover intro: refl_sred lemma4)+
 
 
-subsection {* Terms in normal form *}
+subsection \<open>Terms in normal form\<close>
 
 lemma listsp_eqvt [eqvt]:
   assumes xs: "listsp p (xs::'a::pt_name list)"
@@ -681,9 +681,9 @@
   thus ?thesis by simp
 qed
 
-text {*
+text \<open>
 @{term NF} characterizes exactly the terms that are in normal form.
-*}
+\<close>
   
 lemma NF_eq: "NF t = (\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t')"
 proof
@@ -740,7 +740,7 @@
 qed
 
 
-subsection {* Leftmost reduction and weakly normalizing terms *}
+subsection \<open>Leftmost reduction and weakly normalizing terms\<close>
 
 inductive
   lred :: "lam \<Rightarrow> lam \<Rightarrow> bool"  (infixl "\<rightarrow>\<^sub>l" 50)
@@ -761,13 +761,13 @@
   then show ?case by (rule sred.Var)
 next
   case (Abs r r' x)
-  from `r \<rightarrow>\<^sub>s r'`
+  from \<open>r \<rightarrow>\<^sub>s r'\<close>
   have "(Lam [x].r) \<degree>\<degree> [] \<rightarrow>\<^sub>s (Lam [x].r') \<degree>\<degree> []" using listrelp.Nil
     by (rule better_sred_Abs)
   then show ?case by simp
 next
   case (Beta r x s ss t)
-  from `r[x::=s] \<degree>\<degree> ss \<rightarrow>\<^sub>s t`
+  from \<open>r[x::=s] \<degree>\<degree> ss \<rightarrow>\<^sub>s t\<close>
   show ?case by (rule better_sred_Beta)
 qed
 
@@ -810,7 +810,7 @@
   shows "NF r' \<Longrightarrow> r \<rightarrow>\<^sub>l r'" using r
 proof induct
   case (Var rs rs' x)
-  from `NF (Var x \<degree>\<degree> rs')` have "listsp NF rs'"
+  from \<open>NF (Var x \<degree>\<degree> rs')\<close> have "listsp NF rs'"
     by cases simp_all
   with Var(1) have "rs [\<rightarrow>\<^sub>l] rs'"
   proof induct
@@ -824,7 +824,7 @@
   thus ?case by (rule lred.Var)
 next
   case (Abs x ss ss' r r')
-  from `NF ((Lam [x].r') \<degree>\<degree> ss')`
+  from \<open>NF ((Lam [x].r') \<degree>\<degree> ss')\<close>
   have ss': "ss' = []" by (rule Abs_NF)
   from Abs(4) have ss: "ss = []" using ss'
     by cases simp_all
--- a/src/HOL/Nominal/Examples/Support.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Support.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
   imports "../Nominal" 
 begin
 
-text {* 
+text \<open>
   An example showing that in general
 
   x\<sharp>(A \<union> B) does not imply  x\<sharp>A and  x\<sharp>B
@@ -11,31 +11,31 @@
   the set of odd atoms. Then A \<union> B, that is the set of 
   all atoms, has empty support. The sets A and B, however, 
   each have the set of all atoms as their support. 
-*}
+\<close>
 
 atom_decl atom
 
-text {* The set of even atoms. *}
+text \<open>The set of even atoms.\<close>
 abbreviation
   EVEN :: "atom set"
 where
   "EVEN \<equiv> {atom n | n. \<exists>i. n=2*i}"
 
-text {* The set of odd atoms: *}
+text \<open>The set of odd atoms:\<close>
 abbreviation  
   ODD :: "atom set"
 where
   "ODD \<equiv> {atom n | n. \<exists>i. n=2*i+1}"
 
-text {* An atom is either even or odd. *}
+text \<open>An atom is either even or odd.\<close>
 lemma even_or_odd:
   fixes n :: nat
   shows "\<exists>i. (n = 2*i) \<or> (n=2*i+1)"
   by (induct n) (presburger)+
 
-text {* 
+text \<open>
   The union of even and odd atoms is the set of all atoms. 
-  (Unfortunately I do not know a simpler proof of this fact.) *}
+  (Unfortunately I do not know a simpler proof of this fact.)\<close>
 lemma EVEN_union_ODD:
   shows "EVEN \<union> ODD = UNIV"
   using even_or_odd
@@ -49,15 +49,15 @@
   finally show "EVEN \<union> ODD = UNIV" by simp
 qed
 
-text {* The sets of even and odd atoms are disjunct. *}
+text \<open>The sets of even and odd atoms are disjoint.\<close>
 lemma EVEN_intersect_ODD:
   shows "EVEN \<inter> ODD = {}"
   using even_or_odd
   by (auto) (presburger)
 
-text {* 
+text \<open>
   The preceding two lemmas help us to prove 
-  the following two useful equalities: *}
+  the following two useful equalities:\<close>
 
 lemma UNIV_subtract:
   shows "UNIV - EVEN = ODD"
@@ -65,7 +65,7 @@
   using EVEN_union_ODD EVEN_intersect_ODD
   by (blast)+
 
-text {* The sets EVEN and ODD are infinite. *}
+text \<open>The sets EVEN and ODD are infinite.\<close>
 lemma EVEN_ODD_infinite:
   shows "infinite EVEN"
   and   "infinite ODD"
@@ -80,10 +80,10 @@
   then show "\<exists>f::nat\<Rightarrow>atom. inj f \<and> range f \<subseteq> ODD" by (rule_tac exI)
 qed
 
-text {* 
+text \<open>
   A general fact about a set S of atoms that is both infinite and 
   coinfinite: such a set has all atoms as its support. Steve Zdancewic 
-  helped with proving this fact. *}
+  helped with proving this fact.\<close>
 
 lemma supp_infinite_coinfinite:
   fixes S::"atom set"
@@ -114,16 +114,16 @@
   then show "(supp S) = (UNIV::atom set)" by auto
 qed
 
-text {* As a corollary we get that EVEN and ODD have infinite support. *}
+text \<open>As a corollary we get that EVEN and ODD have the set of all atoms as their support.\<close>
 lemma EVEN_ODD_supp:
   shows "supp EVEN = (UNIV::atom set)"
   and   "supp ODD  = (UNIV::atom set)"
   using supp_infinite_coinfinite UNIV_subtract EVEN_ODD_infinite
   by simp_all
 
-text {* 
+text \<open>
   The set of all atoms has empty support, since any swapping leaves 
-  this set unchanged. *}
+  this set unchanged.\<close>
 
 lemma UNIV_supp:
   shows "supp (UNIV::atom set) = ({}::atom set)"
@@ -133,7 +133,7 @@
   then show "supp (UNIV::atom set) = ({}::atom set)" by (simp add: supp_def)
 qed
 
-text {* Putting everything together. *}
+text \<open>Putting everything together.\<close>
 theorem EVEN_ODD_freshness:
   fixes x::"atom"
   shows "x\<sharp>(EVEN \<union> ODD)"
@@ -141,6 +141,6 @@
   and   "\<not>x\<sharp>ODD"
   by (auto simp only: fresh_def EVEN_union_ODD EVEN_ODD_supp UNIV_supp)
 
-text {* Moral: support is a sublte notion. *}
+text \<open>Moral: support is a subtle notion.\<close>
 
 end
--- a/src/HOL/Nominal/Examples/Type_Preservation.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Type_Preservation.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,13 +2,13 @@
   imports "../Nominal"
 begin
 
-text {*
+text \<open>
 
   This theory shows how to prove the type preservation
   property for the simply-typed lambda-calculus and 
   beta-reduction.
  
-*}
+\<close>
 
 atom_decl name
 
@@ -17,7 +17,7 @@
 | App "lam" "lam" 
 | Lam "\<guillemotleft>name\<guillemotright>lam" ("Lam [_]._")
 
-text {* Capture-Avoiding Substitution *}
+text \<open>Capture-Avoiding Substitution\<close>
 
 nominal_primrec
   subst :: "lam \<Rightarrow> name \<Rightarrow> lam \<Rightarrow> lam"  ("_[_::=_]")
@@ -43,7 +43,7 @@
 by (nominal_induct t avoiding: z y s rule: lam.strong_induct)
    (auto simp add: abs_fresh fresh_prod fresh_atm)
 
-text {* Types *}
+text \<open>Types\<close>
 
 nominal_datatype ty =
     TVar "string"
@@ -56,7 +56,7 @@
 by (induct T rule: ty.induct)
    (auto simp add: fresh_string)
 
-text {* Typing Contexts *}
+text \<open>Typing Contexts\<close>
 
 type_synonym ctx = "(name\<times>ty) list"
 
@@ -65,7 +65,7 @@
 where
   "\<Gamma>\<^sub>1 \<subseteq> \<Gamma>\<^sub>2 \<equiv> \<forall>x. x \<in> set \<Gamma>\<^sub>1 \<longrightarrow> x \<in> set \<Gamma>\<^sub>2"
 
-text {* Validity of Typing Contexts *}
+text \<open>Validity of Typing Contexts\<close>
 
 inductive
   valid :: "(name\<times>ty) list \<Rightarrow> bool"
@@ -99,7 +99,7 @@
 using a1 a2 a3
 by (induct) (auto simp add: fresh_set fresh_prod fresh_atm)
 
-text {* Typing Relation *}
+text \<open>Typing Relation\<close>
 
 inductive
   typing :: "ctx \<Rightarrow> lam \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _") 
@@ -166,7 +166,7 @@
 using a b
 by (auto intro: type_substitution_aux[where \<Delta>="[]",simplified])
 
-text {* Beta Reduction *}
+text \<open>Beta Reduction\<close>
 
 inductive 
   "beta" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>\<beta> _")
--- a/src/HOL/Nominal/Examples/VC_Condition.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/VC_Condition.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,15 +2,15 @@
 imports "../Nominal" 
 begin
 
-text {* 
+text \<open>
   We give here two examples showing that if we use the variable  
   convention carelessly in rule inductions, we might end 
   up with faulty lemmas. The point is that the examples
   are not variable-convention compatible and therefore in the 
   nominal data package one is protected from such bogus reasoning.
-*}
+\<close>
 
-text {* We define alpha-equated lambda-terms as usual. *}
+text \<open>We define alpha-equated lambda-terms as usual.\<close>
 atom_decl name 
 
 nominal_datatype lam = 
@@ -18,13 +18,13 @@
   | App "lam" "lam"
   | Lam "\<guillemotleft>name\<guillemotright>lam" ("Lam [_]._" [100,100] 100)
 
-text {*
+text \<open>
   The inductive relation 'unbind' unbinds the top-most  
   binders of a lambda-term; this relation is obviously  
   not a function, since it does not respect alpha-      
   equivalence. However, as a relation 'unbind' is ok and 
   a similar relation has been used in our formalisation 
-  of the algorithm W. *}
+  of the algorithm W.\<close>
 
 inductive
   unbind :: "lam \<Rightarrow> name list \<Rightarrow> lam \<Rightarrow> bool" ("_ \<mapsto> _,_" [60,60,60] 60)
@@ -33,10 +33,10 @@
 | u_app: "(App t1 t2) \<mapsto> [],(App t1 t2)"
 | u_lam: "t\<mapsto>xs,t' \<Longrightarrow> (Lam [x].t) \<mapsto> (x#xs),t'"
 
-text {*
+text \<open>
   We can show that Lam [x].Lam [x].Var x unbinds to [x,x],Var x 
   and also to [y,z],Var z (though the proof for the second is a 
-  bit clumsy). *} 
+  bit clumsy).\<close> 
 
 lemma unbind_lambda_lambda1: 
   shows "Lam [x].Lam [x].(Var x)\<mapsto>[x,x],(Var x)"
@@ -54,10 +54,10 @@
   show "Lam [x].Lam [x].(Var x)\<mapsto>[y,z],(Var z)" by simp
 qed
 
-text {* Unbind is equivariant ...*}
+text \<open>Unbind is equivariant ...\<close>
 equivariance unbind
 
-text {*
+text \<open>
   ... but it is not variable-convention compatible (see Urban, 
   Berghofer, Norrish [2007]). This condition requires, for rule u_lam, that 
   the binder x is not a free variable in the rule's conclusion. 
@@ -65,15 +65,15 @@
   strong induction principle for 'unbind' - that means Isabelle does not 
   allow us to use the variable convention in induction proofs over 'unbind'. 
   We can, however, force Isabelle to derive the strengthening induction 
-  principle and see what happens. *}
+  principle and see what happens.\<close>
 
 nominal_inductive unbind
   sorry
 
-text {*
+text \<open>
   To obtain a faulty lemma, we introduce the function 'bind'
   which takes a list of names and abstracts them away in 
-  a given lambda-term. *}
+  a given lambda-term.\<close>
 
 fun 
   bind :: "name list \<Rightarrow> lam \<Rightarrow> lam"
@@ -81,20 +81,20 @@
   "bind [] t = t"
 | "bind (x#xs) t = Lam [x].(bind xs t)"
 
-text {*
+text \<open>
   Although not necessary for our main argument below, we can 
-  easily prove that bind undoes the unbinding. *}
+  easily prove that bind undoes the unbinding.\<close>
 
 lemma bind_unbind:
   assumes a: "t \<mapsto> xs,t'"
   shows "t = bind xs t'"
 using a by (induct) (auto)
 
-text {*
+text \<open>
   The next lemma shows by induction on xs that if x is a free 
   variable in t, and x does not occur in xs, then x is a free 
   variable in bind xs t. In the nominal tradition we formulate    
-  'is a free variable in' as 'is not fresh for'. *}
+  'is a free variable in' as 'is not fresh for'.\<close>
 
 lemma free_variable:
   fixes x::"name"
@@ -104,13 +104,13 @@
 by (induct xs)
    (auto simp add: fresh_list_cons abs_fresh fresh_atm)
 
-text {*
+text \<open>
   Now comes the first faulty lemma. It is derived using the     
   variable convention (i.e. our strong induction principle). 
   This faulty lemma states that if t unbinds to x#xs and t', 
   and x is a free variable in t', then it is also a free 
   variable in bind xs t'. We show this lemma by assuming that 
-  the binder x is fresh w.r.t. to the xs unbound previously. *}   
+  the binder x is fresh w.r.t. the xs unbound previously.\<close>   
 
 lemma faulty1:
   assumes a: "t\<mapsto>(x#xs),t'"
@@ -119,10 +119,10 @@
 by (nominal_induct t xs'\<equiv>"x#xs" t' avoiding: xs rule: unbind.strong_induct)
    (simp_all add: free_variable)
 
-text {*
+text \<open>
   Obviously this lemma is bogus. For example, consider the 
   case Lam [x].Lam [x].(Var x) \<mapsto> [x,x],(Var x). 
-  As a result, we can prove False. *}
+  As a result, we can prove False.\<close>
 
 lemma false1:
   shows "False"
@@ -137,10 +137,10 @@
   show "False" by simp
 qed
    
-text {* 
+text \<open>
   The next example is slightly simpler, but looks more
   contrived than 'unbind'. This example, called 'strip', just 
-  strips off the top-most binders from lambdas. *}
+  strips off the top-most binders from lambdas.\<close>
 
 inductive
   strip :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<rightarrow> _" [60,60] 60)
@@ -149,18 +149,18 @@
 | s_app: "(App t1 t2) \<rightarrow> (App t1 t2)"
 | s_lam: "t \<rightarrow> t' \<Longrightarrow> (Lam [x].t) \<rightarrow> t'"
 
-text {* 
+text \<open>
   The relation is equivariant but we again have to use 
-  sorry to derive a strong induction principle. *}
+  sorry to derive a strong induction principle.\<close>
 
 equivariance strip
 
 nominal_inductive strip
   sorry
 
-text {*
+text \<open>
   The second faulty lemma shows that a variable that is fresh
-  for a term is also fresh for this term after striping. *}
+  for a term is also fresh for this term after stripping.\<close>
 
 lemma faulty2:
   fixes x::"name"
@@ -170,7 +170,7 @@
 by (nominal_induct t t'\<equiv>t' avoiding: t' rule: strip.strong_induct)
    (auto simp add: abs_fresh)
 
-text {* Obviously Lam [x].Var x is a counter example to this lemma. *}
+text \<open>Obviously Lam [x].Var x is a counterexample to this lemma.\<close>
 
 lemma false2:
   shows "False"
@@ -183,8 +183,8 @@
   then show "False" by (simp add: fresh_atm)
 qed 
 
-text {* A similar effect can be achieved by using naive inversion 
-  on rules. *}
+text \<open>A similar effect can be achieved by using naive inversion 
+  on rules.\<close>
 
 lemma false3: 
   shows "False"
@@ -202,9 +202,9 @@
     by (cases) (simp_all add: lam.inject fresh_atm)
 qed
 
-text {*
+text \<open>
   Moral: Who would have thought that the variable convention 
   is in general an unsound reasoning principle.
-  *}
+\<close>
 
 end
--- a/src/HOL/Nominal/Examples/W.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/W.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,7 +2,7 @@
 imports "../Nominal"
 begin
 
-text {* Example for strong induction rules avoiding sets of atoms. *}
+text \<open>Example for strong induction rules avoiding sets of atoms.\<close>
 
 atom_decl tvar var 
 
@@ -56,7 +56,7 @@
 type_synonym 
   Ctxt  = "(var\<times>tyS) list" 
 
-text {* free type variables *}
+text \<open>free type variables\<close>
 
 consts ftv :: "'a \<Rightarrow> tvar list"
 
@@ -140,7 +140,7 @@
   shows "pi\<bullet>(ftv \<Gamma>) = ftv (pi\<bullet>\<Gamma>)"
 by (induct \<Gamma>) (auto simp add: eqvts)
 
-text {* Valid *}
+text \<open>Valid\<close>
 inductive
   valid :: "Ctxt \<Rightarrow> bool"
 where
@@ -149,7 +149,7 @@
 
 equivariance valid
 
-text {* General *}
+text \<open>General\<close>
 primrec gen :: "ty \<Rightarrow> tvar list \<Rightarrow> tyS" where
   "gen T [] = Ty T"
 | "gen T (X#Xs) = \<forall>[X].(gen T Xs)"
@@ -171,7 +171,7 @@
   shows "pi\<bullet>(close \<Gamma> T) = close (pi\<bullet>\<Gamma>) (pi\<bullet>T)"
 by (simp_all only: eqvts)
   
-text {* Substitution *}
+text \<open>Substitution\<close>
 
 type_synonym Subst = "(tvar\<times>ty) list"
 
@@ -301,7 +301,7 @@
 by (nominal_induct T rule: ty.strong_induct)
    (auto simp add: fresh_atm)
 
-text {* instance of a type scheme *}
+text \<open>instance of a type scheme\<close>
 inductive
   inst :: "ty \<Rightarrow> tyS \<Rightarrow> bool"("_ \<prec> _" [50,51] 50)  
 where
@@ -352,7 +352,7 @@
 done
 
 
-text{* typing judgements *}
+text\<open>typing judgements\<close>
 inductive
   typing :: "Ctxt \<Rightarrow> trm \<Rightarrow> ty \<Rightarrow> bool" (" _ \<turnstile> _ : _ " [60,60,60] 60) 
 where
--- a/src/HOL/Nominal/Examples/Weakening.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Examples/Weakening.thy	Thu May 26 17:51:22 2016 +0200
@@ -2,14 +2,14 @@
   imports "../Nominal" 
 begin
 
-text {* 
+text \<open>
   A simple proof of the Weakening Property in the simply-typed 
   lambda-calculus. The proof is simple, because we can make use
-  of the variable convention. *}
+  of the variable convention.\<close>
 
 atom_decl name 
 
-text {* Terms and Types *}
+text \<open>Terms and Types\<close>
 
 nominal_datatype lam = 
     Var "name"
@@ -27,9 +27,9 @@
 by (nominal_induct T rule: ty.strong_induct)
    (auto simp add: fresh_string)
 
-text {* 
+text \<open>
   Valid contexts (at the moment we have no type for finite 
-  sets yet, therefore typing-contexts are lists). *}
+  sets yet, so typing contexts are lists).\<close>
 
 inductive
   valid :: "(name\<times>ty) list \<Rightarrow> bool"
@@ -39,7 +39,7 @@
 
 equivariance valid
 
-text{* Typing judgements *}
+text\<open>Typing judgements\<close>
 
 inductive
   typing :: "(name\<times>ty) list\<Rightarrow>lam\<Rightarrow>ty\<Rightarrow>bool" ("_ \<turnstile> _ : _" [60,60,60] 60) 
@@ -48,27 +48,27 @@
   | t_App[intro]: "\<lbrakk>\<Gamma> \<turnstile> t1 : T1\<rightarrow>T2; \<Gamma> \<turnstile> t2 : T1\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> App t1 t2 : T2"
   | t_Lam[intro]: "\<lbrakk>x\<sharp>\<Gamma>;(x,T1)#\<Gamma> \<turnstile> t : T2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Lam [x].t : T1\<rightarrow>T2"
 
-text {* 
+text \<open>
   We derive the strong induction principle for the typing 
   relation (this induction principle has the variable convention 
-  already built-in). *}
+  already built-in).\<close>
 
 equivariance typing
 nominal_inductive typing
   by (simp_all add: abs_fresh ty_fresh)
 
-text {* Abbreviation for the notion of subcontexts. *}
+text \<open>Abbreviation for the notion of subcontexts.\<close>
 
 abbreviation
   "sub_context" :: "(name\<times>ty) list \<Rightarrow> (name\<times>ty) list \<Rightarrow> bool" ("_ \<subseteq> _" [60,60] 60) 
 where
   "\<Gamma>1 \<subseteq> \<Gamma>2 \<equiv> \<forall>x T. (x,T)\<in>set \<Gamma>1 \<longrightarrow> (x,T)\<in>set \<Gamma>2"
 
-text {* Now it comes: The Weakening Lemma *}
+text \<open>Now it comes: The Weakening Lemma\<close>
 
-text {*
+text \<open>
   The first version is, after setting up the induction, 
-  completely automatic except for use of atomize. *}
+  completely automatic except for the use of atomize.\<close>
 
 lemma weakening_version1: 
   fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
@@ -80,9 +80,9 @@
 by (nominal_induct \<Gamma>1 t T avoiding: \<Gamma>2 rule: typing.strong_induct)
    (auto | atomize)+
 
-text {* 
+text \<open>
   The second version gives the details for the variable
-  and lambda case. *}
+  and lambda cases.\<close>
 
 lemma weakening_version2: 
   fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
@@ -114,10 +114,10 @@
   with vc show "\<Gamma>2 \<turnstile> Lam [x].t : T1\<rightarrow>T2" by auto
 qed (auto) (* app case *)
 
-text{* 
+text\<open>
   The original induction principle for the typing relation
   is not strong enough - even this simple lemma fails to be 
-  simple ;o) *}
+  simple ;o)\<close>
 
 lemma weakening_not_straigh_forward: 
   fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
@@ -147,13 +147,13 @@
   have "valid ((x,T1)#\<Gamma>2)" using v2 (* fails *) 
     oops
   
-text{* 
+text\<open>
   To show that the proof with explicit renaming is not simple, 
   here is the proof without using the variable convention. It
   crucially depends on the equivariance property of the typing
   relation.
 
-*}
+\<close>
 
 lemma weakening_with_explicit_renaming: 
   fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
@@ -199,9 +199,9 @@
   ultimately show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" by simp
 qed (auto) (* var and app cases *)
 
-text {*
+text \<open>
   Moral: compare the proof with explicit renamings to weakening_version1 
   and weakening_version2, and imagine you are proving something more substantial 
-  than the weakening lemma. *}
+  than the weakening lemma.\<close>
 
 end
--- a/src/HOL/Nominal/Nominal.thy	Thu May 26 16:57:14 2016 +0200
+++ b/src/HOL/Nominal/Nominal.thy	Thu May 26 17:51:22 2016 +0200
@@ -9,7 +9,7 @@
 declare [[typedef_overloaded]]
 
 
-section {* Permutations *}
+section \<open>Permutations\<close>
 (*======================*)
 
 type_synonym 
@@ -184,13 +184,13 @@
   by (induct s)(auto simp add: perm_char_def)
 
 
-section {* permutation equality *}
+section \<open>permutation equality\<close>
 (*==============================*)
 
 definition prm_eq :: "'x prm \<Rightarrow> 'x prm \<Rightarrow> bool" (" _ \<triangleq> _ " [80,80] 80) where
   "pi1 \<triangleq> pi2 \<longleftrightarrow> (\<forall>a::'x. pi1\<bullet>a = pi2\<bullet>a)"
 
-section {* Support, Freshness and Supports*}
+section \<open>Support, Freshness and Supports\<close>
 (*========================================*)
 definition supp :: "'a \<Rightarrow> ('x set)" where  
    "supp x = {a . (infinite {b . [(a,b)]\<bullet>x \<noteq> x})}"
@@ -365,7 +365,7 @@
   shows "a\<sharp>b"
   by (simp add: fresh_def supp_bool)
 
-text {* Normalization of freshness results; cf.\ @{text nominal_induct} *}
+text \<open>Normalization of freshness results; cf.\ \<open>nominal_induct\<close>\<close>
 lemma fresh_unit_elim: 
   shows "(a\<sharp>() \<Longrightarrow> PROP C) \<equiv> PROP C"
   by (simp add: fresh_def supp_unit)
@@ -385,14 +385,14 @@
   and   "a\<sharp>(x,y) \<Longrightarrow> a\<sharp>y"
   by (simp_all add: fresh_prod)
 
-ML {*
+ML \<open>
   val mksimps_pairs = (@{const_name Nominal.fresh}, @{thms fresh_prodD}) :: mksimps_pairs;
-*}
-declaration {* fn _ =>
+\<close>
+declaration \<open>fn _ =>
   Simplifier.map_ss (Simplifier.set_mksimps (mksimps mksimps_pairs))
-*}
-
-section {* Abstract Properties for Permutations and  Atoms *}
+\<close>
+
+section \<open>Abstract Properties for Permutations and Atoms\<close>
 (*=========================================================*)
 
 (* properties for being a permutation type *)
@@ -425,7 +425,7 @@
 definition
   "fs TYPE('a) TYPE('x) \<equiv> \<forall>(x::'a). finite ((supp x)::'x set)"
 
-section {* Lemmas about the atom-type properties*}
+section \<open>Lemmas about the atom-type properties\<close>
 (*==============================================*)
 
 lemma at1: 
@@ -770,7 +770,7 @@
 apply(rule at_prm_eq_refl)
 done
 
---"there always exists an atom that is not being in a finite set"
+\<comment>"there always exists an atom that is not being in a finite set"
 lemma ex_in_inf:
   fixes   A::"'x set"
   assumes at: "at TYPE('x)"
@@ -785,7 +785,7 @@
   then show ?thesis ..
 qed
 
-text {* there always exists a fresh name for an object with finite support *}
+text \<open>there always exists a fresh name for an object with finite support\<close>
 lemma at_exists_fresh': 
   fixes  x :: "'a"
   assumes at: "at TYPE('x)"
@@ -833,7 +833,7 @@
   then show "\<exists>(b::'x). a\<noteq>b" by blast
 qed
 
---"the at-props imply the pt-props"
+\<comment>"the at-props imply the pt-props"
 lemma at_pt_inst:
   assumes at: "at TYPE('x)"
   shows "pt TYPE('x) TYPE('x)"
@@ -843,7 +843,7 @@
 apply(simp only: prm_eq_def)
 done
 
-section {* finite support properties *}
+section \<open>finite support properties\<close>
 (*===================================*)
 
 lemma fs1:
@@ -907,7 +907,7 @@
 apply(rule fs1[OF fs])
 done
 
-section {* Lemmas about the permutation properties *}
+section \<open>Lemmas about the permutation properties\<close>
 (*=================================================*)
 
 lemma pt1:
@@ -941,7 +941,7 @@
   shows "pi1 \<triangleq> pi2 \<Longrightarrow> (rev pi1)\<bullet>x = (rev pi2)\<bullet>x"
   by (rule pt3[OF pt], simp add: at_prm_rev_eq[OF at])
 
-section {* composition properties *}
+section \<open>composition properties\<close>
 (* ============================== *)
 lemma cp1:
   fixes pi1::"'x prm"
@@ -960,7 +960,7 @@
 apply(rule at_ds8[OF at])
 done
 
-section {* disjointness properties *}
+section \<open>disjointness properties\<close>
 (*=================================*)
 lemma dj_perm_forget:
   fixes pi::"'y prm"
@@ -1006,7 +1006,7 @@
   shows "a\<sharp>b" 
   by (simp add: fresh_def dj_supp[OF dj])
 
-section {* permutation type instances *}
+section \<open>permutation type instances\<close>
 (* ===================================*)
 
 lemma pt_fun_inst:
@@ -1128,7 +1128,7 @@
   apply(simp add: pt3[OF pta] pt3[OF ptb])
   done
 
-section {* further lemmas for permutation types *}
+section \<open>further lemmas for permutation types\<close>
 (*==============================================*)
 
 lemma pt_rev_pi:
@@ -1354,7 +1354,7 @@
 apply(simp add: pt_pi_rev[OF pt, OF at])
 done
 
--- "some helper lemmas for the pt_perm_supp_ineq lemma"
+\<comment> "some helper lemmas for the pt_perm_supp_ineq lemma"
 lemma Collect_permI: 
   fixes pi :: "'x prm"
   and   x  :: "'a"
@@ -1672,7 +1672,7 @@
   shows "pi\<bullet>(c\<sharp>x) = (pi\<bullet>c)\<sharp>(pi\<bullet>x)"
 by (simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp] dj_perm_forget[OF dj] perm_bool)
 
---"the co-set of a finite set is infinte"
+\<comment>"the co-set of a finite set is infinte"
 lemma finite_infinite:
   assumes a: "finite {b::'x. P b}"
   and     b: "infinite (UNIV::'x set)"        
@@ -1787,7 +1787,7 @@
   thus ?thesis by (simp add: pt2[OF pt])
 qed
 
-section {* equivariance for some connectives *}
+section \<open>equivariance for some connectives\<close>
 lemma pt_all_eqvt:
   fixes  pi :: "'x prm"
   and     x :: "'a"
@@ -1833,7 +1833,7 @@
   apply(rule theI'[OF unique])
   done
 
-section {* facts about supports *}
+section \<open>facts about supports\<close>
 (*==============================*)
 
 lemma supports_subset:
@@ -2002,7 +2002,7 @@
   by (simp add: at_fin_set_supp fresh_def at fs)
 
 
-section {* Permutations acting on Functions *}
+section \<open>Permutations acting on Functions\<close>
 (*==========================================*)
 
 lemma pt_fun_app_eq:
@@ -2015,7 +2015,7 @@
   by (simp add: perm_fun_def pt_rev_pi[OF pt, OF at])
 
 
---"sometimes pt_fun_app_eq does too much; this lemma 'corrects it'"
+\<comment>"sometimes pt_fun_app_eq does too much; this lemma 'corrects it'"
 lemma pt_perm:
   fixes x  :: "'a"
   and   pi1 :: "'x prm"
@@ -2056,7 +2056,7 @@
   qed
 qed